text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def get_first_part_id_for_assessment(assessment_id, runtime=None, proxy=None, create=False, bank_id=None):
"""Gets the first part id, which represents the first section, of assessment"""
if create and bank_id is None:
raise NullArgument('Bank Id must be provided for create option')
try:
return get_next_part_id(assessment_id, runtime, proxy, sequestered=False)[0]
except IllegalState:
if create:
return create_first_assessment_section(assessment_id, runtime, proxy, bank_id)
else:
raise | [
"def",
"get_first_part_id_for_assessment",
"(",
"assessment_id",
",",
"runtime",
"=",
"None",
",",
"proxy",
"=",
"None",
",",
"create",
"=",
"False",
",",
"bank_id",
"=",
"None",
")",
":",
"if",
"create",
"and",
"bank_id",
"is",
"None",
":",
"raise",
"NullArgument",
"(",
"'Bank Id must be provided for create option'",
")",
"try",
":",
"return",
"get_next_part_id",
"(",
"assessment_id",
",",
"runtime",
",",
"proxy",
",",
"sequestered",
"=",
"False",
")",
"[",
"0",
"]",
"except",
"IllegalState",
":",
"if",
"create",
":",
"return",
"create_first_assessment_section",
"(",
"assessment_id",
",",
"runtime",
",",
"proxy",
",",
"bank_id",
")",
"else",
":",
"raise"
] | 49.909091 | 28.909091 |
def make_logger(name, stream_type, jobs):
"""Create a logger component.
:param name: name of logger child, i.e. logger will be named
`noodles.<name>`.
:type name: str
:param stream_type: type of the stream that this logger will
be inserted into, should be |pull_map| or |push_map|.
:type stream_type: function
:param jobs: job-keeper instance.
:type jobs: dict, |JobKeeper| or |JobDB|.
:return: a stream.
The resulting stream receives messages and sends them on after
sending an INFO message to the logger. In the case of a |JobMessage|
or |ResultMessage| a meaningful message is composed otherwise the
string representation of the object is passed."""
logger = logging.getLogger('noodles').getChild(name)
# logger.setLevel(logging.DEBUG)
@stream_type
def log_message(message):
if message is EndOfQueue:
logger.info("-end-of-queue-")
elif isinstance(message, JobMessage):
logger.info(
"job %10s: %s", message.key, message.node)
elif isinstance(message, ResultMessage):
job = jobs[message.key]
if is_workflow(message.value):
logger.info(
"result %10s [%s]: %s -> workflow %x", message.key,
job.node, message.status, id(message.value))
else:
value_string = repr(message.value)
logger.info(
"result %10s [%s]: %s -> %s", message.key, job.node,
message.status, _sugar(value_string))
else:
logger.info(
"unknown message: %s", message)
return message
return log_message | [
"def",
"make_logger",
"(",
"name",
",",
"stream_type",
",",
"jobs",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"'noodles'",
")",
".",
"getChild",
"(",
"name",
")",
"# logger.setLevel(logging.DEBUG)",
"@",
"stream_type",
"def",
"log_message",
"(",
"message",
")",
":",
"if",
"message",
"is",
"EndOfQueue",
":",
"logger",
".",
"info",
"(",
"\"-end-of-queue-\"",
")",
"elif",
"isinstance",
"(",
"message",
",",
"JobMessage",
")",
":",
"logger",
".",
"info",
"(",
"\"job %10s: %s\"",
",",
"message",
".",
"key",
",",
"message",
".",
"node",
")",
"elif",
"isinstance",
"(",
"message",
",",
"ResultMessage",
")",
":",
"job",
"=",
"jobs",
"[",
"message",
".",
"key",
"]",
"if",
"is_workflow",
"(",
"message",
".",
"value",
")",
":",
"logger",
".",
"info",
"(",
"\"result %10s [%s]: %s -> workflow %x\"",
",",
"message",
".",
"key",
",",
"job",
".",
"node",
",",
"message",
".",
"status",
",",
"id",
"(",
"message",
".",
"value",
")",
")",
"else",
":",
"value_string",
"=",
"repr",
"(",
"message",
".",
"value",
")",
"logger",
".",
"info",
"(",
"\"result %10s [%s]: %s -> %s\"",
",",
"message",
".",
"key",
",",
"job",
".",
"node",
",",
"message",
".",
"status",
",",
"_sugar",
"(",
"value_string",
")",
")",
"else",
":",
"logger",
".",
"info",
"(",
"\"unknown message: %s\"",
",",
"message",
")",
"return",
"message",
"return",
"log_message"
] | 34.346939 | 19.489796 |
def _tile(self, n):
"""Get the update tile surrounding particle `n` """
pos = self._trans(self.pos[n])
return Tile(pos, pos).pad(self.support_pad) | [
"def",
"_tile",
"(",
"self",
",",
"n",
")",
":",
"pos",
"=",
"self",
".",
"_trans",
"(",
"self",
".",
"pos",
"[",
"n",
"]",
")",
"return",
"Tile",
"(",
"pos",
",",
"pos",
")",
".",
"pad",
"(",
"self",
".",
"support_pad",
")"
] | 41.75 | 8.5 |
def _map_purchase_request_to_func(self, purchase_request_type):
"""Provides appropriate parameters to the on_purchase functions."""
if purchase_request_type in self._intent_view_funcs:
view_func = self._intent_view_funcs[purchase_request_type]
else:
raise NotImplementedError('Request type "{}" not found and no default view specified.'.format(purchase_request_type))
argspec = inspect.getargspec(view_func)
arg_names = argspec.args
arg_values = self._map_params_to_view_args(purchase_request_type, arg_names)
print('_map_purchase_request_to_func', arg_names, arg_values, view_func, purchase_request_type)
return partial(view_func, *arg_values) | [
"def",
"_map_purchase_request_to_func",
"(",
"self",
",",
"purchase_request_type",
")",
":",
"if",
"purchase_request_type",
"in",
"self",
".",
"_intent_view_funcs",
":",
"view_func",
"=",
"self",
".",
"_intent_view_funcs",
"[",
"purchase_request_type",
"]",
"else",
":",
"raise",
"NotImplementedError",
"(",
"'Request type \"{}\" not found and no default view specified.'",
".",
"format",
"(",
"purchase_request_type",
")",
")",
"argspec",
"=",
"inspect",
".",
"getargspec",
"(",
"view_func",
")",
"arg_names",
"=",
"argspec",
".",
"args",
"arg_values",
"=",
"self",
".",
"_map_params_to_view_args",
"(",
"purchase_request_type",
",",
"arg_names",
")",
"print",
"(",
"'_map_purchase_request_to_func'",
",",
"arg_names",
",",
"arg_values",
",",
"view_func",
",",
"purchase_request_type",
")",
"return",
"partial",
"(",
"view_func",
",",
"*",
"arg_values",
")"
] | 52.214286 | 30.714286 |
def start(self):
"""
Start a producer/consumer service
"""
component = Component(self.config, self.handlers)
component.run() | [
"def",
"start",
"(",
"self",
")",
":",
"component",
"=",
"Component",
"(",
"self",
".",
"config",
",",
"self",
".",
"handlers",
")",
"component",
".",
"run",
"(",
")"
] | 26.5 | 9.833333 |
def replace_header(self, _name, _value):
"""Replace a header.
Replace the first matching header found in the message, retaining
header order and case. If no matching header was found, a KeyError is
raised.
"""
_name = _name.lower()
for i, (k, v) in zip(range(len(self._headers)), self._headers):
if k.lower() == _name:
self._headers[i] = self.policy.header_store_parse(k, _value)
break
else:
raise KeyError(_name) | [
"def",
"replace_header",
"(",
"self",
",",
"_name",
",",
"_value",
")",
":",
"_name",
"=",
"_name",
".",
"lower",
"(",
")",
"for",
"i",
",",
"(",
"k",
",",
"v",
")",
"in",
"zip",
"(",
"range",
"(",
"len",
"(",
"self",
".",
"_headers",
")",
")",
",",
"self",
".",
"_headers",
")",
":",
"if",
"k",
".",
"lower",
"(",
")",
"==",
"_name",
":",
"self",
".",
"_headers",
"[",
"i",
"]",
"=",
"self",
".",
"policy",
".",
"header_store_parse",
"(",
"k",
",",
"_value",
")",
"break",
"else",
":",
"raise",
"KeyError",
"(",
"_name",
")"
] | 37.285714 | 19.5 |
def zs_to_ws(zs, MWs):
r'''Converts a list of mole fractions to mass fractions. Requires molecular
weights for all species.
.. math::
w_i = \frac{z_i MW_i}{MW_{avg}}
MW_{avg} = \sum_i z_i MW_i
Parameters
----------
zs : iterable
Mole fractions [-]
MWs : iterable
Molecular weights [g/mol]
Returns
-------
ws : iterable
Mass fractions [-]
Notes
-----
Does not check that the sums add to one. Does not check that inputs are of
the same length.
Examples
--------
>>> zs_to_ws([0.5, 0.5], [10, 20])
[0.3333333333333333, 0.6666666666666666]
'''
Mavg = sum(zi*MWi for zi, MWi in zip(zs, MWs))
ws = [zi*MWi/Mavg for zi, MWi in zip(zs, MWs)]
return ws | [
"def",
"zs_to_ws",
"(",
"zs",
",",
"MWs",
")",
":",
"Mavg",
"=",
"sum",
"(",
"zi",
"*",
"MWi",
"for",
"zi",
",",
"MWi",
"in",
"zip",
"(",
"zs",
",",
"MWs",
")",
")",
"ws",
"=",
"[",
"zi",
"*",
"MWi",
"/",
"Mavg",
"for",
"zi",
",",
"MWi",
"in",
"zip",
"(",
"zs",
",",
"MWs",
")",
"]",
"return",
"ws"
] | 21.882353 | 24.058824 |
def get_allow_repeat_items_metadata(self):
"""get the metadata for allow repeat items"""
metadata = dict(self._allow_repeat_items_metadata)
metadata.update({'existing_id_values': self.my_osid_object_form._my_map['allowRepeatItems']})
return Metadata(**metadata) | [
"def",
"get_allow_repeat_items_metadata",
"(",
"self",
")",
":",
"metadata",
"=",
"dict",
"(",
"self",
".",
"_allow_repeat_items_metadata",
")",
"metadata",
".",
"update",
"(",
"{",
"'existing_id_values'",
":",
"self",
".",
"my_osid_object_form",
".",
"_my_map",
"[",
"'allowRepeatItems'",
"]",
"}",
")",
"return",
"Metadata",
"(",
"*",
"*",
"metadata",
")"
] | 57.8 | 17.2 |
def _processEscapeSequences(replaceText):
"""Replace symbols like \n \\, etc
"""
def _replaceFunc(escapeMatchObject):
char = escapeMatchObject.group(0)[1]
if char in _escapeSequences:
return _escapeSequences[char]
return escapeMatchObject.group(0) # no any replacements, return original value
return _seqReplacer.sub(_replaceFunc, replaceText) | [
"def",
"_processEscapeSequences",
"(",
"replaceText",
")",
":",
"def",
"_replaceFunc",
"(",
"escapeMatchObject",
")",
":",
"char",
"=",
"escapeMatchObject",
".",
"group",
"(",
"0",
")",
"[",
"1",
"]",
"if",
"char",
"in",
"_escapeSequences",
":",
"return",
"_escapeSequences",
"[",
"char",
"]",
"return",
"escapeMatchObject",
".",
"group",
"(",
"0",
")",
"# no any replacements, return original value",
"return",
"_seqReplacer",
".",
"sub",
"(",
"_replaceFunc",
",",
"replaceText",
")"
] | 35.272727 | 13.727273 |
def _get_retention_policy_value(self):
"""
Sets the deletion policy on this resource. The default is 'Retain'.
:return: value for the DeletionPolicy attribute.
"""
if self.RetentionPolicy is None or self.RetentionPolicy.lower() == self.RETAIN.lower():
return self.RETAIN
elif self.RetentionPolicy.lower() == self.DELETE.lower():
return self.DELETE
elif self.RetentionPolicy.lower() not in self.retention_policy_options:
raise InvalidResourceException(self.logical_id,
"'{}' must be one of the following options: {}."
.format('RetentionPolicy', [self.RETAIN, self.DELETE])) | [
"def",
"_get_retention_policy_value",
"(",
"self",
")",
":",
"if",
"self",
".",
"RetentionPolicy",
"is",
"None",
"or",
"self",
".",
"RetentionPolicy",
".",
"lower",
"(",
")",
"==",
"self",
".",
"RETAIN",
".",
"lower",
"(",
")",
":",
"return",
"self",
".",
"RETAIN",
"elif",
"self",
".",
"RetentionPolicy",
".",
"lower",
"(",
")",
"==",
"self",
".",
"DELETE",
".",
"lower",
"(",
")",
":",
"return",
"self",
".",
"DELETE",
"elif",
"self",
".",
"RetentionPolicy",
".",
"lower",
"(",
")",
"not",
"in",
"self",
".",
"retention_policy_options",
":",
"raise",
"InvalidResourceException",
"(",
"self",
".",
"logical_id",
",",
"\"'{}' must be one of the following options: {}.\"",
".",
"format",
"(",
"'RetentionPolicy'",
",",
"[",
"self",
".",
"RETAIN",
",",
"self",
".",
"DELETE",
"]",
")",
")"
] | 49.2 | 26.666667 |
def render_category_averages(obj, normalize_to=100):
"""Renders all the sub-averages for each category."""
context = {'reviewed_item': obj}
ctype = ContentType.objects.get_for_model(obj)
reviews = models.Review.objects.filter(
content_type=ctype, object_id=obj.id)
category_averages = {}
for review in reviews:
review_category_averages = review.get_category_averages(normalize_to)
if review_category_averages:
for category, average in review_category_averages.items():
if category not in category_averages:
category_averages[category] = review_category_averages[
category]
else:
category_averages[category] += review_category_averages[
category]
if reviews and category_averages:
for category, average in category_averages.items():
category_averages[category] = \
category_averages[category] / models.Rating.objects.filter(
category=category, value__isnull=False,
review__content_type=ctype,
review__object_id=obj.id).exclude(value='').count()
else:
category_averages = {}
for category in models.RatingCategory.objects.filter(
counts_for_average=True):
category_averages[category] = 0.0
context.update({'category_averages': category_averages})
return context | [
"def",
"render_category_averages",
"(",
"obj",
",",
"normalize_to",
"=",
"100",
")",
":",
"context",
"=",
"{",
"'reviewed_item'",
":",
"obj",
"}",
"ctype",
"=",
"ContentType",
".",
"objects",
".",
"get_for_model",
"(",
"obj",
")",
"reviews",
"=",
"models",
".",
"Review",
".",
"objects",
".",
"filter",
"(",
"content_type",
"=",
"ctype",
",",
"object_id",
"=",
"obj",
".",
"id",
")",
"category_averages",
"=",
"{",
"}",
"for",
"review",
"in",
"reviews",
":",
"review_category_averages",
"=",
"review",
".",
"get_category_averages",
"(",
"normalize_to",
")",
"if",
"review_category_averages",
":",
"for",
"category",
",",
"average",
"in",
"review_category_averages",
".",
"items",
"(",
")",
":",
"if",
"category",
"not",
"in",
"category_averages",
":",
"category_averages",
"[",
"category",
"]",
"=",
"review_category_averages",
"[",
"category",
"]",
"else",
":",
"category_averages",
"[",
"category",
"]",
"+=",
"review_category_averages",
"[",
"category",
"]",
"if",
"reviews",
"and",
"category_averages",
":",
"for",
"category",
",",
"average",
"in",
"category_averages",
".",
"items",
"(",
")",
":",
"category_averages",
"[",
"category",
"]",
"=",
"category_averages",
"[",
"category",
"]",
"/",
"models",
".",
"Rating",
".",
"objects",
".",
"filter",
"(",
"category",
"=",
"category",
",",
"value__isnull",
"=",
"False",
",",
"review__content_type",
"=",
"ctype",
",",
"review__object_id",
"=",
"obj",
".",
"id",
")",
".",
"exclude",
"(",
"value",
"=",
"''",
")",
".",
"count",
"(",
")",
"else",
":",
"category_averages",
"=",
"{",
"}",
"for",
"category",
"in",
"models",
".",
"RatingCategory",
".",
"objects",
".",
"filter",
"(",
"counts_for_average",
"=",
"True",
")",
":",
"category_averages",
"[",
"category",
"]",
"=",
"0.0",
"context",
".",
"update",
"(",
"{",
"'category_averages'",
":",
"category_averages",
"}",
")",
"return",
"context"
] | 47.225806 | 15.387097 |
def run_in_order(l, show_output=True, show_err=True, ignore_err=False,
args=(), **kwargs):
'''
Processes each element of l in order:
if it is a string: execute it as a shell command
elif it is a callable, call it with *args, **kwargs
l-->list: Each elem is either a string (shell command) or callable
Any other type is ignored
show_output-->boolean: Show stdout of shell commands
Does not affect callables
show_err-->Boolean: Show stderr of shell commands
Does not affect callables
ignore_err-->boolean: Continue after exception or shell command
wth return code != 0
Returns-->Nothing
if ignore_err == False, exceptions are re-raised, hence shown
------------------------------------------------------------------
show_output show_err ignore_err stdout stderr exception continue
trace
------------------------------------------------------------------
True True False SHOW SHOW SHOW NO
True False False SHOW HIDE SHOW NO
False True False HIDE SHOW SHOW NO
False False False HIDE HIDE SHOW NO
True True True SHOW SHOW SHOW YES
True False True SHOW HIDE HIDE YES
False True True HIDE SHOW SHOW YES
False False True HIDE HIDE HIDE YES
------------------------------------------------------------------
----------- DEFAULT ----------- SHOW SHOW SHOW NO
------------------------------------------------------------------
'''
# Set defaults
if show_output is None:
show_output = True
if show_err is None:
show_err = True
if ignore_err is None:
ignore_err = False
if args is None:
args = ()
for c in l:
try:
if isinstance(c, str):
devnull = open(os.devnull, 'w')
if not show_err:
stderr = devnull
else:
stderr = None
if not show_output:
stdout = devnull
else:
stdout = None
retcode = subprocess.call(
c, shell=True, stdout=stdout, stderr=stderr)
if not ignore_err and retcode != 0:
break
elif hasattr(c, '__call__'):
c(*args, **kwargs)
except:
if not ignore_err:
raise
if show_err:
sys.stderr.write(traceback.format_exc()) | [
"def",
"run_in_order",
"(",
"l",
",",
"show_output",
"=",
"True",
",",
"show_err",
"=",
"True",
",",
"ignore_err",
"=",
"False",
",",
"args",
"=",
"(",
")",
",",
"*",
"*",
"kwargs",
")",
":",
"# Set defaults",
"if",
"show_output",
"is",
"None",
":",
"show_output",
"=",
"True",
"if",
"show_err",
"is",
"None",
":",
"show_err",
"=",
"True",
"if",
"ignore_err",
"is",
"None",
":",
"ignore_err",
"=",
"False",
"if",
"args",
"is",
"None",
":",
"args",
"=",
"(",
")",
"for",
"c",
"in",
"l",
":",
"try",
":",
"if",
"isinstance",
"(",
"c",
",",
"str",
")",
":",
"devnull",
"=",
"open",
"(",
"os",
".",
"devnull",
",",
"'w'",
")",
"if",
"not",
"show_err",
":",
"stderr",
"=",
"devnull",
"else",
":",
"stderr",
"=",
"None",
"if",
"not",
"show_output",
":",
"stdout",
"=",
"devnull",
"else",
":",
"stdout",
"=",
"None",
"retcode",
"=",
"subprocess",
".",
"call",
"(",
"c",
",",
"shell",
"=",
"True",
",",
"stdout",
"=",
"stdout",
",",
"stderr",
"=",
"stderr",
")",
"if",
"not",
"ignore_err",
"and",
"retcode",
"!=",
"0",
":",
"break",
"elif",
"hasattr",
"(",
"c",
",",
"'__call__'",
")",
":",
"c",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
":",
"if",
"not",
"ignore_err",
":",
"raise",
"if",
"show_err",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"traceback",
".",
"format_exc",
"(",
")",
")"
] | 39.782609 | 19.086957 |
def path(self, which=None):
"""Extend ``nailgun.entity_mixins.Entity.path``.
The format of the returned path depends on the value of ``which``:
smart_class_parameters
/api/puppetclasses/:puppetclass_id/smart_class_parameters
Otherwise, call ``super``.
"""
if which in ('smart_class_parameters', 'smart_variables'):
return '{0}/{1}'.format(
super(PuppetClass, self).path(which='self'),
which
)
return super(PuppetClass, self).path(which) | [
"def",
"path",
"(",
"self",
",",
"which",
"=",
"None",
")",
":",
"if",
"which",
"in",
"(",
"'smart_class_parameters'",
",",
"'smart_variables'",
")",
":",
"return",
"'{0}/{1}'",
".",
"format",
"(",
"super",
"(",
"PuppetClass",
",",
"self",
")",
".",
"path",
"(",
"which",
"=",
"'self'",
")",
",",
"which",
")",
"return",
"super",
"(",
"PuppetClass",
",",
"self",
")",
".",
"path",
"(",
"which",
")"
] | 34.25 | 19.9375 |
def process_mavlink_packet(self, m):
'''handle an incoming mavlink packet'''
mtype = m.get_type()
# if you add processing for an mtype here, remember to add it
# to mavlink_packet, above
if mtype in ['WAYPOINT_COUNT','MISSION_COUNT']:
if (self.num_wps_expected == 0):
#I haven't asked for WPs, or these messages are duplicates
#of msgs I've already received.
self.console.error("No waypoint load started (from Editor).")
#I only clear the mission in the Editor if this was a read event
elif (self.num_wps_expected == -1):
self.gui_event_queue.put(MissionEditorEvent(
me_event.MEGE_CLEAR_MISS_TABLE))
self.num_wps_expected = m.count
self.wps_received = {}
if (m.count > 0):
self.gui_event_queue.put(MissionEditorEvent(
me_event.MEGE_ADD_MISS_TABLE_ROWS,num_rows=m.count-1))
#write has been sent by the mission editor:
elif (self.num_wps_expected > 1):
if (m.count != self.num_wps_expected):
self.console.error("Unepxected waypoint count from APM after write (Editor)")
#since this is a write operation from the Editor there
#should be no need to update number of table rows
elif mtype in ['WAYPOINT', 'MISSION_ITEM']:
#still expecting wps?
if (len(self.wps_received) < self.num_wps_expected):
#if we haven't already received this wp, write it to the GUI:
if (m.seq not in self.wps_received.keys()):
self.gui_event_queue.put(MissionEditorEvent(
me_event.MEGE_SET_MISS_ITEM,
num=m.seq,command=m.command,param1=m.param1,
param2=m.param2,param3=m.param3,param4=m.param4,
lat=m.x,lon=m.y,alt=m.z,frame=m.frame))
self.wps_received[m.seq] = True | [
"def",
"process_mavlink_packet",
"(",
"self",
",",
"m",
")",
":",
"mtype",
"=",
"m",
".",
"get_type",
"(",
")",
"# if you add processing for an mtype here, remember to add it",
"# to mavlink_packet, above",
"if",
"mtype",
"in",
"[",
"'WAYPOINT_COUNT'",
",",
"'MISSION_COUNT'",
"]",
":",
"if",
"(",
"self",
".",
"num_wps_expected",
"==",
"0",
")",
":",
"#I haven't asked for WPs, or these messages are duplicates",
"#of msgs I've already received.",
"self",
".",
"console",
".",
"error",
"(",
"\"No waypoint load started (from Editor).\"",
")",
"#I only clear the mission in the Editor if this was a read event",
"elif",
"(",
"self",
".",
"num_wps_expected",
"==",
"-",
"1",
")",
":",
"self",
".",
"gui_event_queue",
".",
"put",
"(",
"MissionEditorEvent",
"(",
"me_event",
".",
"MEGE_CLEAR_MISS_TABLE",
")",
")",
"self",
".",
"num_wps_expected",
"=",
"m",
".",
"count",
"self",
".",
"wps_received",
"=",
"{",
"}",
"if",
"(",
"m",
".",
"count",
">",
"0",
")",
":",
"self",
".",
"gui_event_queue",
".",
"put",
"(",
"MissionEditorEvent",
"(",
"me_event",
".",
"MEGE_ADD_MISS_TABLE_ROWS",
",",
"num_rows",
"=",
"m",
".",
"count",
"-",
"1",
")",
")",
"#write has been sent by the mission editor:",
"elif",
"(",
"self",
".",
"num_wps_expected",
">",
"1",
")",
":",
"if",
"(",
"m",
".",
"count",
"!=",
"self",
".",
"num_wps_expected",
")",
":",
"self",
".",
"console",
".",
"error",
"(",
"\"Unepxected waypoint count from APM after write (Editor)\"",
")",
"#since this is a write operation from the Editor there",
"#should be no need to update number of table rows",
"elif",
"mtype",
"in",
"[",
"'WAYPOINT'",
",",
"'MISSION_ITEM'",
"]",
":",
"#still expecting wps?",
"if",
"(",
"len",
"(",
"self",
".",
"wps_received",
")",
"<",
"self",
".",
"num_wps_expected",
")",
":",
"#if we haven't already received this wp, write it to the GUI:",
"if",
"(",
"m",
".",
"seq",
"not",
"in",
"self",
".",
"wps_received",
".",
"keys",
"(",
")",
")",
":",
"self",
".",
"gui_event_queue",
".",
"put",
"(",
"MissionEditorEvent",
"(",
"me_event",
".",
"MEGE_SET_MISS_ITEM",
",",
"num",
"=",
"m",
".",
"seq",
",",
"command",
"=",
"m",
".",
"command",
",",
"param1",
"=",
"m",
".",
"param1",
",",
"param2",
"=",
"m",
".",
"param2",
",",
"param3",
"=",
"m",
".",
"param3",
",",
"param4",
"=",
"m",
".",
"param4",
",",
"lat",
"=",
"m",
".",
"x",
",",
"lon",
"=",
"m",
".",
"y",
",",
"alt",
"=",
"m",
".",
"z",
",",
"frame",
"=",
"m",
".",
"frame",
")",
")",
"self",
".",
"wps_received",
"[",
"m",
".",
"seq",
"]",
"=",
"True"
] | 51.15 | 21.05 |
def describe_identity_pools(IdentityPoolName, IdentityPoolId=None,
region=None, key=None, keyid=None, profile=None):
'''
Given an identity pool name, (optionally if an identity pool id is given,
the given name will be ignored)
Returns a list of matched identity pool name's pool properties
CLI Example:
.. code-block:: bash
salt myminion boto_cognitoidentity.describe_identity_pools my_id_pool_name
salt myminion boto_cognitoidentity.describe_identity_pools '' IdentityPoolId=my_id_pool_id
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
ids = _find_identity_pool_ids(IdentityPoolName, IdentityPoolId, conn)
if ids:
results = []
for pool_id in ids:
response = conn.describe_identity_pool(IdentityPoolId=pool_id)
response.pop('ResponseMetadata', None)
results.append(response)
return {'identity_pools': results}
else:
return {'identity_pools': None}
except ClientError as e:
return {'error': __utils__['boto3.get_error'](e)} | [
"def",
"describe_identity_pools",
"(",
"IdentityPoolName",
",",
"IdentityPoolId",
"=",
"None",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"try",
":",
"ids",
"=",
"_find_identity_pool_ids",
"(",
"IdentityPoolName",
",",
"IdentityPoolId",
",",
"conn",
")",
"if",
"ids",
":",
"results",
"=",
"[",
"]",
"for",
"pool_id",
"in",
"ids",
":",
"response",
"=",
"conn",
".",
"describe_identity_pool",
"(",
"IdentityPoolId",
"=",
"pool_id",
")",
"response",
".",
"pop",
"(",
"'ResponseMetadata'",
",",
"None",
")",
"results",
".",
"append",
"(",
"response",
")",
"return",
"{",
"'identity_pools'",
":",
"results",
"}",
"else",
":",
"return",
"{",
"'identity_pools'",
":",
"None",
"}",
"except",
"ClientError",
"as",
"e",
":",
"return",
"{",
"'error'",
":",
"__utils__",
"[",
"'boto3.get_error'",
"]",
"(",
"e",
")",
"}"
] | 34.242424 | 27.69697 |
def recursive_symlink_dirs(source_d, destination_d):
'''
Create dirs and symlink all files recursively from source_d, ignoring
errors (e.g. existing files)
'''
func = os.symlink
if os.name == 'nt':
# NOTE: need to verify that default perms only allow admins to create
# symlinks on Windows
func = shutil.copy
if os.path.exists(destination_d):
os.rmdir(destination_d)
shutil.copytree(source_d, destination_d, copy_function=func) | [
"def",
"recursive_symlink_dirs",
"(",
"source_d",
",",
"destination_d",
")",
":",
"func",
"=",
"os",
".",
"symlink",
"if",
"os",
".",
"name",
"==",
"'nt'",
":",
"# NOTE: need to verify that default perms only allow admins to create",
"# symlinks on Windows",
"func",
"=",
"shutil",
".",
"copy",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"destination_d",
")",
":",
"os",
".",
"rmdir",
"(",
"destination_d",
")",
"shutil",
".",
"copytree",
"(",
"source_d",
",",
"destination_d",
",",
"copy_function",
"=",
"func",
")"
] | 36.846154 | 19.461538 |
def intuition(args):
'''
Main simulation wrapper
Load the configuration, run the engine and return the analyze.
'''
# Use the provided context builder to fill:
# - config: General behavior
# - strategy: Modules properties
# - market: The universe we will trade on
with setup.Context(args['context']) as context:
# Backtest or live engine.
# Registers configuration and setups data client
simulation = Simulation()
# Intuition building blocks
modules = context['config']['modules']
# Prepare benchmark, timezone, trading calendar
simulation.configure_environment(
context['config']['index'][-1],
context['market'].benchmark,
context['market'].timezone)
# Wire togetether modules and initialize them
simulation.build(args['session'],
modules,
context['strategy'])
# Build data generator
# NOTE How can I use several sources ?
data = {'universe': context['market'],
'index': context['config']['index']}
# Add user settings
data.update(context['strategy']['data'])
# Load backtest and / or live module(s)
if 'backtest' in modules:
data['backtest'] = utils.intuition_module(modules['backtest'])
if 'live' in modules:
data['live'] = utils.intuition_module(modules['live'])
# Run the simulation and return an intuition.core.analyzes object
return simulation(datafeed.HybridDataFactory(**data), args['bot']) | [
"def",
"intuition",
"(",
"args",
")",
":",
"# Use the provided context builder to fill:",
"# - config: General behavior",
"# - strategy: Modules properties",
"# - market: The universe we will trade on",
"with",
"setup",
".",
"Context",
"(",
"args",
"[",
"'context'",
"]",
")",
"as",
"context",
":",
"# Backtest or live engine.",
"# Registers configuration and setups data client",
"simulation",
"=",
"Simulation",
"(",
")",
"# Intuition building blocks",
"modules",
"=",
"context",
"[",
"'config'",
"]",
"[",
"'modules'",
"]",
"# Prepare benchmark, timezone, trading calendar",
"simulation",
".",
"configure_environment",
"(",
"context",
"[",
"'config'",
"]",
"[",
"'index'",
"]",
"[",
"-",
"1",
"]",
",",
"context",
"[",
"'market'",
"]",
".",
"benchmark",
",",
"context",
"[",
"'market'",
"]",
".",
"timezone",
")",
"# Wire togetether modules and initialize them",
"simulation",
".",
"build",
"(",
"args",
"[",
"'session'",
"]",
",",
"modules",
",",
"context",
"[",
"'strategy'",
"]",
")",
"# Build data generator",
"# NOTE How can I use several sources ?",
"data",
"=",
"{",
"'universe'",
":",
"context",
"[",
"'market'",
"]",
",",
"'index'",
":",
"context",
"[",
"'config'",
"]",
"[",
"'index'",
"]",
"}",
"# Add user settings",
"data",
".",
"update",
"(",
"context",
"[",
"'strategy'",
"]",
"[",
"'data'",
"]",
")",
"# Load backtest and / or live module(s)",
"if",
"'backtest'",
"in",
"modules",
":",
"data",
"[",
"'backtest'",
"]",
"=",
"utils",
".",
"intuition_module",
"(",
"modules",
"[",
"'backtest'",
"]",
")",
"if",
"'live'",
"in",
"modules",
":",
"data",
"[",
"'live'",
"]",
"=",
"utils",
".",
"intuition_module",
"(",
"modules",
"[",
"'live'",
"]",
")",
"# Run the simulation and return an intuition.core.analyzes object",
"return",
"simulation",
"(",
"datafeed",
".",
"HybridDataFactory",
"(",
"*",
"*",
"data",
")",
",",
"args",
"[",
"'bot'",
"]",
")"
] | 35.977273 | 16.613636 |
def instance_norm(attrs, inputs, proto_obj):
"""Instance Normalization."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'epsilon' : 'eps'})
new_attrs['eps'] = attrs.get('epsilon', 1e-5)
return 'InstanceNorm', new_attrs, inputs | [
"def",
"instance_norm",
"(",
"attrs",
",",
"inputs",
",",
"proto_obj",
")",
":",
"new_attrs",
"=",
"translation_utils",
".",
"_fix_attribute_names",
"(",
"attrs",
",",
"{",
"'epsilon'",
":",
"'eps'",
"}",
")",
"new_attrs",
"[",
"'eps'",
"]",
"=",
"attrs",
".",
"get",
"(",
"'epsilon'",
",",
"1e-5",
")",
"return",
"'InstanceNorm'",
",",
"new_attrs",
",",
"inputs"
] | 50.4 | 11.8 |
def callAfter(func, *args, **kwargs):
"""call a function on the main thread (async)"""
pool = NSAutoreleasePool.alloc().init()
obj = PyObjCAppHelperCaller_wrap.alloc().initWithArgs_((func, args, kwargs))
obj.callAfter_(None)
del obj
del pool | [
"def",
"callAfter",
"(",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"pool",
"=",
"NSAutoreleasePool",
".",
"alloc",
"(",
")",
".",
"init",
"(",
")",
"obj",
"=",
"PyObjCAppHelperCaller_wrap",
".",
"alloc",
"(",
")",
".",
"initWithArgs_",
"(",
"(",
"func",
",",
"args",
",",
"kwargs",
")",
")",
"obj",
".",
"callAfter_",
"(",
"None",
")",
"del",
"obj",
"del",
"pool"
] | 37 | 17 |
def touch_log(log, cwd='.'):
"""
Touches the log file. Creates if not exists OR updates the modification date if exists.
:param log:
:return: nothing
"""
logfile = '%s/%s' % (cwd, log)
with open(logfile, 'a'):
os.utime(logfile, None) | [
"def",
"touch_log",
"(",
"log",
",",
"cwd",
"=",
"'.'",
")",
":",
"logfile",
"=",
"'%s/%s'",
"%",
"(",
"cwd",
",",
"log",
")",
"with",
"open",
"(",
"logfile",
",",
"'a'",
")",
":",
"os",
".",
"utime",
"(",
"logfile",
",",
"None",
")"
] | 27 | 16.111111 |
def _cache_metrics_metadata(self, instance):
"""
Get all the performance counters metadata meaning name/group/description...
from the server instance, attached with the corresponding ID
"""
# ## <TEST-INSTRUMENTATION>
t = Timer()
# ## </TEST-INSTRUMENTATION>
i_key = self._instance_key(instance)
self.metadata_cache.init_instance(i_key)
self.log.info("Warming metrics metadata cache for instance {}".format(i_key))
server_instance = self._get_server_instance(instance)
perfManager = server_instance.content.perfManager
custom_tags = instance.get('tags', [])
new_metadata = {}
metric_ids = []
# Use old behaviour with metrics to collect defined by our constants
if self.in_compatibility_mode(instance, log_warning=True):
for counter in perfManager.perfCounter:
metric_name = self.format_metric_name(counter, compatibility=True)
new_metadata[counter.key] = {'name': metric_name, 'unit': counter.unitInfo.key}
# Build the list of metrics we will want to collect
if instance.get("all_metrics") or metric_name in BASIC_METRICS:
metric_ids.append(vim.PerformanceManager.MetricId(counterId=counter.key, instance="*"))
else:
collection_level = instance.get("collection_level", 1)
for counter in perfManager.QueryPerfCounterByLevel(collection_level):
new_metadata[counter.key] = {"name": self.format_metric_name(counter), "unit": counter.unitInfo.key}
# Build the list of metrics we will want to collect
metric_ids.append(vim.PerformanceManager.MetricId(counterId=counter.key, instance="*"))
self.log.info("Finished metadata collection for instance {}".format(i_key))
# Reset metadata
self.metadata_cache.set_metadata(i_key, new_metadata)
self.metadata_cache.set_metric_ids(i_key, metric_ids)
self.cache_config.set_last(CacheConfig.Metadata, i_key, time.time())
# ## <TEST-INSTRUMENTATION>
self.histogram('datadog.agent.vsphere.metric_metadata_collection.time', t.total(), tags=custom_tags) | [
"def",
"_cache_metrics_metadata",
"(",
"self",
",",
"instance",
")",
":",
"# ## <TEST-INSTRUMENTATION>",
"t",
"=",
"Timer",
"(",
")",
"# ## </TEST-INSTRUMENTATION>",
"i_key",
"=",
"self",
".",
"_instance_key",
"(",
"instance",
")",
"self",
".",
"metadata_cache",
".",
"init_instance",
"(",
"i_key",
")",
"self",
".",
"log",
".",
"info",
"(",
"\"Warming metrics metadata cache for instance {}\"",
".",
"format",
"(",
"i_key",
")",
")",
"server_instance",
"=",
"self",
".",
"_get_server_instance",
"(",
"instance",
")",
"perfManager",
"=",
"server_instance",
".",
"content",
".",
"perfManager",
"custom_tags",
"=",
"instance",
".",
"get",
"(",
"'tags'",
",",
"[",
"]",
")",
"new_metadata",
"=",
"{",
"}",
"metric_ids",
"=",
"[",
"]",
"# Use old behaviour with metrics to collect defined by our constants",
"if",
"self",
".",
"in_compatibility_mode",
"(",
"instance",
",",
"log_warning",
"=",
"True",
")",
":",
"for",
"counter",
"in",
"perfManager",
".",
"perfCounter",
":",
"metric_name",
"=",
"self",
".",
"format_metric_name",
"(",
"counter",
",",
"compatibility",
"=",
"True",
")",
"new_metadata",
"[",
"counter",
".",
"key",
"]",
"=",
"{",
"'name'",
":",
"metric_name",
",",
"'unit'",
":",
"counter",
".",
"unitInfo",
".",
"key",
"}",
"# Build the list of metrics we will want to collect",
"if",
"instance",
".",
"get",
"(",
"\"all_metrics\"",
")",
"or",
"metric_name",
"in",
"BASIC_METRICS",
":",
"metric_ids",
".",
"append",
"(",
"vim",
".",
"PerformanceManager",
".",
"MetricId",
"(",
"counterId",
"=",
"counter",
".",
"key",
",",
"instance",
"=",
"\"*\"",
")",
")",
"else",
":",
"collection_level",
"=",
"instance",
".",
"get",
"(",
"\"collection_level\"",
",",
"1",
")",
"for",
"counter",
"in",
"perfManager",
".",
"QueryPerfCounterByLevel",
"(",
"collection_level",
")",
":",
"new_metadata",
"[",
"counter",
".",
"key",
"]",
"=",
"{",
"\"name\"",
":",
"self",
".",
"format_metric_name",
"(",
"counter",
")",
",",
"\"unit\"",
":",
"counter",
".",
"unitInfo",
".",
"key",
"}",
"# Build the list of metrics we will want to collect",
"metric_ids",
".",
"append",
"(",
"vim",
".",
"PerformanceManager",
".",
"MetricId",
"(",
"counterId",
"=",
"counter",
".",
"key",
",",
"instance",
"=",
"\"*\"",
")",
")",
"self",
".",
"log",
".",
"info",
"(",
"\"Finished metadata collection for instance {}\"",
".",
"format",
"(",
"i_key",
")",
")",
"# Reset metadata",
"self",
".",
"metadata_cache",
".",
"set_metadata",
"(",
"i_key",
",",
"new_metadata",
")",
"self",
".",
"metadata_cache",
".",
"set_metric_ids",
"(",
"i_key",
",",
"metric_ids",
")",
"self",
".",
"cache_config",
".",
"set_last",
"(",
"CacheConfig",
".",
"Metadata",
",",
"i_key",
",",
"time",
".",
"time",
"(",
")",
")",
"# ## <TEST-INSTRUMENTATION>",
"self",
".",
"histogram",
"(",
"'datadog.agent.vsphere.metric_metadata_collection.time'",
",",
"t",
".",
"total",
"(",
")",
",",
"tags",
"=",
"custom_tags",
")"
] | 52.690476 | 28.833333 |
def mutual_info_score(self, reference_clusters):
"""
Calculates the MI (mutual information) w.r.t. the reference clusters (explicit evaluation)
:param reference_clusters: Clusters that are to be used as reference
:return: The resulting MI score.
"""
return mutual_info_score(self.get_labels(self), self.get_labels(reference_clusters)) | [
"def",
"mutual_info_score",
"(",
"self",
",",
"reference_clusters",
")",
":",
"return",
"mutual_info_score",
"(",
"self",
".",
"get_labels",
"(",
"self",
")",
",",
"self",
".",
"get_labels",
"(",
"reference_clusters",
")",
")"
] | 47 | 24.25 |
def get_doc(self, objtxt):
"""Get object documentation dictionary"""
obj, valid = self._eval(objtxt)
if valid:
return getdoc(obj) | [
"def",
"get_doc",
"(",
"self",
",",
"objtxt",
")",
":",
"obj",
",",
"valid",
"=",
"self",
".",
"_eval",
"(",
"objtxt",
")",
"if",
"valid",
":",
"return",
"getdoc",
"(",
"obj",
")"
] | 33 | 9 |
def idxmax(self,skipna=True, axis=0):
"""
Get the index of the max value in a column or row
:param bool skipna: If True (default), then NAs are ignored during the search. Otherwise presence
of NAs renders the entire result NA.
:param int axis: Direction of finding the max index. If 0 (default), then the max index is searched columnwise, and the
result is a frame with 1 row and number of columns as in the original frame. If 1, then the max index is searched
rowwise and the result is a frame with 1 column, and number of rows equal to the number of rows in the original frame.
:returns: either a list of max index values per-column or an H2OFrame containing max index values
per-row from the original frame.
"""
return H2OFrame._expr(expr=ExprNode("which.max", self, skipna, axis)) | [
"def",
"idxmax",
"(",
"self",
",",
"skipna",
"=",
"True",
",",
"axis",
"=",
"0",
")",
":",
"return",
"H2OFrame",
".",
"_expr",
"(",
"expr",
"=",
"ExprNode",
"(",
"\"which.max\"",
",",
"self",
",",
"skipna",
",",
"axis",
")",
")"
] | 67.923077 | 39 |
def bind_top_down(lower, upper, __fval=None, **fval):
"""Bind 2 layers for building.
When the upper layer is added as a payload of the lower layer, all the arguments # noqa: E501
will be applied to them.
ex:
>>> bind_top_down(Ether, SNAP, type=0x1234)
>>> Ether()/SNAP()
<Ether type=0x1234 |<SNAP |>>
"""
if __fval is not None:
fval.update(__fval)
upper._overload_fields = upper._overload_fields.copy()
upper._overload_fields[lower] = fval | [
"def",
"bind_top_down",
"(",
"lower",
",",
"upper",
",",
"__fval",
"=",
"None",
",",
"*",
"*",
"fval",
")",
":",
"if",
"__fval",
"is",
"not",
"None",
":",
"fval",
".",
"update",
"(",
"__fval",
")",
"upper",
".",
"_overload_fields",
"=",
"upper",
".",
"_overload_fields",
".",
"copy",
"(",
")",
"upper",
".",
"_overload_fields",
"[",
"lower",
"]",
"=",
"fval"
] | 35.285714 | 16.214286 |
def build_filter(self, filter):
"""
Tries to build a :class:`filter.Filter` instance from the given filter.
Raises ValueError if the :class:`filter.Filter` object can't be build
from the given filter.
"""
try:
self.filter = Filter.from_string(filter, self.limit)
except ValueError:
raise
return self | [
"def",
"build_filter",
"(",
"self",
",",
"filter",
")",
":",
"try",
":",
"self",
".",
"filter",
"=",
"Filter",
".",
"from_string",
"(",
"filter",
",",
"self",
".",
"limit",
")",
"except",
"ValueError",
":",
"raise",
"return",
"self"
] | 29 | 21.923077 |
def notification_message(self, title, content, icon=""):
"""This function sends "javascript" message to the client, that executes its content.
In this particular code, a notification message is shown
"""
code = """
var options = {
body: "%(content)s",
icon: "%(icon)s"
}
if (!("Notification" in window)) {
alert("%(content)s");
}else if (Notification.permission === "granted") {
var notification = new Notification("%(title)s", options);
}else if (Notification.permission !== 'denied') {
Notification.requestPermission(function (permission) {
if (permission === "granted") {
var notification = new Notification("%(title)s", options);
}
});
}
""" % {'title': title, 'content': content, 'icon': icon}
self.execute_javascript(code) | [
"def",
"notification_message",
"(",
"self",
",",
"title",
",",
"content",
",",
"icon",
"=",
"\"\"",
")",
":",
"code",
"=",
"\"\"\"\n var options = {\n body: \"%(content)s\",\n icon: \"%(icon)s\"\n }\n if (!(\"Notification\" in window)) {\n alert(\"%(content)s\");\n }else if (Notification.permission === \"granted\") {\n var notification = new Notification(\"%(title)s\", options);\n }else if (Notification.permission !== 'denied') {\n Notification.requestPermission(function (permission) {\n if (permission === \"granted\") {\n var notification = new Notification(\"%(title)s\", options);\n }\n });\n }\n \"\"\"",
"%",
"{",
"'title'",
":",
"title",
",",
"'content'",
":",
"content",
",",
"'icon'",
":",
"icon",
"}",
"self",
".",
"execute_javascript",
"(",
"code",
")"
] | 45 | 15.181818 |
def files(self):
"""Return list of files in root directory"""
self._printer('\tFiles Walk')
for directory in self.directory:
for path in os.listdir(directory):
full_path = os.path.join(directory, path)
if os.path.isfile(full_path):
if not path.startswith('.'):
self.filepaths.append(full_path)
return self._get_filepaths() | [
"def",
"files",
"(",
"self",
")",
":",
"self",
".",
"_printer",
"(",
"'\\tFiles Walk'",
")",
"for",
"directory",
"in",
"self",
".",
"directory",
":",
"for",
"path",
"in",
"os",
".",
"listdir",
"(",
"directory",
")",
":",
"full_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"path",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"full_path",
")",
":",
"if",
"not",
"path",
".",
"startswith",
"(",
"'.'",
")",
":",
"self",
".",
"filepaths",
".",
"append",
"(",
"full_path",
")",
"return",
"self",
".",
"_get_filepaths",
"(",
")"
] | 43.3 | 8.3 |
def go_to_line(self):
"""
Moves current **Script_Editor_tabWidget** Widget tab Model editor cursor to user defined line.
:return: Method success.
:rtype: bool
:note: May require user interaction.
"""
editor = self.get_current_editor()
if not editor:
return False
line, state = QInputDialog.getInt(self, "Goto Line Number", "Line number:", min=1)
if not state:
return False
LOGGER.debug("> Chosen line number: '{0}'.".format(line))
return editor.go_to_line(line) | [
"def",
"go_to_line",
"(",
"self",
")",
":",
"editor",
"=",
"self",
".",
"get_current_editor",
"(",
")",
"if",
"not",
"editor",
":",
"return",
"False",
"line",
",",
"state",
"=",
"QInputDialog",
".",
"getInt",
"(",
"self",
",",
"\"Goto Line Number\"",
",",
"\"Line number:\"",
",",
"min",
"=",
"1",
")",
"if",
"not",
"state",
":",
"return",
"False",
"LOGGER",
".",
"debug",
"(",
"\"> Chosen line number: '{0}'.\"",
".",
"format",
"(",
"line",
")",
")",
"return",
"editor",
".",
"go_to_line",
"(",
"line",
")"
] | 28.35 | 23.05 |
def _begin_write(session: UpdateSession,
loop: asyncio.AbstractEventLoop,
rootfs_file_path: str):
""" Start the write process. """
session.set_progress(0)
session.set_stage(Stages.WRITING)
write_future = asyncio.ensure_future(loop.run_in_executor(
None, file_actions.write_update, rootfs_file_path,
session.set_progress))
def write_done(fut):
exc = fut.exception()
if exc:
session.set_error(getattr(exc, 'short', str(type(exc))),
str(exc))
else:
session.set_stage(Stages.DONE)
write_future.add_done_callback(write_done) | [
"def",
"_begin_write",
"(",
"session",
":",
"UpdateSession",
",",
"loop",
":",
"asyncio",
".",
"AbstractEventLoop",
",",
"rootfs_file_path",
":",
"str",
")",
":",
"session",
".",
"set_progress",
"(",
"0",
")",
"session",
".",
"set_stage",
"(",
"Stages",
".",
"WRITING",
")",
"write_future",
"=",
"asyncio",
".",
"ensure_future",
"(",
"loop",
".",
"run_in_executor",
"(",
"None",
",",
"file_actions",
".",
"write_update",
",",
"rootfs_file_path",
",",
"session",
".",
"set_progress",
")",
")",
"def",
"write_done",
"(",
"fut",
")",
":",
"exc",
"=",
"fut",
".",
"exception",
"(",
")",
"if",
"exc",
":",
"session",
".",
"set_error",
"(",
"getattr",
"(",
"exc",
",",
"'short'",
",",
"str",
"(",
"type",
"(",
"exc",
")",
")",
")",
",",
"str",
"(",
"exc",
")",
")",
"else",
":",
"session",
".",
"set_stage",
"(",
"Stages",
".",
"DONE",
")",
"write_future",
".",
"add_done_callback",
"(",
"write_done",
")"
] | 34.473684 | 14.263158 |
def pictures(self):
"""list[Picture]: List of embedded pictures"""
return [b for b in self.metadata_blocks if b.code == Picture.code] | [
"def",
"pictures",
"(",
"self",
")",
":",
"return",
"[",
"b",
"for",
"b",
"in",
"self",
".",
"metadata_blocks",
"if",
"b",
".",
"code",
"==",
"Picture",
".",
"code",
"]"
] | 36.75 | 23.75 |
def snooze(self, requester, duration):
"""Snooze incident.
:param requester: The email address of the individual requesting snooze.
"""
path = '{0}/{1}/{2}'.format(self.collection.name, self.id, 'snooze')
data = {"duration": duration}
extra_headers = {"From": requester}
return self.pagerduty.request('POST', path, data=_json_dumper(data), extra_headers=extra_headers) | [
"def",
"snooze",
"(",
"self",
",",
"requester",
",",
"duration",
")",
":",
"path",
"=",
"'{0}/{1}/{2}'",
".",
"format",
"(",
"self",
".",
"collection",
".",
"name",
",",
"self",
".",
"id",
",",
"'snooze'",
")",
"data",
"=",
"{",
"\"duration\"",
":",
"duration",
"}",
"extra_headers",
"=",
"{",
"\"From\"",
":",
"requester",
"}",
"return",
"self",
".",
"pagerduty",
".",
"request",
"(",
"'POST'",
",",
"path",
",",
"data",
"=",
"_json_dumper",
"(",
"data",
")",
",",
"extra_headers",
"=",
"extra_headers",
")"
] | 52.125 | 18.625 |
def lines_from_file(path, as_interned=False, encoding=None):
"""
Create a list of file lines from a given filepath.
Args:
path (str): File path
as_interned (bool): List of "interned" strings (default False)
Returns:
strings (list): File line list
"""
lines = None
with io.open(path, encoding=encoding) as f:
if as_interned:
lines = [sys.intern(line) for line in f.read().splitlines()]
else:
lines = f.read().splitlines()
return lines | [
"def",
"lines_from_file",
"(",
"path",
",",
"as_interned",
"=",
"False",
",",
"encoding",
"=",
"None",
")",
":",
"lines",
"=",
"None",
"with",
"io",
".",
"open",
"(",
"path",
",",
"encoding",
"=",
"encoding",
")",
"as",
"f",
":",
"if",
"as_interned",
":",
"lines",
"=",
"[",
"sys",
".",
"intern",
"(",
"line",
")",
"for",
"line",
"in",
"f",
".",
"read",
"(",
")",
".",
"splitlines",
"(",
")",
"]",
"else",
":",
"lines",
"=",
"f",
".",
"read",
"(",
")",
".",
"splitlines",
"(",
")",
"return",
"lines"
] | 28.555556 | 19.333333 |
def normalized_mutation_entropy(counts, total_cts=None):
"""Calculate the normalized mutation entropy based on a list/array
of mutation counts.
Note: Any grouping of mutation counts together should be done before hand
Parameters
----------
counts : np.array_like
array/list of mutation counts
Returns
-------
norm_ent : float
normalized entropy of mutation count distribution.
"""
cts = np.asarray(counts, dtype=float)
if total_cts is None:
total_cts = np.sum(cts)
if total_cts > 1:
p = cts / total_cts
ent = shannon_entropy(p)
max_ent = max_shannon_entropy(total_cts)
norm_ent = ent / max_ent
else:
norm_ent = 1.0
return norm_ent | [
"def",
"normalized_mutation_entropy",
"(",
"counts",
",",
"total_cts",
"=",
"None",
")",
":",
"cts",
"=",
"np",
".",
"asarray",
"(",
"counts",
",",
"dtype",
"=",
"float",
")",
"if",
"total_cts",
"is",
"None",
":",
"total_cts",
"=",
"np",
".",
"sum",
"(",
"cts",
")",
"if",
"total_cts",
">",
"1",
":",
"p",
"=",
"cts",
"/",
"total_cts",
"ent",
"=",
"shannon_entropy",
"(",
"p",
")",
"max_ent",
"=",
"max_shannon_entropy",
"(",
"total_cts",
")",
"norm_ent",
"=",
"ent",
"/",
"max_ent",
"else",
":",
"norm_ent",
"=",
"1.0",
"return",
"norm_ent"
] | 27.074074 | 18.740741 |
def getName(self):
r"""
Returns the results name for this token expression. Useful when several
different expressions might match at a particular location.
Example::
integer = Word(nums)
ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
house_number_expr = Suppress('#') + Word(nums, alphanums)
user_data = (Group(house_number_expr)("house_number")
| Group(ssn_expr)("ssn")
| Group(integer)("age"))
user_info = OneOrMore(user_data)
result = user_info.parseString("22 111-22-3333 #221B")
for item in result:
print(item.getName(), ':', item[0])
prints::
age : 22
ssn : 111-22-3333
house_number : 221B
"""
if self.__name:
return self.__name
elif self.__parent:
par = self.__parent()
if par:
return par.__lookup(self)
else:
return None
elif (len(self) == 1 and
len(self.__tokdict) == 1 and
next(iter(self.__tokdict.values()))[0][1] in (0,-1)):
return next(iter(self.__tokdict.keys()))
else:
return None | [
"def",
"getName",
"(",
"self",
")",
":",
"if",
"self",
".",
"__name",
":",
"return",
"self",
".",
"__name",
"elif",
"self",
".",
"__parent",
":",
"par",
"=",
"self",
".",
"__parent",
"(",
")",
"if",
"par",
":",
"return",
"par",
".",
"__lookup",
"(",
"self",
")",
"else",
":",
"return",
"None",
"elif",
"(",
"len",
"(",
"self",
")",
"==",
"1",
"and",
"len",
"(",
"self",
".",
"__tokdict",
")",
"==",
"1",
"and",
"next",
"(",
"iter",
"(",
"self",
".",
"__tokdict",
".",
"values",
"(",
")",
")",
")",
"[",
"0",
"]",
"[",
"1",
"]",
"in",
"(",
"0",
",",
"-",
"1",
")",
")",
":",
"return",
"next",
"(",
"iter",
"(",
"self",
".",
"__tokdict",
".",
"keys",
"(",
")",
")",
")",
"else",
":",
"return",
"None"
] | 32.179487 | 18.358974 |
def getWifiInfo(self, wifiInterfaceId=1, timeout=1):
"""Execute GetInfo action to get Wifi basic information's.
:param int wifiInterfaceId: the id of the Wifi interface
:param float timeout: the timeout to wait for the action to be executed
:return: the basic informations
:rtype: WifiBasicInfo
"""
namespace = Wifi.getServiceType("getWifiInfo") + str(wifiInterfaceId)
uri = self.getControlURL(namespace)
results = self.execute(uri, namespace, "GetInfo", timeout=timeout)
return WifiBasicInfo(results) | [
"def",
"getWifiInfo",
"(",
"self",
",",
"wifiInterfaceId",
"=",
"1",
",",
"timeout",
"=",
"1",
")",
":",
"namespace",
"=",
"Wifi",
".",
"getServiceType",
"(",
"\"getWifiInfo\"",
")",
"+",
"str",
"(",
"wifiInterfaceId",
")",
"uri",
"=",
"self",
".",
"getControlURL",
"(",
"namespace",
")",
"results",
"=",
"self",
".",
"execute",
"(",
"uri",
",",
"namespace",
",",
"\"GetInfo\"",
",",
"timeout",
"=",
"timeout",
")",
"return",
"WifiBasicInfo",
"(",
"results",
")"
] | 40.785714 | 20.285714 |
def _compute_log_acceptance_correction(current_state_parts,
proposed_state_parts,
current_volatility_parts,
proposed_volatility_parts,
current_drift_parts,
proposed_drift_parts,
step_size_parts,
independent_chain_ndims,
name=None):
r"""Helper to `kernel` which computes the log acceptance-correction.
Computes `log_acceptance_correction` as described in `MetropolisHastings`
class. The proposal density is normal. More specifically,
```none
q(proposed_state | current_state) \sim N(current_state + current_drift,
step_size * current_volatility**2)
q(current_state | proposed_state) \sim N(proposed_state + proposed_drift,
step_size * proposed_volatility**2)
```
The `log_acceptance_correction` is then
```none
log_acceptance_correctio = q(current_state | proposed_state)
- q(proposed_state | current_state)
```
Args:
current_state_parts: Python `list` of `Tensor`s representing the value(s) of
the current state of the chain.
proposed_state_parts: Python `list` of `Tensor`s representing the value(s)
of the proposed state of the chain. Must broadcast with the shape of
`current_state_parts`.
current_volatility_parts: Python `list` of `Tensor`s representing the value
of `volatility_fn(*current_volatility_parts)`. Must broadcast with the
shape of `current_state_parts`.
proposed_volatility_parts: Python `list` of `Tensor`s representing the value
of `volatility_fn(*proposed_volatility_parts)`. Must broadcast with the
shape of `current_state_parts`
current_drift_parts: Python `list` of `Tensor`s representing value of the
drift `_get_drift(*current_state_parts, ..)`. Must broadcast with the
shape of `current_state_parts`.
proposed_drift_parts: Python `list` of `Tensor`s representing value of the
drift `_get_drift(*proposed_drift_parts, ..)`. Must broadcast with the
shape of `current_state_parts`.
step_size_parts: Python `list` of `Tensor`s representing the step size for
Euler-Maruyama method. Must broadcast with the shape of
`current_state_parts`.
independent_chain_ndims: Scalar `int` `Tensor` representing the number of
leftmost `Tensor` dimensions which index independent chains.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., 'compute_log_acceptance_correction').
Returns:
log_acceptance_correction: `Tensor` representing the `log`
acceptance-correction. (See docstring for mathematical definition.)
"""
with tf.compat.v1.name_scope(name, 'compute_log_acceptance_correction', [
current_state_parts, proposed_state_parts, current_volatility_parts,
proposed_volatility_parts, current_drift_parts, proposed_drift_parts,
step_size_parts, independent_chain_ndims
]):
proposed_log_density_parts = []
dual_log_density_parts = []
for [
current_state,
proposed_state,
current_volatility,
proposed_volatility,
current_drift,
proposed_drift,
step_size,
] in zip(
current_state_parts,
proposed_state_parts,
current_volatility_parts,
proposed_volatility_parts,
current_drift_parts,
proposed_drift_parts,
step_size_parts,
):
axis = tf.range(independent_chain_ndims, tf.rank(current_state))
state_diff = proposed_state - current_state
current_volatility *= tf.sqrt(step_size)
proposed_energy = (state_diff - current_drift) / current_volatility
proposed_volatility *= tf.sqrt(step_size)
# Compute part of `q(proposed_state | current_state)`
proposed_energy = (
tf.reduce_sum(
input_tensor=mcmc_util.safe_sum(
[tf.math.log(current_volatility),
0.5 * (proposed_energy**2)]),
axis=axis))
proposed_log_density_parts.append(-proposed_energy)
# Compute part of `q(current_state | proposed_state)`
dual_energy = (state_diff + proposed_drift) / proposed_volatility
dual_energy = (
tf.reduce_sum(
input_tensor=mcmc_util.safe_sum(
[tf.math.log(proposed_volatility), 0.5 * (dual_energy**2)]),
axis=axis))
dual_log_density_parts.append(-dual_energy)
# Compute `q(proposed_state | current_state)`
proposed_log_density_reduce = tf.reduce_sum(
input_tensor=tf.stack(proposed_log_density_parts, axis=-1), axis=-1)
# Compute `q(current_state | proposed_state)`
dual_log_density_reduce = tf.reduce_sum(
input_tensor=tf.stack(dual_log_density_parts, axis=-1), axis=-1)
return mcmc_util.safe_sum([dual_log_density_reduce,
-proposed_log_density_reduce]) | [
"def",
"_compute_log_acceptance_correction",
"(",
"current_state_parts",
",",
"proposed_state_parts",
",",
"current_volatility_parts",
",",
"proposed_volatility_parts",
",",
"current_drift_parts",
",",
"proposed_drift_parts",
",",
"step_size_parts",
",",
"independent_chain_ndims",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"compat",
".",
"v1",
".",
"name_scope",
"(",
"name",
",",
"'compute_log_acceptance_correction'",
",",
"[",
"current_state_parts",
",",
"proposed_state_parts",
",",
"current_volatility_parts",
",",
"proposed_volatility_parts",
",",
"current_drift_parts",
",",
"proposed_drift_parts",
",",
"step_size_parts",
",",
"independent_chain_ndims",
"]",
")",
":",
"proposed_log_density_parts",
"=",
"[",
"]",
"dual_log_density_parts",
"=",
"[",
"]",
"for",
"[",
"current_state",
",",
"proposed_state",
",",
"current_volatility",
",",
"proposed_volatility",
",",
"current_drift",
",",
"proposed_drift",
",",
"step_size",
",",
"]",
"in",
"zip",
"(",
"current_state_parts",
",",
"proposed_state_parts",
",",
"current_volatility_parts",
",",
"proposed_volatility_parts",
",",
"current_drift_parts",
",",
"proposed_drift_parts",
",",
"step_size_parts",
",",
")",
":",
"axis",
"=",
"tf",
".",
"range",
"(",
"independent_chain_ndims",
",",
"tf",
".",
"rank",
"(",
"current_state",
")",
")",
"state_diff",
"=",
"proposed_state",
"-",
"current_state",
"current_volatility",
"*=",
"tf",
".",
"sqrt",
"(",
"step_size",
")",
"proposed_energy",
"=",
"(",
"state_diff",
"-",
"current_drift",
")",
"/",
"current_volatility",
"proposed_volatility",
"*=",
"tf",
".",
"sqrt",
"(",
"step_size",
")",
"# Compute part of `q(proposed_state | current_state)`",
"proposed_energy",
"=",
"(",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"mcmc_util",
".",
"safe_sum",
"(",
"[",
"tf",
".",
"math",
".",
"log",
"(",
"current_volatility",
")",
",",
"0.5",
"*",
"(",
"proposed_energy",
"**",
"2",
")",
"]",
")",
",",
"axis",
"=",
"axis",
")",
")",
"proposed_log_density_parts",
".",
"append",
"(",
"-",
"proposed_energy",
")",
"# Compute part of `q(current_state | proposed_state)`",
"dual_energy",
"=",
"(",
"state_diff",
"+",
"proposed_drift",
")",
"/",
"proposed_volatility",
"dual_energy",
"=",
"(",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"mcmc_util",
".",
"safe_sum",
"(",
"[",
"tf",
".",
"math",
".",
"log",
"(",
"proposed_volatility",
")",
",",
"0.5",
"*",
"(",
"dual_energy",
"**",
"2",
")",
"]",
")",
",",
"axis",
"=",
"axis",
")",
")",
"dual_log_density_parts",
".",
"append",
"(",
"-",
"dual_energy",
")",
"# Compute `q(proposed_state | current_state)`",
"proposed_log_density_reduce",
"=",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"tf",
".",
"stack",
"(",
"proposed_log_density_parts",
",",
"axis",
"=",
"-",
"1",
")",
",",
"axis",
"=",
"-",
"1",
")",
"# Compute `q(current_state | proposed_state)`",
"dual_log_density_reduce",
"=",
"tf",
".",
"reduce_sum",
"(",
"input_tensor",
"=",
"tf",
".",
"stack",
"(",
"dual_log_density_parts",
",",
"axis",
"=",
"-",
"1",
")",
",",
"axis",
"=",
"-",
"1",
")",
"return",
"mcmc_util",
".",
"safe_sum",
"(",
"[",
"dual_log_density_reduce",
",",
"-",
"proposed_log_density_reduce",
"]",
")"
] | 40.860656 | 23.081967 |
def namedb_get_names_owned_by_address( cur, address, current_block ):
"""
Get the list of non-expired, non-revoked names owned by an address.
Only works if there is a *singular* address for the name.
"""
unexpired_fragment, unexpired_args = namedb_select_where_unexpired_names( current_block )
select_query = "SELECT name FROM name_records JOIN namespaces ON name_records.namespace_id = namespaces.namespace_id " + \
"WHERE name_records.address = ? AND name_records.revoked = 0 AND " + unexpired_fragment + ";"
args = (address,) + unexpired_args
name_rows = namedb_query_execute( cur, select_query, args )
names = []
for name_row in name_rows:
names.append( name_row['name'] )
if len(names) == 0:
return None
else:
return names | [
"def",
"namedb_get_names_owned_by_address",
"(",
"cur",
",",
"address",
",",
"current_block",
")",
":",
"unexpired_fragment",
",",
"unexpired_args",
"=",
"namedb_select_where_unexpired_names",
"(",
"current_block",
")",
"select_query",
"=",
"\"SELECT name FROM name_records JOIN namespaces ON name_records.namespace_id = namespaces.namespace_id \"",
"+",
"\"WHERE name_records.address = ? AND name_records.revoked = 0 AND \"",
"+",
"unexpired_fragment",
"+",
"\";\"",
"args",
"=",
"(",
"address",
",",
")",
"+",
"unexpired_args",
"name_rows",
"=",
"namedb_query_execute",
"(",
"cur",
",",
"select_query",
",",
"args",
")",
"names",
"=",
"[",
"]",
"for",
"name_row",
"in",
"name_rows",
":",
"names",
".",
"append",
"(",
"name_row",
"[",
"'name'",
"]",
")",
"if",
"len",
"(",
"names",
")",
"==",
"0",
":",
"return",
"None",
"else",
":",
"return",
"names"
] | 36.5 | 29.136364 |
def get_by_code(self, code):
"""
Retrieve a language by a code.
:param code: iso code (any of the three) or its culture code
:return: a Language object
"""
if any(x in code for x in ('_', '-')):
cc = CultureCode.objects.get(code=code.replace('_', '-'))
return cc.language
elif len(code) == 2:
return self.get(iso_639_1=code)
elif len(code) == 3:
return self.get(Q(iso_639_2T=code) |
Q(iso_639_2B=code) |
Q(iso_639_3=code))
raise ValueError(
'Code must be either 2, or 3 characters: "%s" is %s' % (code, len(code))) | [
"def",
"get_by_code",
"(",
"self",
",",
"code",
")",
":",
"if",
"any",
"(",
"x",
"in",
"code",
"for",
"x",
"in",
"(",
"'_'",
",",
"'-'",
")",
")",
":",
"cc",
"=",
"CultureCode",
".",
"objects",
".",
"get",
"(",
"code",
"=",
"code",
".",
"replace",
"(",
"'_'",
",",
"'-'",
")",
")",
"return",
"cc",
".",
"language",
"elif",
"len",
"(",
"code",
")",
"==",
"2",
":",
"return",
"self",
".",
"get",
"(",
"iso_639_1",
"=",
"code",
")",
"elif",
"len",
"(",
"code",
")",
"==",
"3",
":",
"return",
"self",
".",
"get",
"(",
"Q",
"(",
"iso_639_2T",
"=",
"code",
")",
"|",
"Q",
"(",
"iso_639_2B",
"=",
"code",
")",
"|",
"Q",
"(",
"iso_639_3",
"=",
"code",
")",
")",
"raise",
"ValueError",
"(",
"'Code must be either 2, or 3 characters: \"%s\" is %s'",
"%",
"(",
"code",
",",
"len",
"(",
"code",
")",
")",
")"
] | 32.666667 | 17.238095 |
def build(self, text):
"""
:param text: Content of the paragraph
"""
super(Paragraph, self).build()
self.content = text | [
"def",
"build",
"(",
"self",
",",
"text",
")",
":",
"super",
"(",
"Paragraph",
",",
"self",
")",
".",
"build",
"(",
")",
"self",
".",
"content",
"=",
"text"
] | 25.666667 | 6.333333 |
def loop_check(self):
"""Check if we have a loop in the graph
:return: Nodes in loop
:rtype: list
"""
in_loop = []
# Add the tag for dfs check
for node in list(self.nodes.values()):
node['dfs_loop_status'] = 'DFS_UNCHECKED'
# Now do the job
for node_id, node in self.nodes.items():
# Run the dfs only if the node has not been already done */
if node['dfs_loop_status'] == 'DFS_UNCHECKED':
self.dfs_loop_search(node_id)
# If LOOP_INSIDE, must be returned
if node['dfs_loop_status'] == 'DFS_LOOP_INSIDE':
in_loop.append(node_id)
# Remove the tag
for node in list(self.nodes.values()):
del node['dfs_loop_status']
return in_loop | [
"def",
"loop_check",
"(",
"self",
")",
":",
"in_loop",
"=",
"[",
"]",
"# Add the tag for dfs check",
"for",
"node",
"in",
"list",
"(",
"self",
".",
"nodes",
".",
"values",
"(",
")",
")",
":",
"node",
"[",
"'dfs_loop_status'",
"]",
"=",
"'DFS_UNCHECKED'",
"# Now do the job",
"for",
"node_id",
",",
"node",
"in",
"self",
".",
"nodes",
".",
"items",
"(",
")",
":",
"# Run the dfs only if the node has not been already done */",
"if",
"node",
"[",
"'dfs_loop_status'",
"]",
"==",
"'DFS_UNCHECKED'",
":",
"self",
".",
"dfs_loop_search",
"(",
"node_id",
")",
"# If LOOP_INSIDE, must be returned",
"if",
"node",
"[",
"'dfs_loop_status'",
"]",
"==",
"'DFS_LOOP_INSIDE'",
":",
"in_loop",
".",
"append",
"(",
"node_id",
")",
"# Remove the tag",
"for",
"node",
"in",
"list",
"(",
"self",
".",
"nodes",
".",
"values",
"(",
")",
")",
":",
"del",
"node",
"[",
"'dfs_loop_status'",
"]",
"return",
"in_loop"
] | 32.2 | 15.96 |
def get_all_memberships(
            self, limit_to=100, max_calls=None, parameters=None,
            since_when=None, start_record=0, verbose=False):
        """
        Retrieve all memberships updated since "since_when"

        Loop over queries of size limit_to until either a non-full queryset
        is returned, or max_depth is reached (used in tests). Then the
        recursion collapses to return a single concatenated list.

        :param limit_to: page size for each API call
        :param max_calls: cap on the number of API calls (mainly for tests)
        :param parameters: dict of field -> value; each entry becomes an
            equality WHERE clause
        :param since_when: number of days back; adds a LastModifiedDate filter
        :param start_record: offset of the first record to fetch
        :param verbose: pass-through diagnostics flag for get_long_query
        :returns: list of membership results (empty list when none found)
        """
        # Ensure an authenticated API session exists before querying.
        if not self.client.session_id:
            self.client.request_session()
        query = "SELECT Objects() FROM Membership"
        # collect all where parameters into a list of
        # (key, operator, value) tuples
        where_params = []
        if parameters:
            for k, v in parameters.items():
                where_params.append((k, "=", v))
        if since_when:
            # Only records modified within the last `since_when` days.
            d = datetime.date.today() - datetime.timedelta(days=since_when)
            where_params.append(
                ('LastModifiedDate', ">", "'%s 00:00:00'" % d))
        if where_params:
            # NOTE(review): values are interpolated directly into the query
            # string; safe only while callers pass trusted parameters.
            query += " WHERE "
            query += " AND ".join(
                ["%s %s %s" % (p[0], p[1], p[2]) for p in where_params])
        query += " ORDER BY LocalID"
        # note, get_long_query is overkill when just looking at
        # one org, but it still only executes once
        # `get_long_query` uses `ms_object_to_model` to return Organizations
        membership_list = self.get_long_query(
            query, limit_to=limit_to, max_calls=max_calls,
            start_record=start_record, verbose=verbose)
        return membership_list or []
"def",
"get_all_memberships",
"(",
"self",
",",
"limit_to",
"=",
"100",
",",
"max_calls",
"=",
"None",
",",
"parameters",
"=",
"None",
",",
"since_when",
"=",
"None",
",",
"start_record",
"=",
"0",
",",
"verbose",
"=",
"False",
")",
":",
"if",
"not",
"self",
".",
"client",
".",
"session_id",
":",
"self",
".",
"client",
".",
"request_session",
"(",
")",
"query",
"=",
"\"SELECT Objects() FROM Membership\"",
"# collect all where parameters into a list of",
"# (key, operator, value) tuples",
"where_params",
"=",
"[",
"]",
"if",
"parameters",
":",
"for",
"k",
",",
"v",
"in",
"parameters",
".",
"items",
"(",
")",
":",
"where_params",
".",
"append",
"(",
"(",
"k",
",",
"\"=\"",
",",
"v",
")",
")",
"if",
"since_when",
":",
"d",
"=",
"datetime",
".",
"date",
".",
"today",
"(",
")",
"-",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"since_when",
")",
"where_params",
".",
"append",
"(",
"(",
"'LastModifiedDate'",
",",
"\">\"",
",",
"\"'%s 00:00:00'\"",
"%",
"d",
")",
")",
"if",
"where_params",
":",
"query",
"+=",
"\" WHERE \"",
"query",
"+=",
"\" AND \"",
".",
"join",
"(",
"[",
"\"%s %s %s\"",
"%",
"(",
"p",
"[",
"0",
"]",
",",
"p",
"[",
"1",
"]",
",",
"p",
"[",
"2",
"]",
")",
"for",
"p",
"in",
"where_params",
"]",
")",
"query",
"+=",
"\" ORDER BY LocalID\"",
"# note, get_long_query is overkill when just looking at",
"# one org, but it still only executes once",
"# `get_long_query` uses `ms_object_to_model` to return Organizations",
"membership_list",
"=",
"self",
".",
"get_long_query",
"(",
"query",
",",
"limit_to",
"=",
"limit_to",
",",
"max_calls",
"=",
"max_calls",
",",
"start_record",
"=",
"start_record",
",",
"verbose",
"=",
"verbose",
")",
"return",
"membership_list",
"or",
"[",
"]"
] | 36.511628 | 20.093023 |
def accel_toggle_transparency(self, *args):
        """Accelerator callback that flips window transparency on or off.

        Toggles the internal flag, then re-triggers the styleBackground
        setting so the change takes effect immediately.
        """
        self.transparency_toggled = not self.transparency_toggled
        style_bg = self.settings.styleBackground
        style_bg.triggerOnChangedValue(style_bg, 'transparency')
        return True
"def",
"accel_toggle_transparency",
"(",
"self",
",",
"*",
"args",
")",
":",
"self",
".",
"transparency_toggled",
"=",
"not",
"self",
".",
"transparency_toggled",
"self",
".",
"settings",
".",
"styleBackground",
".",
"triggerOnChangedValue",
"(",
"self",
".",
"settings",
".",
"styleBackground",
",",
"'transparency'",
")",
"return",
"True"
] | 38.375 | 14.625 |
def explore_server(server_url, username, password):
    """ Demo of exploring a cim server for characteristics defined by
        the server class

    Connects to the WBEM server at ``server_url``, prints identification
    data (brand, version, namespaces), lists advertised management
    profiles, and prints central instances for selected DMTF and SNIA
    profiles. Output goes to stdout; failures while fetching central
    instances are reported and skipped.
    """
    print("WBEM server URL:\n  %s" % server_url)
    conn = WBEMConnection(server_url, (username, password),
                          no_verification=True)
    server = WBEMServer(conn)
    print("Brand:\n  %s" % server.brand)
    print("Version:\n  %s" % server.version)
    print("Interop namespace:\n  %s" % server.interop_ns)
    print("All namespaces:")
    for ns in server.namespaces:
        print("  %s" % ns)
    print("Advertised management profiles:")
    # Map RegisteredOrganization integer values to display strings.
    org_vm = ValueMapping.for_property(server, server.interop_ns,
                                       'CIM_RegisteredProfile',
                                       'RegisteredOrganization')
    for inst in server.profiles:
        print_profile_info(org_vm, inst)
    indication_profiles = server.get_selected_profiles('DMTF', 'Indications')
    print('Profiles for DMTF:Indications')
    for inst in indication_profiles:
        print_profile_info(org_vm, inst)
    server_profiles = server.get_selected_profiles('SNIA', 'Server')
    print('Profiles for SNIA:Server')
    for inst in server_profiles:
        print_profile_info(org_vm, inst)
    # get Central Instances
    # Component profiles: central instances located via the central-class /
    # scoping-class methodology.
    for inst in indication_profiles:
        org = org_vm.tovalues(inst['RegisteredOrganization'])
        name = inst['RegisteredName']
        vers = inst['RegisteredVersion']
        print("Central instances for profile %s:%s:%s (component):" % \
              (org, name, vers))
        try:
            ci_paths = server.get_central_instances(
                inst.path,
                "CIM_IndicationService", "CIM_System", ["CIM_HostedService"])
        except Exception as exc:
            # Demo code: report the failure and continue with no instances.
            print("Error: %s" % str(exc))
            ci_paths = []
        for ip in ci_paths:
            print("  %s" % str(ip))
    # Autonomous profiles: central instances resolvable from the profile
    # path alone.
    for inst in server_profiles:
        org = org_vm.tovalues(inst['RegisteredOrganization'])
        name = inst['RegisteredName']
        vers = inst['RegisteredVersion']
        print("Central instances for profile %s:%s:%s(autonomous):" %
              (org, name, vers))
        try:
            ci_paths = server.get_central_instances(inst.path)
        except Exception as exc:
            print("Error: %s" % str(exc))
            ci_paths = []
        for ip in ci_paths:
            print("  %s" % str(ip))
"def",
"explore_server",
"(",
"server_url",
",",
"username",
",",
"password",
")",
":",
"print",
"(",
"\"WBEM server URL:\\n %s\"",
"%",
"server_url",
")",
"conn",
"=",
"WBEMConnection",
"(",
"server_url",
",",
"(",
"username",
",",
"password",
")",
",",
"no_verification",
"=",
"True",
")",
"server",
"=",
"WBEMServer",
"(",
"conn",
")",
"print",
"(",
"\"Brand:\\n %s\"",
"%",
"server",
".",
"brand",
")",
"print",
"(",
"\"Version:\\n %s\"",
"%",
"server",
".",
"version",
")",
"print",
"(",
"\"Interop namespace:\\n %s\"",
"%",
"server",
".",
"interop_ns",
")",
"print",
"(",
"\"All namespaces:\"",
")",
"for",
"ns",
"in",
"server",
".",
"namespaces",
":",
"print",
"(",
"\" %s\"",
"%",
"ns",
")",
"print",
"(",
"\"Advertised management profiles:\"",
")",
"org_vm",
"=",
"ValueMapping",
".",
"for_property",
"(",
"server",
",",
"server",
".",
"interop_ns",
",",
"'CIM_RegisteredProfile'",
",",
"'RegisteredOrganization'",
")",
"for",
"inst",
"in",
"server",
".",
"profiles",
":",
"print_profile_info",
"(",
"org_vm",
",",
"inst",
")",
"indication_profiles",
"=",
"server",
".",
"get_selected_profiles",
"(",
"'DMTF'",
",",
"'Indications'",
")",
"print",
"(",
"'Profiles for DMTF:Indications'",
")",
"for",
"inst",
"in",
"indication_profiles",
":",
"print_profile_info",
"(",
"org_vm",
",",
"inst",
")",
"server_profiles",
"=",
"server",
".",
"get_selected_profiles",
"(",
"'SNIA'",
",",
"'Server'",
")",
"print",
"(",
"'Profiles for SNIA:Server'",
")",
"for",
"inst",
"in",
"server_profiles",
":",
"print_profile_info",
"(",
"org_vm",
",",
"inst",
")",
"# get Central Instances",
"for",
"inst",
"in",
"indication_profiles",
":",
"org",
"=",
"org_vm",
".",
"tovalues",
"(",
"inst",
"[",
"'RegisteredOrganization'",
"]",
")",
"name",
"=",
"inst",
"[",
"'RegisteredName'",
"]",
"vers",
"=",
"inst",
"[",
"'RegisteredVersion'",
"]",
"print",
"(",
"\"Central instances for profile %s:%s:%s (component):\"",
"%",
"(",
"org",
",",
"name",
",",
"vers",
")",
")",
"try",
":",
"ci_paths",
"=",
"server",
".",
"get_central_instances",
"(",
"inst",
".",
"path",
",",
"\"CIM_IndicationService\"",
",",
"\"CIM_System\"",
",",
"[",
"\"CIM_HostedService\"",
"]",
")",
"except",
"Exception",
"as",
"exc",
":",
"print",
"(",
"\"Error: %s\"",
"%",
"str",
"(",
"exc",
")",
")",
"ci_paths",
"=",
"[",
"]",
"for",
"ip",
"in",
"ci_paths",
":",
"print",
"(",
"\" %s\"",
"%",
"str",
"(",
"ip",
")",
")",
"for",
"inst",
"in",
"server_profiles",
":",
"org",
"=",
"org_vm",
".",
"tovalues",
"(",
"inst",
"[",
"'RegisteredOrganization'",
"]",
")",
"name",
"=",
"inst",
"[",
"'RegisteredName'",
"]",
"vers",
"=",
"inst",
"[",
"'RegisteredVersion'",
"]",
"print",
"(",
"\"Central instances for profile %s:%s:%s(autonomous):\"",
"%",
"(",
"org",
",",
"name",
",",
"vers",
")",
")",
"try",
":",
"ci_paths",
"=",
"server",
".",
"get_central_instances",
"(",
"inst",
".",
"path",
")",
"except",
"Exception",
"as",
"exc",
":",
"print",
"(",
"\"Error: %s\"",
"%",
"str",
"(",
"exc",
")",
")",
"ci_paths",
"=",
"[",
"]",
"for",
"ip",
"in",
"ci_paths",
":",
"print",
"(",
"\" %s\"",
"%",
"str",
"(",
"ip",
")",
")"
] | 34.449275 | 16.623188 |
def replace_bytes(self, m):
        """Replace escapes.

        Decodes one matched escape sequence (named, hex ``\\xNN``, or
        octal) into its character; NUL bytes become newlines.
        """
        esc = m.group(0)
        if m.group('special'):
            # Named escape such as \n or \t: translate via lookup table.
            decoded = BACK_SLASH_TRANSLATION[esc]
        elif m.group('char'):
            # \xNN hex escape; keep the raw escape if it cannot be parsed.
            try:
                decoded = chr(int(esc[2:], 16))
            except Exception:
                decoded = esc
        elif m.group('oct'):
            # Octal escape; values above 0xFF wrap around like a byte.
            code = int(esc[1:], 8)
            if code > 255:
                code -= 256
            decoded = chr(code)
        else:
            # No recognized group matched: pass the text through unchanged.
            decoded = esc
        return decoded.replace('\x00', '\n')
"def",
"replace_bytes",
"(",
"self",
",",
"m",
")",
":",
"esc",
"=",
"m",
".",
"group",
"(",
"0",
")",
"value",
"=",
"esc",
"if",
"m",
".",
"group",
"(",
"'special'",
")",
":",
"value",
"=",
"BACK_SLASH_TRANSLATION",
"[",
"esc",
"]",
"elif",
"m",
".",
"group",
"(",
"'char'",
")",
":",
"try",
":",
"value",
"=",
"chr",
"(",
"int",
"(",
"esc",
"[",
"2",
":",
"]",
",",
"16",
")",
")",
"except",
"Exception",
":",
"value",
"=",
"esc",
"elif",
"m",
".",
"group",
"(",
"'oct'",
")",
":",
"value",
"=",
"int",
"(",
"esc",
"[",
"1",
":",
"]",
",",
"8",
")",
"if",
"value",
">",
"255",
":",
"value",
"-=",
"256",
"value",
"=",
"chr",
"(",
"value",
")",
"return",
"value",
".",
"replace",
"(",
"'\\x00'",
",",
"'\\n'",
")"
] | 28.5 | 12.5 |
def get_sshconfig():
    r'''
    Read user's SSH configuration file

    Parses ``~/.ssh/config`` with paramiko and returns a mapping from each
    Host pattern to its parsed configuration dict. Raises the usual
    ``OSError`` if the file does not exist or cannot be read.
    '''
    with open(os.path.expanduser('~/.ssh/config')) as f:
        cfg = paramiko.SSHConfig()
        cfg.parse(f)
        ret_dict = {}
        # NOTE(review): relies on paramiko's private ``_config`` attribute;
        # confirm this still exists when upgrading paramiko.
        for d in cfg._config:
            _copy = dict(d)
            # Avoid buggy behavior with strange host definitions, we need
            # Hostname and not Host.
            del _copy['host']
            # One entry may declare several Host patterns; all of them map
            # to the same config dict.
            for host in d['host']:
                ret_dict[host] = _copy['config']
        return ret_dict
"def",
"get_sshconfig",
"(",
")",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"'~/.ssh/config'",
")",
")",
"as",
"f",
":",
"cfg",
"=",
"paramiko",
".",
"SSHConfig",
"(",
")",
"cfg",
".",
"parse",
"(",
"f",
")",
"ret_dict",
"=",
"{",
"}",
"for",
"d",
"in",
"cfg",
".",
"_config",
":",
"_copy",
"=",
"dict",
"(",
"d",
")",
"# Avoid buggy behavior with strange host definitions, we need",
"# Hostname and not Host.",
"del",
"_copy",
"[",
"'host'",
"]",
"for",
"host",
"in",
"d",
"[",
"'host'",
"]",
":",
"ret_dict",
"[",
"host",
"]",
"=",
"_copy",
"[",
"'config'",
"]",
"return",
"ret_dict"
] | 27.944444 | 18.388889 |
def circuit_to_pdf_using_qcircuit_via_tex(circuit: circuits.Circuit,
                                          filepath: str,
                                          pdf_kwargs=None,
                                          qcircuit_kwargs=None,
                                          clean_ext=('dvi', 'ps'),
                                          documentclass='article'):
    """Compiles the QCircuit-based latex diagram of the given circuit.

    Args:
        circuit: The circuit to produce a pdf of.
        filepath: Where to output the pdf.
        pdf_kwargs: The arguments to pass to generate_pdf.
        qcircuit_kwargs: The arguments to pass to
            circuit_to_latex_using_qcircuit.
        clean_ext: The file extensions to clean up after compilation. By
            default, latexmk is used with the '-pdfps' flag, which produces
            intermediary dvi and ps files.
        documentclass: The documentclass of the latex file.
    """
    # Caller-supplied kwargs override the latexmk defaults.
    pdf_kwargs = {'compiler': 'latexmk', 'compiler_args': ['-pdfps'],
                  **({} if pdf_kwargs is None else pdf_kwargs)}
    qcircuit_kwargs = {} if qcircuit_kwargs is None else qcircuit_kwargs
    tex = circuit_to_latex_using_qcircuit(circuit, **qcircuit_kwargs)
    doc = Document(documentclass=documentclass, document_options='dvips')
    doc.packages.append(Package('amsmath'))
    doc.packages.append(Package('qcircuit'))
    doc.append(NoEscape(tex))
    doc.generate_pdf(filepath, **pdf_kwargs)
    # Remove the intermediary files latexmk leaves behind; a missing file
    # is not an error.
    for ext in clean_ext:
        try:
            os.remove(filepath + '.' + ext)
        except OSError as e:
            # IOError is an alias of OSError on Python 3, so catching
            # OSError alone covers both; re-raise anything other than
            # a plain "file not found".
            if e.errno != errno.ENOENT:
                raise
"def",
"circuit_to_pdf_using_qcircuit_via_tex",
"(",
"circuit",
":",
"circuits",
".",
"Circuit",
",",
"filepath",
":",
"str",
",",
"pdf_kwargs",
"=",
"None",
",",
"qcircuit_kwargs",
"=",
"None",
",",
"clean_ext",
"=",
"(",
"'dvi'",
",",
"'ps'",
")",
",",
"documentclass",
"=",
"'article'",
")",
":",
"pdf_kwargs",
"=",
"{",
"'compiler'",
":",
"'latexmk'",
",",
"'compiler_args'",
":",
"[",
"'-pdfps'",
"]",
",",
"*",
"*",
"(",
"{",
"}",
"if",
"pdf_kwargs",
"is",
"None",
"else",
"pdf_kwargs",
")",
"}",
"qcircuit_kwargs",
"=",
"{",
"}",
"if",
"qcircuit_kwargs",
"is",
"None",
"else",
"qcircuit_kwargs",
"tex",
"=",
"circuit_to_latex_using_qcircuit",
"(",
"circuit",
",",
"*",
"*",
"qcircuit_kwargs",
")",
"doc",
"=",
"Document",
"(",
"documentclass",
"=",
"documentclass",
",",
"document_options",
"=",
"'dvips'",
")",
"doc",
".",
"packages",
".",
"append",
"(",
"Package",
"(",
"'amsmath'",
")",
")",
"doc",
".",
"packages",
".",
"append",
"(",
"Package",
"(",
"'qcircuit'",
")",
")",
"doc",
".",
"append",
"(",
"NoEscape",
"(",
"tex",
")",
")",
"doc",
".",
"generate_pdf",
"(",
"filepath",
",",
"*",
"*",
"pdf_kwargs",
")",
"for",
"ext",
"in",
"clean_ext",
":",
"try",
":",
"os",
".",
"remove",
"(",
"filepath",
"+",
"'.'",
"+",
"ext",
")",
"except",
"(",
"OSError",
",",
"IOError",
")",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"!=",
"errno",
".",
"ENOENT",
":",
"raise"
] | 48.205882 | 16.882353 |
def read_contents(self, name, conn):
        '''Read schema tables

        Queries the PostgreSQL catalog for all relations (tables, views,
        materialized views and foreign tables) in schema ``name`` and
        appends a Table object for each one to ``self.contents``.

        :param name: schema name to inspect
        :param conn: open database connection
        '''
        # Raw string: the LIKE pattern contains a backslash escape (\_)
        # that must reach the server verbatim; the raw literal keeps the
        # exact same bytes while silencing Python's invalid-escape-
        # sequence warning.
        sql = r'''select c.relname,
             d.description,
             case c.relkind
               when 'r' then 'table'
               when 'v' then 'view'
               when 'm' then 'materialized view'
               when 'f' then 'foreign table'
             end as table_type
          from pg_catalog.pg_class c
          join pg_catalog.pg_namespace n on n.oid = c.relnamespace
          left join pg_catalog.pg_description d on (d.objoid = c.oid)
          where c.relkind in ('r','v','m','f') and
                n.nspname = %s and
                n.nspname not like 'pg\_%%' and
                n.nspname not in ('information_schema') and
                coalesce(d.objsubid,0) = 0
          order by c.relname'''
        log = get_logger()
        cur = conn.cursor()
        cur.execute(sql, [name])
        tables = cur.fetchall()
        from pgdocgen.ddlobject.table import Table
        for table in tables:
            # Row layout: (relname, description, table_type).
            table_obj = Table(name, table[0], table[1], table[2], conn)
            log.debug('{}: {}'.format(table[0], table[1]))
            self.contents.append(copy.deepcopy(table_obj))
        cur.close()
"def",
"read_contents",
"(",
"self",
",",
"name",
",",
"conn",
")",
":",
"sql",
"=",
"'''select c.relname,\n d.description,\n case c.relkind\n when 'r' then 'table'\n when 'v' then 'view'\n when 'm' then 'materialized view'\n when 'f' then 'foreign table'\n end as table_type\n from pg_catalog.pg_class c\n join pg_catalog.pg_namespace n on n.oid = c.relnamespace\n left join pg_catalog.pg_description d on (d.objoid = c.oid)\n where c.relkind in ('r','v','m','f') and\n n.nspname = %s and\n n.nspname not like 'pg\\_%%' and\n n.nspname not in ('information_schema') and\n coalesce(d.objsubid,0) = 0\n order by c.relname'''",
"log",
"=",
"get_logger",
"(",
")",
"cur",
"=",
"conn",
".",
"cursor",
"(",
")",
"cur",
".",
"execute",
"(",
"sql",
",",
"[",
"name",
"]",
")",
"tables",
"=",
"cur",
".",
"fetchall",
"(",
")",
"from",
"pgdocgen",
".",
"ddlobject",
".",
"table",
"import",
"Table",
"for",
"table",
"in",
"tables",
":",
"table_obj",
"=",
"Table",
"(",
"name",
",",
"table",
"[",
"0",
"]",
",",
"table",
"[",
"1",
"]",
",",
"table",
"[",
"2",
"]",
",",
"conn",
")",
"log",
".",
"debug",
"(",
"'{}: {}'",
".",
"format",
"(",
"table",
"[",
"0",
"]",
",",
"table",
"[",
"1",
"]",
")",
")",
"self",
".",
"contents",
".",
"append",
"(",
"copy",
".",
"deepcopy",
"(",
"table_obj",
")",
")",
"cur",
".",
"close",
"(",
")"
] | 45.413793 | 12.655172 |
def is_API_online(self):
        """
        Returns True if the OWM Weather API is currently online. A short timeout
        is used to determine API service availability.

        :returns: bool
        """
        # Issue a minimal well-known query; any successful response means
        # the service is reachable.
        params = {'q': 'London,GB'}
        uri = http_client.HttpClient.to_url(OBSERVATION_URL,
                                            self._API_key,
                                            self._subscription_type)
        try:
            _1, _2 = self._wapi.cacheable_get_json(uri, params=params)
            return True
        except api_call_error.APICallTimeoutError:
            # A timeout is interpreted as the API being offline; other
            # errors propagate to the caller.
            return False
"def",
"is_API_online",
"(",
"self",
")",
":",
"params",
"=",
"{",
"'q'",
":",
"'London,GB'",
"}",
"uri",
"=",
"http_client",
".",
"HttpClient",
".",
"to_url",
"(",
"OBSERVATION_URL",
",",
"self",
".",
"_API_key",
",",
"self",
".",
"_subscription_type",
")",
"try",
":",
"_1",
",",
"_2",
"=",
"self",
".",
"_wapi",
".",
"cacheable_get_json",
"(",
"uri",
",",
"params",
"=",
"params",
")",
"return",
"True",
"except",
"api_call_error",
".",
"APICallTimeoutError",
":",
"return",
"False"
] | 35.411765 | 20 |
def shutdown(self):
        """
        Signals worker to shutdown (via sentinel) then cleanly joins the thread
        """
        self.shutdownLocal()
        # Detach the queue first so no further jobs can be submitted, then
        # push the None sentinel to wake the worker and wait for it to exit.
        jobs_queue = self.newJobsQueue
        self.newJobsQueue = None
        jobs_queue.put(None)
        self.worker.join()
"def",
"shutdown",
"(",
"self",
")",
":",
"self",
".",
"shutdownLocal",
"(",
")",
"newJobsQueue",
"=",
"self",
".",
"newJobsQueue",
"self",
".",
"newJobsQueue",
"=",
"None",
"newJobsQueue",
".",
"put",
"(",
"None",
")",
"self",
".",
"worker",
".",
"join",
"(",
")"
] | 27.6 | 14.4 |
def artifact_cache_dir(self):
        """Note that this is unrelated to the general pants artifact cache."""
        # An explicitly configured directory wins; otherwise fall back to a
        # subdirectory of the scratch dir.
        configured = self.get_options().artifact_cache_dir
        if configured:
            return configured
        return os.path.join(self.scratch_dir, 'artifacts')
"def",
"artifact_cache_dir",
"(",
"self",
")",
":",
"return",
"(",
"self",
".",
"get_options",
"(",
")",
".",
"artifact_cache_dir",
"or",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"scratch_dir",
",",
"'artifacts'",
")",
")"
] | 52.75 | 9.75 |
def long_click(self):
        '''
        Perform a long click action on the object.
        Usage:
        d(text="Image").long_click()  # long click on the center of the ui object
        d(text="Image").long_click.topleft()  # long click on the topleft of the ui object
        d(text="Image").long_click.bottomright()  # long click on the bottomright of the ui object
        '''
        # ``param_to_property`` makes the returned callable usable both as
        # ``long_click()`` and as ``long_click.topleft()`` etc., turning the
        # attribute access into the ``corner`` argument.
        @param_to_property(corner=["tl", "topleft", "br", "bottomright"])
        def _long_click(corner=None):
            info = self.info
            if info["longClickable"]:
                # The widget supports long clicks natively: delegate to the
                # device-side jsonrpc server.
                if corner:
                    return self.jsonrpc.longClick(self.selector, corner)
                else:
                    return self.jsonrpc.longClick(self.selector)
            else:
                # Fallback: emulate the long click with raw coordinates
                # computed from the widget's bounds (1/6 inset for corners).
                bounds = info.get("visibleBounds") or info.get("bounds")
                if corner in ["tl", "topleft"]:
                    x = (5 * bounds["left"] + bounds["right"]) / 6
                    y = (5 * bounds["top"] + bounds["bottom"]) / 6
                elif corner in ["br", "bottomright"]:
                    x = (bounds["left"] + 5 * bounds["right"]) / 6
                    y = (bounds["top"] + 5 * bounds["bottom"]) / 6
                else:
                    # Default: the widget's center point.
                    x = (bounds["left"] + bounds["right"]) / 2
                    y = (bounds["top"] + bounds["bottom"]) / 2
                return self.device.long_click(x, y)
        return _long_click
"def",
"long_click",
"(",
"self",
")",
":",
"@",
"param_to_property",
"(",
"corner",
"=",
"[",
"\"tl\"",
",",
"\"topleft\"",
",",
"\"br\"",
",",
"\"bottomright\"",
"]",
")",
"def",
"_long_click",
"(",
"corner",
"=",
"None",
")",
":",
"info",
"=",
"self",
".",
"info",
"if",
"info",
"[",
"\"longClickable\"",
"]",
":",
"if",
"corner",
":",
"return",
"self",
".",
"jsonrpc",
".",
"longClick",
"(",
"self",
".",
"selector",
",",
"corner",
")",
"else",
":",
"return",
"self",
".",
"jsonrpc",
".",
"longClick",
"(",
"self",
".",
"selector",
")",
"else",
":",
"bounds",
"=",
"info",
".",
"get",
"(",
"\"visibleBounds\"",
")",
"or",
"info",
".",
"get",
"(",
"\"bounds\"",
")",
"if",
"corner",
"in",
"[",
"\"tl\"",
",",
"\"topleft\"",
"]",
":",
"x",
"=",
"(",
"5",
"*",
"bounds",
"[",
"\"left\"",
"]",
"+",
"bounds",
"[",
"\"right\"",
"]",
")",
"/",
"6",
"y",
"=",
"(",
"5",
"*",
"bounds",
"[",
"\"top\"",
"]",
"+",
"bounds",
"[",
"\"bottom\"",
"]",
")",
"/",
"6",
"elif",
"corner",
"in",
"[",
"\"br\"",
",",
"\"bottomright\"",
"]",
":",
"x",
"=",
"(",
"bounds",
"[",
"\"left\"",
"]",
"+",
"5",
"*",
"bounds",
"[",
"\"right\"",
"]",
")",
"/",
"6",
"y",
"=",
"(",
"bounds",
"[",
"\"top\"",
"]",
"+",
"5",
"*",
"bounds",
"[",
"\"bottom\"",
"]",
")",
"/",
"6",
"else",
":",
"x",
"=",
"(",
"bounds",
"[",
"\"left\"",
"]",
"+",
"bounds",
"[",
"\"right\"",
"]",
")",
"/",
"2",
"y",
"=",
"(",
"bounds",
"[",
"\"top\"",
"]",
"+",
"bounds",
"[",
"\"bottom\"",
"]",
")",
"/",
"2",
"return",
"self",
".",
"device",
".",
"long_click",
"(",
"x",
",",
"y",
")",
"return",
"_long_click"
] | 48.448276 | 22.931034 |
def submit_file_content(self, method, url, data, headers, params, halt_on_error=True):
        """Submit File Content for Documents and Reports to ThreatConnect API.

        Args:
            method (str): The HTTP method for the request (POST, PUT).
            url (str): The URL for the request.
            data (str;bytes;file): The body (data) for the request.
            headers (dict): The headers for the request.
            params (dict): The query string parameters for the request.
            halt_on_error (bool, default:True): If True any exception will raise an error.

        Returns:
            requests.models.Response: The response from the request.
        """
        response = None
        try:
            response = self.tcex.session.request(
                method, url, data=data, headers=headers, params=params)
        except Exception as e:
            # Route failures through the central error handler (code 580),
            # which raises when halt_on_error is True.
            self.tcex.handle_error(580, [e], halt_on_error)
        return response
"def",
"submit_file_content",
"(",
"self",
",",
"method",
",",
"url",
",",
"data",
",",
"headers",
",",
"params",
",",
"halt_on_error",
"=",
"True",
")",
":",
"r",
"=",
"None",
"try",
":",
"r",
"=",
"self",
".",
"tcex",
".",
"session",
".",
"request",
"(",
"method",
",",
"url",
",",
"data",
"=",
"data",
",",
"headers",
"=",
"headers",
",",
"params",
"=",
"params",
")",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"tcex",
".",
"handle_error",
"(",
"580",
",",
"[",
"e",
"]",
",",
"halt_on_error",
")",
"return",
"r"
] | 45.15 | 26.4 |
def item(self):
        """Return the exchange item (set/add/multiplyi/getitem, ...) defined
        by this XML ``var`` element, ready to read or change model variables.

        >>> from hydpy.core.examples import prepare_full_example_1
        >>> prepare_full_example_1()
        >>> from hydpy import HydPy, TestIO, XMLInterface, pub
        >>> hp = HydPy('LahnH')
        >>> pub.timegrids = '1996-01-01', '1996-01-06', '1d'
        >>> with TestIO():
        ...     hp.prepare_everything()
        ...     interface = XMLInterface('multiple_runs.xml')
        >>> var = interface.exchange.itemgroups[0].models[0].subvars[0].vars[0]
        >>> item = var.item
        >>> item.value
        array(2.0)
        >>> hp.elements.land_dill.model.parameters.control.alpha
        alpha(1.0)
        >>> item.update_variables()
        >>> hp.elements.land_dill.model.parameters.control.alpha
        alpha(2.0)
        >>> var = interface.exchange.itemgroups[0].models[2].subvars[0].vars[0]
        >>> item = var.item
        >>> item.value
        array(5.0)
        >>> hp.elements.stream_dill_lahn_2.model.parameters.control.lag
        lag(0.0)
        >>> item.update_variables()
        >>> hp.elements.stream_dill_lahn_2.model.parameters.control.lag
        lag(5.0)
        >>> var = interface.exchange.itemgroups[1].models[0].subvars[0].vars[0]
        >>> item = var.item
        >>> item.name
        'sm_lahn_2'
        >>> item.value
        array(123.0)
        >>> hp.elements.land_lahn_2.model.sequences.states.sm
        sm(138.31396, 135.71124, 147.54968, 145.47142, 154.96405, 153.32805,
           160.91917, 159.62434, 165.65575, 164.63255)
        >>> item.update_variables()
        >>> hp.elements.land_lahn_2.model.sequences.states.sm
        sm(123.0, 123.0, 123.0, 123.0, 123.0, 123.0, 123.0, 123.0, 123.0, 123.0)
        >>> var = interface.exchange.itemgroups[1].models[0].subvars[0].vars[1]
        >>> item = var.item
        >>> item.name
        'sm_lahn_1'
        >>> item.value
        array([ 110.,  120.,  130.,  140.,  150.,  160.,  170.,  180.,  190.,
                200.,  210.,  220.,  230.])
        >>> hp.elements.land_lahn_1.model.sequences.states.sm
        sm(99.27505, 96.17726, 109.16576, 106.39745, 117.97304, 115.56252,
           125.81523, 123.73198, 132.80035, 130.91684, 138.95523, 137.25983,
           142.84148)
        >>> from hydpy import pub
        >>> with pub.options.warntrim(False):
        ...     item.update_variables()
        >>> hp.elements.land_lahn_1.model.sequences.states.sm
        sm(110.0, 120.0, 130.0, 140.0, 150.0, 160.0, 170.0, 180.0, 190.0, 200.0,
           206.0, 206.0, 206.0)
        >>> for element in pub.selections.headwaters.elements:
        ...     element.model.parameters.control.rfcf(1.1)
        >>> for element in pub.selections.nonheadwaters.elements:
        ...     element.model.parameters.control.rfcf(1.0)
        >>> for subvars in interface.exchange.itemgroups[2].models[0].subvars:
        ...     for var in subvars.vars:
        ...         var.item.update_variables()
        >>> for element in hp.elements.catchment:
        ...     print(element, repr(element.model.parameters.control.sfcf))
        land_dill sfcf(1.4)
        land_lahn_1 sfcf(1.4)
        land_lahn_2 sfcf(1.2)
        land_lahn_3 sfcf(field=1.1, forest=1.2)
        >>> var = interface.exchange.itemgroups[3].models[0].subvars[1].vars[0]
        >>> hp.elements.land_dill.model.sequences.states.sm = 1.0
        >>> for name, target in var.item.yield_name2value():
        ...     print(name, target)    # doctest: +ELLIPSIS
        land_dill_states_sm [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, \
1.0, 1.0, 1.0]
        land_lahn_1_states_sm [110.0, 120.0, 130.0, 140.0, 150.0, 160.0, \
170.0, 180.0, 190.0, 200.0, 206.0, 206.0, 206.0]
        land_lahn_2_states_sm [123.0, 123.0, 123.0, 123.0, 123.0, 123.0, \
123.0, 123.0, 123.0, 123.0]
        land_lahn_3_states_sm [101.3124...]
        >>> vars_ = interface.exchange.itemgroups[3].models[0].subvars[0].vars
        >>> qt = hp.elements.land_dill.model.sequences.fluxes.qt
        >>> qt(1.0)
        >>> qt.series = 2.0
        >>> for var in vars_:
        ...     for name, target in var.item.yield_name2value():
        ...         print(name, target)    # doctest: +ELLIPSIS
        land_dill_fluxes_qt 1.0
        land_dill_fluxes_qt_series [2.0, 2.0, 2.0, 2.0, 2.0]
        >>> var = interface.exchange.itemgroups[3].nodes[0].vars[0]
        >>> hp.nodes.dill.sequences.sim.series = range(5)
        >>> for name, target in var.item.yield_name2value():
        ...     print(name, target)    # doctest: +ELLIPSIS
        dill_nodes_sim_series [0.0, 1.0, 2.0, 3.0, 4.0]
        >>> for name, target in var.item.yield_name2value(2, 4):
        ...     print(name, target)    # doctest: +ELLIPSIS
        dill_nodes_sim_series [2.0, 3.0]
        """
        target = f'{self.master.name}.{self.name}'
        # A var nested directly under ``nodes`` sits one XML level higher
        # than one nested under a model, hence the two different lookups.
        if self.master.name == 'nodes':
            master = self.master.name
            itemgroup = self.master.master.name
        else:
            master = self.master.master.name
            itemgroup = self.master.master.master.name
        itemclass = _ITEMGROUP2ITEMCLASS[itemgroup]
        # Get items only read values; all other item groups change state.
        if itemgroup == 'getitems':
            return self._get_getitem(target, master, itemclass)
        return self._get_changeitem(target, master, itemclass, itemgroup)
"def",
"item",
"(",
"self",
")",
":",
"target",
"=",
"f'{self.master.name}.{self.name}'",
"if",
"self",
".",
"master",
".",
"name",
"==",
"'nodes'",
":",
"master",
"=",
"self",
".",
"master",
".",
"name",
"itemgroup",
"=",
"self",
".",
"master",
".",
"master",
".",
"name",
"else",
":",
"master",
"=",
"self",
".",
"master",
".",
"master",
".",
"name",
"itemgroup",
"=",
"self",
".",
"master",
".",
"master",
".",
"master",
".",
"name",
"itemclass",
"=",
"_ITEMGROUP2ITEMCLASS",
"[",
"itemgroup",
"]",
"if",
"itemgroup",
"==",
"'getitems'",
":",
"return",
"self",
".",
"_get_getitem",
"(",
"target",
",",
"master",
",",
"itemclass",
")",
"return",
"self",
".",
"_get_changeitem",
"(",
"target",
",",
"master",
",",
"itemclass",
",",
"itemgroup",
")"
] | 42.735537 | 20.826446 |
def can_add_lv_load_area(self, node):
        """Check if a LV load area can be appended to this satellite string.

        The area qualifies when it is not yet part of the string, the graph
        path from the string's root node to ``node`` does not exceed the
        allowed branch length, and the string's summed peak load (converted
        to apparent power via cos phi) stays within its limit.

        Args
        ----
        node: GridDing0
            Node whose LV load area is to be tested.

        Returns
        -------
        bool
            True if the LV load area of ``node`` can be added to this
            string, False otherwise.
        """
        # get power factor for loads
        cos_phi_load = cfg_ding0.get('assumptions', 'cos_phi_load')

        lv_load_area = node.lv_load_area
        if lv_load_area not in self.lv_load_areas():
            path_length_to_root = lv_load_area.mv_grid_district.mv_grid.graph_path_length(
                self.root_node, node)
            if ((path_length_to_root <= self.branch_length_max) and
                    (lv_load_area.peak_load + self.peak_load) / cos_phi_load <= self.peak_load_max):
                return True
        # Area is already part of the string or violates a constraint.
        # (The original implicitly returned None here although the
        # docstring promises bool; an explicit False is backward-compatible
        # for all truthiness-based callers.)
        return False
"def",
"can_add_lv_load_area",
"(",
"self",
",",
"node",
")",
":",
"# TODO: check docstring",
"# get power factor for loads",
"cos_phi_load",
"=",
"cfg_ding0",
".",
"get",
"(",
"'assumptions'",
",",
"'cos_phi_load'",
")",
"lv_load_area",
"=",
"node",
".",
"lv_load_area",
"if",
"lv_load_area",
"not",
"in",
"self",
".",
"lv_load_areas",
"(",
")",
":",
"# and isinstance(lv_load_area, LVLoadAreaDing0):",
"path_length_to_root",
"=",
"lv_load_area",
".",
"mv_grid_district",
".",
"mv_grid",
".",
"graph_path_length",
"(",
"self",
".",
"root_node",
",",
"node",
")",
"if",
"(",
"(",
"path_length_to_root",
"<=",
"self",
".",
"branch_length_max",
")",
"and",
"(",
"lv_load_area",
".",
"peak_load",
"+",
"self",
".",
"peak_load",
")",
"/",
"cos_phi_load",
"<=",
"self",
".",
"peak_load_max",
")",
":",
"return",
"True",
"else",
":",
"return",
"False"
] | 33.178571 | 24.214286 |
def _dump_query_timestamps(self, current_time: float):
"""Output the number of GraphQL queries grouped by their query_hash within the last time."""
windows = [10, 11, 15, 20, 30, 60]
print("GraphQL requests:", file=sys.stderr)
for query_hash, times in self._graphql_query_timestamps.items():
print(" {}".format(query_hash), file=sys.stderr)
for window in windows:
reqs_in_sliding_window = sum(t > current_time - window * 60 for t in times)
print(" last {} minutes: {} requests".format(window, reqs_in_sliding_window), file=sys.stderr) | [
"def",
"_dump_query_timestamps",
"(",
"self",
",",
"current_time",
":",
"float",
")",
":",
"windows",
"=",
"[",
"10",
",",
"11",
",",
"15",
",",
"20",
",",
"30",
",",
"60",
"]",
"print",
"(",
"\"GraphQL requests:\"",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"for",
"query_hash",
",",
"times",
"in",
"self",
".",
"_graphql_query_timestamps",
".",
"items",
"(",
")",
":",
"print",
"(",
"\" {}\"",
".",
"format",
"(",
"query_hash",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"for",
"window",
"in",
"windows",
":",
"reqs_in_sliding_window",
"=",
"sum",
"(",
"t",
">",
"current_time",
"-",
"window",
"*",
"60",
"for",
"t",
"in",
"times",
")",
"print",
"(",
"\" last {} minutes: {} requests\"",
".",
"format",
"(",
"window",
",",
"reqs_in_sliding_window",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")"
] | 68.666667 | 23.333333 |
def get_scores(self, *args):
        '''
        In this case, parameters a and b aren't used, since this information is taken
        directly from the corpus categories.

        Returns
        -------
        '''
        lam = self.smoothing_lambda_
        total_per_term = self.tdf_.sum(axis=1)
        grand_total = self.tdf_.sum().sum()
        # Corpus-level component shared by both category distributions.
        # NOTE(review): lambda appears both here and in the mixture below,
        # i.e. the corpus term is weighted by lambda**2 — kept as-is to
        # preserve the original behavior; confirm whether this is intended.
        c_hat_w = lam * total_per_term * 1. / grand_total

        def smoothed(cat):
            cat_counts = self.tdf_[cat]
            p_hat_w = cat_counts * 1. / cat_counts.sum()
            return (1 - lam) * p_hat_w + lam * c_hat_w

        p_w = smoothed('cat')
        q_w = smoothed('ncat')
        kl_divergence = p_w * np.log(p_w / q_w) / np.log(2)
        tt, pvals = self.get_t_statistics()
        # Zero out terms whose t-test p-value is not below the threshold.
        return kl_divergence * (pvals < self.min_p_)
"def",
"get_scores",
"(",
"self",
",",
"*",
"args",
")",
":",
"def",
"jelinek_mercer_smoothing",
"(",
"cat",
")",
":",
"p_hat_w",
"=",
"self",
".",
"tdf_",
"[",
"cat",
"]",
"*",
"1.",
"/",
"self",
".",
"tdf_",
"[",
"cat",
"]",
".",
"sum",
"(",
")",
"c_hat_w",
"=",
"(",
"self",
".",
"smoothing_lambda_",
")",
"*",
"self",
".",
"tdf_",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
"*",
"1.",
"/",
"self",
".",
"tdf_",
".",
"sum",
"(",
")",
".",
"sum",
"(",
")",
"return",
"(",
"1",
"-",
"self",
".",
"smoothing_lambda_",
")",
"*",
"p_hat_w",
"+",
"self",
".",
"smoothing_lambda_",
"*",
"c_hat_w",
"p_w",
"=",
"jelinek_mercer_smoothing",
"(",
"'cat'",
")",
"q_w",
"=",
"jelinek_mercer_smoothing",
"(",
"'ncat'",
")",
"kl_divergence",
"=",
"p_w",
"*",
"np",
".",
"log",
"(",
"p_w",
"/",
"q_w",
")",
"/",
"np",
".",
"log",
"(",
"2",
")",
"tt",
",",
"pvals",
"=",
"self",
".",
"get_t_statistics",
"(",
")",
"return",
"kl_divergence",
"*",
"(",
"pvals",
"<",
"self",
".",
"min_p_",
")"
] | 28.347826 | 26.086957 |
def set_secondary_vehicle_position(self, m):
        '''store second vehicle position for filtering purposes'''
        if m.get_type() != 'GLOBAL_POSITION_INT':
            return
        # Convert from integer message units (1e7 deg, centidegrees).
        lat = m.lat * 1.0e-7
        lon = m.lon * 1.0e-7
        heading = m.hdg * 0.01
        # Ignore positions at (or extremely near) the origin: these are the
        # values typically reported before a GPS fix is obtained.
        if abs(lat) < 1.0e-3 and abs(lon) < 1.0e-3:
            return
        self.vehicle2_pos = VehiclePos(m)
"def",
"set_secondary_vehicle_position",
"(",
"self",
",",
"m",
")",
":",
"if",
"m",
".",
"get_type",
"(",
")",
"!=",
"'GLOBAL_POSITION_INT'",
":",
"return",
"(",
"lat",
",",
"lon",
",",
"heading",
")",
"=",
"(",
"m",
".",
"lat",
"*",
"1.0e-7",
",",
"m",
".",
"lon",
"*",
"1.0e-7",
",",
"m",
".",
"hdg",
"*",
"0.01",
")",
"if",
"abs",
"(",
"lat",
")",
"<",
"1.0e-3",
"and",
"abs",
"(",
"lon",
")",
"<",
"1.0e-3",
":",
"return",
"self",
".",
"vehicle2_pos",
"=",
"VehiclePos",
"(",
"m",
")"
] | 44.625 | 15.625 |
def get_ips(self, instance_id):
"""Retrieves all IP addresses associated to a given instance.
:return: tuple (IPs)
"""
self._init_os_api()
instance = self._load_instance(instance_id)
try:
ip_addrs = set([self.floating_ip])
except AttributeError:
ip_addrs = set([])
for ip_addr in sum(instance.networks.values(), []):
ip_addrs.add(ip_addr)
log.debug("VM `%s` has IP addresses %r", instance_id, ip_addrs)
return list(ip_addrs) | [
"def",
"get_ips",
"(",
"self",
",",
"instance_id",
")",
":",
"self",
".",
"_init_os_api",
"(",
")",
"instance",
"=",
"self",
".",
"_load_instance",
"(",
"instance_id",
")",
"try",
":",
"ip_addrs",
"=",
"set",
"(",
"[",
"self",
".",
"floating_ip",
"]",
")",
"except",
"AttributeError",
":",
"ip_addrs",
"=",
"set",
"(",
"[",
"]",
")",
"for",
"ip_addr",
"in",
"sum",
"(",
"instance",
".",
"networks",
".",
"values",
"(",
")",
",",
"[",
"]",
")",
":",
"ip_addrs",
".",
"add",
"(",
"ip_addr",
")",
"log",
".",
"debug",
"(",
"\"VM `%s` has IP addresses %r\"",
",",
"instance_id",
",",
"ip_addrs",
")",
"return",
"list",
"(",
"ip_addrs",
")"
] | 35.133333 | 13.8 |
def get_chebi_name_from_id(chebi_id, offline=False):
"""Return a ChEBI name corresponding to the given ChEBI ID.
Parameters
----------
chebi_id : str
The ChEBI ID whose name is to be returned.
offline : Optional[bool]
Choose whether to allow an online lookup if the local lookup fails. If
True, the online lookup is not attempted. Default: False.
Returns
-------
chebi_name : str
The name corresponding to the given ChEBI ID. If the lookup
fails, None is returned.
"""
chebi_name = chebi_id_to_name.get(chebi_id)
if chebi_name is None and not offline:
chebi_name = get_chebi_name_from_id_web(chebi_id)
return chebi_name | [
"def",
"get_chebi_name_from_id",
"(",
"chebi_id",
",",
"offline",
"=",
"False",
")",
":",
"chebi_name",
"=",
"chebi_id_to_name",
".",
"get",
"(",
"chebi_id",
")",
"if",
"chebi_name",
"is",
"None",
"and",
"not",
"offline",
":",
"chebi_name",
"=",
"get_chebi_name_from_id_web",
"(",
"chebi_id",
")",
"return",
"chebi_name"
] | 33.190476 | 19.47619 |
def _convert(x, factor1, factor2):
"""
Converts mixing ratio x in comp1 - comp2 tie line to that in
c1 - c2 tie line.
Args:
x (float): Mixing ratio x in comp1 - comp2 tie line, a float
between 0 and 1.
factor1 (float): Compositional ratio between composition c1 and
processed composition comp1. E.g., factor for
Composition('SiO2') and Composition('O') is 2.0.
factor2 (float): Compositional ratio between composition c2 and
processed composition comp2.
Returns:
Mixing ratio in c1 - c2 tie line, a float between 0 and 1.
"""
return x * factor2 / ((1-x) * factor1 + x * factor2) | [
"def",
"_convert",
"(",
"x",
",",
"factor1",
",",
"factor2",
")",
":",
"return",
"x",
"*",
"factor2",
"/",
"(",
"(",
"1",
"-",
"x",
")",
"*",
"factor1",
"+",
"x",
"*",
"factor2",
")"
] | 40.611111 | 21.611111 |
def sigmasq(htilde, psd = None, low_frequency_cutoff=None,
high_frequency_cutoff=None):
"""Return the loudness of the waveform. This is defined (see Duncan
Brown's thesis) as the unnormalized matched-filter of the input waveform,
htilde, with itself. This quantity is usually referred to as (sigma)^2
and is then used to normalize matched-filters with the data.
Parameters
----------
htilde : TimeSeries or FrequencySeries
The input vector containing a waveform.
psd : {None, FrequencySeries}, optional
The psd used to weight the accumulated power.
low_frequency_cutoff : {None, float}, optional
The frequency to begin considering waveform power.
high_frequency_cutoff : {None, float}, optional
The frequency to stop considering waveform power.
Returns
-------
sigmasq: float
"""
htilde = make_frequency_series(htilde)
N = (len(htilde)-1) * 2
norm = 4.0 * htilde.delta_f
kmin, kmax = get_cutoff_indices(low_frequency_cutoff,
high_frequency_cutoff, htilde.delta_f, N)
ht = htilde[kmin:kmax]
if psd:
try:
numpy.testing.assert_almost_equal(ht.delta_f, psd.delta_f)
except:
raise ValueError('Waveform does not have same delta_f as psd')
if psd is None:
sq = ht.inner(ht)
else:
sq = ht.weighted_inner(ht, psd[kmin:kmax])
return sq.real * norm | [
"def",
"sigmasq",
"(",
"htilde",
",",
"psd",
"=",
"None",
",",
"low_frequency_cutoff",
"=",
"None",
",",
"high_frequency_cutoff",
"=",
"None",
")",
":",
"htilde",
"=",
"make_frequency_series",
"(",
"htilde",
")",
"N",
"=",
"(",
"len",
"(",
"htilde",
")",
"-",
"1",
")",
"*",
"2",
"norm",
"=",
"4.0",
"*",
"htilde",
".",
"delta_f",
"kmin",
",",
"kmax",
"=",
"get_cutoff_indices",
"(",
"low_frequency_cutoff",
",",
"high_frequency_cutoff",
",",
"htilde",
".",
"delta_f",
",",
"N",
")",
"ht",
"=",
"htilde",
"[",
"kmin",
":",
"kmax",
"]",
"if",
"psd",
":",
"try",
":",
"numpy",
".",
"testing",
".",
"assert_almost_equal",
"(",
"ht",
".",
"delta_f",
",",
"psd",
".",
"delta_f",
")",
"except",
":",
"raise",
"ValueError",
"(",
"'Waveform does not have same delta_f as psd'",
")",
"if",
"psd",
"is",
"None",
":",
"sq",
"=",
"ht",
".",
"inner",
"(",
"ht",
")",
"else",
":",
"sq",
"=",
"ht",
".",
"weighted_inner",
"(",
"ht",
",",
"psd",
"[",
"kmin",
":",
"kmax",
"]",
")",
"return",
"sq",
".",
"real",
"*",
"norm"
] | 34.853659 | 20.853659 |
def get_tx_out(self, tx_hash, vout_id, id=None, endpoint=None):
"""
Gets a transaction output by specified transaction hash and output index
Args:
tx_hash: (str) hash in the form '58c634f81fbd4ae2733d7e3930a9849021840fc19dc6af064d6f2812a333f91d'
vout_id: (int) index of the transaction output in the transaction
id: (int, optional) id to use for response tracking
endpoint: (RPCEndpoint, optional) endpoint to specify to use
Returns:
json object of the result or the error encountered in the RPC call
"""
return self._call_endpoint(GET_TX_OUT, params=[tx_hash, vout_id], id=id, endpoint=endpoint) | [
"def",
"get_tx_out",
"(",
"self",
",",
"tx_hash",
",",
"vout_id",
",",
"id",
"=",
"None",
",",
"endpoint",
"=",
"None",
")",
":",
"return",
"self",
".",
"_call_endpoint",
"(",
"GET_TX_OUT",
",",
"params",
"=",
"[",
"tx_hash",
",",
"vout_id",
"]",
",",
"id",
"=",
"id",
",",
"endpoint",
"=",
"endpoint",
")"
] | 57.75 | 31.083333 |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'name') and self.name is not None:
_dict['name'] = self.name
if hasattr(self, 'description') and self.description is not None:
_dict['description'] = self.description
if hasattr(self, 'language') and self.language is not None:
_dict['language'] = self.language
if hasattr(self, 'metadata') and self.metadata is not None:
_dict['metadata'] = self.metadata
if hasattr(self,
'learning_opt_out') and self.learning_opt_out is not None:
_dict['learning_opt_out'] = self.learning_opt_out
if hasattr(self,
'system_settings') and self.system_settings is not None:
_dict['system_settings'] = self.system_settings._to_dict()
if hasattr(self, 'workspace_id') and self.workspace_id is not None:
_dict['workspace_id'] = self.workspace_id
if hasattr(self, 'status') and self.status is not None:
_dict['status'] = self.status
if hasattr(self, 'created') and self.created is not None:
_dict['created'] = datetime_to_string(self.created)
if hasattr(self, 'updated') and self.updated is not None:
_dict['updated'] = datetime_to_string(self.updated)
if hasattr(self, 'intents') and self.intents is not None:
_dict['intents'] = [x._to_dict() for x in self.intents]
if hasattr(self, 'entities') and self.entities is not None:
_dict['entities'] = [x._to_dict() for x in self.entities]
if hasattr(self, 'dialog_nodes') and self.dialog_nodes is not None:
_dict['dialog_nodes'] = [x._to_dict() for x in self.dialog_nodes]
if hasattr(self,
'counterexamples') and self.counterexamples is not None:
_dict['counterexamples'] = [
x._to_dict() for x in self.counterexamples
]
return _dict | [
"def",
"_to_dict",
"(",
"self",
")",
":",
"_dict",
"=",
"{",
"}",
"if",
"hasattr",
"(",
"self",
",",
"'name'",
")",
"and",
"self",
".",
"name",
"is",
"not",
"None",
":",
"_dict",
"[",
"'name'",
"]",
"=",
"self",
".",
"name",
"if",
"hasattr",
"(",
"self",
",",
"'description'",
")",
"and",
"self",
".",
"description",
"is",
"not",
"None",
":",
"_dict",
"[",
"'description'",
"]",
"=",
"self",
".",
"description",
"if",
"hasattr",
"(",
"self",
",",
"'language'",
")",
"and",
"self",
".",
"language",
"is",
"not",
"None",
":",
"_dict",
"[",
"'language'",
"]",
"=",
"self",
".",
"language",
"if",
"hasattr",
"(",
"self",
",",
"'metadata'",
")",
"and",
"self",
".",
"metadata",
"is",
"not",
"None",
":",
"_dict",
"[",
"'metadata'",
"]",
"=",
"self",
".",
"metadata",
"if",
"hasattr",
"(",
"self",
",",
"'learning_opt_out'",
")",
"and",
"self",
".",
"learning_opt_out",
"is",
"not",
"None",
":",
"_dict",
"[",
"'learning_opt_out'",
"]",
"=",
"self",
".",
"learning_opt_out",
"if",
"hasattr",
"(",
"self",
",",
"'system_settings'",
")",
"and",
"self",
".",
"system_settings",
"is",
"not",
"None",
":",
"_dict",
"[",
"'system_settings'",
"]",
"=",
"self",
".",
"system_settings",
".",
"_to_dict",
"(",
")",
"if",
"hasattr",
"(",
"self",
",",
"'workspace_id'",
")",
"and",
"self",
".",
"workspace_id",
"is",
"not",
"None",
":",
"_dict",
"[",
"'workspace_id'",
"]",
"=",
"self",
".",
"workspace_id",
"if",
"hasattr",
"(",
"self",
",",
"'status'",
")",
"and",
"self",
".",
"status",
"is",
"not",
"None",
":",
"_dict",
"[",
"'status'",
"]",
"=",
"self",
".",
"status",
"if",
"hasattr",
"(",
"self",
",",
"'created'",
")",
"and",
"self",
".",
"created",
"is",
"not",
"None",
":",
"_dict",
"[",
"'created'",
"]",
"=",
"datetime_to_string",
"(",
"self",
".",
"created",
")",
"if",
"hasattr",
"(",
"self",
",",
"'updated'",
")",
"and",
"self",
".",
"updated",
"is",
"not",
"None",
":",
"_dict",
"[",
"'updated'",
"]",
"=",
"datetime_to_string",
"(",
"self",
".",
"updated",
")",
"if",
"hasattr",
"(",
"self",
",",
"'intents'",
")",
"and",
"self",
".",
"intents",
"is",
"not",
"None",
":",
"_dict",
"[",
"'intents'",
"]",
"=",
"[",
"x",
".",
"_to_dict",
"(",
")",
"for",
"x",
"in",
"self",
".",
"intents",
"]",
"if",
"hasattr",
"(",
"self",
",",
"'entities'",
")",
"and",
"self",
".",
"entities",
"is",
"not",
"None",
":",
"_dict",
"[",
"'entities'",
"]",
"=",
"[",
"x",
".",
"_to_dict",
"(",
")",
"for",
"x",
"in",
"self",
".",
"entities",
"]",
"if",
"hasattr",
"(",
"self",
",",
"'dialog_nodes'",
")",
"and",
"self",
".",
"dialog_nodes",
"is",
"not",
"None",
":",
"_dict",
"[",
"'dialog_nodes'",
"]",
"=",
"[",
"x",
".",
"_to_dict",
"(",
")",
"for",
"x",
"in",
"self",
".",
"dialog_nodes",
"]",
"if",
"hasattr",
"(",
"self",
",",
"'counterexamples'",
")",
"and",
"self",
".",
"counterexamples",
"is",
"not",
"None",
":",
"_dict",
"[",
"'counterexamples'",
"]",
"=",
"[",
"x",
".",
"_to_dict",
"(",
")",
"for",
"x",
"in",
"self",
".",
"counterexamples",
"]",
"return",
"_dict"
] | 54.405405 | 21.405405 |
def _normalize_options(self, options):
"""
Generator of 2-tuples (option-key, option-value).
When options spec is a list, generate a 2-tuples per list item.
:param options: dict {option: value}
returns:
iterator (option-key, option-value)
- option names lower cased and prepended with
'--' if necessary. Non-empty values cast to str
"""
for key, value in list(options.items()):
if '--' in key:
normalized_key = self._normalize_arg(key)
else:
normalized_key = '--%s' % self._normalize_arg(key)
if isinstance(value, (list, tuple)):
for opt_val in value:
yield (normalized_key, opt_val)
else:
yield (normalized_key, str(value) if value else value) | [
"def",
"_normalize_options",
"(",
"self",
",",
"options",
")",
":",
"for",
"key",
",",
"value",
"in",
"list",
"(",
"options",
".",
"items",
"(",
")",
")",
":",
"if",
"'--'",
"in",
"key",
":",
"normalized_key",
"=",
"self",
".",
"_normalize_arg",
"(",
"key",
")",
"else",
":",
"normalized_key",
"=",
"'--%s'",
"%",
"self",
".",
"_normalize_arg",
"(",
"key",
")",
"if",
"isinstance",
"(",
"value",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"for",
"opt_val",
"in",
"value",
":",
"yield",
"(",
"normalized_key",
",",
"opt_val",
")",
"else",
":",
"yield",
"(",
"normalized_key",
",",
"str",
"(",
"value",
")",
"if",
"value",
"else",
"value",
")"
] | 36.652174 | 17.26087 |
def prepare_to_generate(self, data_dir, tmp_dir):
"""Make sure that the data is prepared and the vocab is generated."""
self.get_or_create_vocab(data_dir, tmp_dir)
self.train_text_filepaths(tmp_dir)
self.dev_text_filepaths(tmp_dir) | [
"def",
"prepare_to_generate",
"(",
"self",
",",
"data_dir",
",",
"tmp_dir",
")",
":",
"self",
".",
"get_or_create_vocab",
"(",
"data_dir",
",",
"tmp_dir",
")",
"self",
".",
"train_text_filepaths",
"(",
"tmp_dir",
")",
"self",
".",
"dev_text_filepaths",
"(",
"tmp_dir",
")"
] | 48.6 | 4.4 |
def smart_search(cls, query_string, search_options=None, extra_query = None):
""" Perform a smart VRF search.
Maps to the function
:py:func:`nipap.backend.Nipap.smart_search_vrf` in the backend.
Please see the documentation for the backend function for
information regarding input arguments and return values.
"""
if search_options is None:
search_options = {}
xmlrpc = XMLRPCConnection()
try:
smart_result = xmlrpc.connection.smart_search_vrf(
{
'query_string': query_string,
'search_options': search_options,
'auth': AuthOptions().options,
'extra_query': extra_query
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
result = dict()
result['interpretation'] = smart_result['interpretation']
result['search_options'] = smart_result['search_options']
result['error'] = smart_result['error']
if 'error_message' in smart_result:
result['error_message'] = smart_result['error_message']
result['result'] = list()
for v in smart_result['result']:
result['result'].append(VRF.from_dict(v))
return result | [
"def",
"smart_search",
"(",
"cls",
",",
"query_string",
",",
"search_options",
"=",
"None",
",",
"extra_query",
"=",
"None",
")",
":",
"if",
"search_options",
"is",
"None",
":",
"search_options",
"=",
"{",
"}",
"xmlrpc",
"=",
"XMLRPCConnection",
"(",
")",
"try",
":",
"smart_result",
"=",
"xmlrpc",
".",
"connection",
".",
"smart_search_vrf",
"(",
"{",
"'query_string'",
":",
"query_string",
",",
"'search_options'",
":",
"search_options",
",",
"'auth'",
":",
"AuthOptions",
"(",
")",
".",
"options",
",",
"'extra_query'",
":",
"extra_query",
"}",
")",
"except",
"xmlrpclib",
".",
"Fault",
"as",
"xml_fault",
":",
"raise",
"_fault_to_exception",
"(",
"xml_fault",
")",
"result",
"=",
"dict",
"(",
")",
"result",
"[",
"'interpretation'",
"]",
"=",
"smart_result",
"[",
"'interpretation'",
"]",
"result",
"[",
"'search_options'",
"]",
"=",
"smart_result",
"[",
"'search_options'",
"]",
"result",
"[",
"'error'",
"]",
"=",
"smart_result",
"[",
"'error'",
"]",
"if",
"'error_message'",
"in",
"smart_result",
":",
"result",
"[",
"'error_message'",
"]",
"=",
"smart_result",
"[",
"'error_message'",
"]",
"result",
"[",
"'result'",
"]",
"=",
"list",
"(",
")",
"for",
"v",
"in",
"smart_result",
"[",
"'result'",
"]",
":",
"result",
"[",
"'result'",
"]",
".",
"append",
"(",
"VRF",
".",
"from_dict",
"(",
"v",
")",
")",
"return",
"result"
] | 39.029412 | 17.794118 |
def downside_risk(returns, required_return=0, period=DAILY):
"""
Determines the downside deviation below a threshold
Parameters
----------
returns : pd.Series or pd.DataFrame
Daily returns of the strategy, noncumulative.
- See full explanation in :func:`~pyfolio.timeseries.cum_returns`.
required_return: float / series
minimum acceptable return
period : str, optional
Defines the periodicity of the 'returns' data for purposes of
annualizing. Can be 'monthly', 'weekly', or 'daily'.
- Defaults to 'daily'.
Returns
-------
depends on input type
series ==> float
DataFrame ==> np.array
Annualized downside deviation
"""
return ep.downside_risk(returns,
required_return=required_return,
period=period) | [
"def",
"downside_risk",
"(",
"returns",
",",
"required_return",
"=",
"0",
",",
"period",
"=",
"DAILY",
")",
":",
"return",
"ep",
".",
"downside_risk",
"(",
"returns",
",",
"required_return",
"=",
"required_return",
",",
"period",
"=",
"period",
")"
] | 30.142857 | 18.428571 |
def parse(self, url):
"""
Return a configuration dict from a URL
"""
parsed_url = urlparse.urlparse(url)
try:
default_config = self.CONFIG[parsed_url.scheme]
except KeyError:
raise ValueError(
'unrecognised URL scheme for {}: {}'.format(
self.__class__.__name__, url))
handler = self.get_handler_for_scheme(parsed_url.scheme)
config = copy.deepcopy(default_config)
return handler(parsed_url, config) | [
"def",
"parse",
"(",
"self",
",",
"url",
")",
":",
"parsed_url",
"=",
"urlparse",
".",
"urlparse",
"(",
"url",
")",
"try",
":",
"default_config",
"=",
"self",
".",
"CONFIG",
"[",
"parsed_url",
".",
"scheme",
"]",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"'unrecognised URL scheme for {}: {}'",
".",
"format",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"url",
")",
")",
"handler",
"=",
"self",
".",
"get_handler_for_scheme",
"(",
"parsed_url",
".",
"scheme",
")",
"config",
"=",
"copy",
".",
"deepcopy",
"(",
"default_config",
")",
"return",
"handler",
"(",
"parsed_url",
",",
"config",
")"
] | 37 | 11.714286 |
def get_opcodes_from_bp_table(bp):
"""Given a 2d list structure, collect the opcodes from the best path."""
x = len(bp) - 1
y = len(bp[0]) - 1
opcodes = []
while x != 0 or y != 0:
this_bp = bp[x][y]
opcodes.append(this_bp)
if this_bp[0] == EQUAL or this_bp[0] == REPLACE:
x = x - 1
y = y - 1
elif this_bp[0] == INSERT:
y = y - 1
elif this_bp[0] == DELETE:
x = x - 1
opcodes.reverse()
return opcodes | [
"def",
"get_opcodes_from_bp_table",
"(",
"bp",
")",
":",
"x",
"=",
"len",
"(",
"bp",
")",
"-",
"1",
"y",
"=",
"len",
"(",
"bp",
"[",
"0",
"]",
")",
"-",
"1",
"opcodes",
"=",
"[",
"]",
"while",
"x",
"!=",
"0",
"or",
"y",
"!=",
"0",
":",
"this_bp",
"=",
"bp",
"[",
"x",
"]",
"[",
"y",
"]",
"opcodes",
".",
"append",
"(",
"this_bp",
")",
"if",
"this_bp",
"[",
"0",
"]",
"==",
"EQUAL",
"or",
"this_bp",
"[",
"0",
"]",
"==",
"REPLACE",
":",
"x",
"=",
"x",
"-",
"1",
"y",
"=",
"y",
"-",
"1",
"elif",
"this_bp",
"[",
"0",
"]",
"==",
"INSERT",
":",
"y",
"=",
"y",
"-",
"1",
"elif",
"this_bp",
"[",
"0",
"]",
"==",
"DELETE",
":",
"x",
"=",
"x",
"-",
"1",
"opcodes",
".",
"reverse",
"(",
")",
"return",
"opcodes"
] | 29.294118 | 14.705882 |
def is_allowed(self, request: AxesHttpRequest, credentials: dict = None) -> bool:
"""
Checks if the user is allowed to access or use given functionality such as a login view or authentication.
This method is abstract and other backends can specialize it as needed, but the default implementation
checks if the user has attempted to authenticate into the site too many times through the
Django authentication backends and returns ``False``if user exceeds the configured Axes thresholds.
This checker can implement arbitrary checks such as IP whitelisting or blacklisting,
request frequency checking, failed attempt monitoring or similar functions.
Please refer to the ``axes.handlers.database.AxesDatabaseHandler`` for the default implementation
and inspiration on some common checks and access restrictions before writing your own implementation.
"""
if self.is_blacklisted(request, credentials):
return False
if self.is_whitelisted(request, credentials):
return True
if self.is_locked(request, credentials):
return False
return True | [
"def",
"is_allowed",
"(",
"self",
",",
"request",
":",
"AxesHttpRequest",
",",
"credentials",
":",
"dict",
"=",
"None",
")",
"->",
"bool",
":",
"if",
"self",
".",
"is_blacklisted",
"(",
"request",
",",
"credentials",
")",
":",
"return",
"False",
"if",
"self",
".",
"is_whitelisted",
"(",
"request",
",",
"credentials",
")",
":",
"return",
"True",
"if",
"self",
".",
"is_locked",
"(",
"request",
",",
"credentials",
")",
":",
"return",
"False",
"return",
"True"
] | 46.56 | 36.88 |
def a_list(label=None, kwargs=None, attributes=None):
"""Return assembled DOT a_list string.
>>> a_list('spam', {'spam': None, 'ham': 'ham ham', 'eggs': ''})
'label=spam eggs="" ham="ham ham"'
"""
result = ['label=%s' % quote(label)] if label is not None else []
if kwargs:
items = ['%s=%s' % (quote(k), quote(v))
for k, v in tools.mapping_items(kwargs) if v is not None]
result.extend(items)
if attributes:
if hasattr(attributes, 'items'):
attributes = tools.mapping_items(attributes)
items = ['%s=%s' % (quote(k), quote(v))
for k, v in attributes if v is not None]
result.extend(items)
return ' '.join(result) | [
"def",
"a_list",
"(",
"label",
"=",
"None",
",",
"kwargs",
"=",
"None",
",",
"attributes",
"=",
"None",
")",
":",
"result",
"=",
"[",
"'label=%s'",
"%",
"quote",
"(",
"label",
")",
"]",
"if",
"label",
"is",
"not",
"None",
"else",
"[",
"]",
"if",
"kwargs",
":",
"items",
"=",
"[",
"'%s=%s'",
"%",
"(",
"quote",
"(",
"k",
")",
",",
"quote",
"(",
"v",
")",
")",
"for",
"k",
",",
"v",
"in",
"tools",
".",
"mapping_items",
"(",
"kwargs",
")",
"if",
"v",
"is",
"not",
"None",
"]",
"result",
".",
"extend",
"(",
"items",
")",
"if",
"attributes",
":",
"if",
"hasattr",
"(",
"attributes",
",",
"'items'",
")",
":",
"attributes",
"=",
"tools",
".",
"mapping_items",
"(",
"attributes",
")",
"items",
"=",
"[",
"'%s=%s'",
"%",
"(",
"quote",
"(",
"k",
")",
",",
"quote",
"(",
"v",
")",
")",
"for",
"k",
",",
"v",
"in",
"attributes",
"if",
"v",
"is",
"not",
"None",
"]",
"result",
".",
"extend",
"(",
"items",
")",
"return",
"' '",
".",
"join",
"(",
"result",
")"
] | 39.055556 | 14.888889 |
def save(nifti_filename, numpy_data):
"""
Export a numpy array to a nifti file. TODO: currently using dummy
headers and identity matrix affine transform. This can be expanded.
Arguments:
nifti_filename (str): A filename to which to save the nifti data
numpy_data (numpy.ndarray): The numpy array to save to nifti
Returns:
String. The expanded filename that now holds the nifti data
"""
# Expand filename to be absolute
nifti_filename = os.path.expanduser(nifti_filename)
try:
nifti_img = nib.Nifti1Image(numpy_data, numpy.eye(4))
nib.save(nifti_img, nifti_filename)
except Exception as e:
raise ValueError("Could not save file {0}.".format(nifti_filename))
return nifti_filename | [
"def",
"save",
"(",
"nifti_filename",
",",
"numpy_data",
")",
":",
"# Expand filename to be absolute",
"nifti_filename",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"nifti_filename",
")",
"try",
":",
"nifti_img",
"=",
"nib",
".",
"Nifti1Image",
"(",
"numpy_data",
",",
"numpy",
".",
"eye",
"(",
"4",
")",
")",
"nib",
".",
"save",
"(",
"nifti_img",
",",
"nifti_filename",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"ValueError",
"(",
"\"Could not save file {0}.\"",
".",
"format",
"(",
"nifti_filename",
")",
")",
"return",
"nifti_filename"
] | 34.272727 | 22.909091 |
def count(keys, axis=semantics.axis_default):
"""count the number of times each key occurs in the input set
Arguments
---------
keys : indexable object
Returns
-------
unique : ndarray, [groups, ...]
unique keys
count : ndarray, [groups], int
the number of times each key occurs in the input set
Notes
-----
Can be seen as numpy work-alike of collections.Counter
Alternatively, as sparse equivalent of count_table
"""
index = as_index(keys, axis, base=True)
return index.unique, index.count | [
"def",
"count",
"(",
"keys",
",",
"axis",
"=",
"semantics",
".",
"axis_default",
")",
":",
"index",
"=",
"as_index",
"(",
"keys",
",",
"axis",
",",
"base",
"=",
"True",
")",
"return",
"index",
".",
"unique",
",",
"index",
".",
"count"
] | 26.142857 | 19.190476 |
def write_touchstone(fname, options, data, noise=None, frac_length=10, exp_length=2):
r"""
Write a `Touchstone`_ file.
Parameter data is first resized to an :code:`points` x :code:`nports` x
:code:`nports` where :code:`points` represents the number of frequency
points and :code:`nports` represents the number of ports in the file; then
parameter data is written to file in scientific notation
:param fname: Touchstone file name
:type fname: `FileNameExists <https://pexdoc.readthedocs.io/en/stable/
ptypes.html#filenameexists>`_
:param options: Touchstone file options
:type options: :ref:`TouchstoneOptions`
:param data: Touchstone file parameter data
:type data: :ref:`TouchstoneData`
:param noise: Touchstone file parameter noise data (only supported in
two-port files)
:type noise: :ref:`TouchstoneNoiseData`
:param frac_length: Number of digits to use in fractional part of data
:type frac_length: non-negative integer
:param exp_length: Number of digits to use in exponent
:type exp_length: positive integer
.. [[[cog cog.out(exobj.get_sphinx_autodoc()) ]]]
.. Auto-generated exceptions documentation for
.. peng.touchstone.write_touchstone
:raises:
* RuntimeError (Argument \`data\` is not valid)
* RuntimeError (Argument \`exp_length\` is not valid)
* RuntimeError (Argument \`fname\` is not valid)
* RuntimeError (Argument \`frac_length\` is not valid)
* RuntimeError (Argument \`noise\` is not valid)
* RuntimeError (Argument \`options\` is not valid)
* RuntimeError (File *[fname]* does not have a valid extension)
* RuntimeError (Malformed data)
* RuntimeError (Noise data only supported in two-port files)
.. [[[end]]]
"""
# pylint: disable=R0913
# Exceptions definitions
exnports = pexdoc.exh.addex(
RuntimeError, "File *[fname]* does not have a valid extension"
)
exnoise = pexdoc.exh.addex(
RuntimeError, "Noise data only supported in two-port files"
)
expoints = pexdoc.exh.addex(RuntimeError, "Malformed data")
# Data validation
_, ext = os.path.splitext(fname)
ext = ext.lower()
nports_regexp = re.compile(r"\.s(\d+)p")
match = nports_regexp.match(ext)
exnports(not match, edata={"field": "fname", "value": fname})
nports = int(match.groups()[0])
exnoise(bool((nports != 2) and noise))
nums_per_freq = nports ** 2
expoints(data["points"] * nums_per_freq != data["pars"].size)
#
npoints = data["points"]
par_data = np.resize(np.copy(data["pars"]), (npoints, nports, nports))
if nports == 2:
par_data = np.transpose(par_data, (0, 2, 1))
units_dict = {"ghz": "GHz", "mhz": "MHz", "khz": "KHz", "hz": "Hz"}
options["units"] = units_dict[options["units"].lower()]
fspace = 2 + frac_length + (exp_length + 2)
# Format data
with open(fname, "w") as fobj:
fobj.write(
"# {units} {ptype} {pformat} R {z0}\n".format(
units=options["units"],
ptype=options["ptype"],
pformat=options["pformat"],
z0=options["z0"],
)
)
for row in _chunk_pars(data["freq"], par_data, options["pformat"]):
row_data = [
to_scientific_string(item, frac_length, exp_length, bool(num != 0))
if item is not None
else fspace * " "
for num, item in enumerate(row)
]
fobj.write(" ".join(row_data) + "\n")
if (nports == 2) and noise:
fobj.write("! Noise data\n")
for row in _chunk_noise(noise):
row_data = [
to_scientific_string(item, frac_length, exp_length, bool(num != 0))
for num, item in enumerate(row)
]
fobj.write(" ".join(row_data) + "\n") | [
"def",
"write_touchstone",
"(",
"fname",
",",
"options",
",",
"data",
",",
"noise",
"=",
"None",
",",
"frac_length",
"=",
"10",
",",
"exp_length",
"=",
"2",
")",
":",
"# pylint: disable=R0913",
"# Exceptions definitions",
"exnports",
"=",
"pexdoc",
".",
"exh",
".",
"addex",
"(",
"RuntimeError",
",",
"\"File *[fname]* does not have a valid extension\"",
")",
"exnoise",
"=",
"pexdoc",
".",
"exh",
".",
"addex",
"(",
"RuntimeError",
",",
"\"Noise data only supported in two-port files\"",
")",
"expoints",
"=",
"pexdoc",
".",
"exh",
".",
"addex",
"(",
"RuntimeError",
",",
"\"Malformed data\"",
")",
"# Data validation",
"_",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"fname",
")",
"ext",
"=",
"ext",
".",
"lower",
"(",
")",
"nports_regexp",
"=",
"re",
".",
"compile",
"(",
"r\"\\.s(\\d+)p\"",
")",
"match",
"=",
"nports_regexp",
".",
"match",
"(",
"ext",
")",
"exnports",
"(",
"not",
"match",
",",
"edata",
"=",
"{",
"\"field\"",
":",
"\"fname\"",
",",
"\"value\"",
":",
"fname",
"}",
")",
"nports",
"=",
"int",
"(",
"match",
".",
"groups",
"(",
")",
"[",
"0",
"]",
")",
"exnoise",
"(",
"bool",
"(",
"(",
"nports",
"!=",
"2",
")",
"and",
"noise",
")",
")",
"nums_per_freq",
"=",
"nports",
"**",
"2",
"expoints",
"(",
"data",
"[",
"\"points\"",
"]",
"*",
"nums_per_freq",
"!=",
"data",
"[",
"\"pars\"",
"]",
".",
"size",
")",
"#",
"npoints",
"=",
"data",
"[",
"\"points\"",
"]",
"par_data",
"=",
"np",
".",
"resize",
"(",
"np",
".",
"copy",
"(",
"data",
"[",
"\"pars\"",
"]",
")",
",",
"(",
"npoints",
",",
"nports",
",",
"nports",
")",
")",
"if",
"nports",
"==",
"2",
":",
"par_data",
"=",
"np",
".",
"transpose",
"(",
"par_data",
",",
"(",
"0",
",",
"2",
",",
"1",
")",
")",
"units_dict",
"=",
"{",
"\"ghz\"",
":",
"\"GHz\"",
",",
"\"mhz\"",
":",
"\"MHz\"",
",",
"\"khz\"",
":",
"\"KHz\"",
",",
"\"hz\"",
":",
"\"Hz\"",
"}",
"options",
"[",
"\"units\"",
"]",
"=",
"units_dict",
"[",
"options",
"[",
"\"units\"",
"]",
".",
"lower",
"(",
")",
"]",
"fspace",
"=",
"2",
"+",
"frac_length",
"+",
"(",
"exp_length",
"+",
"2",
")",
"# Format data",
"with",
"open",
"(",
"fname",
",",
"\"w\"",
")",
"as",
"fobj",
":",
"fobj",
".",
"write",
"(",
"\"# {units} {ptype} {pformat} R {z0}\\n\"",
".",
"format",
"(",
"units",
"=",
"options",
"[",
"\"units\"",
"]",
",",
"ptype",
"=",
"options",
"[",
"\"ptype\"",
"]",
",",
"pformat",
"=",
"options",
"[",
"\"pformat\"",
"]",
",",
"z0",
"=",
"options",
"[",
"\"z0\"",
"]",
",",
")",
")",
"for",
"row",
"in",
"_chunk_pars",
"(",
"data",
"[",
"\"freq\"",
"]",
",",
"par_data",
",",
"options",
"[",
"\"pformat\"",
"]",
")",
":",
"row_data",
"=",
"[",
"to_scientific_string",
"(",
"item",
",",
"frac_length",
",",
"exp_length",
",",
"bool",
"(",
"num",
"!=",
"0",
")",
")",
"if",
"item",
"is",
"not",
"None",
"else",
"fspace",
"*",
"\" \"",
"for",
"num",
",",
"item",
"in",
"enumerate",
"(",
"row",
")",
"]",
"fobj",
".",
"write",
"(",
"\" \"",
".",
"join",
"(",
"row_data",
")",
"+",
"\"\\n\"",
")",
"if",
"(",
"nports",
"==",
"2",
")",
"and",
"noise",
":",
"fobj",
".",
"write",
"(",
"\"! Noise data\\n\"",
")",
"for",
"row",
"in",
"_chunk_noise",
"(",
"noise",
")",
":",
"row_data",
"=",
"[",
"to_scientific_string",
"(",
"item",
",",
"frac_length",
",",
"exp_length",
",",
"bool",
"(",
"num",
"!=",
"0",
")",
")",
"for",
"num",
",",
"item",
"in",
"enumerate",
"(",
"row",
")",
"]",
"fobj",
".",
"write",
"(",
"\" \"",
".",
"join",
"(",
"row_data",
")",
"+",
"\"\\n\"",
")"
] | 36.261682 | 20.233645 |
def get_interpolation_function(self, times, series):
""" Initializes interpolation model
:param times: Array of reference times in second relative to the first timestamp
:type times: numpy.array
:param series: One dimensional array of time series
:type series: numpy.array
:return: Initialized interpolation model class
"""
return self.interpolation_object(times, series, **self.interpolation_parameters) | [
"def",
"get_interpolation_function",
"(",
"self",
",",
"times",
",",
"series",
")",
":",
"return",
"self",
".",
"interpolation_object",
"(",
"times",
",",
"series",
",",
"*",
"*",
"self",
".",
"interpolation_parameters",
")"
] | 46 | 19.6 |
def update(self, options=None, attribute_options=None):
"""
Updates this mapping with the given option and attribute option maps.
:param dict options: Maps representer options to their values.
:param dict attribute_options: Maps attribute names to dictionaries
mapping attribute options to their values.
"""
attr_map = self.__get_attribute_map(self.__mapped_cls, None, 0)
for attributes in attribute_options:
for attr_name in attributes:
if not attr_name in attr_map:
raise AttributeError('Trying to configure non-existing '
'resource attribute "%s"'
% (attr_name))
cfg = RepresenterConfiguration(options=options,
attribute_options=attribute_options)
self.configuration.update(cfg) | [
"def",
"update",
"(",
"self",
",",
"options",
"=",
"None",
",",
"attribute_options",
"=",
"None",
")",
":",
"attr_map",
"=",
"self",
".",
"__get_attribute_map",
"(",
"self",
".",
"__mapped_cls",
",",
"None",
",",
"0",
")",
"for",
"attributes",
"in",
"attribute_options",
":",
"for",
"attr_name",
"in",
"attributes",
":",
"if",
"not",
"attr_name",
"in",
"attr_map",
":",
"raise",
"AttributeError",
"(",
"'Trying to configure non-existing '",
"'resource attribute \"%s\"'",
"%",
"(",
"attr_name",
")",
")",
"cfg",
"=",
"RepresenterConfiguration",
"(",
"options",
"=",
"options",
",",
"attribute_options",
"=",
"attribute_options",
")",
"self",
".",
"configuration",
".",
"update",
"(",
"cfg",
")"
] | 50.888889 | 18.777778 |
def update_configuration(app):
    """Update parameters which are dependent on information from the
    project-specific conf.py (including its location on the filesystem).

    :param app: the Sphinx application object; its ``config`` is mutated
        in place with defaults for HTML, LaTeX, epub, man-page and
        texinfo output, and its version is derived from the sibling
        setup.py.
    """
    config = app.config
    project = config.project
    config_dir = app.env.srcdir
    # setup.py lives one level above the docs source directory; make the
    # project importable from there.
    sys.path.insert(0, os.path.join(config_dir, '..'))
    # Register the themes directory that ships alongside this module.
    config.html_theme_path.append(os.path.relpath(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'themes'), config_dir))
    # Only fill in defaults below; values set in the project's conf.py win.
    if not config.html_logo:
        config.html_logo = os.path.relpath(os.path.join(STATIC_PATH, 'safari_logo.png'), config_dir)
    if not config.html_favicon:
        config.html_favicon = os.path.relpath(os.path.join(STATIC_PATH, 'favicon.ico'), config_dir)
    config.html_static_path.append(os.path.relpath(STATIC_PATH, config_dir))
    if not config.htmlhelp_basename:
        config.htmlhelp_basename = '%sdoc' % project
    if not config.latex_logo:
        config.latex_logo = os.path.relpath(os.path.join(STATIC_PATH, 'safari_logo.png'), config_dir)
    if not config.epub_title:
        config.epub_title = u'%s Documentation' % project
    if not config.epub_publisher:
        config.epub_publisher = config.epub_author
    if not config.epub_copyright:
        config.epub_copyright = config.copyright
    # Register the output documents for the LaTeX, man and texinfo builders.
    config.latex_documents.append(
        (master_doc,
         '%s.tex' % project,
         u'%s Documentation' % project,
         u'Safari',
         'manual'))
    config.man_pages.append(
        (master_doc,
         project,
         u'%s Documentation' % project,
         [u'Safari'],
         1))
    config.texinfo_documents.append(
        (master_doc,
         project,
         u'%s Documentation' % project,
         u'Safari',
         project,
         'One line description of project.',
         'Miscellaneous'))
    # Parse the version number from setup.py without actually running setup()
    with open(os.path.join(config_dir, '..', 'setup.py'), 'r') as f:
        content = f.read()
    match = re.search(r"version\s*=\s*['\"]([\d\.]+)['\"]", content)
    if match:
        # NOTE(review): if no version assignment is found, config.version
        # keeps whatever value it already had -- TODO confirm intended.
        config.version = match.group(1)
    config.release = config.version
"def",
"update_configuration",
"(",
"app",
")",
":",
"config",
"=",
"app",
".",
"config",
"project",
"=",
"config",
".",
"project",
"config_dir",
"=",
"app",
".",
"env",
".",
"srcdir",
"sys",
".",
"path",
".",
"insert",
"(",
"0",
",",
"os",
".",
"path",
".",
"join",
"(",
"config_dir",
",",
"'..'",
")",
")",
"config",
".",
"html_theme_path",
".",
"append",
"(",
"os",
".",
"path",
".",
"relpath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
")",
",",
"'themes'",
")",
",",
"config_dir",
")",
")",
"if",
"not",
"config",
".",
"html_logo",
":",
"config",
".",
"html_logo",
"=",
"os",
".",
"path",
".",
"relpath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"STATIC_PATH",
",",
"'safari_logo.png'",
")",
",",
"config_dir",
")",
"if",
"not",
"config",
".",
"html_favicon",
":",
"config",
".",
"html_favicon",
"=",
"os",
".",
"path",
".",
"relpath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"STATIC_PATH",
",",
"'favicon.ico'",
")",
",",
"config_dir",
")",
"config",
".",
"html_static_path",
".",
"append",
"(",
"os",
".",
"path",
".",
"relpath",
"(",
"STATIC_PATH",
",",
"config_dir",
")",
")",
"if",
"not",
"config",
".",
"htmlhelp_basename",
":",
"config",
".",
"htmlhelp_basename",
"=",
"'%sdoc'",
"%",
"project",
"if",
"not",
"config",
".",
"latex_logo",
":",
"config",
".",
"latex_logo",
"=",
"os",
".",
"path",
".",
"relpath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"STATIC_PATH",
",",
"'safari_logo.png'",
")",
",",
"config_dir",
")",
"if",
"not",
"config",
".",
"epub_title",
":",
"config",
".",
"epub_title",
"=",
"u'%s Documentation'",
"%",
"project",
"if",
"not",
"config",
".",
"epub_publisher",
":",
"config",
".",
"epub_publisher",
"=",
"config",
".",
"epub_author",
"if",
"not",
"config",
".",
"epub_copyright",
":",
"config",
".",
"epub_copyright",
"=",
"config",
".",
"copyright",
"config",
".",
"latex_documents",
".",
"append",
"(",
"(",
"master_doc",
",",
"'%s.tex'",
"%",
"project",
",",
"u'%s Documentation'",
"%",
"project",
",",
"u'Safari'",
",",
"'manual'",
")",
")",
"config",
".",
"man_pages",
".",
"append",
"(",
"(",
"master_doc",
",",
"project",
",",
"u'%s Documentation'",
"%",
"project",
",",
"[",
"u'Safari'",
"]",
",",
"1",
")",
")",
"config",
".",
"texinfo_documents",
".",
"append",
"(",
"(",
"master_doc",
",",
"project",
",",
"u'%s Documentation'",
"%",
"project",
",",
"u'Safari'",
",",
"project",
",",
"'One line description of project.'",
",",
"'Miscellaneous'",
")",
")",
"# Parse the version number from setup.py without actually running setup()",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"config_dir",
",",
"'..'",
",",
"'setup.py'",
")",
",",
"'r'",
")",
"as",
"f",
":",
"content",
"=",
"f",
".",
"read",
"(",
")",
"match",
"=",
"re",
".",
"search",
"(",
"r\"version\\s*=\\s*['\\\"]([\\d\\.]+)['\\\"]\"",
",",
"content",
")",
"if",
"match",
":",
"config",
".",
"version",
"=",
"match",
".",
"group",
"(",
"1",
")",
"config",
".",
"release",
"=",
"config",
".",
"version"
] | 39.622642 | 19.433962 |
def morelikethis(self, index, doc_type, id, fields, **query_params):
    """
    Execute a "more like this" search query against one or more fields and get back search hits.

    :param index: the index holding the source document
    :param doc_type: the type of the source document
    :param id: the id of the source document
    :param fields: iterable of field names to run the mlt query against
    :param query_params: extra query-string parameters; a ``body`` key,
        if present, is sent as the request payload instead
    """
    path = make_path(index, doc_type, id, '_mlt')
    query_params['mlt_fields'] = ','.join(fields)
    # Pop 'body' so it is sent only as the request payload and not also
    # serialized into the query string (the original left it in params).
    body = query_params.pop("body", None)
    return self._send_request('GET', path, body=body, params=query_params)
"def",
"morelikethis",
"(",
"self",
",",
"index",
",",
"doc_type",
",",
"id",
",",
"fields",
",",
"*",
"*",
"query_params",
")",
":",
"path",
"=",
"make_path",
"(",
"index",
",",
"doc_type",
",",
"id",
",",
"'_mlt'",
")",
"query_params",
"[",
"'mlt_fields'",
"]",
"=",
"','",
".",
"join",
"(",
"fields",
")",
"body",
"=",
"query_params",
"[",
"\"body\"",
"]",
"if",
"\"body\"",
"in",
"query_params",
"else",
"None",
"return",
"self",
".",
"_send_request",
"(",
"'GET'",
",",
"path",
",",
"body",
"=",
"body",
",",
"params",
"=",
"query_params",
")"
] | 55.625 | 22.875 |
def __checkSPKTimestamp(self):
    """
    Check whether the SPK is too old and generate a new one in that case.
    """
    spk_age = time.time() - self.__spk["timestamp"]
    if spk_age > self.__spk_timeout:
        self.__generateSPK()
"def",
"__checkSPKTimestamp",
"(",
"self",
")",
":",
"if",
"time",
".",
"time",
"(",
")",
"-",
"self",
".",
"__spk",
"[",
"\"timestamp\"",
"]",
">",
"self",
".",
"__spk_timeout",
":",
"self",
".",
"__generateSPK",
"(",
")"
] | 33 | 17.857143 |
def process(self):
    """
    Pull events off the underlying queue forever, handling each in order.

    Events that pass ``pre_process_event`` are dispatched to
    ``invoke_handlers``; every event is acknowledged with ``task_done``
    either way.
    """
    assert self.queue is not None
    while True:
        item = self.queue.get()
        if not self.pre_process_event(item):
            self.queue.task_done()
            continue
        self.invoke_handlers(item)
        self.queue.task_done()
"def",
"process",
"(",
"self",
")",
":",
"assert",
"self",
".",
"queue",
"is",
"not",
"None",
"while",
"True",
":",
"event",
"=",
"self",
".",
"queue",
".",
"get",
"(",
")",
"if",
"self",
".",
"pre_process_event",
"(",
"event",
")",
":",
"self",
".",
"invoke_handlers",
"(",
"event",
")",
"self",
".",
"queue",
".",
"task_done",
"(",
")"
] | 33.2 | 10.2 |
def purge_dict(idict):
    """Return a copy of *idict* with all null values (per ``is_null``) removed."""
    return {key: value for key, value in idict.items()
            if not is_null(value)}
"def",
"purge_dict",
"(",
"idict",
")",
":",
"odict",
"=",
"{",
"}",
"for",
"key",
",",
"val",
"in",
"idict",
".",
"items",
"(",
")",
":",
"if",
"is_null",
"(",
"val",
")",
":",
"continue",
"odict",
"[",
"key",
"]",
"=",
"val",
"return",
"odict"
] | 25 | 15.75 |
def _generate_download_google_link(link):
    """Convert a shareable Google Drive link into a direct-download URL.

    A normal "sharing" link only opens the Drive viewer; programmatic
    downloads (e.g. with wget) need the ``uc?export=download`` form that
    carries the file id extracted from the original URL.

    ----------
    Parameters
    ----------
    link : str
        Sharable Google Drive link.

    Returns
    -------
    out : str
        Manipulated link, that ensures a direct download with wget function.
    """
    if "id=" in link:
        # Links of the form ...?id=<file_id>: the id is everything
        # after the "id=" marker.
        file_id = link.split("id=")[-1]
    else:
        # Links of the form .../d/<file_id>/view: the id is the
        # second-to-last path segment.
        file_id = link.split("/")[-2]
    return "https://drive.google.com/uc?export=download&id=" + file_id
"def",
"_generate_download_google_link",
"(",
"link",
")",
":",
"# Get file id.",
"if",
"\"id=\"",
"not",
"in",
"link",
":",
"# Split link into segments (split character --> /)",
"split_link",
"=",
"link",
".",
"split",
"(",
"\"/\"",
")",
"file_id",
"=",
"split_link",
"[",
"-",
"2",
"]",
"else",
":",
"# Split link into segments (split string --> \"id=\")",
"split_link",
"=",
"link",
".",
"split",
"(",
"\"id=\"",
")",
"file_id",
"=",
"split_link",
"[",
"-",
"1",
"]",
"return",
"\"https://drive.google.com/uc?export=download&id=\"",
"+",
"file_id"
] | 26.568182 | 27.295455 |
def _open_fits_files(filenames):
    """
    Given a {correlation: filename} mapping for filenames
    returns a {correlation: file handle} mapping
    """
    open_kwargs = {'mode': 'update', 'memmap': False}

    def _open_or_none(path):
        """ Returns a filehandle or None if file does not exist """
        if not os.path.exists(path):
            return None
        return fits.open(path, **open_kwargs)

    return collections.OrderedDict(
        (corr, tuple(_open_or_none(path) for path in paths))
        for corr, paths in filenames.iteritems())
"def",
"_open_fits_files",
"(",
"filenames",
")",
":",
"kw",
"=",
"{",
"'mode'",
":",
"'update'",
",",
"'memmap'",
":",
"False",
"}",
"def",
"_fh",
"(",
"fn",
")",
":",
"\"\"\" Returns a filehandle or None if file does not exist \"\"\"",
"return",
"fits",
".",
"open",
"(",
"fn",
",",
"*",
"*",
"kw",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"fn",
")",
"else",
"None",
"return",
"collections",
".",
"OrderedDict",
"(",
"(",
"corr",
",",
"tuple",
"(",
"_fh",
"(",
"fn",
")",
"for",
"fn",
"in",
"files",
")",
")",
"for",
"corr",
",",
"files",
"in",
"filenames",
".",
"iteritems",
"(",
")",
")"
] | 34.5 | 14 |
def class_declaration(self, type_):
    """
    Return a reference to the class declaration for *type_*.

    Raises TypeError when *type_* is not an instantiation of the std::
    container handled by this traits object.
    """
    utils.loggers.queries_engine.debug(
        "Container traits: searching class declaration for %s", type_)
    declaration = self.get_container_or_none(type_)
    if not declaration:
        raise TypeError(
            'Type "%s" is not instantiation of std::%s' %
            (type_.decl_string, self.name()))
    return declaration
"def",
"class_declaration",
"(",
"self",
",",
"type_",
")",
":",
"utils",
".",
"loggers",
".",
"queries_engine",
".",
"debug",
"(",
"\"Container traits: searching class declaration for %s\"",
",",
"type_",
")",
"cls_declaration",
"=",
"self",
".",
"get_container_or_none",
"(",
"type_",
")",
"if",
"not",
"cls_declaration",
":",
"raise",
"TypeError",
"(",
"'Type \"%s\" is not instantiation of std::%s'",
"%",
"(",
"type_",
".",
"decl_string",
",",
"self",
".",
"name",
"(",
")",
")",
")",
"return",
"cls_declaration"
] | 32.2 | 16.866667 |
def filter_objects(self, objects, perm=None):
    """ Return only objects with specified permission in objects list. If perm not specified, 'view' perm will be used. """
    if perm is None:
        perm = build_permission_name(self.model_class, 'view')

    def _has_permission(candidate):
        return self.user.has_perm(perm, obj=candidate)

    return filter(_has_permission, objects)
"def",
"filter_objects",
"(",
"self",
",",
"objects",
",",
"perm",
"=",
"None",
")",
":",
"if",
"perm",
"is",
"None",
":",
"perm",
"=",
"build_permission_name",
"(",
"self",
".",
"model_class",
",",
"'view'",
")",
"return",
"filter",
"(",
"lambda",
"o",
":",
"self",
".",
"user",
".",
"has_perm",
"(",
"perm",
",",
"obj",
"=",
"o",
")",
",",
"objects",
")"
] | 67 | 16 |
def report(self):
    """Return a report about the pool state and configuration.

    :rtype: dict
    """
    total_exceptions = 0
    total_executions = 0
    for connection in self.connections.values():
        total_exceptions += connection.exceptions
        total_executions += connection.executions
    return {
        'connections': {
            'busy': len(self.busy_connections),
            'closed': len(self.closed_connections),
            'executing': len(self.executing_connections),
            'idle': len(self.idle_connections),
            # 'locked' mirrors 'busy' by design.
            'locked': len(self.busy_connections),
        },
        'exceptions': total_exceptions,
        'executions': total_executions,
        'full': self.is_full,
        'idle': {
            'duration': self.idle_duration,
            'ttl': self.idle_ttl,
        },
        'max_size': self.max_size,
    }
"def",
"report",
"(",
"self",
")",
":",
"return",
"{",
"'connections'",
":",
"{",
"'busy'",
":",
"len",
"(",
"self",
".",
"busy_connections",
")",
",",
"'closed'",
":",
"len",
"(",
"self",
".",
"closed_connections",
")",
",",
"'executing'",
":",
"len",
"(",
"self",
".",
"executing_connections",
")",
",",
"'idle'",
":",
"len",
"(",
"self",
".",
"idle_connections",
")",
",",
"'locked'",
":",
"len",
"(",
"self",
".",
"busy_connections",
")",
"}",
",",
"'exceptions'",
":",
"sum",
"(",
"[",
"c",
".",
"exceptions",
"for",
"c",
"in",
"self",
".",
"connections",
".",
"values",
"(",
")",
"]",
")",
",",
"'executions'",
":",
"sum",
"(",
"[",
"c",
".",
"executions",
"for",
"c",
"in",
"self",
".",
"connections",
".",
"values",
"(",
")",
"]",
")",
",",
"'full'",
":",
"self",
".",
"is_full",
",",
"'idle'",
":",
"{",
"'duration'",
":",
"self",
".",
"idle_duration",
",",
"'ttl'",
":",
"self",
".",
"idle_ttl",
"}",
",",
"'max_size'",
":",
"self",
".",
"max_size",
"}"
] | 34.44 | 16.56 |
def to_python(self, data):
    """
    Convert a data to python format.

    ``None`` becomes the empty string, unicode text is encoded with the
    default encoding, anything else is coerced with ``str``.
    """
    if data is None:
        return ''
    if isinstance(data, unicode):
        return data.encode(DEFAULT_ENCODING)
    return str(data)
"def",
"to_python",
"(",
"self",
",",
"data",
")",
":",
"if",
"data",
"is",
"None",
":",
"return",
"''",
"if",
"isinstance",
"(",
"data",
",",
"unicode",
")",
":",
"data",
"=",
"data",
".",
"encode",
"(",
"DEFAULT_ENCODING",
")",
"else",
":",
"data",
"=",
"str",
"(",
"data",
")",
"return",
"data"
] | 25.272727 | 10.909091 |
def open_data(self, text, colsep=u"\t", rowsep=u"\n",
              transpose=False, skiprows=0, comments='#'):
    """Open clipboard text as table"""
    if pd:
        # Remember the raw text and the pandas parse options; a None
        # column separator means "split on any whitespace".
        if colsep is None:
            self.pd_info = dict(lineterminator=rowsep, skiprows=skiprows,
                                comment=comments, delim_whitespace=True)
        else:
            self.pd_info = dict(sep=colsep, lineterminator=rowsep,
                                skiprows=skiprows, comment=comments)
        self.pd_text = text
    self._table_view.process_data(text, colsep, rowsep, transpose,
                                  skiprows, comments)
"def",
"open_data",
"(",
"self",
",",
"text",
",",
"colsep",
"=",
"u\"\\t\"",
",",
"rowsep",
"=",
"u\"\\n\"",
",",
"transpose",
"=",
"False",
",",
"skiprows",
"=",
"0",
",",
"comments",
"=",
"'#'",
")",
":",
"if",
"pd",
":",
"self",
".",
"pd_text",
"=",
"text",
"self",
".",
"pd_info",
"=",
"dict",
"(",
"sep",
"=",
"colsep",
",",
"lineterminator",
"=",
"rowsep",
",",
"skiprows",
"=",
"skiprows",
",",
"comment",
"=",
"comments",
")",
"if",
"colsep",
"is",
"None",
":",
"self",
".",
"pd_info",
"=",
"dict",
"(",
"lineterminator",
"=",
"rowsep",
",",
"skiprows",
"=",
"skiprows",
",",
"comment",
"=",
"comments",
",",
"delim_whitespace",
"=",
"True",
")",
"self",
".",
"_table_view",
".",
"process_data",
"(",
"text",
",",
"colsep",
",",
"rowsep",
",",
"transpose",
",",
"skiprows",
",",
"comments",
")"
] | 52 | 18.75 |
def get_neighbor_sentence_ngrams(
    mention, d=1, attrib="words", n_min=1, n_max=1, lower=True
):
    """Get the ngrams that are in the neighboring Sentences of the given Mention.

    Note that if a candidate is passed in, all of its Mentions will be searched.

    :param mention: The Mention whose neighbor Sentences are being searched
    :param d: Maximum distance (in sentence positions) that counts as a
        neighbor
    :param attrib: The token attribute type (e.g. words, lemmas, poses)
    :param n_min: The minimum n of the ngrams that should be returned
    :param n_max: The maximum n of the ngrams that should be returned
    :param lower: If True, all ngrams will be returned in lower case
    :rtype: a *generator* of ngrams
    """
    for span in _to_spans(mention):
        anchor = span.sentence
        for neighbor in anchor.document.sentences:
            # Skip the anchor sentence itself and anything farther than d.
            if neighbor == anchor:
                continue
            if abs(neighbor.position - anchor.position) > d:
                continue
            for ngram in tokens_to_ngrams(
                getattr(neighbor, attrib), n_min=n_min, n_max=n_max, lower=lower
            ):
                yield ngram
"def",
"get_neighbor_sentence_ngrams",
"(",
"mention",
",",
"d",
"=",
"1",
",",
"attrib",
"=",
"\"words\"",
",",
"n_min",
"=",
"1",
",",
"n_max",
"=",
"1",
",",
"lower",
"=",
"True",
")",
":",
"spans",
"=",
"_to_spans",
"(",
"mention",
")",
"for",
"span",
"in",
"spans",
":",
"for",
"ngram",
"in",
"chain",
".",
"from_iterable",
"(",
"[",
"tokens_to_ngrams",
"(",
"getattr",
"(",
"sentence",
",",
"attrib",
")",
",",
"n_min",
"=",
"n_min",
",",
"n_max",
"=",
"n_max",
",",
"lower",
"=",
"lower",
")",
"for",
"sentence",
"in",
"span",
".",
"sentence",
".",
"document",
".",
"sentences",
"if",
"abs",
"(",
"sentence",
".",
"position",
"-",
"span",
".",
"sentence",
".",
"position",
")",
"<=",
"d",
"and",
"sentence",
"!=",
"span",
".",
"sentence",
"]",
")",
":",
"yield",
"ngram"
] | 41.37037 | 22.518519 |
def extend(*args):
    """Shallow dictionary merge.

    Args:
        *args: zero or more mappings; the first determines the result
            type, and later ones are applied over it in order.

    Returns:
        A new instance of the same type as the first argument with all
        arguments merged; an empty dict when called with no arguments.
    """
    if not args:
        return {}
    merged = type(args[0])(args[0])
    for mapping in args[1:]:
        merged.update(mapping)
    return merged
"def",
"extend",
"(",
"*",
"args",
")",
":",
"if",
"not",
"args",
":",
"return",
"{",
"}",
"first",
"=",
"args",
"[",
"0",
"]",
"rest",
"=",
"args",
"[",
"1",
":",
"]",
"out",
"=",
"type",
"(",
"first",
")",
"(",
"first",
")",
"for",
"each",
"in",
"rest",
":",
"out",
".",
"update",
"(",
"each",
")",
"return",
"out"
] | 18.894737 | 22.052632 |
def _get_access_token(self):
    """Get access token using app_id, login and password OR service token.

    (Service token docs: https://vk.com/dev/service_token)

    :return: the access token string.
    :raises ValueError: if neither a service token nor the full
        app_id/login/password triple is configured.
    :raises VkAuthError: if the OAuth2 response contains no access token.
    """
    # A pre-configured service token short-circuits the whole login flow.
    if self._service_token:
        logger.info('Use service token: %s',
                    5 * '*' + self._service_token[50:])
        return self._service_token
    if not all([self.app_id, self._login, self._password]):
        raise ValueError(
            'app_id=%s, login=%s password=%s (masked) must be given'
            % (self.app_id, self._login,
               '*' * len(self._password) if self._password else 'None'))
    logger.info("Getting access token for user '%s'" % self._login)
    with self.http_session as s:
        # A client secret enables the direct (password) OAuth2 flow;
        # otherwise fall back to login + implicit flow.
        if self._client_secret:
            url_query_params = self.do_direct_authorization(session=s)
        else:
            self.do_login(http_session=s)
            url_query_params = self.do_implicit_flow_authorization(session=s)
        logger.debug('url_query_params: %s', url_query_params)
    if 'access_token' in url_query_params:
        logger.info('Access token has been gotten')
        return url_query_params['access_token']
    else:
        raise VkAuthError('OAuth2 authorization error. Url params: %s'
                          % url_query_params)
"def",
"_get_access_token",
"(",
"self",
")",
":",
"if",
"self",
".",
"_service_token",
":",
"logger",
".",
"info",
"(",
"'Use service token: %s'",
",",
"5",
"*",
"'*'",
"+",
"self",
".",
"_service_token",
"[",
"50",
":",
"]",
")",
"return",
"self",
".",
"_service_token",
"if",
"not",
"all",
"(",
"[",
"self",
".",
"app_id",
",",
"self",
".",
"_login",
",",
"self",
".",
"_password",
"]",
")",
":",
"raise",
"ValueError",
"(",
"'app_id=%s, login=%s password=%s (masked) must be given'",
"%",
"(",
"self",
".",
"app_id",
",",
"self",
".",
"_login",
",",
"'*'",
"*",
"len",
"(",
"self",
".",
"_password",
")",
"if",
"self",
".",
"_password",
"else",
"'None'",
")",
")",
"logger",
".",
"info",
"(",
"\"Getting access token for user '%s'\"",
"%",
"self",
".",
"_login",
")",
"with",
"self",
".",
"http_session",
"as",
"s",
":",
"if",
"self",
".",
"_client_secret",
":",
"url_query_params",
"=",
"self",
".",
"do_direct_authorization",
"(",
"session",
"=",
"s",
")",
"else",
":",
"self",
".",
"do_login",
"(",
"http_session",
"=",
"s",
")",
"url_query_params",
"=",
"self",
".",
"do_implicit_flow_authorization",
"(",
"session",
"=",
"s",
")",
"logger",
".",
"debug",
"(",
"'url_query_params: %s'",
",",
"url_query_params",
")",
"if",
"'access_token'",
"in",
"url_query_params",
":",
"logger",
".",
"info",
"(",
"'Access token has been gotten'",
")",
"return",
"url_query_params",
"[",
"'access_token'",
"]",
"else",
":",
"raise",
"VkAuthError",
"(",
"'OAuth2 authorization error. Url params: %s'",
"%",
"url_query_params",
")"
] | 45 | 18.933333 |
def addTransitions(self, state, transitions):
    """
    Create a new L{TransitionTable} with all the same transitions as this
    L{TransitionTable} plus a number of new transitions.

    @param state: The state for which the new transitions are defined.

    @param transitions: A L{dict} mapping inputs to output, nextState
        pairs.  Each item from this L{dict} will define a new transition
        in C{state}.

    @return: The newly created L{TransitionTable}.
    """
    newTable = self._copy()
    stateTransitions = newTable.table.setdefault(state, {})
    for inputSymbol, (output, nextState) in transitions.items():
        stateTransitions[inputSymbol] = Transition(output, nextState)
    return newTable
"def",
"addTransitions",
"(",
"self",
",",
"state",
",",
"transitions",
")",
":",
"table",
"=",
"self",
".",
"_copy",
"(",
")",
"state",
"=",
"table",
".",
"table",
".",
"setdefault",
"(",
"state",
",",
"{",
"}",
")",
"for",
"(",
"input",
",",
"(",
"output",
",",
"nextState",
")",
")",
"in",
"transitions",
".",
"items",
"(",
")",
":",
"state",
"[",
"input",
"]",
"=",
"Transition",
"(",
"output",
",",
"nextState",
")",
"return",
"table"
] | 42.470588 | 21.294118 |
def _process(self, plugin, instance=None):
    """Produce `result` from `plugin` and `instance`

    :func:`process` shares state with :func:`_iterator` such that
    an instance/plugin pair can be fetched and processed in isolation.

    Arguments:
        plugin (pyblish.api.Plugin): Produce result using plug-in
        instance (optional, pyblish.api.Instance): Process this instance,
            if no instance is provided, context is processed.

    Returns:
        The result dict produced by ``pyblish.plugin.process``.
    """
    # Record the order of the plug-in about to run so order tracking
    # stays in sync with processing.
    self.processing["nextOrder"] = plugin.order
    try:
        result = pyblish.plugin.process(plugin, self.context, instance)
    except Exception as e:
        # pyblish.plugin.process captures plug-in errors itself;
        # anything escaping it here is unexpected.
        raise Exception("Unknown error: %s" % e)
    else:
        # Make note of the order at which the
        # potential error occurred.
        has_error = result["error"] is not None
        if has_error:
            self.processing["ordersWithError"].add(plugin.order)
    return result
"def",
"_process",
"(",
"self",
",",
"plugin",
",",
"instance",
"=",
"None",
")",
":",
"self",
".",
"processing",
"[",
"\"nextOrder\"",
"]",
"=",
"plugin",
".",
"order",
"try",
":",
"result",
"=",
"pyblish",
".",
"plugin",
".",
"process",
"(",
"plugin",
",",
"self",
".",
"context",
",",
"instance",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"Exception",
"(",
"\"Unknown error: %s\"",
"%",
"e",
")",
"else",
":",
"# Make note of the order at which the",
"# potential error error occured.",
"has_error",
"=",
"result",
"[",
"\"error\"",
"]",
"is",
"not",
"None",
"if",
"has_error",
":",
"self",
".",
"processing",
"[",
"\"ordersWithError\"",
"]",
".",
"add",
"(",
"plugin",
".",
"order",
")",
"return",
"result"
] | 33.517241 | 24.37931 |
def save(self, filename=None, v2_version=4, v23_sep='/'):
    """Save ID3v2 data to the AIFF file

    :param filename: target file; defaults to the file the tag was
        loaded from.
    :param v2_version: ID3v2 minor version to write.
    :param v23_sep: separator used to join multiple text values when
        writing v2.3 frames.
    """
    framedata = self._prepare_framedata(v2_version, v23_sep)
    framesize = len(framedata)
    if filename is None:
        filename = self.filename
    # Unlike the parent ID3.save method, we won't save to a blank file
    # since we would have to construct a empty AIFF file
    fileobj = open(filename, 'rb+')
    iff_file = IFFFile(fileobj)
    try:
        # Ensure an ID3 chunk exists in the IFF container, then rewrite
        # its payload in place.
        if u'ID3' not in iff_file:
            iff_file.insert_chunk(u'ID3')
        chunk = iff_file[u'ID3']
        fileobj.seek(chunk.data_offset)
        # Existing 10-byte ID3 header, used to compute the padded size.
        header = fileobj.read(10)
        header = self._prepare_id3_header(header, framesize, v2_version)
        header, new_size, _ = header
        # Pad the tag area with zero bytes up to the computed size.
        data = header + framedata + (b'\x00' * (new_size - framesize))
        # Include ID3 header size in 'new_size' calculation
        new_size += 10
        # Expand the chunk if necessary, including pad byte
        if new_size > chunk.size:
            insert_at = chunk.offset + chunk.size
            insert_size = new_size - chunk.size + new_size % 2
            insert_bytes(fileobj, insert_size, insert_at)
        chunk.resize(new_size)
        fileobj.seek(chunk.data_offset)
        fileobj.write(data)
    finally:
        fileobj.close()
"def",
"save",
"(",
"self",
",",
"filename",
"=",
"None",
",",
"v2_version",
"=",
"4",
",",
"v23_sep",
"=",
"'/'",
")",
":",
"framedata",
"=",
"self",
".",
"_prepare_framedata",
"(",
"v2_version",
",",
"v23_sep",
")",
"framesize",
"=",
"len",
"(",
"framedata",
")",
"if",
"filename",
"is",
"None",
":",
"filename",
"=",
"self",
".",
"filename",
"# Unlike the parent ID3.save method, we won't save to a blank file",
"# since we would have to construct a empty AIFF file",
"fileobj",
"=",
"open",
"(",
"filename",
",",
"'rb+'",
")",
"iff_file",
"=",
"IFFFile",
"(",
"fileobj",
")",
"try",
":",
"if",
"u'ID3'",
"not",
"in",
"iff_file",
":",
"iff_file",
".",
"insert_chunk",
"(",
"u'ID3'",
")",
"chunk",
"=",
"iff_file",
"[",
"u'ID3'",
"]",
"fileobj",
".",
"seek",
"(",
"chunk",
".",
"data_offset",
")",
"header",
"=",
"fileobj",
".",
"read",
"(",
"10",
")",
"header",
"=",
"self",
".",
"_prepare_id3_header",
"(",
"header",
",",
"framesize",
",",
"v2_version",
")",
"header",
",",
"new_size",
",",
"_",
"=",
"header",
"data",
"=",
"header",
"+",
"framedata",
"+",
"(",
"b'\\x00'",
"*",
"(",
"new_size",
"-",
"framesize",
")",
")",
"# Include ID3 header size in 'new_size' calculation",
"new_size",
"+=",
"10",
"# Expand the chunk if necessary, including pad byte",
"if",
"new_size",
">",
"chunk",
".",
"size",
":",
"insert_at",
"=",
"chunk",
".",
"offset",
"+",
"chunk",
".",
"size",
"insert_size",
"=",
"new_size",
"-",
"chunk",
".",
"size",
"+",
"new_size",
"%",
"2",
"insert_bytes",
"(",
"fileobj",
",",
"insert_size",
",",
"insert_at",
")",
"chunk",
".",
"resize",
"(",
"new_size",
")",
"fileobj",
".",
"seek",
"(",
"chunk",
".",
"data_offset",
")",
"fileobj",
".",
"write",
"(",
"data",
")",
"finally",
":",
"fileobj",
".",
"close",
"(",
")"
] | 34.097561 | 19.804878 |
def update_os_type(self):
    """Detect the operating system type and store it on ``os_type``.

    Leaves ``os_type`` untouched when the driver cannot determine a type
    from the version text.
    """
    log = self.chain.connection.log
    log("Detecting os type")
    detected = self.driver.get_os_type(self.version_text)
    if detected:
        log("SW Type: {}".format(detected))
        self.os_type = detected
"def",
"update_os_type",
"(",
"self",
")",
":",
"self",
".",
"chain",
".",
"connection",
".",
"log",
"(",
"\"Detecting os type\"",
")",
"os_type",
"=",
"self",
".",
"driver",
".",
"get_os_type",
"(",
"self",
".",
"version_text",
")",
"if",
"os_type",
":",
"self",
".",
"chain",
".",
"connection",
".",
"log",
"(",
"\"SW Type: {}\"",
".",
"format",
"(",
"os_type",
")",
")",
"self",
".",
"os_type",
"=",
"os_type"
] | 42.714286 | 14.857143 |
def _set_translated_fields(self, language_code=None, **fields):
    """
    Assign fields to the translated models.
    """
    # Materialize eagerly (no generator) so every translation object is
    # created and fully populated before returning.
    translations = []
    for meta, meta_fields in self._parler_meta._split_fields(**fields):
        translation = self._get_translated_model(
            language_code=language_code, auto_create=True, meta=meta)
        for field_name, value in six.iteritems(meta_fields):
            setattr(translation, field_name, value)
        translations.append(translation)
    return translations
"def",
"_set_translated_fields",
"(",
"self",
",",
"language_code",
"=",
"None",
",",
"*",
"*",
"fields",
")",
":",
"objects",
"=",
"[",
"]",
"# no generator, make sure objects are all filled first",
"for",
"parler_meta",
",",
"model_fields",
"in",
"self",
".",
"_parler_meta",
".",
"_split_fields",
"(",
"*",
"*",
"fields",
")",
":",
"translation",
"=",
"self",
".",
"_get_translated_model",
"(",
"language_code",
"=",
"language_code",
",",
"auto_create",
"=",
"True",
",",
"meta",
"=",
"parler_meta",
")",
"for",
"field",
",",
"value",
"in",
"six",
".",
"iteritems",
"(",
"model_fields",
")",
":",
"setattr",
"(",
"translation",
",",
"field",
",",
"value",
")",
"objects",
".",
"append",
"(",
"translation",
")",
"return",
"objects"
] | 48.25 | 22.916667 |
def write_alignment(self, filename, file_format, interleaved=None):
    """
    Write the alignment to file using Bio.AlignIO

    NOTE(review): 'interleaved' is accepted for API compatibility but is
    not used here -- confirm whether callers rely on it.
    """
    fmt = 'phylip-relaxed' if file_format == 'phylip' else file_format
    AlignIO.write(self._msa, filename, fmt)
"def",
"write_alignment",
"(",
"self",
",",
"filename",
",",
"file_format",
",",
"interleaved",
"=",
"None",
")",
":",
"if",
"file_format",
"==",
"'phylip'",
":",
"file_format",
"=",
"'phylip-relaxed'",
"AlignIO",
".",
"write",
"(",
"self",
".",
"_msa",
",",
"filename",
",",
"file_format",
")"
] | 39.142857 | 8.857143 |
def delete_processing_block(processing_block_id):
    """Delete Processing Block with the specified ID"""
    # Processing block ids are namespaced as '<scheduling block id>:<n>'.
    scheduling_block_id = processing_block_id.split(':')[0]
    config = get_scheduling_block(scheduling_block_id)
    matches = [pblock for pblock in config.get('processing_blocks')
               if pblock.get('id') == processing_block_id]
    config['processing_blocks'].remove(matches[0])
    DB.set('scheduling_block/{}'.format(config['id']), json.dumps(config))
    # Publish a deletion event so subscribers learn the block is gone.
    DB.rpush('processing_block_events',
             json.dumps(dict(type="deleted", id=processing_block_id)))
"def",
"delete_processing_block",
"(",
"processing_block_id",
")",
":",
"scheduling_block_id",
"=",
"processing_block_id",
".",
"split",
"(",
"':'",
")",
"[",
"0",
"]",
"config",
"=",
"get_scheduling_block",
"(",
"scheduling_block_id",
")",
"processing_blocks",
"=",
"config",
".",
"get",
"(",
"'processing_blocks'",
")",
"processing_block",
"=",
"list",
"(",
"filter",
"(",
"lambda",
"x",
":",
"x",
".",
"get",
"(",
"'id'",
")",
"==",
"processing_block_id",
",",
"processing_blocks",
")",
")",
"[",
"0",
"]",
"config",
"[",
"'processing_blocks'",
"]",
".",
"remove",
"(",
"processing_block",
")",
"DB",
".",
"set",
"(",
"'scheduling_block/{}'",
".",
"format",
"(",
"config",
"[",
"'id'",
"]",
")",
",",
"json",
".",
"dumps",
"(",
"config",
")",
")",
"# Add a event to the scheduling block event list to notify",
"# of a new scheduling block being added to the db.",
"DB",
".",
"rpush",
"(",
"'processing_block_events'",
",",
"json",
".",
"dumps",
"(",
"dict",
"(",
"type",
"=",
"\"deleted\"",
",",
"id",
"=",
"processing_block_id",
")",
")",
")"
] | 52.714286 | 18.214286 |
def _negotiateHandler(self, request):
    """
    Negotiate a handler based on the content types acceptable to the
    client.

    :rtype: 2-`tuple` of `twisted.web.iweb.IResource` and `bytes`
    :return: Pair of a resource and the content type; ``(NotAcceptable(),
        None)`` when nothing matches and no fallback is configured.
    """
    acceptable = _parseAccept(request.requestHeaders.getRawHeaders('Accept'))
    for contentType in acceptable:
        match = self._acceptHandlers.get(contentType.lower())
        if match is not None:
            return match, match.contentType
    if self._fallback:
        fallback = self._handlers[0]
        return fallback, fallback.contentType
    return NotAcceptable(), None
"def",
"_negotiateHandler",
"(",
"self",
",",
"request",
")",
":",
"accept",
"=",
"_parseAccept",
"(",
"request",
".",
"requestHeaders",
".",
"getRawHeaders",
"(",
"'Accept'",
")",
")",
"for",
"contentType",
"in",
"accept",
".",
"keys",
"(",
")",
":",
"handler",
"=",
"self",
".",
"_acceptHandlers",
".",
"get",
"(",
"contentType",
".",
"lower",
"(",
")",
")",
"if",
"handler",
"is",
"not",
"None",
":",
"return",
"handler",
",",
"handler",
".",
"contentType",
"if",
"self",
".",
"_fallback",
":",
"handler",
"=",
"self",
".",
"_handlers",
"[",
"0",
"]",
"return",
"handler",
",",
"handler",
".",
"contentType",
"return",
"NotAcceptable",
"(",
")",
",",
"None"
] | 38.388889 | 16.277778 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.