text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def customdata(self, lookup, default=None):
"""
Args:
lookup: the custom data file
default: the optional value to return if lookup failed; returns None if not set
Returns:
The custom data returned from the file 'lookup' or default/None if no match found
"""
try:
if lookup in EFConfig.CUSTOM_DATA:
return EFConfig.CUSTOM_DATA[lookup]
else:
return default
except AttributeError:
return default | [
"def",
"customdata",
"(",
"self",
",",
"lookup",
",",
"default",
"=",
"None",
")",
":",
"try",
":",
"if",
"lookup",
"in",
"EFConfig",
".",
"CUSTOM_DATA",
":",
"return",
"EFConfig",
".",
"CUSTOM_DATA",
"[",
"lookup",
"]",
"else",
":",
"return",
"default",
"except",
"AttributeError",
":",
"return",
"default"
] | 30.266667 | 18.4 |
def get_type(obj, **kwargs):
"""Return the type of an object. Do some regex to remove the "<class..." bit."""
t = type(obj)
s = extract_type(str(t))
return 'Type: {}'.format(s) | [
"def",
"get_type",
"(",
"obj",
",",
"*",
"*",
"kwargs",
")",
":",
"t",
"=",
"type",
"(",
"obj",
")",
"s",
"=",
"extract_type",
"(",
"str",
"(",
"t",
")",
")",
"return",
"'Type: {}'",
".",
"format",
"(",
"s",
")"
] | 26.857143 | 19.428571 |
def getcoef(self):
"""Get final coefficient map array."""
global mp_Z_Y1
return np.swapaxes(mp_Z_Y1, 0, self.xstep.cri.axisK+1)[0] | [
"def",
"getcoef",
"(",
"self",
")",
":",
"global",
"mp_Z_Y1",
"return",
"np",
".",
"swapaxes",
"(",
"mp_Z_Y1",
",",
"0",
",",
"self",
".",
"xstep",
".",
"cri",
".",
"axisK",
"+",
"1",
")",
"[",
"0",
"]"
] | 30.2 | 21 |
def get_daemon_stats(self, details=False): # pylint: disable=unused-argument
"""Get state of modules and create a scheme for stats data of daemon
This may be overridden in subclasses (and it is...)
:return: A dict with the following structure
::
{
'modules': {
'internal': {'name': "MYMODULE1", 'state': 'ok'},
'external': {'name': "MYMODULE2", 'state': 'stopped'},
},
And some extra information, see the source code below...
}
These information are completed with the data provided by the get_id function
which provides the daemon identification
:rtype: dict
"""
res = self.get_id()
res.update({
"program_start": self.program_start,
"spare": self.spare,
'counters': {},
'metrics': [],
'modules': {
'internal': {}, 'external': {}
}
})
# Modules information
modules = res['modules']
counters = res['counters']
counters['modules'] = len(self.modules_manager.instances)
# first get data for all internal modules
for instance in self.modules_manager.get_internal_instances():
state = {True: 'ok', False: 'stopped'}[(instance
not in self.modules_manager.to_restart)]
modules['internal'][instance.name] = {'name': instance.name, 'state': state}
# Same but for external ones
for instance in self.modules_manager.get_external_instances():
state = {True: 'ok', False: 'stopped'}[(instance
not in self.modules_manager.to_restart)]
modules['internal'][instance.name] = {'name': instance.name, 'state': state}
return res | [
"def",
"get_daemon_stats",
"(",
"self",
",",
"details",
"=",
"False",
")",
":",
"# pylint: disable=unused-argument",
"res",
"=",
"self",
".",
"get_id",
"(",
")",
"res",
".",
"update",
"(",
"{",
"\"program_start\"",
":",
"self",
".",
"program_start",
",",
"\"spare\"",
":",
"self",
".",
"spare",
",",
"'counters'",
":",
"{",
"}",
",",
"'metrics'",
":",
"[",
"]",
",",
"'modules'",
":",
"{",
"'internal'",
":",
"{",
"}",
",",
"'external'",
":",
"{",
"}",
"}",
"}",
")",
"# Modules information",
"modules",
"=",
"res",
"[",
"'modules'",
"]",
"counters",
"=",
"res",
"[",
"'counters'",
"]",
"counters",
"[",
"'modules'",
"]",
"=",
"len",
"(",
"self",
".",
"modules_manager",
".",
"instances",
")",
"# first get data for all internal modules",
"for",
"instance",
"in",
"self",
".",
"modules_manager",
".",
"get_internal_instances",
"(",
")",
":",
"state",
"=",
"{",
"True",
":",
"'ok'",
",",
"False",
":",
"'stopped'",
"}",
"[",
"(",
"instance",
"not",
"in",
"self",
".",
"modules_manager",
".",
"to_restart",
")",
"]",
"modules",
"[",
"'internal'",
"]",
"[",
"instance",
".",
"name",
"]",
"=",
"{",
"'name'",
":",
"instance",
".",
"name",
",",
"'state'",
":",
"state",
"}",
"# Same but for external ones",
"for",
"instance",
"in",
"self",
".",
"modules_manager",
".",
"get_external_instances",
"(",
")",
":",
"state",
"=",
"{",
"True",
":",
"'ok'",
",",
"False",
":",
"'stopped'",
"}",
"[",
"(",
"instance",
"not",
"in",
"self",
".",
"modules_manager",
".",
"to_restart",
")",
"]",
"modules",
"[",
"'internal'",
"]",
"[",
"instance",
".",
"name",
"]",
"=",
"{",
"'name'",
":",
"instance",
".",
"name",
",",
"'state'",
":",
"state",
"}",
"return",
"res"
] | 40.891304 | 23.782609 |
def weighted_random_choice(items):
"""
Returns a weighted random choice from a list of items.
:param items: A list of tuples (object, weight)
:return: A random object, whose likelihood is proportional to its weight.
"""
l = list(items)
r = random.random() * sum([i[1] for i in l])
for x, p in l:
if p > r:
return x
r -= p
return None | [
"def",
"weighted_random_choice",
"(",
"items",
")",
":",
"l",
"=",
"list",
"(",
"items",
")",
"r",
"=",
"random",
".",
"random",
"(",
")",
"*",
"sum",
"(",
"[",
"i",
"[",
"1",
"]",
"for",
"i",
"in",
"l",
"]",
")",
"for",
"x",
",",
"p",
"in",
"l",
":",
"if",
"p",
">",
"r",
":",
"return",
"x",
"r",
"-=",
"p",
"return",
"None"
] | 29.615385 | 16.692308 |
def get_authserver(self, domainid, serverid):
"""Get an Authentication server"""
return self.api_call(
ENDPOINTS['authservers']['get'],
dict(domainid=domainid, serverid=serverid)) | [
"def",
"get_authserver",
"(",
"self",
",",
"domainid",
",",
"serverid",
")",
":",
"return",
"self",
".",
"api_call",
"(",
"ENDPOINTS",
"[",
"'authservers'",
"]",
"[",
"'get'",
"]",
",",
"dict",
"(",
"domainid",
"=",
"domainid",
",",
"serverid",
"=",
"serverid",
")",
")"
] | 43 | 7 |
def end(self):
"""Close the V interface.
Args::
No argument
Returns::
None
C library equivalent : Vend
"""
# Note: Vend is just a macro; use 'Vfinish' instead
# Note also the the same C function is used to end
# the VS interface
_checkErr('vend', _C.Vfinish(self._hdf_inst._id),
"cannot terminate V interface")
self._hdf_inst = None | [
"def",
"end",
"(",
"self",
")",
":",
"# Note: Vend is just a macro; use 'Vfinish' instead",
"# Note also the the same C function is used to end",
"# the VS interface",
"_checkErr",
"(",
"'vend'",
",",
"_C",
".",
"Vfinish",
"(",
"self",
".",
"_hdf_inst",
".",
"_id",
")",
",",
"\"cannot terminate V interface\"",
")",
"self",
".",
"_hdf_inst",
"=",
"None"
] | 23.85 | 22.65 |
def Assign(target, source):
"""Build an assignment statement"""
if not isinstance(target, list):
target = [target]
if not isinstance(source, list):
source.prefix = u" "
source = [source]
return Node(syms.atom,
target + [Leaf(token.EQUAL, u"=", prefix=u" ")] + source) | [
"def",
"Assign",
"(",
"target",
",",
"source",
")",
":",
"if",
"not",
"isinstance",
"(",
"target",
",",
"list",
")",
":",
"target",
"=",
"[",
"target",
"]",
"if",
"not",
"isinstance",
"(",
"source",
",",
"list",
")",
":",
"source",
".",
"prefix",
"=",
"u\" \"",
"source",
"=",
"[",
"source",
"]",
"return",
"Node",
"(",
"syms",
".",
"atom",
",",
"target",
"+",
"[",
"Leaf",
"(",
"token",
".",
"EQUAL",
",",
"u\"=\"",
",",
"prefix",
"=",
"u\" \"",
")",
"]",
"+",
"source",
")"
] | 31.5 | 15 |
def cleanup(self):
"""
Drops any existing work tables, as returned by
:meth:`~giraffez.load.TeradataBulkLoad.tables`.
:raises `giraffez.TeradataPTError`: if a Teradata error ocurred
"""
threads = []
for i, table in enumerate(filter(lambda x: self.mload.exists(x), self.tables)):
log.info("BulkLoad", "Dropping table '{}'...".format(table))
t = threading.Thread(target=self.mload.drop_table, args=(table,))
threads.append(t)
t.start()
for t in threads:
t.join() | [
"def",
"cleanup",
"(",
"self",
")",
":",
"threads",
"=",
"[",
"]",
"for",
"i",
",",
"table",
"in",
"enumerate",
"(",
"filter",
"(",
"lambda",
"x",
":",
"self",
".",
"mload",
".",
"exists",
"(",
"x",
")",
",",
"self",
".",
"tables",
")",
")",
":",
"log",
".",
"info",
"(",
"\"BulkLoad\"",
",",
"\"Dropping table '{}'...\"",
".",
"format",
"(",
"table",
")",
")",
"t",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"self",
".",
"mload",
".",
"drop_table",
",",
"args",
"=",
"(",
"table",
",",
")",
")",
"threads",
".",
"append",
"(",
"t",
")",
"t",
".",
"start",
"(",
")",
"for",
"t",
"in",
"threads",
":",
"t",
".",
"join",
"(",
")"
] | 38.066667 | 21.533333 |
def scrape_hive_url(mc_url, num_tracks=sys.maxsize, folders=False, custom_path=''):
"""
Scrape a Hive.co download page.
Returns:
list: filenames to open
"""
try:
data = get_hive_data(mc_url)
except Exception as e:
puts_safe(colored.red("Problem downloading ") + mc_url)
print(e)
filenames = []
# track_artist = sanitize_filename(data['artist'])
# track_title = sanitize_filename(data['title'])
# track_filename = track_artist + ' - ' + track_title + '.mp3'
# if folders:
# track_artist_path = join(custom_path, track_artist)
# if not exists(track_artist_path):
# mkdir(track_artist_path)
# track_filename = join(track_artist_path, track_filename)
# if exists(track_filename):
# puts_safe(colored.yellow("Skipping") + colored.white(': ' + data['title'] + " - it already exists!"))
# return []
# puts_safe(colored.green("Downloading") + colored.white(': ' + data['artist'] + " - " + data['title']))
# download_file(data['mp3_url'], track_filename)
# tag_file(track_filename,
# artist=data['artist'],
# title=data['title'],
# year=data['year'],
# genre=None,
# artwork_url=data['artwork_url'])
# filenames.append(track_filename)
return filenames | [
"def",
"scrape_hive_url",
"(",
"mc_url",
",",
"num_tracks",
"=",
"sys",
".",
"maxsize",
",",
"folders",
"=",
"False",
",",
"custom_path",
"=",
"''",
")",
":",
"try",
":",
"data",
"=",
"get_hive_data",
"(",
"mc_url",
")",
"except",
"Exception",
"as",
"e",
":",
"puts_safe",
"(",
"colored",
".",
"red",
"(",
"\"Problem downloading \"",
")",
"+",
"mc_url",
")",
"print",
"(",
"e",
")",
"filenames",
"=",
"[",
"]",
"# track_artist = sanitize_filename(data['artist'])",
"# track_title = sanitize_filename(data['title'])",
"# track_filename = track_artist + ' - ' + track_title + '.mp3'",
"# if folders:",
"# track_artist_path = join(custom_path, track_artist)",
"# if not exists(track_artist_path):",
"# mkdir(track_artist_path)",
"# track_filename = join(track_artist_path, track_filename)",
"# if exists(track_filename):",
"# puts_safe(colored.yellow(\"Skipping\") + colored.white(': ' + data['title'] + \" - it already exists!\"))",
"# return []",
"# puts_safe(colored.green(\"Downloading\") + colored.white(': ' + data['artist'] + \" - \" + data['title']))",
"# download_file(data['mp3_url'], track_filename)",
"# tag_file(track_filename,",
"# artist=data['artist'],",
"# title=data['title'],",
"# year=data['year'],",
"# genre=None,",
"# artwork_url=data['artwork_url'])",
"# filenames.append(track_filename)",
"return",
"filenames"
] | 32.536585 | 21.902439 |
def reply(request, message_id, form_class=ComposeForm,
template_name='django_messages/compose.html', success_url=None,
recipient_filter=None, quote_helper=format_quote,
subject_template=_(u"Re: %(subject)s"),):
"""
Prepares the ``form_class`` form for writing a reply to a given message
(specified via ``message_id``). Uses the ``format_quote`` helper from
``messages.utils`` to pre-format the quote. To change the quote format
assign a different ``quote_helper`` kwarg in your url-conf.
"""
parent = get_object_or_404(Message, id=message_id)
if parent.sender != request.user and parent.recipient != request.user:
raise Http404
if request.method == "POST":
sender = request.user
form = form_class(request.POST, recipient_filter=recipient_filter)
if form.is_valid():
form.save(sender=request.user, parent_msg=parent)
messages.info(request, _(u"Message successfully sent."))
if success_url is None:
success_url = reverse('messages_inbox')
return HttpResponseRedirect(success_url)
else:
form = form_class(initial={
'body': quote_helper(parent.sender, parent.body),
'subject': subject_template % {'subject': parent.subject},
'recipient': [parent.sender,]
})
return render(request, template_name, {
'form': form,
}) | [
"def",
"reply",
"(",
"request",
",",
"message_id",
",",
"form_class",
"=",
"ComposeForm",
",",
"template_name",
"=",
"'django_messages/compose.html'",
",",
"success_url",
"=",
"None",
",",
"recipient_filter",
"=",
"None",
",",
"quote_helper",
"=",
"format_quote",
",",
"subject_template",
"=",
"_",
"(",
"u\"Re: %(subject)s\"",
")",
",",
")",
":",
"parent",
"=",
"get_object_or_404",
"(",
"Message",
",",
"id",
"=",
"message_id",
")",
"if",
"parent",
".",
"sender",
"!=",
"request",
".",
"user",
"and",
"parent",
".",
"recipient",
"!=",
"request",
".",
"user",
":",
"raise",
"Http404",
"if",
"request",
".",
"method",
"==",
"\"POST\"",
":",
"sender",
"=",
"request",
".",
"user",
"form",
"=",
"form_class",
"(",
"request",
".",
"POST",
",",
"recipient_filter",
"=",
"recipient_filter",
")",
"if",
"form",
".",
"is_valid",
"(",
")",
":",
"form",
".",
"save",
"(",
"sender",
"=",
"request",
".",
"user",
",",
"parent_msg",
"=",
"parent",
")",
"messages",
".",
"info",
"(",
"request",
",",
"_",
"(",
"u\"Message successfully sent.\"",
")",
")",
"if",
"success_url",
"is",
"None",
":",
"success_url",
"=",
"reverse",
"(",
"'messages_inbox'",
")",
"return",
"HttpResponseRedirect",
"(",
"success_url",
")",
"else",
":",
"form",
"=",
"form_class",
"(",
"initial",
"=",
"{",
"'body'",
":",
"quote_helper",
"(",
"parent",
".",
"sender",
",",
"parent",
".",
"body",
")",
",",
"'subject'",
":",
"subject_template",
"%",
"{",
"'subject'",
":",
"parent",
".",
"subject",
"}",
",",
"'recipient'",
":",
"[",
"parent",
".",
"sender",
",",
"]",
"}",
")",
"return",
"render",
"(",
"request",
",",
"template_name",
",",
"{",
"'form'",
":",
"form",
",",
"}",
")"
] | 41.529412 | 20.588235 |
def get_name(self):
"""Name accessor"""
if self.type is not None and self.name.endswith("." + self.type):
return self.name[:len(self.name) - len(self.type) - 1]
return self.name | [
"def",
"get_name",
"(",
"self",
")",
":",
"if",
"self",
".",
"type",
"is",
"not",
"None",
"and",
"self",
".",
"name",
".",
"endswith",
"(",
"\".\"",
"+",
"self",
".",
"type",
")",
":",
"return",
"self",
".",
"name",
"[",
":",
"len",
"(",
"self",
".",
"name",
")",
"-",
"len",
"(",
"self",
".",
"type",
")",
"-",
"1",
"]",
"return",
"self",
".",
"name"
] | 41.8 | 19.2 |
def _normalize(esfilter):
"""
TODO: DO NOT USE Data, WE ARE SPENDING TOO MUCH TIME WRAPPING/UNWRAPPING
REALLY, WE JUST COLLAPSE CASCADING `and` AND `or` FILTERS
"""
if esfilter == MATCH_ALL or esfilter == MATCH_NONE or esfilter.isNormal:
return esfilter
# Log.note("from: " + convert.value2json(esfilter))
isDiff = True
while isDiff:
isDiff = False
if esfilter.bool.filter:
terms = esfilter.bool.filter
for (i0, t0), (i1, t1) in itertools.product(
enumerate(terms), enumerate(terms)
):
if i0 == i1:
continue # SAME, IGNORE
# TERM FILTER ALREADY ASSUMES EXISTENCE
with suppress_exception:
if (
t0.exists.field != None
and t0.exists.field == t1.term.items()[0][0]
):
terms[i0] = MATCH_ALL
continue
# IDENTICAL CAN BE REMOVED
with suppress_exception:
if t0 == t1:
terms[i0] = MATCH_ALL
continue
# MERGE range FILTER WITH SAME FIELD
if i0 > i1:
continue # SAME, IGNORE
with suppress_exception:
f0, tt0 = t0.range.items()[0]
f1, tt1 = t1.range.items()[0]
if f0 == f1:
set_default(terms[i0].range[literal_field(f1)], tt1)
terms[i1] = MATCH_ALL
output = []
for a in terms:
if is_container(a):
from mo_logs import Log
Log.error("and clause is not allowed a list inside a list")
a_ = _normalize(a)
if a_ is not a:
isDiff = True
a = a_
if a == MATCH_ALL:
isDiff = True
continue
if a == MATCH_NONE:
return MATCH_NONE
if a.bool.filter:
isDiff = True
a.isNormal = None
output.extend(a.bool.filter)
else:
a.isNormal = None
output.append(a)
if not output:
return MATCH_ALL
elif len(output) == 1:
# output[0].isNormal = True
esfilter = output[0]
break
elif isDiff:
esfilter = es_and(output)
continue
if esfilter.bool.should:
output = []
for a in esfilter.bool.should:
a_ = _normalize(a)
if a_ is not a:
isDiff = True
a = a_
if a.bool.should:
a.isNormal = None
isDiff = True
output.extend(a.bool.should)
else:
a.isNormal = None
output.append(a)
if not output:
return MATCH_NONE
elif len(output) == 1:
esfilter = output[0]
break
elif isDiff:
esfilter = wrap(es_or(output))
continue
if esfilter.term != None:
if esfilter.term.keys():
esfilter.isNormal = True
return esfilter
else:
return MATCH_ALL
if esfilter.terms:
for k, v in esfilter.terms.items():
if len(v) > 0:
if OR(vv == None for vv in v):
rest = [vv for vv in v if vv != None]
if len(rest) > 0:
output = es_or([es_missing(k), {"terms": {k: rest}}])
else:
output = es_missing(k)
output.isNormal = True
return output
else:
esfilter.isNormal = True
return esfilter
return MATCH_NONE
if esfilter.bool.must_not:
_sub = esfilter.bool.must_not
sub = _normalize(_sub)
if sub == MATCH_NONE:
return MATCH_ALL
elif sub == MATCH_ALL:
return MATCH_NONE
elif sub is not _sub:
sub.isNormal = None
return wrap({"bool": {"must_not": sub, "isNormal": True}})
else:
sub.isNormal = None
esfilter.isNormal = True
return esfilter | [
"def",
"_normalize",
"(",
"esfilter",
")",
":",
"if",
"esfilter",
"==",
"MATCH_ALL",
"or",
"esfilter",
"==",
"MATCH_NONE",
"or",
"esfilter",
".",
"isNormal",
":",
"return",
"esfilter",
"# Log.note(\"from: \" + convert.value2json(esfilter))",
"isDiff",
"=",
"True",
"while",
"isDiff",
":",
"isDiff",
"=",
"False",
"if",
"esfilter",
".",
"bool",
".",
"filter",
":",
"terms",
"=",
"esfilter",
".",
"bool",
".",
"filter",
"for",
"(",
"i0",
",",
"t0",
")",
",",
"(",
"i1",
",",
"t1",
")",
"in",
"itertools",
".",
"product",
"(",
"enumerate",
"(",
"terms",
")",
",",
"enumerate",
"(",
"terms",
")",
")",
":",
"if",
"i0",
"==",
"i1",
":",
"continue",
"# SAME, IGNORE",
"# TERM FILTER ALREADY ASSUMES EXISTENCE",
"with",
"suppress_exception",
":",
"if",
"(",
"t0",
".",
"exists",
".",
"field",
"!=",
"None",
"and",
"t0",
".",
"exists",
".",
"field",
"==",
"t1",
".",
"term",
".",
"items",
"(",
")",
"[",
"0",
"]",
"[",
"0",
"]",
")",
":",
"terms",
"[",
"i0",
"]",
"=",
"MATCH_ALL",
"continue",
"# IDENTICAL CAN BE REMOVED",
"with",
"suppress_exception",
":",
"if",
"t0",
"==",
"t1",
":",
"terms",
"[",
"i0",
"]",
"=",
"MATCH_ALL",
"continue",
"# MERGE range FILTER WITH SAME FIELD",
"if",
"i0",
">",
"i1",
":",
"continue",
"# SAME, IGNORE",
"with",
"suppress_exception",
":",
"f0",
",",
"tt0",
"=",
"t0",
".",
"range",
".",
"items",
"(",
")",
"[",
"0",
"]",
"f1",
",",
"tt1",
"=",
"t1",
".",
"range",
".",
"items",
"(",
")",
"[",
"0",
"]",
"if",
"f0",
"==",
"f1",
":",
"set_default",
"(",
"terms",
"[",
"i0",
"]",
".",
"range",
"[",
"literal_field",
"(",
"f1",
")",
"]",
",",
"tt1",
")",
"terms",
"[",
"i1",
"]",
"=",
"MATCH_ALL",
"output",
"=",
"[",
"]",
"for",
"a",
"in",
"terms",
":",
"if",
"is_container",
"(",
"a",
")",
":",
"from",
"mo_logs",
"import",
"Log",
"Log",
".",
"error",
"(",
"\"and clause is not allowed a list inside a list\"",
")",
"a_",
"=",
"_normalize",
"(",
"a",
")",
"if",
"a_",
"is",
"not",
"a",
":",
"isDiff",
"=",
"True",
"a",
"=",
"a_",
"if",
"a",
"==",
"MATCH_ALL",
":",
"isDiff",
"=",
"True",
"continue",
"if",
"a",
"==",
"MATCH_NONE",
":",
"return",
"MATCH_NONE",
"if",
"a",
".",
"bool",
".",
"filter",
":",
"isDiff",
"=",
"True",
"a",
".",
"isNormal",
"=",
"None",
"output",
".",
"extend",
"(",
"a",
".",
"bool",
".",
"filter",
")",
"else",
":",
"a",
".",
"isNormal",
"=",
"None",
"output",
".",
"append",
"(",
"a",
")",
"if",
"not",
"output",
":",
"return",
"MATCH_ALL",
"elif",
"len",
"(",
"output",
")",
"==",
"1",
":",
"# output[0].isNormal = True",
"esfilter",
"=",
"output",
"[",
"0",
"]",
"break",
"elif",
"isDiff",
":",
"esfilter",
"=",
"es_and",
"(",
"output",
")",
"continue",
"if",
"esfilter",
".",
"bool",
".",
"should",
":",
"output",
"=",
"[",
"]",
"for",
"a",
"in",
"esfilter",
".",
"bool",
".",
"should",
":",
"a_",
"=",
"_normalize",
"(",
"a",
")",
"if",
"a_",
"is",
"not",
"a",
":",
"isDiff",
"=",
"True",
"a",
"=",
"a_",
"if",
"a",
".",
"bool",
".",
"should",
":",
"a",
".",
"isNormal",
"=",
"None",
"isDiff",
"=",
"True",
"output",
".",
"extend",
"(",
"a",
".",
"bool",
".",
"should",
")",
"else",
":",
"a",
".",
"isNormal",
"=",
"None",
"output",
".",
"append",
"(",
"a",
")",
"if",
"not",
"output",
":",
"return",
"MATCH_NONE",
"elif",
"len",
"(",
"output",
")",
"==",
"1",
":",
"esfilter",
"=",
"output",
"[",
"0",
"]",
"break",
"elif",
"isDiff",
":",
"esfilter",
"=",
"wrap",
"(",
"es_or",
"(",
"output",
")",
")",
"continue",
"if",
"esfilter",
".",
"term",
"!=",
"None",
":",
"if",
"esfilter",
".",
"term",
".",
"keys",
"(",
")",
":",
"esfilter",
".",
"isNormal",
"=",
"True",
"return",
"esfilter",
"else",
":",
"return",
"MATCH_ALL",
"if",
"esfilter",
".",
"terms",
":",
"for",
"k",
",",
"v",
"in",
"esfilter",
".",
"terms",
".",
"items",
"(",
")",
":",
"if",
"len",
"(",
"v",
")",
">",
"0",
":",
"if",
"OR",
"(",
"vv",
"==",
"None",
"for",
"vv",
"in",
"v",
")",
":",
"rest",
"=",
"[",
"vv",
"for",
"vv",
"in",
"v",
"if",
"vv",
"!=",
"None",
"]",
"if",
"len",
"(",
"rest",
")",
">",
"0",
":",
"output",
"=",
"es_or",
"(",
"[",
"es_missing",
"(",
"k",
")",
",",
"{",
"\"terms\"",
":",
"{",
"k",
":",
"rest",
"}",
"}",
"]",
")",
"else",
":",
"output",
"=",
"es_missing",
"(",
"k",
")",
"output",
".",
"isNormal",
"=",
"True",
"return",
"output",
"else",
":",
"esfilter",
".",
"isNormal",
"=",
"True",
"return",
"esfilter",
"return",
"MATCH_NONE",
"if",
"esfilter",
".",
"bool",
".",
"must_not",
":",
"_sub",
"=",
"esfilter",
".",
"bool",
".",
"must_not",
"sub",
"=",
"_normalize",
"(",
"_sub",
")",
"if",
"sub",
"==",
"MATCH_NONE",
":",
"return",
"MATCH_ALL",
"elif",
"sub",
"==",
"MATCH_ALL",
":",
"return",
"MATCH_NONE",
"elif",
"sub",
"is",
"not",
"_sub",
":",
"sub",
".",
"isNormal",
"=",
"None",
"return",
"wrap",
"(",
"{",
"\"bool\"",
":",
"{",
"\"must_not\"",
":",
"sub",
",",
"\"isNormal\"",
":",
"True",
"}",
"}",
")",
"else",
":",
"sub",
".",
"isNormal",
"=",
"None",
"esfilter",
".",
"isNormal",
"=",
"True",
"return",
"esfilter"
] | 32.971429 | 13.471429 |
def task_search(self, task_str, **kwargs):
"""
Query for a subset of tasks by task_id.
:param task_str:
:return:
"""
self.prune()
result = collections.defaultdict(dict)
for task in self._state.get_active_tasks():
if task.id.find(task_str) != -1:
serialized = self._serialize_task(task.id, include_deps=False)
result[task.status][task.id] = serialized
return result | [
"def",
"task_search",
"(",
"self",
",",
"task_str",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"prune",
"(",
")",
"result",
"=",
"collections",
".",
"defaultdict",
"(",
"dict",
")",
"for",
"task",
"in",
"self",
".",
"_state",
".",
"get_active_tasks",
"(",
")",
":",
"if",
"task",
".",
"id",
".",
"find",
"(",
"task_str",
")",
"!=",
"-",
"1",
":",
"serialized",
"=",
"self",
".",
"_serialize_task",
"(",
"task",
".",
"id",
",",
"include_deps",
"=",
"False",
")",
"result",
"[",
"task",
".",
"status",
"]",
"[",
"task",
".",
"id",
"]",
"=",
"serialized",
"return",
"result"
] | 33.428571 | 14.571429 |
def copy_and_replace(file_in, file_out, mapping, **kwargs):
'''
Copy a file and replace some placeholders with new values.
'''
separator = '@@'
if 'separator' in kwargs:
separator = kwargs['separator']
file_in = open(file_in, 'r')
file_out = open(file_out, 'w')
s = file_in.read()
for find, replace in mapping:
find = separator + find + separator
print(u'Replacing {0} with {1}'.format(find, replace))
s = s.replace(find, replace)
file_out.write(s) | [
"def",
"copy_and_replace",
"(",
"file_in",
",",
"file_out",
",",
"mapping",
",",
"*",
"*",
"kwargs",
")",
":",
"separator",
"=",
"'@@'",
"if",
"'separator'",
"in",
"kwargs",
":",
"separator",
"=",
"kwargs",
"[",
"'separator'",
"]",
"file_in",
"=",
"open",
"(",
"file_in",
",",
"'r'",
")",
"file_out",
"=",
"open",
"(",
"file_out",
",",
"'w'",
")",
"s",
"=",
"file_in",
".",
"read",
"(",
")",
"for",
"find",
",",
"replace",
"in",
"mapping",
":",
"find",
"=",
"separator",
"+",
"find",
"+",
"separator",
"print",
"(",
"u'Replacing {0} with {1}'",
".",
"format",
"(",
"find",
",",
"replace",
")",
")",
"s",
"=",
"s",
".",
"replace",
"(",
"find",
",",
"replace",
")",
"file_out",
".",
"write",
"(",
"s",
")"
] | 33.733333 | 15.066667 |
def get_namespaces(self):
"""Get a list of namespaces"""
cursor = self.cursor
cursor.execute('SELECT DISTINCT namespace FROM gauged_statistics')
return [namespace for namespace, in cursor] | [
"def",
"get_namespaces",
"(",
"self",
")",
":",
"cursor",
"=",
"self",
".",
"cursor",
"cursor",
".",
"execute",
"(",
"'SELECT DISTINCT namespace FROM gauged_statistics'",
")",
"return",
"[",
"namespace",
"for",
"namespace",
",",
"in",
"cursor",
"]"
] | 43.2 | 14.4 |
def get_email_context(self,**kwargs):
''' Overrides EmailRecipientMixin '''
context = super(TemporaryRegistration,self).get_email_context(**kwargs)
context.update({
'first_name': self.firstName,
'last_name': self.lastName,
'registrationComments': self.comments,
'registrationHowHeardAboutUs': self.howHeardAboutUs,
'eventList': [x.get_email_context(includeName=False) for x in self.temporaryeventregistration_set.all()],
})
if hasattr(self,'invoice') and self.invoice:
context.update({
'invoice': self.invoice.get_email_context(),
})
return context | [
"def",
"get_email_context",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"context",
"=",
"super",
"(",
"TemporaryRegistration",
",",
"self",
")",
".",
"get_email_context",
"(",
"*",
"*",
"kwargs",
")",
"context",
".",
"update",
"(",
"{",
"'first_name'",
":",
"self",
".",
"firstName",
",",
"'last_name'",
":",
"self",
".",
"lastName",
",",
"'registrationComments'",
":",
"self",
".",
"comments",
",",
"'registrationHowHeardAboutUs'",
":",
"self",
".",
"howHeardAboutUs",
",",
"'eventList'",
":",
"[",
"x",
".",
"get_email_context",
"(",
"includeName",
"=",
"False",
")",
"for",
"x",
"in",
"self",
".",
"temporaryeventregistration_set",
".",
"all",
"(",
")",
"]",
",",
"}",
")",
"if",
"hasattr",
"(",
"self",
",",
"'invoice'",
")",
"and",
"self",
".",
"invoice",
":",
"context",
".",
"update",
"(",
"{",
"'invoice'",
":",
"self",
".",
"invoice",
".",
"get_email_context",
"(",
")",
",",
"}",
")",
"return",
"context"
] | 40.117647 | 22 |
def GetValues(self, table_names, column_names, condition):
"""Retrieves values from a table.
Args:
table_names (list[str]): table names.
column_names (list[str]): column names.
condition (str): query condition such as
"log_source == 'Application Error'".
Yields:
sqlite3.row: row.
Raises:
RuntimeError: if the database is not opened.
"""
if not self._connection:
raise RuntimeError('Cannot retrieve values database not opened.')
if condition:
condition = ' WHERE {0:s}'.format(condition)
sql_query = 'SELECT {1:s} FROM {0:s}{2:s}'.format(
', '.join(table_names), ', '.join(column_names), condition)
self._cursor.execute(sql_query)
# TODO: have a look at https://docs.python.org/2/library/
# sqlite3.html#sqlite3.Row.
for row in self._cursor:
yield {
column_name: row[column_index]
for column_index, column_name in enumerate(column_names)} | [
"def",
"GetValues",
"(",
"self",
",",
"table_names",
",",
"column_names",
",",
"condition",
")",
":",
"if",
"not",
"self",
".",
"_connection",
":",
"raise",
"RuntimeError",
"(",
"'Cannot retrieve values database not opened.'",
")",
"if",
"condition",
":",
"condition",
"=",
"' WHERE {0:s}'",
".",
"format",
"(",
"condition",
")",
"sql_query",
"=",
"'SELECT {1:s} FROM {0:s}{2:s}'",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"table_names",
")",
",",
"', '",
".",
"join",
"(",
"column_names",
")",
",",
"condition",
")",
"self",
".",
"_cursor",
".",
"execute",
"(",
"sql_query",
")",
"# TODO: have a look at https://docs.python.org/2/library/",
"# sqlite3.html#sqlite3.Row.",
"for",
"row",
"in",
"self",
".",
"_cursor",
":",
"yield",
"{",
"column_name",
":",
"row",
"[",
"column_index",
"]",
"for",
"column_index",
",",
"column_name",
"in",
"enumerate",
"(",
"column_names",
")",
"}"
] | 29.625 | 20.375 |
def present(
name,
attributes,
region=None,
key=None,
keyid=None,
profile=None):
'''
Ensure the cloudwatch alarm exists.
name
Name of the alarm
attributes
A dict of key/value cloudwatch alarm attributes.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
alarm_details = __salt__['boto_cloudwatch.get_alarm'](
name, region, key, keyid, profile
)
# Convert to arn's
for k in ["alarm_actions", "insufficient_data_actions", "ok_actions"]:
if k in attributes:
attributes[k] = __salt__['boto_cloudwatch.convert_to_arn'](
attributes[k], region, key, keyid, profile
)
# Diff the alarm_details with the passed-in attributes, allowing for the
# AWS type transformations
difference = []
if alarm_details:
for k, v in six.iteritems(attributes):
if k not in alarm_details:
difference.append("{0}={1} (new)".format(k, v))
continue
v = salt.utils.data.decode(v)
v2 = salt.utils.data.decode(alarm_details[k])
if v == v2:
continue
if isinstance(v, six.string_types) and v == v2:
continue
if isinstance(v, float) and v == float(v2):
continue
if isinstance(v, int) and v == int(v2):
continue
if isinstance(v, list) and sorted(v) == sorted(v2):
continue
difference.append("{0}='{1}' was: '{2}'".format(k, v, v2))
else:
difference.append("new alarm")
create_or_update_alarm_args = {
"name": name,
"region": region,
"key": key,
"keyid": keyid,
"profile": profile
}
create_or_update_alarm_args.update(attributes)
if alarm_details: # alarm is present. update, or do nothing
# check to see if attributes matches is_present. If so, do nothing.
if not difference:
ret['comment'] = "alarm {0} present and matching".format(name)
return ret
if __opts__['test']:
msg = 'alarm {0} is to be created/updated.'.format(name)
ret['comment'] = msg
ret['result'] = None
return ret
result = __salt__['boto_cloudwatch.create_or_update_alarm'](
**create_or_update_alarm_args
)
if result:
ret['changes']['diff'] = difference
else:
ret['result'] = False
ret['comment'] = 'Failed to create {0} alarm'.format(name)
else: # alarm is absent. create it.
if __opts__['test']:
msg = 'alarm {0} is to be created/updated.'.format(name)
ret['comment'] = msg
ret['result'] = None
return ret
result = __salt__['boto_cloudwatch.create_or_update_alarm'](
**create_or_update_alarm_args
)
if result:
ret['changes']['new'] = attributes
else:
ret['result'] = False
ret['comment'] = 'Failed to create {0} alarm'.format(name)
return ret | [
"def",
"present",
"(",
"name",
",",
"attributes",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'result'",
":",
"True",
",",
"'comment'",
":",
"''",
",",
"'changes'",
":",
"{",
"}",
"}",
"alarm_details",
"=",
"__salt__",
"[",
"'boto_cloudwatch.get_alarm'",
"]",
"(",
"name",
",",
"region",
",",
"key",
",",
"keyid",
",",
"profile",
")",
"# Convert to arn's",
"for",
"k",
"in",
"[",
"\"alarm_actions\"",
",",
"\"insufficient_data_actions\"",
",",
"\"ok_actions\"",
"]",
":",
"if",
"k",
"in",
"attributes",
":",
"attributes",
"[",
"k",
"]",
"=",
"__salt__",
"[",
"'boto_cloudwatch.convert_to_arn'",
"]",
"(",
"attributes",
"[",
"k",
"]",
",",
"region",
",",
"key",
",",
"keyid",
",",
"profile",
")",
"# Diff the alarm_details with the passed-in attributes, allowing for the",
"# AWS type transformations",
"difference",
"=",
"[",
"]",
"if",
"alarm_details",
":",
"for",
"k",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"attributes",
")",
":",
"if",
"k",
"not",
"in",
"alarm_details",
":",
"difference",
".",
"append",
"(",
"\"{0}={1} (new)\"",
".",
"format",
"(",
"k",
",",
"v",
")",
")",
"continue",
"v",
"=",
"salt",
".",
"utils",
".",
"data",
".",
"decode",
"(",
"v",
")",
"v2",
"=",
"salt",
".",
"utils",
".",
"data",
".",
"decode",
"(",
"alarm_details",
"[",
"k",
"]",
")",
"if",
"v",
"==",
"v2",
":",
"continue",
"if",
"isinstance",
"(",
"v",
",",
"six",
".",
"string_types",
")",
"and",
"v",
"==",
"v2",
":",
"continue",
"if",
"isinstance",
"(",
"v",
",",
"float",
")",
"and",
"v",
"==",
"float",
"(",
"v2",
")",
":",
"continue",
"if",
"isinstance",
"(",
"v",
",",
"int",
")",
"and",
"v",
"==",
"int",
"(",
"v2",
")",
":",
"continue",
"if",
"isinstance",
"(",
"v",
",",
"list",
")",
"and",
"sorted",
"(",
"v",
")",
"==",
"sorted",
"(",
"v2",
")",
":",
"continue",
"difference",
".",
"append",
"(",
"\"{0}='{1}' was: '{2}'\"",
".",
"format",
"(",
"k",
",",
"v",
",",
"v2",
")",
")",
"else",
":",
"difference",
".",
"append",
"(",
"\"new alarm\"",
")",
"create_or_update_alarm_args",
"=",
"{",
"\"name\"",
":",
"name",
",",
"\"region\"",
":",
"region",
",",
"\"key\"",
":",
"key",
",",
"\"keyid\"",
":",
"keyid",
",",
"\"profile\"",
":",
"profile",
"}",
"create_or_update_alarm_args",
".",
"update",
"(",
"attributes",
")",
"if",
"alarm_details",
":",
"# alarm is present. update, or do nothing",
"# check to see if attributes matches is_present. If so, do nothing.",
"if",
"not",
"difference",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"\"alarm {0} present and matching\"",
".",
"format",
"(",
"name",
")",
"return",
"ret",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"msg",
"=",
"'alarm {0} is to be created/updated.'",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"'comment'",
"]",
"=",
"msg",
"ret",
"[",
"'result'",
"]",
"=",
"None",
"return",
"ret",
"result",
"=",
"__salt__",
"[",
"'boto_cloudwatch.create_or_update_alarm'",
"]",
"(",
"*",
"*",
"create_or_update_alarm_args",
")",
"if",
"result",
":",
"ret",
"[",
"'changes'",
"]",
"[",
"'diff'",
"]",
"=",
"difference",
"else",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"=",
"'Failed to create {0} alarm'",
".",
"format",
"(",
"name",
")",
"else",
":",
"# alarm is absent. create it.",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"msg",
"=",
"'alarm {0} is to be created/updated.'",
".",
"format",
"(",
"name",
")",
"ret",
"[",
"'comment'",
"]",
"=",
"msg",
"ret",
"[",
"'result'",
"]",
"=",
"None",
"return",
"ret",
"result",
"=",
"__salt__",
"[",
"'boto_cloudwatch.create_or_update_alarm'",
"]",
"(",
"*",
"*",
"create_or_update_alarm_args",
")",
"if",
"result",
":",
"ret",
"[",
"'changes'",
"]",
"[",
"'new'",
"]",
"=",
"attributes",
"else",
":",
"ret",
"[",
"'result'",
"]",
"=",
"False",
"ret",
"[",
"'comment'",
"]",
"=",
"'Failed to create {0} alarm'",
".",
"format",
"(",
"name",
")",
"return",
"ret"
] | 31.914286 | 20.733333 |
def closure(self):
"""
Returns a new `Independencies()`-object that additionally contains those `IndependenceAssertions`
that are implied by the the current independencies (using with the `semi-graphoid axioms
<https://en.wikipedia.org/w/index.php?title=Conditional_independence&oldid=708760689#Rules_of_conditional_independence>`_;
see (Pearl, 1989, `Conditional Independence and its representations
<http://www.cs.technion.ac.il/~dang/journal_papers/pearl1989conditional.pdf>`_)).
Might be very slow if more than six variables are involved.
Examples
--------
>>> from pgmpy.independencies import Independencies
>>> ind1 = Independencies(('A', ['B', 'C'], 'D'))
>>> ind1.closure()
(A _|_ B | D, C)
(A _|_ B, C | D)
(A _|_ B | D)
(A _|_ C | D, B)
(A _|_ C | D)
>>> ind2 = Independencies(('W', ['X', 'Y', 'Z']))
>>> ind2.closure()
(W _|_ Y)
(W _|_ Y | X)
(W _|_ Z | Y)
(W _|_ Z, X, Y)
(W _|_ Z)
(W _|_ Z, X)
(W _|_ X, Y)
(W _|_ Z | X)
(W _|_ Z, Y | X)
[..]
"""
def single_var(var):
"Checks if var represents a single variable"
if not hasattr(var, '__iter__'):
return True
else:
return len(var) == 1
def sg0(ind):
"Symmetry rule: 'X ⟂ Y | Z' -> 'Y ⟂ X | Z'"
return IndependenceAssertion(ind.event2, ind.event1, ind.event3)
# since X⟂Y|Z == Y⟂X|Z in pgmpy, sg0 (symmetry) is not used as an axiom/rule.
# instead we use a decorator for the other axioms to apply them on both sides
def apply_left_and_right(func):
def symmetric_func(*args):
if len(args) == 1:
return func(args[0]) + func(sg0(args[0]))
if len(args) == 2:
return (func(*args) + func(args[0], sg0(args[1])) +
func(sg0(args[0]), args[1]) + func(sg0(args[0]), sg0(args[1])))
return symmetric_func
@apply_left_and_right
def sg1(ind):
"Decomposition rule: 'X ⟂ Y,W | Z' -> 'X ⟂ Y | Z', 'X ⟂ W | Z'"
if single_var(ind.event2):
return []
else:
return [IndependenceAssertion(ind.event1, ind.event2 - {elem}, ind.event3)
for elem in ind.event2]
@apply_left_and_right
def sg2(ind):
"Weak Union rule: 'X ⟂ Y,W | Z' -> 'X ⟂ Y | W,Z', 'X ⟂ W | Y,Z' "
if single_var(ind.event2):
return []
else:
return [IndependenceAssertion(ind.event1, ind.event2 - {elem}, {elem} | ind.event3)
for elem in ind.event2]
@apply_left_and_right
def sg3(ind1, ind2):
"Contraction rule: 'X ⟂ W | Y,Z' & 'X ⟂ Y | Z' -> 'X ⟂ W,Y | Z'"
if ind1.event1 != ind2.event1:
return []
Y = ind2.event2
Z = ind2.event3
Y_Z = ind1.event3
if Y < Y_Z and Z < Y_Z and Y.isdisjoint(Z):
return [IndependenceAssertion(ind1.event1, ind1.event2 | Y, Z)]
else:
return []
# apply semi-graphoid axioms as long as new independencies are found.
all_independencies = set()
new_inds = set(self.independencies)
while new_inds:
new_pairs = (set(itertools.permutations(new_inds, 2)) |
set(itertools.product(new_inds, all_independencies)) |
set(itertools.product(all_independencies, new_inds)))
all_independencies |= new_inds
new_inds = set(sum([sg1(ind) for ind in new_inds] +
[sg2(ind) for ind in new_inds] +
[sg3(*inds) for inds in new_pairs], []))
new_inds -= all_independencies
return Independencies(*list(all_independencies)) | [
"def",
"closure",
"(",
"self",
")",
":",
"def",
"single_var",
"(",
"var",
")",
":",
"\"Checks if var represents a single variable\"",
"if",
"not",
"hasattr",
"(",
"var",
",",
"'__iter__'",
")",
":",
"return",
"True",
"else",
":",
"return",
"len",
"(",
"var",
")",
"==",
"1",
"def",
"sg0",
"(",
"ind",
")",
":",
"\"Symmetry rule: 'X ⟂ Y | Z' -> 'Y ⟂ X | Z'\"",
"return",
"IndependenceAssertion",
"(",
"ind",
".",
"event2",
",",
"ind",
".",
"event1",
",",
"ind",
".",
"event3",
")",
"# since X⟂Y|Z == Y⟂X|Z in pgmpy, sg0 (symmetry) is not used as an axiom/rule.",
"# instead we use a decorator for the other axioms to apply them on both sides",
"def",
"apply_left_and_right",
"(",
"func",
")",
":",
"def",
"symmetric_func",
"(",
"*",
"args",
")",
":",
"if",
"len",
"(",
"args",
")",
"==",
"1",
":",
"return",
"func",
"(",
"args",
"[",
"0",
"]",
")",
"+",
"func",
"(",
"sg0",
"(",
"args",
"[",
"0",
"]",
")",
")",
"if",
"len",
"(",
"args",
")",
"==",
"2",
":",
"return",
"(",
"func",
"(",
"*",
"args",
")",
"+",
"func",
"(",
"args",
"[",
"0",
"]",
",",
"sg0",
"(",
"args",
"[",
"1",
"]",
")",
")",
"+",
"func",
"(",
"sg0",
"(",
"args",
"[",
"0",
"]",
")",
",",
"args",
"[",
"1",
"]",
")",
"+",
"func",
"(",
"sg0",
"(",
"args",
"[",
"0",
"]",
")",
",",
"sg0",
"(",
"args",
"[",
"1",
"]",
")",
")",
")",
"return",
"symmetric_func",
"@",
"apply_left_and_right",
"def",
"sg1",
"(",
"ind",
")",
":",
"\"Decomposition rule: 'X ⟂ Y,W | Z' -> 'X ⟂ Y | Z', 'X ⟂ W | Z'\"",
"if",
"single_var",
"(",
"ind",
".",
"event2",
")",
":",
"return",
"[",
"]",
"else",
":",
"return",
"[",
"IndependenceAssertion",
"(",
"ind",
".",
"event1",
",",
"ind",
".",
"event2",
"-",
"{",
"elem",
"}",
",",
"ind",
".",
"event3",
")",
"for",
"elem",
"in",
"ind",
".",
"event2",
"]",
"@",
"apply_left_and_right",
"def",
"sg2",
"(",
"ind",
")",
":",
"\"Weak Union rule: 'X ⟂ Y,W | Z' -> 'X ⟂ Y | W,Z', 'X ⟂ W | Y,Z' \"",
"if",
"single_var",
"(",
"ind",
".",
"event2",
")",
":",
"return",
"[",
"]",
"else",
":",
"return",
"[",
"IndependenceAssertion",
"(",
"ind",
".",
"event1",
",",
"ind",
".",
"event2",
"-",
"{",
"elem",
"}",
",",
"{",
"elem",
"}",
"|",
"ind",
".",
"event3",
")",
"for",
"elem",
"in",
"ind",
".",
"event2",
"]",
"@",
"apply_left_and_right",
"def",
"sg3",
"(",
"ind1",
",",
"ind2",
")",
":",
"\"Contraction rule: 'X ⟂ W | Y,Z' & 'X ⟂ Y | Z' -> 'X ⟂ W,Y | Z'\"",
"if",
"ind1",
".",
"event1",
"!=",
"ind2",
".",
"event1",
":",
"return",
"[",
"]",
"Y",
"=",
"ind2",
".",
"event2",
"Z",
"=",
"ind2",
".",
"event3",
"Y_Z",
"=",
"ind1",
".",
"event3",
"if",
"Y",
"<",
"Y_Z",
"and",
"Z",
"<",
"Y_Z",
"and",
"Y",
".",
"isdisjoint",
"(",
"Z",
")",
":",
"return",
"[",
"IndependenceAssertion",
"(",
"ind1",
".",
"event1",
",",
"ind1",
".",
"event2",
"|",
"Y",
",",
"Z",
")",
"]",
"else",
":",
"return",
"[",
"]",
"# apply semi-graphoid axioms as long as new independencies are found.",
"all_independencies",
"=",
"set",
"(",
")",
"new_inds",
"=",
"set",
"(",
"self",
".",
"independencies",
")",
"while",
"new_inds",
":",
"new_pairs",
"=",
"(",
"set",
"(",
"itertools",
".",
"permutations",
"(",
"new_inds",
",",
"2",
")",
")",
"|",
"set",
"(",
"itertools",
".",
"product",
"(",
"new_inds",
",",
"all_independencies",
")",
")",
"|",
"set",
"(",
"itertools",
".",
"product",
"(",
"all_independencies",
",",
"new_inds",
")",
")",
")",
"all_independencies",
"|=",
"new_inds",
"new_inds",
"=",
"set",
"(",
"sum",
"(",
"[",
"sg1",
"(",
"ind",
")",
"for",
"ind",
"in",
"new_inds",
"]",
"+",
"[",
"sg2",
"(",
"ind",
")",
"for",
"ind",
"in",
"new_inds",
"]",
"+",
"[",
"sg3",
"(",
"*",
"inds",
")",
"for",
"inds",
"in",
"new_pairs",
"]",
",",
"[",
"]",
")",
")",
"new_inds",
"-=",
"all_independencies",
"return",
"Independencies",
"(",
"*",
"list",
"(",
"all_independencies",
")",
")"
] | 38.019048 | 23.657143 |
def post_appeals_list(self, creator_id=None, creator_name=None,
post_id=None):
"""Function to return list of appeals (Requires login).
Parameters:
creator_id (int): The user id of the appeal's creator.
creator_name (str): The name of the appeal's creator.
post_id (int): The post id if the appeal.
"""
params = {
'creator_id': creator_id,
'creator_name': creator_name,
'post_id': post_id
}
return self._get('post_appeals.json', params, auth=True) | [
"def",
"post_appeals_list",
"(",
"self",
",",
"creator_id",
"=",
"None",
",",
"creator_name",
"=",
"None",
",",
"post_id",
"=",
"None",
")",
":",
"params",
"=",
"{",
"'creator_id'",
":",
"creator_id",
",",
"'creator_name'",
":",
"creator_name",
",",
"'post_id'",
":",
"post_id",
"}",
"return",
"self",
".",
"_get",
"(",
"'post_appeals.json'",
",",
"params",
",",
"auth",
"=",
"True",
")"
] | 38.866667 | 15.666667 |
def _space_in_headerblock(relative_path, contents, linter_options):
"""Check for space between the filename in a header block and description.
like such:
# /path/to/filename
#
# Description
"""
del relative_path
del linter_options
check_index = 1
if len(contents) > 0:
if _line_is_shebang(contents[0]):
check_index = 2
if len(contents) < check_index + 1:
description = ("""Document cannot have less """
"""than {0} lines""").format(check_index + 1)
return LinterFailure(description, 1, replacement=None)
candidate = contents[check_index]
if not _match_space_at_line(candidate):
description = """The second line must be an empty comment"""
return LinterFailure(description, check_index + 1,
_comment_type_from_line(candidate)[:-1] + "\n" +
candidate) | [
"def",
"_space_in_headerblock",
"(",
"relative_path",
",",
"contents",
",",
"linter_options",
")",
":",
"del",
"relative_path",
"del",
"linter_options",
"check_index",
"=",
"1",
"if",
"len",
"(",
"contents",
")",
">",
"0",
":",
"if",
"_line_is_shebang",
"(",
"contents",
"[",
"0",
"]",
")",
":",
"check_index",
"=",
"2",
"if",
"len",
"(",
"contents",
")",
"<",
"check_index",
"+",
"1",
":",
"description",
"=",
"(",
"\"\"\"Document cannot have less \"\"\"",
"\"\"\"than {0} lines\"\"\"",
")",
".",
"format",
"(",
"check_index",
"+",
"1",
")",
"return",
"LinterFailure",
"(",
"description",
",",
"1",
",",
"replacement",
"=",
"None",
")",
"candidate",
"=",
"contents",
"[",
"check_index",
"]",
"if",
"not",
"_match_space_at_line",
"(",
"candidate",
")",
":",
"description",
"=",
"\"\"\"The second line must be an empty comment\"\"\"",
"return",
"LinterFailure",
"(",
"description",
",",
"check_index",
"+",
"1",
",",
"_comment_type_from_line",
"(",
"candidate",
")",
"[",
":",
"-",
"1",
"]",
"+",
"\"\\n\"",
"+",
"candidate",
")"
] | 31.448276 | 18.62069 |
def course_modal(context, course=None):
"""
Django template tag that returns course information to display in a modal.
You may pass in a particular course if you like. Otherwise, the modal will look for course context
within the parent context.
Usage:
{% course_modal %}
{% course_modal course %}
"""
if course:
context.update({
'course_image_uri': course.get('course_image_uri', ''),
'course_title': course.get('course_title', ''),
'course_level_type': course.get('course_level_type', ''),
'course_short_description': course.get('course_short_description', ''),
'course_effort': course.get('course_effort', ''),
'course_full_description': course.get('course_full_description', ''),
'expected_learning_items': course.get('expected_learning_items', []),
'staff': course.get('staff', []),
'premium_modes': course.get('premium_modes', []),
})
return context | [
"def",
"course_modal",
"(",
"context",
",",
"course",
"=",
"None",
")",
":",
"if",
"course",
":",
"context",
".",
"update",
"(",
"{",
"'course_image_uri'",
":",
"course",
".",
"get",
"(",
"'course_image_uri'",
",",
"''",
")",
",",
"'course_title'",
":",
"course",
".",
"get",
"(",
"'course_title'",
",",
"''",
")",
",",
"'course_level_type'",
":",
"course",
".",
"get",
"(",
"'course_level_type'",
",",
"''",
")",
",",
"'course_short_description'",
":",
"course",
".",
"get",
"(",
"'course_short_description'",
",",
"''",
")",
",",
"'course_effort'",
":",
"course",
".",
"get",
"(",
"'course_effort'",
",",
"''",
")",
",",
"'course_full_description'",
":",
"course",
".",
"get",
"(",
"'course_full_description'",
",",
"''",
")",
",",
"'expected_learning_items'",
":",
"course",
".",
"get",
"(",
"'expected_learning_items'",
",",
"[",
"]",
")",
",",
"'staff'",
":",
"course",
".",
"get",
"(",
"'staff'",
",",
"[",
"]",
")",
",",
"'premium_modes'",
":",
"course",
".",
"get",
"(",
"'premium_modes'",
",",
"[",
"]",
")",
",",
"}",
")",
"return",
"context"
] | 41.875 | 24.291667 |
def append(self, data, segment=0):
"""
Append a single row to an SFrame.
Throws a RuntimeError if one or more column's type is incompatible with
a type appended.
Parameters
----------
data : iterable
An iterable representation of a single row.
segment : int
The segment to write this row. Each segment is numbered
sequentially, starting with 0. Any value in segment 1 will be after
any value in segment 0, and the order of rows in each segment is
preserved as they are added.
"""
# Assume this case refers to an SFrame with a single column
if not hasattr(data, '__iter__'):
data = [data]
self._builder.append(data, segment) | [
"def",
"append",
"(",
"self",
",",
"data",
",",
"segment",
"=",
"0",
")",
":",
"# Assume this case refers to an SFrame with a single column",
"if",
"not",
"hasattr",
"(",
"data",
",",
"'__iter__'",
")",
":",
"data",
"=",
"[",
"data",
"]",
"self",
".",
"_builder",
".",
"append",
"(",
"data",
",",
"segment",
")"
] | 35.181818 | 19.272727 |
def from_moy(cls, moy, leap_year=False):
"""Create Ladybug Datetime from a minute of the year.
Args:
moy: An integer value 0 <= and < 525600
"""
if not leap_year:
num_of_minutes_until_month = (0, 44640, 84960, 129600, 172800, 217440,
260640, 305280, 349920, 393120, 437760,
480960, 525600)
else:
num_of_minutes_until_month = (0, 44640, 84960 + 1440, 129600 + 1440,
172800 + 1440, 217440 + 1440, 260640 + 1440,
305280 + 1440, 349920 + 1440, 393120 + 1440,
437760 + 1440, 480960 + 1440, 525600 + 1440)
# find month
for monthCount in range(12):
if int(moy) < num_of_minutes_until_month[monthCount + 1]:
month = monthCount + 1
break
try:
day = int((moy - num_of_minutes_until_month[month - 1]) / (60 * 24)) + 1
except UnboundLocalError:
raise ValueError(
"moy must be positive and smaller than 525600. Invalid input %d" % (moy)
)
else:
hour = int((moy / 60) % 24)
minute = int(moy % 60)
return cls(month, day, hour, minute, leap_year) | [
"def",
"from_moy",
"(",
"cls",
",",
"moy",
",",
"leap_year",
"=",
"False",
")",
":",
"if",
"not",
"leap_year",
":",
"num_of_minutes_until_month",
"=",
"(",
"0",
",",
"44640",
",",
"84960",
",",
"129600",
",",
"172800",
",",
"217440",
",",
"260640",
",",
"305280",
",",
"349920",
",",
"393120",
",",
"437760",
",",
"480960",
",",
"525600",
")",
"else",
":",
"num_of_minutes_until_month",
"=",
"(",
"0",
",",
"44640",
",",
"84960",
"+",
"1440",
",",
"129600",
"+",
"1440",
",",
"172800",
"+",
"1440",
",",
"217440",
"+",
"1440",
",",
"260640",
"+",
"1440",
",",
"305280",
"+",
"1440",
",",
"349920",
"+",
"1440",
",",
"393120",
"+",
"1440",
",",
"437760",
"+",
"1440",
",",
"480960",
"+",
"1440",
",",
"525600",
"+",
"1440",
")",
"# find month",
"for",
"monthCount",
"in",
"range",
"(",
"12",
")",
":",
"if",
"int",
"(",
"moy",
")",
"<",
"num_of_minutes_until_month",
"[",
"monthCount",
"+",
"1",
"]",
":",
"month",
"=",
"monthCount",
"+",
"1",
"break",
"try",
":",
"day",
"=",
"int",
"(",
"(",
"moy",
"-",
"num_of_minutes_until_month",
"[",
"month",
"-",
"1",
"]",
")",
"/",
"(",
"60",
"*",
"24",
")",
")",
"+",
"1",
"except",
"UnboundLocalError",
":",
"raise",
"ValueError",
"(",
"\"moy must be positive and smaller than 525600. Invalid input %d\"",
"%",
"(",
"moy",
")",
")",
"else",
":",
"hour",
"=",
"int",
"(",
"(",
"moy",
"/",
"60",
")",
"%",
"24",
")",
"minute",
"=",
"int",
"(",
"moy",
"%",
"60",
")",
"return",
"cls",
"(",
"month",
",",
"day",
",",
"hour",
",",
"minute",
",",
"leap_year",
")"
] | 43.870968 | 23.548387 |
def _generate_api_gateway_deployment(self):
"""
Generate the API Gateway Deployment/Stage, and add to self.tf_conf
"""
# finally, the deployment
# this resource MUST come last
dep_on = []
for rtype in sorted(self.tf_conf['resource'].keys()):
for rname in sorted(self.tf_conf['resource'][rtype].keys()):
dep_on.append('%s.%s' % (rtype, rname))
self.tf_conf['resource']['aws_api_gateway_deployment']['depl'] = {
'rest_api_id': '${aws_api_gateway_rest_api.rest_api.id}',
'description': self.description,
'stage_name': self.config.stage_name,
'depends_on': dep_on
}
self.tf_conf['output']['deployment_id'] = {
'value': '${aws_api_gateway_deployment.depl.id}'
} | [
"def",
"_generate_api_gateway_deployment",
"(",
"self",
")",
":",
"# finally, the deployment",
"# this resource MUST come last",
"dep_on",
"=",
"[",
"]",
"for",
"rtype",
"in",
"sorted",
"(",
"self",
".",
"tf_conf",
"[",
"'resource'",
"]",
".",
"keys",
"(",
")",
")",
":",
"for",
"rname",
"in",
"sorted",
"(",
"self",
".",
"tf_conf",
"[",
"'resource'",
"]",
"[",
"rtype",
"]",
".",
"keys",
"(",
")",
")",
":",
"dep_on",
".",
"append",
"(",
"'%s.%s'",
"%",
"(",
"rtype",
",",
"rname",
")",
")",
"self",
".",
"tf_conf",
"[",
"'resource'",
"]",
"[",
"'aws_api_gateway_deployment'",
"]",
"[",
"'depl'",
"]",
"=",
"{",
"'rest_api_id'",
":",
"'${aws_api_gateway_rest_api.rest_api.id}'",
",",
"'description'",
":",
"self",
".",
"description",
",",
"'stage_name'",
":",
"self",
".",
"config",
".",
"stage_name",
",",
"'depends_on'",
":",
"dep_on",
"}",
"self",
".",
"tf_conf",
"[",
"'output'",
"]",
"[",
"'deployment_id'",
"]",
"=",
"{",
"'value'",
":",
"'${aws_api_gateway_deployment.depl.id}'",
"}"
] | 42.842105 | 16.421053 |
def scale_image(self, in_fname, out_fname, max_width, max_height):
"""Scales an image with the same aspect ratio centered in an
image with a given max_width and max_height
if in_fname == out_fname the image can only be scaled down
"""
# local import to avoid testing dependency on PIL:
try:
from PIL import Image
except ImportError:
import Image
img = Image.open(in_fname)
width_in, height_in = img.size
scale_w = max_width / float(width_in)
scale_h = max_height / float(height_in)
if height_in * scale_w <= max_height:
scale = scale_w
else:
scale = scale_h
if scale >= 1.0 and in_fname == out_fname:
return
width_sc = int(round(scale * width_in))
height_sc = int(round(scale * height_in))
# resize the image
img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
# insert centered
thumb = Image.new('RGB', (max_width, max_height), (255, 255, 255))
pos_insert = (
(max_width - width_sc) // 2, (max_height - height_sc) // 2)
thumb.paste(img, pos_insert)
thumb.save(out_fname) | [
"def",
"scale_image",
"(",
"self",
",",
"in_fname",
",",
"out_fname",
",",
"max_width",
",",
"max_height",
")",
":",
"# local import to avoid testing dependency on PIL:",
"try",
":",
"from",
"PIL",
"import",
"Image",
"except",
"ImportError",
":",
"import",
"Image",
"img",
"=",
"Image",
".",
"open",
"(",
"in_fname",
")",
"width_in",
",",
"height_in",
"=",
"img",
".",
"size",
"scale_w",
"=",
"max_width",
"/",
"float",
"(",
"width_in",
")",
"scale_h",
"=",
"max_height",
"/",
"float",
"(",
"height_in",
")",
"if",
"height_in",
"*",
"scale_w",
"<=",
"max_height",
":",
"scale",
"=",
"scale_w",
"else",
":",
"scale",
"=",
"scale_h",
"if",
"scale",
">=",
"1.0",
"and",
"in_fname",
"==",
"out_fname",
":",
"return",
"width_sc",
"=",
"int",
"(",
"round",
"(",
"scale",
"*",
"width_in",
")",
")",
"height_sc",
"=",
"int",
"(",
"round",
"(",
"scale",
"*",
"height_in",
")",
")",
"# resize the image",
"img",
".",
"thumbnail",
"(",
"(",
"width_sc",
",",
"height_sc",
")",
",",
"Image",
".",
"ANTIALIAS",
")",
"# insert centered",
"thumb",
"=",
"Image",
".",
"new",
"(",
"'RGB'",
",",
"(",
"max_width",
",",
"max_height",
")",
",",
"(",
"255",
",",
"255",
",",
"255",
")",
")",
"pos_insert",
"=",
"(",
"(",
"max_width",
"-",
"width_sc",
")",
"//",
"2",
",",
"(",
"max_height",
"-",
"height_sc",
")",
"//",
"2",
")",
"thumb",
".",
"paste",
"(",
"img",
",",
"pos_insert",
")",
"thumb",
".",
"save",
"(",
"out_fname",
")"
] | 33.5 | 18.472222 |
def _update_hash(self, arg):
""" Takes an argument and updates the hash.
The argument can be an np.array, string, or list
of things that are convertable to strings.
"""
if isinstance(arg, np.ndarray):
self.ahash.update(arg.view(np.uint8))
elif isinstance(arg, list):
[self._update_hash(a) for a in arg]
else:
self.ahash.update(str(arg).encode('utf-8')) | [
"def",
"_update_hash",
"(",
"self",
",",
"arg",
")",
":",
"if",
"isinstance",
"(",
"arg",
",",
"np",
".",
"ndarray",
")",
":",
"self",
".",
"ahash",
".",
"update",
"(",
"arg",
".",
"view",
"(",
"np",
".",
"uint8",
")",
")",
"elif",
"isinstance",
"(",
"arg",
",",
"list",
")",
":",
"[",
"self",
".",
"_update_hash",
"(",
"a",
")",
"for",
"a",
"in",
"arg",
"]",
"else",
":",
"self",
".",
"ahash",
".",
"update",
"(",
"str",
"(",
"arg",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")"
] | 39.454545 | 9.272727 |
def _other_endian(typ):
"""Return the type with the 'other' byte order. Simple types like
c_int and so on already have __ctype_be__ and __ctype_le__
attributes which contain the types, for more complicated types
only arrays are supported.
"""
try:
return getattr(typ, _OTHER_ENDIAN)
except AttributeError:
if type(typ) == _array_type:
return _other_endian(typ._type_) * typ._length_
raise TypeError("This type does not support other endian: %s" % typ) | [
"def",
"_other_endian",
"(",
"typ",
")",
":",
"try",
":",
"return",
"getattr",
"(",
"typ",
",",
"_OTHER_ENDIAN",
")",
"except",
"AttributeError",
":",
"if",
"type",
"(",
"typ",
")",
"==",
"_array_type",
":",
"return",
"_other_endian",
"(",
"typ",
".",
"_type_",
")",
"*",
"typ",
".",
"_length_",
"raise",
"TypeError",
"(",
"\"This type does not support other endian: %s\"",
"%",
"typ",
")"
] | 42.083333 | 15.166667 |
def tokenize(self, data, fit=False):
"""
:param data: a dataframe containing a column to be tokenized
:param fit: if True, self.sequence_length will exactly accomodate the largest tokenized sequence length
:return: 1D array of tokens with length = rows * sequence_length
"""
with timer('tokenize %s' % self.name, logging.DEBUG):
cleaned = self.series(data).str.replace(Token.PUNCTUATION_FILTER, ' ')
lowered = cleaned.str.lower()
dataframe = lowered.str.split(expand=True)
if fit and self.sequence_length is None:
self.sequence_length = len(dataframe.columns)
while len(dataframe.columns) < self.sequence_length:
column = len(dataframe.columns)
logger.warning('No string has %i tokens, adding blank column %i' % (self.sequence_length, column))
dataframe[column] = float('nan')
return pandas.DataFrame({self.column: dataframe.loc[:,0:self.sequence_length - 1].values.flatten()}) | [
"def",
"tokenize",
"(",
"self",
",",
"data",
",",
"fit",
"=",
"False",
")",
":",
"with",
"timer",
"(",
"'tokenize %s'",
"%",
"self",
".",
"name",
",",
"logging",
".",
"DEBUG",
")",
":",
"cleaned",
"=",
"self",
".",
"series",
"(",
"data",
")",
".",
"str",
".",
"replace",
"(",
"Token",
".",
"PUNCTUATION_FILTER",
",",
"' '",
")",
"lowered",
"=",
"cleaned",
".",
"str",
".",
"lower",
"(",
")",
"dataframe",
"=",
"lowered",
".",
"str",
".",
"split",
"(",
"expand",
"=",
"True",
")",
"if",
"fit",
"and",
"self",
".",
"sequence_length",
"is",
"None",
":",
"self",
".",
"sequence_length",
"=",
"len",
"(",
"dataframe",
".",
"columns",
")",
"while",
"len",
"(",
"dataframe",
".",
"columns",
")",
"<",
"self",
".",
"sequence_length",
":",
"column",
"=",
"len",
"(",
"dataframe",
".",
"columns",
")",
"logger",
".",
"warning",
"(",
"'No string has %i tokens, adding blank column %i'",
"%",
"(",
"self",
".",
"sequence_length",
",",
"column",
")",
")",
"dataframe",
"[",
"column",
"]",
"=",
"float",
"(",
"'nan'",
")",
"return",
"pandas",
".",
"DataFrame",
"(",
"{",
"self",
".",
"column",
":",
"dataframe",
".",
"loc",
"[",
":",
",",
"0",
":",
"self",
".",
"sequence_length",
"-",
"1",
"]",
".",
"values",
".",
"flatten",
"(",
")",
"}",
")"
] | 61.470588 | 25.352941 |
def next_tuple(self, latency_in_ns):
"""Apply updates to the next tuple metrics"""
self.update_reduced_metric(self.NEXT_TUPLE_LATENCY, latency_in_ns)
self.update_count(self.NEXT_TUPLE_COUNT) | [
"def",
"next_tuple",
"(",
"self",
",",
"latency_in_ns",
")",
":",
"self",
".",
"update_reduced_metric",
"(",
"self",
".",
"NEXT_TUPLE_LATENCY",
",",
"latency_in_ns",
")",
"self",
".",
"update_count",
"(",
"self",
".",
"NEXT_TUPLE_COUNT",
")"
] | 49.75 | 9.5 |
def get_axis_padding(padding):
"""
Process a padding value supplied as a tuple or number and returns
padding values for x-, y- and z-axis.
"""
if isinstance(padding, tuple):
if len(padding) == 2:
xpad, ypad = padding
zpad = 0
elif len(padding) == 3:
xpad, ypad, zpad = padding
else:
raise ValueError('Padding must be supplied as an number applied '
'to all axes or a length two or three tuple '
'corresponding to the x-, y- and optionally z-axis')
else:
xpad, ypad, zpad = (padding,)*3
return (xpad, ypad, zpad) | [
"def",
"get_axis_padding",
"(",
"padding",
")",
":",
"if",
"isinstance",
"(",
"padding",
",",
"tuple",
")",
":",
"if",
"len",
"(",
"padding",
")",
"==",
"2",
":",
"xpad",
",",
"ypad",
"=",
"padding",
"zpad",
"=",
"0",
"elif",
"len",
"(",
"padding",
")",
"==",
"3",
":",
"xpad",
",",
"ypad",
",",
"zpad",
"=",
"padding",
"else",
":",
"raise",
"ValueError",
"(",
"'Padding must be supplied as an number applied '",
"'to all axes or a length two or three tuple '",
"'corresponding to the x-, y- and optionally z-axis'",
")",
"else",
":",
"xpad",
",",
"ypad",
",",
"zpad",
"=",
"(",
"padding",
",",
")",
"*",
"3",
"return",
"(",
"xpad",
",",
"ypad",
",",
"zpad",
")"
] | 36.666667 | 15.444444 |
def genstis(outname):
""" Generate TestCases from cmdfile according to the pattern in patternfile"""
pattern="""class stisS%d(countrateCase):
def setUp(self):
self.obsmode="%s"
self.spectrum="%s"
self.setglobal(__file__)
self.runpy()\n"""
speclist=['/grp/hst/cdbs/calspec/gd71_mod_005.fits',
'/grp/hst/cdbs/calspec/gd153_mod_004.fits',
'/grp/hst/cdbs/calspec/g191b2b_mod_004.fits']
glist={'g140l':'fuvmama','g230l':'nuvmama','g430l':'ccd','g750l':'ccd',
'g230lb':'ccd'}
out=open(outname,'a')
out.write("""from pytools import testutil
import sys
from basecase import calcphotCase, calcspecCase, countrateCase,SpecSourcerateSpecCase\n
""")
count=0
for g in glist:
for sp in speclist:
obsmode='stis,%s,fuvmama,s52x2'%g
defn=pattern%(count,obsmode,sp)
out.write(defn)
count+=1
out.write("""\n\n
if __name__ == '__main__':
if 'debug' in sys.argv:
testutil.debug(__name__)
else:
testutil.testall(__name__,2)
""")
out.close() | [
"def",
"genstis",
"(",
"outname",
")",
":",
"pattern",
"=",
"\"\"\"class stisS%d(countrateCase):\n def setUp(self):\n self.obsmode=\"%s\"\n self.spectrum=\"%s\"\n self.setglobal(__file__)\n self.runpy()\\n\"\"\"",
"speclist",
"=",
"[",
"'/grp/hst/cdbs/calspec/gd71_mod_005.fits'",
",",
"'/grp/hst/cdbs/calspec/gd153_mod_004.fits'",
",",
"'/grp/hst/cdbs/calspec/g191b2b_mod_004.fits'",
"]",
"glist",
"=",
"{",
"'g140l'",
":",
"'fuvmama'",
",",
"'g230l'",
":",
"'nuvmama'",
",",
"'g430l'",
":",
"'ccd'",
",",
"'g750l'",
":",
"'ccd'",
",",
"'g230lb'",
":",
"'ccd'",
"}",
"out",
"=",
"open",
"(",
"outname",
",",
"'a'",
")",
"out",
".",
"write",
"(",
"\"\"\"from pytools import testutil\nimport sys\nfrom basecase import calcphotCase, calcspecCase, countrateCase,SpecSourcerateSpecCase\\n\n\"\"\"",
")",
"count",
"=",
"0",
"for",
"g",
"in",
"glist",
":",
"for",
"sp",
"in",
"speclist",
":",
"obsmode",
"=",
"'stis,%s,fuvmama,s52x2'",
"%",
"g",
"defn",
"=",
"pattern",
"%",
"(",
"count",
",",
"obsmode",
",",
"sp",
")",
"out",
".",
"write",
"(",
"defn",
")",
"count",
"+=",
"1",
"out",
".",
"write",
"(",
"\"\"\"\\n\\n\nif __name__ == '__main__':\n if 'debug' in sys.argv:\n testutil.debug(__name__)\n else:\n testutil.testall(__name__,2)\n\"\"\"",
")",
"out",
".",
"close",
"(",
")"
] | 28.263158 | 17.763158 |
def _build_field_type_map(base_class):
"""Create mapping from all $type values to their respective Field classes"""
mapping = {}
for cls in _get_recursive_subclasses(base_class):
if cls.field_type:
if isinstance(cls.field_type, tuple):
for field_type in cls.field_type:
mapping[field_type] = cls
elif isinstance(cls.field_type, _string_types):
mapping[cls.field_type] = cls
else:
raise ValueError('Field type must be str or tuple, cannot understand type "{}" on class "{}"'.format(
type(cls.field_type),
cls
))
return mapping | [
"def",
"_build_field_type_map",
"(",
"base_class",
")",
":",
"mapping",
"=",
"{",
"}",
"for",
"cls",
"in",
"_get_recursive_subclasses",
"(",
"base_class",
")",
":",
"if",
"cls",
".",
"field_type",
":",
"if",
"isinstance",
"(",
"cls",
".",
"field_type",
",",
"tuple",
")",
":",
"for",
"field_type",
"in",
"cls",
".",
"field_type",
":",
"mapping",
"[",
"field_type",
"]",
"=",
"cls",
"elif",
"isinstance",
"(",
"cls",
".",
"field_type",
",",
"_string_types",
")",
":",
"mapping",
"[",
"cls",
".",
"field_type",
"]",
"=",
"cls",
"else",
":",
"raise",
"ValueError",
"(",
"'Field type must be str or tuple, cannot understand type \"{}\" on class \"{}\"'",
".",
"format",
"(",
"type",
"(",
"cls",
".",
"field_type",
")",
",",
"cls",
")",
")",
"return",
"mapping"
] | 38.555556 | 19 |
def normalize(self, timestamp, steps=0):
'''
Normalize a timestamp according to the interval configuration. Optionally
can be used to calculate the timestamp N steps away.
'''
# So far, the only commonality with RelativeTime
return self.from_bucket( self.to_bucket(timestamp, steps) ) | [
"def",
"normalize",
"(",
"self",
",",
"timestamp",
",",
"steps",
"=",
"0",
")",
":",
"# So far, the only commonality with RelativeTime",
"return",
"self",
".",
"from_bucket",
"(",
"self",
".",
"to_bucket",
"(",
"timestamp",
",",
"steps",
")",
")"
] | 43.142857 | 22 |
def get_gradebook_form(self, *args, **kwargs):
"""Pass through to provider GradebookAdminSession.get_gradebook_form_for_update"""
# Implemented from kitosid template for -
# osid.resource.BinAdminSession.get_bin_form_for_update_template
# This method might be a bit sketchy. Time will tell.
if isinstance(args[-1], list) or 'gradebook_record_types' in kwargs:
return self.get_gradebook_form_for_create(*args, **kwargs)
else:
return self.get_gradebook_form_for_update(*args, **kwargs) | [
"def",
"get_gradebook_form",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# Implemented from kitosid template for -",
"# osid.resource.BinAdminSession.get_bin_form_for_update_template",
"# This method might be a bit sketchy. Time will tell.",
"if",
"isinstance",
"(",
"args",
"[",
"-",
"1",
"]",
",",
"list",
")",
"or",
"'gradebook_record_types'",
"in",
"kwargs",
":",
"return",
"self",
".",
"get_gradebook_form_for_create",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"return",
"self",
".",
"get_gradebook_form_for_update",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | 60.777778 | 21.222222 |
def get_existing_model(model_name):
""" Try to find existing model class named `model_name`.
:param model_name: String name of the model class.
"""
try:
model_cls = engine.get_document_cls(model_name)
log.debug('Model `{}` already exists. Using existing one'.format(
model_name))
return model_cls
except ValueError:
log.debug('Model `{}` does not exist'.format(model_name)) | [
"def",
"get_existing_model",
"(",
"model_name",
")",
":",
"try",
":",
"model_cls",
"=",
"engine",
".",
"get_document_cls",
"(",
"model_name",
")",
"log",
".",
"debug",
"(",
"'Model `{}` already exists. Using existing one'",
".",
"format",
"(",
"model_name",
")",
")",
"return",
"model_cls",
"except",
"ValueError",
":",
"log",
".",
"debug",
"(",
"'Model `{}` does not exist'",
".",
"format",
"(",
"model_name",
")",
")"
] | 35.583333 | 17.833333 |
def __liftover_coordinates_genomic_indels(self, intersect_region):
"""
Lift a region that overlaps the genomic occurrence of the retrotransposon
to consensus sequence coordinates using just the coordinates (not the full
alignment), when they have differing length. This is an internal helper
method. The above length constraint must be true, otherwise an assertion
is failed.
:param intersect_region: a region that intersects this occurrence.
:return: list of GenomicInterval objects. This is a list because a genomic
deletion of part of the retrotransposon can fragment the
intersecting region and result in more than one returned interval.
List might be empty, if intersection overlaps only gaps.
:note: no checks are made for whether the interval really intersects!!
"""
# should never happen, but check anyway...
consensus_match_length = self.consensus_end - self.consensus_start
size_dif = consensus_match_length - len(self)
assert(size_dif != 0)
if size_dif < 0:
# genomic region longer; assume genomic region contains insertions
return self.__liftover_coordinates_genomic_insertions(intersect_region)
else:
# genomic region shorter; assume genomic region contains deletions
return self.__liftover_coordinates_genomic_deletions(intersect_region) | [
"def",
"__liftover_coordinates_genomic_indels",
"(",
"self",
",",
"intersect_region",
")",
":",
"# should never happen, but check anyway...",
"consensus_match_length",
"=",
"self",
".",
"consensus_end",
"-",
"self",
".",
"consensus_start",
"size_dif",
"=",
"consensus_match_length",
"-",
"len",
"(",
"self",
")",
"assert",
"(",
"size_dif",
"!=",
"0",
")",
"if",
"size_dif",
"<",
"0",
":",
"# genomic region longer; assume genomic region contains insertions",
"return",
"self",
".",
"__liftover_coordinates_genomic_insertions",
"(",
"intersect_region",
")",
"else",
":",
"# genomic region shorter; assume genomic region contains deletions",
"return",
"self",
".",
"__liftover_coordinates_genomic_deletions",
"(",
"intersect_region",
")"
] | 52.115385 | 27.884615 |
def add_handler(self, handler: Handler, group: int = 0):
"""Use this method to register an update handler.
You can register multiple handlers, but at most one handler within a group
will be used for a single update. To handle the same update more than once, register
your handler using a different group id (lower group id == higher priority).
Args:
handler (``Handler``):
The handler to be registered.
group (``int``, *optional*):
The group identifier, defaults to 0.
Returns:
A tuple of (handler, group)
"""
if isinstance(handler, DisconnectHandler):
self.disconnect_handler = handler.callback
else:
self.dispatcher.add_handler(handler, group)
return handler, group | [
"def",
"add_handler",
"(",
"self",
",",
"handler",
":",
"Handler",
",",
"group",
":",
"int",
"=",
"0",
")",
":",
"if",
"isinstance",
"(",
"handler",
",",
"DisconnectHandler",
")",
":",
"self",
".",
"disconnect_handler",
"=",
"handler",
".",
"callback",
"else",
":",
"self",
".",
"dispatcher",
".",
"add_handler",
"(",
"handler",
",",
"group",
")",
"return",
"handler",
",",
"group"
] | 35.73913 | 22 |
def list_tokens(opts):
'''
List all tokens in the store.
:param opts: Salt master config options
:returns: List of dicts (tokens)
'''
ret = []
for (dirpath, dirnames, filenames) in salt.utils.path.os_walk(opts['token_dir']):
for token in filenames:
ret.append(token)
return ret | [
"def",
"list_tokens",
"(",
"opts",
")",
":",
"ret",
"=",
"[",
"]",
"for",
"(",
"dirpath",
",",
"dirnames",
",",
"filenames",
")",
"in",
"salt",
".",
"utils",
".",
"path",
".",
"os_walk",
"(",
"opts",
"[",
"'token_dir'",
"]",
")",
":",
"for",
"token",
"in",
"filenames",
":",
"ret",
".",
"append",
"(",
"token",
")",
"return",
"ret"
] | 26.583333 | 21.416667 |
def partial_page(self, page, used_labels):
"""Allows a page to be marked as already partially used so you can
generate a PDF to print on the remaining labels.
Parameters
----------
page: positive integer
The page number to mark as partially used. The page must not have
already been started, i.e., for page 1 this must be called before
any labels have been started, for page 2 this must be called before
the first page is full and so on.
used_labels: iterable
An iterable of (row, column) pairs marking which labels have been
used already. The rows and columns must be within the bounds of the
sheet.
"""
# Check the page number is valid.
if page <= self.page_count:
raise ValueError("Page {0:d} has already started, cannot mark used labels now.".format(page))
# Add these to any existing labels marked as used.
used = self._used.get(page, set())
for row, column in used_labels:
# Check the index is valid.
if row < 1 or row > self.specs.rows:
raise IndexError("Invalid row number: {0:d}.".format(row))
if column < 1 or column > self.specs.columns:
raise IndexError("Invalid column number: {0:d}.".format(column))
# Add it.
used.add((int(row), int(column)))
# Save the details.
self._used[page] = used | [
"def",
"partial_page",
"(",
"self",
",",
"page",
",",
"used_labels",
")",
":",
"# Check the page number is valid.",
"if",
"page",
"<=",
"self",
".",
"page_count",
":",
"raise",
"ValueError",
"(",
"\"Page {0:d} has already started, cannot mark used labels now.\"",
".",
"format",
"(",
"page",
")",
")",
"# Add these to any existing labels marked as used.",
"used",
"=",
"self",
".",
"_used",
".",
"get",
"(",
"page",
",",
"set",
"(",
")",
")",
"for",
"row",
",",
"column",
"in",
"used_labels",
":",
"# Check the index is valid.",
"if",
"row",
"<",
"1",
"or",
"row",
">",
"self",
".",
"specs",
".",
"rows",
":",
"raise",
"IndexError",
"(",
"\"Invalid row number: {0:d}.\"",
".",
"format",
"(",
"row",
")",
")",
"if",
"column",
"<",
"1",
"or",
"column",
">",
"self",
".",
"specs",
".",
"columns",
":",
"raise",
"IndexError",
"(",
"\"Invalid column number: {0:d}.\"",
".",
"format",
"(",
"column",
")",
")",
"# Add it.",
"used",
".",
"add",
"(",
"(",
"int",
"(",
"row",
")",
",",
"int",
"(",
"column",
")",
")",
")",
"# Save the details.",
"self",
".",
"_used",
"[",
"page",
"]",
"=",
"used"
] | 42.057143 | 21.057143 |
def prg_rom(self):
"""Return the PRG ROM of the ROM file."""
try:
return self.raw_data[self.prg_rom_start:self.prg_rom_stop]
except IndexError:
raise ValueError('failed to read PRG-ROM on ROM.') | [
"def",
"prg_rom",
"(",
"self",
")",
":",
"try",
":",
"return",
"self",
".",
"raw_data",
"[",
"self",
".",
"prg_rom_start",
":",
"self",
".",
"prg_rom_stop",
"]",
"except",
"IndexError",
":",
"raise",
"ValueError",
"(",
"'failed to read PRG-ROM on ROM.'",
")"
] | 39.5 | 19.333333 |
def append(self, value, key=''):
"""It allows to add child widgets to this.
The key allows to access the specific child in this way widget.children[key].
The key have to be numeric and determines the children order in the layout.
Args:
value (Widget): Child instance to be appended.
key (str): Unique identifier for the child. If key.isdigit()==True '0' '1'.. the value determines the order
in the layout
"""
if type(value) in (list, tuple, dict):
if type(value)==dict:
for k in value.keys():
self.append(value[k], k)
return value.keys()
keys = []
for child in value:
keys.append( self.append(child) )
return keys
key = str(key)
if not isinstance(value, Widget):
raise ValueError('value should be a Widget (otherwise use add_child(key,other)')
if 'left' in value.style.keys():
del value.style['left']
if 'right' in value.style.keys():
del value.style['right']
if not 'order' in value.style.keys():
value.style.update({'position':'static', 'order':'-1'})
if key.isdigit():
value.style['order'] = key
key = value.identifier if key == '' else key
self.add_child(key, value)
return key | [
"def",
"append",
"(",
"self",
",",
"value",
",",
"key",
"=",
"''",
")",
":",
"if",
"type",
"(",
"value",
")",
"in",
"(",
"list",
",",
"tuple",
",",
"dict",
")",
":",
"if",
"type",
"(",
"value",
")",
"==",
"dict",
":",
"for",
"k",
"in",
"value",
".",
"keys",
"(",
")",
":",
"self",
".",
"append",
"(",
"value",
"[",
"k",
"]",
",",
"k",
")",
"return",
"value",
".",
"keys",
"(",
")",
"keys",
"=",
"[",
"]",
"for",
"child",
"in",
"value",
":",
"keys",
".",
"append",
"(",
"self",
".",
"append",
"(",
"child",
")",
")",
"return",
"keys",
"key",
"=",
"str",
"(",
"key",
")",
"if",
"not",
"isinstance",
"(",
"value",
",",
"Widget",
")",
":",
"raise",
"ValueError",
"(",
"'value should be a Widget (otherwise use add_child(key,other)'",
")",
"if",
"'left'",
"in",
"value",
".",
"style",
".",
"keys",
"(",
")",
":",
"del",
"value",
".",
"style",
"[",
"'left'",
"]",
"if",
"'right'",
"in",
"value",
".",
"style",
".",
"keys",
"(",
")",
":",
"del",
"value",
".",
"style",
"[",
"'right'",
"]",
"if",
"not",
"'order'",
"in",
"value",
".",
"style",
".",
"keys",
"(",
")",
":",
"value",
".",
"style",
".",
"update",
"(",
"{",
"'position'",
":",
"'static'",
",",
"'order'",
":",
"'-1'",
"}",
")",
"if",
"key",
".",
"isdigit",
"(",
")",
":",
"value",
".",
"style",
"[",
"'order'",
"]",
"=",
"key",
"key",
"=",
"value",
".",
"identifier",
"if",
"key",
"==",
"''",
"else",
"key",
"self",
".",
"add_child",
"(",
"key",
",",
"value",
")",
"return",
"key"
] | 35.641026 | 19.358974 |
def delete_and_upload_images(client, image_type, language, base_dir):
"""
Delete and upload images with given image_type and language.
Function will stage delete and stage upload all
found images in matching folders.
"""
print('{0} {1}'.format(image_type, language))
files_in_dir = os.listdir(os.path.join(base_dir, language))
delete_result = client.deleteall(
'images', imageType=image_type, language=language)
deleted = delete_result.get('deleted', list())
for deleted_files in deleted:
print(' delete image: {0}'.format(deleted_files['id']))
for image_file in files_in_dir[:8]:
image_file_path = os.path.join(base_dir, language, image_file)
image_response = client.upload(
'images',
imageType=image_type,
language=language,
media_body=image_file_path)
print(" upload image {0} new id {1}".format(image_file, image_response['image']['id'])) | [
"def",
"delete_and_upload_images",
"(",
"client",
",",
"image_type",
",",
"language",
",",
"base_dir",
")",
":",
"print",
"(",
"'{0} {1}'",
".",
"format",
"(",
"image_type",
",",
"language",
")",
")",
"files_in_dir",
"=",
"os",
".",
"listdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"base_dir",
",",
"language",
")",
")",
"delete_result",
"=",
"client",
".",
"deleteall",
"(",
"'images'",
",",
"imageType",
"=",
"image_type",
",",
"language",
"=",
"language",
")",
"deleted",
"=",
"delete_result",
".",
"get",
"(",
"'deleted'",
",",
"list",
"(",
")",
")",
"for",
"deleted_files",
"in",
"deleted",
":",
"print",
"(",
"' delete image: {0}'",
".",
"format",
"(",
"deleted_files",
"[",
"'id'",
"]",
")",
")",
"for",
"image_file",
"in",
"files_in_dir",
"[",
":",
"8",
"]",
":",
"image_file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"base_dir",
",",
"language",
",",
"image_file",
")",
"image_response",
"=",
"client",
".",
"upload",
"(",
"'images'",
",",
"imageType",
"=",
"image_type",
",",
"language",
"=",
"language",
",",
"media_body",
"=",
"image_file_path",
")",
"print",
"(",
"\" upload image {0} new id {1}\"",
".",
"format",
"(",
"image_file",
",",
"image_response",
"[",
"'image'",
"]",
"[",
"'id'",
"]",
")",
")"
] | 39.833333 | 16.916667 |
def update(kernel=False):
"""
Upgrade all packages, skip obsoletes if ``obsoletes=0`` in ``yum.conf``.
Exclude *kernel* upgrades by default.
"""
manager = MANAGER
cmds = {'yum -y --color=never': {False: '--exclude=kernel* update', True: 'update'}}
cmd = cmds[manager][kernel]
run_as_root("%(manager)s %(cmd)s" % locals()) | [
"def",
"update",
"(",
"kernel",
"=",
"False",
")",
":",
"manager",
"=",
"MANAGER",
"cmds",
"=",
"{",
"'yum -y --color=never'",
":",
"{",
"False",
":",
"'--exclude=kernel* update'",
",",
"True",
":",
"'update'",
"}",
"}",
"cmd",
"=",
"cmds",
"[",
"manager",
"]",
"[",
"kernel",
"]",
"run_as_root",
"(",
"\"%(manager)s %(cmd)s\"",
"%",
"locals",
"(",
")",
")"
] | 34.5 | 17.7 |
def duplicated(self, subset=None, keep='first'):
"""
Return boolean Series denoting duplicate rows, optionally only
considering certain columns.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Mark duplicates as ``True`` except for the
first occurrence.
- ``last`` : Mark duplicates as ``True`` except for the
last occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
Series
"""
from pandas.core.sorting import get_group_index
from pandas._libs.hashtable import duplicated_int64, _SIZE_HINT_LIMIT
if self.empty:
return Series(dtype=bool)
def f(vals):
labels, shape = algorithms.factorize(
vals, size_hint=min(len(self), _SIZE_HINT_LIMIT))
return labels.astype('i8', copy=False), len(shape)
if subset is None:
subset = self.columns
elif (not np.iterable(subset) or
isinstance(subset, str) or
isinstance(subset, tuple) and subset in self.columns):
subset = subset,
# Verify all columns in subset exist in the queried dataframe
# Otherwise, raise a KeyError, same as if you try to __getitem__ with a
# key that doesn't exist.
diff = Index(subset).difference(self.columns)
if not diff.empty:
raise KeyError(diff)
vals = (col.values for name, col in self.iteritems()
if name in subset)
labels, shape = map(list, zip(*map(f, vals)))
ids = get_group_index(labels, shape, sort=False, xnull=False)
return Series(duplicated_int64(ids, keep), index=self.index) | [
"def",
"duplicated",
"(",
"self",
",",
"subset",
"=",
"None",
",",
"keep",
"=",
"'first'",
")",
":",
"from",
"pandas",
".",
"core",
".",
"sorting",
"import",
"get_group_index",
"from",
"pandas",
".",
"_libs",
".",
"hashtable",
"import",
"duplicated_int64",
",",
"_SIZE_HINT_LIMIT",
"if",
"self",
".",
"empty",
":",
"return",
"Series",
"(",
"dtype",
"=",
"bool",
")",
"def",
"f",
"(",
"vals",
")",
":",
"labels",
",",
"shape",
"=",
"algorithms",
".",
"factorize",
"(",
"vals",
",",
"size_hint",
"=",
"min",
"(",
"len",
"(",
"self",
")",
",",
"_SIZE_HINT_LIMIT",
")",
")",
"return",
"labels",
".",
"astype",
"(",
"'i8'",
",",
"copy",
"=",
"False",
")",
",",
"len",
"(",
"shape",
")",
"if",
"subset",
"is",
"None",
":",
"subset",
"=",
"self",
".",
"columns",
"elif",
"(",
"not",
"np",
".",
"iterable",
"(",
"subset",
")",
"or",
"isinstance",
"(",
"subset",
",",
"str",
")",
"or",
"isinstance",
"(",
"subset",
",",
"tuple",
")",
"and",
"subset",
"in",
"self",
".",
"columns",
")",
":",
"subset",
"=",
"subset",
",",
"# Verify all columns in subset exist in the queried dataframe",
"# Otherwise, raise a KeyError, same as if you try to __getitem__ with a",
"# key that doesn't exist.",
"diff",
"=",
"Index",
"(",
"subset",
")",
".",
"difference",
"(",
"self",
".",
"columns",
")",
"if",
"not",
"diff",
".",
"empty",
":",
"raise",
"KeyError",
"(",
"diff",
")",
"vals",
"=",
"(",
"col",
".",
"values",
"for",
"name",
",",
"col",
"in",
"self",
".",
"iteritems",
"(",
")",
"if",
"name",
"in",
"subset",
")",
"labels",
",",
"shape",
"=",
"map",
"(",
"list",
",",
"zip",
"(",
"*",
"map",
"(",
"f",
",",
"vals",
")",
")",
")",
"ids",
"=",
"get_group_index",
"(",
"labels",
",",
"shape",
",",
"sort",
"=",
"False",
",",
"xnull",
"=",
"False",
")",
"return",
"Series",
"(",
"duplicated_int64",
"(",
"ids",
",",
"keep",
")",
",",
"index",
"=",
"self",
".",
"index",
")"
] | 37.211538 | 20.326923 |
def decode_unicode_string(string):
"""
Decode string encoded by `unicode_string`
"""
if string.startswith('[BASE64-DATA]') and string.endswith('[/BASE64-DATA]'):
return base64.b64decode(string[len('[BASE64-DATA]'):-len('[/BASE64-DATA]')])
return string | [
"def",
"decode_unicode_string",
"(",
"string",
")",
":",
"if",
"string",
".",
"startswith",
"(",
"'[BASE64-DATA]'",
")",
"and",
"string",
".",
"endswith",
"(",
"'[/BASE64-DATA]'",
")",
":",
"return",
"base64",
".",
"b64decode",
"(",
"string",
"[",
"len",
"(",
"'[BASE64-DATA]'",
")",
":",
"-",
"len",
"(",
"'[/BASE64-DATA]'",
")",
"]",
")",
"return",
"string"
] | 39.142857 | 16.857143 |
def _find_matching_collections_internally(collections, record):
"""Find matching collections with internal engine.
:param collections: set of collections where search
:param record: record to match
"""
for name, data in iteritems(collections):
if _build_query(data['query']).match(record):
yield data['ancestors']
raise StopIteration | [
"def",
"_find_matching_collections_internally",
"(",
"collections",
",",
"record",
")",
":",
"for",
"name",
",",
"data",
"in",
"iteritems",
"(",
"collections",
")",
":",
"if",
"_build_query",
"(",
"data",
"[",
"'query'",
"]",
")",
".",
"match",
"(",
"record",
")",
":",
"yield",
"data",
"[",
"'ancestors'",
"]",
"raise",
"StopIteration"
] | 36.9 | 12.4 |
def run_mutation_aggregator(job, mutation_results, univ_options):
"""
Aggregate all the called mutations.
:param dict mutation_results: Dict of dicts of the various mutation callers in a per chromosome
format
:param dict univ_options: Dict of universal options used by almost all tools
:returns: fsID for the merged mutations file
:rtype: toil.fileStore.FileID
"""
# Setup an input data structure for the merge function
out = {}
for chrom in mutation_results['mutect'].keys():
out[chrom] = job.addChildJobFn(merge_perchrom_mutations, chrom, mutation_results,
univ_options).rv()
merged_snvs = job.addFollowOnJobFn(merge_perchrom_vcfs, out, 'merged', univ_options)
job.fileStore.logToMaster('Aggregated mutations for %s successfully' % univ_options['patient'])
return merged_snvs.rv() | [
"def",
"run_mutation_aggregator",
"(",
"job",
",",
"mutation_results",
",",
"univ_options",
")",
":",
"# Setup an input data structure for the merge function",
"out",
"=",
"{",
"}",
"for",
"chrom",
"in",
"mutation_results",
"[",
"'mutect'",
"]",
".",
"keys",
"(",
")",
":",
"out",
"[",
"chrom",
"]",
"=",
"job",
".",
"addChildJobFn",
"(",
"merge_perchrom_mutations",
",",
"chrom",
",",
"mutation_results",
",",
"univ_options",
")",
".",
"rv",
"(",
")",
"merged_snvs",
"=",
"job",
".",
"addFollowOnJobFn",
"(",
"merge_perchrom_vcfs",
",",
"out",
",",
"'merged'",
",",
"univ_options",
")",
"job",
".",
"fileStore",
".",
"logToMaster",
"(",
"'Aggregated mutations for %s successfully'",
"%",
"univ_options",
"[",
"'patient'",
"]",
")",
"return",
"merged_snvs",
".",
"rv",
"(",
")"
] | 48.666667 | 24.777778 |
def iter(context, resource, **kwargs):
"""List all resources"""
data = utils.sanitize_kwargs(**kwargs)
id = data.pop('id', None)
subresource = data.pop('subresource', None)
data['limit'] = data.get('limit', 20)
if subresource:
uri = '%s/%s/%s/%s' % (context.dci_cs_api, resource, id, subresource)
resource = subresource
else:
uri = '%s/%s' % (context.dci_cs_api, resource)
data['offset'] = 0
while True:
j = context.session.get(uri, timeout=HTTP_TIMEOUT, params=data).json()
if len(j[resource]):
for i in j[resource]:
yield i
else:
break
data['offset'] += data['limit'] | [
"def",
"iter",
"(",
"context",
",",
"resource",
",",
"*",
"*",
"kwargs",
")",
":",
"data",
"=",
"utils",
".",
"sanitize_kwargs",
"(",
"*",
"*",
"kwargs",
")",
"id",
"=",
"data",
".",
"pop",
"(",
"'id'",
",",
"None",
")",
"subresource",
"=",
"data",
".",
"pop",
"(",
"'subresource'",
",",
"None",
")",
"data",
"[",
"'limit'",
"]",
"=",
"data",
".",
"get",
"(",
"'limit'",
",",
"20",
")",
"if",
"subresource",
":",
"uri",
"=",
"'%s/%s/%s/%s'",
"%",
"(",
"context",
".",
"dci_cs_api",
",",
"resource",
",",
"id",
",",
"subresource",
")",
"resource",
"=",
"subresource",
"else",
":",
"uri",
"=",
"'%s/%s'",
"%",
"(",
"context",
".",
"dci_cs_api",
",",
"resource",
")",
"data",
"[",
"'offset'",
"]",
"=",
"0",
"while",
"True",
":",
"j",
"=",
"context",
".",
"session",
".",
"get",
"(",
"uri",
",",
"timeout",
"=",
"HTTP_TIMEOUT",
",",
"params",
"=",
"data",
")",
".",
"json",
"(",
")",
"if",
"len",
"(",
"j",
"[",
"resource",
"]",
")",
":",
"for",
"i",
"in",
"j",
"[",
"resource",
"]",
":",
"yield",
"i",
"else",
":",
"break",
"data",
"[",
"'offset'",
"]",
"+=",
"data",
"[",
"'limit'",
"]"
] | 31 | 17.454545 |
def resource_reaches_status(self, resource, resource_id,
expected_stat='available',
msg='resource', max_wait=120):
"""Wait for an openstack resources status to reach an
expected status within a specified time. Useful to confirm that
nova instances, cinder vols, snapshots, glance images, heat stacks
and other resources eventually reach the expected status.
:param resource: pointer to os resource type, ex: heat_client.stacks
:param resource_id: unique id for the openstack resource
:param expected_stat: status to expect resource to reach
:param msg: text to identify purpose in logging
:param max_wait: maximum wait time in seconds
:returns: True if successful, False if status is not reached
"""
tries = 0
resource_stat = resource.get(resource_id).status
while resource_stat != expected_stat and tries < (max_wait / 4):
self.log.debug('{} status check: '
'{} [{}:{}] {}'.format(msg, tries,
resource_stat,
expected_stat,
resource_id))
time.sleep(4)
resource_stat = resource.get(resource_id).status
tries += 1
self.log.debug('{}: expected, actual status = {}, '
'{}'.format(msg, resource_stat, expected_stat))
if resource_stat == expected_stat:
return True
else:
self.log.debug('{} never reached expected status: '
'{}'.format(resource_id, expected_stat))
return False | [
"def",
"resource_reaches_status",
"(",
"self",
",",
"resource",
",",
"resource_id",
",",
"expected_stat",
"=",
"'available'",
",",
"msg",
"=",
"'resource'",
",",
"max_wait",
"=",
"120",
")",
":",
"tries",
"=",
"0",
"resource_stat",
"=",
"resource",
".",
"get",
"(",
"resource_id",
")",
".",
"status",
"while",
"resource_stat",
"!=",
"expected_stat",
"and",
"tries",
"<",
"(",
"max_wait",
"/",
"4",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'{} status check: '",
"'{} [{}:{}] {}'",
".",
"format",
"(",
"msg",
",",
"tries",
",",
"resource_stat",
",",
"expected_stat",
",",
"resource_id",
")",
")",
"time",
".",
"sleep",
"(",
"4",
")",
"resource_stat",
"=",
"resource",
".",
"get",
"(",
"resource_id",
")",
".",
"status",
"tries",
"+=",
"1",
"self",
".",
"log",
".",
"debug",
"(",
"'{}: expected, actual status = {}, '",
"'{}'",
".",
"format",
"(",
"msg",
",",
"resource_stat",
",",
"expected_stat",
")",
")",
"if",
"resource_stat",
"==",
"expected_stat",
":",
"return",
"True",
"else",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'{} never reached expected status: '",
"'{}'",
".",
"format",
"(",
"resource_id",
",",
"expected_stat",
")",
")",
"return",
"False"
] | 47.567568 | 22.702703 |
def get_messages(self, params={}):
"""
List messages
http://dev.wheniwork.com/#listing-messages
"""
param_list = [(k, params[k]) for k in sorted(params)]
url = "/2/messages/?%s" % urlencode(param_list)
data = self._get_resource(url)
messages = []
for entry in data["messages"]:
messages.append(self.message_from_json(entry))
return messages | [
"def",
"get_messages",
"(",
"self",
",",
"params",
"=",
"{",
"}",
")",
":",
"param_list",
"=",
"[",
"(",
"k",
",",
"params",
"[",
"k",
"]",
")",
"for",
"k",
"in",
"sorted",
"(",
"params",
")",
"]",
"url",
"=",
"\"/2/messages/?%s\"",
"%",
"urlencode",
"(",
"param_list",
")",
"data",
"=",
"self",
".",
"_get_resource",
"(",
"url",
")",
"messages",
"=",
"[",
"]",
"for",
"entry",
"in",
"data",
"[",
"\"messages\"",
"]",
":",
"messages",
".",
"append",
"(",
"self",
".",
"message_from_json",
"(",
"entry",
")",
")",
"return",
"messages"
] | 28.066667 | 16.6 |
def reset(self):
""" Resets the form. """
self.titleedit.set_edit_text("")
self.sortedit.set_edit_text("")
self.filteredit.set_edit_text("")
self.relevantradio.set_state(True)
self.pile.focus_item = 0 | [
"def",
"reset",
"(",
"self",
")",
":",
"self",
".",
"titleedit",
".",
"set_edit_text",
"(",
"\"\"",
")",
"self",
".",
"sortedit",
".",
"set_edit_text",
"(",
"\"\"",
")",
"self",
".",
"filteredit",
".",
"set_edit_text",
"(",
"\"\"",
")",
"self",
".",
"relevantradio",
".",
"set_state",
"(",
"True",
")",
"self",
".",
"pile",
".",
"focus_item",
"=",
"0"
] | 34.571429 | 5.142857 |
def check_lt(self):
"""
Check is the POSTed LoginTicket is valid, if yes invalide it
:return: ``True`` if the LoginTicket is valid, ``False`` otherwise
:rtype: bool
"""
# save LT for later check
lt_valid = self.request.session.get('lt', [])
lt_send = self.request.POST.get('lt')
# generate a new LT (by posting the LT has been consumed)
self.gen_lt()
# check if send LT is valid
if lt_send not in lt_valid:
return False
else:
self.request.session['lt'].remove(lt_send)
# we need to redo the affectation for django to detect that the list has changed
# and for its new value to be store in the session
self.request.session['lt'] = self.request.session['lt']
return True | [
"def",
"check_lt",
"(",
"self",
")",
":",
"# save LT for later check",
"lt_valid",
"=",
"self",
".",
"request",
".",
"session",
".",
"get",
"(",
"'lt'",
",",
"[",
"]",
")",
"lt_send",
"=",
"self",
".",
"request",
".",
"POST",
".",
"get",
"(",
"'lt'",
")",
"# generate a new LT (by posting the LT has been consumed)",
"self",
".",
"gen_lt",
"(",
")",
"# check if send LT is valid",
"if",
"lt_send",
"not",
"in",
"lt_valid",
":",
"return",
"False",
"else",
":",
"self",
".",
"request",
".",
"session",
"[",
"'lt'",
"]",
".",
"remove",
"(",
"lt_send",
")",
"# we need to redo the affectation for django to detect that the list has changed",
"# and for its new value to be store in the session",
"self",
".",
"request",
".",
"session",
"[",
"'lt'",
"]",
"=",
"self",
".",
"request",
".",
"session",
"[",
"'lt'",
"]",
"return",
"True"
] | 39.857143 | 19.095238 |
def get_api_required_params(self):
""" List with required params
:return: Dictionary with API parameters
:raises ValueError: If value of __class__.required_params is not list
:rtype: list
"""
result = self.required_params
if type(result) != list:
raise ValueError(
'{}.required_params should return list'.format(
self.__class__.__name__
)
)
return result | [
"def",
"get_api_required_params",
"(",
"self",
")",
":",
"result",
"=",
"self",
".",
"required_params",
"if",
"type",
"(",
"result",
")",
"!=",
"list",
":",
"raise",
"ValueError",
"(",
"'{}.required_params should return list'",
".",
"format",
"(",
"self",
".",
"__class__",
".",
"__name__",
")",
")",
"return",
"result"
] | 32.066667 | 15.133333 |
def reduce_to_cycles(self):
"""
Iteratively eliminate leafs to reduce the set of objects to only those
that build cycles. Return the reduced graph. If there are no cycles,
None is returned.
"""
if not self._reduced:
reduced = copy(self)
reduced.objects = self.objects[:]
reduced.metadata = []
reduced.edges = []
self.num_in_cycles = reduced._reduce_to_cycles()
reduced.num_in_cycles = self.num_in_cycles
if self.num_in_cycles:
reduced._get_edges()
reduced._annotate_objects()
for meta in reduced.metadata:
meta.cycle = True
else:
reduced = None
self._reduced = reduced
return self._reduced | [
"def",
"reduce_to_cycles",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_reduced",
":",
"reduced",
"=",
"copy",
"(",
"self",
")",
"reduced",
".",
"objects",
"=",
"self",
".",
"objects",
"[",
":",
"]",
"reduced",
".",
"metadata",
"=",
"[",
"]",
"reduced",
".",
"edges",
"=",
"[",
"]",
"self",
".",
"num_in_cycles",
"=",
"reduced",
".",
"_reduce_to_cycles",
"(",
")",
"reduced",
".",
"num_in_cycles",
"=",
"self",
".",
"num_in_cycles",
"if",
"self",
".",
"num_in_cycles",
":",
"reduced",
".",
"_get_edges",
"(",
")",
"reduced",
".",
"_annotate_objects",
"(",
")",
"for",
"meta",
"in",
"reduced",
".",
"metadata",
":",
"meta",
".",
"cycle",
"=",
"True",
"else",
":",
"reduced",
"=",
"None",
"self",
".",
"_reduced",
"=",
"reduced",
"return",
"self",
".",
"_reduced"
] | 37.090909 | 11.272727 |
def parse_value(self, querydict):
""" extract value
extarct value from querydict and convert it to native
missing and empty values result to None
"""
value = self.field.get_value(querydict)
if value in (None, fields.empty, ''):
return None
return self.field.to_internal_value(value) | [
"def",
"parse_value",
"(",
"self",
",",
"querydict",
")",
":",
"value",
"=",
"self",
".",
"field",
".",
"get_value",
"(",
"querydict",
")",
"if",
"value",
"in",
"(",
"None",
",",
"fields",
".",
"empty",
",",
"''",
")",
":",
"return",
"None",
"return",
"self",
".",
"field",
".",
"to_internal_value",
"(",
"value",
")"
] | 34.2 | 11.4 |
def save(self):
"""
Saves changes made to the locally cached Document object's data
structures to the remote database. If the document does not exist
remotely then it is created in the remote database. If the object
does exist remotely then the document is updated remotely. In either
case the locally cached Document object is also updated accordingly
based on the successful response of the operation.
"""
headers = {}
headers.setdefault('Content-Type', 'application/json')
if not self.exists():
self.create()
return
put_resp = self.r_session.put(
self.document_url,
data=self.json(),
headers=headers
)
put_resp.raise_for_status()
data = response_to_json_dict(put_resp)
super(Document, self).__setitem__('_rev', data['rev'])
return | [
"def",
"save",
"(",
"self",
")",
":",
"headers",
"=",
"{",
"}",
"headers",
".",
"setdefault",
"(",
"'Content-Type'",
",",
"'application/json'",
")",
"if",
"not",
"self",
".",
"exists",
"(",
")",
":",
"self",
".",
"create",
"(",
")",
"return",
"put_resp",
"=",
"self",
".",
"r_session",
".",
"put",
"(",
"self",
".",
"document_url",
",",
"data",
"=",
"self",
".",
"json",
"(",
")",
",",
"headers",
"=",
"headers",
")",
"put_resp",
".",
"raise_for_status",
"(",
")",
"data",
"=",
"response_to_json_dict",
"(",
"put_resp",
")",
"super",
"(",
"Document",
",",
"self",
")",
".",
"__setitem__",
"(",
"'_rev'",
",",
"data",
"[",
"'rev'",
"]",
")",
"return"
] | 39.565217 | 18.695652 |
def extract_spans(html_string):
"""
Creates a list of the spanned cell groups of [row, column] pairs.
Parameters
----------
html_string : str
Returns
-------
list of lists of lists of int
"""
try:
from bs4 import BeautifulSoup
except ImportError:
print("ERROR: You must have BeautifulSoup to use html2data")
return
soup = BeautifulSoup(html_string, 'html.parser')
table = soup.find('table')
if not table:
return []
trs = table.findAll('tr')
if len(trs) == 0:
return []
spans = []
for tr in range(len(trs)):
if tr == 0:
ths = trs[tr].findAll('th')
if len(ths) == 0:
ths = trs[tr].findAll('td')
tds = ths
else:
tds = trs[tr].findAll('td')
column = 0
for td in tds:
r_span_count = 1
c_span_count = 1
current_column = column
if td.has_attr('rowspan'):
r_span_count = int(td['rowspan'])
if td.has_attr('colspan'):
c_span_count = int(td['colspan'])
column += c_span_count
else:
column += 1
new_span = []
for r_index in range(tr, tr + r_span_count):
for c_index in range(current_column, column):
if not get_span(spans, r_index, c_index):
new_span.append([r_index, c_index])
if len(new_span) > 0:
spans.append(new_span)
return spans | [
"def",
"extract_spans",
"(",
"html_string",
")",
":",
"try",
":",
"from",
"bs4",
"import",
"BeautifulSoup",
"except",
"ImportError",
":",
"print",
"(",
"\"ERROR: You must have BeautifulSoup to use html2data\"",
")",
"return",
"soup",
"=",
"BeautifulSoup",
"(",
"html_string",
",",
"'html.parser'",
")",
"table",
"=",
"soup",
".",
"find",
"(",
"'table'",
")",
"if",
"not",
"table",
":",
"return",
"[",
"]",
"trs",
"=",
"table",
".",
"findAll",
"(",
"'tr'",
")",
"if",
"len",
"(",
"trs",
")",
"==",
"0",
":",
"return",
"[",
"]",
"spans",
"=",
"[",
"]",
"for",
"tr",
"in",
"range",
"(",
"len",
"(",
"trs",
")",
")",
":",
"if",
"tr",
"==",
"0",
":",
"ths",
"=",
"trs",
"[",
"tr",
"]",
".",
"findAll",
"(",
"'th'",
")",
"if",
"len",
"(",
"ths",
")",
"==",
"0",
":",
"ths",
"=",
"trs",
"[",
"tr",
"]",
".",
"findAll",
"(",
"'td'",
")",
"tds",
"=",
"ths",
"else",
":",
"tds",
"=",
"trs",
"[",
"tr",
"]",
".",
"findAll",
"(",
"'td'",
")",
"column",
"=",
"0",
"for",
"td",
"in",
"tds",
":",
"r_span_count",
"=",
"1",
"c_span_count",
"=",
"1",
"current_column",
"=",
"column",
"if",
"td",
".",
"has_attr",
"(",
"'rowspan'",
")",
":",
"r_span_count",
"=",
"int",
"(",
"td",
"[",
"'rowspan'",
"]",
")",
"if",
"td",
".",
"has_attr",
"(",
"'colspan'",
")",
":",
"c_span_count",
"=",
"int",
"(",
"td",
"[",
"'colspan'",
"]",
")",
"column",
"+=",
"c_span_count",
"else",
":",
"column",
"+=",
"1",
"new_span",
"=",
"[",
"]",
"for",
"r_index",
"in",
"range",
"(",
"tr",
",",
"tr",
"+",
"r_span_count",
")",
":",
"for",
"c_index",
"in",
"range",
"(",
"current_column",
",",
"column",
")",
":",
"if",
"not",
"get_span",
"(",
"spans",
",",
"r_index",
",",
"c_index",
")",
":",
"new_span",
".",
"append",
"(",
"[",
"r_index",
",",
"c_index",
"]",
")",
"if",
"len",
"(",
"new_span",
")",
">",
"0",
":",
"spans",
".",
"append",
"(",
"new_span",
")",
"return",
"spans"
] | 25.147541 | 19.245902 |
def run(self):
"""Runs the sampler."""
if self.target_eff_nsamples and self.checkpoint_interval is None:
raise ValueError("A checkpoint interval must be set if "
"targetting an effective number of samples")
# get the starting number of samples:
# "nsamples" keeps track of the number of samples we've obtained (if
# target_eff_nsamples is not None, this is the effective number of
# samples; otherwise, this is the total number of samples).
# _lastclear is the number of iterations that the file already
# contains (either due to sampler burn-in, or a previous checkpoint)
if self.new_checkpoint:
self._lastclear = 0
else:
with self.io(self.checkpoint_file, "r") as fp:
self._lastclear = fp.niterations
if self.target_eff_nsamples is not None:
target_nsamples = self.target_eff_nsamples
with self.io(self.checkpoint_file, "r") as fp:
nsamples = fp.effective_nsamples
elif self.target_niterations is not None:
# the number of samples is the number of iterations times the
# number of walkers
target_nsamples = self.nwalkers * self.target_niterations
nsamples = self._lastclear * self.nwalkers
else:
raise ValueError("must set either target_eff_nsamples or "
"target_niterations; see set_target")
self._itercounter = 0
# figure out the interval to use
iterinterval = self.checkpoint_interval
if iterinterval is None:
iterinterval = self.target_niterations
# run sampler until we have the desired number of samples
while nsamples < target_nsamples:
# adjust the interval if we would go past the number of iterations
if self.target_niterations is not None and (
self.niterations + iterinterval > self.target_niterations):
iterinterval = self.target_niterations - self.niterations
# run sampler and set initial values to None so that sampler
# picks up from where it left off next call
logging.info("Running sampler for {} to {} iterations".format(
self.niterations, self.niterations + iterinterval))
# run the underlying sampler for the desired interval
self.run_mcmc(iterinterval)
# update the itercounter
self._itercounter = self._itercounter + iterinterval
# dump the current results
self.checkpoint()
# update nsamples for next loop
if self.target_eff_nsamples is not None:
nsamples = self.effective_nsamples
logging.info("Have {} effective samples post burn in".format(
nsamples))
else:
nsamples += iterinterval * self.nwalkers | [
"def",
"run",
"(",
"self",
")",
":",
"if",
"self",
".",
"target_eff_nsamples",
"and",
"self",
".",
"checkpoint_interval",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"A checkpoint interval must be set if \"",
"\"targetting an effective number of samples\"",
")",
"# get the starting number of samples:",
"# \"nsamples\" keeps track of the number of samples we've obtained (if",
"# target_eff_nsamples is not None, this is the effective number of",
"# samples; otherwise, this is the total number of samples).",
"# _lastclear is the number of iterations that the file already",
"# contains (either due to sampler burn-in, or a previous checkpoint)",
"if",
"self",
".",
"new_checkpoint",
":",
"self",
".",
"_lastclear",
"=",
"0",
"else",
":",
"with",
"self",
".",
"io",
"(",
"self",
".",
"checkpoint_file",
",",
"\"r\"",
")",
"as",
"fp",
":",
"self",
".",
"_lastclear",
"=",
"fp",
".",
"niterations",
"if",
"self",
".",
"target_eff_nsamples",
"is",
"not",
"None",
":",
"target_nsamples",
"=",
"self",
".",
"target_eff_nsamples",
"with",
"self",
".",
"io",
"(",
"self",
".",
"checkpoint_file",
",",
"\"r\"",
")",
"as",
"fp",
":",
"nsamples",
"=",
"fp",
".",
"effective_nsamples",
"elif",
"self",
".",
"target_niterations",
"is",
"not",
"None",
":",
"# the number of samples is the number of iterations times the",
"# number of walkers",
"target_nsamples",
"=",
"self",
".",
"nwalkers",
"*",
"self",
".",
"target_niterations",
"nsamples",
"=",
"self",
".",
"_lastclear",
"*",
"self",
".",
"nwalkers",
"else",
":",
"raise",
"ValueError",
"(",
"\"must set either target_eff_nsamples or \"",
"\"target_niterations; see set_target\"",
")",
"self",
".",
"_itercounter",
"=",
"0",
"# figure out the interval to use",
"iterinterval",
"=",
"self",
".",
"checkpoint_interval",
"if",
"iterinterval",
"is",
"None",
":",
"iterinterval",
"=",
"self",
".",
"target_niterations",
"# run sampler until we have the desired number of samples",
"while",
"nsamples",
"<",
"target_nsamples",
":",
"# adjust the interval if we would go past the number of iterations",
"if",
"self",
".",
"target_niterations",
"is",
"not",
"None",
"and",
"(",
"self",
".",
"niterations",
"+",
"iterinterval",
">",
"self",
".",
"target_niterations",
")",
":",
"iterinterval",
"=",
"self",
".",
"target_niterations",
"-",
"self",
".",
"niterations",
"# run sampler and set initial values to None so that sampler",
"# picks up from where it left off next call",
"logging",
".",
"info",
"(",
"\"Running sampler for {} to {} iterations\"",
".",
"format",
"(",
"self",
".",
"niterations",
",",
"self",
".",
"niterations",
"+",
"iterinterval",
")",
")",
"# run the underlying sampler for the desired interval",
"self",
".",
"run_mcmc",
"(",
"iterinterval",
")",
"# update the itercounter",
"self",
".",
"_itercounter",
"=",
"self",
".",
"_itercounter",
"+",
"iterinterval",
"# dump the current results",
"self",
".",
"checkpoint",
"(",
")",
"# update nsamples for next loop",
"if",
"self",
".",
"target_eff_nsamples",
"is",
"not",
"None",
":",
"nsamples",
"=",
"self",
".",
"effective_nsamples",
"logging",
".",
"info",
"(",
"\"Have {} effective samples post burn in\"",
".",
"format",
"(",
"nsamples",
")",
")",
"else",
":",
"nsamples",
"+=",
"iterinterval",
"*",
"self",
".",
"nwalkers"
] | 52.410714 | 18.892857 |
def open(self, _file, target=DEFAULT_TARGET):
"""
Open the existing file for reading.
@param _file : A filename of file descriptor.
@param target: A user-specific BFD target name.
@return : None
"""
# Close any existing BFD structure instance.
self.close()
#
# STEP 1. Open the BFD pointer.
#
# Determine if the user passed a file-descriptor or a _file and
# proceed accordingly.
if type(_file) is FileType:
# The user specified a file descriptor.
filename = _file.name
if islink(filename):
raise BfdException("Symlinks file-descriptors are not valid")
try:
self._ptr = _bfd.fdopenr(filename, target, dup(_file.fileno()))
except Exception, err:
raise BfdException(
"Unable to open file-descriptor %s : %s" % (filename, err))
elif type(_file) is StringType:
# The user spcified a filaname so first check if file exists.
filename = _file
try:
with open(_file): pass
except IOError:
raise BfdException("File %s does not exist." % filename)
#
# Proceed to open the specified file and create a new BFD.
#
try:
self._ptr = _bfd.openr(filename, target)
except (TypeError, IOError), err:
raise BfdException(
"Unable to open file %s : %s" % (filename, err))
elif type(_file) is IntType:
# The user specified an already-open BFD pointer so we avoid any
# further open operation and move on to file format recognition.
self._ptr = _file
else:
raise BfdException(
"Invalid file type specified for open operation (%r)" % _file)
#
# STEP 2. Determine file format of the BFD.
#
# Now that the BFD is open we'll proceed to determine its file format.
# We'll use the objdump logic to determine it and raise an error in
# case we were unable to get it right.
#
try:
# Type opening it as an archieve and if it success then check
# subfiles.
if _bfd.check_format(self._ptr, BfdFormat.ARCHIVE):
# Set current format and store the inner file list.
self.file_format = BfdFormat.ARCHIVE
self.__populate_archive_files()
else:
# DO NOT USE bfd_check_format_matches() becuase its not tested.
# An implementation example if on objdump.c at function
# display_bfd().
if _bfd.check_format(self._ptr, BfdFormat.OBJECT):
self.file_format = BfdFormat.OBJECT
elif _bfd.check_format(self._ptr, BfdFormat.CORE):
self.file_format = BfdFormat.CORE
else:
pass
raise BfdException(_bfd.get_last_error_message())
except TypeError, err:
raise BfdException(
"Unable to initialize file format : %s" % err)
#
# STEP 3. Extract inner sections and symbolic information.
#
if self._ptr is not None:
# If the file is a valid BFD file format but not an archive then
# get its sections and symbolic information (if any).
if self.file_format in [BfdFormat.OBJECT, BfdFormat.CORE]:
self.__populate_sections()
self.__populate_symbols() | [
"def",
"open",
"(",
"self",
",",
"_file",
",",
"target",
"=",
"DEFAULT_TARGET",
")",
":",
"# Close any existing BFD structure instance. ",
"self",
".",
"close",
"(",
")",
"#",
"# STEP 1. Open the BFD pointer.",
"#",
"# Determine if the user passed a file-descriptor or a _file and",
"# proceed accordingly.",
"if",
"type",
"(",
"_file",
")",
"is",
"FileType",
":",
"# The user specified a file descriptor.",
"filename",
"=",
"_file",
".",
"name",
"if",
"islink",
"(",
"filename",
")",
":",
"raise",
"BfdException",
"(",
"\"Symlinks file-descriptors are not valid\"",
")",
"try",
":",
"self",
".",
"_ptr",
"=",
"_bfd",
".",
"fdopenr",
"(",
"filename",
",",
"target",
",",
"dup",
"(",
"_file",
".",
"fileno",
"(",
")",
")",
")",
"except",
"Exception",
",",
"err",
":",
"raise",
"BfdException",
"(",
"\"Unable to open file-descriptor %s : %s\"",
"%",
"(",
"filename",
",",
"err",
")",
")",
"elif",
"type",
"(",
"_file",
")",
"is",
"StringType",
":",
"# The user spcified a filaname so first check if file exists.",
"filename",
"=",
"_file",
"try",
":",
"with",
"open",
"(",
"_file",
")",
":",
"pass",
"except",
"IOError",
":",
"raise",
"BfdException",
"(",
"\"File %s does not exist.\"",
"%",
"filename",
")",
"#",
"# Proceed to open the specified file and create a new BFD.",
"#",
"try",
":",
"self",
".",
"_ptr",
"=",
"_bfd",
".",
"openr",
"(",
"filename",
",",
"target",
")",
"except",
"(",
"TypeError",
",",
"IOError",
")",
",",
"err",
":",
"raise",
"BfdException",
"(",
"\"Unable to open file %s : %s\"",
"%",
"(",
"filename",
",",
"err",
")",
")",
"elif",
"type",
"(",
"_file",
")",
"is",
"IntType",
":",
"# The user specified an already-open BFD pointer so we avoid any",
"# further open operation and move on to file format recognition.",
"self",
".",
"_ptr",
"=",
"_file",
"else",
":",
"raise",
"BfdException",
"(",
"\"Invalid file type specified for open operation (%r)\"",
"%",
"_file",
")",
"#",
"# STEP 2. Determine file format of the BFD.",
"#",
"# Now that the BFD is open we'll proceed to determine its file format.",
"# We'll use the objdump logic to determine it and raise an error in",
"# case we were unable to get it right.",
"#",
"try",
":",
"# Type opening it as an archieve and if it success then check",
"# subfiles.",
"if",
"_bfd",
".",
"check_format",
"(",
"self",
".",
"_ptr",
",",
"BfdFormat",
".",
"ARCHIVE",
")",
":",
"# Set current format and store the inner file list.",
"self",
".",
"file_format",
"=",
"BfdFormat",
".",
"ARCHIVE",
"self",
".",
"__populate_archive_files",
"(",
")",
"else",
":",
"# DO NOT USE bfd_check_format_matches() becuase its not tested.",
"# An implementation example if on objdump.c at function",
"# display_bfd().",
"if",
"_bfd",
".",
"check_format",
"(",
"self",
".",
"_ptr",
",",
"BfdFormat",
".",
"OBJECT",
")",
":",
"self",
".",
"file_format",
"=",
"BfdFormat",
".",
"OBJECT",
"elif",
"_bfd",
".",
"check_format",
"(",
"self",
".",
"_ptr",
",",
"BfdFormat",
".",
"CORE",
")",
":",
"self",
".",
"file_format",
"=",
"BfdFormat",
".",
"CORE",
"else",
":",
"pass",
"raise",
"BfdException",
"(",
"_bfd",
".",
"get_last_error_message",
"(",
")",
")",
"except",
"TypeError",
",",
"err",
":",
"raise",
"BfdException",
"(",
"\"Unable to initialize file format : %s\"",
"%",
"err",
")",
"#",
"# STEP 3. Extract inner sections and symbolic information.",
"#",
"if",
"self",
".",
"_ptr",
"is",
"not",
"None",
":",
"# If the file is a valid BFD file format but not an archive then",
"# get its sections and symbolic information (if any).",
"if",
"self",
".",
"file_format",
"in",
"[",
"BfdFormat",
".",
"OBJECT",
",",
"BfdFormat",
".",
"CORE",
"]",
":",
"self",
".",
"__populate_sections",
"(",
")",
"self",
".",
"__populate_symbols",
"(",
")"
] | 36.846939 | 22.418367 |
def get_file_out(build_index, python_name, jenkin_name):
"""
This function will grab one log file from Jenkins and save it to local user directory
:param g_jenkins_url:
:param build_index:
:param airline_java:
:param airline_java_tail:
:return:
"""
global g_log_base_dir
global g_jenkins_url
global g_log_base_dir
directoryB = g_log_base_dir+'/Build'+str(build_index)
if not(os.path.isdir(directoryB)): # make directory if it does not exist
os.mkdir(directoryB)
url_string_full = g_jenkins_url+'/'+str(build_index)+jenkin_name
filename = os.path.join(directoryB, python_name)
full_command = 'curl ' + url_string_full + ' > ' + filename
subprocess.call(full_command,shell=True) | [
"def",
"get_file_out",
"(",
"build_index",
",",
"python_name",
",",
"jenkin_name",
")",
":",
"global",
"g_log_base_dir",
"global",
"g_jenkins_url",
"global",
"g_log_base_dir",
"directoryB",
"=",
"g_log_base_dir",
"+",
"'/Build'",
"+",
"str",
"(",
"build_index",
")",
"if",
"not",
"(",
"os",
".",
"path",
".",
"isdir",
"(",
"directoryB",
")",
")",
":",
"# make directory if it does not exist",
"os",
".",
"mkdir",
"(",
"directoryB",
")",
"url_string_full",
"=",
"g_jenkins_url",
"+",
"'/'",
"+",
"str",
"(",
"build_index",
")",
"+",
"jenkin_name",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directoryB",
",",
"python_name",
")",
"full_command",
"=",
"'curl '",
"+",
"url_string_full",
"+",
"' > '",
"+",
"filename",
"subprocess",
".",
"call",
"(",
"full_command",
",",
"shell",
"=",
"True",
")"
] | 31.913043 | 21.304348 |
def textfsm_extractor(cls, template_name, raw_text):
"""
Applies a TextFSM template over a raw text and return the matching table.
Main usage of this method will be to extract data form a non-structured output
from a network device and return the values in a table format.
:param cls: Instance of the driver class
:param template_name: Specifies the name of the template to be used
:param raw_text: Text output as the devices prompts on the CLI
:return: table-like list of entries
"""
textfsm_data = list()
cls.__class__.__name__.replace('Driver', '')
current_dir = os.path.dirname(os.path.abspath(sys.modules[cls.__module__].__file__))
template_dir_path = '{current_dir}/utils/textfsm_templates'.format(
current_dir=current_dir
)
template_path = '{template_dir_path}/{template_name}.tpl'.format(
template_dir_path=template_dir_path,
template_name=template_name
)
try:
fsm_handler = textfsm.TextFSM(open(template_path))
except IOError:
raise napalm_base.exceptions.TemplateNotImplemented(
"TextFSM template {template_name}.tpl is not defined under {path}".format(
template_name=template_name,
path=template_dir_path
)
)
except textfsm.TextFSMTemplateError as tfte:
raise napalm_base.exceptions.TemplateRenderException(
"Wrong format of TextFSM template {template_name}: {error}".format(
template_name=template_name,
error=py23_compat.text_type(tfte)
)
)
objects = fsm_handler.ParseText(raw_text)
for obj in objects:
index = 0
entry = {}
for entry_value in obj:
entry[fsm_handler.header[index].lower()] = entry_value
index += 1
textfsm_data.append(entry)
return textfsm_data | [
"def",
"textfsm_extractor",
"(",
"cls",
",",
"template_name",
",",
"raw_text",
")",
":",
"textfsm_data",
"=",
"list",
"(",
")",
"cls",
".",
"__class__",
".",
"__name__",
".",
"replace",
"(",
"'Driver'",
",",
"''",
")",
"current_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"sys",
".",
"modules",
"[",
"cls",
".",
"__module__",
"]",
".",
"__file__",
")",
")",
"template_dir_path",
"=",
"'{current_dir}/utils/textfsm_templates'",
".",
"format",
"(",
"current_dir",
"=",
"current_dir",
")",
"template_path",
"=",
"'{template_dir_path}/{template_name}.tpl'",
".",
"format",
"(",
"template_dir_path",
"=",
"template_dir_path",
",",
"template_name",
"=",
"template_name",
")",
"try",
":",
"fsm_handler",
"=",
"textfsm",
".",
"TextFSM",
"(",
"open",
"(",
"template_path",
")",
")",
"except",
"IOError",
":",
"raise",
"napalm_base",
".",
"exceptions",
".",
"TemplateNotImplemented",
"(",
"\"TextFSM template {template_name}.tpl is not defined under {path}\"",
".",
"format",
"(",
"template_name",
"=",
"template_name",
",",
"path",
"=",
"template_dir_path",
")",
")",
"except",
"textfsm",
".",
"TextFSMTemplateError",
"as",
"tfte",
":",
"raise",
"napalm_base",
".",
"exceptions",
".",
"TemplateRenderException",
"(",
"\"Wrong format of TextFSM template {template_name}: {error}\"",
".",
"format",
"(",
"template_name",
"=",
"template_name",
",",
"error",
"=",
"py23_compat",
".",
"text_type",
"(",
"tfte",
")",
")",
")",
"objects",
"=",
"fsm_handler",
".",
"ParseText",
"(",
"raw_text",
")",
"for",
"obj",
"in",
"objects",
":",
"index",
"=",
"0",
"entry",
"=",
"{",
"}",
"for",
"entry_value",
"in",
"obj",
":",
"entry",
"[",
"fsm_handler",
".",
"header",
"[",
"index",
"]",
".",
"lower",
"(",
")",
"]",
"=",
"entry_value",
"index",
"+=",
"1",
"textfsm_data",
".",
"append",
"(",
"entry",
")",
"return",
"textfsm_data"
] | 36.254902 | 21.980392 |
def check_output(*popenargs, **kwargs):
r"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=STDOUT)
'ls: non_existent_file: No such file or directory\n'
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = Popen(stdout=PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise CalledProcessError(retcode, cmd, output=output)
return output | [
"def",
"check_output",
"(",
"*",
"popenargs",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"'stdout'",
"in",
"kwargs",
":",
"raise",
"ValueError",
"(",
"'stdout argument not allowed, it will be overridden.'",
")",
"process",
"=",
"Popen",
"(",
"stdout",
"=",
"PIPE",
",",
"*",
"popenargs",
",",
"*",
"*",
"kwargs",
")",
"output",
",",
"unused_err",
"=",
"process",
".",
"communicate",
"(",
")",
"retcode",
"=",
"process",
".",
"poll",
"(",
")",
"if",
"retcode",
":",
"cmd",
"=",
"kwargs",
".",
"get",
"(",
"\"args\"",
")",
"if",
"cmd",
"is",
"None",
":",
"cmd",
"=",
"popenargs",
"[",
"0",
"]",
"raise",
"CalledProcessError",
"(",
"retcode",
",",
"cmd",
",",
"output",
"=",
"output",
")",
"return",
"output"
] | 39.096774 | 19.516129 |
def run_matrix(self, matrix_definition, document):
"""
Running pipeline via a matrix.
Args:
matrix_definition (dict): one concrete matrix item.
document (dict): spline document (complete) as loaded from yaml file.
"""
matrix = Matrix(matrix_definition, 'matrix(parallel)' in document)
process_data = MatrixProcessData()
process_data.options = self.options
process_data.pipeline = document['pipeline']
process_data.model = {} if 'model' not in document else document['model']
process_data.hooks = Hooks(document)
return matrix.process(process_data) | [
"def",
"run_matrix",
"(",
"self",
",",
"matrix_definition",
",",
"document",
")",
":",
"matrix",
"=",
"Matrix",
"(",
"matrix_definition",
",",
"'matrix(parallel)'",
"in",
"document",
")",
"process_data",
"=",
"MatrixProcessData",
"(",
")",
"process_data",
".",
"options",
"=",
"self",
".",
"options",
"process_data",
".",
"pipeline",
"=",
"document",
"[",
"'pipeline'",
"]",
"process_data",
".",
"model",
"=",
"{",
"}",
"if",
"'model'",
"not",
"in",
"document",
"else",
"document",
"[",
"'model'",
"]",
"process_data",
".",
"hooks",
"=",
"Hooks",
"(",
"document",
")",
"return",
"matrix",
".",
"process",
"(",
"process_data",
")"
] | 38 | 18.941176 |
def from_mpl(fig, savefig_kw=None):
"""Create a SVG figure from a ``matplotlib`` figure.
Parameters
----------
fig : matplotlib.Figure instance
savefig_kw : dict
keyword arguments to be passed to matplotlib's
`savefig`
Returns
-------
SVGFigure
newly created :py:class:`SVGFigure` initialised with the string
content.
Examples
--------
If you want to overlay the figure on another SVG, you may want to pass
the `transparent` option:
>>> from svgutils import transform
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> line, = plt.plot([1,2])
>>> svgfig = transform.from_mpl(fig,
... savefig_kw=dict(transparent=True))
>>> svgfig.getroot()
<svgutils.transform.GroupElement object at ...>
"""
fid = StringIO()
if savefig_kw is None:
savefig_kw = {}
try:
fig.savefig(fid, format='svg', **savefig_kw)
except ValueError:
raise(ValueError, "No matplotlib SVG backend")
fid.seek(0)
fig = fromstring(fid.read())
# workaround mpl units bug
w, h = fig.get_size()
fig.set_size((w.replace('pt', ''), h.replace('pt', '')))
return fig | [
"def",
"from_mpl",
"(",
"fig",
",",
"savefig_kw",
"=",
"None",
")",
":",
"fid",
"=",
"StringIO",
"(",
")",
"if",
"savefig_kw",
"is",
"None",
":",
"savefig_kw",
"=",
"{",
"}",
"try",
":",
"fig",
".",
"savefig",
"(",
"fid",
",",
"format",
"=",
"'svg'",
",",
"*",
"*",
"savefig_kw",
")",
"except",
"ValueError",
":",
"raise",
"(",
"ValueError",
",",
"\"No matplotlib SVG backend\"",
")",
"fid",
".",
"seek",
"(",
"0",
")",
"fig",
"=",
"fromstring",
"(",
"fid",
".",
"read",
"(",
")",
")",
"# workaround mpl units bug",
"w",
",",
"h",
"=",
"fig",
".",
"get_size",
"(",
")",
"fig",
".",
"set_size",
"(",
"(",
"w",
".",
"replace",
"(",
"'pt'",
",",
"''",
")",
",",
"h",
".",
"replace",
"(",
"'pt'",
",",
"''",
")",
")",
")",
"return",
"fig"
] | 22.037037 | 23.277778 |
def measurement_key(
val: Any,
default: Any = RaiseTypeErrorIfNotProvided):
"""Get the measurement key for the given value.
Args:
val: The value which has the measurement key..
default: Determines the fallback behavior when `val` doesn't have
a measurement key. If `default` is not set, a TypeError is raised.
If default is set to a value, that value is returned if the value
does not have `_measurement_key_`.
Returns:
If `val` has a `_measurement_key_` method and its result is not
`NotImplemented`, that result is returned. Otherwise, if a default
value was specified, the default value is returned.
Raises:
TypeError: `val` doesn't have a _measurement_key_ method (or that method
returned NotImplemented) and also no default value was specified.
"""
getter = getattr(val, '_measurement_key_', None)
result = NotImplemented if getter is None else getter()
if result is not NotImplemented:
return result
if default is not RaiseTypeErrorIfNotProvided:
return default
if getter is None:
raise TypeError(
"object of type '{}' has no _measurement_key_ method."
.format(type(val)))
raise TypeError("object of type '{}' does have a _measurement_key_ method, "
"but it returned NotImplemented.".format(type(val))) | [
"def",
"measurement_key",
"(",
"val",
":",
"Any",
",",
"default",
":",
"Any",
"=",
"RaiseTypeErrorIfNotProvided",
")",
":",
"getter",
"=",
"getattr",
"(",
"val",
",",
"'_measurement_key_'",
",",
"None",
")",
"result",
"=",
"NotImplemented",
"if",
"getter",
"is",
"None",
"else",
"getter",
"(",
")",
"if",
"result",
"is",
"not",
"NotImplemented",
":",
"return",
"result",
"if",
"default",
"is",
"not",
"RaiseTypeErrorIfNotProvided",
":",
"return",
"default",
"if",
"getter",
"is",
"None",
":",
"raise",
"TypeError",
"(",
"\"object of type '{}' has no _measurement_key_ method.\"",
".",
"format",
"(",
"type",
"(",
"val",
")",
")",
")",
"raise",
"TypeError",
"(",
"\"object of type '{}' does have a _measurement_key_ method, \"",
"\"but it returned NotImplemented.\"",
".",
"format",
"(",
"type",
"(",
"val",
")",
")",
")"
] | 39.305556 | 24.75 |
def tokens_create(name, user, scopes, internal):
"""Create a personal OAuth token."""
token = Token.create_personal(
name, user.id, scopes=scopes, is_internal=internal)
db.session.commit()
click.secho(token.access_token, fg='blue') | [
"def",
"tokens_create",
"(",
"name",
",",
"user",
",",
"scopes",
",",
"internal",
")",
":",
"token",
"=",
"Token",
".",
"create_personal",
"(",
"name",
",",
"user",
".",
"id",
",",
"scopes",
"=",
"scopes",
",",
"is_internal",
"=",
"internal",
")",
"db",
".",
"session",
".",
"commit",
"(",
")",
"click",
".",
"secho",
"(",
"token",
".",
"access_token",
",",
"fg",
"=",
"'blue'",
")"
] | 41.666667 | 9.333333 |
def _reorient_3d(image):
"""
Reorganize the data for a 3d nifti
"""
# Create empty array where x,y,z correspond to LR (sagittal), PA (coronal), IS (axial) directions and the size
# of the array in each direction is the same with the corresponding direction of the input image.
new_image = numpy.zeros([image.dimensions[image.sagittal_orientation.normal_component],
image.dimensions[image.coronal_orientation.normal_component],
image.dimensions[image.axial_orientation.normal_component]],
dtype=image.nifti_data.dtype)
# Fill the new image with the values of the input image but with matching the orientation with x,y,z
if image.coronal_orientation.y_inverted:
for i in range(new_image.shape[2]):
new_image[:, :, i] = numpy.fliplr(numpy.squeeze(image.get_slice(SliceType.AXIAL,
new_image.shape[2] - 1 - i).original_data))
else:
for i in range(new_image.shape[2]):
new_image[:, :, i] = numpy.fliplr(numpy.squeeze(image.get_slice(SliceType.AXIAL,
i).original_data))
return new_image | [
"def",
"_reorient_3d",
"(",
"image",
")",
":",
"# Create empty array where x,y,z correspond to LR (sagittal), PA (coronal), IS (axial) directions and the size",
"# of the array in each direction is the same with the corresponding direction of the input image.",
"new_image",
"=",
"numpy",
".",
"zeros",
"(",
"[",
"image",
".",
"dimensions",
"[",
"image",
".",
"sagittal_orientation",
".",
"normal_component",
"]",
",",
"image",
".",
"dimensions",
"[",
"image",
".",
"coronal_orientation",
".",
"normal_component",
"]",
",",
"image",
".",
"dimensions",
"[",
"image",
".",
"axial_orientation",
".",
"normal_component",
"]",
"]",
",",
"dtype",
"=",
"image",
".",
"nifti_data",
".",
"dtype",
")",
"# Fill the new image with the values of the input image but with matching the orientation with x,y,z",
"if",
"image",
".",
"coronal_orientation",
".",
"y_inverted",
":",
"for",
"i",
"in",
"range",
"(",
"new_image",
".",
"shape",
"[",
"2",
"]",
")",
":",
"new_image",
"[",
":",
",",
":",
",",
"i",
"]",
"=",
"numpy",
".",
"fliplr",
"(",
"numpy",
".",
"squeeze",
"(",
"image",
".",
"get_slice",
"(",
"SliceType",
".",
"AXIAL",
",",
"new_image",
".",
"shape",
"[",
"2",
"]",
"-",
"1",
"-",
"i",
")",
".",
"original_data",
")",
")",
"else",
":",
"for",
"i",
"in",
"range",
"(",
"new_image",
".",
"shape",
"[",
"2",
"]",
")",
":",
"new_image",
"[",
":",
",",
":",
",",
"i",
"]",
"=",
"numpy",
".",
"fliplr",
"(",
"numpy",
".",
"squeeze",
"(",
"image",
".",
"get_slice",
"(",
"SliceType",
".",
"AXIAL",
",",
"i",
")",
".",
"original_data",
")",
")",
"return",
"new_image"
] | 58.090909 | 34.636364 |
def _ip_int_from_string(self, ip_str):
"""Turn an IPv6 ip_str into an integer.
Args:
ip_str: A string, the IPv6 ip_str.
Returns:
A long, the IPv6 ip_str.
Raises:
AddressValueError: if ip_str isn't a valid IPv6 Address.
"""
parts = ip_str.split(':')
# An IPv6 address needs at least 2 colons (3 parts).
if len(parts) < 3:
raise AddressValueError(ip_str)
# If the address has an IPv4-style suffix, convert it to hexadecimal.
if '.' in parts[-1]:
ipv4_int = IPv4Address(parts.pop())._ip
parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF))
parts.append('%x' % (ipv4_int & 0xFFFF))
# An IPv6 address can't have more than 8 colons (9 parts).
if len(parts) > self._HEXTET_COUNT + 1:
raise AddressValueError(ip_str)
# Disregarding the endpoints, find '::' with nothing in between.
# This indicates that a run of zeroes has been skipped.
try:
skip_index, = (
[i for i in xrange(1, len(parts) - 1) if not parts[i]] or
[None])
except ValueError:
# Can't have more than one '::'
raise AddressValueError(ip_str)
# parts_hi is the number of parts to copy from above/before the '::'
# parts_lo is the number of parts to copy from below/after the '::'
if skip_index is not None:
# If we found a '::', then check if it also covers the endpoints.
parts_hi = skip_index
parts_lo = len(parts) - skip_index - 1
if not parts[0]:
parts_hi -= 1
if parts_hi:
raise AddressValueError(ip_str) # ^: requires ^::
if not parts[-1]:
parts_lo -= 1
if parts_lo:
raise AddressValueError(ip_str) # :$ requires ::$
parts_skipped = self._HEXTET_COUNT - (parts_hi + parts_lo)
if parts_skipped < 1:
raise AddressValueError(ip_str)
else:
# Otherwise, allocate the entire address to parts_hi. The endpoints
# could still be empty, but _parse_hextet() will check for that.
if len(parts) != self._HEXTET_COUNT:
raise AddressValueError(ip_str)
parts_hi = len(parts)
parts_lo = 0
parts_skipped = 0
try:
# Now, parse the hextets into a 128-bit integer.
ip_int = 0L
for i in xrange(parts_hi):
ip_int <<= 16
ip_int |= self._parse_hextet(parts[i])
ip_int <<= 16 * parts_skipped
for i in xrange(-parts_lo, 0):
ip_int <<= 16
ip_int |= self._parse_hextet(parts[i])
return ip_int
except ValueError:
raise AddressValueError(ip_str) | [
"def",
"_ip_int_from_string",
"(",
"self",
",",
"ip_str",
")",
":",
"parts",
"=",
"ip_str",
".",
"split",
"(",
"':'",
")",
"# An IPv6 address needs at least 2 colons (3 parts).",
"if",
"len",
"(",
"parts",
")",
"<",
"3",
":",
"raise",
"AddressValueError",
"(",
"ip_str",
")",
"# If the address has an IPv4-style suffix, convert it to hexadecimal.",
"if",
"'.'",
"in",
"parts",
"[",
"-",
"1",
"]",
":",
"ipv4_int",
"=",
"IPv4Address",
"(",
"parts",
".",
"pop",
"(",
")",
")",
".",
"_ip",
"parts",
".",
"append",
"(",
"'%x'",
"%",
"(",
"(",
"ipv4_int",
">>",
"16",
")",
"&",
"0xFFFF",
")",
")",
"parts",
".",
"append",
"(",
"'%x'",
"%",
"(",
"ipv4_int",
"&",
"0xFFFF",
")",
")",
"# An IPv6 address can't have more than 8 colons (9 parts).",
"if",
"len",
"(",
"parts",
")",
">",
"self",
".",
"_HEXTET_COUNT",
"+",
"1",
":",
"raise",
"AddressValueError",
"(",
"ip_str",
")",
"# Disregarding the endpoints, find '::' with nothing in between.",
"# This indicates that a run of zeroes has been skipped.",
"try",
":",
"skip_index",
",",
"=",
"(",
"[",
"i",
"for",
"i",
"in",
"xrange",
"(",
"1",
",",
"len",
"(",
"parts",
")",
"-",
"1",
")",
"if",
"not",
"parts",
"[",
"i",
"]",
"]",
"or",
"[",
"None",
"]",
")",
"except",
"ValueError",
":",
"# Can't have more than one '::'",
"raise",
"AddressValueError",
"(",
"ip_str",
")",
"# parts_hi is the number of parts to copy from above/before the '::'",
"# parts_lo is the number of parts to copy from below/after the '::'",
"if",
"skip_index",
"is",
"not",
"None",
":",
"# If we found a '::', then check if it also covers the endpoints.",
"parts_hi",
"=",
"skip_index",
"parts_lo",
"=",
"len",
"(",
"parts",
")",
"-",
"skip_index",
"-",
"1",
"if",
"not",
"parts",
"[",
"0",
"]",
":",
"parts_hi",
"-=",
"1",
"if",
"parts_hi",
":",
"raise",
"AddressValueError",
"(",
"ip_str",
")",
"# ^: requires ^::",
"if",
"not",
"parts",
"[",
"-",
"1",
"]",
":",
"parts_lo",
"-=",
"1",
"if",
"parts_lo",
":",
"raise",
"AddressValueError",
"(",
"ip_str",
")",
"# :$ requires ::$",
"parts_skipped",
"=",
"self",
".",
"_HEXTET_COUNT",
"-",
"(",
"parts_hi",
"+",
"parts_lo",
")",
"if",
"parts_skipped",
"<",
"1",
":",
"raise",
"AddressValueError",
"(",
"ip_str",
")",
"else",
":",
"# Otherwise, allocate the entire address to parts_hi. The endpoints",
"# could still be empty, but _parse_hextet() will check for that.",
"if",
"len",
"(",
"parts",
")",
"!=",
"self",
".",
"_HEXTET_COUNT",
":",
"raise",
"AddressValueError",
"(",
"ip_str",
")",
"parts_hi",
"=",
"len",
"(",
"parts",
")",
"parts_lo",
"=",
"0",
"parts_skipped",
"=",
"0",
"try",
":",
"# Now, parse the hextets into a 128-bit integer.",
"ip_int",
"=",
"0L",
"for",
"i",
"in",
"xrange",
"(",
"parts_hi",
")",
":",
"ip_int",
"<<=",
"16",
"ip_int",
"|=",
"self",
".",
"_parse_hextet",
"(",
"parts",
"[",
"i",
"]",
")",
"ip_int",
"<<=",
"16",
"*",
"parts_skipped",
"for",
"i",
"in",
"xrange",
"(",
"-",
"parts_lo",
",",
"0",
")",
":",
"ip_int",
"<<=",
"16",
"ip_int",
"|=",
"self",
".",
"_parse_hextet",
"(",
"parts",
"[",
"i",
"]",
")",
"return",
"ip_int",
"except",
"ValueError",
":",
"raise",
"AddressValueError",
"(",
"ip_str",
")"
] | 37.051282 | 18.74359 |
def get_child(self):
"""
Find file or folder at the remote_path
:return: File|Folder
"""
path_parts = self.remote_path.split(os.sep)
return self._get_child_recurse(path_parts, self.node) | [
"def",
"get_child",
"(",
"self",
")",
":",
"path_parts",
"=",
"self",
".",
"remote_path",
".",
"split",
"(",
"os",
".",
"sep",
")",
"return",
"self",
".",
"_get_child_recurse",
"(",
"path_parts",
",",
"self",
".",
"node",
")"
] | 32.571429 | 10 |
def filter(self, criteria, applyto='measurement', ID=None):
"""
Filter measurements according to given criteria.
Retain only Measurements for which criteria returns True.
TODO: add support for multiple criteria
Parameters
----------
criteria : callable
Returns bool.
applyto : 'measurement' | 'keys' | 'data' | mapping
'measurement' : criteria is applied to Measurement objects
'keys' : criteria is applied to the keys.
'data' : criteria is applied to the Measurement objects' data.
mapping : for each key criteria is applied to mapping value with same key.
ID : str
ID of the filtered collection.
If None is given, append '.filterd' to the current sample ID.
Returns
-------
Filtered Collection.
"""
fil = criteria
new = self.copy()
if isinstance(applyto, collections.Mapping):
remove = (k for k, v in self.items() if not fil(applyto[k]))
elif applyto == 'measurement':
remove = (k for k, v in self.items() if not fil(v))
elif applyto == 'keys':
remove = (k for k, v in self.items() if not fil(k))
elif applyto == 'data':
remove = (k for k, v in self.items() if not fil(v.get_data()))
else:
raise ValueError('Unsupported value "%s" for applyto parameter.' % applyto)
for r in remove:
del new[r]
if ID is None:
ID = self.ID
new.ID = ID
return new | [
"def",
"filter",
"(",
"self",
",",
"criteria",
",",
"applyto",
"=",
"'measurement'",
",",
"ID",
"=",
"None",
")",
":",
"fil",
"=",
"criteria",
"new",
"=",
"self",
".",
"copy",
"(",
")",
"if",
"isinstance",
"(",
"applyto",
",",
"collections",
".",
"Mapping",
")",
":",
"remove",
"=",
"(",
"k",
"for",
"k",
",",
"v",
"in",
"self",
".",
"items",
"(",
")",
"if",
"not",
"fil",
"(",
"applyto",
"[",
"k",
"]",
")",
")",
"elif",
"applyto",
"==",
"'measurement'",
":",
"remove",
"=",
"(",
"k",
"for",
"k",
",",
"v",
"in",
"self",
".",
"items",
"(",
")",
"if",
"not",
"fil",
"(",
"v",
")",
")",
"elif",
"applyto",
"==",
"'keys'",
":",
"remove",
"=",
"(",
"k",
"for",
"k",
",",
"v",
"in",
"self",
".",
"items",
"(",
")",
"if",
"not",
"fil",
"(",
"k",
")",
")",
"elif",
"applyto",
"==",
"'data'",
":",
"remove",
"=",
"(",
"k",
"for",
"k",
",",
"v",
"in",
"self",
".",
"items",
"(",
")",
"if",
"not",
"fil",
"(",
"v",
".",
"get_data",
"(",
")",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Unsupported value \"%s\" for applyto parameter.'",
"%",
"applyto",
")",
"for",
"r",
"in",
"remove",
":",
"del",
"new",
"[",
"r",
"]",
"if",
"ID",
"is",
"None",
":",
"ID",
"=",
"self",
".",
"ID",
"new",
".",
"ID",
"=",
"ID",
"return",
"new"
] | 37.97619 | 21.690476 |
def depends(self, offset=0, count=25):
'''Return all the currently dependent jobs'''
return self.client('jobs', 'depends', self.name, offset, count) | [
"def",
"depends",
"(",
"self",
",",
"offset",
"=",
"0",
",",
"count",
"=",
"25",
")",
":",
"return",
"self",
".",
"client",
"(",
"'jobs'",
",",
"'depends'",
",",
"self",
".",
"name",
",",
"offset",
",",
"count",
")"
] | 54 | 15.333333 |
def _on_timeout():
"""Invoked periodically to ensure that metrics that have been collected
are submitted to InfluxDB.
:rtype: tornado.concurrent.Future or None
"""
global _buffer_size
LOGGER.debug('No metrics submitted in the last %.2f seconds',
_timeout_interval / 1000.0)
_buffer_size = _pending_measurements()
if _buffer_size:
return _trigger_batch_write()
_start_timeout() | [
"def",
"_on_timeout",
"(",
")",
":",
"global",
"_buffer_size",
"LOGGER",
".",
"debug",
"(",
"'No metrics submitted in the last %.2f seconds'",
",",
"_timeout_interval",
"/",
"1000.0",
")",
"_buffer_size",
"=",
"_pending_measurements",
"(",
")",
"if",
"_buffer_size",
":",
"return",
"_trigger_batch_write",
"(",
")",
"_start_timeout",
"(",
")"
] | 28.4 | 16.533333 |
def post_status(
app,
user,
status,
visibility='public',
media_ids=None,
sensitive=False,
spoiler_text=None,
in_reply_to_id=None
):
"""
Posts a new status.
https://github.com/tootsuite/documentation/blob/master/Using-the-API/API.md#posting-a-new-status
"""
# Idempotency key assures the same status is not posted multiple times
# if the request is retried.
headers = {"Idempotency-Key": uuid.uuid4().hex}
return http.post(app, user, '/api/v1/statuses', {
'status': status,
'media_ids[]': media_ids,
'visibility': visibility,
'sensitive': str_bool(sensitive),
'spoiler_text': spoiler_text,
'in_reply_to_id': in_reply_to_id,
}, headers=headers).json() | [
"def",
"post_status",
"(",
"app",
",",
"user",
",",
"status",
",",
"visibility",
"=",
"'public'",
",",
"media_ids",
"=",
"None",
",",
"sensitive",
"=",
"False",
",",
"spoiler_text",
"=",
"None",
",",
"in_reply_to_id",
"=",
"None",
")",
":",
"# Idempotency key assures the same status is not posted multiple times",
"# if the request is retried.",
"headers",
"=",
"{",
"\"Idempotency-Key\"",
":",
"uuid",
".",
"uuid4",
"(",
")",
".",
"hex",
"}",
"return",
"http",
".",
"post",
"(",
"app",
",",
"user",
",",
"'/api/v1/statuses'",
",",
"{",
"'status'",
":",
"status",
",",
"'media_ids[]'",
":",
"media_ids",
",",
"'visibility'",
":",
"visibility",
",",
"'sensitive'",
":",
"str_bool",
"(",
"sensitive",
")",
",",
"'spoiler_text'",
":",
"spoiler_text",
",",
"'in_reply_to_id'",
":",
"in_reply_to_id",
",",
"}",
",",
"headers",
"=",
"headers",
")",
".",
"json",
"(",
")"
] | 27.444444 | 19 |
def vminug(vin, ndim):
"""
Negate a double precision vector of arbitrary dimension.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vminug_c.html
:param vin: ndim-dimensional double precision vector to be negated.
:type vin: Array of floats
:param ndim: Dimension of vin.
:type ndim: int
:return: ndim-dimensional double precision vector equal to -vin.
:rtype: list[ndim]
"""
vin = stypes.toDoubleVector(vin)
vout = stypes.emptyDoubleVector(ndim)
ndim = ctypes.c_int(ndim)
libspice.vminug_c(vin, ndim, vout)
return stypes.cVectorToPython(vout) | [
"def",
"vminug",
"(",
"vin",
",",
"ndim",
")",
":",
"vin",
"=",
"stypes",
".",
"toDoubleVector",
"(",
"vin",
")",
"vout",
"=",
"stypes",
".",
"emptyDoubleVector",
"(",
"ndim",
")",
"ndim",
"=",
"ctypes",
".",
"c_int",
"(",
"ndim",
")",
"libspice",
".",
"vminug_c",
"(",
"vin",
",",
"ndim",
",",
"vout",
")",
"return",
"stypes",
".",
"cVectorToPython",
"(",
"vout",
")"
] | 33.222222 | 15.777778 |
def scan(backend, timeout=10):
"""Scan for miflora devices.
Note: this must be run as root!
"""
result = []
for (mac, name) in backend.scan_for_devices(timeout):
if (name is not None and name.lower() in VALID_DEVICE_NAMES) or \
mac is not None and mac.upper().startswith(DEVICE_PREFIX):
result.append(mac.upper())
return result | [
"def",
"scan",
"(",
"backend",
",",
"timeout",
"=",
"10",
")",
":",
"result",
"=",
"[",
"]",
"for",
"(",
"mac",
",",
"name",
")",
"in",
"backend",
".",
"scan_for_devices",
"(",
"timeout",
")",
":",
"if",
"(",
"name",
"is",
"not",
"None",
"and",
"name",
".",
"lower",
"(",
")",
"in",
"VALID_DEVICE_NAMES",
")",
"or",
"mac",
"is",
"not",
"None",
"and",
"mac",
".",
"upper",
"(",
")",
".",
"startswith",
"(",
"DEVICE_PREFIX",
")",
":",
"result",
".",
"append",
"(",
"mac",
".",
"upper",
"(",
")",
")",
"return",
"result"
] | 34.363636 | 17.181818 |
def sort(args):
"""
%prog sort fastafile
Sort a list of sequences and output with sorted IDs, etc.
"""
p = OptionParser(sort.__doc__)
p.add_option("--sizes", default=False, action="store_true",
help="Sort by decreasing size [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(p.print_help())
fastafile, = args
sortedfastafile = fastafile.rsplit(".", 1)[0] + ".sorted.fasta"
f = Fasta(fastafile, index=False)
fw = must_open(sortedfastafile, "w")
if opts.sizes:
# Sort by decreasing size
sortlist = sorted(f.itersizes(), key=lambda x: (-x[1], x[0]))
logging.debug("Sort by size: max: {0}, min: {1}".\
format(sortlist[0], sortlist[-1]))
sortlist = [x for x, s in sortlist]
else:
sortlist = sorted(f.iterkeys())
for key in sortlist:
rec = f[key]
SeqIO.write([rec], fw, "fasta")
logging.debug("Sorted file written to `{0}`.".format(sortedfastafile))
fw.close()
return sortedfastafile | [
"def",
"sort",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"sort",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--sizes\"",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Sort by decreasing size [default: %default]\"",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"1",
":",
"sys",
".",
"exit",
"(",
"p",
".",
"print_help",
"(",
")",
")",
"fastafile",
",",
"=",
"args",
"sortedfastafile",
"=",
"fastafile",
".",
"rsplit",
"(",
"\".\"",
",",
"1",
")",
"[",
"0",
"]",
"+",
"\".sorted.fasta\"",
"f",
"=",
"Fasta",
"(",
"fastafile",
",",
"index",
"=",
"False",
")",
"fw",
"=",
"must_open",
"(",
"sortedfastafile",
",",
"\"w\"",
")",
"if",
"opts",
".",
"sizes",
":",
"# Sort by decreasing size",
"sortlist",
"=",
"sorted",
"(",
"f",
".",
"itersizes",
"(",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"(",
"-",
"x",
"[",
"1",
"]",
",",
"x",
"[",
"0",
"]",
")",
")",
"logging",
".",
"debug",
"(",
"\"Sort by size: max: {0}, min: {1}\"",
".",
"format",
"(",
"sortlist",
"[",
"0",
"]",
",",
"sortlist",
"[",
"-",
"1",
"]",
")",
")",
"sortlist",
"=",
"[",
"x",
"for",
"x",
",",
"s",
"in",
"sortlist",
"]",
"else",
":",
"sortlist",
"=",
"sorted",
"(",
"f",
".",
"iterkeys",
"(",
")",
")",
"for",
"key",
"in",
"sortlist",
":",
"rec",
"=",
"f",
"[",
"key",
"]",
"SeqIO",
".",
"write",
"(",
"[",
"rec",
"]",
",",
"fw",
",",
"\"fasta\"",
")",
"logging",
".",
"debug",
"(",
"\"Sorted file written to `{0}`.\"",
".",
"format",
"(",
"sortedfastafile",
")",
")",
"fw",
".",
"close",
"(",
")",
"return",
"sortedfastafile"
] | 28.567568 | 20.513514 |
def focus_next_unfolded(self):
"""focus next unfolded message in depth first order"""
self.focus_property(lambda x: not x.is_collapsed(x.root),
self._tree.next_position) | [
"def",
"focus_next_unfolded",
"(",
"self",
")",
":",
"self",
".",
"focus_property",
"(",
"lambda",
"x",
":",
"not",
"x",
".",
"is_collapsed",
"(",
"x",
".",
"root",
")",
",",
"self",
".",
"_tree",
".",
"next_position",
")"
] | 52.5 | 12 |
def _ReadStorageDataTypeDefinition(
self, definitions_registry, definition_values, data_type_definition_class,
definition_name, supported_attributes, is_member=False):
"""Reads a storage data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
data_type_definition_class (str): data type definition class.
definition_name (str): name of the definition.
supported_attributes (set[str]): names of the supported attributes.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
StorageDataTypeDefinition: storage data type definition.
Raises:
DefinitionReaderError: if the definitions values are missing or if
the format is incorrect.
"""
if is_member:
supported_definition_values = (
self._SUPPORTED_DEFINITION_VALUES_MEMBER_DATA_TYPE)
else:
supported_definition_values = (
self._SUPPORTED_DEFINITION_VALUES_STORAGE_DATA_TYPE)
definition_object = self._ReadDataTypeDefinition(
definitions_registry, definition_values, data_type_definition_class,
definition_name, supported_definition_values)
attributes = definition_values.get('attributes', None)
if attributes:
unsupported_attributes = set(attributes.keys()).difference(
supported_attributes)
if unsupported_attributes:
error_message = 'unsupported attributes: {0:s}'.format(
', '.join(unsupported_attributes))
raise errors.DefinitionReaderError(definition_name, error_message)
byte_order = attributes.get('byte_order', definitions.BYTE_ORDER_NATIVE)
if byte_order not in definitions.BYTE_ORDERS:
error_message = 'unsupported byte-order attribute: {0!s}'.format(
byte_order)
raise errors.DefinitionReaderError(definition_name, error_message)
definition_object.byte_order = byte_order
return definition_object | [
"def",
"_ReadStorageDataTypeDefinition",
"(",
"self",
",",
"definitions_registry",
",",
"definition_values",
",",
"data_type_definition_class",
",",
"definition_name",
",",
"supported_attributes",
",",
"is_member",
"=",
"False",
")",
":",
"if",
"is_member",
":",
"supported_definition_values",
"=",
"(",
"self",
".",
"_SUPPORTED_DEFINITION_VALUES_MEMBER_DATA_TYPE",
")",
"else",
":",
"supported_definition_values",
"=",
"(",
"self",
".",
"_SUPPORTED_DEFINITION_VALUES_STORAGE_DATA_TYPE",
")",
"definition_object",
"=",
"self",
".",
"_ReadDataTypeDefinition",
"(",
"definitions_registry",
",",
"definition_values",
",",
"data_type_definition_class",
",",
"definition_name",
",",
"supported_definition_values",
")",
"attributes",
"=",
"definition_values",
".",
"get",
"(",
"'attributes'",
",",
"None",
")",
"if",
"attributes",
":",
"unsupported_attributes",
"=",
"set",
"(",
"attributes",
".",
"keys",
"(",
")",
")",
".",
"difference",
"(",
"supported_attributes",
")",
"if",
"unsupported_attributes",
":",
"error_message",
"=",
"'unsupported attributes: {0:s}'",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"unsupported_attributes",
")",
")",
"raise",
"errors",
".",
"DefinitionReaderError",
"(",
"definition_name",
",",
"error_message",
")",
"byte_order",
"=",
"attributes",
".",
"get",
"(",
"'byte_order'",
",",
"definitions",
".",
"BYTE_ORDER_NATIVE",
")",
"if",
"byte_order",
"not",
"in",
"definitions",
".",
"BYTE_ORDERS",
":",
"error_message",
"=",
"'unsupported byte-order attribute: {0!s}'",
".",
"format",
"(",
"byte_order",
")",
"raise",
"errors",
".",
"DefinitionReaderError",
"(",
"definition_name",
",",
"error_message",
")",
"definition_object",
".",
"byte_order",
"=",
"byte_order",
"return",
"definition_object"
] | 40.313725 | 23.509804 |
def to_dict(self, minimal=False):
"""Returns the representation for serialization"""
data = collections.OrderedDict()
if minimal:
# In the minimal representation we just output the value
data['value'] = self._to_python_type(self.value)
else:
# In the complete representation we output everything is needed to re-build the object
data['value'] = self._to_python_type(self.value)
data['desc'] = str(self.description)
data['min_value'] = self._to_python_type(self.min_value)
data['max_value'] = self._to_python_type(self.max_value)
# We use our own thread-safe format for the unit
data['unit'] = self.unit.to_string(format='threadsafe')
return data | [
"def",
"to_dict",
"(",
"self",
",",
"minimal",
"=",
"False",
")",
":",
"data",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"if",
"minimal",
":",
"# In the minimal representation we just output the value",
"data",
"[",
"'value'",
"]",
"=",
"self",
".",
"_to_python_type",
"(",
"self",
".",
"value",
")",
"else",
":",
"# In the complete representation we output everything is needed to re-build the object",
"data",
"[",
"'value'",
"]",
"=",
"self",
".",
"_to_python_type",
"(",
"self",
".",
"value",
")",
"data",
"[",
"'desc'",
"]",
"=",
"str",
"(",
"self",
".",
"description",
")",
"data",
"[",
"'min_value'",
"]",
"=",
"self",
".",
"_to_python_type",
"(",
"self",
".",
"min_value",
")",
"data",
"[",
"'max_value'",
"]",
"=",
"self",
".",
"_to_python_type",
"(",
"self",
".",
"max_value",
")",
"# We use our own thread-safe format for the unit",
"data",
"[",
"'unit'",
"]",
"=",
"self",
".",
"unit",
".",
"to_string",
"(",
"format",
"=",
"'threadsafe'",
")",
"return",
"data"
] | 32.458333 | 28.041667 |
def Parse(self):
"""Parse program output."""
(start_line, lang) = self.ParseDesc()
if start_line < 0:
return
if 'python' == lang:
self.ParsePythonFlags(start_line)
elif 'c' == lang:
self.ParseCFlags(start_line)
elif 'java' == lang:
self.ParseJavaFlags(start_line) | [
"def",
"Parse",
"(",
"self",
")",
":",
"(",
"start_line",
",",
"lang",
")",
"=",
"self",
".",
"ParseDesc",
"(",
")",
"if",
"start_line",
"<",
"0",
":",
"return",
"if",
"'python'",
"==",
"lang",
":",
"self",
".",
"ParsePythonFlags",
"(",
"start_line",
")",
"elif",
"'c'",
"==",
"lang",
":",
"self",
".",
"ParseCFlags",
"(",
"start_line",
")",
"elif",
"'java'",
"==",
"lang",
":",
"self",
".",
"ParseJavaFlags",
"(",
"start_line",
")"
] | 27.363636 | 12 |
def match(self, feature, gps_precision=None, profile='mapbox.driving'):
"""Match features to OpenStreetMap data."""
profile = self._validate_profile(profile)
feature = self._validate_feature(feature)
geojson_line_feature = json.dumps(feature)
uri = URITemplate(self.baseuri + '/{profile}.json').expand(
profile=profile)
params = None
if gps_precision:
params = {'gps_precision': gps_precision}
res = self.session.post(uri, data=geojson_line_feature, params=params,
headers={'Content-Type': 'application/json'})
self.handle_http_error(res)
def geojson():
return res.json()
res.geojson = geojson
return res | [
"def",
"match",
"(",
"self",
",",
"feature",
",",
"gps_precision",
"=",
"None",
",",
"profile",
"=",
"'mapbox.driving'",
")",
":",
"profile",
"=",
"self",
".",
"_validate_profile",
"(",
"profile",
")",
"feature",
"=",
"self",
".",
"_validate_feature",
"(",
"feature",
")",
"geojson_line_feature",
"=",
"json",
".",
"dumps",
"(",
"feature",
")",
"uri",
"=",
"URITemplate",
"(",
"self",
".",
"baseuri",
"+",
"'/{profile}.json'",
")",
".",
"expand",
"(",
"profile",
"=",
"profile",
")",
"params",
"=",
"None",
"if",
"gps_precision",
":",
"params",
"=",
"{",
"'gps_precision'",
":",
"gps_precision",
"}",
"res",
"=",
"self",
".",
"session",
".",
"post",
"(",
"uri",
",",
"data",
"=",
"geojson_line_feature",
",",
"params",
"=",
"params",
",",
"headers",
"=",
"{",
"'Content-Type'",
":",
"'application/json'",
"}",
")",
"self",
".",
"handle_http_error",
"(",
"res",
")",
"def",
"geojson",
"(",
")",
":",
"return",
"res",
".",
"json",
"(",
")",
"res",
".",
"geojson",
"=",
"geojson",
"return",
"res"
] | 32.695652 | 22.913043 |
def loads_json_or_yaml(file_path, content):
""" Load JSON or YAML depending on the file extension. Returns a dict """
if os.path.splitext(file_path)[1] == ".json":
return json.loads(content)
else:
return inginious.common.custom_yaml.load(content) | [
"def",
"loads_json_or_yaml",
"(",
"file_path",
",",
"content",
")",
":",
"if",
"os",
".",
"path",
".",
"splitext",
"(",
"file_path",
")",
"[",
"1",
"]",
"==",
"\".json\"",
":",
"return",
"json",
".",
"loads",
"(",
"content",
")",
"else",
":",
"return",
"inginious",
".",
"common",
".",
"custom_yaml",
".",
"load",
"(",
"content",
")"
] | 44.833333 | 11 |
def get_attrib(self, et_node, prefixed_attrib):
"""Get a prefixed attribute like 'rdf:resource' from ET node."""
prefix, attrib = prefixed_attrib.split(':')
return et_node.get('{{{0}}}{1}'.format(self.namespaces[prefix],
attrib)) | [
"def",
"get_attrib",
"(",
"self",
",",
"et_node",
",",
"prefixed_attrib",
")",
":",
"prefix",
",",
"attrib",
"=",
"prefixed_attrib",
".",
"split",
"(",
"':'",
")",
"return",
"et_node",
".",
"get",
"(",
"'{{{0}}}{1}'",
".",
"format",
"(",
"self",
".",
"namespaces",
"[",
"prefix",
"]",
",",
"attrib",
")",
")"
] | 59.2 | 12.8 |
def xpat_gen(self, header, msgid_range, *pattern):
"""Generator for the XPAT command.
"""
args = " ".join(
[header, utils.unparse_msgid_range(msgid_range)] + list(pattern)
)
code, message = self.command("XPAT", args)
if code != 221:
raise NNTPReplyError(code, message)
for line in self.info_gen(code, message):
yield line.strip() | [
"def",
"xpat_gen",
"(",
"self",
",",
"header",
",",
"msgid_range",
",",
"*",
"pattern",
")",
":",
"args",
"=",
"\" \"",
".",
"join",
"(",
"[",
"header",
",",
"utils",
".",
"unparse_msgid_range",
"(",
"msgid_range",
")",
"]",
"+",
"list",
"(",
"pattern",
")",
")",
"code",
",",
"message",
"=",
"self",
".",
"command",
"(",
"\"XPAT\"",
",",
"args",
")",
"if",
"code",
"!=",
"221",
":",
"raise",
"NNTPReplyError",
"(",
"code",
",",
"message",
")",
"for",
"line",
"in",
"self",
".",
"info_gen",
"(",
"code",
",",
"message",
")",
":",
"yield",
"line",
".",
"strip",
"(",
")"
] | 31.615385 | 17.384615 |
def tune(runner, kernel_options, device_options, tuning_options):
""" Find the best performing kernel configuration in the parameter space
:params runner: A runner from kernel_tuner.runners
:type runner: kernel_tuner.runner
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: dict
:param device_options: A dictionary with all options for the device
on which the kernel should be tuned.
:type device_options: dict
:param tuning_options: A dictionary with all options regarding the tuning
process.
:type tuning_options: dict
:returns: A list of dictionaries for executed kernel configurations and their
execution times. And a dictionary that contains a information
about the hardware/software environment on which the tuning took place.
:rtype: list(dict()), dict()
"""
results = []
cache = {}
method = tuning_options.method
#scale variables in x to make 'eps' relevant for multiple variables
tuning_options["scaling"] = True
bounds, x0, eps = get_bounds_x0_eps(tuning_options)
kwargs = setup_method_arguments(method, bounds)
options = setup_method_options(method, tuning_options)
kwargs['options'] = options
args = (kernel_options, tuning_options, runner, results, cache)
minimizer_kwargs = dict(**kwargs)
minimizer_kwargs["method"] = method
minimizer_kwargs["args"] = args
opt_result = scipy.optimize.basinhopping(_cost_func, x0, stepsize=eps, minimizer_kwargs=minimizer_kwargs, disp=tuning_options.verbose)
if tuning_options.verbose:
print(opt_result.message)
return results, runner.dev.get_environment() | [
"def",
"tune",
"(",
"runner",
",",
"kernel_options",
",",
"device_options",
",",
"tuning_options",
")",
":",
"results",
"=",
"[",
"]",
"cache",
"=",
"{",
"}",
"method",
"=",
"tuning_options",
".",
"method",
"#scale variables in x to make 'eps' relevant for multiple variables",
"tuning_options",
"[",
"\"scaling\"",
"]",
"=",
"True",
"bounds",
",",
"x0",
",",
"eps",
"=",
"get_bounds_x0_eps",
"(",
"tuning_options",
")",
"kwargs",
"=",
"setup_method_arguments",
"(",
"method",
",",
"bounds",
")",
"options",
"=",
"setup_method_options",
"(",
"method",
",",
"tuning_options",
")",
"kwargs",
"[",
"'options'",
"]",
"=",
"options",
"args",
"=",
"(",
"kernel_options",
",",
"tuning_options",
",",
"runner",
",",
"results",
",",
"cache",
")",
"minimizer_kwargs",
"=",
"dict",
"(",
"*",
"*",
"kwargs",
")",
"minimizer_kwargs",
"[",
"\"method\"",
"]",
"=",
"method",
"minimizer_kwargs",
"[",
"\"args\"",
"]",
"=",
"args",
"opt_result",
"=",
"scipy",
".",
"optimize",
".",
"basinhopping",
"(",
"_cost_func",
",",
"x0",
",",
"stepsize",
"=",
"eps",
",",
"minimizer_kwargs",
"=",
"minimizer_kwargs",
",",
"disp",
"=",
"tuning_options",
".",
"verbose",
")",
"if",
"tuning_options",
".",
"verbose",
":",
"print",
"(",
"opt_result",
".",
"message",
")",
"return",
"results",
",",
"runner",
".",
"dev",
".",
"get_environment",
"(",
")"
] | 33.26 | 25.2 |
def set_current_canvas(canvas):
""" Make a canvas active. Used primarily by the canvas itself.
"""
# Notify glir
canvas.context._do_CURRENT_command = True
# Try to be quick
if canvasses and canvasses[-1]() is canvas:
return
# Make this the current
cc = [c() for c in canvasses if c() is not None]
while canvas in cc:
cc.remove(canvas)
cc.append(canvas)
canvasses[:] = [weakref.ref(c) for c in cc] | [
"def",
"set_current_canvas",
"(",
"canvas",
")",
":",
"# Notify glir ",
"canvas",
".",
"context",
".",
"_do_CURRENT_command",
"=",
"True",
"# Try to be quick",
"if",
"canvasses",
"and",
"canvasses",
"[",
"-",
"1",
"]",
"(",
")",
"is",
"canvas",
":",
"return",
"# Make this the current",
"cc",
"=",
"[",
"c",
"(",
")",
"for",
"c",
"in",
"canvasses",
"if",
"c",
"(",
")",
"is",
"not",
"None",
"]",
"while",
"canvas",
"in",
"cc",
":",
"cc",
".",
"remove",
"(",
"canvas",
")",
"cc",
".",
"append",
"(",
"canvas",
")",
"canvasses",
"[",
":",
"]",
"=",
"[",
"weakref",
".",
"ref",
"(",
"c",
")",
"for",
"c",
"in",
"cc",
"]"
] | 31.714286 | 12.214286 |
def resolve(self, symbol):
"""
Resolve a symbol using the entrypoint group.
:param symbol: The symbol being resolved.
:returns: The value of that symbol. If the symbol cannot be
found, or if no entrypoint group was passed to the
constructor, will return ``None``.
"""
# Search for a corresponding symbol
if symbol not in self._resolve_cache:
result = None
# Search through entrypoints only if we have a group
if self._group is not None:
for ep in pkg_resources.iter_entry_points(self._group, symbol):
try:
result = ep.load()
except (ImportError, AttributeError,
pkg_resources.UnknownExtra):
continue
# We found the result we were looking for
break
# Cache the result
self._resolve_cache[symbol] = result
return self._resolve_cache[symbol] | [
"def",
"resolve",
"(",
"self",
",",
"symbol",
")",
":",
"# Search for a corresponding symbol",
"if",
"symbol",
"not",
"in",
"self",
".",
"_resolve_cache",
":",
"result",
"=",
"None",
"# Search through entrypoints only if we have a group",
"if",
"self",
".",
"_group",
"is",
"not",
"None",
":",
"for",
"ep",
"in",
"pkg_resources",
".",
"iter_entry_points",
"(",
"self",
".",
"_group",
",",
"symbol",
")",
":",
"try",
":",
"result",
"=",
"ep",
".",
"load",
"(",
")",
"except",
"(",
"ImportError",
",",
"AttributeError",
",",
"pkg_resources",
".",
"UnknownExtra",
")",
":",
"continue",
"# We found the result we were looking for",
"break",
"# Cache the result",
"self",
".",
"_resolve_cache",
"[",
"symbol",
"]",
"=",
"result",
"return",
"self",
".",
"_resolve_cache",
"[",
"symbol",
"]"
] | 33.806452 | 18.83871 |
def cost(self, t_node, branch_length, multiplicity=2.0):
'''
returns the cost associated with a branch starting at t_node
t_node is time before present, the branch goes back in time
Args:
- t_node: time of the node
- branch_length: branch length, determines when this branch merges with sister
- multiplicity: 2 if merger is binary, higher if this is a polytomy
'''
merger_time = t_node+branch_length
return self.integral_merger_rate(merger_time) - self.integral_merger_rate(t_node)\
- np.log(self.total_merger_rate(merger_time))*(multiplicity-1.0)/multiplicity | [
"def",
"cost",
"(",
"self",
",",
"t_node",
",",
"branch_length",
",",
"multiplicity",
"=",
"2.0",
")",
":",
"merger_time",
"=",
"t_node",
"+",
"branch_length",
"return",
"self",
".",
"integral_merger_rate",
"(",
"merger_time",
")",
"-",
"self",
".",
"integral_merger_rate",
"(",
"t_node",
")",
"-",
"np",
".",
"log",
"(",
"self",
".",
"total_merger_rate",
"(",
"merger_time",
")",
")",
"*",
"(",
"multiplicity",
"-",
"1.0",
")",
"/",
"multiplicity"
] | 52 | 31.230769 |
def save_scenario(self, scenario_file_path=None):
"""Save current scenario to a text file.
You can use the saved scenario with the batch runner.
:param scenario_file_path: A path to the scenario file.
:type scenario_file_path: str
"""
# Validate Input
warning_title = tr('InaSAFE Save Scenario Warning')
is_valid, warning_message = self.validate_input()
if not is_valid:
# noinspection PyCallByClass,PyTypeChecker,PyArgumentList
QMessageBox.warning(self, warning_title, warning_message)
return
# Make extent to look like:
# 109.829170982, -8.13333290561, 111.005344795, -7.49226294379
# Added in 2.2 to support user defined analysis extents
if self.dock.extent.user_extent is not None \
and self.dock.extent.crs is not None:
# In V4.0, user_extent is QgsGeometry.
user_extent = self.dock.extent.user_extent.boundingBox()
extent = extent_to_array(user_extent, self.dock.extent.crs)
else:
extent = viewport_geo_array(self.iface.mapCanvas())
extent_string = ', '.join(('%f' % x) for x in extent)
exposure_path = self.exposure_layer.source()
hazard_path = self.hazard_layer.source()
title = self.keyword_io.read_keywords(self.hazard_layer, 'title')
title = tr(title)
default_filename = title.replace(
' ', '_').replace('(', '').replace(')', '')
# Popup a dialog to request the filename if scenario_file_path = None
dialog_title = tr('Save Scenario')
if scenario_file_path is None:
# noinspection PyCallByClass,PyTypeChecker
scenario_file_path, __ = QFileDialog.getSaveFileName(
self,
dialog_title,
os.path.join(self.output_directory, default_filename + '.txt'),
"Text files (*.txt)")
if scenario_file_path is None or scenario_file_path == '':
return
self.output_directory = os.path.dirname(scenario_file_path)
# Write to file
parser = ConfigParser()
parser.add_section(title)
# Relative path is not recognized by the batch runner, so we use
# absolute path.
parser.set(title, 'exposure', exposure_path)
parser.set(title, 'hazard', hazard_path)
parser.set(title, 'extent', extent_string)
if self.dock.extent.crs is None:
parser.set(title, 'extent_crs', 'EPSG:4326')
else:
parser.set(
title,
'extent_crs',
self.dock.extent.crs.authid())
if self.aggregation_layer is not None:
aggregation_path = self.aggregation_layer.source()
relative_aggregation_path = self.relative_path(
scenario_file_path, aggregation_path)
parser.set(title, 'aggregation', relative_aggregation_path)
# noinspection PyBroadException
try:
of = open(scenario_file_path, 'a')
parser.write(of)
of.close()
except Exception as e:
# noinspection PyTypeChecker,PyCallByClass,PyArgumentList
QMessageBox.warning(
self,
'InaSAFE',
tr(
'Failed to save scenario to {path}, exception '
'{exception}').format(
path=scenario_file_path, exception=str(e)))
finally:
of.close()
# Save State
self.save_state() | [
"def",
"save_scenario",
"(",
"self",
",",
"scenario_file_path",
"=",
"None",
")",
":",
"# Validate Input",
"warning_title",
"=",
"tr",
"(",
"'InaSAFE Save Scenario Warning'",
")",
"is_valid",
",",
"warning_message",
"=",
"self",
".",
"validate_input",
"(",
")",
"if",
"not",
"is_valid",
":",
"# noinspection PyCallByClass,PyTypeChecker,PyArgumentList",
"QMessageBox",
".",
"warning",
"(",
"self",
",",
"warning_title",
",",
"warning_message",
")",
"return",
"# Make extent to look like:",
"# 109.829170982, -8.13333290561, 111.005344795, -7.49226294379",
"# Added in 2.2 to support user defined analysis extents",
"if",
"self",
".",
"dock",
".",
"extent",
".",
"user_extent",
"is",
"not",
"None",
"and",
"self",
".",
"dock",
".",
"extent",
".",
"crs",
"is",
"not",
"None",
":",
"# In V4.0, user_extent is QgsGeometry.",
"user_extent",
"=",
"self",
".",
"dock",
".",
"extent",
".",
"user_extent",
".",
"boundingBox",
"(",
")",
"extent",
"=",
"extent_to_array",
"(",
"user_extent",
",",
"self",
".",
"dock",
".",
"extent",
".",
"crs",
")",
"else",
":",
"extent",
"=",
"viewport_geo_array",
"(",
"self",
".",
"iface",
".",
"mapCanvas",
"(",
")",
")",
"extent_string",
"=",
"', '",
".",
"join",
"(",
"(",
"'%f'",
"%",
"x",
")",
"for",
"x",
"in",
"extent",
")",
"exposure_path",
"=",
"self",
".",
"exposure_layer",
".",
"source",
"(",
")",
"hazard_path",
"=",
"self",
".",
"hazard_layer",
".",
"source",
"(",
")",
"title",
"=",
"self",
".",
"keyword_io",
".",
"read_keywords",
"(",
"self",
".",
"hazard_layer",
",",
"'title'",
")",
"title",
"=",
"tr",
"(",
"title",
")",
"default_filename",
"=",
"title",
".",
"replace",
"(",
"' '",
",",
"'_'",
")",
".",
"replace",
"(",
"'('",
",",
"''",
")",
".",
"replace",
"(",
"')'",
",",
"''",
")",
"# Popup a dialog to request the filename if scenario_file_path = None",
"dialog_title",
"=",
"tr",
"(",
"'Save Scenario'",
")",
"if",
"scenario_file_path",
"is",
"None",
":",
"# noinspection PyCallByClass,PyTypeChecker",
"scenario_file_path",
",",
"__",
"=",
"QFileDialog",
".",
"getSaveFileName",
"(",
"self",
",",
"dialog_title",
",",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"output_directory",
",",
"default_filename",
"+",
"'.txt'",
")",
",",
"\"Text files (*.txt)\"",
")",
"if",
"scenario_file_path",
"is",
"None",
"or",
"scenario_file_path",
"==",
"''",
":",
"return",
"self",
".",
"output_directory",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"scenario_file_path",
")",
"# Write to file",
"parser",
"=",
"ConfigParser",
"(",
")",
"parser",
".",
"add_section",
"(",
"title",
")",
"# Relative path is not recognized by the batch runner, so we use",
"# absolute path.",
"parser",
".",
"set",
"(",
"title",
",",
"'exposure'",
",",
"exposure_path",
")",
"parser",
".",
"set",
"(",
"title",
",",
"'hazard'",
",",
"hazard_path",
")",
"parser",
".",
"set",
"(",
"title",
",",
"'extent'",
",",
"extent_string",
")",
"if",
"self",
".",
"dock",
".",
"extent",
".",
"crs",
"is",
"None",
":",
"parser",
".",
"set",
"(",
"title",
",",
"'extent_crs'",
",",
"'EPSG:4326'",
")",
"else",
":",
"parser",
".",
"set",
"(",
"title",
",",
"'extent_crs'",
",",
"self",
".",
"dock",
".",
"extent",
".",
"crs",
".",
"authid",
"(",
")",
")",
"if",
"self",
".",
"aggregation_layer",
"is",
"not",
"None",
":",
"aggregation_path",
"=",
"self",
".",
"aggregation_layer",
".",
"source",
"(",
")",
"relative_aggregation_path",
"=",
"self",
".",
"relative_path",
"(",
"scenario_file_path",
",",
"aggregation_path",
")",
"parser",
".",
"set",
"(",
"title",
",",
"'aggregation'",
",",
"relative_aggregation_path",
")",
"# noinspection PyBroadException",
"try",
":",
"of",
"=",
"open",
"(",
"scenario_file_path",
",",
"'a'",
")",
"parser",
".",
"write",
"(",
"of",
")",
"of",
".",
"close",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"# noinspection PyTypeChecker,PyCallByClass,PyArgumentList",
"QMessageBox",
".",
"warning",
"(",
"self",
",",
"'InaSAFE'",
",",
"tr",
"(",
"'Failed to save scenario to {path}, exception '",
"'{exception}'",
")",
".",
"format",
"(",
"path",
"=",
"scenario_file_path",
",",
"exception",
"=",
"str",
"(",
"e",
")",
")",
")",
"finally",
":",
"of",
".",
"close",
"(",
")",
"# Save State",
"self",
".",
"save_state",
"(",
")"
] | 39.255556 | 19.266667 |
def add_source(self, name, location, schedule='Daily', op=''):
"""
handles the data sources used in projects, mainly as an
abstract to call the data sources in /lib and /dataTools
"""
if op == '':
op = name + '.log'
self.data_sources.append([name, location, schedule, op]) | [
"def",
"add_source",
"(",
"self",
",",
"name",
",",
"location",
",",
"schedule",
"=",
"'Daily'",
",",
"op",
"=",
"''",
")",
":",
"if",
"op",
"==",
"''",
":",
"op",
"=",
"name",
"+",
"'.log'",
"self",
".",
"data_sources",
".",
"append",
"(",
"[",
"name",
",",
"location",
",",
"schedule",
",",
"op",
"]",
")"
] | 40.75 | 15.5 |
async def request_offline_members(self, *guilds):
r"""|coro|
Requests previously offline members from the guild to be filled up
into the :attr:`.Guild.members` cache. This function is usually not
called. It should only be used if you have the ``fetch_offline_members``
parameter set to ``False``.
When the client logs on and connects to the websocket, Discord does
not provide the library with offline members if the number of members
in the guild is larger than 250. You can check if a guild is large
if :attr:`.Guild.large` is ``True``.
Parameters
-----------
\*guilds: :class:`Guild`
An argument list of guilds to request offline members for.
Raises
-------
InvalidArgument
If any guild is unavailable or not large in the collection.
"""
if any(not g.large or g.unavailable for g in guilds):
raise InvalidArgument('An unavailable or non-large guild was passed.')
await self._connection.request_offline_members(guilds) | [
"async",
"def",
"request_offline_members",
"(",
"self",
",",
"*",
"guilds",
")",
":",
"if",
"any",
"(",
"not",
"g",
".",
"large",
"or",
"g",
".",
"unavailable",
"for",
"g",
"in",
"guilds",
")",
":",
"raise",
"InvalidArgument",
"(",
"'An unavailable or non-large guild was passed.'",
")",
"await",
"self",
".",
"_connection",
".",
"request_offline_members",
"(",
"guilds",
")"
] | 39.962963 | 25.851852 |
def editContactItems(self, nickname, **edits):
"""
Update the information on the contact items associated with the wrapped
L{Person}.
@type nickname: C{unicode}
@param nickname: New value to use for the I{name} attribute of the
L{Person}.
@param **edits: mapping from contact type identifiers to
ListChanges instances.
"""
submissions = []
for paramName, submission in edits.iteritems():
contactType = self.contactTypes[paramName]
submissions.append((contactType, submission))
self.person.store.transact(
self.organizer.editPerson,
self.person, nickname, submissions) | [
"def",
"editContactItems",
"(",
"self",
",",
"nickname",
",",
"*",
"*",
"edits",
")",
":",
"submissions",
"=",
"[",
"]",
"for",
"paramName",
",",
"submission",
"in",
"edits",
".",
"iteritems",
"(",
")",
":",
"contactType",
"=",
"self",
".",
"contactTypes",
"[",
"paramName",
"]",
"submissions",
".",
"append",
"(",
"(",
"contactType",
",",
"submission",
")",
")",
"self",
".",
"person",
".",
"store",
".",
"transact",
"(",
"self",
".",
"organizer",
".",
"editPerson",
",",
"self",
".",
"person",
",",
"nickname",
",",
"submissions",
")"
] | 37 | 16.368421 |
def match_trailer(self, tokens, item):
"""Matches typedefs and as patterns."""
internal_assert(len(tokens) > 1 and len(tokens) % 2 == 1, "invalid trailer match tokens", tokens)
match, trailers = tokens[0], tokens[1:]
for i in range(0, len(trailers), 2):
op, arg = trailers[i], trailers[i + 1]
if op == "is":
self.add_check("_coconut.isinstance(" + item + ", " + arg + ")")
elif op == "as":
if arg in self.names:
self.add_check(self.names[arg] + " == " + item)
elif arg != wildcard:
self.add_def(arg + " = " + item)
self.names[arg] = item
else:
raise CoconutInternalException("invalid trailer match operation", op)
self.match(match, item) | [
"def",
"match_trailer",
"(",
"self",
",",
"tokens",
",",
"item",
")",
":",
"internal_assert",
"(",
"len",
"(",
"tokens",
")",
">",
"1",
"and",
"len",
"(",
"tokens",
")",
"%",
"2",
"==",
"1",
",",
"\"invalid trailer match tokens\"",
",",
"tokens",
")",
"match",
",",
"trailers",
"=",
"tokens",
"[",
"0",
"]",
",",
"tokens",
"[",
"1",
":",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"trailers",
")",
",",
"2",
")",
":",
"op",
",",
"arg",
"=",
"trailers",
"[",
"i",
"]",
",",
"trailers",
"[",
"i",
"+",
"1",
"]",
"if",
"op",
"==",
"\"is\"",
":",
"self",
".",
"add_check",
"(",
"\"_coconut.isinstance(\"",
"+",
"item",
"+",
"\", \"",
"+",
"arg",
"+",
"\")\"",
")",
"elif",
"op",
"==",
"\"as\"",
":",
"if",
"arg",
"in",
"self",
".",
"names",
":",
"self",
".",
"add_check",
"(",
"self",
".",
"names",
"[",
"arg",
"]",
"+",
"\" == \"",
"+",
"item",
")",
"elif",
"arg",
"!=",
"wildcard",
":",
"self",
".",
"add_def",
"(",
"arg",
"+",
"\" = \"",
"+",
"item",
")",
"self",
".",
"names",
"[",
"arg",
"]",
"=",
"item",
"else",
":",
"raise",
"CoconutInternalException",
"(",
"\"invalid trailer match operation\"",
",",
"op",
")",
"self",
".",
"match",
"(",
"match",
",",
"item",
")"
] | 49 | 16.352941 |
def store_work_results(self, results, collection, md5):
"""Store the output results of the worker.
Args:
results: a dictionary.
collection: the database collection to store the results in.
md5: the md5 of sample data to be updated.
"""
# Make sure the md5 and time stamp is on the data before storing
results['md5'] = md5
results['__time_stamp'] = datetime.datetime.utcnow()
# If the data doesn't have a 'mod_time' field add one now
if 'mod_time' not in results:
results['mod_time'] = results['__time_stamp']
# Fixme: Occasionally a capped collection will not let you update with a
# larger object, if you have MongoDB 2.6 or above this shouldn't
# really happen, so for now just kinda punting and giving a message.
try:
self.database[collection].update({'md5':md5}, self.clean_for_storage(results), True)
except pymongo.errors.OperationFailure:
#self.database[collection].insert({'md5':md5}, self.clean_for_storage(results), True)
print 'Could not update exising object in capped collection, punting...'
print 'collection: %s md5:%s' % (collection, md5) | [
"def",
"store_work_results",
"(",
"self",
",",
"results",
",",
"collection",
",",
"md5",
")",
":",
"# Make sure the md5 and time stamp is on the data before storing",
"results",
"[",
"'md5'",
"]",
"=",
"md5",
"results",
"[",
"'__time_stamp'",
"]",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"# If the data doesn't have a 'mod_time' field add one now",
"if",
"'mod_time'",
"not",
"in",
"results",
":",
"results",
"[",
"'mod_time'",
"]",
"=",
"results",
"[",
"'__time_stamp'",
"]",
"# Fixme: Occasionally a capped collection will not let you update with a ",
"# larger object, if you have MongoDB 2.6 or above this shouldn't",
"# really happen, so for now just kinda punting and giving a message.",
"try",
":",
"self",
".",
"database",
"[",
"collection",
"]",
".",
"update",
"(",
"{",
"'md5'",
":",
"md5",
"}",
",",
"self",
".",
"clean_for_storage",
"(",
"results",
")",
",",
"True",
")",
"except",
"pymongo",
".",
"errors",
".",
"OperationFailure",
":",
"#self.database[collection].insert({'md5':md5}, self.clean_for_storage(results), True)",
"print",
"'Could not update exising object in capped collection, punting...'",
"print",
"'collection: %s md5:%s'",
"%",
"(",
"collection",
",",
"md5",
")"
] | 46.222222 | 27.37037 |
def validate_attachment_location(self, location):
"""Validate a proposed attachment location.
:arg location: String representing location to put attachment.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Raises an exception if attachment location is bad.
By default, this just forces reasonable characters.
Sub-classes can override as desired.
"""
if not re.compile(self.valid_attachment_loc_re).match(location):
raise ValueError(
'Bad chars in attachment location. Must match %s' % (
self.valid_attachment_loc_re)) | [
"def",
"validate_attachment_location",
"(",
"self",
",",
"location",
")",
":",
"if",
"not",
"re",
".",
"compile",
"(",
"self",
".",
"valid_attachment_loc_re",
")",
".",
"match",
"(",
"location",
")",
":",
"raise",
"ValueError",
"(",
"'Bad chars in attachment location. Must match %s'",
"%",
"(",
"self",
".",
"valid_attachment_loc_re",
")",
")"
] | 41.3125 | 23.8125 |
def GetMetadataAttribute(self, attribute_name):
"""Retrieves the metadata attribute.
Args:
attribute_name (str): name of the metadata attribute.
Returns:
str: the metadata attribute or None.
Raises:
RuntimeError: if more than one value is found in the database.
"""
table_name = 'metadata'
has_table = self._database_file.HasTable(table_name)
if not has_table:
return None
column_names = ['value']
condition = 'name == "{0:s}"'.format(attribute_name)
values = list(self._database_file.GetValues(
[table_name], column_names, condition))
number_of_values = len(values)
if number_of_values == 0:
return None
if number_of_values == 1:
return values[0]['value']
raise RuntimeError('More than one value found in database.') | [
"def",
"GetMetadataAttribute",
"(",
"self",
",",
"attribute_name",
")",
":",
"table_name",
"=",
"'metadata'",
"has_table",
"=",
"self",
".",
"_database_file",
".",
"HasTable",
"(",
"table_name",
")",
"if",
"not",
"has_table",
":",
"return",
"None",
"column_names",
"=",
"[",
"'value'",
"]",
"condition",
"=",
"'name == \"{0:s}\"'",
".",
"format",
"(",
"attribute_name",
")",
"values",
"=",
"list",
"(",
"self",
".",
"_database_file",
".",
"GetValues",
"(",
"[",
"table_name",
"]",
",",
"column_names",
",",
"condition",
")",
")",
"number_of_values",
"=",
"len",
"(",
"values",
")",
"if",
"number_of_values",
"==",
"0",
":",
"return",
"None",
"if",
"number_of_values",
"==",
"1",
":",
"return",
"values",
"[",
"0",
"]",
"[",
"'value'",
"]",
"raise",
"RuntimeError",
"(",
"'More than one value found in database.'",
")"
] | 24.96875 | 21.9375 |
def _draw_ascii_graph(self, field):
'''Draw graph from field double nested list, format field[x][y] = char'''
row_strings = []
for y in range(len(field[0])):
row = ''
for x in range(len(field)):
row += field[x][y]
row_strings.insert(0, row)
graph_string = '\n'.join(row_strings)
return graph_string | [
"def",
"_draw_ascii_graph",
"(",
"self",
",",
"field",
")",
":",
"row_strings",
"=",
"[",
"]",
"for",
"y",
"in",
"range",
"(",
"len",
"(",
"field",
"[",
"0",
"]",
")",
")",
":",
"row",
"=",
"''",
"for",
"x",
"in",
"range",
"(",
"len",
"(",
"field",
")",
")",
":",
"row",
"+=",
"field",
"[",
"x",
"]",
"[",
"y",
"]",
"row_strings",
".",
"insert",
"(",
"0",
",",
"row",
")",
"graph_string",
"=",
"'\\n'",
".",
"join",
"(",
"row_strings",
")",
"return",
"graph_string"
] | 38.1 | 11.1 |
def log_request(self, code='-', size='-'):
# pylint: disable=unused-argument
"""
This function is called during :meth:`send_response`.
We override it to get a little more information logged in a somewhat
better format. We do not use the size method argument.
"""
self.log('%s: HTTP status %s',
(self._get_log_prefix(), code),
logging.INFO) | [
"def",
"log_request",
"(",
"self",
",",
"code",
"=",
"'-'",
",",
"size",
"=",
"'-'",
")",
":",
"# pylint: disable=unused-argument",
"self",
".",
"log",
"(",
"'%s: HTTP status %s'",
",",
"(",
"self",
".",
"_get_log_prefix",
"(",
")",
",",
"code",
")",
",",
"logging",
".",
"INFO",
")"
] | 38.454545 | 13.181818 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.