code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def assign(self, V, py):
"""Store python value in Value
"""
if isinstance(py, (bytes, unicode)):
for i,C in enumerate(V['value.choices'] or self._choices):
if py==C:
V['value.index'] = i
return
# attempt to parse as integer
V['value.index'] = py | def function[assign, parameter[self, V, py]]:
constant[Store python value in Value
]
if call[name[isinstance], parameter[name[py], tuple[[<ast.Name object at 0x7da1b04c9330>, <ast.Name object at 0x7da1b04cbd60>]]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b04cb7c0>, <ast.Name object at 0x7da1b04c95a0>]]] in starred[call[name[enumerate], parameter[<ast.BoolOp object at 0x7da1b04c96c0>]]] begin[:]
if compare[name[py] equal[==] name[C]] begin[:]
call[name[V]][constant[value.index]] assign[=] name[i]
return[None]
call[name[V]][constant[value.index]] assign[=] name[py] | keyword[def] identifier[assign] ( identifier[self] , identifier[V] , identifier[py] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[py] ,( identifier[bytes] , identifier[unicode] )):
keyword[for] identifier[i] , identifier[C] keyword[in] identifier[enumerate] ( identifier[V] [ literal[string] ] keyword[or] identifier[self] . identifier[_choices] ):
keyword[if] identifier[py] == identifier[C] :
identifier[V] [ literal[string] ]= identifier[i]
keyword[return]
identifier[V] [ literal[string] ]= identifier[py] | def assign(self, V, py):
"""Store python value in Value
"""
if isinstance(py, (bytes, unicode)):
for (i, C) in enumerate(V['value.choices'] or self._choices):
if py == C:
V['value.index'] = i
return # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
# attempt to parse as integer
V['value.index'] = py |
def filter_paths(pathnames,
included_patterns=None,
excluded_patterns=None,
case_sensitive=True):
"""
Filters from a set of paths based on acceptable patterns and
ignorable patterns.
:param pathnames:
A list of path names that will be filtered based on matching and
ignored patterns.
:param included_patterns:
Allow filenames matching wildcard patterns specified in this list.
If no pattern list is specified, ["*"] is used as the default pattern,
which matches all files.
:param excluded_patterns:
Ignores filenames matching wildcard patterns specified in this list.
If no pattern list is specified, no files are ignored.
:param case_sensitive:
``True`` if matching should be case-sensitive; ``False`` otherwise.
:returns:
A list of pathnames that matched the allowable patterns and passed
through the ignored patterns.
Doctests::
>>> pathnames = set(["/users/gorakhargosh/foobar.py", "/var/cache/pdnsd.status", "/etc/pdnsd.conf", "/usr/local/bin/python"])
>>> set(filter_paths(pathnames)) == pathnames
True
>>> set(filter_paths(pathnames, case_sensitive=False)) == pathnames
True
>>> set(filter_paths(pathnames, ["*.py", "*.conf"], ["*.status"], case_sensitive=True)) == set(["/users/gorakhargosh/foobar.py", "/etc/pdnsd.conf"])
True
"""
included = ["*"] if included_patterns is None else included_patterns
excluded = [] if excluded_patterns is None else excluded_patterns
for pathname in pathnames:
# We don't call the public match_path because it checks arguments
# and sets default values if none are found. We're already doing that
# above.
if _match_path(pathname, included, excluded, case_sensitive):
yield pathname | def function[filter_paths, parameter[pathnames, included_patterns, excluded_patterns, case_sensitive]]:
constant[
Filters from a set of paths based on acceptable patterns and
ignorable patterns.
:param pathnames:
A list of path names that will be filtered based on matching and
ignored patterns.
:param included_patterns:
Allow filenames matching wildcard patterns specified in this list.
If no pattern list is specified, ["*"] is used as the default pattern,
which matches all files.
:param excluded_patterns:
Ignores filenames matching wildcard patterns specified in this list.
If no pattern list is specified, no files are ignored.
:param case_sensitive:
``True`` if matching should be case-sensitive; ``False`` otherwise.
:returns:
A list of pathnames that matched the allowable patterns and passed
through the ignored patterns.
Doctests::
>>> pathnames = set(["/users/gorakhargosh/foobar.py", "/var/cache/pdnsd.status", "/etc/pdnsd.conf", "/usr/local/bin/python"])
>>> set(filter_paths(pathnames)) == pathnames
True
>>> set(filter_paths(pathnames, case_sensitive=False)) == pathnames
True
>>> set(filter_paths(pathnames, ["*.py", "*.conf"], ["*.status"], case_sensitive=True)) == set(["/users/gorakhargosh/foobar.py", "/etc/pdnsd.conf"])
True
]
variable[included] assign[=] <ast.IfExp object at 0x7da18f09e6b0>
variable[excluded] assign[=] <ast.IfExp object at 0x7da18f09d060>
for taget[name[pathname]] in starred[name[pathnames]] begin[:]
if call[name[_match_path], parameter[name[pathname], name[included], name[excluded], name[case_sensitive]]] begin[:]
<ast.Yield object at 0x7da18f09eb60> | keyword[def] identifier[filter_paths] ( identifier[pathnames] ,
identifier[included_patterns] = keyword[None] ,
identifier[excluded_patterns] = keyword[None] ,
identifier[case_sensitive] = keyword[True] ):
literal[string]
identifier[included] =[ literal[string] ] keyword[if] identifier[included_patterns] keyword[is] keyword[None] keyword[else] identifier[included_patterns]
identifier[excluded] =[] keyword[if] identifier[excluded_patterns] keyword[is] keyword[None] keyword[else] identifier[excluded_patterns]
keyword[for] identifier[pathname] keyword[in] identifier[pathnames] :
keyword[if] identifier[_match_path] ( identifier[pathname] , identifier[included] , identifier[excluded] , identifier[case_sensitive] ):
keyword[yield] identifier[pathname] | def filter_paths(pathnames, included_patterns=None, excluded_patterns=None, case_sensitive=True):
"""
Filters from a set of paths based on acceptable patterns and
ignorable patterns.
:param pathnames:
A list of path names that will be filtered based on matching and
ignored patterns.
:param included_patterns:
Allow filenames matching wildcard patterns specified in this list.
If no pattern list is specified, ["*"] is used as the default pattern,
which matches all files.
:param excluded_patterns:
Ignores filenames matching wildcard patterns specified in this list.
If no pattern list is specified, no files are ignored.
:param case_sensitive:
``True`` if matching should be case-sensitive; ``False`` otherwise.
:returns:
A list of pathnames that matched the allowable patterns and passed
through the ignored patterns.
Doctests::
>>> pathnames = set(["/users/gorakhargosh/foobar.py", "/var/cache/pdnsd.status", "/etc/pdnsd.conf", "/usr/local/bin/python"])
>>> set(filter_paths(pathnames)) == pathnames
True
>>> set(filter_paths(pathnames, case_sensitive=False)) == pathnames
True
>>> set(filter_paths(pathnames, ["*.py", "*.conf"], ["*.status"], case_sensitive=True)) == set(["/users/gorakhargosh/foobar.py", "/etc/pdnsd.conf"])
True
"""
included = ['*'] if included_patterns is None else included_patterns
excluded = [] if excluded_patterns is None else excluded_patterns
for pathname in pathnames:
# We don't call the public match_path because it checks arguments
# and sets default values if none are found. We're already doing that
# above.
if _match_path(pathname, included, excluded, case_sensitive):
yield pathname # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['pathname']] |
def list(self,walkTrace=tuple(),case=None,element=None):
"""List section titles.
"""
if case == 'sectionmain': print(walkTrace,self.title) | def function[list, parameter[self, walkTrace, case, element]]:
constant[List section titles.
]
if compare[name[case] equal[==] constant[sectionmain]] begin[:]
call[name[print], parameter[name[walkTrace], name[self].title]] | keyword[def] identifier[list] ( identifier[self] , identifier[walkTrace] = identifier[tuple] (), identifier[case] = keyword[None] , identifier[element] = keyword[None] ):
literal[string]
keyword[if] identifier[case] == literal[string] : identifier[print] ( identifier[walkTrace] , identifier[self] . identifier[title] ) | def list(self, walkTrace=tuple(), case=None, element=None):
"""List section titles.
"""
if case == 'sectionmain':
print(walkTrace, self.title) # depends on [control=['if'], data=[]] |
def _dump(self, tag, x, lo, hi):
"""Generate comparison results for a same-tagged range."""
for i in xrange(lo, hi):
yield '%s %s' % (tag, x[i]) | def function[_dump, parameter[self, tag, x, lo, hi]]:
constant[Generate comparison results for a same-tagged range.]
for taget[name[i]] in starred[call[name[xrange], parameter[name[lo], name[hi]]]] begin[:]
<ast.Yield object at 0x7da18dc05360> | keyword[def] identifier[_dump] ( identifier[self] , identifier[tag] , identifier[x] , identifier[lo] , identifier[hi] ):
literal[string]
keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[lo] , identifier[hi] ):
keyword[yield] literal[string] %( identifier[tag] , identifier[x] [ identifier[i] ]) | def _dump(self, tag, x, lo, hi):
"""Generate comparison results for a same-tagged range."""
for i in xrange(lo, hi):
yield ('%s %s' % (tag, x[i])) # depends on [control=['for'], data=['i']] |
def calculate(ctx, slot, challenge, totp, digits):
"""
Perform a challenge-response operation.
Send a challenge (in hex) to a YubiKey slot with a challenge-response
credential, and read the response. Supports output as a OATH-TOTP code.
"""
controller = ctx.obj['controller']
if not challenge and not totp:
ctx.fail('No challenge provided.')
# Check that slot is not empty
slot1, slot2 = controller.slot_status
if (slot == 1 and not slot1) or (slot == 2 and not slot2):
ctx.fail('Cannot perform challenge-response on an empty slot.')
# Timestamp challenge should be int
if challenge and totp:
try:
challenge = int(challenge)
except Exception as e:
logger.error('Error', exc_info=e)
ctx.fail('Timestamp challenge for TOTP must be an integer.')
try:
res = controller.calculate(
slot, challenge, totp=totp,
digits=int(digits), wait_for_touch=False)
except YkpersError as e:
# Touch is set
if e.errno == 11:
prompt_for_touch()
try:
res = controller.calculate(
slot, challenge, totp=totp,
digits=int(digits), wait_for_touch=True)
except YkpersError as e:
# Touch timed out
if e.errno == 4:
ctx.fail('The YubiKey timed out.')
else:
ctx.fail(e)
else:
ctx.fail('Failed to calculate challenge.')
click.echo(res) | def function[calculate, parameter[ctx, slot, challenge, totp, digits]]:
constant[
Perform a challenge-response operation.
Send a challenge (in hex) to a YubiKey slot with a challenge-response
credential, and read the response. Supports output as a OATH-TOTP code.
]
variable[controller] assign[=] call[name[ctx].obj][constant[controller]]
if <ast.BoolOp object at 0x7da1b2344fa0> begin[:]
call[name[ctx].fail, parameter[constant[No challenge provided.]]]
<ast.Tuple object at 0x7da1b2346ec0> assign[=] name[controller].slot_status
if <ast.BoolOp object at 0x7da1b2347d90> begin[:]
call[name[ctx].fail, parameter[constant[Cannot perform challenge-response on an empty slot.]]]
if <ast.BoolOp object at 0x7da1b2345690> begin[:]
<ast.Try object at 0x7da1b2346dd0>
<ast.Try object at 0x7da1b2346bc0>
call[name[click].echo, parameter[name[res]]] | keyword[def] identifier[calculate] ( identifier[ctx] , identifier[slot] , identifier[challenge] , identifier[totp] , identifier[digits] ):
literal[string]
identifier[controller] = identifier[ctx] . identifier[obj] [ literal[string] ]
keyword[if] keyword[not] identifier[challenge] keyword[and] keyword[not] identifier[totp] :
identifier[ctx] . identifier[fail] ( literal[string] )
identifier[slot1] , identifier[slot2] = identifier[controller] . identifier[slot_status]
keyword[if] ( identifier[slot] == literal[int] keyword[and] keyword[not] identifier[slot1] ) keyword[or] ( identifier[slot] == literal[int] keyword[and] keyword[not] identifier[slot2] ):
identifier[ctx] . identifier[fail] ( literal[string] )
keyword[if] identifier[challenge] keyword[and] identifier[totp] :
keyword[try] :
identifier[challenge] = identifier[int] ( identifier[challenge] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[logger] . identifier[error] ( literal[string] , identifier[exc_info] = identifier[e] )
identifier[ctx] . identifier[fail] ( literal[string] )
keyword[try] :
identifier[res] = identifier[controller] . identifier[calculate] (
identifier[slot] , identifier[challenge] , identifier[totp] = identifier[totp] ,
identifier[digits] = identifier[int] ( identifier[digits] ), identifier[wait_for_touch] = keyword[False] )
keyword[except] identifier[YkpersError] keyword[as] identifier[e] :
keyword[if] identifier[e] . identifier[errno] == literal[int] :
identifier[prompt_for_touch] ()
keyword[try] :
identifier[res] = identifier[controller] . identifier[calculate] (
identifier[slot] , identifier[challenge] , identifier[totp] = identifier[totp] ,
identifier[digits] = identifier[int] ( identifier[digits] ), identifier[wait_for_touch] = keyword[True] )
keyword[except] identifier[YkpersError] keyword[as] identifier[e] :
keyword[if] identifier[e] . identifier[errno] == literal[int] :
identifier[ctx] . identifier[fail] ( literal[string] )
keyword[else] :
identifier[ctx] . identifier[fail] ( identifier[e] )
keyword[else] :
identifier[ctx] . identifier[fail] ( literal[string] )
identifier[click] . identifier[echo] ( identifier[res] ) | def calculate(ctx, slot, challenge, totp, digits):
"""
Perform a challenge-response operation.
Send a challenge (in hex) to a YubiKey slot with a challenge-response
credential, and read the response. Supports output as a OATH-TOTP code.
"""
controller = ctx.obj['controller']
if not challenge and (not totp):
ctx.fail('No challenge provided.') # depends on [control=['if'], data=[]]
# Check that slot is not empty
(slot1, slot2) = controller.slot_status
if slot == 1 and (not slot1) or (slot == 2 and (not slot2)):
ctx.fail('Cannot perform challenge-response on an empty slot.') # depends on [control=['if'], data=[]]
# Timestamp challenge should be int
if challenge and totp:
try:
challenge = int(challenge) # depends on [control=['try'], data=[]]
except Exception as e:
logger.error('Error', exc_info=e)
ctx.fail('Timestamp challenge for TOTP must be an integer.') # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]]
try:
res = controller.calculate(slot, challenge, totp=totp, digits=int(digits), wait_for_touch=False) # depends on [control=['try'], data=[]]
except YkpersError as e:
# Touch is set
if e.errno == 11:
prompt_for_touch()
try:
res = controller.calculate(slot, challenge, totp=totp, digits=int(digits), wait_for_touch=True) # depends on [control=['try'], data=[]]
except YkpersError as e:
# Touch timed out
if e.errno == 4:
ctx.fail('The YubiKey timed out.') # depends on [control=['if'], data=[]]
else:
ctx.fail(e) # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]]
else:
ctx.fail('Failed to calculate challenge.') # depends on [control=['except'], data=['e']]
click.echo(res) |
def remove_reaction(self, reaction):
"""Remove reaction from model"""
if reaction not in self._reaction_set:
return
self._reaction_set.remove(reaction)
self._limits_lower.pop(reaction, None)
self._limits_upper.pop(reaction, None)
# Remove compound from compound_set if it is not referenced
# by any other reactions in the model.
for compound, value in self._database.get_reaction_values(reaction):
reactions = frozenset(
self._database.get_compound_reactions(compound))
if all(other_reaction not in self._reaction_set
for other_reaction in reactions):
self._compound_set.remove(compound) | def function[remove_reaction, parameter[self, reaction]]:
constant[Remove reaction from model]
if compare[name[reaction] <ast.NotIn object at 0x7da2590d7190> name[self]._reaction_set] begin[:]
return[None]
call[name[self]._reaction_set.remove, parameter[name[reaction]]]
call[name[self]._limits_lower.pop, parameter[name[reaction], constant[None]]]
call[name[self]._limits_upper.pop, parameter[name[reaction], constant[None]]]
for taget[tuple[[<ast.Name object at 0x7da2046201f0>, <ast.Name object at 0x7da204620940>]]] in starred[call[name[self]._database.get_reaction_values, parameter[name[reaction]]]] begin[:]
variable[reactions] assign[=] call[name[frozenset], parameter[call[name[self]._database.get_compound_reactions, parameter[name[compound]]]]]
if call[name[all], parameter[<ast.GeneratorExp object at 0x7da204620790>]] begin[:]
call[name[self]._compound_set.remove, parameter[name[compound]]] | keyword[def] identifier[remove_reaction] ( identifier[self] , identifier[reaction] ):
literal[string]
keyword[if] identifier[reaction] keyword[not] keyword[in] identifier[self] . identifier[_reaction_set] :
keyword[return]
identifier[self] . identifier[_reaction_set] . identifier[remove] ( identifier[reaction] )
identifier[self] . identifier[_limits_lower] . identifier[pop] ( identifier[reaction] , keyword[None] )
identifier[self] . identifier[_limits_upper] . identifier[pop] ( identifier[reaction] , keyword[None] )
keyword[for] identifier[compound] , identifier[value] keyword[in] identifier[self] . identifier[_database] . identifier[get_reaction_values] ( identifier[reaction] ):
identifier[reactions] = identifier[frozenset] (
identifier[self] . identifier[_database] . identifier[get_compound_reactions] ( identifier[compound] ))
keyword[if] identifier[all] ( identifier[other_reaction] keyword[not] keyword[in] identifier[self] . identifier[_reaction_set]
keyword[for] identifier[other_reaction] keyword[in] identifier[reactions] ):
identifier[self] . identifier[_compound_set] . identifier[remove] ( identifier[compound] ) | def remove_reaction(self, reaction):
"""Remove reaction from model"""
if reaction not in self._reaction_set:
return # depends on [control=['if'], data=[]]
self._reaction_set.remove(reaction)
self._limits_lower.pop(reaction, None)
self._limits_upper.pop(reaction, None)
# Remove compound from compound_set if it is not referenced
# by any other reactions in the model.
for (compound, value) in self._database.get_reaction_values(reaction):
reactions = frozenset(self._database.get_compound_reactions(compound))
if all((other_reaction not in self._reaction_set for other_reaction in reactions)):
self._compound_set.remove(compound) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] |
def on_shutdown(self, callback: callable, polling=True, webhook=True):
"""
Register a callback for the shutdown process
:param callback:
:param polling: use with polling
:param webhook: use with webhook
"""
self._check_frozen()
if not webhook and not polling:
warn('This action has no effect!', UserWarning)
return
if isinstance(callback, (list, tuple, set)):
for cb in callback:
self.on_shutdown(cb, polling, webhook)
return
if polling:
self._on_shutdown_polling.append(callback)
if webhook:
self._on_shutdown_webhook.append(callback) | def function[on_shutdown, parameter[self, callback, polling, webhook]]:
constant[
Register a callback for the shutdown process
:param callback:
:param polling: use with polling
:param webhook: use with webhook
]
call[name[self]._check_frozen, parameter[]]
if <ast.BoolOp object at 0x7da1b18fe020> begin[:]
call[name[warn], parameter[constant[This action has no effect!], name[UserWarning]]]
return[None]
if call[name[isinstance], parameter[name[callback], tuple[[<ast.Name object at 0x7da1b18fdc30>, <ast.Name object at 0x7da1b18fc8e0>, <ast.Name object at 0x7da1b18fc3d0>]]]] begin[:]
for taget[name[cb]] in starred[name[callback]] begin[:]
call[name[self].on_shutdown, parameter[name[cb], name[polling], name[webhook]]]
return[None]
if name[polling] begin[:]
call[name[self]._on_shutdown_polling.append, parameter[name[callback]]]
if name[webhook] begin[:]
call[name[self]._on_shutdown_webhook.append, parameter[name[callback]]] | keyword[def] identifier[on_shutdown] ( identifier[self] , identifier[callback] : identifier[callable] , identifier[polling] = keyword[True] , identifier[webhook] = keyword[True] ):
literal[string]
identifier[self] . identifier[_check_frozen] ()
keyword[if] keyword[not] identifier[webhook] keyword[and] keyword[not] identifier[polling] :
identifier[warn] ( literal[string] , identifier[UserWarning] )
keyword[return]
keyword[if] identifier[isinstance] ( identifier[callback] ,( identifier[list] , identifier[tuple] , identifier[set] )):
keyword[for] identifier[cb] keyword[in] identifier[callback] :
identifier[self] . identifier[on_shutdown] ( identifier[cb] , identifier[polling] , identifier[webhook] )
keyword[return]
keyword[if] identifier[polling] :
identifier[self] . identifier[_on_shutdown_polling] . identifier[append] ( identifier[callback] )
keyword[if] identifier[webhook] :
identifier[self] . identifier[_on_shutdown_webhook] . identifier[append] ( identifier[callback] ) | def on_shutdown(self, callback: callable, polling=True, webhook=True):
"""
Register a callback for the shutdown process
:param callback:
:param polling: use with polling
:param webhook: use with webhook
"""
self._check_frozen()
if not webhook and (not polling):
warn('This action has no effect!', UserWarning)
return # depends on [control=['if'], data=[]]
if isinstance(callback, (list, tuple, set)):
for cb in callback:
self.on_shutdown(cb, polling, webhook) # depends on [control=['for'], data=['cb']]
return # depends on [control=['if'], data=[]]
if polling:
self._on_shutdown_polling.append(callback) # depends on [control=['if'], data=[]]
if webhook:
self._on_shutdown_webhook.append(callback) # depends on [control=['if'], data=[]] |
def load_subcat_info( subcat_lex_file ):
''' Loads subcategorization rules (for verbs and adpositions) from a text
file.
It is expected that the rules are given as pairs, where the first item is
the lemma (of verb/adposition), followed on the next line by the
subcategorization rule, in the following form:
on the left side of '>' is the condition (POS-tag requirement for the
lemma),
and
on the right side is the listing of subcategorization settings (hashtag
items, e.g. names of morphological cases of nominals);
If there are multiple subcategorization rules to be associated with a
single lemma, different rules are separated by '&'.
Example, an excerpt from the rules file:
läbi
_V_ >#Part &_K_ post >#gen |#nom |#el &_K_ pre >#gen
läbista
_V_ >#NGP-P
läbistu
_V_ >#Intr
Returns a dict of lemma to a-list-of-subcatrules mappings.
'''
rules = {}
nonSpacePattern = re.compile('^\S+$')
posTagPattern = re.compile('_._')
in_f = codecs.open(subcat_lex_file, mode='r', encoding='utf-8')
lemma = ''
subcatRules = ''
for line in in_f:
line = line.rstrip()
if nonSpacePattern.match(line) and not posTagPattern.search(line):
lemma = line
elif posTagPattern.search(line):
subcatRules = line
if len(lemma) > 0 and len(subcatRules) > 0:
if lemma not in rules:
rules[lemma] = []
parts = subcatRules.split('&')
for part in parts:
part = part.strip()
rules[lemma].append( part )
lemma = ''
subcatRules = ''
in_f.close()
#print( len(rules.keys()) ) # 4484
return rules | def function[load_subcat_info, parameter[subcat_lex_file]]:
constant[ Loads subcategorization rules (for verbs and adpositions) from a text
file.
It is expected that the rules are given as pairs, where the first item is
the lemma (of verb/adposition), followed on the next line by the
subcategorization rule, in the following form:
on the left side of '>' is the condition (POS-tag requirement for the
lemma),
and
on the right side is the listing of subcategorization settings (hashtag
items, e.g. names of morphological cases of nominals);
If there are multiple subcategorization rules to be associated with a
single lemma, different rules are separated by '&'.
Example, an excerpt from the rules file:
läbi
_V_ >#Part &_K_ post >#gen |#nom |#el &_K_ pre >#gen
läbista
_V_ >#NGP-P
läbistu
_V_ >#Intr
Returns a dict of lemma to a-list-of-subcatrules mappings.
]
variable[rules] assign[=] dictionary[[], []]
variable[nonSpacePattern] assign[=] call[name[re].compile, parameter[constant[^\S+$]]]
variable[posTagPattern] assign[=] call[name[re].compile, parameter[constant[_._]]]
variable[in_f] assign[=] call[name[codecs].open, parameter[name[subcat_lex_file]]]
variable[lemma] assign[=] constant[]
variable[subcatRules] assign[=] constant[]
for taget[name[line]] in starred[name[in_f]] begin[:]
variable[line] assign[=] call[name[line].rstrip, parameter[]]
if <ast.BoolOp object at 0x7da207f99ab0> begin[:]
variable[lemma] assign[=] name[line]
if <ast.BoolOp object at 0x7da207f9ae90> begin[:]
if compare[name[lemma] <ast.NotIn object at 0x7da2590d7190> name[rules]] begin[:]
call[name[rules]][name[lemma]] assign[=] list[[]]
variable[parts] assign[=] call[name[subcatRules].split, parameter[constant[&]]]
for taget[name[part]] in starred[name[parts]] begin[:]
variable[part] assign[=] call[name[part].strip, parameter[]]
call[call[name[rules]][name[lemma]].append, parameter[name[part]]]
variable[lemma] assign[=] constant[]
variable[subcatRules] assign[=] constant[]
call[name[in_f].close, parameter[]]
return[name[rules]] | keyword[def] identifier[load_subcat_info] ( identifier[subcat_lex_file] ):
literal[string]
identifier[rules] ={}
identifier[nonSpacePattern] = identifier[re] . identifier[compile] ( literal[string] )
identifier[posTagPattern] = identifier[re] . identifier[compile] ( literal[string] )
identifier[in_f] = identifier[codecs] . identifier[open] ( identifier[subcat_lex_file] , identifier[mode] = literal[string] , identifier[encoding] = literal[string] )
identifier[lemma] = literal[string]
identifier[subcatRules] = literal[string]
keyword[for] identifier[line] keyword[in] identifier[in_f] :
identifier[line] = identifier[line] . identifier[rstrip] ()
keyword[if] identifier[nonSpacePattern] . identifier[match] ( identifier[line] ) keyword[and] keyword[not] identifier[posTagPattern] . identifier[search] ( identifier[line] ):
identifier[lemma] = identifier[line]
keyword[elif] identifier[posTagPattern] . identifier[search] ( identifier[line] ):
identifier[subcatRules] = identifier[line]
keyword[if] identifier[len] ( identifier[lemma] )> literal[int] keyword[and] identifier[len] ( identifier[subcatRules] )> literal[int] :
keyword[if] identifier[lemma] keyword[not] keyword[in] identifier[rules] :
identifier[rules] [ identifier[lemma] ]=[]
identifier[parts] = identifier[subcatRules] . identifier[split] ( literal[string] )
keyword[for] identifier[part] keyword[in] identifier[parts] :
identifier[part] = identifier[part] . identifier[strip] ()
identifier[rules] [ identifier[lemma] ]. identifier[append] ( identifier[part] )
identifier[lemma] = literal[string]
identifier[subcatRules] = literal[string]
identifier[in_f] . identifier[close] ()
keyword[return] identifier[rules] | def load_subcat_info(subcat_lex_file):
""" Loads subcategorization rules (for verbs and adpositions) from a text
file.
It is expected that the rules are given as pairs, where the first item is
the lemma (of verb/adposition), followed on the next line by the
subcategorization rule, in the following form:
on the left side of '>' is the condition (POS-tag requirement for the
lemma),
and
on the right side is the listing of subcategorization settings (hashtag
items, e.g. names of morphological cases of nominals);
If there are multiple subcategorization rules to be associated with a
single lemma, different rules are separated by '&'.
Example, an excerpt from the rules file:
läbi
_V_ >#Part &_K_ post >#gen |#nom |#el &_K_ pre >#gen
läbista
_V_ >#NGP-P
läbistu
_V_ >#Intr
Returns a dict of lemma to a-list-of-subcatrules mappings.
"""
rules = {}
nonSpacePattern = re.compile('^\\S+$')
posTagPattern = re.compile('_._')
in_f = codecs.open(subcat_lex_file, mode='r', encoding='utf-8')
lemma = ''
subcatRules = ''
for line in in_f:
line = line.rstrip()
if nonSpacePattern.match(line) and (not posTagPattern.search(line)):
lemma = line # depends on [control=['if'], data=[]]
elif posTagPattern.search(line):
subcatRules = line # depends on [control=['if'], data=[]]
if len(lemma) > 0 and len(subcatRules) > 0:
if lemma not in rules:
rules[lemma] = [] # depends on [control=['if'], data=['lemma', 'rules']]
parts = subcatRules.split('&')
for part in parts:
part = part.strip()
rules[lemma].append(part) # depends on [control=['for'], data=['part']]
lemma = ''
subcatRules = '' # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']]
in_f.close()
#print( len(rules.keys()) ) # 4484
return rules |
def deleteLayerNode(self, layername, nodeNum):
"""
Removes a particular unit/node from a layer.
"""
# first, construct an array of all of the weights
# that won't be deleted:
gene = []
for layer in self.layers:
if layer.type != 'Input':
for i in range(layer.size):
if layer.name == layername and i == nodeNum:
pass # skip it
else:
gene.append(layer.weight[i])
for connection in self.connections:
for i in range(connection.fromLayer.size):
for j in range(connection.toLayer.size):
if ((connection.fromLayer.name == layername and i == nodeNum) or
(connection.toLayer.name == layername and j == nodeNum)):
pass # skip weights from/to nodeNum
else:
gene.append(connection.weight[i][j])
# now, change the size (removes rightmost node):
self.changeLayerSize(layername, self[layername].size - 1)
# and put the good weights where they go:
self.unArrayify(gene) | def function[deleteLayerNode, parameter[self, layername, nodeNum]]:
constant[
Removes a particular unit/node from a layer.
]
variable[gene] assign[=] list[[]]
for taget[name[layer]] in starred[name[self].layers] begin[:]
if compare[name[layer].type not_equal[!=] constant[Input]] begin[:]
for taget[name[i]] in starred[call[name[range], parameter[name[layer].size]]] begin[:]
if <ast.BoolOp object at 0x7da1b0472950> begin[:]
pass
for taget[name[connection]] in starred[name[self].connections] begin[:]
for taget[name[i]] in starred[call[name[range], parameter[name[connection].fromLayer.size]]] begin[:]
for taget[name[j]] in starred[call[name[range], parameter[name[connection].toLayer.size]]] begin[:]
if <ast.BoolOp object at 0x7da1b0473880> begin[:]
pass
call[name[self].changeLayerSize, parameter[name[layername], binary_operation[call[name[self]][name[layername]].size - constant[1]]]]
call[name[self].unArrayify, parameter[name[gene]]] | keyword[def] identifier[deleteLayerNode] ( identifier[self] , identifier[layername] , identifier[nodeNum] ):
literal[string]
identifier[gene] =[]
keyword[for] identifier[layer] keyword[in] identifier[self] . identifier[layers] :
keyword[if] identifier[layer] . identifier[type] != literal[string] :
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[layer] . identifier[size] ):
keyword[if] identifier[layer] . identifier[name] == identifier[layername] keyword[and] identifier[i] == identifier[nodeNum] :
keyword[pass]
keyword[else] :
identifier[gene] . identifier[append] ( identifier[layer] . identifier[weight] [ identifier[i] ])
keyword[for] identifier[connection] keyword[in] identifier[self] . identifier[connections] :
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[connection] . identifier[fromLayer] . identifier[size] ):
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[connection] . identifier[toLayer] . identifier[size] ):
keyword[if] (( identifier[connection] . identifier[fromLayer] . identifier[name] == identifier[layername] keyword[and] identifier[i] == identifier[nodeNum] ) keyword[or]
( identifier[connection] . identifier[toLayer] . identifier[name] == identifier[layername] keyword[and] identifier[j] == identifier[nodeNum] )):
keyword[pass]
keyword[else] :
identifier[gene] . identifier[append] ( identifier[connection] . identifier[weight] [ identifier[i] ][ identifier[j] ])
identifier[self] . identifier[changeLayerSize] ( identifier[layername] , identifier[self] [ identifier[layername] ]. identifier[size] - literal[int] )
identifier[self] . identifier[unArrayify] ( identifier[gene] ) | def deleteLayerNode(self, layername, nodeNum):
"""
Removes a particular unit/node from a layer.
"""
# first, construct an array of all of the weights
# that won't be deleted:
gene = []
for layer in self.layers:
if layer.type != 'Input':
for i in range(layer.size):
if layer.name == layername and i == nodeNum:
pass # skip it # depends on [control=['if'], data=[]]
else:
gene.append(layer.weight[i]) # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['layer']]
for connection in self.connections:
for i in range(connection.fromLayer.size):
for j in range(connection.toLayer.size):
if connection.fromLayer.name == layername and i == nodeNum or (connection.toLayer.name == layername and j == nodeNum):
pass # skip weights from/to nodeNum # depends on [control=['if'], data=[]]
else:
gene.append(connection.weight[i][j]) # depends on [control=['for'], data=['j']] # depends on [control=['for'], data=['i']] # depends on [control=['for'], data=['connection']]
# now, change the size (removes rightmost node):
self.changeLayerSize(layername, self[layername].size - 1)
# and put the good weights where they go:
self.unArrayify(gene) |
def mouseReleaseEvent(self, event):
    """
    Emits mouse_released signal.
    :param event: QMouseEvent
    """
    # Remember whether the event was accepted before any slot runs.
    was_accepted = event.isAccepted()
    event.ignore()
    self.mouse_released.emit(event)
    # If no connected slot accepted the event, restore its original state
    # so the default Qt handling is not altered by the signal emission.
    if not event.isAccepted():
        event.setAccepted(was_accepted)
    super(CodeEdit, self).mouseReleaseEvent(event)
constant[
Emits mouse_released signal.
:param event: QMouseEvent
]
variable[initial_state] assign[=] call[name[event].isAccepted, parameter[]]
call[name[event].ignore, parameter[]]
call[name[self].mouse_released.emit, parameter[name[event]]]
if <ast.UnaryOp object at 0x7da20c7cba00> begin[:]
call[name[event].setAccepted, parameter[name[initial_state]]]
call[call[name[super], parameter[name[CodeEdit], name[self]]].mouseReleaseEvent, parameter[name[event]]] | keyword[def] identifier[mouseReleaseEvent] ( identifier[self] , identifier[event] ):
literal[string]
identifier[initial_state] = identifier[event] . identifier[isAccepted] ()
identifier[event] . identifier[ignore] ()
identifier[self] . identifier[mouse_released] . identifier[emit] ( identifier[event] )
keyword[if] keyword[not] identifier[event] . identifier[isAccepted] ():
identifier[event] . identifier[setAccepted] ( identifier[initial_state] )
identifier[super] ( identifier[CodeEdit] , identifier[self] ). identifier[mouseReleaseEvent] ( identifier[event] ) | def mouseReleaseEvent(self, event):
"""
Emits mouse_released signal.
:param event: QMouseEvent
"""
initial_state = event.isAccepted()
event.ignore()
self.mouse_released.emit(event)
if not event.isAccepted():
event.setAccepted(initial_state)
super(CodeEdit, self).mouseReleaseEvent(event) # depends on [control=['if'], data=[]] |
def _parse_msg_for_influxdb(self, msgs):
    '''
    >>> from logagg.forwarders import InfluxDBForwarder
    >>> idbf = InfluxDBForwarder('no_host', '8086', 'deadpool',
    ... 'chimichanga', 'logs', 'collection')
    >>> valid_log = [{u'data': {u'_force_this_as_field': 'CXNS CNS nbkbsd',
    ... u'a': 1,
    ... u'b': 2,
    ... u'msg': u'this is a dummy log'},
    ... u'error': False,
    ... u'error_tb': u'',
    ... u'event': u'some_log',
    ... u'file': u'/var/log/sample.log',
    ... u'formatter': u'logagg.formatters.basescript',
    ... u'host': u'deepcompute',
    ... u'id': u'20180409T095924_aec36d313bdc11e89da654e1ad04f45e',
    ... u'level': u'info',
    ... u'raw': u'{...}',
    ... u'timestamp': u'2018-04-09T09:59:24.733945Z',
    ... u'type': u'metric'}]
    >>> pointvalues = idbf._parse_msg_for_influxdb(valid_log)
    >>> from pprint import pprint
    >>> pprint(pointvalues)
    [{'fields': {u'data._force_this_as_field': "'CXNS CNS nbkbsd'",
    u'data.a': 1,
    u'data.b': 2},
    'measurement': u'some_log',
    'tags': {u'data.msg': u'this is a dummy log',
    u'error_tb': u'',
    u'file': u'/var/log/sample.log',
    u'formatter': u'logagg.formatters.basescript',
    u'host': u'deepcompute',
    u'level': u'info'},
    'time': u'2018-04-09T09:59:24.733945Z'}]
    >>> invalid_log = valid_log
    >>> invalid_log[0]['error'] = True
    >>> pointvalues = idbf._parse_msg_for_influxdb(invalid_log)
    >>> pprint(pointvalues)
    []
    >>> invalid_log = valid_log
    >>> invalid_log[0]['type'] = 'log'
    >>> pointvalues = idbf._parse_msg_for_influxdb(invalid_log)
    >>> pprint(pointvalues)
    []
    '''
    points = []
    for record in msgs:
        # Skip records flagged as errors.
        if record.get('error'):
            continue
        # Only messages of type 'metric' are converted to InfluxDB points.
        if record.get('type').lower() != 'metric':
            continue
        record_tags, record_fields = self._tag_and_field_maker(record)
        points.append({
            "time": record.get('timestamp'),
            "measurement": record.get('event'),
            "fields": record_fields,
            "tags": record_tags})
    return points
constant[
>>> from logagg.forwarders import InfluxDBForwarder
>>> idbf = InfluxDBForwarder('no_host', '8086', 'deadpool',
... 'chimichanga', 'logs', 'collection')
>>> valid_log = [{u'data': {u'_force_this_as_field': 'CXNS CNS nbkbsd',
... u'a': 1,
... u'b': 2,
... u'msg': u'this is a dummy log'},
... u'error': False,
... u'error_tb': u'',
... u'event': u'some_log',
... u'file': u'/var/log/sample.log',
... u'formatter': u'logagg.formatters.basescript',
... u'host': u'deepcompute',
... u'id': u'20180409T095924_aec36d313bdc11e89da654e1ad04f45e',
... u'level': u'info',
... u'raw': u'{...}',
... u'timestamp': u'2018-04-09T09:59:24.733945Z',
... u'type': u'metric'}]
>>> pointvalues = idbf._parse_msg_for_influxdb(valid_log)
>>> from pprint import pprint
>>> pprint(pointvalues)
[{'fields': {u'data._force_this_as_field': "'CXNS CNS nbkbsd'",
u'data.a': 1,
u'data.b': 2},
'measurement': u'some_log',
'tags': {u'data.msg': u'this is a dummy log',
u'error_tb': u'',
u'file': u'/var/log/sample.log',
u'formatter': u'logagg.formatters.basescript',
u'host': u'deepcompute',
u'level': u'info'},
'time': u'2018-04-09T09:59:24.733945Z'}]
>>> invalid_log = valid_log
>>> invalid_log[0]['error'] = True
>>> pointvalues = idbf._parse_msg_for_influxdb(invalid_log)
>>> pprint(pointvalues)
[]
>>> invalid_log = valid_log
>>> invalid_log[0]['type'] = 'log'
>>> pointvalues = idbf._parse_msg_for_influxdb(invalid_log)
>>> pprint(pointvalues)
[]
]
variable[series] assign[=] list[[]]
for taget[name[msg]] in starred[name[msgs]] begin[:]
if call[name[msg].get, parameter[constant[error]]] begin[:]
continue
if compare[call[call[name[msg].get, parameter[constant[type]]].lower, parameter[]] equal[==] constant[metric]] begin[:]
variable[time] assign[=] call[name[msg].get, parameter[constant[timestamp]]]
variable[measurement] assign[=] call[name[msg].get, parameter[constant[event]]]
<ast.Tuple object at 0x7da20c6a8d90> assign[=] call[name[self]._tag_and_field_maker, parameter[name[msg]]]
variable[pointvalues] assign[=] dictionary[[<ast.Constant object at 0x7da20c6a85e0>, <ast.Constant object at 0x7da20c990400>, <ast.Constant object at 0x7da20c9901c0>, <ast.Constant object at 0x7da20c992020>], [<ast.Name object at 0x7da20c992a40>, <ast.Name object at 0x7da20c990bb0>, <ast.Name object at 0x7da20c993850>, <ast.Name object at 0x7da20c992a70>]]
call[name[series].append, parameter[name[pointvalues]]]
return[name[series]] | keyword[def] identifier[_parse_msg_for_influxdb] ( identifier[self] , identifier[msgs] ):
literal[string]
identifier[series] =[]
keyword[for] identifier[msg] keyword[in] identifier[msgs] :
keyword[if] identifier[msg] . identifier[get] ( literal[string] ):
keyword[continue]
keyword[if] identifier[msg] . identifier[get] ( literal[string] ). identifier[lower] ()== literal[string] :
identifier[time] = identifier[msg] . identifier[get] ( literal[string] )
identifier[measurement] = identifier[msg] . identifier[get] ( literal[string] )
identifier[tags] , identifier[fields] = identifier[self] . identifier[_tag_and_field_maker] ( identifier[msg] )
identifier[pointvalues] ={
literal[string] : identifier[time] ,
literal[string] : identifier[measurement] ,
literal[string] : identifier[fields] ,
literal[string] : identifier[tags] }
identifier[series] . identifier[append] ( identifier[pointvalues] )
keyword[return] identifier[series] | def _parse_msg_for_influxdb(self, msgs):
"""
>>> from logagg.forwarders import InfluxDBForwarder
>>> idbf = InfluxDBForwarder('no_host', '8086', 'deadpool',
... 'chimichanga', 'logs', 'collection')
>>> valid_log = [{u'data': {u'_force_this_as_field': 'CXNS CNS nbkbsd',
... u'a': 1,
... u'b': 2,
... u'msg': u'this is a dummy log'},
... u'error': False,
... u'error_tb': u'',
... u'event': u'some_log',
... u'file': u'/var/log/sample.log',
... u'formatter': u'logagg.formatters.basescript',
... u'host': u'deepcompute',
... u'id': u'20180409T095924_aec36d313bdc11e89da654e1ad04f45e',
... u'level': u'info',
... u'raw': u'{...}',
... u'timestamp': u'2018-04-09T09:59:24.733945Z',
... u'type': u'metric'}]
>>> pointvalues = idbf._parse_msg_for_influxdb(valid_log)
>>> from pprint import pprint
>>> pprint(pointvalues)
[{'fields': {u'data._force_this_as_field': "'CXNS CNS nbkbsd'",
u'data.a': 1,
u'data.b': 2},
'measurement': u'some_log',
'tags': {u'data.msg': u'this is a dummy log',
u'error_tb': u'',
u'file': u'/var/log/sample.log',
u'formatter': u'logagg.formatters.basescript',
u'host': u'deepcompute',
u'level': u'info'},
'time': u'2018-04-09T09:59:24.733945Z'}]
>>> invalid_log = valid_log
>>> invalid_log[0]['error'] = True
>>> pointvalues = idbf._parse_msg_for_influxdb(invalid_log)
>>> pprint(pointvalues)
[]
>>> invalid_log = valid_log
>>> invalid_log[0]['type'] = 'log'
>>> pointvalues = idbf._parse_msg_for_influxdb(invalid_log)
>>> pprint(pointvalues)
[]
"""
series = []
for msg in msgs:
if msg.get('error'):
continue # depends on [control=['if'], data=[]]
if msg.get('type').lower() == 'metric':
time = msg.get('timestamp')
measurement = msg.get('event')
(tags, fields) = self._tag_and_field_maker(msg)
pointvalues = {'time': time, 'measurement': measurement, 'fields': fields, 'tags': tags}
series.append(pointvalues) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['msg']]
return series |
def normalizeSequence(sequence, considerDimensions=None):
    """
    Normalize a sequence by subtracting the mean and dividing by the
    standard deviation of each selected dimension (z-scoring).
    :param sequence: a list of data samples
    :param considerDimensions: a list of dimensions to consider; all
        dimensions are normalized when None
    :return: normalized sequence, as a list of lists
    """
    data = np.array(sequence).astype('float64')
    if considerDimensions is None:
        considerDimensions = range(data.shape[1])
    for axis in considerDimensions:
        column = data[:, axis]
        # z-score this dimension (np.std is the population std, ddof=0)
        data[:, axis] = (column - np.mean(column)) / np.std(column)
    return data.tolist()
constant[
normalize sequence by subtracting the mean and
:param sequence: a list of data samples
:param considerDimensions: a list of dimensions to consider
:return: normalized sequence
]
variable[seq] assign[=] call[call[name[np].array, parameter[name[sequence]]].astype, parameter[constant[float64]]]
variable[nSampleDim] assign[=] call[name[seq].shape][constant[1]]
if compare[name[considerDimensions] is constant[None]] begin[:]
variable[considerDimensions] assign[=] call[name[range], parameter[name[nSampleDim]]]
for taget[name[dim]] in starred[name[considerDimensions]] begin[:]
call[name[seq]][tuple[[<ast.Slice object at 0x7da1b08452d0>, <ast.Name object at 0x7da1b0844370>]]] assign[=] binary_operation[binary_operation[call[name[seq]][tuple[[<ast.Slice object at 0x7da1b0844820>, <ast.Name object at 0x7da1b08445b0>]]] - call[name[np].mean, parameter[call[name[seq]][tuple[[<ast.Slice object at 0x7da1b0845000>, <ast.Name object at 0x7da1b08462f0>]]]]]] / call[name[np].std, parameter[call[name[seq]][tuple[[<ast.Slice object at 0x7da1b0847a90>, <ast.Name object at 0x7da1b0844760>]]]]]]
variable[sequence] assign[=] call[name[seq].tolist, parameter[]]
return[name[sequence]] | keyword[def] identifier[normalizeSequence] ( identifier[sequence] , identifier[considerDimensions] = keyword[None] ):
literal[string]
identifier[seq] = identifier[np] . identifier[array] ( identifier[sequence] ). identifier[astype] ( literal[string] )
identifier[nSampleDim] = identifier[seq] . identifier[shape] [ literal[int] ]
keyword[if] identifier[considerDimensions] keyword[is] keyword[None] :
identifier[considerDimensions] = identifier[range] ( identifier[nSampleDim] )
keyword[for] identifier[dim] keyword[in] identifier[considerDimensions] :
identifier[seq] [:, identifier[dim] ]=( identifier[seq] [:, identifier[dim] ]- identifier[np] . identifier[mean] ( identifier[seq] [:, identifier[dim] ]))/ identifier[np] . identifier[std] ( identifier[seq] [:, identifier[dim] ])
identifier[sequence] = identifier[seq] . identifier[tolist] ()
keyword[return] identifier[sequence] | def normalizeSequence(sequence, considerDimensions=None):
"""
normalize sequence by subtracting the mean and
:param sequence: a list of data samples
:param considerDimensions: a list of dimensions to consider
:return: normalized sequence
"""
seq = np.array(sequence).astype('float64')
nSampleDim = seq.shape[1]
if considerDimensions is None:
considerDimensions = range(nSampleDim) # depends on [control=['if'], data=['considerDimensions']]
for dim in considerDimensions:
seq[:, dim] = (seq[:, dim] - np.mean(seq[:, dim])) / np.std(seq[:, dim]) # depends on [control=['for'], data=['dim']]
sequence = seq.tolist()
return sequence |
def add_job(cls, identifier, queue_name=None, priority=0, queue_model=None,
            prepend=False, delayed_for=None, delayed_until=None,
            **fields_if_new):
    """
    Add a job to a queue, or re-prioritize/move it if it already exists.

    If this job already exists, check its current priority. If it is higher
    than the new one, don't touch it, else move the job to the wanted queue.
    Before setting/moving the job to the queue, check for a `delayed_for`
    (int/float/timedelta) or `delayed_until` (datetime) argument to see if
    it must be delayed instead of queued.
    If the job is created, fields in `fields_if_new` will be set for the new
    job.
    Finally return the job.
    """
    # check for delayed_for/delayed_until arguments
    delayed_until = compute_delayed_until(delayed_for, delayed_until)
    # create the job or get an existing one; 'queued' marks the job as
    # present in (or destined for) a queue
    job_kwargs = {'identifier': identifier, 'queued': '1'}
    retries = 0
    # retrieval/creation can fail transiently, so retry a bounded number of
    # times instead of looping forever
    # NOTE(review): if all 10 retries fail, `job` is never bound and the
    # code below raises NameError — confirm whether that is acceptable.
    while retries < 10:
        retries += 1
        try:
            job, created = cls.get_or_connect(**job_kwargs)
        except IndexError:
            # Failure during the retrieval https://friendpaste.com/5U63a8aFuV44SEgQckgMP
            # => retry
            continue
        except ValueError:
            # more than one already in the queue !
            try:
                job = cls.collection(**job_kwargs).instances()[0]
            except IndexError:
                # but no more now ?!
                # => retry
                continue
            else:
                created = False
        # ok we have our job, stop now
        break
    try:
        # check queue_name
        queue_name = cls._get_queue_name(queue_name)
        # if the job already exists, and we want a higher priority or move it,
        # start by updating it
        if not created:
            current_priority = int(job.priority.hget() or 0)
            # if the job has a higher priority, or don't need to be moved,
            # don't move it
            if not prepend and current_priority >= priority:
                return job
            # cancel it temporarily, we'll set it as waiting later
            job.status.hset(STATUSES.CANCELED)
            # remove it from the current queue, we'll add it to the new one later
            if queue_model is None:
                queue_model = cls.queue_model
            current_queue = queue_model.get_queue(queue_name, current_priority)
            current_queue.waiting.lrem(0, job.ident)
        else:
            # brand new job: record its creation time and any caller-supplied
            # initial fields
            job.set_fields(added=str(datetime.utcnow()), **(fields_if_new or {}))
        # add the job to the queue
        job.enqueue_or_delay(queue_name, priority, delayed_until, prepend, queue_model)
        return job
    except Exception:
        # on any failure, clear the 'queued' flag so the job can be re-added
        # later, then propagate the error
        job.queued.delete()
        raise
constant[
Add a job to a queue.
If this job already exists, check it's current priority. If its higher
than the new one, don't touch it, else move the job to the wanted queue.
Before setting/moving the job to the queue, check for a `delayed_for`
(int/foat/timedelta) or `delayed_until` (datetime) argument to see if
it must be delayed instead of queued.
If the job is created, fields in fields_if_new will be set for the new
job.
Finally return the job.
]
variable[delayed_until] assign[=] call[name[compute_delayed_until], parameter[name[delayed_for], name[delayed_until]]]
variable[job_kwargs] assign[=] dictionary[[<ast.Constant object at 0x7da2043460b0>, <ast.Constant object at 0x7da204345bd0>], [<ast.Name object at 0x7da204344a60>, <ast.Constant object at 0x7da204347880>]]
variable[retries] assign[=] constant[0]
while compare[name[retries] less[<] constant[10]] begin[:]
<ast.AugAssign object at 0x7da2045670d0>
<ast.Try object at 0x7da204567b80>
break
<ast.Try object at 0x7da204567a60> | keyword[def] identifier[add_job] ( identifier[cls] , identifier[identifier] , identifier[queue_name] = keyword[None] , identifier[priority] = literal[int] , identifier[queue_model] = keyword[None] ,
identifier[prepend] = keyword[False] , identifier[delayed_for] = keyword[None] , identifier[delayed_until] = keyword[None] ,
** identifier[fields_if_new] ):
literal[string]
identifier[delayed_until] = identifier[compute_delayed_until] ( identifier[delayed_for] , identifier[delayed_until] )
identifier[job_kwargs] ={ literal[string] : identifier[identifier] , literal[string] : literal[string] }
identifier[retries] = literal[int]
keyword[while] identifier[retries] < literal[int] :
identifier[retries] += literal[int]
keyword[try] :
identifier[job] , identifier[created] = identifier[cls] . identifier[get_or_connect] (** identifier[job_kwargs] )
keyword[except] identifier[IndexError] :
keyword[continue]
keyword[except] identifier[ValueError] :
keyword[try] :
identifier[job] = identifier[cls] . identifier[collection] (** identifier[job_kwargs] ). identifier[instances] ()[ literal[int] ]
keyword[except] identifier[IndexError] :
keyword[continue]
keyword[else] :
identifier[created] = keyword[False]
keyword[break]
keyword[try] :
identifier[queue_name] = identifier[cls] . identifier[_get_queue_name] ( identifier[queue_name] )
keyword[if] keyword[not] identifier[created] :
identifier[current_priority] = identifier[int] ( identifier[job] . identifier[priority] . identifier[hget] () keyword[or] literal[int] )
keyword[if] keyword[not] identifier[prepend] keyword[and] identifier[current_priority] >= identifier[priority] :
keyword[return] identifier[job]
identifier[job] . identifier[status] . identifier[hset] ( identifier[STATUSES] . identifier[CANCELED] )
keyword[if] identifier[queue_model] keyword[is] keyword[None] :
identifier[queue_model] = identifier[cls] . identifier[queue_model]
identifier[current_queue] = identifier[queue_model] . identifier[get_queue] ( identifier[queue_name] , identifier[current_priority] )
identifier[current_queue] . identifier[waiting] . identifier[lrem] ( literal[int] , identifier[job] . identifier[ident] )
keyword[else] :
identifier[job] . identifier[set_fields] ( identifier[added] = identifier[str] ( identifier[datetime] . identifier[utcnow] ()),**( identifier[fields_if_new] keyword[or] {}))
identifier[job] . identifier[enqueue_or_delay] ( identifier[queue_name] , identifier[priority] , identifier[delayed_until] , identifier[prepend] , identifier[queue_model] )
keyword[return] identifier[job]
keyword[except] identifier[Exception] :
identifier[job] . identifier[queued] . identifier[delete] ()
keyword[raise] | def add_job(cls, identifier, queue_name=None, priority=0, queue_model=None, prepend=False, delayed_for=None, delayed_until=None, **fields_if_new):
"""
Add a job to a queue.
If this job already exists, check it's current priority. If its higher
than the new one, don't touch it, else move the job to the wanted queue.
Before setting/moving the job to the queue, check for a `delayed_for`
(int/foat/timedelta) or `delayed_until` (datetime) argument to see if
it must be delayed instead of queued.
If the job is created, fields in fields_if_new will be set for the new
job.
Finally return the job.
"""
# check for delayed_for/delayed_until arguments
delayed_until = compute_delayed_until(delayed_for, delayed_until)
# create the job or get an existing one
job_kwargs = {'identifier': identifier, 'queued': '1'}
retries = 0
while retries < 10:
retries += 1
try:
(job, created) = cls.get_or_connect(**job_kwargs) # depends on [control=['try'], data=[]]
except IndexError:
# Failure during the retrieval https://friendpaste.com/5U63a8aFuV44SEgQckgMP
# => retry
continue # depends on [control=['except'], data=[]]
except ValueError:
# more than one already in the queue !
try:
job = cls.collection(**job_kwargs).instances()[0] # depends on [control=['try'], data=[]]
except IndexError:
# but no more now ?!
# => retry
continue # depends on [control=['except'], data=[]]
else:
created = False # depends on [control=['except'], data=[]]
# ok we have our job, stop now
break # depends on [control=['while'], data=['retries']]
try:
# check queue_name
queue_name = cls._get_queue_name(queue_name)
# if the job already exists, and we want a higher priority or move it,
# start by updating it
if not created:
current_priority = int(job.priority.hget() or 0)
# if the job has a higher priority, or don't need to be moved,
# don't move it
if not prepend and current_priority >= priority:
return job # depends on [control=['if'], data=[]]
# cancel it temporarily, we'll set it as waiting later
job.status.hset(STATUSES.CANCELED)
# remove it from the current queue, we'll add it to the new one later
if queue_model is None:
queue_model = cls.queue_model # depends on [control=['if'], data=['queue_model']]
current_queue = queue_model.get_queue(queue_name, current_priority)
current_queue.waiting.lrem(0, job.ident) # depends on [control=['if'], data=[]]
else:
job.set_fields(added=str(datetime.utcnow()), **fields_if_new or {})
# add the job to the queue
job.enqueue_or_delay(queue_name, priority, delayed_until, prepend, queue_model)
return job # depends on [control=['try'], data=[]]
except Exception:
job.queued.delete()
raise # depends on [control=['except'], data=[]] |
def created_on(self):
    """The creation timestamp."""
    # Convert the raw 'creationTime' metadata value into a parsed timestamp.
    raw_timestamp = self._info.get('creationTime')
    return _parser.Parser.parse_timestamp(raw_timestamp)
constant[The creation timestamp.]
variable[timestamp] assign[=] call[name[self]._info.get, parameter[constant[creationTime]]]
return[call[name[_parser].Parser.parse_timestamp, parameter[name[timestamp]]]] | keyword[def] identifier[created_on] ( identifier[self] ):
literal[string]
identifier[timestamp] = identifier[self] . identifier[_info] . identifier[get] ( literal[string] )
keyword[return] identifier[_parser] . identifier[Parser] . identifier[parse_timestamp] ( identifier[timestamp] ) | def created_on(self):
"""The creation timestamp."""
timestamp = self._info.get('creationTime')
return _parser.Parser.parse_timestamp(timestamp) |
def _long_from_raw(thehash):
    """Fold a raw digest (bytes or iterable of byte values) into an int.

    Each byte is shifted into the accumulator most-significant first, i.e.
    the digest is interpreted as a big-endian unsigned integer. An empty
    digest yields 0.
    """
    # The original loop did `hashnum = (hashnum << 8) | ord(bytes([h]))`,
    # where ord(bytes([h])) is an obfuscated identity on the byte value —
    # the whole loop is exactly a big-endian int conversion.
    return int.from_bytes(bytes(thehash), 'big')
constant[Fold to a long, a digest supplied as a string.]
variable[hashnum] assign[=] constant[0]
for taget[name[h]] in starred[name[thehash]] begin[:]
<ast.AugAssign object at 0x7da1b072d630>
<ast.AugAssign object at 0x7da1b072f3a0>
return[name[hashnum]] | keyword[def] identifier[_long_from_raw] ( identifier[thehash] ):
literal[string]
identifier[hashnum] = literal[int]
keyword[for] identifier[h] keyword[in] identifier[thehash] :
identifier[hashnum] <<= literal[int]
identifier[hashnum] |= identifier[ord] ( identifier[bytes] ([ identifier[h] ]))
keyword[return] identifier[hashnum] | def _long_from_raw(thehash):
"""Fold to a long, a digest supplied as a string."""
hashnum = 0
for h in thehash:
hashnum <<= 8
hashnum |= ord(bytes([h])) # depends on [control=['for'], data=['h']]
return hashnum |
def urban_adj_factor(self):
    """
    Return urban adjustment factor (UAF) used to adjust QMED and growth curves.
    Methodology source: eqn. 8, Kjeldsen 2010
    :return: urban adjustment factor
    :rtype: float
    """
    urban_extent = self.catchment.descriptors.urbext(self.year)
    # Eqn. 8 (Kjeldsen 2010): UAF = PRUAF**2.16 * (1 + URBEXT)**0.37
    uaf = self._pruaf() ** 2.16 * (1 + urban_extent) ** 0.37
    self.results_log['urban_extent'] = urban_extent
    self.results_log['urban_adj_factor'] = uaf
    return uaf
constant[
Return urban adjustment factor (UAF) used to adjust QMED and growth curves.
Methodology source: eqn. 8, Kjeldsen 2010
:return: urban adjustment factor
:rtype: float
]
variable[urbext] assign[=] call[name[self].catchment.descriptors.urbext, parameter[name[self].year]]
variable[result] assign[=] binary_operation[binary_operation[call[name[self]._pruaf, parameter[]] ** constant[2.16]] * binary_operation[binary_operation[constant[1] + name[urbext]] ** constant[0.37]]]
call[name[self].results_log][constant[urban_extent]] assign[=] name[urbext]
call[name[self].results_log][constant[urban_adj_factor]] assign[=] name[result]
return[name[result]] | keyword[def] identifier[urban_adj_factor] ( identifier[self] ):
literal[string]
identifier[urbext] = identifier[self] . identifier[catchment] . identifier[descriptors] . identifier[urbext] ( identifier[self] . identifier[year] )
identifier[result] = identifier[self] . identifier[_pruaf] ()** literal[int] *( literal[int] + identifier[urbext] )** literal[int]
identifier[self] . identifier[results_log] [ literal[string] ]= identifier[urbext]
identifier[self] . identifier[results_log] [ literal[string] ]= identifier[result]
keyword[return] identifier[result] | def urban_adj_factor(self):
"""
Return urban adjustment factor (UAF) used to adjust QMED and growth curves.
Methodology source: eqn. 8, Kjeldsen 2010
:return: urban adjustment factor
:rtype: float
"""
urbext = self.catchment.descriptors.urbext(self.year)
result = self._pruaf() ** 2.16 * (1 + urbext) ** 0.37
self.results_log['urban_extent'] = urbext
self.results_log['urban_adj_factor'] = result
return result |
def _handle_command_response(self, res, event):
    """Either send a message (choosing between rtm and postMessage) or ignore the response.
    :param event: a slacker event dict
    :param res: a string, a dict, or None.
    See the command docstring for what these represent.
    """
    # Strings go out over RTM to the channel the event came from; dicts go
    # through the web API; anything else (e.g. None) is silently ignored.
    if isinstance(res, basestring):
        rtm_send = functools.partial(self._send_rtm_message, event['channel'])
        rtm_send(res)
    elif isinstance(res, dict):
        self._send_api_message(res)
constant[Either send a message (choosing between rtm and postMessage) or ignore the response.
:param event: a slacker event dict
:param res: a string, a dict, or None.
See the command docstring for what these represent.
]
variable[response_handler] assign[=] constant[None]
if call[name[isinstance], parameter[name[res], name[basestring]]] begin[:]
variable[response_handler] assign[=] call[name[functools].partial, parameter[name[self]._send_rtm_message, call[name[event]][constant[channel]]]]
if compare[name[response_handler] is_not constant[None]] begin[:]
call[name[response_handler], parameter[name[res]]] | keyword[def] identifier[_handle_command_response] ( identifier[self] , identifier[res] , identifier[event] ):
literal[string]
identifier[response_handler] = keyword[None]
keyword[if] identifier[isinstance] ( identifier[res] , identifier[basestring] ):
identifier[response_handler] = identifier[functools] . identifier[partial] ( identifier[self] . identifier[_send_rtm_message] , identifier[event] [ literal[string] ])
keyword[elif] identifier[isinstance] ( identifier[res] , identifier[dict] ):
identifier[response_handler] = identifier[self] . identifier[_send_api_message]
keyword[if] identifier[response_handler] keyword[is] keyword[not] keyword[None] :
identifier[response_handler] ( identifier[res] ) | def _handle_command_response(self, res, event):
"""Either send a message (choosing between rtm and postMessage) or ignore the response.
:param event: a slacker event dict
:param res: a string, a dict, or None.
See the command docstring for what these represent.
"""
response_handler = None
if isinstance(res, basestring):
response_handler = functools.partial(self._send_rtm_message, event['channel']) # depends on [control=['if'], data=[]]
elif isinstance(res, dict):
response_handler = self._send_api_message # depends on [control=['if'], data=[]]
if response_handler is not None:
response_handler(res) # depends on [control=['if'], data=['response_handler']] |
def start(self):
    """
    Starts the upload.
    :raises SbgError: If upload is not in PREPARING state.
    """
    # Guard clause: only an upload that has not yet been started may begin.
    if self._status != TransferState.PREPARING:
        raise SbgError(
            'Unable to start. Upload not in PREPARING state.'
        )
    super(Upload, self).start()
constant[
Starts the upload.
:raises SbgError: If upload is not in PREPARING state.
]
if compare[name[self]._status equal[==] name[TransferState].PREPARING] begin[:]
call[call[name[super], parameter[name[Upload], name[self]]].start, parameter[]] | keyword[def] identifier[start] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_status] == identifier[TransferState] . identifier[PREPARING] :
identifier[super] ( identifier[Upload] , identifier[self] ). identifier[start] ()
keyword[else] :
keyword[raise] identifier[SbgError] (
literal[string]
) | def start(self):
"""
Starts the upload.
:raises SbgError: If upload is not in PREPARING state.
"""
if self._status == TransferState.PREPARING:
super(Upload, self).start() # depends on [control=['if'], data=[]]
else:
raise SbgError('Unable to start. Upload not in PREPARING state.') |
def parse_querystring(self, req, name, field):
    """Pull a querystring value from the request."""
    query_params = req.args
    return core.get_value(query_params, name, field)
constant[Pull a querystring value from the request.]
return[call[name[core].get_value, parameter[name[req].args, name[name], name[field]]]] | keyword[def] identifier[parse_querystring] ( identifier[self] , identifier[req] , identifier[name] , identifier[field] ):
literal[string]
keyword[return] identifier[core] . identifier[get_value] ( identifier[req] . identifier[args] , identifier[name] , identifier[field] ) | def parse_querystring(self, req, name, field):
"""Pull a querystring value from the request."""
return core.get_value(req.args, name, field) |
def module(self):
    """The module in which the Class is defined.
    Python equivalent of the CLIPS defglobal-module command.
    """
    # Resolve the name of the defmodule that owns this defclass, then
    # look the module handle up and wrap it.
    module_name = ffi.string(lib.EnvDefclassModule(self._env, self._cls))
    module_handle = lib.EnvFindDefmodule(self._env, module_name)
    return Module(self._env, module_handle)
constant[The module in which the Class is defined.
Python equivalent of the CLIPS defglobal-module command.
]
variable[modname] assign[=] call[name[ffi].string, parameter[call[name[lib].EnvDefclassModule, parameter[name[self]._env, name[self]._cls]]]]
variable[defmodule] assign[=] call[name[lib].EnvFindDefmodule, parameter[name[self]._env, name[modname]]]
return[call[name[Module], parameter[name[self]._env, name[defmodule]]]] | keyword[def] identifier[module] ( identifier[self] ):
literal[string]
identifier[modname] = identifier[ffi] . identifier[string] ( identifier[lib] . identifier[EnvDefclassModule] ( identifier[self] . identifier[_env] , identifier[self] . identifier[_cls] ))
identifier[defmodule] = identifier[lib] . identifier[EnvFindDefmodule] ( identifier[self] . identifier[_env] , identifier[modname] )
keyword[return] identifier[Module] ( identifier[self] . identifier[_env] , identifier[defmodule] ) | def module(self):
"""The module in which the Class is defined.
Python equivalent of the CLIPS defglobal-module command.
"""
modname = ffi.string(lib.EnvDefclassModule(self._env, self._cls))
defmodule = lib.EnvFindDefmodule(self._env, modname)
return Module(self._env, defmodule) |
def LeaseCronJobs(self, cronjob_ids=None, lease_time=None):
  """Leases all available cron jobs."""
  now = rdfvalue.RDFDatetime.Now()
  expires = now + lease_time
  leased = []
  for job in itervalues(self.cronjobs):
    # When a filter is given, only consider the requested job ids.
    if cronjob_ids and job.cron_job_id not in cronjob_ids:
      continue
    current = self.cronjob_leases.get(job.cron_job_id)
    if current is not None and current[0] >= now:
      # Lease still held (not yet expired); leave it alone.
      continue
    self.cronjob_leases[job.cron_job_id] = (expires, utils.ProcessIdString())
    job = job.Copy()
    job.leased_until, job.leased_by = self.cronjob_leases[job.cron_job_id]
    leased.append(job)
  return leased
constant[Leases all available cron jobs.]
variable[leased_jobs] assign[=] list[[]]
variable[now] assign[=] call[name[rdfvalue].RDFDatetime.Now, parameter[]]
variable[expiration_time] assign[=] binary_operation[name[now] + name[lease_time]]
for taget[name[job]] in starred[call[name[itervalues], parameter[name[self].cronjobs]]] begin[:]
if <ast.BoolOp object at 0x7da18fe90eb0> begin[:]
continue
variable[existing_lease] assign[=] call[name[self].cronjob_leases.get, parameter[name[job].cron_job_id]]
if <ast.BoolOp object at 0x7da18fe921a0> begin[:]
call[name[self].cronjob_leases][name[job].cron_job_id] assign[=] tuple[[<ast.Name object at 0x7da18fe920b0>, <ast.Call object at 0x7da18fe93d30>]]
variable[job] assign[=] call[name[job].Copy, parameter[]]
<ast.Tuple object at 0x7da18fe90b80> assign[=] call[name[self].cronjob_leases][name[job].cron_job_id]
call[name[leased_jobs].append, parameter[name[job]]]
return[name[leased_jobs]] | keyword[def] identifier[LeaseCronJobs] ( identifier[self] , identifier[cronjob_ids] = keyword[None] , identifier[lease_time] = keyword[None] ):
literal[string]
identifier[leased_jobs] =[]
identifier[now] = identifier[rdfvalue] . identifier[RDFDatetime] . identifier[Now] ()
identifier[expiration_time] = identifier[now] + identifier[lease_time]
keyword[for] identifier[job] keyword[in] identifier[itervalues] ( identifier[self] . identifier[cronjobs] ):
keyword[if] identifier[cronjob_ids] keyword[and] identifier[job] . identifier[cron_job_id] keyword[not] keyword[in] identifier[cronjob_ids] :
keyword[continue]
identifier[existing_lease] = identifier[self] . identifier[cronjob_leases] . identifier[get] ( identifier[job] . identifier[cron_job_id] )
keyword[if] identifier[existing_lease] keyword[is] keyword[None] keyword[or] identifier[existing_lease] [ literal[int] ]< identifier[now] :
identifier[self] . identifier[cronjob_leases] [ identifier[job] . identifier[cron_job_id] ]=( identifier[expiration_time] ,
identifier[utils] . identifier[ProcessIdString] ())
identifier[job] = identifier[job] . identifier[Copy] ()
identifier[job] . identifier[leased_until] , identifier[job] . identifier[leased_by] = identifier[self] . identifier[cronjob_leases] [ identifier[job] . identifier[cron_job_id] ]
identifier[leased_jobs] . identifier[append] ( identifier[job] )
keyword[return] identifier[leased_jobs] | def LeaseCronJobs(self, cronjob_ids=None, lease_time=None):
"""Leases all available cron jobs."""
leased_jobs = []
now = rdfvalue.RDFDatetime.Now()
expiration_time = now + lease_time
for job in itervalues(self.cronjobs):
if cronjob_ids and job.cron_job_id not in cronjob_ids:
continue # depends on [control=['if'], data=[]]
existing_lease = self.cronjob_leases.get(job.cron_job_id)
if existing_lease is None or existing_lease[0] < now:
self.cronjob_leases[job.cron_job_id] = (expiration_time, utils.ProcessIdString())
job = job.Copy()
(job.leased_until, job.leased_by) = self.cronjob_leases[job.cron_job_id]
leased_jobs.append(job) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['job']]
return leased_jobs |
def normalize(self, inplace=False):
    """Normalizes a Quaternion to unit length
    so that it represents a valid rotation.
    Args:
        inplace (bool): Do an inplace normalization.
    Returns:
        Quaternion: Normalized quaternion, or ``None`` when
            ``inplace`` is True (the data is modified in place).
    """
    # Compute the norm once; the original recomputed it in each branch.
    nrm = self.norm()
    if inplace:
        self.data /= nrm
        return None
    # Copy first so the original quaternion's data is left untouched.
    data_copy = np.array(self.data, copy=True)
    data_copy /= nrm
    return Quaternion(data_copy)
constant[Normalizes a Quaternion to unit length
so that it represents a valid rotation.
Args:
inplace (bool): Do an inplace normalization.
Returns:
Quaternion: Normalized quaternion.
]
if name[inplace] begin[:]
variable[nrm] assign[=] call[name[self].norm, parameter[]]
<ast.AugAssign object at 0x7da2047e8130>
return[constant[None]]
variable[nrm] assign[=] call[name[self].norm, parameter[]]
variable[data_copy] assign[=] call[name[np].array, parameter[name[self].data]]
<ast.AugAssign object at 0x7da2047e8550>
return[call[name[Quaternion], parameter[name[data_copy]]]] | keyword[def] identifier[normalize] ( identifier[self] , identifier[inplace] = keyword[False] ):
literal[string]
keyword[if] identifier[inplace] :
identifier[nrm] = identifier[self] . identifier[norm] ()
identifier[self] . identifier[data] /= identifier[nrm]
keyword[return] keyword[None]
identifier[nrm] = identifier[self] . identifier[norm] ()
identifier[data_copy] = identifier[np] . identifier[array] ( identifier[self] . identifier[data] , identifier[copy] = keyword[True] )
identifier[data_copy] /= identifier[nrm]
keyword[return] identifier[Quaternion] ( identifier[data_copy] ) | def normalize(self, inplace=False):
"""Normalizes a Quaternion to unit length
so that it represents a valid rotation.
Args:
inplace (bool): Do an inplace normalization.
Returns:
Quaternion: Normalized quaternion.
"""
if inplace:
nrm = self.norm()
self.data /= nrm
return None # depends on [control=['if'], data=[]]
nrm = self.norm()
data_copy = np.array(self.data, copy=True)
data_copy /= nrm
return Quaternion(data_copy) |
def _nextOn(self, request):
    """
    Formatted date/time of when this event (including any postponements)
    will next be on
    """
    retval = None
    # Earliest occurrence (or its postponement) from local midnight onward.
    # NOTE: __localAfterOrPostponedTo is name-mangled, so it must live on
    # this class; `event` may be a different object than `self`.
    nextDt, event = self.__localAfterOrPostponedTo(timezone.localtime(),
                                                   dt.time.min)
    if nextDt is not None:
        # Only render a time-of-day when the event has an explicit start time.
        timeFrom = nextDt.time() if event.time_from is not None else None
        retval = "{} {}".format(dateFormat(nextDt.date()),
                                timeFormat(timeFrom, prefix=gettext("at ")))
        # When the next occurrence belongs to another event object
        # (presumably a postponement page) the viewer may follow, link to it.
        if event is not self and event.isAuthorized(request):
            retval = format_html('<a class="inline-link" href="{}">{}</a>',
                                 event.url, retval)
    return retval
constant[
Formatted date/time of when this event (including any postponements)
will next be on
]
variable[retval] assign[=] constant[None]
<ast.Tuple object at 0x7da20e9606a0> assign[=] call[name[self].__localAfterOrPostponedTo, parameter[call[name[timezone].localtime, parameter[]], name[dt].time.min]]
if compare[name[nextDt] is_not constant[None]] begin[:]
variable[timeFrom] assign[=] <ast.IfExp object at 0x7da20e960a00>
variable[retval] assign[=] call[constant[{} {}].format, parameter[call[name[dateFormat], parameter[call[name[nextDt].date, parameter[]]]], call[name[timeFormat], parameter[name[timeFrom]]]]]
if <ast.BoolOp object at 0x7da20e963550> begin[:]
variable[retval] assign[=] call[name[format_html], parameter[constant[<a class="inline-link" href="{}">{}</a>], name[event].url, name[retval]]]
return[name[retval]] | keyword[def] identifier[_nextOn] ( identifier[self] , identifier[request] ):
literal[string]
identifier[retval] = keyword[None]
identifier[nextDt] , identifier[event] = identifier[self] . identifier[__localAfterOrPostponedTo] ( identifier[timezone] . identifier[localtime] (),
identifier[dt] . identifier[time] . identifier[min] )
keyword[if] identifier[nextDt] keyword[is] keyword[not] keyword[None] :
identifier[timeFrom] = identifier[nextDt] . identifier[time] () keyword[if] identifier[event] . identifier[time_from] keyword[is] keyword[not] keyword[None] keyword[else] keyword[None]
identifier[retval] = literal[string] . identifier[format] ( identifier[dateFormat] ( identifier[nextDt] . identifier[date] ()),
identifier[timeFormat] ( identifier[timeFrom] , identifier[prefix] = identifier[gettext] ( literal[string] )))
keyword[if] identifier[event] keyword[is] keyword[not] identifier[self] keyword[and] identifier[event] . identifier[isAuthorized] ( identifier[request] ):
identifier[retval] = identifier[format_html] ( literal[string] ,
identifier[event] . identifier[url] , identifier[retval] )
keyword[return] identifier[retval] | def _nextOn(self, request):
"""
Formatted date/time of when this event (including any postponements)
will next be on
"""
retval = None
(nextDt, event) = self.__localAfterOrPostponedTo(timezone.localtime(), dt.time.min)
if nextDt is not None:
timeFrom = nextDt.time() if event.time_from is not None else None
retval = '{} {}'.format(dateFormat(nextDt.date()), timeFormat(timeFrom, prefix=gettext('at ')))
if event is not self and event.isAuthorized(request):
retval = format_html('<a class="inline-link" href="{}">{}</a>', event.url, retval) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['nextDt']]
return retval |
def make_energy_funnel_data(self, cores=1):
    """Compares models created during the minimisation to the best model.
    Parameters
    ----------
    cores : int
        Number of worker processes; 1 (or win32) runs serially.
    Returns
    -------
    energy_rmsd_gen: [(float, float, int)]
        A list of triples containing the BUFF score, RMSD to the
        top model and generation of a model generated during the
        minimisation.
    """
    if not self.parameter_log:
        raise AttributeError(
            'No parameter log data to make funnel, have you ran the '
            'optimiser?')
    model_cls = self._params['specification']
    # Flatten the per-generation log into (parameters, score, generation).
    gen_tagged = []
    for gen, models in enumerate(self.parameter_log):
        for model in models:
            gen_tagged.append((model[0], model[1], gen))
    # Ascending sort on element 1 (the score); the first entry is the best
    # model, whose parameters are used to rebuild the reference structure.
    sorted_pps = sorted(gen_tagged, key=lambda x: x[1])
    top_result = sorted_pps[0]
    top_result_model = model_cls(*top_result[0])
    # Serial path for single-core requests and on Windows (no fork).
    if (cores == 1) or (sys.platform == 'win32'):
        energy_rmsd_gen = map(
            self.funnel_rebuild,
            [(x, top_result_model,
              self._params['specification']) for x in sorted_pps[1:]])
    else:
        # NOTE(review): pool size comes from self._params['processors'],
        # not from the `cores` argument — confirm this is intentional.
        with futures.ProcessPoolExecutor(
                max_workers=self._params['processors']) as executor:
            energy_rmsd_gen = executor.map(
                self.funnel_rebuild,
                [(x, top_result_model, self._params['specification'])
                 for x in sorted_pps[1:]])
    # Materialise the lazy map before returning (executor already shut down).
    return list(energy_rmsd_gen)
constant[Compares models created during the minimisation to the best model.
Returns
-------
energy_rmsd_gen: [(float, float, int)]
A list of triples containing the BUFF score, RMSD to the
top model and generation of a model generated during the
minimisation.
]
if <ast.UnaryOp object at 0x7da1b2625030> begin[:]
<ast.Raise object at 0x7da1b2626da0>
variable[model_cls] assign[=] call[name[self]._params][constant[specification]]
variable[gen_tagged] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b26265f0>, <ast.Name object at 0x7da1b2624d00>]]] in starred[call[name[enumerate], parameter[name[self].parameter_log]]] begin[:]
for taget[name[model]] in starred[name[models]] begin[:]
call[name[gen_tagged].append, parameter[tuple[[<ast.Subscript object at 0x7da1b2624af0>, <ast.Subscript object at 0x7da1b2625870>, <ast.Name object at 0x7da1b2627370>]]]]
variable[sorted_pps] assign[=] call[name[sorted], parameter[name[gen_tagged]]]
variable[top_result] assign[=] call[name[sorted_pps]][constant[0]]
variable[top_result_model] assign[=] call[name[model_cls], parameter[<ast.Starred object at 0x7da1b2624910>]]
if <ast.BoolOp object at 0x7da1b26259c0> begin[:]
variable[energy_rmsd_gen] assign[=] call[name[map], parameter[name[self].funnel_rebuild, <ast.ListComp object at 0x7da1b2625750>]]
return[call[name[list], parameter[name[energy_rmsd_gen]]]] | keyword[def] identifier[make_energy_funnel_data] ( identifier[self] , identifier[cores] = literal[int] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[parameter_log] :
keyword[raise] identifier[AttributeError] (
literal[string]
literal[string] )
identifier[model_cls] = identifier[self] . identifier[_params] [ literal[string] ]
identifier[gen_tagged] =[]
keyword[for] identifier[gen] , identifier[models] keyword[in] identifier[enumerate] ( identifier[self] . identifier[parameter_log] ):
keyword[for] identifier[model] keyword[in] identifier[models] :
identifier[gen_tagged] . identifier[append] (( identifier[model] [ literal[int] ], identifier[model] [ literal[int] ], identifier[gen] ))
identifier[sorted_pps] = identifier[sorted] ( identifier[gen_tagged] , identifier[key] = keyword[lambda] identifier[x] : identifier[x] [ literal[int] ])
identifier[top_result] = identifier[sorted_pps] [ literal[int] ]
identifier[top_result_model] = identifier[model_cls] (* identifier[top_result] [ literal[int] ])
keyword[if] ( identifier[cores] == literal[int] ) keyword[or] ( identifier[sys] . identifier[platform] == literal[string] ):
identifier[energy_rmsd_gen] = identifier[map] (
identifier[self] . identifier[funnel_rebuild] ,
[( identifier[x] , identifier[top_result_model] ,
identifier[self] . identifier[_params] [ literal[string] ]) keyword[for] identifier[x] keyword[in] identifier[sorted_pps] [ literal[int] :]])
keyword[else] :
keyword[with] identifier[futures] . identifier[ProcessPoolExecutor] (
identifier[max_workers] = identifier[self] . identifier[_params] [ literal[string] ]) keyword[as] identifier[executor] :
identifier[energy_rmsd_gen] = identifier[executor] . identifier[map] (
identifier[self] . identifier[funnel_rebuild] ,
[( identifier[x] , identifier[top_result_model] , identifier[self] . identifier[_params] [ literal[string] ])
keyword[for] identifier[x] keyword[in] identifier[sorted_pps] [ literal[int] :]])
keyword[return] identifier[list] ( identifier[energy_rmsd_gen] ) | def make_energy_funnel_data(self, cores=1):
"""Compares models created during the minimisation to the best model.
Returns
-------
energy_rmsd_gen: [(float, float, int)]
A list of triples containing the BUFF score, RMSD to the
top model and generation of a model generated during the
minimisation.
"""
if not self.parameter_log:
raise AttributeError('No parameter log data to make funnel, have you ran the optimiser?') # depends on [control=['if'], data=[]]
model_cls = self._params['specification']
gen_tagged = []
for (gen, models) in enumerate(self.parameter_log):
for model in models:
gen_tagged.append((model[0], model[1], gen)) # depends on [control=['for'], data=['model']] # depends on [control=['for'], data=[]]
sorted_pps = sorted(gen_tagged, key=lambda x: x[1])
top_result = sorted_pps[0]
top_result_model = model_cls(*top_result[0])
if cores == 1 or sys.platform == 'win32':
energy_rmsd_gen = map(self.funnel_rebuild, [(x, top_result_model, self._params['specification']) for x in sorted_pps[1:]]) # depends on [control=['if'], data=[]]
else:
with futures.ProcessPoolExecutor(max_workers=self._params['processors']) as executor:
energy_rmsd_gen = executor.map(self.funnel_rebuild, [(x, top_result_model, self._params['specification']) for x in sorted_pps[1:]]) # depends on [control=['with'], data=['executor']]
return list(energy_rmsd_gen) |
def _load_data_from_disk(file_set, preprocess_func=lambda ds: ds,
                         data_vars='minimal', coords='minimal',
                         grid_attrs=None, **kwargs):
    """Load a Dataset from a list or glob-string of files.
    Datasets from files are concatenated along time,
    and all grid attributes are renamed to their aospy internal names.
    Parameters
    ----------
    file_set : list or str
        List of paths to files or glob-string
    preprocess_func : function (optional)
        Custom function to call before applying any aospy logic
        to the loaded dataset
    data_vars : str (default 'minimal')
        Mode for concatenating data variables in call to ``xr.open_mfdataset``
    coords : str (default 'minimal')
        Mode for concatenating coordinate variables in call to
        ``xr.open_mfdataset``.
    grid_attrs : dict
        Overriding dictionary of grid attributes mapping aospy internal
        names to names of grid attributes used in a particular model.
    Returns
    -------
    Dataset
    """
    apply_preload_user_commands(file_set)
    # Wrap the user preprocess function with the grid-attr renaming logic.
    preprocess = _preprocess_and_rename_grid_attrs(preprocess_func, grid_attrs,
                                                   **kwargs)
    return xr.open_mfdataset(
        file_set,
        preprocess=preprocess,
        concat_dim=TIME_STR,
        decode_times=False,
        decode_coords=False,
        mask_and_scale=True,
        data_vars=data_vars,
        coords=coords,
    )
constant[Load a Dataset from a list or glob-string of files.
Datasets from files are concatenated along time,
and all grid attributes are renamed to their aospy internal names.
Parameters
----------
file_set : list or str
List of paths to files or glob-string
preprocess_func : function (optional)
Custom function to call before applying any aospy logic
to the loaded dataset
data_vars : str (default 'minimal')
Mode for concatenating data variables in call to ``xr.open_mfdataset``
coords : str (default 'minimal')
Mode for concatenating coordinate variables in call to
``xr.open_mfdataset``.
grid_attrs : dict
Overriding dictionary of grid attributes mapping aospy internal
names to names of grid attributes used in a particular model.
Returns
-------
Dataset
]
call[name[apply_preload_user_commands], parameter[name[file_set]]]
variable[func] assign[=] call[name[_preprocess_and_rename_grid_attrs], parameter[name[preprocess_func], name[grid_attrs]]]
return[call[name[xr].open_mfdataset, parameter[name[file_set]]]] | keyword[def] identifier[_load_data_from_disk] ( identifier[file_set] , identifier[preprocess_func] = keyword[lambda] identifier[ds] : identifier[ds] ,
identifier[data_vars] = literal[string] , identifier[coords] = literal[string] ,
identifier[grid_attrs] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[apply_preload_user_commands] ( identifier[file_set] )
identifier[func] = identifier[_preprocess_and_rename_grid_attrs] ( identifier[preprocess_func] , identifier[grid_attrs] ,
** identifier[kwargs] )
keyword[return] identifier[xr] . identifier[open_mfdataset] ( identifier[file_set] , identifier[preprocess] = identifier[func] , identifier[concat_dim] = identifier[TIME_STR] ,
identifier[decode_times] = keyword[False] , identifier[decode_coords] = keyword[False] ,
identifier[mask_and_scale] = keyword[True] , identifier[data_vars] = identifier[data_vars] ,
identifier[coords] = identifier[coords] ) | def _load_data_from_disk(file_set, preprocess_func=lambda ds: ds, data_vars='minimal', coords='minimal', grid_attrs=None, **kwargs):
"""Load a Dataset from a list or glob-string of files.
Datasets from files are concatenated along time,
and all grid attributes are renamed to their aospy internal names.
Parameters
----------
file_set : list or str
List of paths to files or glob-string
preprocess_func : function (optional)
Custom function to call before applying any aospy logic
to the loaded dataset
data_vars : str (default 'minimal')
Mode for concatenating data variables in call to ``xr.open_mfdataset``
coords : str (default 'minimal')
Mode for concatenating coordinate variables in call to
``xr.open_mfdataset``.
grid_attrs : dict
Overriding dictionary of grid attributes mapping aospy internal
names to names of grid attributes used in a particular model.
Returns
-------
Dataset
"""
apply_preload_user_commands(file_set)
func = _preprocess_and_rename_grid_attrs(preprocess_func, grid_attrs, **kwargs)
return xr.open_mfdataset(file_set, preprocess=func, concat_dim=TIME_STR, decode_times=False, decode_coords=False, mask_and_scale=True, data_vars=data_vars, coords=coords) |
def candidate(self, cand_func, args=None, kwargs=None, name='Candidate', context=None):
    '''
    Adds a candidate function to an experiment. Can be used multiple times for
    multiple candidates.
    :param callable cand_func: your control function
    :param iterable args: positional arguments to pass to your function
    :param dict kwargs: keyword arguments to pass to your function
    :param string name: a name for your observation
    :param dict context: observation-specific context
    '''
    # Falsy args/kwargs/context collapse to fresh empty containers.
    observation = {
        'func': cand_func,
        'args': args or [],
        'kwargs': kwargs or {},
        'name': name,
        'context': context or {},
    }
    self._candidates.append(observation)
constant[
Adds a candidate function to an experiment. Can be used multiple times for
multiple candidates.
:param callable cand_func: your control function
:param iterable args: positional arguments to pass to your function
:param dict kwargs: keyword arguments to pass to your function
:param string name: a name for your observation
:param dict context: observation-specific context
]
call[name[self]._candidates.append, parameter[dictionary[[<ast.Constant object at 0x7da1b0544070>, <ast.Constant object at 0x7da1b05450c0>, <ast.Constant object at 0x7da1b0546b90>, <ast.Constant object at 0x7da1b0547910>, <ast.Constant object at 0x7da1b05478b0>], [<ast.Name object at 0x7da1b0544940>, <ast.BoolOp object at 0x7da1b05446a0>, <ast.BoolOp object at 0x7da1b0546e30>, <ast.Name object at 0x7da1b0546290>, <ast.BoolOp object at 0x7da1b0547be0>]]]] | keyword[def] identifier[candidate] ( identifier[self] , identifier[cand_func] , identifier[args] = keyword[None] , identifier[kwargs] = keyword[None] , identifier[name] = literal[string] , identifier[context] = keyword[None] ):
literal[string]
identifier[self] . identifier[_candidates] . identifier[append] ({
literal[string] : identifier[cand_func] ,
literal[string] : identifier[args] keyword[or] [],
literal[string] : identifier[kwargs] keyword[or] {},
literal[string] : identifier[name] ,
literal[string] : identifier[context] keyword[or] {},
}) | def candidate(self, cand_func, args=None, kwargs=None, name='Candidate', context=None):
"""
Adds a candidate function to an experiment. Can be used multiple times for
multiple candidates.
:param callable cand_func: your control function
:param iterable args: positional arguments to pass to your function
:param dict kwargs: keyword arguments to pass to your function
:param string name: a name for your observation
:param dict context: observation-specific context
"""
self._candidates.append({'func': cand_func, 'args': args or [], 'kwargs': kwargs or {}, 'name': name, 'context': context or {}}) |
def leaf_nodes(self):
    """
    Return an iterable of nodes with no edges pointing at them. This is
    helpful to find all nodes without dependencies.
    """
    # Union of every edge list == all nodes that appear as a dependency.
    dependents = set()
    for targets in self.edges.values():
        dependents.update(targets)
    # Leaves are the nodes never referenced as a dependency.
    return self.nodes - dependents
constant[
Return an interable of nodes with no edges pointing at them. This is
helpful to find all nodes without dependencies.
]
variable[deps] assign[=] <ast.SetComp object at 0x7da18f723c40>
return[binary_operation[name[self].nodes - name[deps]]] | keyword[def] identifier[leaf_nodes] ( identifier[self] ):
literal[string]
identifier[deps] ={ identifier[item] keyword[for] identifier[sublist] keyword[in] identifier[self] . identifier[edges] . identifier[values] () keyword[for] identifier[item] keyword[in] identifier[sublist] }
keyword[return] identifier[self] . identifier[nodes] - identifier[deps] | def leaf_nodes(self):
"""
Return an interable of nodes with no edges pointing at them. This is
helpful to find all nodes without dependencies.
"""
# Now contains all nodes that contain dependencies.
deps = {item for sublist in self.edges.values() for item in sublist}
# contains all nodes *without* any dependencies (leaf nodes)
return self.nodes - deps |
def get_compound_regex(schema='mona'):
    """ Create a dictionary of regex for extracting the compound information for the spectra
    """
    # NOTE: will just ignore cases in the regex, to avoid repetition here
    if schema == 'mona':
        pairs = [
            ('name', ['^Name(?:=|:)(.*)$']),
            ('inchikey_id', ['^inchikey(?:=|:)(.*)$']),
            ('molecular_formula', ['^molecular formula(?:=|:)(.*)$', '^formula:(.*)$']),
            ('molecular_weight', ['^MW(?:=|:)(\d*[.,]?\d*)$']),
            ('pubchem_id', ['^pubchem.*cid(?:=|:)(\d*)".*$']),
            ('chemspider_id', ['^chemspider(?:=|:)(\d*)".*$']),
            ('compound_class', ['^compound.*class(?:=|:)(.*)$']),
            ('exact_mass', ['^exact.*mass(?:=|:)(\d*[.,]?\d*)$']),
            ('smiles', ['^SMILES(?:=|:)(.*)$']),
            ('other_names', ['^Synonym(?:=|:)(.*)$']),
        ]
    elif schema == 'massbank':
        pairs = [
            ('name', ['^CH\$NAME:\s+(.*)$']),
            ('other_names', ['^CH\$NAME:\s+(.*)$']),
            ('inchikey_id', ['^CH\$LINK:\s+INCHIKEY\s+(.*)$']),
            ('molecular_formula', ['^CH\$FORMULA:\s+(.*)$']),
            ('molecular_weight', ['^CH\$MOLECULAR_WEIGHT:\s+(.*)$']),
            ('pubchem_id', ['^CH\$LINK:\s+PUBCHEM\s+CID:(.*)$']),
            ('chemspider_id', ['^CH\$LINK:\s+CHEMSPIDER\s+(.*)$']),
            ('compound_class', ['^CH\$COMPOUND_CLASS:\s+(.*)$']),
            ('exact_mass', ['^CH\$EXACT_MASS:\s+(.*)$']),
            ('smiles', ['^CH\$SMILES:\s+(.*)$']),
        ]
    else:
        # Unknown schema: same as the original fall-through — empty mapping.
        pairs = []
    return collections.OrderedDict(pairs)
constant[ Create a dictionary of regex for extracting the compound information for the spectra
]
variable[meta_parse] assign[=] call[name[collections].OrderedDict, parameter[]]
if compare[name[schema] equal[==] constant[mona]] begin[:]
call[name[meta_parse]][constant[name]] assign[=] list[[<ast.Constant object at 0x7da18bcc81c0>]]
call[name[meta_parse]][constant[inchikey_id]] assign[=] list[[<ast.Constant object at 0x7da18bcca9b0>]]
call[name[meta_parse]][constant[molecular_formula]] assign[=] list[[<ast.Constant object at 0x7da18bcca1a0>, <ast.Constant object at 0x7da18bcc86a0>]]
call[name[meta_parse]][constant[molecular_weight]] assign[=] list[[<ast.Constant object at 0x7da18bcc9ae0>]]
call[name[meta_parse]][constant[pubchem_id]] assign[=] list[[<ast.Constant object at 0x7da18bccaf20>]]
call[name[meta_parse]][constant[chemspider_id]] assign[=] list[[<ast.Constant object at 0x7da18bccbcd0>]]
call[name[meta_parse]][constant[compound_class]] assign[=] list[[<ast.Constant object at 0x7da18bcca620>]]
call[name[meta_parse]][constant[exact_mass]] assign[=] list[[<ast.Constant object at 0x7da18bcc93f0>]]
call[name[meta_parse]][constant[smiles]] assign[=] list[[<ast.Constant object at 0x7da18bccb0a0>]]
call[name[meta_parse]][constant[other_names]] assign[=] list[[<ast.Constant object at 0x7da18bcca3e0>]]
return[name[meta_parse]] | keyword[def] identifier[get_compound_regex] ( identifier[schema] = literal[string] ):
literal[string]
identifier[meta_parse] = identifier[collections] . identifier[OrderedDict] ()
keyword[if] identifier[schema] == literal[string] :
identifier[meta_parse] [ literal[string] ]=[ literal[string] ]
identifier[meta_parse] [ literal[string] ]=[ literal[string] ]
identifier[meta_parse] [ literal[string] ]=[ literal[string] , literal[string] ]
identifier[meta_parse] [ literal[string] ]=[ literal[string] ]
identifier[meta_parse] [ literal[string] ]=[ literal[string] ]
identifier[meta_parse] [ literal[string] ]=[ literal[string] ]
identifier[meta_parse] [ literal[string] ]=[ literal[string] ]
identifier[meta_parse] [ literal[string] ]=[ literal[string] ]
identifier[meta_parse] [ literal[string] ]=[ literal[string] ]
identifier[meta_parse] [ literal[string] ]=[ literal[string] ]
keyword[elif] identifier[schema] == literal[string] :
identifier[meta_parse] [ literal[string] ]=[ literal[string] ]
identifier[meta_parse] [ literal[string] ]=[ literal[string] ]
identifier[meta_parse] [ literal[string] ]=[ literal[string] ]
identifier[meta_parse] [ literal[string] ]=[ literal[string] ]
identifier[meta_parse] [ literal[string] ]=[ literal[string] ]
identifier[meta_parse] [ literal[string] ]=[ literal[string] ]
identifier[meta_parse] [ literal[string] ]=[ literal[string] ]
identifier[meta_parse] [ literal[string] ]=[ literal[string] ]
identifier[meta_parse] [ literal[string] ]=[ literal[string] ]
identifier[meta_parse] [ literal[string] ]=[ literal[string] ]
keyword[return] identifier[meta_parse] | def get_compound_regex(schema='mona'):
""" Create a dictionary of regex for extracting the compound information for the spectra
"""
# NOTE: will just ignore cases in the regex, to avoid repetition here
meta_parse = collections.OrderedDict()
if schema == 'mona':
meta_parse['name'] = ['^Name(?:=|:)(.*)$']
meta_parse['inchikey_id'] = ['^inchikey(?:=|:)(.*)$']
meta_parse['molecular_formula'] = ['^molecular formula(?:=|:)(.*)$', '^formula:(.*)$']
meta_parse['molecular_weight'] = ['^MW(?:=|:)(\\d*[.,]?\\d*)$']
meta_parse['pubchem_id'] = ['^pubchem.*cid(?:=|:)(\\d*)".*$']
meta_parse['chemspider_id'] = ['^chemspider(?:=|:)(\\d*)".*$']
meta_parse['compound_class'] = ['^compound.*class(?:=|:)(.*)$']
meta_parse['exact_mass'] = ['^exact.*mass(?:=|:)(\\d*[.,]?\\d*)$']
meta_parse['smiles'] = ['^SMILES(?:=|:)(.*)$']
meta_parse['other_names'] = ['^Synonym(?:=|:)(.*)$'] # depends on [control=['if'], data=[]]
elif schema == 'massbank':
meta_parse['name'] = ['^CH\\$NAME:\\s+(.*)$']
meta_parse['other_names'] = ['^CH\\$NAME:\\s+(.*)$']
meta_parse['inchikey_id'] = ['^CH\\$LINK:\\s+INCHIKEY\\s+(.*)$']
meta_parse['molecular_formula'] = ['^CH\\$FORMULA:\\s+(.*)$']
meta_parse['molecular_weight'] = ['^CH\\$MOLECULAR_WEIGHT:\\s+(.*)$']
meta_parse['pubchem_id'] = ['^CH\\$LINK:\\s+PUBCHEM\\s+CID:(.*)$']
meta_parse['chemspider_id'] = ['^CH\\$LINK:\\s+CHEMSPIDER\\s+(.*)$']
meta_parse['compound_class'] = ['^CH\\$COMPOUND_CLASS:\\s+(.*)$']
meta_parse['exact_mass'] = ['^CH\\$EXACT_MASS:\\s+(.*)$']
meta_parse['smiles'] = ['^CH\\$SMILES:\\s+(.*)$'] # depends on [control=['if'], data=[]]
return meta_parse |
def read_float_matrix(rx_specifier):
    """ Return float matrix as np array for the given rx specifier
    (``path:offset`` into a binary Kaldi-style ark of float32 values). """
    path, offset = rx_specifier.strip().split(':', maxsplit=1)
    offset = int(offset)
    sample_format = 4  # bytes per float32 sample
    with open(path, 'rb') as ark:
        ark.seek(offset)
        # Binary-ark marker, then the float32-matrix header.
        assert (ark.read(2) == b'\x00B')
        assert (ark.read(3) == b'FM ')
        # Each int32 dimension is preceded by a one-byte size field.
        ark.read(1)
        num_frames = struct.unpack('<i', ark.read(4))[0]
        ark.read(1)
        feature_size = struct.unpack('<i', ark.read(4))[0]
        raw = ark.read(num_frames * feature_size * sample_format)
    vector = np.frombuffer(raw, dtype='float32')
    return np.reshape(vector, (num_frames, feature_size))
constant[ Return float matrix as np array for the given rx specifier. ]
<ast.Tuple object at 0x7da1b0ba7040> assign[=] call[call[name[rx_specifier].strip, parameter[]].split, parameter[constant[:]]]
variable[offset] assign[=] call[name[int], parameter[name[offset]]]
variable[sample_format] assign[=] constant[4]
with call[name[open], parameter[name[path], constant[rb]]] begin[:]
call[name[f].seek, parameter[name[offset]]]
variable[binary] assign[=] call[name[f].read, parameter[constant[2]]]
assert[compare[name[binary] equal[==] constant[b'\x00B']]]
variable[format] assign[=] call[name[f].read, parameter[constant[3]]]
assert[compare[name[format] equal[==] constant[b'FM ']]]
call[name[f].read, parameter[constant[1]]]
variable[num_frames] assign[=] call[call[name[struct].unpack, parameter[constant[<i], call[name[f].read, parameter[constant[4]]]]]][constant[0]]
call[name[f].read, parameter[constant[1]]]
variable[feature_size] assign[=] call[call[name[struct].unpack, parameter[constant[<i], call[name[f].read, parameter[constant[4]]]]]][constant[0]]
variable[data] assign[=] call[name[f].read, parameter[binary_operation[binary_operation[name[num_frames] * name[feature_size]] * name[sample_format]]]]
variable[feature_vector] assign[=] call[name[np].frombuffer, parameter[name[data]]]
variable[feature_matrix] assign[=] call[name[np].reshape, parameter[name[feature_vector], tuple[[<ast.Name object at 0x7da1b0c53f70>, <ast.Name object at 0x7da1b0c53670>]]]]
return[name[feature_matrix]] | keyword[def] identifier[read_float_matrix] ( identifier[rx_specifier] ):
literal[string]
identifier[path] , identifier[offset] = identifier[rx_specifier] . identifier[strip] (). identifier[split] ( literal[string] , identifier[maxsplit] = literal[int] )
identifier[offset] = identifier[int] ( identifier[offset] )
identifier[sample_format] = literal[int]
keyword[with] identifier[open] ( identifier[path] , literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[seek] ( identifier[offset] )
identifier[binary] = identifier[f] . identifier[read] ( literal[int] )
keyword[assert] ( identifier[binary] == literal[string] )
identifier[format] = identifier[f] . identifier[read] ( literal[int] )
keyword[assert] ( identifier[format] == literal[string] )
identifier[f] . identifier[read] ( literal[int] )
identifier[num_frames] = identifier[struct] . identifier[unpack] ( literal[string] , identifier[f] . identifier[read] ( literal[int] ))[ literal[int] ]
identifier[f] . identifier[read] ( literal[int] )
identifier[feature_size] = identifier[struct] . identifier[unpack] ( literal[string] , identifier[f] . identifier[read] ( literal[int] ))[ literal[int] ]
identifier[data] = identifier[f] . identifier[read] ( identifier[num_frames] * identifier[feature_size] * identifier[sample_format] )
identifier[feature_vector] = identifier[np] . identifier[frombuffer] ( identifier[data] , identifier[dtype] = literal[string] )
identifier[feature_matrix] = identifier[np] . identifier[reshape] ( identifier[feature_vector] ,( identifier[num_frames] , identifier[feature_size] ))
keyword[return] identifier[feature_matrix] | def read_float_matrix(rx_specifier):
""" Return float matrix as np array for the given rx specifier. """
(path, offset) = rx_specifier.strip().split(':', maxsplit=1)
offset = int(offset)
sample_format = 4
with open(path, 'rb') as f:
# move to offset
f.seek(offset)
# assert binary ark
binary = f.read(2)
assert binary == b'\x00B'
# assert type float 32
format = f.read(3)
assert format == b'FM '
# get number of mfcc features
f.read(1)
num_frames = struct.unpack('<i', f.read(4))[0]
# get size of mfcc features
f.read(1)
feature_size = struct.unpack('<i', f.read(4))[0]
# read feature data
data = f.read(num_frames * feature_size * sample_format)
feature_vector = np.frombuffer(data, dtype='float32')
feature_matrix = np.reshape(feature_vector, (num_frames, feature_size))
return feature_matrix # depends on [control=['with'], data=['f']] |
def _parsecsv(x):
"""Deserialize file-like object containing csv to a Python generator.
"""
for line in x:
# decode as utf-8, whitespace-strip and split on delimiter
yield line.decode('utf-8').strip().split(config.DELIMITER) | def function[_parsecsv, parameter[x]]:
constant[Deserialize file-like object containing csv to a Python generator.
]
for taget[name[line]] in starred[name[x]] begin[:]
<ast.Yield object at 0x7da20c6c5660> | keyword[def] identifier[_parsecsv] ( identifier[x] ):
literal[string]
keyword[for] identifier[line] keyword[in] identifier[x] :
keyword[yield] identifier[line] . identifier[decode] ( literal[string] ). identifier[strip] (). identifier[split] ( identifier[config] . identifier[DELIMITER] ) | def _parsecsv(x):
"""Deserialize file-like object containing csv to a Python generator.
"""
for line in x:
# decode as utf-8, whitespace-strip and split on delimiter
yield line.decode('utf-8').strip().split(config.DELIMITER) # depends on [control=['for'], data=['line']] |
def pin_chat_message(chat_id, message_id, disable_notification=None, **kwargs):
"""
Use this method to pin a message in a supergroup or a channel. The bot must be an administrator in the chat for this to work and
must have the ‘can_pin_messages’ admin right in the supergroup or ‘can_edit_messages’ admin right in the channel.
:param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername)
:param message_id: Identifier of a message to pin
:param disable_notification: Pass True, if it is not necessary to send a notification to all chat members about the new pinned message.
Notifications are always disabled in channels.
:param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest`
:return: Returns True on success.
:rtype: bool
"""
# required args
params = dict(
chat_id=chat_id,
message_id=message_id
)
params.update(
_clean_params(
disable_notification=disable_notification,
)
)
return TelegramBotRPCRequest('pinChatMessage', params=params, on_result=lambda result: result, **kwargs) | def function[pin_chat_message, parameter[chat_id, message_id, disable_notification]]:
constant[
Use this method to pin a message in a supergroup or a channel. The bot must be an administrator in the chat for this to work and
must have the ‘can_pin_messages’ admin right in the supergroup or ‘can_edit_messages’ admin right in the channel.
:param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername)
:param message_id: Identifier of a message to pin
:param disable_notification: Pass True, if it is not necessary to send a notification to all chat members about the new pinned message.
Notifications are always disabled in channels.
:param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest`
:return: Returns True on success.
:rtype: bool
]
variable[params] assign[=] call[name[dict], parameter[]]
call[name[params].update, parameter[call[name[_clean_params], parameter[]]]]
return[call[name[TelegramBotRPCRequest], parameter[constant[pinChatMessage]]]] | keyword[def] identifier[pin_chat_message] ( identifier[chat_id] , identifier[message_id] , identifier[disable_notification] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[params] = identifier[dict] (
identifier[chat_id] = identifier[chat_id] ,
identifier[message_id] = identifier[message_id]
)
identifier[params] . identifier[update] (
identifier[_clean_params] (
identifier[disable_notification] = identifier[disable_notification] ,
)
)
keyword[return] identifier[TelegramBotRPCRequest] ( literal[string] , identifier[params] = identifier[params] , identifier[on_result] = keyword[lambda] identifier[result] : identifier[result] ,** identifier[kwargs] ) | def pin_chat_message(chat_id, message_id, disable_notification=None, **kwargs):
"""
Use this method to pin a message in a supergroup or a channel. The bot must be an administrator in the chat for this to work and
must have the ‘can_pin_messages’ admin right in the supergroup or ‘can_edit_messages’ admin right in the channel.
:param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername)
:param message_id: Identifier of a message to pin
:param disable_notification: Pass True, if it is not necessary to send a notification to all chat members about the new pinned message.
Notifications are always disabled in channels.
:param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest`
:return: Returns True on success.
:rtype: bool
"""
# required args
params = dict(chat_id=chat_id, message_id=message_id)
params.update(_clean_params(disable_notification=disable_notification))
return TelegramBotRPCRequest('pinChatMessage', params=params, on_result=lambda result: result, **kwargs) |
def save(self, name=None, path=None):
"""Save file as xml
"""
if path :
name = os.path.join(path,name)
try:
self._create_table_xml_file(self.etree, name)
except (Exception,) as e:
print(e)
return False
return True | def function[save, parameter[self, name, path]]:
constant[Save file as xml
]
if name[path] begin[:]
variable[name] assign[=] call[name[os].path.join, parameter[name[path], name[name]]]
<ast.Try object at 0x7da2045663b0>
return[constant[True]] | keyword[def] identifier[save] ( identifier[self] , identifier[name] = keyword[None] , identifier[path] = keyword[None] ):
literal[string]
keyword[if] identifier[path] :
identifier[name] = identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[name] )
keyword[try] :
identifier[self] . identifier[_create_table_xml_file] ( identifier[self] . identifier[etree] , identifier[name] )
keyword[except] ( identifier[Exception] ,) keyword[as] identifier[e] :
identifier[print] ( identifier[e] )
keyword[return] keyword[False]
keyword[return] keyword[True] | def save(self, name=None, path=None):
"""Save file as xml
"""
if path:
name = os.path.join(path, name) # depends on [control=['if'], data=[]]
try:
self._create_table_xml_file(self.etree, name) # depends on [control=['try'], data=[]]
except (Exception,) as e:
print(e)
return False # depends on [control=['except'], data=['e']]
return True |
def mergebydepth(args):
"""
%prog mergebydepth reads.bed genome.fasta
Similar to mergeBed, but only returns regions beyond certain depth.
"""
p = OptionParser(mergebydepth.__doc__)
p.add_option("--mindepth", default=3, type="int",
help="Minimum depth required")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bedfile, fastafile = args
mindepth = opts.mindepth
bedgraph = make_bedgraph(bedfile)
bedgraphfiltered = bedgraph + ".d{0}".format(mindepth)
if need_update(bedgraph, bedgraphfiltered):
filter([bedgraph, "--minaccn={0}".format(mindepth),
"--outfile={0}".format(bedgraphfiltered)])
merged = bedgraphfiltered + ".merge.fasta"
if need_update(bedgraphfiltered, merged):
mergeBed(bedgraphfiltered, sorted=True) | def function[mergebydepth, parameter[args]]:
constant[
%prog mergebydepth reads.bed genome.fasta
Similar to mergeBed, but only returns regions beyond certain depth.
]
variable[p] assign[=] call[name[OptionParser], parameter[name[mergebydepth].__doc__]]
call[name[p].add_option, parameter[constant[--mindepth]]]
<ast.Tuple object at 0x7da18fe930d0> assign[=] call[name[p].parse_args, parameter[name[args]]]
if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[2]] begin[:]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da18fe90670>]]
<ast.Tuple object at 0x7da18fe90460> assign[=] name[args]
variable[mindepth] assign[=] name[opts].mindepth
variable[bedgraph] assign[=] call[name[make_bedgraph], parameter[name[bedfile]]]
variable[bedgraphfiltered] assign[=] binary_operation[name[bedgraph] + call[constant[.d{0}].format, parameter[name[mindepth]]]]
if call[name[need_update], parameter[name[bedgraph], name[bedgraphfiltered]]] begin[:]
call[name[filter], parameter[list[[<ast.Name object at 0x7da18fe92410>, <ast.Call object at 0x7da18fe91e40>, <ast.Call object at 0x7da18fe93df0>]]]]
variable[merged] assign[=] binary_operation[name[bedgraphfiltered] + constant[.merge.fasta]]
if call[name[need_update], parameter[name[bedgraphfiltered], name[merged]]] begin[:]
call[name[mergeBed], parameter[name[bedgraphfiltered]]] | keyword[def] identifier[mergebydepth] ( identifier[args] ):
literal[string]
identifier[p] = identifier[OptionParser] ( identifier[mergebydepth] . identifier[__doc__] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = literal[int] , identifier[type] = literal[string] ,
identifier[help] = literal[string] )
identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] )
keyword[if] identifier[len] ( identifier[args] )!= literal[int] :
identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ())
identifier[bedfile] , identifier[fastafile] = identifier[args]
identifier[mindepth] = identifier[opts] . identifier[mindepth]
identifier[bedgraph] = identifier[make_bedgraph] ( identifier[bedfile] )
identifier[bedgraphfiltered] = identifier[bedgraph] + literal[string] . identifier[format] ( identifier[mindepth] )
keyword[if] identifier[need_update] ( identifier[bedgraph] , identifier[bedgraphfiltered] ):
identifier[filter] ([ identifier[bedgraph] , literal[string] . identifier[format] ( identifier[mindepth] ),
literal[string] . identifier[format] ( identifier[bedgraphfiltered] )])
identifier[merged] = identifier[bedgraphfiltered] + literal[string]
keyword[if] identifier[need_update] ( identifier[bedgraphfiltered] , identifier[merged] ):
identifier[mergeBed] ( identifier[bedgraphfiltered] , identifier[sorted] = keyword[True] ) | def mergebydepth(args):
"""
%prog mergebydepth reads.bed genome.fasta
Similar to mergeBed, but only returns regions beyond certain depth.
"""
p = OptionParser(mergebydepth.__doc__)
p.add_option('--mindepth', default=3, type='int', help='Minimum depth required')
(opts, args) = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help()) # depends on [control=['if'], data=[]]
(bedfile, fastafile) = args
mindepth = opts.mindepth
bedgraph = make_bedgraph(bedfile)
bedgraphfiltered = bedgraph + '.d{0}'.format(mindepth)
if need_update(bedgraph, bedgraphfiltered):
filter([bedgraph, '--minaccn={0}'.format(mindepth), '--outfile={0}'.format(bedgraphfiltered)]) # depends on [control=['if'], data=[]]
merged = bedgraphfiltered + '.merge.fasta'
if need_update(bedgraphfiltered, merged):
mergeBed(bedgraphfiltered, sorted=True) # depends on [control=['if'], data=[]] |
def analysis_provenance_details_pdf_extractor(
impact_report, component_metadata):
"""Extracting the main provenance details to its own pdf report.
For PDF generations
:param impact_report: the impact report that acts as a proxy to fetch
all the data that extractor needed
:type impact_report: safe.report.impact_report.ImpactReport
:param component_metadata: the component metadata. Used to obtain
information about the component we want to render
:type component_metadata: safe.report.report_metadata.
ReportComponentsMetadata
:return: context for rendering phase
:rtype: dict
.. versionadded:: 4.1
"""
# QGIS Composer needed certain context to generate the output
# - Map Settings
# - Substitution maps
# - Element settings, such as icon for picture file or image source
context = QGISComposerContext()
# we only have html elements for this
html_frame_elements = [
{
'id': 'analysis-provenance-details-report',
'mode': 'text',
'text': jinja2_output_as_string(
impact_report, 'analysis-provenance-details-report'),
'margin_left': 10,
'margin_top': 10,
}
]
context.html_frame_elements = html_frame_elements
return context | def function[analysis_provenance_details_pdf_extractor, parameter[impact_report, component_metadata]]:
constant[Extracting the main provenance details to its own pdf report.
For PDF generations
:param impact_report: the impact report that acts as a proxy to fetch
all the data that extractor needed
:type impact_report: safe.report.impact_report.ImpactReport
:param component_metadata: the component metadata. Used to obtain
information about the component we want to render
:type component_metadata: safe.report.report_metadata.
ReportComponentsMetadata
:return: context for rendering phase
:rtype: dict
.. versionadded:: 4.1
]
variable[context] assign[=] call[name[QGISComposerContext], parameter[]]
variable[html_frame_elements] assign[=] list[[<ast.Dict object at 0x7da1b0c52620>]]
name[context].html_frame_elements assign[=] name[html_frame_elements]
return[name[context]] | keyword[def] identifier[analysis_provenance_details_pdf_extractor] (
identifier[impact_report] , identifier[component_metadata] ):
literal[string]
identifier[context] = identifier[QGISComposerContext] ()
identifier[html_frame_elements] =[
{
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : identifier[jinja2_output_as_string] (
identifier[impact_report] , literal[string] ),
literal[string] : literal[int] ,
literal[string] : literal[int] ,
}
]
identifier[context] . identifier[html_frame_elements] = identifier[html_frame_elements]
keyword[return] identifier[context] | def analysis_provenance_details_pdf_extractor(impact_report, component_metadata):
"""Extracting the main provenance details to its own pdf report.
For PDF generations
:param impact_report: the impact report that acts as a proxy to fetch
all the data that extractor needed
:type impact_report: safe.report.impact_report.ImpactReport
:param component_metadata: the component metadata. Used to obtain
information about the component we want to render
:type component_metadata: safe.report.report_metadata.
ReportComponentsMetadata
:return: context for rendering phase
:rtype: dict
.. versionadded:: 4.1
"""
# QGIS Composer needed certain context to generate the output
# - Map Settings
# - Substitution maps
# - Element settings, such as icon for picture file or image source
context = QGISComposerContext()
# we only have html elements for this
html_frame_elements = [{'id': 'analysis-provenance-details-report', 'mode': 'text', 'text': jinja2_output_as_string(impact_report, 'analysis-provenance-details-report'), 'margin_left': 10, 'margin_top': 10}]
context.html_frame_elements = html_frame_elements
return context |
def objwalk(obj, path=(), memo=None):
"""
Walks an arbitrary python pbject.
:param mixed obj: Any python object
:param tuple path: A tuple of the set attributes representing the path to the value
:param set memo: The list of attributes traversed thus far
:rtype <tuple<tuple>, <mixed>>: The path to the value on the object, the value.
"""
if len( path ) > MAX_DEPTH + 1:
yield path, obj # Truncate it!
if memo is None:
memo = set()
iterator = None
if isinstance(obj, Mapping):
iterator = iteritems
elif isinstance(obj, (Sequence, Set)) and not isinstance(obj, string_types):
iterator = enumerate
elif hasattr( obj, '__class__' ) and hasattr( obj, '__dict__' ) and type(obj) not in primitives: # If type(obj) == <instance>
iterator = class_iterator
elif hasattr(obj, '__iter__') or isinstance(obj, types.GeneratorType):
obj = [o for o in obj]
else:
pass
if iterator:
if id(obj) not in memo:
memo.add(id(obj))
for path_component, value in iterator(obj):
for result in objwalk(value, path + (path_component,), memo):
yield result
memo.remove(id(obj))
else:
yield path, obj | def function[objwalk, parameter[obj, path, memo]]:
constant[
Walks an arbitrary python pbject.
:param mixed obj: Any python object
:param tuple path: A tuple of the set attributes representing the path to the value
:param set memo: The list of attributes traversed thus far
:rtype <tuple<tuple>, <mixed>>: The path to the value on the object, the value.
]
if compare[call[name[len], parameter[name[path]]] greater[>] binary_operation[name[MAX_DEPTH] + constant[1]]] begin[:]
<ast.Yield object at 0x7da18bcc8c70>
if compare[name[memo] is constant[None]] begin[:]
variable[memo] assign[=] call[name[set], parameter[]]
variable[iterator] assign[=] constant[None]
if call[name[isinstance], parameter[name[obj], name[Mapping]]] begin[:]
variable[iterator] assign[=] name[iteritems]
if name[iterator] begin[:]
if compare[call[name[id], parameter[name[obj]]] <ast.NotIn object at 0x7da2590d7190> name[memo]] begin[:]
call[name[memo].add, parameter[call[name[id], parameter[name[obj]]]]]
for taget[tuple[[<ast.Name object at 0x7da1b0a4a080>, <ast.Name object at 0x7da1b0a4be50>]]] in starred[call[name[iterator], parameter[name[obj]]]] begin[:]
for taget[name[result]] in starred[call[name[objwalk], parameter[name[value], binary_operation[name[path] + tuple[[<ast.Name object at 0x7da18f721f60>]]], name[memo]]]] begin[:]
<ast.Yield object at 0x7da18f723400>
call[name[memo].remove, parameter[call[name[id], parameter[name[obj]]]]] | keyword[def] identifier[objwalk] ( identifier[obj] , identifier[path] =(), identifier[memo] = keyword[None] ):
literal[string]
keyword[if] identifier[len] ( identifier[path] )> identifier[MAX_DEPTH] + literal[int] :
keyword[yield] identifier[path] , identifier[obj]
keyword[if] identifier[memo] keyword[is] keyword[None] :
identifier[memo] = identifier[set] ()
identifier[iterator] = keyword[None]
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[Mapping] ):
identifier[iterator] = identifier[iteritems]
keyword[elif] identifier[isinstance] ( identifier[obj] ,( identifier[Sequence] , identifier[Set] )) keyword[and] keyword[not] identifier[isinstance] ( identifier[obj] , identifier[string_types] ):
identifier[iterator] = identifier[enumerate]
keyword[elif] identifier[hasattr] ( identifier[obj] , literal[string] ) keyword[and] identifier[hasattr] ( identifier[obj] , literal[string] ) keyword[and] identifier[type] ( identifier[obj] ) keyword[not] keyword[in] identifier[primitives] :
identifier[iterator] = identifier[class_iterator]
keyword[elif] identifier[hasattr] ( identifier[obj] , literal[string] ) keyword[or] identifier[isinstance] ( identifier[obj] , identifier[types] . identifier[GeneratorType] ):
identifier[obj] =[ identifier[o] keyword[for] identifier[o] keyword[in] identifier[obj] ]
keyword[else] :
keyword[pass]
keyword[if] identifier[iterator] :
keyword[if] identifier[id] ( identifier[obj] ) keyword[not] keyword[in] identifier[memo] :
identifier[memo] . identifier[add] ( identifier[id] ( identifier[obj] ))
keyword[for] identifier[path_component] , identifier[value] keyword[in] identifier[iterator] ( identifier[obj] ):
keyword[for] identifier[result] keyword[in] identifier[objwalk] ( identifier[value] , identifier[path] +( identifier[path_component] ,), identifier[memo] ):
keyword[yield] identifier[result]
identifier[memo] . identifier[remove] ( identifier[id] ( identifier[obj] ))
keyword[else] :
keyword[yield] identifier[path] , identifier[obj] | def objwalk(obj, path=(), memo=None):
"""
Walks an arbitrary python pbject.
:param mixed obj: Any python object
:param tuple path: A tuple of the set attributes representing the path to the value
:param set memo: The list of attributes traversed thus far
:rtype <tuple<tuple>, <mixed>>: The path to the value on the object, the value.
"""
if len(path) > MAX_DEPTH + 1:
yield (path, obj) # Truncate it! # depends on [control=['if'], data=[]]
if memo is None:
memo = set() # depends on [control=['if'], data=['memo']]
iterator = None
if isinstance(obj, Mapping):
iterator = iteritems # depends on [control=['if'], data=[]]
elif isinstance(obj, (Sequence, Set)) and (not isinstance(obj, string_types)):
iterator = enumerate # depends on [control=['if'], data=[]]
elif hasattr(obj, '__class__') and hasattr(obj, '__dict__') and (type(obj) not in primitives): # If type(obj) == <instance>
iterator = class_iterator # depends on [control=['if'], data=[]]
elif hasattr(obj, '__iter__') or isinstance(obj, types.GeneratorType):
obj = [o for o in obj] # depends on [control=['if'], data=[]]
else:
pass
if iterator:
if id(obj) not in memo:
memo.add(id(obj))
for (path_component, value) in iterator(obj):
for result in objwalk(value, path + (path_component,), memo):
yield result # depends on [control=['for'], data=['result']] # depends on [control=['for'], data=[]]
memo.remove(id(obj)) # depends on [control=['if'], data=['memo']] # depends on [control=['if'], data=[]]
else:
yield (path, obj) |
def and_constraint(v=1, sense="minimize"):
""" AND constraint """
assert v in [0,1], "v must be 0 or 1 instead of %s" % v.__repr__()
model, x, y, z = _init()
r = model.addVar("r", "B")
model.addConsAnd([x,y,z], r)
model.addCons(x==v)
model.setObjective(r, sense=sense)
_optimize("AND", model) | def function[and_constraint, parameter[v, sense]]:
constant[ AND constraint ]
assert[compare[name[v] in list[[<ast.Constant object at 0x7da1b17f46d0>, <ast.Constant object at 0x7da1b17f46a0>]]]]
<ast.Tuple object at 0x7da1b17f4550> assign[=] call[name[_init], parameter[]]
variable[r] assign[=] call[name[model].addVar, parameter[constant[r], constant[B]]]
call[name[model].addConsAnd, parameter[list[[<ast.Name object at 0x7da1b17f4160>, <ast.Name object at 0x7da1b17f4100>, <ast.Name object at 0x7da1b17f40a0>]], name[r]]]
call[name[model].addCons, parameter[compare[name[x] equal[==] name[v]]]]
call[name[model].setObjective, parameter[name[r]]]
call[name[_optimize], parameter[constant[AND], name[model]]] | keyword[def] identifier[and_constraint] ( identifier[v] = literal[int] , identifier[sense] = literal[string] ):
literal[string]
keyword[assert] identifier[v] keyword[in] [ literal[int] , literal[int] ], literal[string] % identifier[v] . identifier[__repr__] ()
identifier[model] , identifier[x] , identifier[y] , identifier[z] = identifier[_init] ()
identifier[r] = identifier[model] . identifier[addVar] ( literal[string] , literal[string] )
identifier[model] . identifier[addConsAnd] ([ identifier[x] , identifier[y] , identifier[z] ], identifier[r] )
identifier[model] . identifier[addCons] ( identifier[x] == identifier[v] )
identifier[model] . identifier[setObjective] ( identifier[r] , identifier[sense] = identifier[sense] )
identifier[_optimize] ( literal[string] , identifier[model] ) | def and_constraint(v=1, sense='minimize'):
""" AND constraint """
assert v in [0, 1], 'v must be 0 or 1 instead of %s' % v.__repr__()
(model, x, y, z) = _init()
r = model.addVar('r', 'B')
model.addConsAnd([x, y, z], r)
model.addCons(x == v)
model.setObjective(r, sense=sense)
_optimize('AND', model) |
def _get(self, url):
"""Handles api.football-data.org requests"""
self.logger.info(f'Sending request: {RequestHandler.BASE_URL+url}')
req = requests.get(RequestHandler.BASE_URL+url, headers=self.headers)
self.logger.info(f'Request returned with status code {req.status_code}')
if req.status_code == requests.codes.ok:
return req
if req.status_code == requests.codes.bad:
raise APIErrorException('Invalid request. Check parameters.', req.status_code)
if req.status_code == requests.codes.forbidden:
raise APIErrorException('This resource is restricted', req.status_code)
if req.status_code == requests.codes.not_found:
raise APIErrorException('This resource does not exist. Check parameters', req.status_code)
if req.status_code == requests.codes.too_many_requests:
raise APIErrorException('You have exceeded your allowed requests per minute/day', req.status_code) | def function[_get, parameter[self, url]]:
constant[Handles api.football-data.org requests]
call[name[self].logger.info, parameter[<ast.JoinedStr object at 0x7da1b085e410>]]
variable[req] assign[=] call[name[requests].get, parameter[binary_operation[name[RequestHandler].BASE_URL + name[url]]]]
call[name[self].logger.info, parameter[<ast.JoinedStr object at 0x7da1b085efe0>]]
if compare[name[req].status_code equal[==] name[requests].codes.ok] begin[:]
return[name[req]]
if compare[name[req].status_code equal[==] name[requests].codes.bad] begin[:]
<ast.Raise object at 0x7da1b085d540>
if compare[name[req].status_code equal[==] name[requests].codes.forbidden] begin[:]
<ast.Raise object at 0x7da1b085dae0>
if compare[name[req].status_code equal[==] name[requests].codes.not_found] begin[:]
<ast.Raise object at 0x7da1b085fd30>
if compare[name[req].status_code equal[==] name[requests].codes.too_many_requests] begin[:]
<ast.Raise object at 0x7da1b085e350> | keyword[def] identifier[_get] ( identifier[self] , identifier[url] ):
literal[string]
identifier[self] . identifier[logger] . identifier[info] ( literal[string] )
identifier[req] = identifier[requests] . identifier[get] ( identifier[RequestHandler] . identifier[BASE_URL] + identifier[url] , identifier[headers] = identifier[self] . identifier[headers] )
identifier[self] . identifier[logger] . identifier[info] ( literal[string] )
keyword[if] identifier[req] . identifier[status_code] == identifier[requests] . identifier[codes] . identifier[ok] :
keyword[return] identifier[req]
keyword[if] identifier[req] . identifier[status_code] == identifier[requests] . identifier[codes] . identifier[bad] :
keyword[raise] identifier[APIErrorException] ( literal[string] , identifier[req] . identifier[status_code] )
keyword[if] identifier[req] . identifier[status_code] == identifier[requests] . identifier[codes] . identifier[forbidden] :
keyword[raise] identifier[APIErrorException] ( literal[string] , identifier[req] . identifier[status_code] )
keyword[if] identifier[req] . identifier[status_code] == identifier[requests] . identifier[codes] . identifier[not_found] :
keyword[raise] identifier[APIErrorException] ( literal[string] , identifier[req] . identifier[status_code] )
keyword[if] identifier[req] . identifier[status_code] == identifier[requests] . identifier[codes] . identifier[too_many_requests] :
keyword[raise] identifier[APIErrorException] ( literal[string] , identifier[req] . identifier[status_code] ) | def _get(self, url):
"""Handles api.football-data.org requests"""
self.logger.info(f'Sending request: {RequestHandler.BASE_URL + url}')
req = requests.get(RequestHandler.BASE_URL + url, headers=self.headers)
self.logger.info(f'Request returned with status code {req.status_code}')
if req.status_code == requests.codes.ok:
return req # depends on [control=['if'], data=[]]
if req.status_code == requests.codes.bad:
raise APIErrorException('Invalid request. Check parameters.', req.status_code) # depends on [control=['if'], data=[]]
if req.status_code == requests.codes.forbidden:
raise APIErrorException('This resource is restricted', req.status_code) # depends on [control=['if'], data=[]]
if req.status_code == requests.codes.not_found:
raise APIErrorException('This resource does not exist. Check parameters', req.status_code) # depends on [control=['if'], data=[]]
if req.status_code == requests.codes.too_many_requests:
raise APIErrorException('You have exceeded your allowed requests per minute/day', req.status_code) # depends on [control=['if'], data=[]] |
def set_terminal(self, terminal):
"""Packs the terminal widget.
"""
if self.terminal is not None:
raise RuntimeError("TerminalBox: terminal already set")
self.terminal = terminal
self.terminal.connect("grab-focus", self.on_terminal_focus)
self.terminal.connect("button-press-event", self.on_button_press, None)
self.terminal.connect('child-exited', self.on_terminal_exited)
self.pack_start(self.terminal, True, True, 0)
self.terminal.show()
self.add_scroll_bar() | def function[set_terminal, parameter[self, terminal]]:
constant[Packs the terminal widget.
]
if compare[name[self].terminal is_not constant[None]] begin[:]
<ast.Raise object at 0x7da2041d9cc0>
name[self].terminal assign[=] name[terminal]
call[name[self].terminal.connect, parameter[constant[grab-focus], name[self].on_terminal_focus]]
call[name[self].terminal.connect, parameter[constant[button-press-event], name[self].on_button_press, constant[None]]]
call[name[self].terminal.connect, parameter[constant[child-exited], name[self].on_terminal_exited]]
call[name[self].pack_start, parameter[name[self].terminal, constant[True], constant[True], constant[0]]]
call[name[self].terminal.show, parameter[]]
call[name[self].add_scroll_bar, parameter[]] | keyword[def] identifier[set_terminal] ( identifier[self] , identifier[terminal] ):
literal[string]
keyword[if] identifier[self] . identifier[terminal] keyword[is] keyword[not] keyword[None] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
identifier[self] . identifier[terminal] = identifier[terminal]
identifier[self] . identifier[terminal] . identifier[connect] ( literal[string] , identifier[self] . identifier[on_terminal_focus] )
identifier[self] . identifier[terminal] . identifier[connect] ( literal[string] , identifier[self] . identifier[on_button_press] , keyword[None] )
identifier[self] . identifier[terminal] . identifier[connect] ( literal[string] , identifier[self] . identifier[on_terminal_exited] )
identifier[self] . identifier[pack_start] ( identifier[self] . identifier[terminal] , keyword[True] , keyword[True] , literal[int] )
identifier[self] . identifier[terminal] . identifier[show] ()
identifier[self] . identifier[add_scroll_bar] () | def set_terminal(self, terminal):
"""Packs the terminal widget.
"""
if self.terminal is not None:
raise RuntimeError('TerminalBox: terminal already set') # depends on [control=['if'], data=[]]
self.terminal = terminal
self.terminal.connect('grab-focus', self.on_terminal_focus)
self.terminal.connect('button-press-event', self.on_button_press, None)
self.terminal.connect('child-exited', self.on_terminal_exited)
self.pack_start(self.terminal, True, True, 0)
self.terminal.show()
self.add_scroll_bar() |
def _put_policy_set(self, policy_set_id, body):
"""
Will create or update a policy set for the given path.
"""
assert isinstance(body, (dict)), "PUT requires body to be a dict."
uri = self._get_policy_set_uri(guid=policy_set_id)
return self.service._put(uri, body) | def function[_put_policy_set, parameter[self, policy_set_id, body]]:
constant[
Will create or update a policy set for the given path.
]
assert[call[name[isinstance], parameter[name[body], name[dict]]]]
variable[uri] assign[=] call[name[self]._get_policy_set_uri, parameter[]]
return[call[name[self].service._put, parameter[name[uri], name[body]]]] | keyword[def] identifier[_put_policy_set] ( identifier[self] , identifier[policy_set_id] , identifier[body] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[body] ,( identifier[dict] )), literal[string]
identifier[uri] = identifier[self] . identifier[_get_policy_set_uri] ( identifier[guid] = identifier[policy_set_id] )
keyword[return] identifier[self] . identifier[service] . identifier[_put] ( identifier[uri] , identifier[body] ) | def _put_policy_set(self, policy_set_id, body):
"""
Will create or update a policy set for the given path.
"""
assert isinstance(body, dict), 'PUT requires body to be a dict.'
uri = self._get_policy_set_uri(guid=policy_set_id)
return self.service._put(uri, body) |
def validate_groupby_func(name, args, kwargs, allowed=None):
"""
'args' and 'kwargs' should be empty, except for allowed
kwargs because all of
their necessary parameters are explicitly listed in
the function signature
"""
if allowed is None:
allowed = []
kwargs = set(kwargs) - set(allowed)
if len(args) + len(kwargs) > 0:
raise UnsupportedFunctionCall((
"numpy operations are not valid "
"with groupby. Use .groupby(...)."
"{func}() instead".format(func=name))) | def function[validate_groupby_func, parameter[name, args, kwargs, allowed]]:
constant[
'args' and 'kwargs' should be empty, except for allowed
kwargs because all of
their necessary parameters are explicitly listed in
the function signature
]
if compare[name[allowed] is constant[None]] begin[:]
variable[allowed] assign[=] list[[]]
variable[kwargs] assign[=] binary_operation[call[name[set], parameter[name[kwargs]]] - call[name[set], parameter[name[allowed]]]]
if compare[binary_operation[call[name[len], parameter[name[args]]] + call[name[len], parameter[name[kwargs]]]] greater[>] constant[0]] begin[:]
<ast.Raise object at 0x7da20cabf6a0> | keyword[def] identifier[validate_groupby_func] ( identifier[name] , identifier[args] , identifier[kwargs] , identifier[allowed] = keyword[None] ):
literal[string]
keyword[if] identifier[allowed] keyword[is] keyword[None] :
identifier[allowed] =[]
identifier[kwargs] = identifier[set] ( identifier[kwargs] )- identifier[set] ( identifier[allowed] )
keyword[if] identifier[len] ( identifier[args] )+ identifier[len] ( identifier[kwargs] )> literal[int] :
keyword[raise] identifier[UnsupportedFunctionCall] ((
literal[string]
literal[string]
literal[string] . identifier[format] ( identifier[func] = identifier[name] ))) | def validate_groupby_func(name, args, kwargs, allowed=None):
"""
'args' and 'kwargs' should be empty, except for allowed
kwargs because all of
their necessary parameters are explicitly listed in
the function signature
"""
if allowed is None:
allowed = [] # depends on [control=['if'], data=['allowed']]
kwargs = set(kwargs) - set(allowed)
if len(args) + len(kwargs) > 0:
raise UnsupportedFunctionCall('numpy operations are not valid with groupby. Use .groupby(...).{func}() instead'.format(func=name)) # depends on [control=['if'], data=[]] |
def _taskdict(task):
'''
Note: No locking is provided. Under normal circumstances, like the other task is not running (e.g. this is running
from the same event loop as the task) or task is the current task, this is fine.
'''
if task is None:
task = asyncio.current_task()
assert task
taskvars = getattr(task, '_syn_taskvars', None)
if taskvars is None:
taskvars = varinit(task)
return taskvars | def function[_taskdict, parameter[task]]:
constant[
Note: No locking is provided. Under normal circumstances, like the other task is not running (e.g. this is running
from the same event loop as the task) or task is the current task, this is fine.
]
if compare[name[task] is constant[None]] begin[:]
variable[task] assign[=] call[name[asyncio].current_task, parameter[]]
assert[name[task]]
variable[taskvars] assign[=] call[name[getattr], parameter[name[task], constant[_syn_taskvars], constant[None]]]
if compare[name[taskvars] is constant[None]] begin[:]
variable[taskvars] assign[=] call[name[varinit], parameter[name[task]]]
return[name[taskvars]] | keyword[def] identifier[_taskdict] ( identifier[task] ):
literal[string]
keyword[if] identifier[task] keyword[is] keyword[None] :
identifier[task] = identifier[asyncio] . identifier[current_task] ()
keyword[assert] identifier[task]
identifier[taskvars] = identifier[getattr] ( identifier[task] , literal[string] , keyword[None] )
keyword[if] identifier[taskvars] keyword[is] keyword[None] :
identifier[taskvars] = identifier[varinit] ( identifier[task] )
keyword[return] identifier[taskvars] | def _taskdict(task):
"""
Note: No locking is provided. Under normal circumstances, like the other task is not running (e.g. this is running
from the same event loop as the task) or task is the current task, this is fine.
"""
if task is None:
task = asyncio.current_task() # depends on [control=['if'], data=['task']]
assert task
taskvars = getattr(task, '_syn_taskvars', None)
if taskvars is None:
taskvars = varinit(task) # depends on [control=['if'], data=['taskvars']]
return taskvars |
def _handle_job_set(function):
"""A decorator for handling `taskhandle.JobSet`\s
A decorator for handling `taskhandle.JobSet`\s for `do` and `undo`
methods of `Change`\s.
"""
def call(self, job_set=taskhandle.NullJobSet()):
job_set.started_job(str(self))
function(self)
job_set.finished_job()
return call | def function[_handle_job_set, parameter[function]]:
constant[A decorator for handling `taskhandle.JobSet`\s
A decorator for handling `taskhandle.JobSet`\s for `do` and `undo`
methods of `Change`\s.
]
def function[call, parameter[self, job_set]]:
call[name[job_set].started_job, parameter[call[name[str], parameter[name[self]]]]]
call[name[function], parameter[name[self]]]
call[name[job_set].finished_job, parameter[]]
return[name[call]] | keyword[def] identifier[_handle_job_set] ( identifier[function] ):
literal[string]
keyword[def] identifier[call] ( identifier[self] , identifier[job_set] = identifier[taskhandle] . identifier[NullJobSet] ()):
identifier[job_set] . identifier[started_job] ( identifier[str] ( identifier[self] ))
identifier[function] ( identifier[self] )
identifier[job_set] . identifier[finished_job] ()
keyword[return] identifier[call] | def _handle_job_set(function):
"""A decorator for handling `taskhandle.JobSet`\\s
A decorator for handling `taskhandle.JobSet`\\s for `do` and `undo`
methods of `Change`\\s.
"""
def call(self, job_set=taskhandle.NullJobSet()):
job_set.started_job(str(self))
function(self)
job_set.finished_job()
return call |
def _get_sampling_freq(self, r):
"""Raises BitReaderError"""
samplingFrequencyIndex = r.bits(4)
if samplingFrequencyIndex == 0xf:
samplingFrequency = r.bits(24)
else:
try:
samplingFrequency = self._FREQS[samplingFrequencyIndex]
except IndexError:
samplingFrequency = 0
return samplingFrequency | def function[_get_sampling_freq, parameter[self, r]]:
constant[Raises BitReaderError]
variable[samplingFrequencyIndex] assign[=] call[name[r].bits, parameter[constant[4]]]
if compare[name[samplingFrequencyIndex] equal[==] constant[15]] begin[:]
variable[samplingFrequency] assign[=] call[name[r].bits, parameter[constant[24]]]
return[name[samplingFrequency]] | keyword[def] identifier[_get_sampling_freq] ( identifier[self] , identifier[r] ):
literal[string]
identifier[samplingFrequencyIndex] = identifier[r] . identifier[bits] ( literal[int] )
keyword[if] identifier[samplingFrequencyIndex] == literal[int] :
identifier[samplingFrequency] = identifier[r] . identifier[bits] ( literal[int] )
keyword[else] :
keyword[try] :
identifier[samplingFrequency] = identifier[self] . identifier[_FREQS] [ identifier[samplingFrequencyIndex] ]
keyword[except] identifier[IndexError] :
identifier[samplingFrequency] = literal[int]
keyword[return] identifier[samplingFrequency] | def _get_sampling_freq(self, r):
"""Raises BitReaderError"""
samplingFrequencyIndex = r.bits(4)
if samplingFrequencyIndex == 15:
samplingFrequency = r.bits(24) # depends on [control=['if'], data=[]]
else:
try:
samplingFrequency = self._FREQS[samplingFrequencyIndex] # depends on [control=['try'], data=[]]
except IndexError:
samplingFrequency = 0 # depends on [control=['except'], data=[]]
return samplingFrequency |
def create_payload(self):
"""Wrap submitted data within an extra dict.
For more information, see `Bugzilla #1151220
<https://bugzilla.redhat.com/show_bug.cgi?id=1151220>`_.
In addition, rename the ``search_`` field to ``search``.
"""
payload = super(DiscoveryRule, self).create_payload()
if 'search_' in payload:
payload['search'] = payload.pop('search_')
return {u'discovery_rule': payload} | def function[create_payload, parameter[self]]:
constant[Wrap submitted data within an extra dict.
For more information, see `Bugzilla #1151220
<https://bugzilla.redhat.com/show_bug.cgi?id=1151220>`_.
In addition, rename the ``search_`` field to ``search``.
]
variable[payload] assign[=] call[call[name[super], parameter[name[DiscoveryRule], name[self]]].create_payload, parameter[]]
if compare[constant[search_] in name[payload]] begin[:]
call[name[payload]][constant[search]] assign[=] call[name[payload].pop, parameter[constant[search_]]]
return[dictionary[[<ast.Constant object at 0x7da20cabfbe0>], [<ast.Name object at 0x7da20cabdfc0>]]] | keyword[def] identifier[create_payload] ( identifier[self] ):
literal[string]
identifier[payload] = identifier[super] ( identifier[DiscoveryRule] , identifier[self] ). identifier[create_payload] ()
keyword[if] literal[string] keyword[in] identifier[payload] :
identifier[payload] [ literal[string] ]= identifier[payload] . identifier[pop] ( literal[string] )
keyword[return] { literal[string] : identifier[payload] } | def create_payload(self):
"""Wrap submitted data within an extra dict.
For more information, see `Bugzilla #1151220
<https://bugzilla.redhat.com/show_bug.cgi?id=1151220>`_.
In addition, rename the ``search_`` field to ``search``.
"""
payload = super(DiscoveryRule, self).create_payload()
if 'search_' in payload:
payload['search'] = payload.pop('search_') # depends on [control=['if'], data=['payload']]
return {u'discovery_rule': payload} |
def _generate_key(pass_id, passphrases, salt, algorithm):
'''Generate and return PBKDF2 key'''
if pass_id not in passphrases:
raise Exception('Passphrase not defined for id: %d' % pass_id)
passphrase = passphrases[pass_id]
if len(passphrase) < 32:
raise Exception('Passphrase less than 32 characters long')
digestmod = EncryptedPickle._get_hashlib(algorithm['pbkdf2_algorithm'])
encoder = PBKDF2(passphrase, salt,
iterations=algorithm['pbkdf2_iterations'],
digestmodule=digestmod)
return encoder.read(algorithm['key_size']) | def function[_generate_key, parameter[pass_id, passphrases, salt, algorithm]]:
constant[Generate and return PBKDF2 key]
if compare[name[pass_id] <ast.NotIn object at 0x7da2590d7190> name[passphrases]] begin[:]
<ast.Raise object at 0x7da2054a6ec0>
variable[passphrase] assign[=] call[name[passphrases]][name[pass_id]]
if compare[call[name[len], parameter[name[passphrase]]] less[<] constant[32]] begin[:]
<ast.Raise object at 0x7da2054a4850>
variable[digestmod] assign[=] call[name[EncryptedPickle]._get_hashlib, parameter[call[name[algorithm]][constant[pbkdf2_algorithm]]]]
variable[encoder] assign[=] call[name[PBKDF2], parameter[name[passphrase], name[salt]]]
return[call[name[encoder].read, parameter[call[name[algorithm]][constant[key_size]]]]] | keyword[def] identifier[_generate_key] ( identifier[pass_id] , identifier[passphrases] , identifier[salt] , identifier[algorithm] ):
literal[string]
keyword[if] identifier[pass_id] keyword[not] keyword[in] identifier[passphrases] :
keyword[raise] identifier[Exception] ( literal[string] % identifier[pass_id] )
identifier[passphrase] = identifier[passphrases] [ identifier[pass_id] ]
keyword[if] identifier[len] ( identifier[passphrase] )< literal[int] :
keyword[raise] identifier[Exception] ( literal[string] )
identifier[digestmod] = identifier[EncryptedPickle] . identifier[_get_hashlib] ( identifier[algorithm] [ literal[string] ])
identifier[encoder] = identifier[PBKDF2] ( identifier[passphrase] , identifier[salt] ,
identifier[iterations] = identifier[algorithm] [ literal[string] ],
identifier[digestmodule] = identifier[digestmod] )
keyword[return] identifier[encoder] . identifier[read] ( identifier[algorithm] [ literal[string] ]) | def _generate_key(pass_id, passphrases, salt, algorithm):
"""Generate and return PBKDF2 key"""
if pass_id not in passphrases:
raise Exception('Passphrase not defined for id: %d' % pass_id) # depends on [control=['if'], data=['pass_id']]
passphrase = passphrases[pass_id]
if len(passphrase) < 32:
raise Exception('Passphrase less than 32 characters long') # depends on [control=['if'], data=[]]
digestmod = EncryptedPickle._get_hashlib(algorithm['pbkdf2_algorithm'])
encoder = PBKDF2(passphrase, salt, iterations=algorithm['pbkdf2_iterations'], digestmodule=digestmod)
return encoder.read(algorithm['key_size']) |
def on_input_path_textChanged(self):
"""Action when input file name is changed."""
input_path = self.input_path.text()
input_not_grid_msg = tr('input file is not .xml')
if input_path and not input_path.endswith('.xml'):
self.warning_text.add(input_not_grid_msg)
elif input_path and input_not_grid_msg in self.warning_text:
self.warning_text.remove(input_not_grid_msg)
if self.use_output_default.isChecked():
self.get_output_from_input()
self.update_warning() | def function[on_input_path_textChanged, parameter[self]]:
constant[Action when input file name is changed.]
variable[input_path] assign[=] call[name[self].input_path.text, parameter[]]
variable[input_not_grid_msg] assign[=] call[name[tr], parameter[constant[input file is not .xml]]]
if <ast.BoolOp object at 0x7da207f9bb50> begin[:]
call[name[self].warning_text.add, parameter[name[input_not_grid_msg]]]
if call[name[self].use_output_default.isChecked, parameter[]] begin[:]
call[name[self].get_output_from_input, parameter[]]
call[name[self].update_warning, parameter[]] | keyword[def] identifier[on_input_path_textChanged] ( identifier[self] ):
literal[string]
identifier[input_path] = identifier[self] . identifier[input_path] . identifier[text] ()
identifier[input_not_grid_msg] = identifier[tr] ( literal[string] )
keyword[if] identifier[input_path] keyword[and] keyword[not] identifier[input_path] . identifier[endswith] ( literal[string] ):
identifier[self] . identifier[warning_text] . identifier[add] ( identifier[input_not_grid_msg] )
keyword[elif] identifier[input_path] keyword[and] identifier[input_not_grid_msg] keyword[in] identifier[self] . identifier[warning_text] :
identifier[self] . identifier[warning_text] . identifier[remove] ( identifier[input_not_grid_msg] )
keyword[if] identifier[self] . identifier[use_output_default] . identifier[isChecked] ():
identifier[self] . identifier[get_output_from_input] ()
identifier[self] . identifier[update_warning] () | def on_input_path_textChanged(self):
"""Action when input file name is changed."""
input_path = self.input_path.text()
input_not_grid_msg = tr('input file is not .xml')
if input_path and (not input_path.endswith('.xml')):
self.warning_text.add(input_not_grid_msg) # depends on [control=['if'], data=[]]
elif input_path and input_not_grid_msg in self.warning_text:
self.warning_text.remove(input_not_grid_msg) # depends on [control=['if'], data=[]]
if self.use_output_default.isChecked():
self.get_output_from_input() # depends on [control=['if'], data=[]]
self.update_warning() |
def compute(self):
"""
This method can be used for computing one MaxSAT solution,
i.e. for computing an assignment satisfying all hard
clauses of the input formula and maximizing the sum of
weights of satisfied soft clauses. It is a wrapper for the
internal :func:`compute_` method, which does the job,
followed by the model extraction.
Note that the method returns ``None`` if no MaxSAT model
exists. The method can be called multiple times, each
being followed by blocking the last model. This way one
can enumerate top-:math:`k` MaxSAT solutions (this can
also be done by calling :meth:`enumerate()`).
:returns: a MaxSAT model
:rtype: list(int)
.. code-block:: python
>>> from pysat.examples.rc2 import RC2
>>> from pysat.formula import WCNF
>>>
>>> rc2 = RC2(WCNF()) # passing an empty WCNF() formula
>>> rc2.add_clause([-1, -2])
>>> rc2.add_clause([-1, -3])
>>> rc2.add_clause([-2, -3])
>>>
>>> rc2.add_clause([1], weight=1)
>>> rc2.add_clause([2], weight=1)
>>> rc2.add_clause([3], weight=1)
>>>
>>> model = rc2.compute()
>>> print model
[-1, -2, 3]
>>> print rc2.cost
2
>>> rc2.delete()
"""
# simply apply MaxSAT only once
res = self.compute_()
if res:
# extracting a model
self.model = self.oracle.get_model()
self.model = filter(lambda l: abs(l) in self.vmap.i2e, self.model)
self.model = map(lambda l: int(copysign(self.vmap.i2e[abs(l)], l)), self.model)
self.model = sorted(self.model, key=lambda l: abs(l))
return self.model | def function[compute, parameter[self]]:
constant[
This method can be used for computing one MaxSAT solution,
i.e. for computing an assignment satisfying all hard
clauses of the input formula and maximizing the sum of
weights of satisfied soft clauses. It is a wrapper for the
internal :func:`compute_` method, which does the job,
followed by the model extraction.
Note that the method returns ``None`` if no MaxSAT model
exists. The method can be called multiple times, each
being followed by blocking the last model. This way one
can enumerate top-:math:`k` MaxSAT solutions (this can
also be done by calling :meth:`enumerate()`).
:returns: a MaxSAT model
:rtype: list(int)
.. code-block:: python
>>> from pysat.examples.rc2 import RC2
>>> from pysat.formula import WCNF
>>>
>>> rc2 = RC2(WCNF()) # passing an empty WCNF() formula
>>> rc2.add_clause([-1, -2])
>>> rc2.add_clause([-1, -3])
>>> rc2.add_clause([-2, -3])
>>>
>>> rc2.add_clause([1], weight=1)
>>> rc2.add_clause([2], weight=1)
>>> rc2.add_clause([3], weight=1)
>>>
>>> model = rc2.compute()
>>> print model
[-1, -2, 3]
>>> print rc2.cost
2
>>> rc2.delete()
]
variable[res] assign[=] call[name[self].compute_, parameter[]]
if name[res] begin[:]
name[self].model assign[=] call[name[self].oracle.get_model, parameter[]]
name[self].model assign[=] call[name[filter], parameter[<ast.Lambda object at 0x7da1b11a2aa0>, name[self].model]]
name[self].model assign[=] call[name[map], parameter[<ast.Lambda object at 0x7da1b11a07c0>, name[self].model]]
name[self].model assign[=] call[name[sorted], parameter[name[self].model]]
return[name[self].model] | keyword[def] identifier[compute] ( identifier[self] ):
literal[string]
identifier[res] = identifier[self] . identifier[compute_] ()
keyword[if] identifier[res] :
identifier[self] . identifier[model] = identifier[self] . identifier[oracle] . identifier[get_model] ()
identifier[self] . identifier[model] = identifier[filter] ( keyword[lambda] identifier[l] : identifier[abs] ( identifier[l] ) keyword[in] identifier[self] . identifier[vmap] . identifier[i2e] , identifier[self] . identifier[model] )
identifier[self] . identifier[model] = identifier[map] ( keyword[lambda] identifier[l] : identifier[int] ( identifier[copysign] ( identifier[self] . identifier[vmap] . identifier[i2e] [ identifier[abs] ( identifier[l] )], identifier[l] )), identifier[self] . identifier[model] )
identifier[self] . identifier[model] = identifier[sorted] ( identifier[self] . identifier[model] , identifier[key] = keyword[lambda] identifier[l] : identifier[abs] ( identifier[l] ))
keyword[return] identifier[self] . identifier[model] | def compute(self):
"""
This method can be used for computing one MaxSAT solution,
i.e. for computing an assignment satisfying all hard
clauses of the input formula and maximizing the sum of
weights of satisfied soft clauses. It is a wrapper for the
internal :func:`compute_` method, which does the job,
followed by the model extraction.
Note that the method returns ``None`` if no MaxSAT model
exists. The method can be called multiple times, each
being followed by blocking the last model. This way one
can enumerate top-:math:`k` MaxSAT solutions (this can
also be done by calling :meth:`enumerate()`).
:returns: a MaxSAT model
:rtype: list(int)
.. code-block:: python
>>> from pysat.examples.rc2 import RC2
>>> from pysat.formula import WCNF
>>>
>>> rc2 = RC2(WCNF()) # passing an empty WCNF() formula
>>> rc2.add_clause([-1, -2])
>>> rc2.add_clause([-1, -3])
>>> rc2.add_clause([-2, -3])
>>>
>>> rc2.add_clause([1], weight=1)
>>> rc2.add_clause([2], weight=1)
>>> rc2.add_clause([3], weight=1)
>>>
>>> model = rc2.compute()
>>> print model
[-1, -2, 3]
>>> print rc2.cost
2
>>> rc2.delete()
"""
# simply apply MaxSAT only once
res = self.compute_()
if res:
# extracting a model
self.model = self.oracle.get_model()
self.model = filter(lambda l: abs(l) in self.vmap.i2e, self.model)
self.model = map(lambda l: int(copysign(self.vmap.i2e[abs(l)], l)), self.model)
self.model = sorted(self.model, key=lambda l: abs(l))
return self.model # depends on [control=['if'], data=[]] |
def output_reduce_list(path_list, force=False):
"""Generates structure file with protons from a list of structure files."""
output_paths = []
for path in path_list:
output_path = output_reduce(path, force=force)
if output_path:
output_paths.append(output_path)
return output_paths | def function[output_reduce_list, parameter[path_list, force]]:
constant[Generates structure file with protons from a list of structure files.]
variable[output_paths] assign[=] list[[]]
for taget[name[path]] in starred[name[path_list]] begin[:]
variable[output_path] assign[=] call[name[output_reduce], parameter[name[path]]]
if name[output_path] begin[:]
call[name[output_paths].append, parameter[name[output_path]]]
return[name[output_paths]] | keyword[def] identifier[output_reduce_list] ( identifier[path_list] , identifier[force] = keyword[False] ):
literal[string]
identifier[output_paths] =[]
keyword[for] identifier[path] keyword[in] identifier[path_list] :
identifier[output_path] = identifier[output_reduce] ( identifier[path] , identifier[force] = identifier[force] )
keyword[if] identifier[output_path] :
identifier[output_paths] . identifier[append] ( identifier[output_path] )
keyword[return] identifier[output_paths] | def output_reduce_list(path_list, force=False):
"""Generates structure file with protons from a list of structure files."""
output_paths = []
for path in path_list:
output_path = output_reduce(path, force=force)
if output_path:
output_paths.append(output_path) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['path']]
return output_paths |
def get_service_url(request, redirect_to=None):
"""Generates application django service URL for CAS"""
if hasattr(django_settings, 'CAS_ROOT_PROXIED_AS'):
service = django_settings.CAS_ROOT_PROXIED_AS + request.path
else:
protocol = get_protocol(request)
host = request.get_host()
service = urllib_parse.urlunparse(
(protocol, host, request.path, '', '', ''),
)
if not django_settings.CAS_STORE_NEXT:
if '?' in service:
service += '&'
else:
service += '?'
service += urllib_parse.urlencode({
REDIRECT_FIELD_NAME: redirect_to or get_redirect_url(request)
})
return service | def function[get_service_url, parameter[request, redirect_to]]:
constant[Generates application django service URL for CAS]
if call[name[hasattr], parameter[name[django_settings], constant[CAS_ROOT_PROXIED_AS]]] begin[:]
variable[service] assign[=] binary_operation[name[django_settings].CAS_ROOT_PROXIED_AS + name[request].path]
if <ast.UnaryOp object at 0x7da1b1e64b50> begin[:]
if compare[constant[?] in name[service]] begin[:]
<ast.AugAssign object at 0x7da1b1e64310>
<ast.AugAssign object at 0x7da1b1e642b0>
return[name[service]] | keyword[def] identifier[get_service_url] ( identifier[request] , identifier[redirect_to] = keyword[None] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[django_settings] , literal[string] ):
identifier[service] = identifier[django_settings] . identifier[CAS_ROOT_PROXIED_AS] + identifier[request] . identifier[path]
keyword[else] :
identifier[protocol] = identifier[get_protocol] ( identifier[request] )
identifier[host] = identifier[request] . identifier[get_host] ()
identifier[service] = identifier[urllib_parse] . identifier[urlunparse] (
( identifier[protocol] , identifier[host] , identifier[request] . identifier[path] , literal[string] , literal[string] , literal[string] ),
)
keyword[if] keyword[not] identifier[django_settings] . identifier[CAS_STORE_NEXT] :
keyword[if] literal[string] keyword[in] identifier[service] :
identifier[service] += literal[string]
keyword[else] :
identifier[service] += literal[string]
identifier[service] += identifier[urllib_parse] . identifier[urlencode] ({
identifier[REDIRECT_FIELD_NAME] : identifier[redirect_to] keyword[or] identifier[get_redirect_url] ( identifier[request] )
})
keyword[return] identifier[service] | def get_service_url(request, redirect_to=None):
"""Generates application django service URL for CAS"""
if hasattr(django_settings, 'CAS_ROOT_PROXIED_AS'):
service = django_settings.CAS_ROOT_PROXIED_AS + request.path # depends on [control=['if'], data=[]]
else:
protocol = get_protocol(request)
host = request.get_host()
service = urllib_parse.urlunparse((protocol, host, request.path, '', '', ''))
if not django_settings.CAS_STORE_NEXT:
if '?' in service:
service += '&' # depends on [control=['if'], data=['service']]
else:
service += '?'
service += urllib_parse.urlencode({REDIRECT_FIELD_NAME: redirect_to or get_redirect_url(request)}) # depends on [control=['if'], data=[]]
return service |
def clean_pages_from_space(confluence, space_key, limit=500):
"""
Remove all pages from trash for related space
:param limit:
:param confluence:
:param space_key:
:return:
"""
flag = True
while flag:
values = confluence.get_all_pages_from_space_trash(space=space_key, start=0, limit=limit)
if len(values) == 0:
flag = False
print("For space {} trash is empty".format(space_key))
else:
print("Found in space {} pages as trashed {}".format(space_key, len(values)))
for value in values:
print("Removing page with title: " + value['title'])
confluence.remove_page_from_trash(value['id']) | def function[clean_pages_from_space, parameter[confluence, space_key, limit]]:
constant[
Remove all pages from trash for related space
:param limit:
:param confluence:
:param space_key:
:return:
]
variable[flag] assign[=] constant[True]
while name[flag] begin[:]
variable[values] assign[=] call[name[confluence].get_all_pages_from_space_trash, parameter[]]
if compare[call[name[len], parameter[name[values]]] equal[==] constant[0]] begin[:]
variable[flag] assign[=] constant[False]
call[name[print], parameter[call[constant[For space {} trash is empty].format, parameter[name[space_key]]]]] | keyword[def] identifier[clean_pages_from_space] ( identifier[confluence] , identifier[space_key] , identifier[limit] = literal[int] ):
literal[string]
identifier[flag] = keyword[True]
keyword[while] identifier[flag] :
identifier[values] = identifier[confluence] . identifier[get_all_pages_from_space_trash] ( identifier[space] = identifier[space_key] , identifier[start] = literal[int] , identifier[limit] = identifier[limit] )
keyword[if] identifier[len] ( identifier[values] )== literal[int] :
identifier[flag] = keyword[False]
identifier[print] ( literal[string] . identifier[format] ( identifier[space_key] ))
keyword[else] :
identifier[print] ( literal[string] . identifier[format] ( identifier[space_key] , identifier[len] ( identifier[values] )))
keyword[for] identifier[value] keyword[in] identifier[values] :
identifier[print] ( literal[string] + identifier[value] [ literal[string] ])
identifier[confluence] . identifier[remove_page_from_trash] ( identifier[value] [ literal[string] ]) | def clean_pages_from_space(confluence, space_key, limit=500):
"""
Remove all pages from trash for related space
:param limit:
:param confluence:
:param space_key:
:return:
"""
flag = True
while flag:
values = confluence.get_all_pages_from_space_trash(space=space_key, start=0, limit=limit)
if len(values) == 0:
flag = False
print('For space {} trash is empty'.format(space_key)) # depends on [control=['if'], data=[]]
else:
print('Found in space {} pages as trashed {}'.format(space_key, len(values)))
for value in values:
print('Removing page with title: ' + value['title'])
confluence.remove_page_from_trash(value['id']) # depends on [control=['for'], data=['value']] # depends on [control=['while'], data=[]] |
def _send_message(self, msg):
"""Add message to queue and start processing the queue."""
LWLink.the_queue.put_nowait(msg)
if LWLink.thread is None or not LWLink.thread.isAlive():
LWLink.thread = Thread(target=self._send_queue)
LWLink.thread.start() | def function[_send_message, parameter[self, msg]]:
constant[Add message to queue and start processing the queue.]
call[name[LWLink].the_queue.put_nowait, parameter[name[msg]]]
if <ast.BoolOp object at 0x7da1b01a6200> begin[:]
name[LWLink].thread assign[=] call[name[Thread], parameter[]]
call[name[LWLink].thread.start, parameter[]] | keyword[def] identifier[_send_message] ( identifier[self] , identifier[msg] ):
literal[string]
identifier[LWLink] . identifier[the_queue] . identifier[put_nowait] ( identifier[msg] )
keyword[if] identifier[LWLink] . identifier[thread] keyword[is] keyword[None] keyword[or] keyword[not] identifier[LWLink] . identifier[thread] . identifier[isAlive] ():
identifier[LWLink] . identifier[thread] = identifier[Thread] ( identifier[target] = identifier[self] . identifier[_send_queue] )
identifier[LWLink] . identifier[thread] . identifier[start] () | def _send_message(self, msg):
"""Add message to queue and start processing the queue."""
LWLink.the_queue.put_nowait(msg)
if LWLink.thread is None or not LWLink.thread.isAlive():
LWLink.thread = Thread(target=self._send_queue)
LWLink.thread.start() # depends on [control=['if'], data=[]] |
def get_rendered_fields(self, ctx=None):
'''
:param ctx: rendering context in which the method was called
:return: ordered list of the fields that will be rendered
'''
if ctx is None:
ctx = RenderContext()
ctx.push(self)
result = []
for f in self._fields:
if len(f.render(ctx)):
result.append(f)
ctx.pop()
return result | def function[get_rendered_fields, parameter[self, ctx]]:
constant[
:param ctx: rendering context in which the method was called
:return: ordered list of the fields that will be rendered
]
if compare[name[ctx] is constant[None]] begin[:]
variable[ctx] assign[=] call[name[RenderContext], parameter[]]
call[name[ctx].push, parameter[name[self]]]
variable[result] assign[=] list[[]]
for taget[name[f]] in starred[name[self]._fields] begin[:]
if call[name[len], parameter[call[name[f].render, parameter[name[ctx]]]]] begin[:]
call[name[result].append, parameter[name[f]]]
call[name[ctx].pop, parameter[]]
return[name[result]] | keyword[def] identifier[get_rendered_fields] ( identifier[self] , identifier[ctx] = keyword[None] ):
literal[string]
keyword[if] identifier[ctx] keyword[is] keyword[None] :
identifier[ctx] = identifier[RenderContext] ()
identifier[ctx] . identifier[push] ( identifier[self] )
identifier[result] =[]
keyword[for] identifier[f] keyword[in] identifier[self] . identifier[_fields] :
keyword[if] identifier[len] ( identifier[f] . identifier[render] ( identifier[ctx] )):
identifier[result] . identifier[append] ( identifier[f] )
identifier[ctx] . identifier[pop] ()
keyword[return] identifier[result] | def get_rendered_fields(self, ctx=None):
"""
:param ctx: rendering context in which the method was called
:return: ordered list of the fields that will be rendered
"""
if ctx is None:
ctx = RenderContext() # depends on [control=['if'], data=['ctx']]
ctx.push(self)
result = []
for f in self._fields:
if len(f.render(ctx)):
result.append(f) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['f']]
ctx.pop()
return result |
def from_name(cls, name):
""" Retrieve a snapshot profile accsociated to a name."""
snps = cls.list({'name': name})
if len(snps) == 1:
return snps[0]['id']
elif not snps:
return
raise DuplicateResults('snapshot profile name %s is ambiguous.' % name) | def function[from_name, parameter[cls, name]]:
constant[ Retrieve a snapshot profile accsociated to a name.]
variable[snps] assign[=] call[name[cls].list, parameter[dictionary[[<ast.Constant object at 0x7da20e956050>], [<ast.Name object at 0x7da20e957af0>]]]]
if compare[call[name[len], parameter[name[snps]]] equal[==] constant[1]] begin[:]
return[call[call[name[snps]][constant[0]]][constant[id]]]
<ast.Raise object at 0x7da18ede62f0> | keyword[def] identifier[from_name] ( identifier[cls] , identifier[name] ):
literal[string]
identifier[snps] = identifier[cls] . identifier[list] ({ literal[string] : identifier[name] })
keyword[if] identifier[len] ( identifier[snps] )== literal[int] :
keyword[return] identifier[snps] [ literal[int] ][ literal[string] ]
keyword[elif] keyword[not] identifier[snps] :
keyword[return]
keyword[raise] identifier[DuplicateResults] ( literal[string] % identifier[name] ) | def from_name(cls, name):
""" Retrieve a snapshot profile accsociated to a name."""
snps = cls.list({'name': name})
if len(snps) == 1:
return snps[0]['id'] # depends on [control=['if'], data=[]]
elif not snps:
return # depends on [control=['if'], data=[]]
raise DuplicateResults('snapshot profile name %s is ambiguous.' % name) |
def read_yaml_file(self, file_name):
"""
Parses a YAML file into a matrix.
:param file_name: name of the YAML file
:return: a matrix with the file's contents
"""
with open(os.path.join(self.__path(), os.path.basename(file_name)),
'rt') as yamlfile:
return yaml.load(yamlfile) | def function[read_yaml_file, parameter[self, file_name]]:
constant[
Parses a YAML file into a matrix.
:param file_name: name of the YAML file
:return: a matrix with the file's contents
]
with call[name[open], parameter[call[name[os].path.join, parameter[call[name[self].__path, parameter[]], call[name[os].path.basename, parameter[name[file_name]]]]], constant[rt]]] begin[:]
return[call[name[yaml].load, parameter[name[yamlfile]]]] | keyword[def] identifier[read_yaml_file] ( identifier[self] , identifier[file_name] ):
literal[string]
keyword[with] identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[__path] (), identifier[os] . identifier[path] . identifier[basename] ( identifier[file_name] )),
literal[string] ) keyword[as] identifier[yamlfile] :
keyword[return] identifier[yaml] . identifier[load] ( identifier[yamlfile] ) | def read_yaml_file(self, file_name):
"""
Parses a YAML file into a matrix.
:param file_name: name of the YAML file
:return: a matrix with the file's contents
"""
with open(os.path.join(self.__path(), os.path.basename(file_name)), 'rt') as yamlfile:
return yaml.load(yamlfile) # depends on [control=['with'], data=['yamlfile']] |
def init_plugins(settings):
    """Load plugins and call register().

    Each entry in ``settings['plugins']`` is either an import path (string)
    or an already-constructed object; both must expose ``register(settings)``.
    Directories in ``settings['plugin_paths']`` are temporarily prepended to
    ``sys.path`` so string entries can be imported, and removed afterwards.

    A plugin that fails to import or register is logged and skipped; it does
    not prevent the remaining plugins from loading.
    """
    logger = logging.getLogger(__name__)
    logger.debug('Plugin paths: %s', settings['plugin_paths'])
    for path in settings['plugin_paths']:
        sys.path.insert(0, path)
    try:
        for plugin in settings['plugins']:
            try:
                if isinstance(plugin, str):
                    mod = importlib.import_module(plugin)
                    mod.register(settings)
                else:
                    plugin.register(settings)
                logger.debug('Registered plugin %s', plugin)
            except Exception as e:
                # Best-effort loading by design: log and move on.
                logger.error('Failed to load plugin %s: %r', plugin, e)
    finally:
        # Restore sys.path even if something that is not a plain Exception
        # (e.g. KeyboardInterrupt) escapes the loop above.
        for path in settings['plugin_paths']:
            sys.path.remove(path)
constant[Load plugins and call register().]
variable[logger] assign[=] call[name[logging].getLogger, parameter[name[__name__]]]
call[name[logger].debug, parameter[constant[Plugin paths: %s], call[name[settings]][constant[plugin_paths]]]]
for taget[name[path]] in starred[call[name[settings]][constant[plugin_paths]]] begin[:]
call[name[sys].path.insert, parameter[constant[0], name[path]]]
for taget[name[plugin]] in starred[call[name[settings]][constant[plugins]]] begin[:]
<ast.Try object at 0x7da18f09d600>
for taget[name[path]] in starred[call[name[settings]][constant[plugin_paths]]] begin[:]
call[name[sys].path.remove, parameter[name[path]]] | keyword[def] identifier[init_plugins] ( identifier[settings] ):
literal[string]
identifier[logger] = identifier[logging] . identifier[getLogger] ( identifier[__name__] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[settings] [ literal[string] ])
keyword[for] identifier[path] keyword[in] identifier[settings] [ literal[string] ]:
identifier[sys] . identifier[path] . identifier[insert] ( literal[int] , identifier[path] )
keyword[for] identifier[plugin] keyword[in] identifier[settings] [ literal[string] ]:
keyword[try] :
keyword[if] identifier[isinstance] ( identifier[plugin] , identifier[str] ):
identifier[mod] = identifier[importlib] . identifier[import_module] ( identifier[plugin] )
identifier[mod] . identifier[register] ( identifier[settings] )
keyword[else] :
identifier[plugin] . identifier[register] ( identifier[settings] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[plugin] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[logger] . identifier[error] ( literal[string] , identifier[plugin] , identifier[e] )
keyword[for] identifier[path] keyword[in] identifier[settings] [ literal[string] ]:
identifier[sys] . identifier[path] . identifier[remove] ( identifier[path] ) | def init_plugins(settings):
"""Load plugins and call register()."""
logger = logging.getLogger(__name__)
logger.debug('Plugin paths: %s', settings['plugin_paths'])
for path in settings['plugin_paths']:
sys.path.insert(0, path) # depends on [control=['for'], data=['path']]
for plugin in settings['plugins']:
try:
if isinstance(plugin, str):
mod = importlib.import_module(plugin)
mod.register(settings) # depends on [control=['if'], data=[]]
else:
plugin.register(settings)
logger.debug('Registered plugin %s', plugin) # depends on [control=['try'], data=[]]
except Exception as e:
logger.error('Failed to load plugin %s: %r', plugin, e) # depends on [control=['except'], data=['e']] # depends on [control=['for'], data=['plugin']]
for path in settings['plugin_paths']:
sys.path.remove(path) # depends on [control=['for'], data=['path']] |
def mul_at(self, other, k):
    '''Compute the multiplication between two polynomials only at the specified coefficient (this is a lot cheaper than doing the full polynomial multiplication and then extract only the required coefficient)'''
    # Coefficients are stored highest-degree first, so coefficients[-(i+1)] is
    # the coefficient of x**i and the k-th product coefficient is the
    # convolution sum(a[k-i] * b[i]).
    if k > (self.degree + other.degree) or k > self.degree: return 0 # optimization: if the required coefficient is above the maximum coefficient of the resulting polynomial, we can already predict that and just return 0
    # NOTE(review): the extra `k > self.degree` shortcut above, and the fact
    # that -(k-i+1) becomes a non-negative (front-of-list) index whenever
    # i > k, are only sound for this library's specific call patterns --
    # confirm before reusing this method in a new context.
    term = 0
    for i in _range(min(len(self), len(other))):
        coef1 = self.coefficients[-(k-i+1)]
        coef2 = other.coefficients[-(i+1)]
        if coef1 == 0 or coef2 == 0: continue # log(0) is undefined, skip (and in addition it's a nice optimization)
        term += coef1 * coef2
    return term | def function[mul_at, parameter[self, other, k]]:
constant[Compute the multiplication between two polynomials only at the specified coefficient (this is a lot cheaper than doing the full polynomial multiplication and then extract only the required coefficient)]
if <ast.BoolOp object at 0x7da18f00f940> begin[:]
return[constant[0]]
variable[term] assign[=] constant[0]
for taget[name[i]] in starred[call[name[_range], parameter[call[name[min], parameter[call[name[len], parameter[name[self]]], call[name[len], parameter[name[other]]]]]]]] begin[:]
variable[coef1] assign[=] call[name[self].coefficients][<ast.UnaryOp object at 0x7da18f00dc90>]
variable[coef2] assign[=] call[name[other].coefficients][<ast.UnaryOp object at 0x7da18f00e320>]
if <ast.BoolOp object at 0x7da18f00dc60> begin[:]
continue
<ast.AugAssign object at 0x7da18f00e470>
return[name[term]] | keyword[def] identifier[mul_at] ( identifier[self] , identifier[other] , identifier[k] ):
literal[string]
keyword[if] identifier[k] >( identifier[self] . identifier[degree] + identifier[other] . identifier[degree] ) keyword[or] identifier[k] > identifier[self] . identifier[degree] : keyword[return] literal[int]
identifier[term] = literal[int]
keyword[for] identifier[i] keyword[in] identifier[_range] ( identifier[min] ( identifier[len] ( identifier[self] ), identifier[len] ( identifier[other] ))):
identifier[coef1] = identifier[self] . identifier[coefficients] [-( identifier[k] - identifier[i] + literal[int] )]
identifier[coef2] = identifier[other] . identifier[coefficients] [-( identifier[i] + literal[int] )]
keyword[if] identifier[coef1] == literal[int] keyword[or] identifier[coef2] == literal[int] : keyword[continue]
identifier[term] += identifier[coef1] * identifier[coef2]
keyword[return] identifier[term] | def mul_at(self, other, k):
"""Compute the multiplication between two polynomials only at the specified coefficient (this is a lot cheaper than doing the full polynomial multiplication and then extract only the required coefficient)"""
if k > self.degree + other.degree or k > self.degree:
return 0 # optimization: if the required coefficient is above the maximum coefficient of the resulting polynomial, we can already predict that and just return 0 # depends on [control=['if'], data=[]]
term = 0
for i in _range(min(len(self), len(other))):
coef1 = self.coefficients[-(k - i + 1)]
coef2 = other.coefficients[-(i + 1)]
if coef1 == 0 or coef2 == 0:
continue # log(0) is undefined, skip (and in addition it's a nice optimization) # depends on [control=['if'], data=[]]
term += coef1 * coef2 # depends on [control=['for'], data=['i']]
return term |
def log_batch(self, log_data):
    """Logs batch of messages with attachment.

    Sends all records in a single multipart POST to the ``log`` endpoint:
    the first part is the JSON body, the remaining parts are the attachments.

    Args:
        log_data: list of log records.
            log record is a dict of;
                time, message, level, attachment
            attachment is a dict of:
                name: name of attachment
                data: fileobj or content
                mime: content type for attachment
    """
    url = uri_join(self.base_url, "log")
    attachments = []
    for log_item in log_data:
        # Every record is attached to the innermost currently-open test item.
        log_item["item_id"] = self.stack[-1]
        attachment = log_item.get("attachment", None)
        if "attachment" in log_item:
            del log_item["attachment"]
        if attachment:
            # A bare value (fileobj/bytes) is shorthand for {"data": value}.
            # NOTE(review): collections.Mapping was removed in Python 3.10;
            # collections.abc.Mapping is the long-term spelling -- confirm
            # the supported Python range before changing it.
            if not isinstance(attachment, collections.Mapping):
                attachment = {"data": attachment}
            name = attachment.get("name", str(uuid.uuid4()))
            log_item["file"] = {"name": name}
            attachments.append(("file", (
                name,
                attachment["data"],
                attachment.get("mime", "application/octet-stream")
            )))
    files = [(
        "json_request_part", (
            None,
            json.dumps(log_data),
            "application/json"
        )
    )]
    files.extend(attachments)
    # NOTE(review): imported here rather than at module top, presumably to
    # avoid a circular import at load time -- confirm.
    from reportportal_client import POST_LOGBATCH_RETRY_COUNT
    for i in range(POST_LOGBATCH_RETRY_COUNT):
        try:
            r = self.session.post(
                url=url,
                files=files,
                verify=self.verify_ssl
            )
        except KeyError:
            # NOTE(review): retrying on KeyError is unusual for an HTTP call;
            # it looks like a workaround for an intermittent bug inside the
            # HTTP layer -- verify before removing.
            if i < POST_LOGBATCH_RETRY_COUNT - 1:
                continue
            else:
                raise
        break  # request succeeded; stop retrying
    logger.debug("log_batch - Stack: %s", self.stack)
    logger.debug("log_batch response: %s", r.text)
    return _get_data(r) | def function[log_batch, parameter[self, log_data]]:
constant[Logs batch of messages with attachment.
Args:
log_data: list of log records.
log record is a dict of;
time, message, level, attachment
attachment is a dict of:
name: name of attachment
data: fileobj or content
mime: content type for attachment
]
variable[url] assign[=] call[name[uri_join], parameter[name[self].base_url, constant[log]]]
variable[attachments] assign[=] list[[]]
for taget[name[log_item]] in starred[name[log_data]] begin[:]
call[name[log_item]][constant[item_id]] assign[=] call[name[self].stack][<ast.UnaryOp object at 0x7da204564fa0>]
variable[attachment] assign[=] call[name[log_item].get, parameter[constant[attachment], constant[None]]]
if compare[constant[attachment] in name[log_item]] begin[:]
<ast.Delete object at 0x7da204566050>
if name[attachment] begin[:]
if <ast.UnaryOp object at 0x7da2045650f0> begin[:]
variable[attachment] assign[=] dictionary[[<ast.Constant object at 0x7da204564c70>], [<ast.Name object at 0x7da204567490>]]
variable[name] assign[=] call[name[attachment].get, parameter[constant[name], call[name[str], parameter[call[name[uuid].uuid4, parameter[]]]]]]
call[name[log_item]][constant[file]] assign[=] dictionary[[<ast.Constant object at 0x7da204564430>], [<ast.Name object at 0x7da204566ec0>]]
call[name[attachments].append, parameter[tuple[[<ast.Constant object at 0x7da204567160>, <ast.Tuple object at 0x7da204564340>]]]]
variable[files] assign[=] list[[<ast.Tuple object at 0x7da204565270>]]
call[name[files].extend, parameter[name[attachments]]]
from relative_module[reportportal_client] import module[POST_LOGBATCH_RETRY_COUNT]
for taget[name[i]] in starred[call[name[range], parameter[name[POST_LOGBATCH_RETRY_COUNT]]]] begin[:]
<ast.Try object at 0x7da204567100>
break
call[name[logger].debug, parameter[constant[log_batch - Stack: %s], name[self].stack]]
call[name[logger].debug, parameter[constant[log_batch response: %s], name[r].text]]
return[call[name[_get_data], parameter[name[r]]]] | keyword[def] identifier[log_batch] ( identifier[self] , identifier[log_data] ):
literal[string]
identifier[url] = identifier[uri_join] ( identifier[self] . identifier[base_url] , literal[string] )
identifier[attachments] =[]
keyword[for] identifier[log_item] keyword[in] identifier[log_data] :
identifier[log_item] [ literal[string] ]= identifier[self] . identifier[stack] [- literal[int] ]
identifier[attachment] = identifier[log_item] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] literal[string] keyword[in] identifier[log_item] :
keyword[del] identifier[log_item] [ literal[string] ]
keyword[if] identifier[attachment] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[attachment] , identifier[collections] . identifier[Mapping] ):
identifier[attachment] ={ literal[string] : identifier[attachment] }
identifier[name] = identifier[attachment] . identifier[get] ( literal[string] , identifier[str] ( identifier[uuid] . identifier[uuid4] ()))
identifier[log_item] [ literal[string] ]={ literal[string] : identifier[name] }
identifier[attachments] . identifier[append] (( literal[string] ,(
identifier[name] ,
identifier[attachment] [ literal[string] ],
identifier[attachment] . identifier[get] ( literal[string] , literal[string] )
)))
identifier[files] =[(
literal[string] ,(
keyword[None] ,
identifier[json] . identifier[dumps] ( identifier[log_data] ),
literal[string]
)
)]
identifier[files] . identifier[extend] ( identifier[attachments] )
keyword[from] identifier[reportportal_client] keyword[import] identifier[POST_LOGBATCH_RETRY_COUNT]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[POST_LOGBATCH_RETRY_COUNT] ):
keyword[try] :
identifier[r] = identifier[self] . identifier[session] . identifier[post] (
identifier[url] = identifier[url] ,
identifier[files] = identifier[files] ,
identifier[verify] = identifier[self] . identifier[verify_ssl]
)
keyword[except] identifier[KeyError] :
keyword[if] identifier[i] < identifier[POST_LOGBATCH_RETRY_COUNT] - literal[int] :
keyword[continue]
keyword[else] :
keyword[raise]
keyword[break]
identifier[logger] . identifier[debug] ( literal[string] , identifier[self] . identifier[stack] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[r] . identifier[text] )
keyword[return] identifier[_get_data] ( identifier[r] ) | def log_batch(self, log_data):
"""Logs batch of messages with attachment.
Args:
log_data: list of log records.
log record is a dict of;
time, message, level, attachment
attachment is a dict of:
name: name of attachment
data: fileobj or content
mime: content type for attachment
"""
url = uri_join(self.base_url, 'log')
attachments = []
for log_item in log_data:
log_item['item_id'] = self.stack[-1]
attachment = log_item.get('attachment', None)
if 'attachment' in log_item:
del log_item['attachment'] # depends on [control=['if'], data=['log_item']]
if attachment:
if not isinstance(attachment, collections.Mapping):
attachment = {'data': attachment} # depends on [control=['if'], data=[]]
name = attachment.get('name', str(uuid.uuid4()))
log_item['file'] = {'name': name}
attachments.append(('file', (name, attachment['data'], attachment.get('mime', 'application/octet-stream')))) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['log_item']]
files = [('json_request_part', (None, json.dumps(log_data), 'application/json'))]
files.extend(attachments)
from reportportal_client import POST_LOGBATCH_RETRY_COUNT
for i in range(POST_LOGBATCH_RETRY_COUNT):
try:
r = self.session.post(url=url, files=files, verify=self.verify_ssl) # depends on [control=['try'], data=[]]
except KeyError:
if i < POST_LOGBATCH_RETRY_COUNT - 1:
continue # depends on [control=['if'], data=[]]
else:
raise # depends on [control=['except'], data=[]]
break # depends on [control=['for'], data=['i']]
logger.debug('log_batch - Stack: %s', self.stack)
logger.debug('log_batch response: %s', r.text)
return _get_data(r) |
def focus(self, model):
    """Set the given model as the focused element.

    Passing ``None`` clears the current focus via the property deleter.

    :param ModelMT model: The element to be focused
    """
    if model is None:
        del self.focus
        return
    self._check_model_types(model)
    self.add(model)
    previous_focus = self._focus
    self._focus = model
    self._selected.add(model)
    self._selected = reduce_to_parent_states(self._selected)
    # Notify observers with both the new and the previous focus.
    self.focus_signal.emit(FocusSignalMsg(model, previous_focus))
constant[Sets the passed model as focused element
:param ModelMT model: The element to be focused
]
if compare[name[model] is constant[None]] begin[:]
<ast.Delete object at 0x7da18ede6170>
return[None]
call[name[self]._check_model_types, parameter[name[model]]]
call[name[self].add, parameter[name[model]]]
variable[focus_msg] assign[=] call[name[FocusSignalMsg], parameter[name[model], name[self]._focus]]
name[self]._focus assign[=] name[model]
call[name[self]._selected.add, parameter[name[model]]]
name[self]._selected assign[=] call[name[reduce_to_parent_states], parameter[name[self]._selected]]
call[name[self].focus_signal.emit, parameter[name[focus_msg]]] | keyword[def] identifier[focus] ( identifier[self] , identifier[model] ):
literal[string]
keyword[if] identifier[model] keyword[is] keyword[None] :
keyword[del] identifier[self] . identifier[focus]
keyword[return]
identifier[self] . identifier[_check_model_types] ( identifier[model] )
identifier[self] . identifier[add] ( identifier[model] )
identifier[focus_msg] = identifier[FocusSignalMsg] ( identifier[model] , identifier[self] . identifier[_focus] )
identifier[self] . identifier[_focus] = identifier[model]
identifier[self] . identifier[_selected] . identifier[add] ( identifier[model] )
identifier[self] . identifier[_selected] = identifier[reduce_to_parent_states] ( identifier[self] . identifier[_selected] )
identifier[self] . identifier[focus_signal] . identifier[emit] ( identifier[focus_msg] ) | def focus(self, model):
"""Sets the passed model as focused element
:param ModelMT model: The element to be focused
"""
if model is None:
del self.focus
return # depends on [control=['if'], data=[]]
self._check_model_types(model)
self.add(model)
focus_msg = FocusSignalMsg(model, self._focus)
self._focus = model
self._selected.add(model)
self._selected = reduce_to_parent_states(self._selected)
self.focus_signal.emit(focus_msg) |
def find_all_runs(self, session=None):
    """Return every task record that has been updated.

    :param session: optional existing DB session; one is opened if omitted.
    """
    with self._session(session) as active_session:
        query = active_session.query(TaskRecord)
        return query.all()
constant[
Return all tasks that have been updated.
]
with call[name[self]._session, parameter[name[session]]] begin[:]
return[call[call[name[session].query, parameter[name[TaskRecord]]].all, parameter[]]] | keyword[def] identifier[find_all_runs] ( identifier[self] , identifier[session] = keyword[None] ):
literal[string]
keyword[with] identifier[self] . identifier[_session] ( identifier[session] ) keyword[as] identifier[session] :
keyword[return] identifier[session] . identifier[query] ( identifier[TaskRecord] ). identifier[all] () | def find_all_runs(self, session=None):
"""
Return all tasks that have been updated.
"""
with self._session(session) as session:
return session.query(TaskRecord).all() # depends on [control=['with'], data=['session']] |
def set_actuator_control_target_encode(self, time_usec, group_mlx, target_system, target_component, controls):
    '''
    Build (but do not send) a SET_ACTUATOR_CONTROL_TARGET message that sets
    the vehicle attitude and body angular rates.

    time_usec        : Timestamp (micros since boot or Unix epoch) (uint64_t)
    group_mlx        : Actuator group. The "_mlx" suffix marks this as a multi-instance message; a MAVLink parser should use this field to tell instances apart. (uint8_t)
    target_system    : System ID (uint8_t)
    target_component : Component ID (uint8_t)
    controls         : Actuator controls, normed to -1..+1 with 0 as neutral position. Throttle for single-rotation-direction motors is 0..1, with the negative range meaning reverse. Standard attitude-control mapping (group 0), indices 0-7: roll, pitch, yaw, throttle, flaps, spoilers, airbrakes, landing gear. Load a pass-through mixer to repurpose them as generic outputs. (float)
    '''
    message = MAVLink_set_actuator_control_target_message(
        time_usec, group_mlx, target_system, target_component, controls)
    return message
constant[
Set the vehicle attitude and body angular rates.
time_usec : Timestamp (micros since boot or Unix epoch) (uint64_t)
group_mlx : Actuator group. The "_mlx" indicates this is a multi-instance message and a MAVLink parser should use this field to difference between instances. (uint8_t)
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
controls : Actuator controls. Normed to -1..+1 where 0 is neutral position. Throttle for single rotation direction motors is 0..1, negative range for reverse direction. Standard mapping for attitude controls (group 0): (index 0-7): roll, pitch, yaw, throttle, flaps, spoilers, airbrakes, landing gear. Load a pass-through mixer to repurpose them as generic outputs. (float)
]
return[call[name[MAVLink_set_actuator_control_target_message], parameter[name[time_usec], name[group_mlx], name[target_system], name[target_component], name[controls]]]] | keyword[def] identifier[set_actuator_control_target_encode] ( identifier[self] , identifier[time_usec] , identifier[group_mlx] , identifier[target_system] , identifier[target_component] , identifier[controls] ):
literal[string]
keyword[return] identifier[MAVLink_set_actuator_control_target_message] ( identifier[time_usec] , identifier[group_mlx] , identifier[target_system] , identifier[target_component] , identifier[controls] ) | def set_actuator_control_target_encode(self, time_usec, group_mlx, target_system, target_component, controls):
"""
Set the vehicle attitude and body angular rates.
time_usec : Timestamp (micros since boot or Unix epoch) (uint64_t)
group_mlx : Actuator group. The "_mlx" indicates this is a multi-instance message and a MAVLink parser should use this field to difference between instances. (uint8_t)
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
controls : Actuator controls. Normed to -1..+1 where 0 is neutral position. Throttle for single rotation direction motors is 0..1, negative range for reverse direction. Standard mapping for attitude controls (group 0): (index 0-7): roll, pitch, yaw, throttle, flaps, spoilers, airbrakes, landing gear. Load a pass-through mixer to repurpose them as generic outputs. (float)
"""
return MAVLink_set_actuator_control_target_message(time_usec, group_mlx, target_system, target_component, controls) |
def init_node(cls, *args, **kwargs):
    """Initializes an ast node with the provided attributes.

    Python 2.6+ supports this in the node class initializers, but Python 2.5
    does not, so this is intended to be an equivalent.

    :param cls: node class to instantiate.
    :param args: positional field values, matched to ``cls._fields`` in order.
    :param kwargs: additional attributes to set by name.
    :return: the initialized node instance.
    """
    node = cls()
    for name, value in zip(cls._fields, args):
        setattr(node, name, value)
    # Bug fix: iterating a dict yields only its keys, so the original
    # ``for name, value in kwargs`` either raised ValueError or mis-unpacked
    # key strings; .items() yields the intended (name, value) pairs.
    for name, value in kwargs.items():
        setattr(node, name, value)
    return node
constant[Initializes an ast node with the provided attributes.
Python 2.6+ supports this in the node class initializers, but Python 2.5
does not, so this is intended to be an equivalent.
]
variable[node] assign[=] call[name[cls], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da20e9b34c0>, <ast.Name object at 0x7da20e9b1f30>]]] in starred[call[name[zip], parameter[name[cls]._fields, name[args]]]] begin[:]
call[name[setattr], parameter[name[node], name[name], name[value]]]
for taget[tuple[[<ast.Name object at 0x7da20e9b2cb0>, <ast.Name object at 0x7da20e9b26e0>]]] in starred[name[kwargs]] begin[:]
call[name[setattr], parameter[name[node], name[name], name[value]]]
return[name[node]] | keyword[def] identifier[init_node] ( identifier[cls] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[node] = identifier[cls] ()
keyword[for] identifier[name] , identifier[value] keyword[in] identifier[zip] ( identifier[cls] . identifier[_fields] , identifier[args] ):
identifier[setattr] ( identifier[node] , identifier[name] , identifier[value] )
keyword[for] identifier[name] , identifier[value] keyword[in] identifier[kwargs] :
identifier[setattr] ( identifier[node] , identifier[name] , identifier[value] )
keyword[return] identifier[node] | def init_node(cls, *args, **kwargs):
"""Initializes an ast node with the provided attributes.
Python 2.6+ supports this in the node class initializers, but Python 2.5
does not, so this is intended to be an equivalent.
"""
node = cls()
for (name, value) in zip(cls._fields, args):
setattr(node, name, value) # depends on [control=['for'], data=[]]
for (name, value) in kwargs:
setattr(node, name, value) # depends on [control=['for'], data=[]]
return node |
def get_stacker_env_file(path, environment, region):
    """Determine Stacker environment file name.

    Returns the first candidate name (from gen_stacker_env_files) that exists
    under ``path``; otherwise falls back to the "<environment>-<region>.env"
    convention.
    """
    candidates = gen_stacker_env_files(environment, region)
    existing = next(
        (candidate for candidate in candidates
         if os.path.isfile(os.path.join(path, candidate))),
        None,
    )
    if existing is not None:
        return existing
    return "%s-%s.env" % (environment, region)
constant[Determine Stacker environment file name.]
for taget[name[name]] in starred[call[name[gen_stacker_env_files], parameter[name[environment], name[region]]]] begin[:]
if call[name[os].path.isfile, parameter[call[name[os].path.join, parameter[name[path], name[name]]]]] begin[:]
return[name[name]]
return[binary_operation[constant[%s-%s.env] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b06fc3d0>, <ast.Name object at 0x7da1b06ff9d0>]]]] | keyword[def] identifier[get_stacker_env_file] ( identifier[path] , identifier[environment] , identifier[region] ):
literal[string]
keyword[for] identifier[name] keyword[in] identifier[gen_stacker_env_files] ( identifier[environment] , identifier[region] ):
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[name] )):
keyword[return] identifier[name]
keyword[return] literal[string] %( identifier[environment] , identifier[region] ) | def get_stacker_env_file(path, environment, region):
"""Determine Stacker environment file name."""
for name in gen_stacker_env_files(environment, region):
if os.path.isfile(os.path.join(path, name)):
return name # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['name']]
return '%s-%s.env' % (environment, region) |
def writeGlobalFile(self, localFileName, cleanup=False):
    """
    Takes a file (as a path) and uploads it to the job store. Depending on the jobstore
    used, carry out the appropriate cache functions.

    :param str localFileName: absolute or relative path of the file to upload.
    :param bool cleanup: if True, scope the stored file to this job's
        jobStoreID so the job store can delete it with the job.
    :return: a FileID referencing the uploaded file.
    """
    absLocalFileName = self._resolveAbsoluteLocalPath(localFileName)
    # When cleanup is requested, the file is registered under this job's ID so
    # the job store can delete it together with the job.
    cleanupID = None if not cleanup else self.jobGraph.jobStoreID
    # If the file is from the scope of local temp dir
    if absLocalFileName.startswith(self.localTempDir):
        # If the job store is of type FileJobStore and the job store and the local temp dir
        # are on the same file system, then we want to hard link the files instead of copying
        # barring the case where the file being written was one that was previously read
        # from the file store. In that case, you want to copy to the file store so that
        # the two have distinct nlink counts.
        # Can read without a lock because we're only reading job-specific info.
        jobSpecificFiles = list(self._CacheState._load(self.cacheStateFile).jobState[
            self.jobID]['filesToFSIDs'].keys())
        # Saying nlink is 2 implicitly means we are using the job file store, and it is on
        # the same device as the work dir.
        if self.nlinkThreshold == 2 and absLocalFileName not in jobSpecificFiles:
            jobStoreFileID = self.jobStore.getEmptyFileStoreID(cleanupID)
            # getEmptyFileStoreID creates the file in the scope of the job store hence we
            # need to delete it before linking.
            os.remove(self.jobStore._getAbsPath(jobStoreFileID))
            os.link(absLocalFileName, self.jobStore._getAbsPath(jobStoreFileID))
        # If they're not on the file system, or if the file is already linked with an
        # existing file, we need to copy to the job store.
        # Check if the user allows asynchronous file writes
        elif self.jobStore.config.useAsync:
            jobStoreFileID = self.jobStore.getEmptyFileStoreID(cleanupID)
            # Before we can start the async process, we should also create a dummy harbinger
            # file in the cache such that any subsequent jobs asking for this file will not
            # attempt to download it from the job store till the write is complete. We do
            # this now instead of in the writing thread because there is an edge case where
            # readGlobalFile in a subsequent job is called before the writing thread has
            # received the message to write the file and has created the dummy harbinger
            # (and the file was unable to be cached/was evicted from the cache).
            harbingerFile = self.HarbingerFile(self, fileStoreID=jobStoreFileID)
            harbingerFile.write()
            # NOTE(review): the handle is handed to the async writer via the
            # queue; presumably the writer closes it once written -- confirm.
            fileHandle = open(absLocalFileName, 'rb')
            with self._pendingFileWritesLock:
                self._pendingFileWrites.add(jobStoreFileID)
            # A file handle added to the queue allows the asyncWrite threads to remove their
            # jobID from _pendingFileWrites. Therefore, a file should only be added after
            # its fileID is added to _pendingFileWrites
            self.queue.put((fileHandle, jobStoreFileID))
        # Else write directly to the job store.
        else:
            jobStoreFileID = self.jobStore.writeFile(absLocalFileName, cleanupID)
        # Local files are cached by default, unless they were written from previously read
        # files.
        if absLocalFileName not in jobSpecificFiles:
            self.addToCache(absLocalFileName, jobStoreFileID, 'write')
        else:
            self._JobState.updateJobSpecificFiles(self, jobStoreFileID, absLocalFileName,
                                                  0.0, False)
    # Else write directly to the job store.
    else:
        jobStoreFileID = self.jobStore.writeFile(absLocalFileName, cleanupID)
        # Non local files are NOT cached by default, but they are tracked as local files.
        self._JobState.updateJobSpecificFiles(self, jobStoreFileID, None,
                                              0.0, False)
    return FileID.forPath(jobStoreFileID, absLocalFileName) | def function[writeGlobalFile, parameter[self, localFileName, cleanup]]:
constant[
Takes a file (as a path) and uploads it to the job store. Depending on the jobstore
used, carry out the appropriate cache functions.
]
variable[absLocalFileName] assign[=] call[name[self]._resolveAbsoluteLocalPath, parameter[name[localFileName]]]
variable[cleanupID] assign[=] <ast.IfExp object at 0x7da2054a7910>
if call[name[absLocalFileName].startswith, parameter[name[self].localTempDir]] begin[:]
variable[jobSpecificFiles] assign[=] call[name[list], parameter[call[call[call[call[name[self]._CacheState._load, parameter[name[self].cacheStateFile]].jobState][name[self].jobID]][constant[filesToFSIDs]].keys, parameter[]]]]
if <ast.BoolOp object at 0x7da2054a60e0> begin[:]
variable[jobStoreFileID] assign[=] call[name[self].jobStore.getEmptyFileStoreID, parameter[name[cleanupID]]]
call[name[os].remove, parameter[call[name[self].jobStore._getAbsPath, parameter[name[jobStoreFileID]]]]]
call[name[os].link, parameter[name[absLocalFileName], call[name[self].jobStore._getAbsPath, parameter[name[jobStoreFileID]]]]]
if compare[name[absLocalFileName] <ast.NotIn object at 0x7da2590d7190> name[jobSpecificFiles]] begin[:]
call[name[self].addToCache, parameter[name[absLocalFileName], name[jobStoreFileID], constant[write]]]
return[call[name[FileID].forPath, parameter[name[jobStoreFileID], name[absLocalFileName]]]] | keyword[def] identifier[writeGlobalFile] ( identifier[self] , identifier[localFileName] , identifier[cleanup] = keyword[False] ):
literal[string]
identifier[absLocalFileName] = identifier[self] . identifier[_resolveAbsoluteLocalPath] ( identifier[localFileName] )
identifier[cleanupID] = keyword[None] keyword[if] keyword[not] identifier[cleanup] keyword[else] identifier[self] . identifier[jobGraph] . identifier[jobStoreID]
keyword[if] identifier[absLocalFileName] . identifier[startswith] ( identifier[self] . identifier[localTempDir] ):
identifier[jobSpecificFiles] = identifier[list] ( identifier[self] . identifier[_CacheState] . identifier[_load] ( identifier[self] . identifier[cacheStateFile] ). identifier[jobState] [
identifier[self] . identifier[jobID] ][ literal[string] ]. identifier[keys] ())
keyword[if] identifier[self] . identifier[nlinkThreshold] == literal[int] keyword[and] identifier[absLocalFileName] keyword[not] keyword[in] identifier[jobSpecificFiles] :
identifier[jobStoreFileID] = identifier[self] . identifier[jobStore] . identifier[getEmptyFileStoreID] ( identifier[cleanupID] )
identifier[os] . identifier[remove] ( identifier[self] . identifier[jobStore] . identifier[_getAbsPath] ( identifier[jobStoreFileID] ))
identifier[os] . identifier[link] ( identifier[absLocalFileName] , identifier[self] . identifier[jobStore] . identifier[_getAbsPath] ( identifier[jobStoreFileID] ))
keyword[elif] identifier[self] . identifier[jobStore] . identifier[config] . identifier[useAsync] :
identifier[jobStoreFileID] = identifier[self] . identifier[jobStore] . identifier[getEmptyFileStoreID] ( identifier[cleanupID] )
identifier[harbingerFile] = identifier[self] . identifier[HarbingerFile] ( identifier[self] , identifier[fileStoreID] = identifier[jobStoreFileID] )
identifier[harbingerFile] . identifier[write] ()
identifier[fileHandle] = identifier[open] ( identifier[absLocalFileName] , literal[string] )
keyword[with] identifier[self] . identifier[_pendingFileWritesLock] :
identifier[self] . identifier[_pendingFileWrites] . identifier[add] ( identifier[jobStoreFileID] )
identifier[self] . identifier[queue] . identifier[put] (( identifier[fileHandle] , identifier[jobStoreFileID] ))
keyword[else] :
identifier[jobStoreFileID] = identifier[self] . identifier[jobStore] . identifier[writeFile] ( identifier[absLocalFileName] , identifier[cleanupID] )
keyword[if] identifier[absLocalFileName] keyword[not] keyword[in] identifier[jobSpecificFiles] :
identifier[self] . identifier[addToCache] ( identifier[absLocalFileName] , identifier[jobStoreFileID] , literal[string] )
keyword[else] :
identifier[self] . identifier[_JobState] . identifier[updateJobSpecificFiles] ( identifier[self] , identifier[jobStoreFileID] , identifier[absLocalFileName] ,
literal[int] , keyword[False] )
keyword[else] :
identifier[jobStoreFileID] = identifier[self] . identifier[jobStore] . identifier[writeFile] ( identifier[absLocalFileName] , identifier[cleanupID] )
identifier[self] . identifier[_JobState] . identifier[updateJobSpecificFiles] ( identifier[self] , identifier[jobStoreFileID] , keyword[None] ,
literal[int] , keyword[False] )
keyword[return] identifier[FileID] . identifier[forPath] ( identifier[jobStoreFileID] , identifier[absLocalFileName] ) | def writeGlobalFile(self, localFileName, cleanup=False):
"""
Takes a file (as a path) and uploads it to the job store. Depending on the jobstore
used, carry out the appropriate cache functions.
"""
absLocalFileName = self._resolveAbsoluteLocalPath(localFileName)
# What does this do?
cleanupID = None if not cleanup else self.jobGraph.jobStoreID
# If the file is from the scope of local temp dir
if absLocalFileName.startswith(self.localTempDir):
# If the job store is of type FileJobStore and the job store and the local temp dir
# are on the same file system, then we want to hard link the files instead of copying
# barring the case where the file being written was one that was previously read
# from the file store. In that case, you want to copy to the file store so that
# the two have distinct nlink counts.
# Can read without a lock because we're only reading job-specific info.
jobSpecificFiles = list(self._CacheState._load(self.cacheStateFile).jobState[self.jobID]['filesToFSIDs'].keys())
# Saying nlink is 2 implicitly means we are using the job file store, and it is on
# the same device as the work dir.
if self.nlinkThreshold == 2 and absLocalFileName not in jobSpecificFiles:
jobStoreFileID = self.jobStore.getEmptyFileStoreID(cleanupID)
# getEmptyFileStoreID creates the file in the scope of the job store hence we
# need to delete it before linking.
os.remove(self.jobStore._getAbsPath(jobStoreFileID))
os.link(absLocalFileName, self.jobStore._getAbsPath(jobStoreFileID)) # depends on [control=['if'], data=[]]
# If they're not on the file system, or if the file is already linked with an
# existing file, we need to copy to the job store.
# Check if the user allows asynchronous file writes
elif self.jobStore.config.useAsync:
jobStoreFileID = self.jobStore.getEmptyFileStoreID(cleanupID)
# Before we can start the async process, we should also create a dummy harbinger
# file in the cache such that any subsequent jobs asking for this file will not
# attempt to download it from the job store till the write is complete. We do
# this now instead of in the writing thread because there is an edge case where
# readGlobalFile in a subsequent job is called before the writing thread has
# received the message to write the file and has created the dummy harbinger
# (and the file was unable to be cached/was evicted from the cache).
harbingerFile = self.HarbingerFile(self, fileStoreID=jobStoreFileID)
harbingerFile.write()
fileHandle = open(absLocalFileName, 'rb')
with self._pendingFileWritesLock:
self._pendingFileWrites.add(jobStoreFileID) # depends on [control=['with'], data=[]]
# A file handle added to the queue allows the asyncWrite threads to remove their
# jobID from _pendingFileWrites. Therefore, a file should only be added after
# its fileID is added to _pendingFileWrites
self.queue.put((fileHandle, jobStoreFileID)) # depends on [control=['if'], data=[]]
else:
# Else write directly to the job store.
jobStoreFileID = self.jobStore.writeFile(absLocalFileName, cleanupID)
# Local files are cached by default, unless they were written from previously read
# files.
if absLocalFileName not in jobSpecificFiles:
self.addToCache(absLocalFileName, jobStoreFileID, 'write') # depends on [control=['if'], data=['absLocalFileName']]
else:
self._JobState.updateJobSpecificFiles(self, jobStoreFileID, absLocalFileName, 0.0, False) # depends on [control=['if'], data=[]]
else:
# Else write directly to the job store.
jobStoreFileID = self.jobStore.writeFile(absLocalFileName, cleanupID)
# Non local files are NOT cached by default, but they are tracked as local files.
self._JobState.updateJobSpecificFiles(self, jobStoreFileID, None, 0.0, False)
return FileID.forPath(jobStoreFileID, absLocalFileName) |
def _add(self, error: "Err"):
"""
Adds an error to the trace if required
"""
if self.trace_errs is True:
self.errors.append(error) | def function[_add, parameter[self, error]]:
constant[
Adds an error to the trace if required
]
if compare[name[self].trace_errs is constant[True]] begin[:]
call[name[self].errors.append, parameter[name[error]]] | keyword[def] identifier[_add] ( identifier[self] , identifier[error] : literal[string] ):
literal[string]
keyword[if] identifier[self] . identifier[trace_errs] keyword[is] keyword[True] :
identifier[self] . identifier[errors] . identifier[append] ( identifier[error] ) | def _add(self, error: 'Err'):
"""
Adds an error to the trace if required
"""
if self.trace_errs is True:
self.errors.append(error) # depends on [control=['if'], data=[]] |
def get_license_checker_config_path(config_dir='.'):
# type: (str) -> List[str]
"""Checks for local config overrides for license checker,
if not found it returns the package default.
:param config_dir:
:return: str
"""
if LICENSE_CHECKER_CONFIG_NAME in os.listdir(config_dir):
license_checker_config_path = LICENSE_CHECKER_CONFIG_NAME
else:
license_checker_config_path = DEFAULT_LICENSE_CHECKER_CONFIG_PATH
return license_checker_config_path | def function[get_license_checker_config_path, parameter[config_dir]]:
constant[Checks for local config overrides for license checker,
if not found it returns the package default.
:param config_dir:
:return: str
]
if compare[name[LICENSE_CHECKER_CONFIG_NAME] in call[name[os].listdir, parameter[name[config_dir]]]] begin[:]
variable[license_checker_config_path] assign[=] name[LICENSE_CHECKER_CONFIG_NAME]
return[name[license_checker_config_path]] | keyword[def] identifier[get_license_checker_config_path] ( identifier[config_dir] = literal[string] ):
literal[string]
keyword[if] identifier[LICENSE_CHECKER_CONFIG_NAME] keyword[in] identifier[os] . identifier[listdir] ( identifier[config_dir] ):
identifier[license_checker_config_path] = identifier[LICENSE_CHECKER_CONFIG_NAME]
keyword[else] :
identifier[license_checker_config_path] = identifier[DEFAULT_LICENSE_CHECKER_CONFIG_PATH]
keyword[return] identifier[license_checker_config_path] | def get_license_checker_config_path(config_dir='.'):
# type: (str) -> List[str]
'Checks for local config overrides for license checker,\n if not found it returns the package default.\n\n :param config_dir:\n :return: str\n '
if LICENSE_CHECKER_CONFIG_NAME in os.listdir(config_dir):
license_checker_config_path = LICENSE_CHECKER_CONFIG_NAME # depends on [control=['if'], data=['LICENSE_CHECKER_CONFIG_NAME']]
else:
license_checker_config_path = DEFAULT_LICENSE_CHECKER_CONFIG_PATH
return license_checker_config_path |
def _parse_executable_spec(src_dir, json_file_name, parser):
"""
Returns the parsed contents of a json specification.
Raises WorkflowBuilderException (exit code 3) if this cannot be done.
"""
if not os.path.isdir(src_dir):
parser.error("{} is not a directory".format(src_dir))
if not os.path.exists(os.path.join(src_dir, json_file_name)):
raise WorkflowBuilderException(
"Directory {} does not contain dxworkflow.json: not a valid DNAnexus workflow source directory"
.format(src_dir))
with open(os.path.join(src_dir, json_file_name)) as desc:
try:
return json_load_raise_on_duplicates(desc)
except Exception as e:
raise WorkflowBuilderException("Could not parse {} file as JSON: {}".format(json_file_name, e.args)) | def function[_parse_executable_spec, parameter[src_dir, json_file_name, parser]]:
constant[
Returns the parsed contents of a json specification.
Raises WorkflowBuilderException (exit code 3) if this cannot be done.
]
if <ast.UnaryOp object at 0x7da20c6e64d0> begin[:]
call[name[parser].error, parameter[call[constant[{} is not a directory].format, parameter[name[src_dir]]]]]
if <ast.UnaryOp object at 0x7da20c6e7850> begin[:]
<ast.Raise object at 0x7da20c6e7ac0>
with call[name[open], parameter[call[name[os].path.join, parameter[name[src_dir], name[json_file_name]]]]] begin[:]
<ast.Try object at 0x7da18eb57730> | keyword[def] identifier[_parse_executable_spec] ( identifier[src_dir] , identifier[json_file_name] , identifier[parser] ):
literal[string]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[src_dir] ):
identifier[parser] . identifier[error] ( literal[string] . identifier[format] ( identifier[src_dir] ))
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[os] . identifier[path] . identifier[join] ( identifier[src_dir] , identifier[json_file_name] )):
keyword[raise] identifier[WorkflowBuilderException] (
literal[string]
. identifier[format] ( identifier[src_dir] ))
keyword[with] identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[src_dir] , identifier[json_file_name] )) keyword[as] identifier[desc] :
keyword[try] :
keyword[return] identifier[json_load_raise_on_duplicates] ( identifier[desc] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[raise] identifier[WorkflowBuilderException] ( literal[string] . identifier[format] ( identifier[json_file_name] , identifier[e] . identifier[args] )) | def _parse_executable_spec(src_dir, json_file_name, parser):
"""
Returns the parsed contents of a json specification.
Raises WorkflowBuilderException (exit code 3) if this cannot be done.
"""
if not os.path.isdir(src_dir):
parser.error('{} is not a directory'.format(src_dir)) # depends on [control=['if'], data=[]]
if not os.path.exists(os.path.join(src_dir, json_file_name)):
raise WorkflowBuilderException('Directory {} does not contain dxworkflow.json: not a valid DNAnexus workflow source directory'.format(src_dir)) # depends on [control=['if'], data=[]]
with open(os.path.join(src_dir, json_file_name)) as desc:
try:
return json_load_raise_on_duplicates(desc) # depends on [control=['try'], data=[]]
except Exception as e:
raise WorkflowBuilderException('Could not parse {} file as JSON: {}'.format(json_file_name, e.args)) # depends on [control=['except'], data=['e']] # depends on [control=['with'], data=['desc']] |
def tokens_delete(name=None, user=None, read_access_token=None, force=False):
"""Delete a personal OAuth token."""
if not (name or user) and not read_access_token:
click.get_current_context().fail(
'You have to pass either a "name" and "user" or the "token"')
if name and user:
client = Client.query.filter(
Client.user_id == user.id,
Client.name == name,
Client.is_internal.is_(True)).one()
token = Token.query.filter(
Token.user_id == user.id,
Token.is_personal.is_(True),
Token.client_id == client.client_id).one()
elif read_access_token:
access_token = click.prompt('Token', hide_input=True)
token = Token.query.filter(Token.access_token == access_token).one()
else:
click.get_current_context().fail('No token was found with provided')
if force or click.confirm('Are you sure you want to delete the token?'):
db.session.delete(token)
db.session.commit()
click.secho(
'Token "{}" deleted.'.format(token.access_token), fg='yellow') | def function[tokens_delete, parameter[name, user, read_access_token, force]]:
constant[Delete a personal OAuth token.]
if <ast.BoolOp object at 0x7da1b2581780> begin[:]
call[call[name[click].get_current_context, parameter[]].fail, parameter[constant[You have to pass either a "name" and "user" or the "token"]]]
if <ast.BoolOp object at 0x7da1b2580f10> begin[:]
variable[client] assign[=] call[call[name[Client].query.filter, parameter[compare[name[Client].user_id equal[==] name[user].id], compare[name[Client].name equal[==] name[name]], call[name[Client].is_internal.is_, parameter[constant[True]]]]].one, parameter[]]
variable[token] assign[=] call[call[name[Token].query.filter, parameter[compare[name[Token].user_id equal[==] name[user].id], call[name[Token].is_personal.is_, parameter[constant[True]]], compare[name[Token].client_id equal[==] name[client].client_id]]].one, parameter[]]
if <ast.BoolOp object at 0x7da1b2581420> begin[:]
call[name[db].session.delete, parameter[name[token]]]
call[name[db].session.commit, parameter[]]
call[name[click].secho, parameter[call[constant[Token "{}" deleted.].format, parameter[name[token].access_token]]]] | keyword[def] identifier[tokens_delete] ( identifier[name] = keyword[None] , identifier[user] = keyword[None] , identifier[read_access_token] = keyword[None] , identifier[force] = keyword[False] ):
literal[string]
keyword[if] keyword[not] ( identifier[name] keyword[or] identifier[user] ) keyword[and] keyword[not] identifier[read_access_token] :
identifier[click] . identifier[get_current_context] (). identifier[fail] (
literal[string] )
keyword[if] identifier[name] keyword[and] identifier[user] :
identifier[client] = identifier[Client] . identifier[query] . identifier[filter] (
identifier[Client] . identifier[user_id] == identifier[user] . identifier[id] ,
identifier[Client] . identifier[name] == identifier[name] ,
identifier[Client] . identifier[is_internal] . identifier[is_] ( keyword[True] )). identifier[one] ()
identifier[token] = identifier[Token] . identifier[query] . identifier[filter] (
identifier[Token] . identifier[user_id] == identifier[user] . identifier[id] ,
identifier[Token] . identifier[is_personal] . identifier[is_] ( keyword[True] ),
identifier[Token] . identifier[client_id] == identifier[client] . identifier[client_id] ). identifier[one] ()
keyword[elif] identifier[read_access_token] :
identifier[access_token] = identifier[click] . identifier[prompt] ( literal[string] , identifier[hide_input] = keyword[True] )
identifier[token] = identifier[Token] . identifier[query] . identifier[filter] ( identifier[Token] . identifier[access_token] == identifier[access_token] ). identifier[one] ()
keyword[else] :
identifier[click] . identifier[get_current_context] (). identifier[fail] ( literal[string] )
keyword[if] identifier[force] keyword[or] identifier[click] . identifier[confirm] ( literal[string] ):
identifier[db] . identifier[session] . identifier[delete] ( identifier[token] )
identifier[db] . identifier[session] . identifier[commit] ()
identifier[click] . identifier[secho] (
literal[string] . identifier[format] ( identifier[token] . identifier[access_token] ), identifier[fg] = literal[string] ) | def tokens_delete(name=None, user=None, read_access_token=None, force=False):
"""Delete a personal OAuth token."""
if not (name or user) and (not read_access_token):
click.get_current_context().fail('You have to pass either a "name" and "user" or the "token"') # depends on [control=['if'], data=[]]
if name and user:
client = Client.query.filter(Client.user_id == user.id, Client.name == name, Client.is_internal.is_(True)).one()
token = Token.query.filter(Token.user_id == user.id, Token.is_personal.is_(True), Token.client_id == client.client_id).one() # depends on [control=['if'], data=[]]
elif read_access_token:
access_token = click.prompt('Token', hide_input=True)
token = Token.query.filter(Token.access_token == access_token).one() # depends on [control=['if'], data=[]]
else:
click.get_current_context().fail('No token was found with provided')
if force or click.confirm('Are you sure you want to delete the token?'):
db.session.delete(token)
db.session.commit()
click.secho('Token "{}" deleted.'.format(token.access_token), fg='yellow') # depends on [control=['if'], data=[]] |
def self_sign_jwks(keyjar, iss, kid='', lifetime=3600):
"""
Create a signed JWT containing a JWKS. The JWT is signed by one of the
keys in the JWKS.
:param keyjar: A KeyJar instance with at least one private signing key
:param iss: issuer of the JWT, should be the owner of the keys
:param kid: A key ID if a special key should be used otherwise one
is picked at random.
:param lifetime: The lifetime of the signed JWT
:return: A signed JWT
"""
# _json = json.dumps(jwks)
_jwt = JWT(keyjar, iss=iss, lifetime=lifetime)
jwks = keyjar.export_jwks(issuer=iss)
return _jwt.pack(payload={'jwks': jwks}, owner=iss, kid=kid) | def function[self_sign_jwks, parameter[keyjar, iss, kid, lifetime]]:
constant[
Create a signed JWT containing a JWKS. The JWT is signed by one of the
keys in the JWKS.
:param keyjar: A KeyJar instance with at least one private signing key
:param iss: issuer of the JWT, should be the owner of the keys
:param kid: A key ID if a special key should be used otherwise one
is picked at random.
:param lifetime: The lifetime of the signed JWT
:return: A signed JWT
]
variable[_jwt] assign[=] call[name[JWT], parameter[name[keyjar]]]
variable[jwks] assign[=] call[name[keyjar].export_jwks, parameter[]]
return[call[name[_jwt].pack, parameter[]]] | keyword[def] identifier[self_sign_jwks] ( identifier[keyjar] , identifier[iss] , identifier[kid] = literal[string] , identifier[lifetime] = literal[int] ):
literal[string]
identifier[_jwt] = identifier[JWT] ( identifier[keyjar] , identifier[iss] = identifier[iss] , identifier[lifetime] = identifier[lifetime] )
identifier[jwks] = identifier[keyjar] . identifier[export_jwks] ( identifier[issuer] = identifier[iss] )
keyword[return] identifier[_jwt] . identifier[pack] ( identifier[payload] ={ literal[string] : identifier[jwks] }, identifier[owner] = identifier[iss] , identifier[kid] = identifier[kid] ) | def self_sign_jwks(keyjar, iss, kid='', lifetime=3600):
"""
Create a signed JWT containing a JWKS. The JWT is signed by one of the
keys in the JWKS.
:param keyjar: A KeyJar instance with at least one private signing key
:param iss: issuer of the JWT, should be the owner of the keys
:param kid: A key ID if a special key should be used otherwise one
is picked at random.
:param lifetime: The lifetime of the signed JWT
:return: A signed JWT
"""
# _json = json.dumps(jwks)
_jwt = JWT(keyjar, iss=iss, lifetime=lifetime)
jwks = keyjar.export_jwks(issuer=iss)
return _jwt.pack(payload={'jwks': jwks}, owner=iss, kid=kid) |
def __capture(self, checkout_id, **kwargs):
"""Call documentation: `/checkout/capture
<https://www.wepay.com/developer/reference/checkout#capture>`_, plus
extra keyword parameters:
:keyword str access_token: will be used instead of instance's
``access_token``, with ``batch_mode=True`` will set `authorization`
param to it's value.
:keyword bool batch_mode: turn on/off the batch_mode, see
:class:`wepay.api.WePay`
:keyword str batch_reference_id: `reference_id` param for batch call,
see :class:`wepay.api.WePay`
:keyword str api_version: WePay API version, see
:class:`wepay.api.WePay`
"""
params = {
'checkout_id': checkout_id
}
return self.make_call(self.__capture, params, kwargs) | def function[__capture, parameter[self, checkout_id]]:
constant[Call documentation: `/checkout/capture
<https://www.wepay.com/developer/reference/checkout#capture>`_, plus
extra keyword parameters:
:keyword str access_token: will be used instead of instance's
``access_token``, with ``batch_mode=True`` will set `authorization`
param to it's value.
:keyword bool batch_mode: turn on/off the batch_mode, see
:class:`wepay.api.WePay`
:keyword str batch_reference_id: `reference_id` param for batch call,
see :class:`wepay.api.WePay`
:keyword str api_version: WePay API version, see
:class:`wepay.api.WePay`
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b15217b0>], [<ast.Name object at 0x7da1b1521720>]]
return[call[name[self].make_call, parameter[name[self].__capture, name[params], name[kwargs]]]] | keyword[def] identifier[__capture] ( identifier[self] , identifier[checkout_id] ,** identifier[kwargs] ):
literal[string]
identifier[params] ={
literal[string] : identifier[checkout_id]
}
keyword[return] identifier[self] . identifier[make_call] ( identifier[self] . identifier[__capture] , identifier[params] , identifier[kwargs] ) | def __capture(self, checkout_id, **kwargs):
"""Call documentation: `/checkout/capture
<https://www.wepay.com/developer/reference/checkout#capture>`_, plus
extra keyword parameters:
:keyword str access_token: will be used instead of instance's
``access_token``, with ``batch_mode=True`` will set `authorization`
param to it's value.
:keyword bool batch_mode: turn on/off the batch_mode, see
:class:`wepay.api.WePay`
:keyword str batch_reference_id: `reference_id` param for batch call,
see :class:`wepay.api.WePay`
:keyword str api_version: WePay API version, see
:class:`wepay.api.WePay`
"""
params = {'checkout_id': checkout_id}
return self.make_call(self.__capture, params, kwargs) |
def sct_report_string(report):
"""Return a human-readable string representation of the error report
returned by lxml's schematron validator.
"""
ret = []
namespaces = {"svrl": "http://purl.oclc.org/dsdl/svrl"}
for index, failed_assert_el in enumerate(
report.findall("svrl:failed-assert", namespaces=namespaces)
):
ret.append(
"{}. {}".format(
index + 1,
failed_assert_el.find("svrl:text", namespaces=namespaces).text,
)
)
ret.append(" test: {}".format(failed_assert_el.attrib["test"]))
ret.append(" location: {}".format(failed_assert_el.attrib["location"]))
ret.append("\n")
return "\n".join(ret) | def function[sct_report_string, parameter[report]]:
constant[Return a human-readable string representation of the error report
returned by lxml's schematron validator.
]
variable[ret] assign[=] list[[]]
variable[namespaces] assign[=] dictionary[[<ast.Constant object at 0x7da1b1b84730>], [<ast.Constant object at 0x7da1b1b85cf0>]]
for taget[tuple[[<ast.Name object at 0x7da1b1b85570>, <ast.Name object at 0x7da1b1b84c70>]]] in starred[call[name[enumerate], parameter[call[name[report].findall, parameter[constant[svrl:failed-assert]]]]]] begin[:]
call[name[ret].append, parameter[call[constant[{}. {}].format, parameter[binary_operation[name[index] + constant[1]], call[name[failed_assert_el].find, parameter[constant[svrl:text]]].text]]]]
call[name[ret].append, parameter[call[constant[ test: {}].format, parameter[call[name[failed_assert_el].attrib][constant[test]]]]]]
call[name[ret].append, parameter[call[constant[ location: {}].format, parameter[call[name[failed_assert_el].attrib][constant[location]]]]]]
call[name[ret].append, parameter[constant[
]]]
return[call[constant[
].join, parameter[name[ret]]]] | keyword[def] identifier[sct_report_string] ( identifier[report] ):
literal[string]
identifier[ret] =[]
identifier[namespaces] ={ literal[string] : literal[string] }
keyword[for] identifier[index] , identifier[failed_assert_el] keyword[in] identifier[enumerate] (
identifier[report] . identifier[findall] ( literal[string] , identifier[namespaces] = identifier[namespaces] )
):
identifier[ret] . identifier[append] (
literal[string] . identifier[format] (
identifier[index] + literal[int] ,
identifier[failed_assert_el] . identifier[find] ( literal[string] , identifier[namespaces] = identifier[namespaces] ). identifier[text] ,
)
)
identifier[ret] . identifier[append] ( literal[string] . identifier[format] ( identifier[failed_assert_el] . identifier[attrib] [ literal[string] ]))
identifier[ret] . identifier[append] ( literal[string] . identifier[format] ( identifier[failed_assert_el] . identifier[attrib] [ literal[string] ]))
identifier[ret] . identifier[append] ( literal[string] )
keyword[return] literal[string] . identifier[join] ( identifier[ret] ) | def sct_report_string(report):
"""Return a human-readable string representation of the error report
returned by lxml's schematron validator.
"""
ret = []
namespaces = {'svrl': 'http://purl.oclc.org/dsdl/svrl'}
for (index, failed_assert_el) in enumerate(report.findall('svrl:failed-assert', namespaces=namespaces)):
ret.append('{}. {}'.format(index + 1, failed_assert_el.find('svrl:text', namespaces=namespaces).text))
ret.append(' test: {}'.format(failed_assert_el.attrib['test']))
ret.append(' location: {}'.format(failed_assert_el.attrib['location']))
ret.append('\n') # depends on [control=['for'], data=[]]
return '\n'.join(ret) |
def handle(self, *args, **options):
"""
Run the specified Selenium test(s) the indicated number of times in
the specified browser.
"""
browser_name = options['browser_name']
count = options['count']
if len(args) > 0:
tests = list(args)
else:
tests = settings.SELENIUM_DEFAULT_TESTS
# Kill any orphaned chromedriver processes
process = Popen(['killall', 'chromedriver'],
stderr=open(os.devnull, 'w'))
process.wait()
# Clear any old log and screenshots
self.clean()
docker = None
sc_process = None
selenium_process = None
if options['docker']:
if browser_name not in ['chrome', 'firefox']:
self.stdout.write('Only chrome and firefox can currently be run in a Docker container')
return
docker = DockerSelenium(browser=browser_name,
port=settings.SELENIUM_DOCKER_PORT,
tag=settings.SELENIUM_DOCKER_TAG,
debug=settings.SELENIUM_DOCKER_DEBUG)
elif 'platform' in options and settings.SELENIUM_SAUCE_CONNECT_PATH:
running, sc_process = self.verify_sauce_connect_is_running(options)
if not running:
return
elif browser_name in ['opera', 'safari']:
running, selenium_process = self.verify_selenium_server_is_running()
if not running:
return
elif browser_name in ['ipad', 'iphone']:
if not self.verify_appium_is_running():
return
# Make it so django-nose won't have nosetests choke on our parameters
TestRunner = get_runner(django_settings)
if hasattr(TestRunner, 'django_opts'):
for option in OPTIONS:
TestRunner.django_opts.extend(option[0])
# Configure and run the tests
try:
if docker:
docker.start()
options['command_executor'] = docker.command_executor()
self.update_environment(options)
self.run_tests(tests, browser_name, count)
finally:
# Stop the Selenium Docker container, if running
if docker and docker.container_id:
docker.stop()
# Kill Sauce Connect, if running
if sc_process:
sc_process.kill()
# Kill the Selenium standalone server, if running
if selenium_process:
selenium_process.kill() | def function[handle, parameter[self]]:
constant[
Run the specified Selenium test(s) the indicated number of times in
the specified browser.
]
variable[browser_name] assign[=] call[name[options]][constant[browser_name]]
variable[count] assign[=] call[name[options]][constant[count]]
if compare[call[name[len], parameter[name[args]]] greater[>] constant[0]] begin[:]
variable[tests] assign[=] call[name[list], parameter[name[args]]]
variable[process] assign[=] call[name[Popen], parameter[list[[<ast.Constant object at 0x7da1b0c9e470>, <ast.Constant object at 0x7da1b0c9de70>]]]]
call[name[process].wait, parameter[]]
call[name[self].clean, parameter[]]
variable[docker] assign[=] constant[None]
variable[sc_process] assign[=] constant[None]
variable[selenium_process] assign[=] constant[None]
if call[name[options]][constant[docker]] begin[:]
if compare[name[browser_name] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da1b0b9d360>, <ast.Constant object at 0x7da1b0b9cb50>]]] begin[:]
call[name[self].stdout.write, parameter[constant[Only chrome and firefox can currently be run in a Docker container]]]
return[None]
variable[docker] assign[=] call[name[DockerSelenium], parameter[]]
variable[TestRunner] assign[=] call[name[get_runner], parameter[name[django_settings]]]
if call[name[hasattr], parameter[name[TestRunner], constant[django_opts]]] begin[:]
for taget[name[option]] in starred[name[OPTIONS]] begin[:]
call[name[TestRunner].django_opts.extend, parameter[call[name[option]][constant[0]]]]
<ast.Try object at 0x7da1b0b9c8e0>
if name[sc_process] begin[:]
call[name[sc_process].kill, parameter[]]
if name[selenium_process] begin[:]
call[name[selenium_process].kill, parameter[]] | keyword[def] identifier[handle] ( identifier[self] ,* identifier[args] ,** identifier[options] ):
literal[string]
identifier[browser_name] = identifier[options] [ literal[string] ]
identifier[count] = identifier[options] [ literal[string] ]
keyword[if] identifier[len] ( identifier[args] )> literal[int] :
identifier[tests] = identifier[list] ( identifier[args] )
keyword[else] :
identifier[tests] = identifier[settings] . identifier[SELENIUM_DEFAULT_TESTS]
identifier[process] = identifier[Popen] ([ literal[string] , literal[string] ],
identifier[stderr] = identifier[open] ( identifier[os] . identifier[devnull] , literal[string] ))
identifier[process] . identifier[wait] ()
identifier[self] . identifier[clean] ()
identifier[docker] = keyword[None]
identifier[sc_process] = keyword[None]
identifier[selenium_process] = keyword[None]
keyword[if] identifier[options] [ literal[string] ]:
keyword[if] identifier[browser_name] keyword[not] keyword[in] [ literal[string] , literal[string] ]:
identifier[self] . identifier[stdout] . identifier[write] ( literal[string] )
keyword[return]
identifier[docker] = identifier[DockerSelenium] ( identifier[browser] = identifier[browser_name] ,
identifier[port] = identifier[settings] . identifier[SELENIUM_DOCKER_PORT] ,
identifier[tag] = identifier[settings] . identifier[SELENIUM_DOCKER_TAG] ,
identifier[debug] = identifier[settings] . identifier[SELENIUM_DOCKER_DEBUG] )
keyword[elif] literal[string] keyword[in] identifier[options] keyword[and] identifier[settings] . identifier[SELENIUM_SAUCE_CONNECT_PATH] :
identifier[running] , identifier[sc_process] = identifier[self] . identifier[verify_sauce_connect_is_running] ( identifier[options] )
keyword[if] keyword[not] identifier[running] :
keyword[return]
keyword[elif] identifier[browser_name] keyword[in] [ literal[string] , literal[string] ]:
identifier[running] , identifier[selenium_process] = identifier[self] . identifier[verify_selenium_server_is_running] ()
keyword[if] keyword[not] identifier[running] :
keyword[return]
keyword[elif] identifier[browser_name] keyword[in] [ literal[string] , literal[string] ]:
keyword[if] keyword[not] identifier[self] . identifier[verify_appium_is_running] ():
keyword[return]
identifier[TestRunner] = identifier[get_runner] ( identifier[django_settings] )
keyword[if] identifier[hasattr] ( identifier[TestRunner] , literal[string] ):
keyword[for] identifier[option] keyword[in] identifier[OPTIONS] :
identifier[TestRunner] . identifier[django_opts] . identifier[extend] ( identifier[option] [ literal[int] ])
keyword[try] :
keyword[if] identifier[docker] :
identifier[docker] . identifier[start] ()
identifier[options] [ literal[string] ]= identifier[docker] . identifier[command_executor] ()
identifier[self] . identifier[update_environment] ( identifier[options] )
identifier[self] . identifier[run_tests] ( identifier[tests] , identifier[browser_name] , identifier[count] )
keyword[finally] :
keyword[if] identifier[docker] keyword[and] identifier[docker] . identifier[container_id] :
identifier[docker] . identifier[stop] ()
keyword[if] identifier[sc_process] :
identifier[sc_process] . identifier[kill] ()
keyword[if] identifier[selenium_process] :
identifier[selenium_process] . identifier[kill] () | def handle(self, *args, **options):
"""
Run the specified Selenium test(s) the indicated number of times in
the specified browser.
"""
browser_name = options['browser_name']
count = options['count']
if len(args) > 0:
tests = list(args) # depends on [control=['if'], data=[]]
else:
tests = settings.SELENIUM_DEFAULT_TESTS
# Kill any orphaned chromedriver processes
process = Popen(['killall', 'chromedriver'], stderr=open(os.devnull, 'w'))
process.wait()
# Clear any old log and screenshots
self.clean()
docker = None
sc_process = None
selenium_process = None
if options['docker']:
if browser_name not in ['chrome', 'firefox']:
self.stdout.write('Only chrome and firefox can currently be run in a Docker container')
return # depends on [control=['if'], data=[]]
docker = DockerSelenium(browser=browser_name, port=settings.SELENIUM_DOCKER_PORT, tag=settings.SELENIUM_DOCKER_TAG, debug=settings.SELENIUM_DOCKER_DEBUG) # depends on [control=['if'], data=[]]
elif 'platform' in options and settings.SELENIUM_SAUCE_CONNECT_PATH:
(running, sc_process) = self.verify_sauce_connect_is_running(options)
if not running:
return # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif browser_name in ['opera', 'safari']:
(running, selenium_process) = self.verify_selenium_server_is_running()
if not running:
return # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif browser_name in ['ipad', 'iphone']:
if not self.verify_appium_is_running():
return # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Make it so django-nose won't have nosetests choke on our parameters
TestRunner = get_runner(django_settings)
if hasattr(TestRunner, 'django_opts'):
for option in OPTIONS:
TestRunner.django_opts.extend(option[0]) # depends on [control=['for'], data=['option']] # depends on [control=['if'], data=[]]
# Configure and run the tests
try:
if docker:
docker.start()
options['command_executor'] = docker.command_executor() # depends on [control=['if'], data=[]]
self.update_environment(options)
self.run_tests(tests, browser_name, count) # depends on [control=['try'], data=[]]
finally:
# Stop the Selenium Docker container, if running
if docker and docker.container_id:
docker.stop() # depends on [control=['if'], data=[]]
# Kill Sauce Connect, if running
if sc_process:
sc_process.kill() # depends on [control=['if'], data=[]]
# Kill the Selenium standalone server, if running
if selenium_process:
selenium_process.kill() # depends on [control=['if'], data=[]] |
def apply_mask_4d(image, mask_img):  # , smooth_mm=None, remove_nans=True):
    """Extract the within-mask signals from a 4D image.

    Parameters
    ----------
    image: img-like object or boyle.nifti.NeuroImage or str
        A file path to a Nifti image, or any object exposing get_data()
        and get_affine() (e.g., nibabel.Nifti1Image). A string is treated
        as a path and loaded with nibabel; an object lacking those methods
        raises TypeError.

    mask_img: img-like object or boyle.nifti.NeuroImage or str
        3D mask: True where a voxel should be used. Same accepted forms
        as `image`.

    Returns
    -------
    session_series: numpy.ndarray
        2D array of series with shape (voxel number, image number).

    mask_data: numpy.ndarray
        The mask data used to select the voxels.

    Note
    ----
    `image` and `mask_img` must have compatible (3D) shapes.

    Raises
    ------
    FileNotFound, NiftiFilesNotCompatible
    """
    checked_img = check_img(image)
    checked_mask = check_img(mask_img)
    # Only the spatial (3D) dimensions must agree; the 4th is the series axis.
    check_img_compatibility(checked_img, checked_mask, only_check_3d=True)
    return _apply_mask_to_4d_data(get_data(checked_img), checked_mask)
constant[Read a Nifti file nii_file and a mask Nifti file.
Extract the signals in nii_file that are within the mask, the mask indices
and the mask shape.
Parameters
----------
image: img-like object or boyle.nifti.NeuroImage or str
Can either be:
- a file path to a Nifti image
- any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.
If niimg is a string, consider it as a path to Nifti image and
call nibabel.load on it. If it is an object, check if get_data()
and get_affine() methods are present, raise TypeError otherwise.
mask_img: img-like object or boyle.nifti.NeuroImage or str
3D mask array: True where a voxel should be used.
See img description.
smooth_mm: float #TBD
(optional) The size in mm of the FWHM Gaussian kernel to smooth the signal.
If True, remove_nans is True.
remove_nans: bool #TBD
If remove_nans is True (default), the non-finite values (NaNs and
infs) found in the images will be replaced by zeros.
Returns
-------
session_series, mask_data
session_series: numpy.ndarray
2D array of series with shape (voxel number, image number)
Note
----
nii_file and mask_file must have the same shape.
Raises
------
FileNotFound, NiftiFilesNotCompatible
]
variable[img] assign[=] call[name[check_img], parameter[name[image]]]
variable[mask] assign[=] call[name[check_img], parameter[name[mask_img]]]
call[name[check_img_compatibility], parameter[name[img], name[mask]]]
variable[vol] assign[=] call[name[get_data], parameter[name[img]]]
<ast.Tuple object at 0x7da1afef8730> assign[=] call[name[_apply_mask_to_4d_data], parameter[name[vol], name[mask]]]
return[tuple[[<ast.Name object at 0x7da1afef93f0>, <ast.Name object at 0x7da1afef9ae0>]]] | keyword[def] identifier[apply_mask_4d] ( identifier[image] , identifier[mask_img] ):
literal[string]
identifier[img] = identifier[check_img] ( identifier[image] )
identifier[mask] = identifier[check_img] ( identifier[mask_img] )
identifier[check_img_compatibility] ( identifier[img] , identifier[mask] , identifier[only_check_3d] = keyword[True] )
identifier[vol] = identifier[get_data] ( identifier[img] )
identifier[series] , identifier[mask_data] = identifier[_apply_mask_to_4d_data] ( identifier[vol] , identifier[mask] )
keyword[return] identifier[series] , identifier[mask_data] | def apply_mask_4d(image, mask_img): # , smooth_mm=None, remove_nans=True):
'Read a Nifti file nii_file and a mask Nifti file.\n Extract the signals in nii_file that are within the mask, the mask indices\n and the mask shape.\n\n Parameters\n ----------\n image: img-like object or boyle.nifti.NeuroImage or str\n Can either be:\n - a file path to a Nifti image\n - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.\n If niimg is a string, consider it as a path to Nifti image and\n call nibabel.load on it. If it is an object, check if get_data()\n and get_affine() methods are present, raise TypeError otherwise.\n\n mask_img: img-like object or boyle.nifti.NeuroImage or str\n 3D mask array: True where a voxel should be used.\n See img description.\n\n smooth_mm: float #TBD\n (optional) The size in mm of the FWHM Gaussian kernel to smooth the signal.\n If True, remove_nans is True.\n\n remove_nans: bool #TBD\n If remove_nans is True (default), the non-finite values (NaNs and\n infs) found in the images will be replaced by zeros.\n\n Returns\n -------\n session_series, mask_data\n\n session_series: numpy.ndarray\n 2D array of series with shape (voxel number, image number)\n\n Note\n ----\n nii_file and mask_file must have the same shape.\n\n Raises\n ------\n FileNotFound, NiftiFilesNotCompatible\n '
img = check_img(image)
mask = check_img(mask_img)
check_img_compatibility(img, mask, only_check_3d=True)
vol = get_data(img)
(series, mask_data) = _apply_mask_to_4d_data(vol, mask)
return (series, mask_data) |
def _parse(
self,
item_iter, # type: Iterable[ET.Element]
state # type: _ProcessorState
):
# type: (...) -> List
"""Parse the array data using the provided iterator of XML elements."""
parsed_array = []
for i, item in enumerate(item_iter):
state.push_location(self._item_processor.element_path, i)
parsed_array.append(self._item_processor.parse_at_element(item, state))
state.pop_location()
if not parsed_array and self.required:
state.raise_error(MissingValue, 'Missing required array "{}"'.format(self.alias))
return parsed_array | def function[_parse, parameter[self, item_iter, state]]:
constant[Parse the array data using the provided iterator of XML elements.]
variable[parsed_array] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b25d2440>, <ast.Name object at 0x7da1b25d3850>]]] in starred[call[name[enumerate], parameter[name[item_iter]]]] begin[:]
call[name[state].push_location, parameter[name[self]._item_processor.element_path, name[i]]]
call[name[parsed_array].append, parameter[call[name[self]._item_processor.parse_at_element, parameter[name[item], name[state]]]]]
call[name[state].pop_location, parameter[]]
if <ast.BoolOp object at 0x7da1b25d37c0> begin[:]
call[name[state].raise_error, parameter[name[MissingValue], call[constant[Missing required array "{}"].format, parameter[name[self].alias]]]]
return[name[parsed_array]] | keyword[def] identifier[_parse] (
identifier[self] ,
identifier[item_iter] ,
identifier[state]
):
literal[string]
identifier[parsed_array] =[]
keyword[for] identifier[i] , identifier[item] keyword[in] identifier[enumerate] ( identifier[item_iter] ):
identifier[state] . identifier[push_location] ( identifier[self] . identifier[_item_processor] . identifier[element_path] , identifier[i] )
identifier[parsed_array] . identifier[append] ( identifier[self] . identifier[_item_processor] . identifier[parse_at_element] ( identifier[item] , identifier[state] ))
identifier[state] . identifier[pop_location] ()
keyword[if] keyword[not] identifier[parsed_array] keyword[and] identifier[self] . identifier[required] :
identifier[state] . identifier[raise_error] ( identifier[MissingValue] , literal[string] . identifier[format] ( identifier[self] . identifier[alias] ))
keyword[return] identifier[parsed_array] | def _parse(self, item_iter, state): # type: Iterable[ET.Element]
# type: _ProcessorState
# type: (...) -> List
'Parse the array data using the provided iterator of XML elements.'
parsed_array = []
for (i, item) in enumerate(item_iter):
state.push_location(self._item_processor.element_path, i)
parsed_array.append(self._item_processor.parse_at_element(item, state))
state.pop_location() # depends on [control=['for'], data=[]]
if not parsed_array and self.required:
state.raise_error(MissingValue, 'Missing required array "{}"'.format(self.alias)) # depends on [control=['if'], data=[]]
return parsed_array |
def mkdir(self, path, mode=o777):
    """Create a directory named ``path`` on the SFTP server.

    :param str path: name of the folder to create
    :param int mode:
        numeric permissions for the new folder (default 0777 octal);
        ignored on some systems, and masked by the current umask where
        it is honored
    """
    folder = self._adjust_cwd(path)
    self._log(DEBUG, "mkdir({!r}, {!r})".format(folder, mode))
    attrs = SFTPAttributes()
    attrs.st_mode = mode
    self._request(CMD_MKDIR, folder, attrs)
constant[
Create a folder (directory) named ``path`` with numeric mode ``mode``.
The default mode is 0777 (octal). On some systems, mode is ignored.
Where it is used, the current umask value is first masked out.
:param str path: name of the folder to create
:param int mode: permissions (posix-style) for the newly-created folder
]
variable[path] assign[=] call[name[self]._adjust_cwd, parameter[name[path]]]
call[name[self]._log, parameter[name[DEBUG], call[constant[mkdir({!r}, {!r})].format, parameter[name[path], name[mode]]]]]
variable[attr] assign[=] call[name[SFTPAttributes], parameter[]]
name[attr].st_mode assign[=] name[mode]
call[name[self]._request, parameter[name[CMD_MKDIR], name[path], name[attr]]] | keyword[def] identifier[mkdir] ( identifier[self] , identifier[path] , identifier[mode] = identifier[o777] ):
literal[string]
identifier[path] = identifier[self] . identifier[_adjust_cwd] ( identifier[path] )
identifier[self] . identifier[_log] ( identifier[DEBUG] , literal[string] . identifier[format] ( identifier[path] , identifier[mode] ))
identifier[attr] = identifier[SFTPAttributes] ()
identifier[attr] . identifier[st_mode] = identifier[mode]
identifier[self] . identifier[_request] ( identifier[CMD_MKDIR] , identifier[path] , identifier[attr] ) | def mkdir(self, path, mode=o777):
"""
Create a folder (directory) named ``path`` with numeric mode ``mode``.
The default mode is 0777 (octal). On some systems, mode is ignored.
Where it is used, the current umask value is first masked out.
:param str path: name of the folder to create
:param int mode: permissions (posix-style) for the newly-created folder
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'mkdir({!r}, {!r})'.format(path, mode))
attr = SFTPAttributes()
attr.st_mode = mode
self._request(CMD_MKDIR, path, attr) |
def _bnot32(ins):
    """ Negates top (Bitwise NOT) of the stack (32 bits in DEHL)
    """
    # Load the 32-bit operand, invert it via the runtime helper, push result.
    asm = _32bit_oper(ins.quad[2])
    asm.extend([
        'call __BNOT32',
        'push de',
        'push hl',
    ])
    REQUIRES.add('bnot32.asm')
    return asm
constant[ Negates top (Bitwise NOT) of the stack (32 bits in DEHL)
]
variable[output] assign[=] call[name[_32bit_oper], parameter[call[name[ins].quad][constant[2]]]]
call[name[output].append, parameter[constant[call __BNOT32]]]
call[name[output].append, parameter[constant[push de]]]
call[name[output].append, parameter[constant[push hl]]]
call[name[REQUIRES].add, parameter[constant[bnot32.asm]]]
return[name[output]] | keyword[def] identifier[_bnot32] ( identifier[ins] ):
literal[string]
identifier[output] = identifier[_32bit_oper] ( identifier[ins] . identifier[quad] [ literal[int] ])
identifier[output] . identifier[append] ( literal[string] )
identifier[output] . identifier[append] ( literal[string] )
identifier[output] . identifier[append] ( literal[string] )
identifier[REQUIRES] . identifier[add] ( literal[string] )
keyword[return] identifier[output] | def _bnot32(ins):
""" Negates top (Bitwise NOT) of the stack (32 bits in DEHL)
"""
output = _32bit_oper(ins.quad[2])
output.append('call __BNOT32')
output.append('push de')
output.append('push hl')
REQUIRES.add('bnot32.asm')
return output |
def intermediary_to_dot(tables, relationships, output):
    """Render the intermediary representation as dot and write it to *output*."""
    rendered = _intermediary_to_dot(tables, relationships)
    with open(output, "w") as out_handle:
        out_handle.write(rendered)
constant[ Save the intermediary representation to dot format. ]
variable[dot_file] assign[=] call[name[_intermediary_to_dot], parameter[name[tables], name[relationships]]]
with call[name[open], parameter[name[output], constant[w]]] begin[:]
call[name[file_out].write, parameter[name[dot_file]]] | keyword[def] identifier[intermediary_to_dot] ( identifier[tables] , identifier[relationships] , identifier[output] ):
literal[string]
identifier[dot_file] = identifier[_intermediary_to_dot] ( identifier[tables] , identifier[relationships] )
keyword[with] identifier[open] ( identifier[output] , literal[string] ) keyword[as] identifier[file_out] :
identifier[file_out] . identifier[write] ( identifier[dot_file] ) | def intermediary_to_dot(tables, relationships, output):
""" Save the intermediary representation to dot format. """
dot_file = _intermediary_to_dot(tables, relationships)
with open(output, 'w') as file_out:
file_out.write(dot_file) # depends on [control=['with'], data=['file_out']] |
def normalize_query_parameters(query_string):
    """
    normalize_query_parameters(query_string) -> dict

    Convert a query string into a dictionary mapping each parameter name
    to the sorted list of its values. Keys and values are normalized per
    the % encoding rules of RFC 3986; duplicate keys are collected into
    one list.

    A ValueError exception is raised if a percent encoding is invalid.
    """
    if query_string == "":
        return {}

    grouped = {}
    for part in query_string.split("&"):
        if part == "":
            # Empty component (e.g. "a=1&&b=2"); skip it.
            continue
        # partition mirrors split("=", 1): no "=" yields an empty value.
        raw_key, _, raw_value = part.partition("=")
        key = normalize_uri_path_component(raw_key)
        value = normalize_uri_path_component(raw_value)
        grouped.setdefault(key, []).append(value)

    return dict([(key, sorted(values))
                 for key, values in iteritems(grouped)])
constant[
normalize_query_parameters(query_string) -> dict
Converts a query string into a dictionary mapping parameter names to a
list of the sorted values. This ensurses that the query string follows
% encoding rules according to RFC 3986 and checks for duplicate keys.
A ValueError exception is raised if a percent encoding is invalid.
]
if compare[name[query_string] equal[==] constant[]] begin[:]
return[dictionary[[], []]]
variable[components] assign[=] call[name[query_string].split, parameter[constant[&]]]
variable[result] assign[=] dictionary[[], []]
for taget[name[component]] in starred[name[components]] begin[:]
<ast.Try object at 0x7da1b094b010>
if compare[name[component] equal[==] constant[]] begin[:]
continue
variable[key] assign[=] call[name[normalize_uri_path_component], parameter[name[key]]]
variable[value] assign[=] call[name[normalize_uri_path_component], parameter[name[value]]]
if compare[name[key] in name[result]] begin[:]
call[call[name[result]][name[key]].append, parameter[name[value]]]
return[call[name[dict], parameter[<ast.ListComp object at 0x7da1b094a080>]]] | keyword[def] identifier[normalize_query_parameters] ( identifier[query_string] ):
literal[string]
keyword[if] identifier[query_string] == literal[string] :
keyword[return] {}
identifier[components] = identifier[query_string] . identifier[split] ( literal[string] )
identifier[result] ={}
keyword[for] identifier[component] keyword[in] identifier[components] :
keyword[try] :
identifier[key] , identifier[value] = identifier[component] . identifier[split] ( literal[string] , literal[int] )
keyword[except] identifier[ValueError] :
identifier[key] = identifier[component]
identifier[value] = literal[string]
keyword[if] identifier[component] == literal[string] :
keyword[continue]
identifier[key] = identifier[normalize_uri_path_component] ( identifier[key] )
identifier[value] = identifier[normalize_uri_path_component] ( identifier[value] )
keyword[if] identifier[key] keyword[in] identifier[result] :
identifier[result] [ identifier[key] ]. identifier[append] ( identifier[value] )
keyword[else] :
identifier[result] [ identifier[key] ]=[ identifier[value] ]
keyword[return] identifier[dict] ([( identifier[key] , identifier[sorted] ( identifier[values] ))
keyword[for] identifier[key] , identifier[values] keyword[in] identifier[iteritems] ( identifier[result] )]) | def normalize_query_parameters(query_string):
"""
normalize_query_parameters(query_string) -> dict
Converts a query string into a dictionary mapping parameter names to a
list of the sorted values. This ensurses that the query string follows
% encoding rules according to RFC 3986 and checks for duplicate keys.
A ValueError exception is raised if a percent encoding is invalid.
"""
if query_string == '':
return {} # depends on [control=['if'], data=[]]
components = query_string.split('&')
result = {}
for component in components:
try:
(key, value) = component.split('=', 1) # depends on [control=['try'], data=[]]
except ValueError:
key = component
value = '' # depends on [control=['except'], data=[]]
if component == '':
# Empty component; skip it.
continue # depends on [control=['if'], data=[]]
key = normalize_uri_path_component(key)
value = normalize_uri_path_component(value)
if key in result:
result[key].append(value) # depends on [control=['if'], data=['key', 'result']]
else:
result[key] = [value] # depends on [control=['for'], data=['component']]
return dict([(key, sorted(values)) for (key, values) in iteritems(result)]) |
def get_bel_stmts(self, filter=False):
    """Get relevant statements from the BEL large corpus.

    Performs a series of neighborhood queries and then takes the union of
    all the statements. Because the query process can take a long time for
    large gene lists, the resulting list of statements are cached in a
    pickle file with the filename `<basename>_bel_stmts.pkl`. If the
    pickle file is present, it is used by default; if not present, the
    queries are performed and the results are cached.

    Parameters
    ----------
    filter : bool
        If True, includes only those statements that exclusively mention
        genes in :py:attr:`gene_list`. Default is False. Note that the
        full (unfiltered) set of statements are cached.

    Returns
    -------
    list of :py:class:`indra.statements.Statement`
        List of INDRA statements extracted from the BEL large corpus.
    """
    cache_path = None
    if self.basename is not None:
        cache_path = '%s_bel_stmts.pkl' % self.basename
    # Load from the cache when one exists; otherwise run the queries.
    if cache_path is not None and os.path.isfile(cache_path):
        logger.info("Loading BEL statements from %s" % cache_path)
        with open(cache_path, 'rb') as fh:
            bel_statements = pickle.load(fh)
    else:
        proc = bel.process_pybel_neighborhood(self.gene_list,
                                              network_file=self.bel_corpus)
        bel_statements = proc.statements
        # Persist the unfiltered results when caching is enabled.
        if cache_path is not None:
            with open(cache_path, 'wb') as fh:
                pickle.dump(bel_statements, fh)
    # Optionally restrict to statements involving only the gene set.
    if filter and len(self.gene_list) > 1:
        bel_statements = ac.filter_gene_list(bel_statements,
                                             self.gene_list, 'all')
    return bel_statements
constant[Get relevant statements from the BEL large corpus.
Performs a series of neighborhood queries and then takes the union of
all the statements. Because the query process can take a long time for
large gene lists, the resulting list of statements are cached in a
pickle file with the filename `<basename>_bel_stmts.pkl`. If the
pickle file is present, it is used by default; if not present, the
queries are performed and the results are cached.
Parameters
----------
filter : bool
If True, includes only those statements that exclusively mention
genes in :py:attr:`gene_list`. Default is False. Note that the
full (unfiltered) set of statements are cached.
Returns
-------
list of :py:class:`indra.statements.Statement`
List of INDRA statements extracted from the BEL large corpus.
]
if compare[name[self].basename is_not constant[None]] begin[:]
variable[bel_stmt_path] assign[=] binary_operation[constant[%s_bel_stmts.pkl] <ast.Mod object at 0x7da2590d6920> name[self].basename]
if <ast.BoolOp object at 0x7da1b0d18d00> begin[:]
call[name[logger].info, parameter[binary_operation[constant[Loading BEL statements from %s] <ast.Mod object at 0x7da2590d6920> name[bel_stmt_path]]]]
with call[name[open], parameter[name[bel_stmt_path], constant[rb]]] begin[:]
variable[bel_statements] assign[=] call[name[pickle].load, parameter[name[f]]]
if name[filter] begin[:]
if compare[call[name[len], parameter[name[self].gene_list]] greater[>] constant[1]] begin[:]
variable[bel_statements] assign[=] call[name[ac].filter_gene_list, parameter[name[bel_statements], name[self].gene_list, constant[all]]]
return[name[bel_statements]] | keyword[def] identifier[get_bel_stmts] ( identifier[self] , identifier[filter] = keyword[False] ):
literal[string]
keyword[if] identifier[self] . identifier[basename] keyword[is] keyword[not] keyword[None] :
identifier[bel_stmt_path] = literal[string] % identifier[self] . identifier[basename]
keyword[if] identifier[self] . identifier[basename] keyword[is] keyword[not] keyword[None] keyword[and] identifier[os] . identifier[path] . identifier[isfile] ( identifier[bel_stmt_path] ):
identifier[logger] . identifier[info] ( literal[string] % identifier[bel_stmt_path] )
keyword[with] identifier[open] ( identifier[bel_stmt_path] , literal[string] ) keyword[as] identifier[f] :
identifier[bel_statements] = identifier[pickle] . identifier[load] ( identifier[f] )
keyword[else] :
identifier[bel_proc] = identifier[bel] . identifier[process_pybel_neighborhood] ( identifier[self] . identifier[gene_list] ,
identifier[network_file] = identifier[self] . identifier[bel_corpus] )
identifier[bel_statements] = identifier[bel_proc] . identifier[statements]
keyword[if] identifier[self] . identifier[basename] keyword[is] keyword[not] keyword[None] :
keyword[with] identifier[open] ( identifier[bel_stmt_path] , literal[string] ) keyword[as] identifier[f] :
identifier[pickle] . identifier[dump] ( identifier[bel_statements] , identifier[f] )
keyword[if] identifier[filter] :
keyword[if] identifier[len] ( identifier[self] . identifier[gene_list] )> literal[int] :
identifier[bel_statements] = identifier[ac] . identifier[filter_gene_list] ( identifier[bel_statements] ,
identifier[self] . identifier[gene_list] , literal[string] )
keyword[return] identifier[bel_statements] | def get_bel_stmts(self, filter=False):
"""Get relevant statements from the BEL large corpus.
Performs a series of neighborhood queries and then takes the union of
all the statements. Because the query process can take a long time for
large gene lists, the resulting list of statements are cached in a
pickle file with the filename `<basename>_bel_stmts.pkl`. If the
pickle file is present, it is used by default; if not present, the
queries are performed and the results are cached.
Parameters
----------
filter : bool
If True, includes only those statements that exclusively mention
genes in :py:attr:`gene_list`. Default is False. Note that the
full (unfiltered) set of statements are cached.
Returns
-------
list of :py:class:`indra.statements.Statement`
List of INDRA statements extracted from the BEL large corpus.
"""
if self.basename is not None:
bel_stmt_path = '%s_bel_stmts.pkl' % self.basename # depends on [control=['if'], data=[]]
# Check for cached BEL stmt file
if self.basename is not None and os.path.isfile(bel_stmt_path):
logger.info('Loading BEL statements from %s' % bel_stmt_path)
with open(bel_stmt_path, 'rb') as f:
bel_statements = pickle.load(f) # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=[]]
else:
# No cache, so perform the queries
bel_proc = bel.process_pybel_neighborhood(self.gene_list, network_file=self.bel_corpus)
bel_statements = bel_proc.statements
# Save to pickle file if we're caching
if self.basename is not None:
with open(bel_stmt_path, 'wb') as f:
pickle.dump(bel_statements, f) # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=[]]
# Optionally filter out statements not involving only our gene set
if filter:
if len(self.gene_list) > 1:
bel_statements = ac.filter_gene_list(bel_statements, self.gene_list, 'all') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return bel_statements |
def dst(self, dt):
    """Return the DST adjustment for *dt* as a timedelta.

    http://docs.python.org/library/datetime.html#datetime.tzinfo.dst
    """
    if not self.__is_daylight_time(dt):
        # Standard time: no daylight-saving adjustment.
        return datetime.timedelta(0)
    return self.__dst_offset - self.__offset
constant[
http://docs.python.org/library/datetime.html#datetime.tzinfo.dst
]
if call[name[self].__is_daylight_time, parameter[name[dt]]] begin[:]
return[binary_operation[name[self].__dst_offset - name[self].__offset]] | keyword[def] identifier[dst] ( identifier[self] , identifier[dt] ):
literal[string]
keyword[if] identifier[self] . identifier[__is_daylight_time] ( identifier[dt] ):
keyword[return] identifier[self] . identifier[__dst_offset] - identifier[self] . identifier[__offset]
keyword[else] :
keyword[return] identifier[datetime] . identifier[timedelta] ( literal[int] ) | def dst(self, dt):
"""
http://docs.python.org/library/datetime.html#datetime.tzinfo.dst
"""
if self.__is_daylight_time(dt):
return self.__dst_offset - self.__offset # depends on [control=['if'], data=[]]
else:
return datetime.timedelta(0) |
def import_words_from_file(self,
                           inputfile: str,
                           is_diceware: bool) -> None:
    """Load the wordlist for this generator from *inputfile*.

    The file can have a single column with words or be diceware-like
    (two columns).

    Keyword arguments:
    inputfile -- A string with the path to the wordlist file to load, or
                 the value 'internal' to load the internal one.
    is_diceware -- True if the file is diceware-like.
    """
    if not Aux.isfile_notempty(inputfile):
        raise FileNotFoundError('Input file does not exists, is not valid '
                                'or is empty: {}'.format(inputfile))

    # Entropy must be recomputed for the new list.
    self._wordlist_entropy_bits = None
    reader = (self._read_words_from_diceware if is_diceware
              else self._read_words_from_wordfile)
    self._wordlist = reader(inputfile)
constant[Import words for the wordlist from a given file.
The file can have a single column with words or be diceware-like
(two columns).
Keyword arguments:
inputfile -- A string with the path to the wordlist file to load, or
the value 'internal' to load the internal one.
is_diceware -- True if the file is diceware-like.
]
if <ast.UnaryOp object at 0x7da207f03e80> begin[:]
<ast.Raise object at 0x7da207f03a90>
name[self]._wordlist_entropy_bits assign[=] constant[None]
if name[is_diceware] begin[:]
name[self]._wordlist assign[=] call[name[self]._read_words_from_diceware, parameter[name[inputfile]]] | keyword[def] identifier[import_words_from_file] ( identifier[self] ,
identifier[inputfile] : identifier[str] ,
identifier[is_diceware] : identifier[bool] )-> keyword[None] :
literal[string]
keyword[if] keyword[not] identifier[Aux] . identifier[isfile_notempty] ( identifier[inputfile] ):
keyword[raise] identifier[FileNotFoundError] ( literal[string]
literal[string] . identifier[format] ( identifier[inputfile] ))
identifier[self] . identifier[_wordlist_entropy_bits] = keyword[None]
keyword[if] identifier[is_diceware] :
identifier[self] . identifier[_wordlist] = identifier[self] . identifier[_read_words_from_diceware] ( identifier[inputfile] )
keyword[else] :
identifier[self] . identifier[_wordlist] = identifier[self] . identifier[_read_words_from_wordfile] ( identifier[inputfile] ) | def import_words_from_file(self, inputfile: str, is_diceware: bool) -> None:
"""Import words for the wordlist from a given file.
The file can have a single column with words or be diceware-like
(two columns).
Keyword arguments:
inputfile -- A string with the path to the wordlist file to load, or
the value 'internal' to load the internal one.
is_diceware -- True if the file is diceware-like.
"""
if not Aux.isfile_notempty(inputfile):
raise FileNotFoundError('Input file does not exists, is not valid or is empty: {}'.format(inputfile)) # depends on [control=['if'], data=[]]
self._wordlist_entropy_bits = None
if is_diceware:
self._wordlist = self._read_words_from_diceware(inputfile) # depends on [control=['if'], data=[]]
else:
self._wordlist = self._read_words_from_wordfile(inputfile) |
def eval(self, script, keys=(), args=()):
    """Execute a Lua script server side.

    :param script: the Lua script source
    :param keys: key names the script may access via KEYS[]
    :param args: additional arguments exposed via ARGV[]
    :return: the result of the EVAL command execution
    """
    # Immutable () defaults replace the mutable-default-argument
    # anti-pattern; list() keeps concatenation working for any sequence.
    keys = list(keys)
    args = list(args)
    return self.execute(b'EVAL', script, len(keys), *(keys + args))
constant[Execute a Lua script server side.]
return[call[name[self].execute, parameter[constant[b'EVAL'], name[script], call[name[len], parameter[name[keys]]], <ast.Starred object at 0x7da2041dbdc0>]]] | keyword[def] identifier[eval] ( identifier[self] , identifier[script] , identifier[keys] =[], identifier[args] =[]):
literal[string]
keyword[return] identifier[self] . identifier[execute] ( literal[string] , identifier[script] , identifier[len] ( identifier[keys] ),*( identifier[keys] + identifier[args] )) | def eval(self, script, keys=[], args=[]):
"""Execute a Lua script server side."""
return self.execute(b'EVAL', script, len(keys), *keys + args) |
def validateDtdFinal(self, doc):
    """Does the final step for the dtds validation once all the
       subsets have been parsed basically it does the following
       checks described by the XML Rec - check that ENTITY and
       ENTITIES type attributes default or possible values matches
       one of the defined entities. - check that NOTATION type
       attributes default or possible values matches one of the
       defined notations. """
    # Unwrap the libxml2 object; a missing document maps to NULL.
    doc__o = doc._o if doc is not None else None
    return libxml2mod.xmlValidateDtdFinal(self._o, doc__o)
constant[Does the final step for the dtds validation once all the
subsets have been parsed basically it does the following
checks described by the XML Rec - check that ENTITY and
ENTITIES type attributes default or possible values matches
one of the defined entities. - check that NOTATION type
attributes default or possible values matches one of the
defined notations. ]
if compare[name[doc] is constant[None]] begin[:]
variable[doc__o] assign[=] constant[None]
variable[ret] assign[=] call[name[libxml2mod].xmlValidateDtdFinal, parameter[name[self]._o, name[doc__o]]]
return[name[ret]] | keyword[def] identifier[validateDtdFinal] ( identifier[self] , identifier[doc] ):
literal[string]
keyword[if] identifier[doc] keyword[is] keyword[None] : identifier[doc__o] = keyword[None]
keyword[else] : identifier[doc__o] = identifier[doc] . identifier[_o]
identifier[ret] = identifier[libxml2mod] . identifier[xmlValidateDtdFinal] ( identifier[self] . identifier[_o] , identifier[doc__o] )
keyword[return] identifier[ret] | def validateDtdFinal(self, doc):
"""Does the final step for the dtds validation once all the
subsets have been parsed basically it does the following
checks described by the XML Rec - check that ENTITY and
ENTITIES type attributes default or possible values matches
one of the defined entities. - check that NOTATION type
attributes default or possible values matches one of the
defined notations. """
if doc is None:
doc__o = None # depends on [control=['if'], data=[]]
else:
doc__o = doc._o
ret = libxml2mod.xmlValidateDtdFinal(self._o, doc__o)
return ret |
def past(self, rev=None):
    """Return a Mapping of items at or before the given revision.

    When *rev* is omitted, the revision from the most recent lookup
    is reused.
    """
    if rev is None:
        return WindowDictPastView(self._past)
    # Reposition the window at the requested revision first.
    self.seek(rev)
    return WindowDictPastView(self._past)
constant[Return a Mapping of items at or before the given revision.
Default revision is the last one looked up.
]
if compare[name[rev] is_not constant[None]] begin[:]
call[name[self].seek, parameter[name[rev]]]
return[call[name[WindowDictPastView], parameter[name[self]._past]]] | keyword[def] identifier[past] ( identifier[self] , identifier[rev] = keyword[None] ):
literal[string]
keyword[if] identifier[rev] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[seek] ( identifier[rev] )
keyword[return] identifier[WindowDictPastView] ( identifier[self] . identifier[_past] ) | def past(self, rev=None):
"""Return a Mapping of items at or before the given revision.
Default revision is the last one looked up.
"""
if rev is not None:
self.seek(rev) # depends on [control=['if'], data=['rev']]
return WindowDictPastView(self._past) |
def run_daemon(self):
    """
    Used as daemon starter.

    Delegates to the daemon runner and maps its control-flow
    exceptions onto the corresponding hooks:
    ``SystemExit`` -> ``onExit``,
    ``DaemonRunnerStopFailureError`` -> ``onStopFail``.

    Warning:
        DO NOT OVERRIDE THIS.
    """
    try:
        self.daemon_runner.do_action()
    except SystemExit:
        self.onExit()
    except daemon.runner.DaemonRunnerStopFailureError:
        self.onStopFail()
constant[
Used as daemon starter.
Warning:
DO NOT OVERRIDE THIS.
]
<ast.Try object at 0x7da1b09be6e0> | keyword[def] identifier[run_daemon] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[self] . identifier[daemon_runner] . identifier[do_action] ()
keyword[except] identifier[daemon] . identifier[runner] . identifier[DaemonRunnerStopFailureError] :
identifier[self] . identifier[onStopFail] ()
keyword[except] identifier[SystemExit] :
identifier[self] . identifier[onExit] () | def run_daemon(self):
"""
Used as daemon starter.
Warning:
DO NOT OVERRIDE THIS.
"""
try:
self.daemon_runner.do_action() # depends on [control=['try'], data=[]]
except daemon.runner.DaemonRunnerStopFailureError:
self.onStopFail() # depends on [control=['except'], data=[]]
except SystemExit:
self.onExit() # depends on [control=['except'], data=[]] |
def _get_perm_obj_or_404(self, pk=None):
    """
    Resolve the object used for permission checks, or raise Http404.

    When *pk* is given, the object is looked up directly by primary key.
    Otherwise ``get_obj`` is tried first (it only searches values filtered
    for the current user); if that raises Http404, the object is fetched
    from all objects using the standard object filters.
    Raises Http404 when nothing matches.
    """
    if pk:
        found = get_object_or_none(self.core.model, pk=pk)
    else:
        try:
            found = self.get_obj(False)
        except Http404:
            # Fall back to an unrestricted lookup by the object filters.
            found = get_object_or_none(self.core.model, **self.get_obj_filters())
    if not found:
        raise Http404
    return found
constant[
If is send parameter pk is returned object according this pk,
else is returned object from get_obj method, but it search only inside filtered values for current user,
finally if object is still None is returned according the input key from all objects.
If object does not exist is raised Http404
]
if name[pk] begin[:]
variable[obj] assign[=] call[name[get_object_or_none], parameter[name[self].core.model]]
if <ast.UnaryOp object at 0x7da18dc9a380> begin[:]
<ast.Raise object at 0x7da18dc98670>
return[name[obj]] | keyword[def] identifier[_get_perm_obj_or_404] ( identifier[self] , identifier[pk] = keyword[None] ):
literal[string]
keyword[if] identifier[pk] :
identifier[obj] = identifier[get_object_or_none] ( identifier[self] . identifier[core] . identifier[model] , identifier[pk] = identifier[pk] )
keyword[else] :
keyword[try] :
identifier[obj] = identifier[self] . identifier[get_obj] ( keyword[False] )
keyword[except] identifier[Http404] :
identifier[obj] = identifier[get_object_or_none] ( identifier[self] . identifier[core] . identifier[model] ,** identifier[self] . identifier[get_obj_filters] ())
keyword[if] keyword[not] identifier[obj] :
keyword[raise] identifier[Http404]
keyword[return] identifier[obj] | def _get_perm_obj_or_404(self, pk=None):
"""
If is send parameter pk is returned object according this pk,
else is returned object from get_obj method, but it search only inside filtered values for current user,
finally if object is still None is returned according the input key from all objects.
If object does not exist is raised Http404
"""
if pk:
obj = get_object_or_none(self.core.model, pk=pk) # depends on [control=['if'], data=[]]
else:
try:
obj = self.get_obj(False) # depends on [control=['try'], data=[]]
except Http404:
obj = get_object_or_none(self.core.model, **self.get_obj_filters()) # depends on [control=['except'], data=[]]
if not obj:
raise Http404 # depends on [control=['if'], data=[]]
return obj |
def add_option(self, section, name, value):
    """Add an option to a section of the unit file.

    Args:
        section (str): Name of the section; it is created if it doesn't exist
        name (str): Name of the option to add
        value (str): Value of the option

    Returns:
        True: The item was added

    Raises:
        RuntimeError: If the unit was loaded from fleet; updating options
            on submitted units is not supported.
    """
    # Live (submitted) units are immutable; refuse to touch them.
    if self._is_live():
        raise RuntimeError('Submitted units cannot update their options')
    self._data['options'].append({
        'section': section,
        'name': name,
        'value': value,
    })
    return True
constant[Add an option to a section of the unit file
Args:
section (str): The name of the section, If it doesn't exist it will be created
name (str): The name of the option to add
value (str): The value of the option
Returns:
True: The item was added
]
if call[name[self]._is_live, parameter[]] begin[:]
<ast.Raise object at 0x7da20cabe5f0>
variable[option] assign[=] dictionary[[<ast.Constant object at 0x7da20c6e55a0>, <ast.Constant object at 0x7da20c6e49d0>, <ast.Constant object at 0x7da20c6e5c60>], [<ast.Name object at 0x7da20c6e7580>, <ast.Name object at 0x7da20c6e4dc0>, <ast.Name object at 0x7da20c6e43d0>]]
call[call[name[self]._data][constant[options]].append, parameter[name[option]]]
return[constant[True]] | keyword[def] identifier[add_option] ( identifier[self] , identifier[section] , identifier[name] , identifier[value] ):
literal[string]
keyword[if] identifier[self] . identifier[_is_live] ():
keyword[raise] identifier[RuntimeError] ( literal[string] )
identifier[option] ={
literal[string] : identifier[section] ,
literal[string] : identifier[name] ,
literal[string] : identifier[value]
}
identifier[self] . identifier[_data] [ literal[string] ]. identifier[append] ( identifier[option] )
keyword[return] keyword[True] | def add_option(self, section, name, value):
"""Add an option to a section of the unit file
Args:
section (str): The name of the section, If it doesn't exist it will be created
name (str): The name of the option to add
value (str): The value of the option
Returns:
True: The item was added
"""
# Don't allow updating units we loaded from fleet, it's not supported
if self._is_live():
raise RuntimeError('Submitted units cannot update their options') # depends on [control=['if'], data=[]]
option = {'section': section, 'name': name, 'value': value}
self._data['options'].append(option)
return True |
def upvotes(self, option):
    """
    Set whether to filter by a user's upvoted list. Options available are
    user.ONLY, user.NOT, and None; default is None.
    """
    # Build a fresh parameter set and return a new instance of the same
    # class, leaving this one untouched.
    merged = join_params(self.parameters, {"upvotes": option})
    return self.__class__(**merged)
constant[
Set whether to filter by a user's upvoted list. Options available are
user.ONLY, user.NOT, and None; default is None.
]
variable[params] assign[=] call[name[join_params], parameter[name[self].parameters, dictionary[[<ast.Constant object at 0x7da1b1a2d150>], [<ast.Name object at 0x7da1b1a2df60>]]]]
return[call[name[self].__class__, parameter[]]] | keyword[def] identifier[upvotes] ( identifier[self] , identifier[option] ):
literal[string]
identifier[params] = identifier[join_params] ( identifier[self] . identifier[parameters] ,{ literal[string] : identifier[option] })
keyword[return] identifier[self] . identifier[__class__] (** identifier[params] ) | def upvotes(self, option):
"""
Set whether to filter by a user's upvoted list. Options available are
user.ONLY, user.NOT, and None; default is None.
"""
params = join_params(self.parameters, {'upvotes': option})
return self.__class__(**params) |
def result_dataframe(count, x, width, xmin=None, xmax=None):
    """
    Create a dataframe to hold bin information

    Parameters
    ----------
    count : array_like
        Number of observations in each bin.
    x : array_like
        Bin midpoints.
    width : array_like
        Bin widths.
    xmin, xmax : array_like, optional
        Lower/upper bin edges. When omitted they are computed from
        ``x`` and ``width``.

    Returns
    -------
    pandas.DataFrame
        One row per bin with columns ``count``, ``x``, ``xmin``, ``xmax``,
        ``width``, ``density`` (scaled to integrate to 1), ``ncount``
        (count scaled to a max of 1) and ``ndensity`` (density scaled
        to a max of 1).
    """
    if xmin is None:
        xmin = x - width/2
    if xmax is None:
        xmax = x + width/2
    # Eliminate any numerical roundoff discrepancies
    # between the edges
    xmin[1:] = xmax[:-1]
    density = (count/width) / np.sum(np.abs(count))
    out = pd.DataFrame({
        'count': count,
        'x': x,
        'xmin': xmin,
        'xmax': xmax,
        'width': width,
        'density': density,
        'ncount': count/np.max(np.abs(count)),
        # BUG FIX: ndensity is the density scaled to a maximum of 1
        # (it was previously count/max(|density|), which mixes units
        # and does not peak at 1).
        'ndensity': density/np.max(np.abs(density))})
    return out
constant[
Create a dataframe to hold bin information
]
if compare[name[xmin] is constant[None]] begin[:]
variable[xmin] assign[=] binary_operation[name[x] - binary_operation[name[width] / constant[2]]]
if compare[name[xmax] is constant[None]] begin[:]
variable[xmax] assign[=] binary_operation[name[x] + binary_operation[name[width] / constant[2]]]
call[name[xmin]][<ast.Slice object at 0x7da207f033d0>] assign[=] call[name[xmax]][<ast.Slice object at 0x7da207f00070>]
variable[density] assign[=] binary_operation[binary_operation[name[count] / name[width]] / call[name[np].sum, parameter[call[name[np].abs, parameter[name[count]]]]]]
variable[out] assign[=] call[name[pd].DataFrame, parameter[dictionary[[<ast.Constant object at 0x7da207f00640>, <ast.Constant object at 0x7da207f02740>, <ast.Constant object at 0x7da207f01810>, <ast.Constant object at 0x7da207f03460>, <ast.Constant object at 0x7da207f003d0>, <ast.Constant object at 0x7da207f00880>, <ast.Constant object at 0x7da207f02e00>, <ast.Constant object at 0x7da207f03ee0>], [<ast.Name object at 0x7da207f00340>, <ast.Name object at 0x7da207f01c90>, <ast.Name object at 0x7da207f03520>, <ast.Name object at 0x7da207f03880>, <ast.Name object at 0x7da207f00d30>, <ast.Name object at 0x7da207f00790>, <ast.BinOp object at 0x7da207f02260>, <ast.BinOp object at 0x7da207f00bb0>]]]]
return[name[out]] | keyword[def] identifier[result_dataframe] ( identifier[count] , identifier[x] , identifier[width] , identifier[xmin] = keyword[None] , identifier[xmax] = keyword[None] ):
literal[string]
keyword[if] identifier[xmin] keyword[is] keyword[None] :
identifier[xmin] = identifier[x] - identifier[width] / literal[int]
keyword[if] identifier[xmax] keyword[is] keyword[None] :
identifier[xmax] = identifier[x] + identifier[width] / literal[int]
identifier[xmin] [ literal[int] :]= identifier[xmax] [:- literal[int] ]
identifier[density] =( identifier[count] / identifier[width] )/ identifier[np] . identifier[sum] ( identifier[np] . identifier[abs] ( identifier[count] ))
identifier[out] = identifier[pd] . identifier[DataFrame] ({
literal[string] : identifier[count] ,
literal[string] : identifier[x] ,
literal[string] : identifier[xmin] ,
literal[string] : identifier[xmax] ,
literal[string] : identifier[width] ,
literal[string] : identifier[density] ,
literal[string] : identifier[count] / identifier[np] . identifier[max] ( identifier[np] . identifier[abs] ( identifier[count] )),
literal[string] : identifier[count] / identifier[np] . identifier[max] ( identifier[np] . identifier[abs] ( identifier[density] ))})
keyword[return] identifier[out] | def result_dataframe(count, x, width, xmin=None, xmax=None):
"""
Create a dataframe to hold bin information
"""
if xmin is None:
xmin = x - width / 2 # depends on [control=['if'], data=['xmin']]
if xmax is None:
xmax = x + width / 2 # depends on [control=['if'], data=['xmax']]
# Eliminate any numerical roundoff discrepancies
# between the edges
xmin[1:] = xmax[:-1]
density = count / width / np.sum(np.abs(count))
out = pd.DataFrame({'count': count, 'x': x, 'xmin': xmin, 'xmax': xmax, 'width': width, 'density': density, 'ncount': count / np.max(np.abs(count)), 'ndensity': count / np.max(np.abs(density))})
return out |
def CQO(cpu):
    """
    RDX:RAX = sign-extend of RAX.

    RAX is widened to 128 bits with its sign preserved; the low 64 bits
    are written back to RAX and the high 64 bits to RDX.
    """
    widened = Operators.SEXTEND(cpu.RAX, 64, 128)
    cpu.RDX = Operators.EXTRACT(widened, 64, 64)
    cpu.RAX = Operators.EXTRACT(widened, 0, 64)
constant[
RDX:RAX = sign-extend of RAX.
]
variable[res] assign[=] call[name[Operators].SEXTEND, parameter[name[cpu].RAX, constant[64], constant[128]]]
name[cpu].RAX assign[=] call[name[Operators].EXTRACT, parameter[name[res], constant[0], constant[64]]]
name[cpu].RDX assign[=] call[name[Operators].EXTRACT, parameter[name[res], constant[64], constant[64]]] | keyword[def] identifier[CQO] ( identifier[cpu] ):
literal[string]
identifier[res] = identifier[Operators] . identifier[SEXTEND] ( identifier[cpu] . identifier[RAX] , literal[int] , literal[int] )
identifier[cpu] . identifier[RAX] = identifier[Operators] . identifier[EXTRACT] ( identifier[res] , literal[int] , literal[int] )
identifier[cpu] . identifier[RDX] = identifier[Operators] . identifier[EXTRACT] ( identifier[res] , literal[int] , literal[int] ) | def CQO(cpu):
"""
RDX:RAX = sign-extend of RAX.
"""
res = Operators.SEXTEND(cpu.RAX, 64, 128)
cpu.RAX = Operators.EXTRACT(res, 0, 64)
cpu.RDX = Operators.EXTRACT(res, 64, 64) |
def _extend_component_definitions(self, graph):
        """
        Read graph and update component definitions with related elements

        For every component definition already stored in ``self._components``,
        look up and attach:

        - sub-components (``SBOL.component`` triples),
        - sequence annotations, including their Range / Cut /
          GenericLocation location records,
        - sequence constraints (restriction plus subject/object components).

        :param graph: RDF graph holding the SBOL triples (presumably an
            ``rdflib.Graph``; only ``triples()`` and ``in`` containment
            are used here -- TODO confirm)
        """
        for def_uri, comp_def in self._components.items():
            # Store created components indexed for later lookup
            component_index = {}
            identity = URIRef(def_uri)
            # Get components
            for comp in graph.triples((identity, SBOL.component, None)):
                # comp is an (s, p, o) triple; the object is the component URI
                comp_identity = comp[2]
                ci = self._get_rdf_identified(graph, comp_identity)
                ci['maps_to'] = self._get_triplet_value(graph, comp_identity, SBOL.mapTo)
                ci['access'] = self._get_triplet_value(graph, comp_identity, SBOL.access)
                component_comp_def = self._get_triplet_value(graph, comp_identity, SBOL.definition)
                # Link back to the definition object parsed earlier
                ci['definition'] = self._components[component_comp_def]
                c = Component(**ci)
                component_index[ci['identity']] = c
            self._components[def_uri].components = list(component_index.values())
            # Get sequence annotations
            # NOTE(review): both the lowerCamel and UpperCamel predicate
            # spellings are probed -- presumably to tolerate documents from
            # different serializers; confirm which spelling is spec-correct.
            if (identity, SBOL.sequenceAnnotation, None) in graph:
                find_annotation_using = (identity, SBOL.sequenceAnnotation, None)
            else:
                find_annotation_using = (identity, SBOL.SequenceAnnotation, None)
            sequence_annotations = []
            for seq_annot in graph.triples(find_annotation_using):
                seq_identity = seq_annot[2]
                sa = self._get_rdf_identified(graph, seq_identity)
                component_to_use = self._get_triplet_value(graph, seq_identity, SBOL.component)
                # Annotations reference components built in the loop above.
                sa['component'] = component_index[component_to_use]
                sa['roles'] = self._get_triplet_value_list(graph, seq_identity, SBOL.role)
                locations = []
                for loc in graph.triples((seq_identity, SBOL.location, None)):
                    loc_identity = loc[2]
                    location = self._get_rdf_identified(graph, loc_identity)
                    location['orientation'] = self._get_triplet_value(graph, loc_identity,
                                                                      SBOL.orientation)
                    # Dispatch on the RDF type of the location node:
                    # Range (start/end), Cut (at), else a generic location.
                    location_type = URIRef(self._get_triplet_value(graph, loc_identity, RDF.type))
                    if location_type == SBOL.Range:
                        location['start'] = self._get_triplet_value(graph, loc_identity, SBOL.start)
                        location['end'] = self._get_triplet_value(graph, loc_identity, SBOL.end)
                        locations.append(Range(**location))
                    elif location_type == SBOL.Cut:
                        location['at'] = self._get_triplet_value(graph, loc_identity, SBOL.at)
                        locations.append(Cut(**location))
                    else:
                        locations.append(GenericLocation(**location))
                sa_obj = SequenceAnnotation(locations=locations, **sa)
                sequence_annotations.append(sa_obj)
            self._components[def_uri].sequence_annotations = sequence_annotations
            # Get sequence constraints
            # Same dual-spelling probe as for sequence annotations above.
            if (identity, SBOL.sequenceConstraint, None) in graph:
                find_constraint_using = (identity, SBOL.sequenceConstraint, None)
            else:
                find_constraint_using = (identity, SBOL.SequenceConstraint, None)
            sequence_constraints = []
            for seq_constraint in graph.triples(find_constraint_using):
                seq_identity = seq_constraint[2]
                sc = self._get_rdf_identified(graph, seq_identity)
                sc['restriction'] = self._get_triplet_value(graph, seq_identity, SBOL.restriction)
                subject_id = self._get_triplet_value(graph, seq_identity, SBOL.subject)
                sc['subject'] = component_index[subject_id]
                object_id = self._get_triplet_value(graph, seq_identity, SBOL.object)
                # Object is a reserved word so call it obj to prevent clashes
                sc['obj'] = component_index[object_id]
                sc_obj = SequenceConstraint(**sc)
                sequence_constraints.append(sc_obj)
            self._components[def_uri].sequence_constraints = sequence_constraints
constant[
Read graph and update component definitions with related elements
]
for taget[tuple[[<ast.Name object at 0x7da2047ebe20>, <ast.Name object at 0x7da2047e9210>]]] in starred[call[name[self]._components.items, parameter[]]] begin[:]
variable[component_index] assign[=] dictionary[[], []]
variable[identity] assign[=] call[name[URIRef], parameter[name[def_uri]]]
for taget[name[comp]] in starred[call[name[graph].triples, parameter[tuple[[<ast.Name object at 0x7da2047e8fd0>, <ast.Attribute object at 0x7da2047eb400>, <ast.Constant object at 0x7da2047ea170>]]]]] begin[:]
variable[comp_identity] assign[=] call[name[comp]][constant[2]]
variable[ci] assign[=] call[name[self]._get_rdf_identified, parameter[name[graph], name[comp_identity]]]
call[name[ci]][constant[maps_to]] assign[=] call[name[self]._get_triplet_value, parameter[name[graph], name[comp_identity], name[SBOL].mapTo]]
call[name[ci]][constant[access]] assign[=] call[name[self]._get_triplet_value, parameter[name[graph], name[comp_identity], name[SBOL].access]]
variable[component_comp_def] assign[=] call[name[self]._get_triplet_value, parameter[name[graph], name[comp_identity], name[SBOL].definition]]
call[name[ci]][constant[definition]] assign[=] call[name[self]._components][name[component_comp_def]]
variable[c] assign[=] call[name[Component], parameter[]]
call[name[component_index]][call[name[ci]][constant[identity]]] assign[=] name[c]
call[name[self]._components][name[def_uri]].components assign[=] call[name[list], parameter[call[name[component_index].values, parameter[]]]]
if compare[tuple[[<ast.Name object at 0x7da2047ea950>, <ast.Attribute object at 0x7da2047e9120>, <ast.Constant object at 0x7da2047e9060>]] in name[graph]] begin[:]
variable[find_annotation_using] assign[=] tuple[[<ast.Name object at 0x7da2047e9240>, <ast.Attribute object at 0x7da2047ebca0>, <ast.Constant object at 0x7da2047e8be0>]]
variable[sequence_annotations] assign[=] list[[]]
for taget[name[seq_annot]] in starred[call[name[graph].triples, parameter[name[find_annotation_using]]]] begin[:]
variable[seq_identity] assign[=] call[name[seq_annot]][constant[2]]
variable[sa] assign[=] call[name[self]._get_rdf_identified, parameter[name[graph], name[seq_identity]]]
variable[component_to_use] assign[=] call[name[self]._get_triplet_value, parameter[name[graph], name[seq_identity], name[SBOL].component]]
call[name[sa]][constant[component]] assign[=] call[name[component_index]][name[component_to_use]]
call[name[sa]][constant[roles]] assign[=] call[name[self]._get_triplet_value_list, parameter[name[graph], name[seq_identity], name[SBOL].role]]
variable[locations] assign[=] list[[]]
for taget[name[loc]] in starred[call[name[graph].triples, parameter[tuple[[<ast.Name object at 0x7da2047e8b20>, <ast.Attribute object at 0x7da2047ebac0>, <ast.Constant object at 0x7da2047eb640>]]]]] begin[:]
variable[loc_identity] assign[=] call[name[loc]][constant[2]]
variable[location] assign[=] call[name[self]._get_rdf_identified, parameter[name[graph], name[loc_identity]]]
call[name[location]][constant[orientation]] assign[=] call[name[self]._get_triplet_value, parameter[name[graph], name[loc_identity], name[SBOL].orientation]]
variable[location_type] assign[=] call[name[URIRef], parameter[call[name[self]._get_triplet_value, parameter[name[graph], name[loc_identity], name[RDF].type]]]]
if compare[name[location_type] equal[==] name[SBOL].Range] begin[:]
call[name[location]][constant[start]] assign[=] call[name[self]._get_triplet_value, parameter[name[graph], name[loc_identity], name[SBOL].start]]
call[name[location]][constant[end]] assign[=] call[name[self]._get_triplet_value, parameter[name[graph], name[loc_identity], name[SBOL].end]]
call[name[locations].append, parameter[call[name[Range], parameter[]]]]
variable[sa_obj] assign[=] call[name[SequenceAnnotation], parameter[]]
call[name[sequence_annotations].append, parameter[name[sa_obj]]]
call[name[self]._components][name[def_uri]].sequence_annotations assign[=] name[sequence_annotations]
if compare[tuple[[<ast.Name object at 0x7da20c6c7a60>, <ast.Attribute object at 0x7da20c6c7f10>, <ast.Constant object at 0x7da20c6c6650>]] in name[graph]] begin[:]
variable[find_constraint_using] assign[=] tuple[[<ast.Name object at 0x7da20c6c4880>, <ast.Attribute object at 0x7da20c6c4ca0>, <ast.Constant object at 0x7da20c6c7940>]]
variable[sequence_constraints] assign[=] list[[]]
for taget[name[seq_constraint]] in starred[call[name[graph].triples, parameter[name[find_constraint_using]]]] begin[:]
variable[seq_identity] assign[=] call[name[seq_constraint]][constant[2]]
variable[sc] assign[=] call[name[self]._get_rdf_identified, parameter[name[graph], name[seq_identity]]]
call[name[sc]][constant[restriction]] assign[=] call[name[self]._get_triplet_value, parameter[name[graph], name[seq_identity], name[SBOL].restriction]]
variable[subject_id] assign[=] call[name[self]._get_triplet_value, parameter[name[graph], name[seq_identity], name[SBOL].subject]]
call[name[sc]][constant[subject]] assign[=] call[name[component_index]][name[subject_id]]
variable[object_id] assign[=] call[name[self]._get_triplet_value, parameter[name[graph], name[seq_identity], name[SBOL].object]]
call[name[sc]][constant[obj]] assign[=] call[name[component_index]][name[object_id]]
variable[sc_obj] assign[=] call[name[SequenceConstraint], parameter[]]
call[name[sequence_constraints].append, parameter[name[sc_obj]]]
call[name[self]._components][name[def_uri]].sequence_constraints assign[=] name[sequence_constraints] | keyword[def] identifier[_extend_component_definitions] ( identifier[self] , identifier[graph] ):
literal[string]
keyword[for] identifier[def_uri] , identifier[comp_def] keyword[in] identifier[self] . identifier[_components] . identifier[items] ():
identifier[component_index] ={}
identifier[identity] = identifier[URIRef] ( identifier[def_uri] )
keyword[for] identifier[comp] keyword[in] identifier[graph] . identifier[triples] (( identifier[identity] , identifier[SBOL] . identifier[component] , keyword[None] )):
identifier[comp_identity] = identifier[comp] [ literal[int] ]
identifier[ci] = identifier[self] . identifier[_get_rdf_identified] ( identifier[graph] , identifier[comp_identity] )
identifier[ci] [ literal[string] ]= identifier[self] . identifier[_get_triplet_value] ( identifier[graph] , identifier[comp_identity] , identifier[SBOL] . identifier[mapTo] )
identifier[ci] [ literal[string] ]= identifier[self] . identifier[_get_triplet_value] ( identifier[graph] , identifier[comp_identity] , identifier[SBOL] . identifier[access] )
identifier[component_comp_def] = identifier[self] . identifier[_get_triplet_value] ( identifier[graph] , identifier[comp_identity] , identifier[SBOL] . identifier[definition] )
identifier[ci] [ literal[string] ]= identifier[self] . identifier[_components] [ identifier[component_comp_def] ]
identifier[c] = identifier[Component] (** identifier[ci] )
identifier[component_index] [ identifier[ci] [ literal[string] ]]= identifier[c]
identifier[self] . identifier[_components] [ identifier[def_uri] ]. identifier[components] = identifier[list] ( identifier[component_index] . identifier[values] ())
keyword[if] ( identifier[identity] , identifier[SBOL] . identifier[sequenceAnnotation] , keyword[None] ) keyword[in] identifier[graph] :
identifier[find_annotation_using] =( identifier[identity] , identifier[SBOL] . identifier[sequenceAnnotation] , keyword[None] )
keyword[else] :
identifier[find_annotation_using] =( identifier[identity] , identifier[SBOL] . identifier[SequenceAnnotation] , keyword[None] )
identifier[sequence_annotations] =[]
keyword[for] identifier[seq_annot] keyword[in] identifier[graph] . identifier[triples] ( identifier[find_annotation_using] ):
identifier[seq_identity] = identifier[seq_annot] [ literal[int] ]
identifier[sa] = identifier[self] . identifier[_get_rdf_identified] ( identifier[graph] , identifier[seq_identity] )
identifier[component_to_use] = identifier[self] . identifier[_get_triplet_value] ( identifier[graph] , identifier[seq_identity] , identifier[SBOL] . identifier[component] )
identifier[sa] [ literal[string] ]= identifier[component_index] [ identifier[component_to_use] ]
identifier[sa] [ literal[string] ]= identifier[self] . identifier[_get_triplet_value_list] ( identifier[graph] , identifier[seq_identity] , identifier[SBOL] . identifier[role] )
identifier[locations] =[]
keyword[for] identifier[loc] keyword[in] identifier[graph] . identifier[triples] (( identifier[seq_identity] , identifier[SBOL] . identifier[location] , keyword[None] )):
identifier[loc_identity] = identifier[loc] [ literal[int] ]
identifier[location] = identifier[self] . identifier[_get_rdf_identified] ( identifier[graph] , identifier[loc_identity] )
identifier[location] [ literal[string] ]= identifier[self] . identifier[_get_triplet_value] ( identifier[graph] , identifier[loc_identity] ,
identifier[SBOL] . identifier[orientation] )
identifier[location_type] = identifier[URIRef] ( identifier[self] . identifier[_get_triplet_value] ( identifier[graph] , identifier[loc_identity] , identifier[RDF] . identifier[type] ))
keyword[if] identifier[location_type] == identifier[SBOL] . identifier[Range] :
identifier[location] [ literal[string] ]= identifier[self] . identifier[_get_triplet_value] ( identifier[graph] , identifier[loc_identity] , identifier[SBOL] . identifier[start] )
identifier[location] [ literal[string] ]= identifier[self] . identifier[_get_triplet_value] ( identifier[graph] , identifier[loc_identity] , identifier[SBOL] . identifier[end] )
identifier[locations] . identifier[append] ( identifier[Range] (** identifier[location] ))
keyword[elif] identifier[location_type] == identifier[SBOL] . identifier[Cut] :
identifier[location] [ literal[string] ]= identifier[self] . identifier[_get_triplet_value] ( identifier[graph] , identifier[loc_identity] , identifier[SBOL] . identifier[at] )
identifier[locations] . identifier[append] ( identifier[Cut] (** identifier[location] ))
keyword[else] :
identifier[locations] . identifier[append] ( identifier[GenericLocation] (** identifier[location] ))
identifier[sa_obj] = identifier[SequenceAnnotation] ( identifier[locations] = identifier[locations] ,** identifier[sa] )
identifier[sequence_annotations] . identifier[append] ( identifier[sa_obj] )
identifier[self] . identifier[_components] [ identifier[def_uri] ]. identifier[sequence_annotations] = identifier[sequence_annotations]
keyword[if] ( identifier[identity] , identifier[SBOL] . identifier[sequenceConstraint] , keyword[None] ) keyword[in] identifier[graph] :
identifier[find_constraint_using] =( identifier[identity] , identifier[SBOL] . identifier[sequenceConstraint] , keyword[None] )
keyword[else] :
identifier[find_constraint_using] =( identifier[identity] , identifier[SBOL] . identifier[SequenceConstraint] , keyword[None] )
identifier[sequence_constraints] =[]
keyword[for] identifier[seq_constraint] keyword[in] identifier[graph] . identifier[triples] ( identifier[find_constraint_using] ):
identifier[seq_identity] = identifier[seq_constraint] [ literal[int] ]
identifier[sc] = identifier[self] . identifier[_get_rdf_identified] ( identifier[graph] , identifier[seq_identity] )
identifier[sc] [ literal[string] ]= identifier[self] . identifier[_get_triplet_value] ( identifier[graph] , identifier[seq_identity] , identifier[SBOL] . identifier[restriction] )
identifier[subject_id] = identifier[self] . identifier[_get_triplet_value] ( identifier[graph] , identifier[seq_identity] , identifier[SBOL] . identifier[subject] )
identifier[sc] [ literal[string] ]= identifier[component_index] [ identifier[subject_id] ]
identifier[object_id] = identifier[self] . identifier[_get_triplet_value] ( identifier[graph] , identifier[seq_identity] , identifier[SBOL] . identifier[object] )
identifier[sc] [ literal[string] ]= identifier[component_index] [ identifier[object_id] ]
identifier[sc_obj] = identifier[SequenceConstraint] (** identifier[sc] )
identifier[sequence_constraints] . identifier[append] ( identifier[sc_obj] )
identifier[self] . identifier[_components] [ identifier[def_uri] ]. identifier[sequence_constraints] = identifier[sequence_constraints] | def _extend_component_definitions(self, graph):
"""
Read graph and update component definitions with related elements
"""
for (def_uri, comp_def) in self._components.items():
# Store created components indexed for later lookup
component_index = {}
identity = URIRef(def_uri)
# Get components
for comp in graph.triples((identity, SBOL.component, None)):
comp_identity = comp[2]
ci = self._get_rdf_identified(graph, comp_identity)
ci['maps_to'] = self._get_triplet_value(graph, comp_identity, SBOL.mapTo)
ci['access'] = self._get_triplet_value(graph, comp_identity, SBOL.access)
component_comp_def = self._get_triplet_value(graph, comp_identity, SBOL.definition)
ci['definition'] = self._components[component_comp_def]
c = Component(**ci)
component_index[ci['identity']] = c # depends on [control=['for'], data=['comp']]
self._components[def_uri].components = list(component_index.values())
# Get sequence annotations
if (identity, SBOL.sequenceAnnotation, None) in graph:
find_annotation_using = (identity, SBOL.sequenceAnnotation, None) # depends on [control=['if'], data=[]]
else:
find_annotation_using = (identity, SBOL.SequenceAnnotation, None)
sequence_annotations = []
for seq_annot in graph.triples(find_annotation_using):
seq_identity = seq_annot[2]
sa = self._get_rdf_identified(graph, seq_identity)
component_to_use = self._get_triplet_value(graph, seq_identity, SBOL.component)
sa['component'] = component_index[component_to_use]
sa['roles'] = self._get_triplet_value_list(graph, seq_identity, SBOL.role)
locations = []
for loc in graph.triples((seq_identity, SBOL.location, None)):
loc_identity = loc[2]
location = self._get_rdf_identified(graph, loc_identity)
location['orientation'] = self._get_triplet_value(graph, loc_identity, SBOL.orientation)
location_type = URIRef(self._get_triplet_value(graph, loc_identity, RDF.type))
if location_type == SBOL.Range:
location['start'] = self._get_triplet_value(graph, loc_identity, SBOL.start)
location['end'] = self._get_triplet_value(graph, loc_identity, SBOL.end)
locations.append(Range(**location)) # depends on [control=['if'], data=[]]
elif location_type == SBOL.Cut:
location['at'] = self._get_triplet_value(graph, loc_identity, SBOL.at)
locations.append(Cut(**location)) # depends on [control=['if'], data=[]]
else:
locations.append(GenericLocation(**location)) # depends on [control=['for'], data=['loc']]
sa_obj = SequenceAnnotation(locations=locations, **sa)
sequence_annotations.append(sa_obj) # depends on [control=['for'], data=['seq_annot']]
self._components[def_uri].sequence_annotations = sequence_annotations
# Get sequence constraints
if (identity, SBOL.sequenceConstraint, None) in graph:
find_constraint_using = (identity, SBOL.sequenceConstraint, None) # depends on [control=['if'], data=[]]
else:
find_constraint_using = (identity, SBOL.SequenceConstraint, None)
sequence_constraints = []
for seq_constraint in graph.triples(find_constraint_using):
seq_identity = seq_constraint[2]
sc = self._get_rdf_identified(graph, seq_identity)
sc['restriction'] = self._get_triplet_value(graph, seq_identity, SBOL.restriction)
subject_id = self._get_triplet_value(graph, seq_identity, SBOL.subject)
sc['subject'] = component_index[subject_id]
object_id = self._get_triplet_value(graph, seq_identity, SBOL.object)
# Object is a reserved word so call it obj to prevent clashes
sc['obj'] = component_index[object_id]
sc_obj = SequenceConstraint(**sc)
sequence_constraints.append(sc_obj) # depends on [control=['for'], data=['seq_constraint']]
self._components[def_uri].sequence_constraints = sequence_constraints # depends on [control=['for'], data=[]] |
def generate_voxel_grid(bbox, szval, use_cubes=False):
    """ Generates the voxel grid with the desired size.
    :param bbox: bounding box
    :type bbox: list, tuple
    :param szval: number of voxels in x-, y-, z-directions
    :type szval: list, tuple
    :param use_cubes: use cube voxels instead of cuboid ones
    :type use_cubes: bool
    :return: voxel grid as a list of [min corner, max corner] pairs
    :rtype: list
    """
    # Every direction needs more than one sample to span the bounding box
    if min(szval[0], szval[1], szval[2]) <= 1:
        raise GeomdlException("Size values must be bigger than 1", data=dict(sizevals=szval))
    # Step size along each axis
    steps = []
    for axis in range(3):
        steps.append(float(bbox[1][axis] - bbox[0][axis]) / float(szval[axis] - 1))
    # Optionally force cube-shaped voxels by using the smallest step everywhere
    if use_cubes:
        steps = [min(steps)] * 3
    # Sample positions along each axis
    ranges = []
    for axis in range(3):
        ranges.append(list(linalg.frange(bbox[0][axis], bbox[1][axis], steps[axis])))
    # Build one [min corner, max corner] pair per voxel
    grid = []
    for x in ranges[0]:
        for y in ranges[1]:
            for z in ranges[2]:
                lower = [x, y, z]
                upper = [c + s for c, s in zip(lower, steps)]
                grid.append([lower, upper])
    return grid
constant[ Generates the voxel grid with the desired size.
:param bbox: bounding box
:type bbox: list, tuple
:param szval: size in x-, y-, z-directions
:type szval: list, tuple
:param use_cubes: use cube voxels instead of cuboid ones
:type use_cubes: bool
:return: voxel grid
:rtype: list
]
if <ast.BoolOp object at 0x7da1b17b65f0> begin[:]
<ast.Raise object at 0x7da1b17b4580>
variable[steps] assign[=] <ast.ListComp object at 0x7da1b17b4490>
if name[use_cubes] begin[:]
variable[min_val] assign[=] call[name[min], parameter[<ast.Starred object at 0x7da1b17b5a20>]]
variable[steps] assign[=] <ast.ListComp object at 0x7da1b17b7280>
variable[ranges] assign[=] <ast.ListComp object at 0x7da1b17b4910>
variable[voxel_grid] assign[=] list[[]]
for taget[name[u]] in starred[call[name[ranges]][constant[0]]] begin[:]
for taget[name[v]] in starred[call[name[ranges]][constant[1]]] begin[:]
for taget[name[w]] in starred[call[name[ranges]][constant[2]]] begin[:]
variable[bbmin] assign[=] list[[<ast.Name object at 0x7da1b17b4cd0>, <ast.Name object at 0x7da1b17b6fb0>, <ast.Name object at 0x7da1b17b65c0>]]
variable[bbmax] assign[=] <ast.ListComp object at 0x7da1b17b74f0>
call[name[voxel_grid].append, parameter[list[[<ast.Name object at 0x7da1b17b63b0>, <ast.Name object at 0x7da1b17b5ab0>]]]]
return[name[voxel_grid]] | keyword[def] identifier[generate_voxel_grid] ( identifier[bbox] , identifier[szval] , identifier[use_cubes] = keyword[False] ):
literal[string]
keyword[if] identifier[szval] [ literal[int] ]<= literal[int] keyword[or] identifier[szval] [ literal[int] ]<= literal[int] keyword[or] identifier[szval] [ literal[int] ]<= literal[int] :
keyword[raise] identifier[GeomdlException] ( literal[string] , identifier[data] = identifier[dict] ( identifier[sizevals] = identifier[szval] ))
identifier[steps] =[ identifier[float] ( identifier[bbox] [ literal[int] ][ identifier[idx] ]- identifier[bbox] [ literal[int] ][ identifier[idx] ])/ identifier[float] ( identifier[szval] [ identifier[idx] ]- literal[int] ) keyword[for] identifier[idx] keyword[in] identifier[range] ( literal[int] , literal[int] )]
keyword[if] identifier[use_cubes] :
identifier[min_val] = identifier[min] (* identifier[steps] )
identifier[steps] =[ identifier[min_val] keyword[for] identifier[_] keyword[in] identifier[range] ( literal[int] , literal[int] )]
identifier[ranges] =[ identifier[list] ( identifier[linalg] . identifier[frange] ( identifier[bbox] [ literal[int] ][ identifier[idx] ], identifier[bbox] [ literal[int] ][ identifier[idx] ], identifier[steps] [ identifier[idx] ])) keyword[for] identifier[idx] keyword[in] identifier[range] ( literal[int] , literal[int] )]
identifier[voxel_grid] =[]
keyword[for] identifier[u] keyword[in] identifier[ranges] [ literal[int] ]:
keyword[for] identifier[v] keyword[in] identifier[ranges] [ literal[int] ]:
keyword[for] identifier[w] keyword[in] identifier[ranges] [ literal[int] ]:
identifier[bbmin] =[ identifier[u] , identifier[v] , identifier[w] ]
identifier[bbmax] =[ identifier[k] + identifier[l] keyword[for] identifier[k] , identifier[l] keyword[in] identifier[zip] ( identifier[bbmin] , identifier[steps] )]
identifier[voxel_grid] . identifier[append] ([ identifier[bbmin] , identifier[bbmax] ])
keyword[return] identifier[voxel_grid] | def generate_voxel_grid(bbox, szval, use_cubes=False):
""" Generates the voxel grid with the desired size.
:param bbox: bounding box
:type bbox: list, tuple
:param szval: size in x-, y-, z-directions
:type szval: list, tuple
:param use_cubes: use cube voxels instead of cuboid ones
:type use_cubes: bool
:return: voxel grid
:rtype: list
"""
# Input validation
if szval[0] <= 1 or szval[1] <= 1 or szval[2] <= 1:
raise GeomdlException('Size values must be bigger than 1', data=dict(sizevals=szval)) # depends on [control=['if'], data=[]]
# Find step size for each direction
steps = [float(bbox[1][idx] - bbox[0][idx]) / float(szval[idx] - 1) for idx in range(0, 3)]
# It is possible to use cubes instead of cuboids
if use_cubes:
min_val = min(*steps)
steps = [min_val for _ in range(0, 3)] # depends on [control=['if'], data=[]]
# Find range in each direction
ranges = [list(linalg.frange(bbox[0][idx], bbox[1][idx], steps[idx])) for idx in range(0, 3)]
voxel_grid = []
for u in ranges[0]:
for v in ranges[1]:
for w in ranges[2]:
bbmin = [u, v, w]
bbmax = [k + l for (k, l) in zip(bbmin, steps)]
voxel_grid.append([bbmin, bbmax]) # depends on [control=['for'], data=['w']] # depends on [control=['for'], data=['v']] # depends on [control=['for'], data=['u']]
return voxel_grid |
def precipitation(self, val):
        """Precipitation is a value in [-1,1]"""
        try:
            grid, levels = val
        except ValueError:
            # ``val`` could not be unpacked into exactly two items
            raise ValueError("Pass an iterable: (data, thresholds)")
        # Shape checks live outside the ``try`` so their errors propagate as-is
        if grid.shape[0] != self.height:
            raise Exception("Setting data with wrong height")
        if grid.shape[1] != self.width:
            raise Exception("Setting data with wrong width")
        self.layers['precipitation'] = LayerWithThresholds(grid, levels)
constant["Precipitation is a value in [-1,1]]
<ast.Try object at 0x7da1b0655180> | keyword[def] identifier[precipitation] ( identifier[self] , identifier[val] ):
literal[string]
keyword[try] :
identifier[data] , identifier[thresholds] = identifier[val]
keyword[except] identifier[ValueError] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[else] :
keyword[if] identifier[data] . identifier[shape] [ literal[int] ]!= identifier[self] . identifier[height] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[if] identifier[data] . identifier[shape] [ literal[int] ]!= identifier[self] . identifier[width] :
keyword[raise] identifier[Exception] ( literal[string] )
identifier[self] . identifier[layers] [ literal[string] ]= identifier[LayerWithThresholds] ( identifier[data] , identifier[thresholds] ) | def precipitation(self, val):
""""Precipitation is a value in [-1,1]"""
try:
(data, thresholds) = val # depends on [control=['try'], data=[]]
except ValueError:
raise ValueError('Pass an iterable: (data, thresholds)') # depends on [control=['except'], data=[]]
else:
if data.shape[0] != self.height:
raise Exception('Setting data with wrong height') # depends on [control=['if'], data=[]]
if data.shape[1] != self.width:
raise Exception('Setting data with wrong width') # depends on [control=['if'], data=[]]
self.layers['precipitation'] = LayerWithThresholds(data, thresholds) |
def word(self, _id, padding=75):
        """Return the word stored at ``_id`` as a fixed-length int32 vector.
        The encoded word is right-padded with -1 entries up to ``padding``.
        """
        raw = self.words[_id][2]
        encoded = word_to_vector(raw)
        # Pad in place with -1 so the result always has ``padding`` entries
        encoded += [-1] * (padding - len(encoded))
        return np.array(encoded, dtype=np.int32)
constant[
Get words
]
variable[word] assign[=] call[call[name[self].words][name[_id]]][constant[2]]
variable[vec] assign[=] call[name[word_to_vector], parameter[name[word]]]
<ast.AugAssign object at 0x7da1b1ff50f0>
return[call[name[np].array, parameter[name[vec]]]] | keyword[def] identifier[word] ( identifier[self] , identifier[_id] , identifier[padding] = literal[int] ):
literal[string]
identifier[word] = identifier[self] . identifier[words] [ identifier[_id] ][ literal[int] ]
identifier[vec] = identifier[word_to_vector] ( identifier[word] )
identifier[vec] +=[- literal[int] ]*( identifier[padding] - identifier[len] ( identifier[vec] ))
keyword[return] identifier[np] . identifier[array] ( identifier[vec] , identifier[dtype] = identifier[np] . identifier[int32] ) | def word(self, _id, padding=75):
"""
Get words
"""
word = self.words[_id][2]
vec = word_to_vector(word)
vec += [-1] * (padding - len(vec))
return np.array(vec, dtype=np.int32) |
def any(self, values, axis=0):
        """compute if any item evaluates to true in each group
        Parameters
        ----------
        values : array_like, [keys, ...]
            values to take boolean predicate over per group
        axis : int, optional
            alternative reduction axis for values
        Returns
        -------
        unique: ndarray, [groups]
            unique keys
        reduced : ndarray, [groups, ...], bool
            value array, reduced over groups
        """
        values = np.asarray(values)
        # Coerce to boolean. Compare the dtype against the builtin ``bool``
        # instead of ``np.bool``: that alias was removed in NumPy 1.24 and
        # the builtin is an equivalent dtype check on every NumPy version.
        if values.dtype != bool:
            values = values != 0
        # A group holds a True iff the reduction (sum) over it is positive.
        return self.unique, self.reduce(values, axis=axis) > 0
constant[compute if any item evaluates to true in each group
Parameters
----------
values : array_like, [keys, ...]
values to take boolean predicate over per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...], np.bool
value array, reduced over groups
]
variable[values] assign[=] call[name[np].asarray, parameter[name[values]]]
if <ast.UnaryOp object at 0x7da2054a4a00> begin[:]
variable[values] assign[=] compare[name[values] not_equal[!=] constant[0]]
return[tuple[[<ast.Attribute object at 0x7da18dc985b0>, <ast.Compare object at 0x7da18dc99570>]]] | keyword[def] identifier[any] ( identifier[self] , identifier[values] , identifier[axis] = literal[int] ):
literal[string]
identifier[values] = identifier[np] . identifier[asarray] ( identifier[values] )
keyword[if] keyword[not] identifier[values] . identifier[dtype] == identifier[np] . identifier[bool] :
identifier[values] = identifier[values] != literal[int]
keyword[return] identifier[self] . identifier[unique] , identifier[self] . identifier[reduce] ( identifier[values] , identifier[axis] = identifier[axis] )> literal[int] | def any(self, values, axis=0):
"""compute if any item evaluates to true in each group
Parameters
----------
values : array_like, [keys, ...]
values to take boolean predicate over per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...], np.bool
value array, reduced over groups
"""
values = np.asarray(values)
if not values.dtype == np.bool:
values = values != 0 # depends on [control=['if'], data=[]]
return (self.unique, self.reduce(values, axis=axis) > 0) |
def _create_element(name, element_type, data, server=None):
    '''
    Create a new element
    :param name: element name (returned unquoted)
    :param element_type: REST resource type of the element to create
    :param data: element attributes; an optional ``properties`` dict is
        flattened into the colon-separated ``property`` string expected by
        the API
    :param server: target server, passed through to the API call
    '''
    # Work on a shallow copy so the caller's dict is never mutated
    data = dict(data)
    # Define property and id from name and properties + remove SaltStack parameters
    if 'properties' in data:
        # Literal colons inside values must be escaped as ``\:``
        data['property'] = ':'.join(
            '{0}={1}'.format(key, value.replace(':', '\\:'))
            for key, value in data['properties'].items()
        )
        del data['properties']
    # Send request
    _api_post(element_type, _clean_data(data), server)
    return unquote(name)
constant[
Create a new element
]
if compare[constant[properties] in name[data]] begin[:]
call[name[data]][constant[property]] assign[=] constant[]
for taget[tuple[[<ast.Name object at 0x7da1b1fa6b30>, <ast.Name object at 0x7da1b1fa68c0>]]] in starred[call[call[name[data]][constant[properties]].items, parameter[]]] begin[:]
if <ast.UnaryOp object at 0x7da1b1fa75b0> begin[:]
<ast.AugAssign object at 0x7da1b1fa63b0>
<ast.Delete object at 0x7da1b1fa73a0>
call[name[_api_post], parameter[name[element_type], call[name[_clean_data], parameter[name[data]]], name[server]]]
return[call[name[unquote], parameter[name[name]]]] | keyword[def] identifier[_create_element] ( identifier[name] , identifier[element_type] , identifier[data] , identifier[server] = keyword[None] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[data] :
identifier[data] [ literal[string] ]= literal[string]
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[data] [ literal[string] ]. identifier[items] ():
keyword[if] keyword[not] identifier[data] [ literal[string] ]:
identifier[data] [ literal[string] ]+= literal[string] . identifier[format] ( identifier[key] , identifier[value] . identifier[replace] ( literal[string] , literal[string] ))
keyword[else] :
identifier[data] [ literal[string] ]+= literal[string] . identifier[format] ( identifier[key] , identifier[value] . identifier[replace] ( literal[string] , literal[string] ))
keyword[del] identifier[data] [ literal[string] ]
identifier[_api_post] ( identifier[element_type] , identifier[_clean_data] ( identifier[data] ), identifier[server] )
keyword[return] identifier[unquote] ( identifier[name] ) | def _create_element(name, element_type, data, server=None):
"""
Create a new element
"""
# Define property and id from name and properties + remove SaltStack parameters
if 'properties' in data:
data['property'] = ''
for (key, value) in data['properties'].items():
if not data['property']:
data['property'] += '{0}={1}'.format(key, value.replace(':', '\\:')) # depends on [control=['if'], data=[]]
else:
data['property'] += ':{0}={1}'.format(key, value.replace(':', '\\:')) # depends on [control=['for'], data=[]]
del data['properties'] # depends on [control=['if'], data=['data']]
# Send request
_api_post(element_type, _clean_data(data), server)
return unquote(name) |
def hyperparameters(self):
        """Return hyperparameters used by your custom Chainer code during training."""
        hp = super(Chainer, self).hyperparameters()
        # Chainer-specific options; any entry left unset (falsy) is dropped
        # before being JSON-encoded and merged into the base hyperparameters.
        extra = {
            Chainer._use_mpi: self.use_mpi,
            Chainer._num_processes: self.num_processes,
            Chainer._process_slots_per_host: self.process_slots_per_host,
            Chainer._additional_mpi_options: self.additional_mpi_options,
        }
        extra = dict((key, val) for key, val in extra.items() if val)
        hp.update(Framework._json_encode_hyperparameters(extra))
        return hp
constant[Return hyperparameters used by your custom Chainer code during training.]
variable[hyperparameters] assign[=] call[call[name[super], parameter[name[Chainer], name[self]]].hyperparameters, parameter[]]
variable[additional_hyperparameters] assign[=] dictionary[[<ast.Attribute object at 0x7da1b1c48340>, <ast.Attribute object at 0x7da1b1c4bc40>, <ast.Attribute object at 0x7da1b1c4a020>, <ast.Attribute object at 0x7da1b1c48d00>], [<ast.Attribute object at 0x7da1b1c48b80>, <ast.Attribute object at 0x7da1b1c49bd0>, <ast.Attribute object at 0x7da1b1c49fc0>, <ast.Attribute object at 0x7da1b1c496c0>]]
variable[additional_hyperparameters] assign[=] <ast.DictComp object at 0x7da1b215c250>
call[name[hyperparameters].update, parameter[call[name[Framework]._json_encode_hyperparameters, parameter[name[additional_hyperparameters]]]]]
return[name[hyperparameters]] | keyword[def] identifier[hyperparameters] ( identifier[self] ):
literal[string]
identifier[hyperparameters] = identifier[super] ( identifier[Chainer] , identifier[self] ). identifier[hyperparameters] ()
identifier[additional_hyperparameters] ={ identifier[Chainer] . identifier[_use_mpi] : identifier[self] . identifier[use_mpi] ,
identifier[Chainer] . identifier[_num_processes] : identifier[self] . identifier[num_processes] ,
identifier[Chainer] . identifier[_process_slots_per_host] : identifier[self] . identifier[process_slots_per_host] ,
identifier[Chainer] . identifier[_additional_mpi_options] : identifier[self] . identifier[additional_mpi_options] }
identifier[additional_hyperparameters] ={ identifier[k] : identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[additional_hyperparameters] . identifier[items] () keyword[if] identifier[v] }
identifier[hyperparameters] . identifier[update] ( identifier[Framework] . identifier[_json_encode_hyperparameters] ( identifier[additional_hyperparameters] ))
keyword[return] identifier[hyperparameters] | def hyperparameters(self):
"""Return hyperparameters used by your custom Chainer code during training."""
hyperparameters = super(Chainer, self).hyperparameters()
additional_hyperparameters = {Chainer._use_mpi: self.use_mpi, Chainer._num_processes: self.num_processes, Chainer._process_slots_per_host: self.process_slots_per_host, Chainer._additional_mpi_options: self.additional_mpi_options}
# remove unset keys.
additional_hyperparameters = {k: v for (k, v) in additional_hyperparameters.items() if v}
hyperparameters.update(Framework._json_encode_hyperparameters(additional_hyperparameters))
return hyperparameters |
def spec_compliant_encrypt(claims, jwk, add_header=None, alg='RSA-OAEP',
        enc='A128CBC-HS256', rng=get_random_bytes):
    """ Encrypts the given claims and produces a :class:`~jose.JWE`
    :param claims: A `dict` representing the claims for this
        :class:`~jose.JWE`.
    :param jwk: A `dict` representing the JWK to be used for encryption of
        the CEK. This parameter is algorithm-specific.
    :param add_header: Additional items to be added to the header. Additional
        headers *will* be authenticated. Include a ``'zip'`` entry to enable
        payload compression (currently only ``'DEF'`` is supported).
    :param alg: The algorithm to use for CEK encryption
    :param enc: The algorithm to use for claims encryption
    :param rng: Random number generator. A string of random bytes is expected
        as output.
    :rtype: :class:`~jose.JWE`
    :raises: :class:`~jose.Error` if there is an error producing the JWE
    """
    # We need 5 components for JWE token
    # 1. Generate header. Explicit assignment (instead of concatenating
    # ``dict.items()`` results, which only works on Python 2 where items()
    # returns a list) is portable and guarantees ``enc``/``alg`` override
    # any caller-supplied values, matching the previous behaviour.
    header = dict(add_header or {})
    header[HEADER_ENC] = enc
    header[HEADER_ALG] = alg
    protected_header = json_encode(header)
    # 2. Generate CEK: a MAC key plus an encryption key, wrapped for the
    # recipient using the key-management algorithm ``alg``
    mac_key, enc_key = _generate_encryption_keys(enc, rng)
    encrypted_key = _encrypt_key(mac_key + enc_key, jwk, alg)
    # 3. Generate Initialization Vector
    iv = _generate_iv(enc, rng)
    # 4. Generate payload
    plaintext = json_encode(claims)
    # Compress if needed
    if HEADER_ZIP in header:
        try:
            (compression_func, _) = COMPRESSION[header[HEADER_ZIP]]
        except KeyError:
            raise Error(
                'Unsupported compression algorithm: {}'.format(header[HEADER_ZIP]))
        M = compression_func(plaintext)
    else:
        M = plaintext
    # Encrypt payload with the content-encryption algorithm ``enc``
    ((cipher, _), key_len), _ = JWA[enc]
    ciphertext = cipher(M, enc_key, iv)
    # 5. Generate authentication tag over the protected header, IV and
    # ciphertext so additional headers are authenticated too
    authentication_tag = _generate_authentication_tag(
        mac_key, protected_header, ciphertext, iv, enc
    )
    return JWE(
        *map(
            b64encode_url,
            (protected_header, encrypted_key, iv, ciphertext,
             authentication_tag)
        )
    )
constant[ Encrypts the given claims and produces a :class:`~jose.JWE`
:param claims: A `dict` representing the claims for this
:class:`~jose.JWE`.
:param jwk: A `dict` representing the JWK to be used for encryption of
the CEK. This parameter is algorithm-specific.
:param add_header: Additional items to be added to the header. Additional
headers *will* be authenticated.
:param alg: The algorithm to use for CEK encryption
:param enc: The algorithm to use for claims encryption
:param rng: Random number generator. A string of random bytes is expected
as output.
: param compression: The compression algorithm to use. Currently supports
`'DEF'`.
:rtype: :class:`~jose.JWE`
:raises: :class:`~jose.Error` if there is an error producing the JWE
]
variable[header] assign[=] call[name[dict], parameter[binary_operation[call[<ast.BoolOp object at 0x7da1b04a41f0>.items, parameter[]] + list[[<ast.Tuple object at 0x7da1b04a4670>, <ast.Tuple object at 0x7da1b04a4c40>]]]]]
variable[protected_header] assign[=] call[name[json_encode], parameter[name[header]]]
<ast.Tuple object at 0x7da1b04a57e0> assign[=] call[name[_generate_encryption_keys], parameter[name[enc], name[rng]]]
variable[encrypted_key] assign[=] call[name[_encrypt_key], parameter[binary_operation[name[mac_key] + name[enc_key]], name[jwk], name[alg]]]
variable[iv] assign[=] call[name[_generate_iv], parameter[name[enc], name[rng]]]
variable[plaintext] assign[=] call[name[json_encode], parameter[name[claims]]]
if compare[name[HEADER_ZIP] in name[header]] begin[:]
<ast.Try object at 0x7da2054a5e40>
variable[M] assign[=] call[name[compression_func], parameter[name[plaintext]]]
<ast.Tuple object at 0x7da2054a77f0> assign[=] call[name[JWA]][name[enc]]
variable[ciphertext] assign[=] call[name[cipher], parameter[name[M], name[enc_key], name[iv]]]
variable[authentication_tag] assign[=] call[name[_generate_authentication_tag], parameter[name[mac_key], name[protected_header], name[ciphertext], name[iv], name[enc]]]
return[call[name[JWE], parameter[<ast.Starred object at 0x7da2054a5c90>]]] | keyword[def] identifier[spec_compliant_encrypt] ( identifier[claims] , identifier[jwk] , identifier[add_header] = keyword[None] , identifier[alg] = literal[string] ,
identifier[enc] = literal[string] , identifier[rng] = identifier[get_random_bytes] ):
literal[string]
identifier[header] = identifier[dict] (( identifier[add_header] keyword[or] {}). identifier[items] ()+[( identifier[HEADER_ENC] , identifier[enc] ),
( identifier[HEADER_ALG] , identifier[alg] )])
identifier[protected_header] = identifier[json_encode] ( identifier[header] )
identifier[mac_key] , identifier[enc_key] = identifier[_generate_encryption_keys] ( identifier[enc] , identifier[rng] )
identifier[encrypted_key] = identifier[_encrypt_key] ( identifier[mac_key] + identifier[enc_key] , identifier[jwk] , identifier[alg] )
identifier[iv] = identifier[_generate_iv] ( identifier[enc] , identifier[rng] )
identifier[plaintext] = identifier[json_encode] ( identifier[claims] )
keyword[if] identifier[HEADER_ZIP] keyword[in] identifier[header] :
keyword[try] :
( identifier[compression_func] , identifier[_] )= identifier[COMPRESSION] [ identifier[header] [ identifier[HEADER_ZIP] ]]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[Error] (
literal[string] . identifier[format] ( identifier[header] [ identifier[HEADER_ZIP] ]))
identifier[M] = identifier[compression_func] ( identifier[plaintext] )
keyword[else] :
identifier[M] = identifier[plaintext]
(( identifier[cipher] , identifier[_] ), identifier[key_len] ), identifier[_] = identifier[JWA] [ identifier[enc] ]
identifier[ciphertext] = identifier[cipher] ( identifier[M] , identifier[enc_key] , identifier[iv] )
identifier[authentication_tag] = identifier[_generate_authentication_tag] (
identifier[mac_key] , identifier[protected_header] , identifier[ciphertext] , identifier[iv] , identifier[enc]
)
keyword[return] identifier[JWE] (
* identifier[map] (
identifier[b64encode_url] ,
( identifier[protected_header] , identifier[encrypted_key] , identifier[iv] , identifier[ciphertext] ,
identifier[authentication_tag] )
)
) | def spec_compliant_encrypt(claims, jwk, add_header=None, alg='RSA-OAEP', enc='A128CBC-HS256', rng=get_random_bytes):
""" Encrypts the given claims and produces a :class:`~jose.JWE`
:param claims: A `dict` representing the claims for this
:class:`~jose.JWE`.
:param jwk: A `dict` representing the JWK to be used for encryption of
the CEK. This parameter is algorithm-specific.
:param add_header: Additional items to be added to the header. Additional
headers *will* be authenticated.
:param alg: The algorithm to use for CEK encryption
:param enc: The algorithm to use for claims encryption
:param rng: Random number generator. A string of random bytes is expected
as output.
: param compression: The compression algorithm to use. Currently supports
`'DEF'`.
:rtype: :class:`~jose.JWE`
:raises: :class:`~jose.Error` if there is an error producing the JWE
"""
# We need 5 components for JWE token
# 1. Generate header
header = dict((add_header or {}).items() + [(HEADER_ENC, enc), (HEADER_ALG, alg)])
protected_header = json_encode(header)
# 2. Generate CEK
(mac_key, enc_key) = _generate_encryption_keys(enc, rng)
encrypted_key = _encrypt_key(mac_key + enc_key, jwk, alg)
# 3. Generate Initialization Vector
iv = _generate_iv(enc, rng)
# 4. Generate payload
plaintext = json_encode(claims)
# Compress if needed
if HEADER_ZIP in header:
try:
(compression_func, _) = COMPRESSION[header[HEADER_ZIP]] # depends on [control=['try'], data=[]]
except KeyError:
raise Error('Unsupported compression algorithm: {}'.format(header[HEADER_ZIP])) # depends on [control=['except'], data=[]]
M = compression_func(plaintext) # depends on [control=['if'], data=['HEADER_ZIP', 'header']]
else:
M = plaintext
# Encrypt payload
(((cipher, _), key_len), _) = JWA[enc]
ciphertext = cipher(M, enc_key, iv)
# 5. Generate authentication tag
authentication_tag = _generate_authentication_tag(mac_key, protected_header, ciphertext, iv, enc)
return JWE(*map(b64encode_url, (protected_header, encrypted_key, iv, ciphertext, authentication_tag))) |
def mass_3d_lens(self, R, Rs, theta_Rs):
        """Mass enclosed within a 3d sphere of radius ``R``.
        :param R: radius of the enclosing sphere
        :param Rs: scale radius of the profile
        :param theta_Rs: deflection angle at the scale radius
        :return: enclosed 3d mass
        """
        # Convert the deflection normalisation into a density normalisation,
        # then delegate the integration to ``mass_3d``.
        density_norm = self._alpha2rho0(theta_Rs, Rs)
        return self.mass_3d(R, Rs, density_norm)
constant[
mass enclosed a 3d sphere or radius r
:param r:
:param Ra:
:param Rs:
:return:
]
variable[rho0] assign[=] call[name[self]._alpha2rho0, parameter[name[theta_Rs], name[Rs]]]
variable[m_3d] assign[=] call[name[self].mass_3d, parameter[name[R], name[Rs], name[rho0]]]
return[name[m_3d]] | keyword[def] identifier[mass_3d_lens] ( identifier[self] , identifier[R] , identifier[Rs] , identifier[theta_Rs] ):
literal[string]
identifier[rho0] = identifier[self] . identifier[_alpha2rho0] ( identifier[theta_Rs] , identifier[Rs] )
identifier[m_3d] = identifier[self] . identifier[mass_3d] ( identifier[R] , identifier[Rs] , identifier[rho0] )
keyword[return] identifier[m_3d] | def mass_3d_lens(self, R, Rs, theta_Rs):
"""
mass enclosed a 3d sphere or radius r
:param r:
:param Ra:
:param Rs:
:return:
"""
rho0 = self._alpha2rho0(theta_Rs, Rs)
m_3d = self.mass_3d(R, Rs, rho0)
return m_3d |
def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_2_0):
        """
        Read the data encoding the DefaultsInformation structure and decode it
        into its constituent parts.
        Args:
            input_buffer (stream): A data stream containing encoded object
                data, supporting a read method; usually a BytearrayStream
                object.
            kmip_version (KMIPVersion): An enumeration defining the KMIP
                version with which the object will be decoded. Optional,
                defaults to KMIP 2.0.
        Raises:
            InvalidKmipEncoding: Raised if the object defaults are missing
                from the encoding.
            VersionNotSupported: Raised when a KMIP version is provided that
                does not support the DefaultsInformation structure.
        """
        # DefaultsInformation only exists in KMIP 2.0+; reject older versions
        # before touching the buffer.
        if kmip_version < enums.KMIPVersion.KMIP_2_0:
            raise exceptions.VersionNotSupported(
                "KMIP {} does not support the DefaultsInformation "
                "object.".format(
                    kmip_version.value
                )
            )
        # Decode the outer structure header; this populates self.length.
        super(DefaultsInformation, self).read(
            input_buffer,
            kmip_version=kmip_version
        )
        # Restrict further parsing to exactly this structure's payload bytes.
        local_buffer = utils.BytearrayStream(input_buffer.read(self.length))
        # Decode every ObjectDefaults substructure present in the payload.
        object_defaults = []
        while self.is_tag_next(enums.Tags.OBJECT_DEFAULTS, local_buffer):
            object_default = ObjectDefaults()
            object_default.read(local_buffer, kmip_version=kmip_version)
            object_defaults.append(object_default)
        # At least one ObjectDefaults entry must be present; an empty payload
        # is treated as an invalid encoding.
        if len(object_defaults) == 0:
            raise exceptions.InvalidKmipEncoding(
                "The DefaultsInformation encoding is missing the object "
                "defaults structure."
            )
        else:
            self._object_defaults = object_defaults
        # NOTE(review): is_oversized presumably flags unread trailing bytes
        # left in the payload — confirm against the base class.
        self.is_oversized(local_buffer)
constant[
Read the data encoding the DefaultsInformation structure and decode it
into its constituent parts.
Args:
input_buffer (stream): A data stream containing encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 2.0.
Raises:
InvalidKmipEncoding: Raised if the object defaults are missing
from the encoding.
VersionNotSupported: Raised when a KMIP version is provided that
does not support the DefaultsInformation structure.
]
if compare[name[kmip_version] less[<] name[enums].KMIPVersion.KMIP_2_0] begin[:]
<ast.Raise object at 0x7da20cabece0>
call[call[name[super], parameter[name[DefaultsInformation], name[self]]].read, parameter[name[input_buffer]]]
variable[local_buffer] assign[=] call[name[utils].BytearrayStream, parameter[call[name[input_buffer].read, parameter[name[self].length]]]]
variable[object_defaults] assign[=] list[[]]
while call[name[self].is_tag_next, parameter[name[enums].Tags.OBJECT_DEFAULTS, name[local_buffer]]] begin[:]
variable[object_default] assign[=] call[name[ObjectDefaults], parameter[]]
call[name[object_default].read, parameter[name[local_buffer]]]
call[name[object_defaults].append, parameter[name[object_default]]]
if compare[call[name[len], parameter[name[object_defaults]]] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da1b024ca90>
call[name[self].is_oversized, parameter[name[local_buffer]]] | keyword[def] identifier[read] ( identifier[self] , identifier[input_buffer] , identifier[kmip_version] = identifier[enums] . identifier[KMIPVersion] . identifier[KMIP_2_0] ):
literal[string]
keyword[if] identifier[kmip_version] < identifier[enums] . identifier[KMIPVersion] . identifier[KMIP_2_0] :
keyword[raise] identifier[exceptions] . identifier[VersionNotSupported] (
literal[string]
literal[string] . identifier[format] (
identifier[kmip_version] . identifier[value]
)
)
identifier[super] ( identifier[DefaultsInformation] , identifier[self] ). identifier[read] (
identifier[input_buffer] ,
identifier[kmip_version] = identifier[kmip_version]
)
identifier[local_buffer] = identifier[utils] . identifier[BytearrayStream] ( identifier[input_buffer] . identifier[read] ( identifier[self] . identifier[length] ))
identifier[object_defaults] =[]
keyword[while] identifier[self] . identifier[is_tag_next] ( identifier[enums] . identifier[Tags] . identifier[OBJECT_DEFAULTS] , identifier[local_buffer] ):
identifier[object_default] = identifier[ObjectDefaults] ()
identifier[object_default] . identifier[read] ( identifier[local_buffer] , identifier[kmip_version] = identifier[kmip_version] )
identifier[object_defaults] . identifier[append] ( identifier[object_default] )
keyword[if] identifier[len] ( identifier[object_defaults] )== literal[int] :
keyword[raise] identifier[exceptions] . identifier[InvalidKmipEncoding] (
literal[string]
literal[string]
)
keyword[else] :
identifier[self] . identifier[_object_defaults] = identifier[object_defaults]
identifier[self] . identifier[is_oversized] ( identifier[local_buffer] ) | def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_2_0):
"""
Read the data encoding the DefaultsInformation structure and decode it
into its constituent parts.
Args:
input_buffer (stream): A data stream containing encoded object
data, supporting a read method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 2.0.
Raises:
InvalidKmipEncoding: Raised if the object defaults are missing
from the encoding.
VersionNotSupported: Raised when a KMIP version is provided that
does not support the DefaultsInformation structure.
"""
if kmip_version < enums.KMIPVersion.KMIP_2_0:
raise exceptions.VersionNotSupported('KMIP {} does not support the DefaultsInformation object.'.format(kmip_version.value)) # depends on [control=['if'], data=['kmip_version']]
super(DefaultsInformation, self).read(input_buffer, kmip_version=kmip_version)
local_buffer = utils.BytearrayStream(input_buffer.read(self.length))
object_defaults = []
while self.is_tag_next(enums.Tags.OBJECT_DEFAULTS, local_buffer):
object_default = ObjectDefaults()
object_default.read(local_buffer, kmip_version=kmip_version)
object_defaults.append(object_default) # depends on [control=['while'], data=[]]
if len(object_defaults) == 0:
raise exceptions.InvalidKmipEncoding('The DefaultsInformation encoding is missing the object defaults structure.') # depends on [control=['if'], data=[]]
else:
self._object_defaults = object_defaults
self.is_oversized(local_buffer) |
def variability_prob(self, whiteness):
"""Use the probability of the spectral variability
to identify clouds over land.
Equation 15 (Zhu and Woodcock, 2012)
Parameters
----------
ndvi: ndarray
ndsi: ndarray
whiteness: ndarray
Output
------
ndarray :
probability of cloud over land based on variability
"""
if self.sat in ['LT5', 'LE7']:
# check for green and red saturation
# if red is saturated and less than nir, ndvi = LE07_clip_L1TP_039027_20150529_20160902_01_T1_B1.TIF
mod_ndvi = np.where(self.red_saturated & (self.nir > self.red), 0, self.ndvi)
# if green is saturated and less than swir1, ndsi = LE07_clip_L1TP_039027_20150529_20160902_01_T1_B1.TIF
mod_ndsi = np.where(self.green_saturated & (self.swir1 > self.green), 0, self.ndsi)
ndi_max = np.fmax(np.absolute(mod_ndvi), np.absolute(mod_ndsi))
else:
ndi_max = np.fmax(np.absolute(self.ndvi), np.absolute(self.ndsi))
f_max = 1.0 - np.fmax(ndi_max, whiteness)
return f_max | def function[variability_prob, parameter[self, whiteness]]:
constant[Use the probability of the spectral variability
to identify clouds over land.
Equation 15 (Zhu and Woodcock, 2012)
Parameters
----------
ndvi: ndarray
ndsi: ndarray
whiteness: ndarray
Output
------
ndarray :
probability of cloud over land based on variability
]
if compare[name[self].sat in list[[<ast.Constant object at 0x7da2044c2a10>, <ast.Constant object at 0x7da2044c37f0>]]] begin[:]
variable[mod_ndvi] assign[=] call[name[np].where, parameter[binary_operation[name[self].red_saturated <ast.BitAnd object at 0x7da2590d6b60> compare[name[self].nir greater[>] name[self].red]], constant[0], name[self].ndvi]]
variable[mod_ndsi] assign[=] call[name[np].where, parameter[binary_operation[name[self].green_saturated <ast.BitAnd object at 0x7da2590d6b60> compare[name[self].swir1 greater[>] name[self].green]], constant[0], name[self].ndsi]]
variable[ndi_max] assign[=] call[name[np].fmax, parameter[call[name[np].absolute, parameter[name[mod_ndvi]]], call[name[np].absolute, parameter[name[mod_ndsi]]]]]
variable[f_max] assign[=] binary_operation[constant[1.0] - call[name[np].fmax, parameter[name[ndi_max], name[whiteness]]]]
return[name[f_max]] | keyword[def] identifier[variability_prob] ( identifier[self] , identifier[whiteness] ):
literal[string]
keyword[if] identifier[self] . identifier[sat] keyword[in] [ literal[string] , literal[string] ]:
identifier[mod_ndvi] = identifier[np] . identifier[where] ( identifier[self] . identifier[red_saturated] &( identifier[self] . identifier[nir] > identifier[self] . identifier[red] ), literal[int] , identifier[self] . identifier[ndvi] )
identifier[mod_ndsi] = identifier[np] . identifier[where] ( identifier[self] . identifier[green_saturated] &( identifier[self] . identifier[swir1] > identifier[self] . identifier[green] ), literal[int] , identifier[self] . identifier[ndsi] )
identifier[ndi_max] = identifier[np] . identifier[fmax] ( identifier[np] . identifier[absolute] ( identifier[mod_ndvi] ), identifier[np] . identifier[absolute] ( identifier[mod_ndsi] ))
keyword[else] :
identifier[ndi_max] = identifier[np] . identifier[fmax] ( identifier[np] . identifier[absolute] ( identifier[self] . identifier[ndvi] ), identifier[np] . identifier[absolute] ( identifier[self] . identifier[ndsi] ))
identifier[f_max] = literal[int] - identifier[np] . identifier[fmax] ( identifier[ndi_max] , identifier[whiteness] )
keyword[return] identifier[f_max] | def variability_prob(self, whiteness):
"""Use the probability of the spectral variability
to identify clouds over land.
Equation 15 (Zhu and Woodcock, 2012)
Parameters
----------
ndvi: ndarray
ndsi: ndarray
whiteness: ndarray
Output
------
ndarray :
probability of cloud over land based on variability
"""
if self.sat in ['LT5', 'LE7']:
# check for green and red saturation
# if red is saturated and less than nir, ndvi = LE07_clip_L1TP_039027_20150529_20160902_01_T1_B1.TIF
mod_ndvi = np.where(self.red_saturated & (self.nir > self.red), 0, self.ndvi)
# if green is saturated and less than swir1, ndsi = LE07_clip_L1TP_039027_20150529_20160902_01_T1_B1.TIF
mod_ndsi = np.where(self.green_saturated & (self.swir1 > self.green), 0, self.ndsi)
ndi_max = np.fmax(np.absolute(mod_ndvi), np.absolute(mod_ndsi)) # depends on [control=['if'], data=[]]
else:
ndi_max = np.fmax(np.absolute(self.ndvi), np.absolute(self.ndsi))
f_max = 1.0 - np.fmax(ndi_max, whiteness)
return f_max |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.