code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def get_iso3_country_code_fuzzy(cls, country, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Tuple[[Optional[str], bool]]
"""Get ISO3 code for cls. A tuple is returned with the first value being the ISO3 code and the second
showing if the match is exact or not.
Args:
country (str): Country for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Tuple[[Optional[str], bool]]: ISO3 code and if the match is exact or (None, False).
"""
countriesdata = cls.countriesdata(use_live=use_live)
iso3 = cls.get_iso3_country_code(country,
use_live=use_live) # don't put exception param here as we don't want it to throw
if iso3 is not None:
return iso3, True
def remove_matching_from_list(wordlist, word_or_part):
for word in wordlist:
if word_or_part in word:
wordlist.remove(word)
# fuzzy matching
expanded_country_candidates = cls.expand_countryname_abbrevs(country)
match_strength = 0
matches = set()
for countryname in sorted(countriesdata['countrynames2iso3']):
for candidate in expanded_country_candidates:
simplified_country, removed_words = cls.simplify_countryname(candidate)
if simplified_country in countryname:
words = get_words_in_sentence(countryname)
new_match_strength = 0
if simplified_country:
remove_matching_from_list(words, simplified_country)
new_match_strength += 32
for word in removed_words:
if word in countryname:
remove_matching_from_list(words, word)
new_match_strength += 4
else:
if word in cls.major_differentiators:
new_match_strength -= 16
else:
new_match_strength -= 1
for word in words:
if word in cls.major_differentiators:
new_match_strength -= 16
else:
new_match_strength -= 1
iso3 = countriesdata['countrynames2iso3'][countryname]
if new_match_strength > match_strength:
match_strength = new_match_strength
matches = set()
if new_match_strength == match_strength:
matches.add(iso3)
if len(matches) == 1 and match_strength > 16:
return matches.pop(), False
# regex lookup
for iso3, regex in countriesdata['aliases'].items():
index = re.search(regex, country.upper())
if index is not None:
return iso3, False
if exception is not None:
raise exception
return None, False | def function[get_iso3_country_code_fuzzy, parameter[cls, country, use_live, exception]]:
constant[Get ISO3 code for cls. A tuple is returned with the first value being the ISO3 code and the second
showing if the match is exact or not.
Args:
country (str): Country for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Tuple[[Optional[str], bool]]: ISO3 code and if the match is exact or (None, False).
]
variable[countriesdata] assign[=] call[name[cls].countriesdata, parameter[]]
variable[iso3] assign[=] call[name[cls].get_iso3_country_code, parameter[name[country]]]
if compare[name[iso3] is_not constant[None]] begin[:]
return[tuple[[<ast.Name object at 0x7da1b1bb8820>, <ast.Constant object at 0x7da1b1bb8d30>]]]
def function[remove_matching_from_list, parameter[wordlist, word_or_part]]:
for taget[name[word]] in starred[name[wordlist]] begin[:]
if compare[name[word_or_part] in name[word]] begin[:]
call[name[wordlist].remove, parameter[name[word]]]
variable[expanded_country_candidates] assign[=] call[name[cls].expand_countryname_abbrevs, parameter[name[country]]]
variable[match_strength] assign[=] constant[0]
variable[matches] assign[=] call[name[set], parameter[]]
for taget[name[countryname]] in starred[call[name[sorted], parameter[call[name[countriesdata]][constant[countrynames2iso3]]]]] begin[:]
for taget[name[candidate]] in starred[name[expanded_country_candidates]] begin[:]
<ast.Tuple object at 0x7da1b1bba350> assign[=] call[name[cls].simplify_countryname, parameter[name[candidate]]]
if compare[name[simplified_country] in name[countryname]] begin[:]
variable[words] assign[=] call[name[get_words_in_sentence], parameter[name[countryname]]]
variable[new_match_strength] assign[=] constant[0]
if name[simplified_country] begin[:]
call[name[remove_matching_from_list], parameter[name[words], name[simplified_country]]]
<ast.AugAssign object at 0x7da2044c2680>
for taget[name[word]] in starred[name[removed_words]] begin[:]
if compare[name[word] in name[countryname]] begin[:]
call[name[remove_matching_from_list], parameter[name[words], name[word]]]
<ast.AugAssign object at 0x7da1b1906080>
for taget[name[word]] in starred[name[words]] begin[:]
if compare[name[word] in name[cls].major_differentiators] begin[:]
<ast.AugAssign object at 0x7da1b1906290>
variable[iso3] assign[=] call[call[name[countriesdata]][constant[countrynames2iso3]]][name[countryname]]
if compare[name[new_match_strength] greater[>] name[match_strength]] begin[:]
variable[match_strength] assign[=] name[new_match_strength]
variable[matches] assign[=] call[name[set], parameter[]]
if compare[name[new_match_strength] equal[==] name[match_strength]] begin[:]
call[name[matches].add, parameter[name[iso3]]]
if <ast.BoolOp object at 0x7da1b1be4c70> begin[:]
return[tuple[[<ast.Call object at 0x7da1b1be4610>, <ast.Constant object at 0x7da1b1be69b0>]]]
for taget[tuple[[<ast.Name object at 0x7da1b1be4e50>, <ast.Name object at 0x7da1b1be5b10>]]] in starred[call[call[name[countriesdata]][constant[aliases]].items, parameter[]]] begin[:]
variable[index] assign[=] call[name[re].search, parameter[name[regex], call[name[country].upper, parameter[]]]]
if compare[name[index] is_not constant[None]] begin[:]
return[tuple[[<ast.Name object at 0x7da1b1a2ec50>, <ast.Constant object at 0x7da1b1a2feb0>]]]
if compare[name[exception] is_not constant[None]] begin[:]
<ast.Raise object at 0x7da1b1a2fdc0>
return[tuple[[<ast.Constant object at 0x7da1b1a2cb50>, <ast.Constant object at 0x7da1b1a2c580>]]] | keyword[def] identifier[get_iso3_country_code_fuzzy] ( identifier[cls] , identifier[country] , identifier[use_live] = keyword[True] , identifier[exception] = keyword[None] ):
literal[string]
identifier[countriesdata] = identifier[cls] . identifier[countriesdata] ( identifier[use_live] = identifier[use_live] )
identifier[iso3] = identifier[cls] . identifier[get_iso3_country_code] ( identifier[country] ,
identifier[use_live] = identifier[use_live] )
keyword[if] identifier[iso3] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[iso3] , keyword[True]
keyword[def] identifier[remove_matching_from_list] ( identifier[wordlist] , identifier[word_or_part] ):
keyword[for] identifier[word] keyword[in] identifier[wordlist] :
keyword[if] identifier[word_or_part] keyword[in] identifier[word] :
identifier[wordlist] . identifier[remove] ( identifier[word] )
identifier[expanded_country_candidates] = identifier[cls] . identifier[expand_countryname_abbrevs] ( identifier[country] )
identifier[match_strength] = literal[int]
identifier[matches] = identifier[set] ()
keyword[for] identifier[countryname] keyword[in] identifier[sorted] ( identifier[countriesdata] [ literal[string] ]):
keyword[for] identifier[candidate] keyword[in] identifier[expanded_country_candidates] :
identifier[simplified_country] , identifier[removed_words] = identifier[cls] . identifier[simplify_countryname] ( identifier[candidate] )
keyword[if] identifier[simplified_country] keyword[in] identifier[countryname] :
identifier[words] = identifier[get_words_in_sentence] ( identifier[countryname] )
identifier[new_match_strength] = literal[int]
keyword[if] identifier[simplified_country] :
identifier[remove_matching_from_list] ( identifier[words] , identifier[simplified_country] )
identifier[new_match_strength] += literal[int]
keyword[for] identifier[word] keyword[in] identifier[removed_words] :
keyword[if] identifier[word] keyword[in] identifier[countryname] :
identifier[remove_matching_from_list] ( identifier[words] , identifier[word] )
identifier[new_match_strength] += literal[int]
keyword[else] :
keyword[if] identifier[word] keyword[in] identifier[cls] . identifier[major_differentiators] :
identifier[new_match_strength] -= literal[int]
keyword[else] :
identifier[new_match_strength] -= literal[int]
keyword[for] identifier[word] keyword[in] identifier[words] :
keyword[if] identifier[word] keyword[in] identifier[cls] . identifier[major_differentiators] :
identifier[new_match_strength] -= literal[int]
keyword[else] :
identifier[new_match_strength] -= literal[int]
identifier[iso3] = identifier[countriesdata] [ literal[string] ][ identifier[countryname] ]
keyword[if] identifier[new_match_strength] > identifier[match_strength] :
identifier[match_strength] = identifier[new_match_strength]
identifier[matches] = identifier[set] ()
keyword[if] identifier[new_match_strength] == identifier[match_strength] :
identifier[matches] . identifier[add] ( identifier[iso3] )
keyword[if] identifier[len] ( identifier[matches] )== literal[int] keyword[and] identifier[match_strength] > literal[int] :
keyword[return] identifier[matches] . identifier[pop] (), keyword[False]
keyword[for] identifier[iso3] , identifier[regex] keyword[in] identifier[countriesdata] [ literal[string] ]. identifier[items] ():
identifier[index] = identifier[re] . identifier[search] ( identifier[regex] , identifier[country] . identifier[upper] ())
keyword[if] identifier[index] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[iso3] , keyword[False]
keyword[if] identifier[exception] keyword[is] keyword[not] keyword[None] :
keyword[raise] identifier[exception]
keyword[return] keyword[None] , keyword[False] | def get_iso3_country_code_fuzzy(cls, country, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Tuple[[Optional[str], bool]]
'Get ISO3 code for cls. A tuple is returned with the first value being the ISO3 code and the second\n showing if the match is exact or not.\n\n Args:\n country (str): Country for which to get ISO3 code\n use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.\n exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.\n\n Returns:\n Tuple[[Optional[str], bool]]: ISO3 code and if the match is exact or (None, False).\n '
countriesdata = cls.countriesdata(use_live=use_live)
iso3 = cls.get_iso3_country_code(country, use_live=use_live) # don't put exception param here as we don't want it to throw
if iso3 is not None:
return (iso3, True) # depends on [control=['if'], data=['iso3']]
def remove_matching_from_list(wordlist, word_or_part):
for word in wordlist:
if word_or_part in word:
wordlist.remove(word) # depends on [control=['if'], data=['word']] # depends on [control=['for'], data=['word']]
# fuzzy matching
expanded_country_candidates = cls.expand_countryname_abbrevs(country)
match_strength = 0
matches = set()
for countryname in sorted(countriesdata['countrynames2iso3']):
for candidate in expanded_country_candidates:
(simplified_country, removed_words) = cls.simplify_countryname(candidate)
if simplified_country in countryname:
words = get_words_in_sentence(countryname)
new_match_strength = 0
if simplified_country:
remove_matching_from_list(words, simplified_country)
new_match_strength += 32 # depends on [control=['if'], data=[]]
for word in removed_words:
if word in countryname:
remove_matching_from_list(words, word)
new_match_strength += 4 # depends on [control=['if'], data=['word']]
elif word in cls.major_differentiators:
new_match_strength -= 16 # depends on [control=['if'], data=[]]
else:
new_match_strength -= 1 # depends on [control=['for'], data=['word']]
for word in words:
if word in cls.major_differentiators:
new_match_strength -= 16 # depends on [control=['if'], data=[]]
else:
new_match_strength -= 1 # depends on [control=['for'], data=['word']]
iso3 = countriesdata['countrynames2iso3'][countryname]
if new_match_strength > match_strength:
match_strength = new_match_strength
matches = set() # depends on [control=['if'], data=['new_match_strength', 'match_strength']]
if new_match_strength == match_strength:
matches.add(iso3) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['simplified_country', 'countryname']] # depends on [control=['for'], data=['candidate']] # depends on [control=['for'], data=['countryname']]
if len(matches) == 1 and match_strength > 16:
return (matches.pop(), False) # depends on [control=['if'], data=[]]
# regex lookup
for (iso3, regex) in countriesdata['aliases'].items():
index = re.search(regex, country.upper())
if index is not None:
return (iso3, False) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if exception is not None:
raise exception # depends on [control=['if'], data=['exception']]
return (None, False) |
def autoinit(fn):
"""Automates initialization so things are more composable.
* All specified kwargs in the class and all autoinit classes in
inheritance hierarchy will be setattr'd at the end of initialization,
with defaults derived from the inheritance hierarchy as well.
* If **kwargs is explicitly specified, the __init__ method will be called.
* If a default is specified as a new[class name] then a default instance
of that class will be initialized as the value.
class Base(object):
@autoinit
def __init__(self, a="A", **kwargs):
print "In Base."
class Base2(object):
@autoinit
def __init__(self, a="A2"):
print "In Base2."
class T(Base, Base2):
@autoinit
def __init__(self, b=new[list], **kwargs):
print "In T."
t = T()
print t.a, t.b
t = T(['x'])
print t.a, t.b
"""
if fn is None:
fn = _empty_init
if fn_has_args(fn):
raise Error("*args support is not available in autoinit.")
# its pretty hard to support this, though doable if really needed...
__defaults = fn_kwargs(fn)
avail_ac = fn_available_argcount(fn)
avail_args = list(fn.__code__.co_varnames[1:avail_ac])
signature = fn_signature(fn,
argument_transform=(lambda name: name),
default_transform=(lambda name, _: "%s=__defaults['%s']" % (name,name)),
vararg_transform=None,
kwargs_transform=(lambda _: "**__kwargs"))
signature[0] = "self"
call_signature = fn_signature(fn,
argument_transform=(lambda name: "%s=%s" % (name, name)),
default_transform=(lambda name, _: "%s=%s" % (name,name)),
vararg_transform=None,
kwargs_transform=(lambda _: "**__kwargs"))
call_signature[0] = "self"
if not fn_has_kwargs(fn):
signature.append("**__kwargs")
call_signature.append("**__kwargs")
signature = ", ".join(signature)
call_signature = ", ".join(call_signature)
avail_args = repr(tuple(avail_args))
code = '''def __init__(%(signature)s):
__cls = self.__class__
__mro = tuple(__cls.mro())
# call up the mro
for __base in __mro:
if __base is object: continue
try:
__wrapped_init = __base.__init__.__wrapped_init
except AttributeError:
# not an autoinit class
pass
else:
# **kwargs signals that the initializer wants to be called
if __wrapped_init and fn_has_kwargs(__wrapped_init):
__wrapped_init(%(call_signature)s)
# get defaults from hierarchy
__update_kwargs = { }
for __base in reversed(__mro):
if __base is __cls or __base is object: continue
try:
__defaults = __base.__init__.__defaults
except AttributeError:
# not an autoinit class
pass
else:
for __name, __val in __defaults.iteritems():
if __val is not Default:
__update_kwargs[__name] = __val
# get locally passed arguments into __update_kwargs
__locals = locals()
for __name in %(avail_args)s:
__val = __locals[__name]
if __val is Default:
if __name not in __update_kwargs:
raise Error("Must specify argument " + __name)
else:
__update_kwargs[__name] = __val
for __name, __val in __kwargs.iteritems():
if __val is Default:
if __name not in __update_kwargs:
raise Error("Must specify argument " + __name)
else:
__update_kwargs[__name] = __val
# set attributes according to kwargs
for __name, __val in __update_kwargs.iteritems():
if isinstance(__val, _new_initializer):
setattr(self, __name, __val())
else:
setattr(self, __name, __val)
''' % locals()
exec(code, globals(), locals())
#
# i know, exec -- no other way to get the signature to match it seems
# unless i build it out of an abstract syntax tree or something, which
# seems excessive. or i could inspect the signature and do stuff dynamically
# but that is troublesome and the documentation generators won't like it
#
# if you want to try to fix it to not use exec but retain the semantics
# please do.
#
# -cyrus
init = eval('__init__')
init.__wrapped_init = fn #@UndefinedVariable
init.__defaults = __defaults #@UndefinedVariable
_functools.update_wrapper(init, fn) #@UndefinedVariable
return init #@UndefinedVariable | def function[autoinit, parameter[fn]]:
constant[Automates initialization so things are more composable.
* All specified kwargs in the class and all autoinit classes in
inheritance hierarchy will be setattr'd at the end of initialization,
with defaults derived from the inheritance hierarchy as well.
* If **kwargs is explicitly specified, the __init__ method will be called.
* If a default is specified as a new[class name] then a default instance
of that class will be initialized as the value.
class Base(object):
@autoinit
def __init__(self, a="A", **kwargs):
print "In Base."
class Base2(object):
@autoinit
def __init__(self, a="A2"):
print "In Base2."
class T(Base, Base2):
@autoinit
def __init__(self, b=new[list], **kwargs):
print "In T."
t = T()
print t.a, t.b
t = T(['x'])
print t.a, t.b
]
if compare[name[fn] is constant[None]] begin[:]
variable[fn] assign[=] name[_empty_init]
if call[name[fn_has_args], parameter[name[fn]]] begin[:]
<ast.Raise object at 0x7da20e748760>
variable[__defaults] assign[=] call[name[fn_kwargs], parameter[name[fn]]]
variable[avail_ac] assign[=] call[name[fn_available_argcount], parameter[name[fn]]]
variable[avail_args] assign[=] call[name[list], parameter[call[name[fn].__code__.co_varnames][<ast.Slice object at 0x7da20e74b2b0>]]]
variable[signature] assign[=] call[name[fn_signature], parameter[name[fn]]]
call[name[signature]][constant[0]] assign[=] constant[self]
variable[call_signature] assign[=] call[name[fn_signature], parameter[name[fn]]]
call[name[call_signature]][constant[0]] assign[=] constant[self]
if <ast.UnaryOp object at 0x7da18f812aa0> begin[:]
call[name[signature].append, parameter[constant[**__kwargs]]]
call[name[call_signature].append, parameter[constant[**__kwargs]]]
variable[signature] assign[=] call[constant[, ].join, parameter[name[signature]]]
variable[call_signature] assign[=] call[constant[, ].join, parameter[name[call_signature]]]
variable[avail_args] assign[=] call[name[repr], parameter[call[name[tuple], parameter[name[avail_args]]]]]
variable[code] assign[=] binary_operation[constant[def __init__(%(signature)s):
__cls = self.__class__
__mro = tuple(__cls.mro())
# call up the mro
for __base in __mro:
if __base is object: continue
try:
__wrapped_init = __base.__init__.__wrapped_init
except AttributeError:
# not an autoinit class
pass
else:
# **kwargs signals that the initializer wants to be called
if __wrapped_init and fn_has_kwargs(__wrapped_init):
__wrapped_init(%(call_signature)s)
# get defaults from hierarchy
__update_kwargs = { }
for __base in reversed(__mro):
if __base is __cls or __base is object: continue
try:
__defaults = __base.__init__.__defaults
except AttributeError:
# not an autoinit class
pass
else:
for __name, __val in __defaults.iteritems():
if __val is not Default:
__update_kwargs[__name] = __val
# get locally passed arguments into __update_kwargs
__locals = locals()
for __name in %(avail_args)s:
__val = __locals[__name]
if __val is Default:
if __name not in __update_kwargs:
raise Error("Must specify argument " + __name)
else:
__update_kwargs[__name] = __val
for __name, __val in __kwargs.iteritems():
if __val is Default:
if __name not in __update_kwargs:
raise Error("Must specify argument " + __name)
else:
__update_kwargs[__name] = __val
# set attributes according to kwargs
for __name, __val in __update_kwargs.iteritems():
if isinstance(__val, _new_initializer):
setattr(self, __name, __val())
else:
setattr(self, __name, __val)
] <ast.Mod object at 0x7da2590d6920> call[name[locals], parameter[]]]
call[name[exec], parameter[name[code], call[name[globals], parameter[]], call[name[locals], parameter[]]]]
variable[init] assign[=] call[name[eval], parameter[constant[__init__]]]
name[init].__wrapped_init assign[=] name[fn]
name[init].__defaults assign[=] name[__defaults]
call[name[_functools].update_wrapper, parameter[name[init], name[fn]]]
return[name[init]] | keyword[def] identifier[autoinit] ( identifier[fn] ):
literal[string]
keyword[if] identifier[fn] keyword[is] keyword[None] :
identifier[fn] = identifier[_empty_init]
keyword[if] identifier[fn_has_args] ( identifier[fn] ):
keyword[raise] identifier[Error] ( literal[string] )
identifier[__defaults] = identifier[fn_kwargs] ( identifier[fn] )
identifier[avail_ac] = identifier[fn_available_argcount] ( identifier[fn] )
identifier[avail_args] = identifier[list] ( identifier[fn] . identifier[__code__] . identifier[co_varnames] [ literal[int] : identifier[avail_ac] ])
identifier[signature] = identifier[fn_signature] ( identifier[fn] ,
identifier[argument_transform] =( keyword[lambda] identifier[name] : identifier[name] ),
identifier[default_transform] =( keyword[lambda] identifier[name] , identifier[_] : literal[string] %( identifier[name] , identifier[name] )),
identifier[vararg_transform] = keyword[None] ,
identifier[kwargs_transform] =( keyword[lambda] identifier[_] : literal[string] ))
identifier[signature] [ literal[int] ]= literal[string]
identifier[call_signature] = identifier[fn_signature] ( identifier[fn] ,
identifier[argument_transform] =( keyword[lambda] identifier[name] : literal[string] %( identifier[name] , identifier[name] )),
identifier[default_transform] =( keyword[lambda] identifier[name] , identifier[_] : literal[string] %( identifier[name] , identifier[name] )),
identifier[vararg_transform] = keyword[None] ,
identifier[kwargs_transform] =( keyword[lambda] identifier[_] : literal[string] ))
identifier[call_signature] [ literal[int] ]= literal[string]
keyword[if] keyword[not] identifier[fn_has_kwargs] ( identifier[fn] ):
identifier[signature] . identifier[append] ( literal[string] )
identifier[call_signature] . identifier[append] ( literal[string] )
identifier[signature] = literal[string] . identifier[join] ( identifier[signature] )
identifier[call_signature] = literal[string] . identifier[join] ( identifier[call_signature] )
identifier[avail_args] = identifier[repr] ( identifier[tuple] ( identifier[avail_args] ))
identifier[code] = literal[string] % identifier[locals] ()
identifier[exec] ( identifier[code] , identifier[globals] (), identifier[locals] ())
identifier[init] = identifier[eval] ( literal[string] )
identifier[init] . identifier[__wrapped_init] = identifier[fn]
identifier[init] . identifier[__defaults] = identifier[__defaults]
identifier[_functools] . identifier[update_wrapper] ( identifier[init] , identifier[fn] )
keyword[return] identifier[init] | def autoinit(fn):
"""Automates initialization so things are more composable.
* All specified kwargs in the class and all autoinit classes in
inheritance hierarchy will be setattr'd at the end of initialization,
with defaults derived from the inheritance hierarchy as well.
* If **kwargs is explicitly specified, the __init__ method will be called.
* If a default is specified as a new[class name] then a default instance
of that class will be initialized as the value.
class Base(object):
@autoinit
def __init__(self, a="A", **kwargs):
print "In Base."
class Base2(object):
@autoinit
def __init__(self, a="A2"):
print "In Base2."
class T(Base, Base2):
@autoinit
def __init__(self, b=new[list], **kwargs):
print "In T."
t = T()
print t.a, t.b
t = T(['x'])
print t.a, t.b
"""
if fn is None:
fn = _empty_init # depends on [control=['if'], data=['fn']]
if fn_has_args(fn):
raise Error('*args support is not available in autoinit.') # depends on [control=['if'], data=[]]
# its pretty hard to support this, though doable if really needed...
__defaults = fn_kwargs(fn)
avail_ac = fn_available_argcount(fn)
avail_args = list(fn.__code__.co_varnames[1:avail_ac])
signature = fn_signature(fn, argument_transform=lambda name: name, default_transform=lambda name, _: "%s=__defaults['%s']" % (name, name), vararg_transform=None, kwargs_transform=lambda _: '**__kwargs')
signature[0] = 'self'
call_signature = fn_signature(fn, argument_transform=lambda name: '%s=%s' % (name, name), default_transform=lambda name, _: '%s=%s' % (name, name), vararg_transform=None, kwargs_transform=lambda _: '**__kwargs')
call_signature[0] = 'self'
if not fn_has_kwargs(fn):
signature.append('**__kwargs')
call_signature.append('**__kwargs') # depends on [control=['if'], data=[]]
signature = ', '.join(signature)
call_signature = ', '.join(call_signature)
avail_args = repr(tuple(avail_args))
code = 'def __init__(%(signature)s):\n __cls = self.__class__\n __mro = tuple(__cls.mro())\n\n # call up the mro\n for __base in __mro:\n if __base is object: continue\n try:\n __wrapped_init = __base.__init__.__wrapped_init\n except AttributeError:\n # not an autoinit class\n pass\n else:\n # **kwargs signals that the initializer wants to be called\n if __wrapped_init and fn_has_kwargs(__wrapped_init):\n __wrapped_init(%(call_signature)s)\n\n # get defaults from hierarchy\n __update_kwargs = { }\n for __base in reversed(__mro):\n if __base is __cls or __base is object: continue\n try:\n __defaults = __base.__init__.__defaults\n except AttributeError:\n # not an autoinit class\n pass\n else:\n for __name, __val in __defaults.iteritems():\n if __val is not Default:\n __update_kwargs[__name] = __val\n\n # get locally passed arguments into __update_kwargs\n __locals = locals()\n for __name in %(avail_args)s:\n __val = __locals[__name]\n if __val is Default:\n if __name not in __update_kwargs:\n raise Error("Must specify argument " + __name)\n else:\n __update_kwargs[__name] = __val\n \n for __name, __val in __kwargs.iteritems():\n if __val is Default:\n if __name not in __update_kwargs:\n raise Error("Must specify argument " + __name)\n else:\n __update_kwargs[__name] = __val\n\n # set attributes according to kwargs\n for __name, __val in __update_kwargs.iteritems():\n if isinstance(__val, _new_initializer):\n setattr(self, __name, __val())\n else:\n setattr(self, __name, __val)\n' % locals()
exec(code, globals(), locals())
#
# i know, exec -- no other way to get the signature to match it seems
# unless i build it out of an abstract syntax tree or something, which
# seems excessive. or i could inspect the signature and do stuff dynamically
# but that is troublesome and the documentation generators won't like it
#
# if you want to try to fix it to not use exec but retain the semantics
# please do.
#
# -cyrus
init = eval('__init__')
init.__wrapped_init = fn #@UndefinedVariable
init.__defaults = __defaults #@UndefinedVariable
_functools.update_wrapper(init, fn) #@UndefinedVariable
return init #@UndefinedVariable |
def getForegroundWindow(self):
""" Returns a handle to the window in the foreground """
active_app = NSWorkspace.sharedWorkspace().frontmostApplication().localizedName()
for w in self._get_window_list():
if "kCGWindowOwnerName" in w and w["kCGWindowOwnerName"] == active_app:
return w["kCGWindowNumber"] | def function[getForegroundWindow, parameter[self]]:
constant[ Returns a handle to the window in the foreground ]
variable[active_app] assign[=] call[call[call[name[NSWorkspace].sharedWorkspace, parameter[]].frontmostApplication, parameter[]].localizedName, parameter[]]
for taget[name[w]] in starred[call[name[self]._get_window_list, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da2041d9630> begin[:]
return[call[name[w]][constant[kCGWindowNumber]]] | keyword[def] identifier[getForegroundWindow] ( identifier[self] ):
literal[string]
identifier[active_app] = identifier[NSWorkspace] . identifier[sharedWorkspace] (). identifier[frontmostApplication] (). identifier[localizedName] ()
keyword[for] identifier[w] keyword[in] identifier[self] . identifier[_get_window_list] ():
keyword[if] literal[string] keyword[in] identifier[w] keyword[and] identifier[w] [ literal[string] ]== identifier[active_app] :
keyword[return] identifier[w] [ literal[string] ] | def getForegroundWindow(self):
""" Returns a handle to the window in the foreground """
active_app = NSWorkspace.sharedWorkspace().frontmostApplication().localizedName()
for w in self._get_window_list():
if 'kCGWindowOwnerName' in w and w['kCGWindowOwnerName'] == active_app:
return w['kCGWindowNumber'] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['w']] |
def pdf_lognormal_basis_integral(d, d_characteristic, s, n):
r'''Calculates the integral of the multiplication of d^n by the lognormal
pdf, given a particle diameter `d`, characteristic particle
diameter `d_characteristic`, distribution standard deviation `s`, and
exponent `n`.
.. math::
\int d^n\cdot q(d)\; dd = -\frac{1}{2} \exp\left(\frac{s^2 n^2}{2}
\right)d^n \left(\frac{d}{d_{characteristic}}\right)^{-n}
\text{erf}\left[\frac{s^2 n - \log(d/d_{characteristic})}
{\sqrt{2} s} \right]
This is the crucial integral required for interconversion between different
bases such as number density (denoted :math:`q_0(d)`), length density
(:math:`q_1(d)`), surface area density (:math:`q_2(d)`), or volume density
(:math:`q_3(d)`).
Parameters
----------
d : float
Specified particle diameter, [m]
d_characteristic : float
Characteristic particle diameter; often D[3, 3] is used for this
purpose but not always, [m]
s : float
Distribution standard deviation, [-]
n : int
Exponent of the multiplied n
Returns
-------
pdf_basis_integral : float
Integral of lognormal pdf multiplied by d^n, [-]
Notes
-----
This integral has been verified numerically. This integral is itself
integrated, so it is crucial to obtain an analytical form for at least
this integral.
Note overflow or zero division issues may occur for very large values of
`s`, larger than 10. No mathematical limit was able to be obtained with
a CAS.
Examples
--------
>>> pdf_lognormal_basis_integral(d=1E-4, d_characteristic=1E-5, s=1.1, n=-2)
56228306549.26362
'''
try:
s2 = s*s
t0 = exp(s2*n*n*0.5)
d_ratio = d/d_characteristic
t1 = (d/(d_ratio))**n
t2 = erf((s2*n - log(d_ratio))/(2.**0.5*s))
return -0.5*t0*t1*t2
except (OverflowError, ZeroDivisionError, ValueError):
return pdf_lognormal_basis_integral(d=1E-80, d_characteristic=d_characteristic, s=s, n=n) | def function[pdf_lognormal_basis_integral, parameter[d, d_characteristic, s, n]]:
constant[Calculates the integral of the multiplication of d^n by the lognormal
pdf, given a particle diameter `d`, characteristic particle
diameter `d_characteristic`, distribution standard deviation `s`, and
exponent `n`.
.. math::
\int d^n\cdot q(d)\; dd = -\frac{1}{2} \exp\left(\frac{s^2 n^2}{2}
\right)d^n \left(\frac{d}{d_{characteristic}}\right)^{-n}
\text{erf}\left[\frac{s^2 n - \log(d/d_{characteristic})}
{\sqrt{2} s} \right]
This is the crucial integral required for interconversion between different
bases such as number density (denoted :math:`q_0(d)`), length density
(:math:`q_1(d)`), surface area density (:math:`q_2(d)`), or volume density
(:math:`q_3(d)`).
Parameters
----------
d : float
Specified particle diameter, [m]
d_characteristic : float
Characteristic particle diameter; often D[3, 3] is used for this
purpose but not always, [m]
s : float
Distribution standard deviation, [-]
n : int
Exponent of the multiplied n
Returns
-------
pdf_basis_integral : float
Integral of lognormal pdf multiplied by d^n, [-]
Notes
-----
This integral has been verified numerically. This integral is itself
integrated, so it is crucial to obtain an analytical form for at least
this integral.
Note overflow or zero division issues may occur for very large values of
`s`, larger than 10. No mathematical limit was able to be obtained with
a CAS.
Examples
--------
>>> pdf_lognormal_basis_integral(d=1E-4, d_characteristic=1E-5, s=1.1, n=-2)
56228306549.26362
]
<ast.Try object at 0x7da1b12a0bb0> | keyword[def] identifier[pdf_lognormal_basis_integral] ( identifier[d] , identifier[d_characteristic] , identifier[s] , identifier[n] ):
literal[string]
keyword[try] :
identifier[s2] = identifier[s] * identifier[s]
identifier[t0] = identifier[exp] ( identifier[s2] * identifier[n] * identifier[n] * literal[int] )
identifier[d_ratio] = identifier[d] / identifier[d_characteristic]
identifier[t1] =( identifier[d] /( identifier[d_ratio] ))** identifier[n]
identifier[t2] = identifier[erf] (( identifier[s2] * identifier[n] - identifier[log] ( identifier[d_ratio] ))/( literal[int] ** literal[int] * identifier[s] ))
keyword[return] - literal[int] * identifier[t0] * identifier[t1] * identifier[t2]
keyword[except] ( identifier[OverflowError] , identifier[ZeroDivisionError] , identifier[ValueError] ):
keyword[return] identifier[pdf_lognormal_basis_integral] ( identifier[d] = literal[int] , identifier[d_characteristic] = identifier[d_characteristic] , identifier[s] = identifier[s] , identifier[n] = identifier[n] ) | def pdf_lognormal_basis_integral(d, d_characteristic, s, n):
"""Calculates the integral of the multiplication of d^n by the lognormal
pdf, given a particle diameter `d`, characteristic particle
diameter `d_characteristic`, distribution standard deviation `s`, and
exponent `n`.
.. math::
\\int d^n\\cdot q(d)\\; dd = -\\frac{1}{2} \\exp\\left(\\frac{s^2 n^2}{2}
\\right)d^n \\left(\\frac{d}{d_{characteristic}}\\right)^{-n}
\\text{erf}\\left[\\frac{s^2 n - \\log(d/d_{characteristic})}
{\\sqrt{2} s} \\right]
This is the crucial integral required for interconversion between different
bases such as number density (denoted :math:`q_0(d)`), length density
(:math:`q_1(d)`), surface area density (:math:`q_2(d)`), or volume density
(:math:`q_3(d)`).
Parameters
----------
d : float
Specified particle diameter, [m]
d_characteristic : float
Characteristic particle diameter; often D[3, 3] is used for this
purpose but not always, [m]
s : float
Distribution standard deviation, [-]
n : int
Exponent of the multiplied n
Returns
-------
pdf_basis_integral : float
Integral of lognormal pdf multiplied by d^n, [-]
Notes
-----
This integral has been verified numerically. This integral is itself
integrated, so it is crucial to obtain an analytical form for at least
this integral.
Note overflow or zero division issues may occur for very large values of
`s`, larger than 10. No mathematical limit was able to be obtained with
a CAS.
Examples
--------
>>> pdf_lognormal_basis_integral(d=1E-4, d_characteristic=1E-5, s=1.1, n=-2)
56228306549.26362
"""
try:
s2 = s * s
t0 = exp(s2 * n * n * 0.5)
d_ratio = d / d_characteristic
t1 = (d / d_ratio) ** n
t2 = erf((s2 * n - log(d_ratio)) / (2.0 ** 0.5 * s))
return -0.5 * t0 * t1 * t2 # depends on [control=['try'], data=[]]
except (OverflowError, ZeroDivisionError, ValueError):
return pdf_lognormal_basis_integral(d=1e-80, d_characteristic=d_characteristic, s=s, n=n) # depends on [control=['except'], data=[]] |
def Document(docx=None):
    """
    Load and return a |Document| object from *docx*.
    *docx* may be a path to a ``.docx`` file (a string) or a file-like
    object. When *docx* is omitted or ``None``, the built-in default
    document "template" is loaded instead.
    """
    if docx is None:
        docx = _default_docx_path()
    document_part = Package.open(docx).main_document_part
    content_type = document_part.content_type
    if content_type != CT.WML_DOCUMENT_MAIN:
        raise ValueError(
            "file '%s' is not a Word file, content type is '%s'"
            % (docx, content_type)
        )
    return document_part.document
constant[
Return a |Document| object loaded from *docx*, where *docx* can be
either a path to a ``.docx`` file (a string) or a file-like object. If
*docx* is missing or ``None``, the built-in default document "template"
is loaded.
]
variable[docx] assign[=] <ast.IfExp object at 0x7da1b21baaa0>
variable[document_part] assign[=] call[name[Package].open, parameter[name[docx]]].main_document_part
if compare[name[document_part].content_type not_equal[!=] name[CT].WML_DOCUMENT_MAIN] begin[:]
variable[tmpl] assign[=] constant[file '%s' is not a Word file, content type is '%s']
<ast.Raise object at 0x7da1b1cb9d20>
return[name[document_part].document] | keyword[def] identifier[Document] ( identifier[docx] = keyword[None] ):
literal[string]
identifier[docx] = identifier[_default_docx_path] () keyword[if] identifier[docx] keyword[is] keyword[None] keyword[else] identifier[docx]
identifier[document_part] = identifier[Package] . identifier[open] ( identifier[docx] ). identifier[main_document_part]
keyword[if] identifier[document_part] . identifier[content_type] != identifier[CT] . identifier[WML_DOCUMENT_MAIN] :
identifier[tmpl] = literal[string]
keyword[raise] identifier[ValueError] ( identifier[tmpl] %( identifier[docx] , identifier[document_part] . identifier[content_type] ))
keyword[return] identifier[document_part] . identifier[document] | def Document(docx=None):
"""
Return a |Document| object loaded from *docx*, where *docx* can be
either a path to a ``.docx`` file (a string) or a file-like object. If
*docx* is missing or ``None``, the built-in default document "template"
is loaded.
"""
docx = _default_docx_path() if docx is None else docx
document_part = Package.open(docx).main_document_part
if document_part.content_type != CT.WML_DOCUMENT_MAIN:
tmpl = "file '%s' is not a Word file, content type is '%s'"
raise ValueError(tmpl % (docx, document_part.content_type)) # depends on [control=['if'], data=[]]
return document_part.document |
def get(self, profile_id):
    '''Return the profile with the received ID as a dict.
    A locally cached copy is returned when available; otherwise the
    profile is fetched (possibly from the web) and cached, so subsequent
    calls avoid both the filesystem and the network.
    Args:
        profile_id (str): The ID of the profile you want.
    Raises:
        RegistryError: If there was some problem opening the profile file
            or its format was incorrect.
    '''
    if profile_id not in self._profiles:
        try:
            profile = self._get_profile(profile_id)
        except (ValueError, IOError) as err:
            six.raise_from(RegistryError(err), err)
        else:
            self._profiles[profile_id] = profile
    return self._profiles[profile_id]
constant[Returns the profile with the received ID as a dict
If a local copy of the profile exists, it'll be returned. If not, it'll
be downloaded from the web. The results are cached, so any subsequent
calls won't hit the filesystem or the web.
Args:
profile_id (str): The ID of the profile you want.
Raises:
RegistryError: If there was some problem opening the profile file
or its format was incorrect.
]
if compare[name[profile_id] <ast.NotIn object at 0x7da2590d7190> name[self]._profiles] begin[:]
<ast.Try object at 0x7da1b00d5960>
return[call[name[self]._profiles][name[profile_id]]] | keyword[def] identifier[get] ( identifier[self] , identifier[profile_id] ):
literal[string]
keyword[if] identifier[profile_id] keyword[not] keyword[in] identifier[self] . identifier[_profiles] :
keyword[try] :
identifier[self] . identifier[_profiles] [ identifier[profile_id] ]= identifier[self] . identifier[_get_profile] ( identifier[profile_id] )
keyword[except] ( identifier[ValueError] ,
identifier[IOError] ) keyword[as] identifier[e] :
identifier[six] . identifier[raise_from] ( identifier[RegistryError] ( identifier[e] ), identifier[e] )
keyword[return] identifier[self] . identifier[_profiles] [ identifier[profile_id] ] | def get(self, profile_id):
"""Returns the profile with the received ID as a dict
If a local copy of the profile exists, it'll be returned. If not, it'll
be downloaded from the web. The results are cached, so any subsequent
calls won't hit the filesystem or the web.
Args:
profile_id (str): The ID of the profile you want.
Raises:
RegistryError: If there was some problem opening the profile file
or its format was incorrect.
"""
if profile_id not in self._profiles:
try:
self._profiles[profile_id] = self._get_profile(profile_id) # depends on [control=['try'], data=[]]
except (ValueError, IOError) as e:
six.raise_from(RegistryError(e), e) # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=['profile_id']]
return self._profiles[profile_id] |
def getrange(self, key_prefix, strip=False):
    """
    Get a range of keys starting with a common prefix as a mapping of
    keys to values.
    :param str key_prefix: Common prefix among all keys
    :param bool strip: Optionally strip the common prefix from the key
        names in the returned dict
    :return dict: A (possibly empty) dict of key-value mappings
    """
    self.cursor.execute("select key, data from kv where key like ?",
                        ['%s%%' % key_prefix])
    rows = self.cursor.fetchall()
    if not rows:
        return {}
    # With strip=True, drop the prefix from every returned key.
    offset = len(key_prefix) if strip else 0
    return {key[offset:]: json.loads(raw) for key, raw in rows}
constant[
Get a range of keys starting with a common prefix as a mapping of
keys to values.
:param str key_prefix: Common prefix among all keys
:param bool strip: Optionally strip the common prefix from the key
names in the returned dict
:return dict: A (possibly empty) dict of key-value mappings
]
call[name[self].cursor.execute, parameter[constant[select key, data from kv where key like ?], list[[<ast.BinOp object at 0x7da18f09f910>]]]]
variable[result] assign[=] call[name[self].cursor.fetchall, parameter[]]
if <ast.UnaryOp object at 0x7da18f09e560> begin[:]
return[dictionary[[], []]]
if <ast.UnaryOp object at 0x7da18f09e1a0> begin[:]
variable[key_prefix] assign[=] constant[]
return[call[name[dict], parameter[<ast.ListComp object at 0x7da18f09cfd0>]]] | keyword[def] identifier[getrange] ( identifier[self] , identifier[key_prefix] , identifier[strip] = keyword[False] ):
literal[string]
identifier[self] . identifier[cursor] . identifier[execute] ( literal[string] ,
[ literal[string] % identifier[key_prefix] ])
identifier[result] = identifier[self] . identifier[cursor] . identifier[fetchall] ()
keyword[if] keyword[not] identifier[result] :
keyword[return] {}
keyword[if] keyword[not] identifier[strip] :
identifier[key_prefix] = literal[string]
keyword[return] identifier[dict] ([
( identifier[k] [ identifier[len] ( identifier[key_prefix] ):], identifier[json] . identifier[loads] ( identifier[v] )) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[result] ]) | def getrange(self, key_prefix, strip=False):
"""
Get a range of keys starting with a common prefix as a mapping of
keys to values.
:param str key_prefix: Common prefix among all keys
:param bool strip: Optionally strip the common prefix from the key
names in the returned dict
:return dict: A (possibly empty) dict of key-value mappings
"""
self.cursor.execute('select key, data from kv where key like ?', ['%s%%' % key_prefix])
result = self.cursor.fetchall()
if not result:
return {} # depends on [control=['if'], data=[]]
if not strip:
key_prefix = '' # depends on [control=['if'], data=[]]
return dict([(k[len(key_prefix):], json.loads(v)) for (k, v) in result]) |
def read(self, size=None):
    '''Read at most size bytes, returned as a string.
    If the size argument is negative or omitted, read until EOF is reached.
    Notice that when in non-blocking mode, less data than what was requested
    may be returned, even if no size parameter was given.'''
    if self.closed:
        raise ValueError('File closed')
    if self._mode in _allowed_write:
        raise Exception('File opened for write only')
    if not self._done_header:
        self._read_header()
    # The underlying encrypted stream is already exhausted: serve the request
    # straight from the decrypted buffer and drop the returned portion.
    if self._read_finished:
        if size is None:
            decrypted = self._decrypted_buffer
        else:
            decrypted = self._decrypted_buffer[:size]
        # BUG FIX: this line previously sliced `self._decrypted`, an
        # attribute that is never assigned in this block, so any read
        # after EOF raised AttributeError instead of draining the buffer.
        self._decrypted_buffer = self._decrypted_buffer[len(decrypted):]
        return decrypted
    # Read everything in one chunk
    if size is None or size < 0:
        self._encrypted_buffer = self._fp.read()
        self._read_finished = True
    else:
        # We fill the encrypted buffer (keeping it with a minimum of 32 bytes
        # in case of the end-of-file checksum) and decrypt into a decrypted
        # buffer 1 block at a time
        while not self._read_finished:
            # We have enough decrypted bytes (or will after decrypting the
            # encrypted buffer)
            available = len(self._decrypted_buffer) + len(self._encrypted_buffer) - 32
            if available >= size:
                break
            # Read a little extra for the possible final checksum
            data = self._fp.read(BLOCK_SIZE)
            # No data left; we're done
            if not data:
                self._read_finished = True
                break
            self._encrypted_buffer += data
    # Decrypt as much of the encrypted data as possible, always holding back
    # the final 32 bytes, which may turn out to be the trailing checksum.
    safe = self._encrypted_buffer[:-32]
    self._encrypted_buffer = self._encrypted_buffer[-32:]
    self._decrypted_buffer += self._crypto.decrypt(safe)
    self._checksumer.update(safe)
    # All payload bytes were consumed; only the checksum remains -- verify it.
    if self._read_finished:
        self._check_final_checksum(self._encrypted_buffer)
    # Send back the number of bytes requested and remove them from the buffer
    # (size may be None here, in which case the full buffer is returned).
    decrypted = self._decrypted_buffer[:size]
    self._decrypted_buffer = self._decrypted_buffer[size:]
    return decrypted
constant[Read at most size bytes, returned as a string.
If the size argument is negative or omitted, read until EOF is reached.
Notice that when in non-blocking mode, less data than what was requested
may be returned, even if no size parameter was given.]
if name[self].closed begin[:]
<ast.Raise object at 0x7da1b11a73a0>
if compare[name[self]._mode in name[_allowed_write]] begin[:]
<ast.Raise object at 0x7da1b11a7b20>
if <ast.UnaryOp object at 0x7da1b11a53c0> begin[:]
call[name[self]._read_header, parameter[]]
if name[self]._read_finished begin[:]
if compare[name[size] is constant[None]] begin[:]
variable[decrypted] assign[=] name[self]._decrypted_buffer
name[self]._decrypted_buffer assign[=] call[name[self]._decrypted][<ast.Slice object at 0x7da1b11a4d00>]
return[name[decrypted]]
if <ast.BoolOp object at 0x7da1b11a4af0> begin[:]
name[self]._encrypted_buffer assign[=] call[name[self]._fp.read, parameter[]]
name[self]._read_finished assign[=] constant[True]
variable[safe] assign[=] call[name[self]._encrypted_buffer][<ast.Slice object at 0x7da1b11a4a30>]
name[self]._encrypted_buffer assign[=] call[name[self]._encrypted_buffer][<ast.Slice object at 0x7da1b11a5030>]
<ast.AugAssign object at 0x7da1b11a5b10>
call[name[self]._checksumer.update, parameter[name[safe]]]
if name[self]._read_finished begin[:]
call[name[self]._check_final_checksum, parameter[name[self]._encrypted_buffer]]
variable[decrypted] assign[=] call[name[self]._decrypted_buffer][<ast.Slice object at 0x7da1b11a7b80>]
name[self]._decrypted_buffer assign[=] call[name[self]._decrypted_buffer][<ast.Slice object at 0x7da1b11a7c70>]
return[name[decrypted]] | keyword[def] identifier[read] ( identifier[self] , identifier[size] = keyword[None] ):
literal[string]
keyword[if] identifier[self] . identifier[closed] : keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[self] . identifier[_mode] keyword[in] identifier[_allowed_write] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[if] keyword[not] identifier[self] . identifier[_done_header] :
identifier[self] . identifier[_read_header] ()
keyword[if] identifier[self] . identifier[_read_finished] :
keyword[if] identifier[size] keyword[is] keyword[None] :
identifier[decrypted] = identifier[self] . identifier[_decrypted_buffer]
keyword[else] :
identifier[decrypted] = identifier[self] . identifier[_decrypted_buffer] [: identifier[size] ]
identifier[self] . identifier[_decrypted_buffer] = identifier[self] . identifier[_decrypted] [ identifier[len] ( identifier[decrypted] ):]
keyword[return] identifier[decrypted]
keyword[if] identifier[size] keyword[is] keyword[None] keyword[or] identifier[size] < literal[int] :
identifier[self] . identifier[_encrypted_buffer] = identifier[self] . identifier[_fp] . identifier[read] ()
identifier[self] . identifier[_read_finished] = keyword[True]
keyword[else] :
keyword[while] keyword[not] identifier[self] . identifier[_read_finished] :
identifier[available] = identifier[len] ( identifier[self] . identifier[_decrypted_buffer] )+ identifier[len] ( identifier[self] . identifier[_encrypted_buffer] )- literal[int]
keyword[if] identifier[available] >= identifier[size] : keyword[break]
identifier[data] = identifier[self] . identifier[_fp] . identifier[read] ( identifier[BLOCK_SIZE] )
keyword[if] keyword[not] identifier[data] :
identifier[self] . identifier[_read_finished] = keyword[True]
keyword[break]
identifier[self] . identifier[_encrypted_buffer] += identifier[data]
identifier[safe] = identifier[self] . identifier[_encrypted_buffer] [:- literal[int] ]
identifier[self] . identifier[_encrypted_buffer] = identifier[self] . identifier[_encrypted_buffer] [- literal[int] :]
identifier[self] . identifier[_decrypted_buffer] += identifier[self] . identifier[_crypto] . identifier[decrypt] ( identifier[safe] )
identifier[self] . identifier[_checksumer] . identifier[update] ( identifier[safe] )
keyword[if] identifier[self] . identifier[_read_finished] :
identifier[self] . identifier[_check_final_checksum] ( identifier[self] . identifier[_encrypted_buffer] )
identifier[decrypted] = identifier[self] . identifier[_decrypted_buffer] [: identifier[size] ]
identifier[self] . identifier[_decrypted_buffer] = identifier[self] . identifier[_decrypted_buffer] [ identifier[size] :]
keyword[return] identifier[decrypted] | def read(self, size=None):
"""Read at most size bytes, returned as a string.
If the size argument is negative or omitted, read until EOF is reached.
Notice that when in non-blocking mode, less data than what was requested
may be returned, even if no size parameter was given."""
if self.closed:
raise ValueError('File closed') # depends on [control=['if'], data=[]]
if self._mode in _allowed_write:
raise Exception('File opened for write only') # depends on [control=['if'], data=[]]
if not self._done_header:
self._read_header() # depends on [control=['if'], data=[]]
# The encrypted file has been entirely read, so return as much as they want
# and remove the returned portion from the decrypted buffer
if self._read_finished:
if size is None:
decrypted = self._decrypted_buffer # depends on [control=['if'], data=[]]
else:
decrypted = self._decrypted_buffer[:size]
self._decrypted_buffer = self._decrypted[len(decrypted):]
return decrypted # depends on [control=['if'], data=[]]
# Read everything in one chunk
if size is None or size < 0:
self._encrypted_buffer = self._fp.read()
self._read_finished = True # depends on [control=['if'], data=[]]
else:
# We fill the encrypted buffer (keeping it with a minimum of 32 bytes in case of the
# end-of-file checksum) and decrypt into a decrypted buffer 1 block at a time
while not self._read_finished:
# We have enough decrypted bytes (or will after decrypting the encrypted buffer)
available = len(self._decrypted_buffer) + len(self._encrypted_buffer) - 32
if available >= size:
break # depends on [control=['if'], data=[]]
# Read a little extra for the possible final checksum
data = self._fp.read(BLOCK_SIZE)
# No data left; we're done
if not data:
self._read_finished = True
break # depends on [control=['if'], data=[]]
self._encrypted_buffer += data # depends on [control=['while'], data=[]]
# Decrypt as much of the encrypted data as possible (leaving the final check sum)
safe = self._encrypted_buffer[:-32]
self._encrypted_buffer = self._encrypted_buffer[-32:]
self._decrypted_buffer += self._crypto.decrypt(safe)
self._checksumer.update(safe)
# We read all the bytes, only the checksum remains
if self._read_finished:
self._check_final_checksum(self._encrypted_buffer) # depends on [control=['if'], data=[]]
# Send back the number of bytes requests and remove them from the buffer
decrypted = self._decrypted_buffer[:size]
self._decrypted_buffer = self._decrypted_buffer[size:]
return decrypted |
def finalize(self, ctx, shard_state):
    """See parent class."""
    collected = []
    for handle in self._filehandles:
        collected.append(handle.name)
        handle.close()
    shard_state.writer_state = {"shard_filenames": collected}
constant[See parent class.]
variable[filenames] assign[=] list[[]]
for taget[name[filehandle]] in starred[name[self]._filehandles] begin[:]
call[name[filenames].append, parameter[name[filehandle].name]]
call[name[filehandle].close, parameter[]]
name[shard_state].writer_state assign[=] dictionary[[<ast.Constant object at 0x7da18eb54760>], [<ast.Name object at 0x7da18eb55ab0>]] | keyword[def] identifier[finalize] ( identifier[self] , identifier[ctx] , identifier[shard_state] ):
literal[string]
identifier[filenames] =[]
keyword[for] identifier[filehandle] keyword[in] identifier[self] . identifier[_filehandles] :
identifier[filenames] . identifier[append] ( identifier[filehandle] . identifier[name] )
identifier[filehandle] . identifier[close] ()
identifier[shard_state] . identifier[writer_state] ={ literal[string] : identifier[filenames] } | def finalize(self, ctx, shard_state):
"""See parent class."""
filenames = []
for filehandle in self._filehandles:
filenames.append(filehandle.name)
filehandle.close() # depends on [control=['for'], data=['filehandle']]
shard_state.writer_state = {'shard_filenames': filenames} |
def show_frames(self, wait=0):
    """
    Display the current frame from each camera in its own window.
    ``wait`` is the wait interval in milliseconds before the window closes.
    """
    frames = self.get_frames()
    for window_name, image in zip(self.windows, frames):
        cv2.imshow(window_name, image)
    cv2.waitKey(wait)
constant[
Show current frames from cameras.
``wait`` is the wait interval in milliseconds before the window closes.
]
for taget[tuple[[<ast.Name object at 0x7da20c9919c0>, <ast.Name object at 0x7da20c993250>]]] in starred[call[name[zip], parameter[name[self].windows, call[name[self].get_frames, parameter[]]]]] begin[:]
call[name[cv2].imshow, parameter[name[window], name[frame]]]
call[name[cv2].waitKey, parameter[name[wait]]] | keyword[def] identifier[show_frames] ( identifier[self] , identifier[wait] = literal[int] ):
literal[string]
keyword[for] identifier[window] , identifier[frame] keyword[in] identifier[zip] ( identifier[self] . identifier[windows] , identifier[self] . identifier[get_frames] ()):
identifier[cv2] . identifier[imshow] ( identifier[window] , identifier[frame] )
identifier[cv2] . identifier[waitKey] ( identifier[wait] ) | def show_frames(self, wait=0):
"""
Show current frames from cameras.
``wait`` is the wait interval in milliseconds before the window closes.
"""
for (window, frame) in zip(self.windows, self.get_frames()):
cv2.imshow(window, frame) # depends on [control=['for'], data=[]]
cv2.waitKey(wait) |
def _create_spreadsheet(name, title, path, settings):
    """
    Create Google spreadsheet.
    Interactively prompts the user (via ``raw_input`` -- this code path is
    Python 2 only), uploads the project's ``_blueprint/_spreadsheet.xlsx``
    template to Google Drive as a converted spreadsheet, and shares it with
    the requested account(s).
    Returns the new file's Drive ID on success, or None when client secrets
    are missing, the user declines, the template file is absent, or the
    Drive API call fails.
    """
    # Without OAuth client secrets we cannot talk to the Drive API at all.
    if not settings.client_secrets:
        return None
    create = raw_input("Would you like to create a Google spreadsheet? [Y/n] ")
    # Empty input (just Enter) counts as "yes"; any other non-"y" answer aborts.
    if create and not create.lower() == "y":
        return puts("Not creating spreadsheet.")
    # NOTE(review): the prompt below contains a duplicated word ("this this");
    # it is a runtime string so it is left unchanged here.
    email_message = (
        "What Google account(s) should have access to this "
        "this spreadsheet? (Use a full email address, such as "
        "your.name@gmail.com. Separate multiple addresses with commas.)")
    if settings.config.get("google_account"):
        # A configured account becomes the default; Enter accepts it.
        emails = raw_input("\n{0}(Default: {1}) ".format(email_message,
                           settings.config.get("google_account")
                           ))
        if not emails:
            emails = settings.config.get("google_account")
    else:
        # No default configured -- keep asking until something is entered.
        emails = None
        while not emails:
            emails = raw_input(email_message)
    try:
        media_body = _MediaFileUpload(os.path.join(path, '_blueprint/_spreadsheet.xlsx'),
                                      mimetype='application/vnd.ms-excel')
    except IOError:
        show_error("_blueprint/_spreadsheet.xlsx doesn't exist!")
        return None
    service = get_drive_api()
    body = {
        'title': '{0} (Tarbell)'.format(title),
        'description': '{0} ({1})'.format(title, name),
        'mimeType': 'application/vnd.ms-excel',
    }
    try:
        # convert=True asks Drive to turn the uploaded .xlsx into a native
        # Google spreadsheet.
        newfile = service.files()\
            .insert(body=body, media_body=media_body, convert=True).execute()
        for email in emails.split(","):
            _add_user_to_file(newfile['id'], service, user_email=email.strip())
        puts("\n{0!s}! View the spreadsheet at {1!s}".format(
            colored.green("Success"),
            colored.yellow("https://docs.google.com/spreadsheet/ccc?key={0}"
                           .format(newfile['id']))
        ))
        return newfile['id']
    except errors.HttpError as error:
        show_error('An error occurred creating spreadsheet: {0}'.format(error))
        return None
constant[
Create Google spreadsheet.
]
if <ast.UnaryOp object at 0x7da1b192f610> begin[:]
return[constant[None]]
variable[create] assign[=] call[name[raw_input], parameter[constant[Would you like to create a Google spreadsheet? [Y/n] ]]]
if <ast.BoolOp object at 0x7da1b192c250> begin[:]
return[call[name[puts], parameter[constant[Not creating spreadsheet.]]]]
variable[email_message] assign[=] constant[What Google account(s) should have access to this this spreadsheet? (Use a full email address, such as your.name@gmail.com. Separate multiple addresses with commas.)]
if call[name[settings].config.get, parameter[constant[google_account]]] begin[:]
variable[emails] assign[=] call[name[raw_input], parameter[call[constant[
{0}(Default: {1}) ].format, parameter[name[email_message], call[name[settings].config.get, parameter[constant[google_account]]]]]]]
if <ast.UnaryOp object at 0x7da1b195ae30> begin[:]
variable[emails] assign[=] call[name[settings].config.get, parameter[constant[google_account]]]
<ast.Try object at 0x7da1b195a020>
variable[service] assign[=] call[name[get_drive_api], parameter[]]
variable[body] assign[=] dictionary[[<ast.Constant object at 0x7da1b195a6b0>, <ast.Constant object at 0x7da1b195aad0>, <ast.Constant object at 0x7da1b195b880>], [<ast.Call object at 0x7da1b195b910>, <ast.Call object at 0x7da1b195a560>, <ast.Constant object at 0x7da1b195ac20>]]
<ast.Try object at 0x7da1b195a380> | keyword[def] identifier[_create_spreadsheet] ( identifier[name] , identifier[title] , identifier[path] , identifier[settings] ):
literal[string]
keyword[if] keyword[not] identifier[settings] . identifier[client_secrets] :
keyword[return] keyword[None]
identifier[create] = identifier[raw_input] ( literal[string] )
keyword[if] identifier[create] keyword[and] keyword[not] identifier[create] . identifier[lower] ()== literal[string] :
keyword[return] identifier[puts] ( literal[string] )
identifier[email_message] =(
literal[string]
literal[string]
literal[string] )
keyword[if] identifier[settings] . identifier[config] . identifier[get] ( literal[string] ):
identifier[emails] = identifier[raw_input] ( literal[string] . identifier[format] ( identifier[email_message] ,
identifier[settings] . identifier[config] . identifier[get] ( literal[string] )
))
keyword[if] keyword[not] identifier[emails] :
identifier[emails] = identifier[settings] . identifier[config] . identifier[get] ( literal[string] )
keyword[else] :
identifier[emails] = keyword[None]
keyword[while] keyword[not] identifier[emails] :
identifier[emails] = identifier[raw_input] ( identifier[email_message] )
keyword[try] :
identifier[media_body] = identifier[_MediaFileUpload] ( identifier[os] . identifier[path] . identifier[join] ( identifier[path] , literal[string] ),
identifier[mimetype] = literal[string] )
keyword[except] identifier[IOError] :
identifier[show_error] ( literal[string] )
keyword[return] keyword[None]
identifier[service] = identifier[get_drive_api] ()
identifier[body] ={
literal[string] : literal[string] . identifier[format] ( identifier[title] ),
literal[string] : literal[string] . identifier[format] ( identifier[title] , identifier[name] ),
literal[string] : literal[string] ,
}
keyword[try] :
identifier[newfile] = identifier[service] . identifier[files] (). identifier[insert] ( identifier[body] = identifier[body] , identifier[media_body] = identifier[media_body] , identifier[convert] = keyword[True] ). identifier[execute] ()
keyword[for] identifier[email] keyword[in] identifier[emails] . identifier[split] ( literal[string] ):
identifier[_add_user_to_file] ( identifier[newfile] [ literal[string] ], identifier[service] , identifier[user_email] = identifier[email] . identifier[strip] ())
identifier[puts] ( literal[string] . identifier[format] (
identifier[colored] . identifier[green] ( literal[string] ),
identifier[colored] . identifier[yellow] ( literal[string]
. identifier[format] ( identifier[newfile] [ literal[string] ]))
))
keyword[return] identifier[newfile] [ literal[string] ]
keyword[except] identifier[errors] . identifier[HttpError] keyword[as] identifier[error] :
identifier[show_error] ( literal[string] . identifier[format] ( identifier[error] ))
keyword[return] keyword[None] | def _create_spreadsheet(name, title, path, settings):
"""
Create Google spreadsheet.
"""
if not settings.client_secrets:
return None # depends on [control=['if'], data=[]]
create = raw_input('Would you like to create a Google spreadsheet? [Y/n] ')
if create and (not create.lower() == 'y'):
return puts('Not creating spreadsheet.') # depends on [control=['if'], data=[]]
email_message = 'What Google account(s) should have access to this this spreadsheet? (Use a full email address, such as your.name@gmail.com. Separate multiple addresses with commas.)'
if settings.config.get('google_account'):
emails = raw_input('\n{0}(Default: {1}) '.format(email_message, settings.config.get('google_account')))
if not emails:
emails = settings.config.get('google_account') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
emails = None
while not emails:
emails = raw_input(email_message) # depends on [control=['while'], data=[]]
try:
media_body = _MediaFileUpload(os.path.join(path, '_blueprint/_spreadsheet.xlsx'), mimetype='application/vnd.ms-excel') # depends on [control=['try'], data=[]]
except IOError:
show_error("_blueprint/_spreadsheet.xlsx doesn't exist!")
return None # depends on [control=['except'], data=[]]
service = get_drive_api()
body = {'title': '{0} (Tarbell)'.format(title), 'description': '{0} ({1})'.format(title, name), 'mimeType': 'application/vnd.ms-excel'}
try:
newfile = service.files().insert(body=body, media_body=media_body, convert=True).execute()
for email in emails.split(','):
_add_user_to_file(newfile['id'], service, user_email=email.strip()) # depends on [control=['for'], data=['email']]
puts('\n{0!s}! View the spreadsheet at {1!s}'.format(colored.green('Success'), colored.yellow('https://docs.google.com/spreadsheet/ccc?key={0}'.format(newfile['id']))))
return newfile['id'] # depends on [control=['try'], data=[]]
except errors.HttpError as error:
show_error('An error occurred creating spreadsheet: {0}'.format(error))
return None # depends on [control=['except'], data=['error']] |
def schwefelmult(self, x, pen_fac=1e4):
    """multimodal Schwefel function with domain -500..500"""
    # Accept either a single solution vector or a batch of them; a scalar
    # first element means a single vector was passed.
    batch = [x] if isscalar(x[0]) else x
    dim = len(batch[0])
    values = []
    for row in batch:
        core = sum(row * np.sin(np.abs(row) ** 0.5))
        # quadratic penalty for components outside the [-500, 500] domain
        overshoot = (abs(row) > 500) * (abs(row) - 500) ** 2
        values.append(418.9829 * dim - 1.27275661e-5 * dim - core
                      + pen_fac * sum(overshoot))
    result = array(values)
    return result if len(result) > 1 else result[0]
constant[multimodal Schwefel function with domain -500..500]
variable[y] assign[=] <ast.IfExp object at 0x7da1b0b9f400>
variable[N] assign[=] call[name[len], parameter[call[name[y]][constant[0]]]]
variable[f] assign[=] call[name[array], parameter[<ast.ListComp object at 0x7da1b0b82800>]]
return[<ast.IfExp object at 0x7da1b0b82200>] | keyword[def] identifier[schwefelmult] ( identifier[self] , identifier[x] , identifier[pen_fac] = literal[int] ):
literal[string]
identifier[y] =[ identifier[x] ] keyword[if] identifier[isscalar] ( identifier[x] [ literal[int] ]) keyword[else] identifier[x]
identifier[N] = identifier[len] ( identifier[y] [ literal[int] ])
identifier[f] = identifier[array] ([ literal[int] * identifier[N] - literal[int] * identifier[N] - identifier[sum] ( identifier[x] * identifier[np] . identifier[sin] ( identifier[np] . identifier[abs] ( identifier[x] )** literal[int] ))
+ identifier[pen_fac] * identifier[sum] (( identifier[abs] ( identifier[x] )> literal[int] )*( identifier[abs] ( identifier[x] )- literal[int] )** literal[int] ) keyword[for] identifier[x] keyword[in] identifier[y] ])
keyword[return] identifier[f] keyword[if] identifier[len] ( identifier[f] )> literal[int] keyword[else] identifier[f] [ literal[int] ] | def schwefelmult(self, x, pen_fac=10000.0):
"""multimodal Schwefel function with domain -500..500"""
y = [x] if isscalar(x[0]) else x
N = len(y[0])
f = array([418.9829 * N - 1.27275661e-05 * N - sum(x * np.sin(np.abs(x) ** 0.5)) + pen_fac * sum((abs(x) > 500) * (abs(x) - 500) ** 2) for x in y])
return f if len(f) > 1 else f[0] |
def set(self, key, value):
    """
    Transactional implementation of :func:`Map.set(key, value) <hazelcast.proxy.map.Map.set>`
    The value written here is visible only inside the current transaction
    context until the transaction is committed.
    :param key: (object), key of the entry.
    :param value: (object), value of the entry.
    """
    check_not_none(key, "key can't be none")
    check_not_none(value, "value can't be none")
    key_data = self._to_data(key)
    value_data = self._to_data(value)
    return self._encode_invoke(transactional_map_set_codec,
                               key=key_data,
                               value=value_data)
constant[
Transactional implementation of :func:`Map.set(key, value) <hazelcast.proxy.map.Map.set>`
The object to be set will be accessible only in the current transaction context till the transaction is
committed.
:param key: (object), key of the entry.
:param value: (object), value of the entry.
]
call[name[check_not_none], parameter[name[key], constant[key can't be none]]]
call[name[check_not_none], parameter[name[value], constant[value can't be none]]]
return[call[name[self]._encode_invoke, parameter[name[transactional_map_set_codec]]]] | keyword[def] identifier[set] ( identifier[self] , identifier[key] , identifier[value] ):
literal[string]
identifier[check_not_none] ( identifier[key] , literal[string] )
identifier[check_not_none] ( identifier[value] , literal[string] )
keyword[return] identifier[self] . identifier[_encode_invoke] ( identifier[transactional_map_set_codec] , identifier[key] = identifier[self] . identifier[_to_data] ( identifier[key] ),
identifier[value] = identifier[self] . identifier[_to_data] ( identifier[value] )) | def set(self, key, value):
"""
Transactional implementation of :func:`Map.set(key, value) <hazelcast.proxy.map.Map.set>`
The object to be set will be accessible only in the current transaction context till the transaction is
committed.
:param key: (object), key of the entry.
:param value: (object), value of the entry.
"""
check_not_none(key, "key can't be none")
check_not_none(value, "value can't be none")
return self._encode_invoke(transactional_map_set_codec, key=self._to_data(key), value=self._to_data(value)) |
def remove_callback_for_action(self, action, callback):
    """Detach a callback previously registered for the given action.

    Intended for cleanup, e.g. when a plugin replaces a GUI widget and its
    handlers must be unregistered. Unknown actions and callbacks that were
    never registered are silently ignored.

    :param str action: the action whose callback is to be removed
    :param callback: the callback to be removed
    """
    registered = self.__action_to_callbacks.get(action)
    if registered is not None and callback in registered:
        registered.remove(callback)
constant[ Remove a callback for a specific action
This is mainly for cleanup purposes or a plugin that replaces a GUI widget.
:param str action: the cation of which the callback is going to be remove
:param callback: the callback to be removed
]
if compare[name[action] in name[self].__action_to_callbacks] begin[:]
if compare[name[callback] in call[name[self].__action_to_callbacks][name[action]]] begin[:]
call[call[name[self].__action_to_callbacks][name[action]].remove, parameter[name[callback]]] | keyword[def] identifier[remove_callback_for_action] ( identifier[self] , identifier[action] , identifier[callback] ):
literal[string]
keyword[if] identifier[action] keyword[in] identifier[self] . identifier[__action_to_callbacks] :
keyword[if] identifier[callback] keyword[in] identifier[self] . identifier[__action_to_callbacks] [ identifier[action] ]:
identifier[self] . identifier[__action_to_callbacks] [ identifier[action] ]. identifier[remove] ( identifier[callback] ) | def remove_callback_for_action(self, action, callback):
""" Remove a callback for a specific action
This is mainly for cleanup purposes or a plugin that replaces a GUI widget.
:param str action: the cation of which the callback is going to be remove
:param callback: the callback to be removed
"""
if action in self.__action_to_callbacks:
if callback in self.__action_to_callbacks[action]:
self.__action_to_callbacks[action].remove(callback) # depends on [control=['if'], data=['callback']] # depends on [control=['if'], data=['action']] |
def _metatile_contents_equal(zip_1, zip_2):
"""
Given two open zip files as arguments, this returns True if the zips
both contain the same set of files, having the same names, and each
file within the zip is byte-wise identical to the one with the same
name in the other zip.
"""
names_1 = set(zip_1.namelist())
names_2 = set(zip_2.namelist())
if names_1 != names_2:
return False
for n in names_1:
bytes_1 = zip_1.read(n)
bytes_2 = zip_2.read(n)
if bytes_1 != bytes_2:
return False
return True | def function[_metatile_contents_equal, parameter[zip_1, zip_2]]:
constant[
Given two open zip files as arguments, this returns True if the zips
both contain the same set of files, having the same names, and each
file within the zip is byte-wise identical to the one with the same
name in the other zip.
]
variable[names_1] assign[=] call[name[set], parameter[call[name[zip_1].namelist, parameter[]]]]
variable[names_2] assign[=] call[name[set], parameter[call[name[zip_2].namelist, parameter[]]]]
if compare[name[names_1] not_equal[!=] name[names_2]] begin[:]
return[constant[False]]
for taget[name[n]] in starred[name[names_1]] begin[:]
variable[bytes_1] assign[=] call[name[zip_1].read, parameter[name[n]]]
variable[bytes_2] assign[=] call[name[zip_2].read, parameter[name[n]]]
if compare[name[bytes_1] not_equal[!=] name[bytes_2]] begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[_metatile_contents_equal] ( identifier[zip_1] , identifier[zip_2] ):
literal[string]
identifier[names_1] = identifier[set] ( identifier[zip_1] . identifier[namelist] ())
identifier[names_2] = identifier[set] ( identifier[zip_2] . identifier[namelist] ())
keyword[if] identifier[names_1] != identifier[names_2] :
keyword[return] keyword[False]
keyword[for] identifier[n] keyword[in] identifier[names_1] :
identifier[bytes_1] = identifier[zip_1] . identifier[read] ( identifier[n] )
identifier[bytes_2] = identifier[zip_2] . identifier[read] ( identifier[n] )
keyword[if] identifier[bytes_1] != identifier[bytes_2] :
keyword[return] keyword[False]
keyword[return] keyword[True] | def _metatile_contents_equal(zip_1, zip_2):
"""
Given two open zip files as arguments, this returns True if the zips
both contain the same set of files, having the same names, and each
file within the zip is byte-wise identical to the one with the same
name in the other zip.
"""
names_1 = set(zip_1.namelist())
names_2 = set(zip_2.namelist())
if names_1 != names_2:
return False # depends on [control=['if'], data=[]]
for n in names_1:
bytes_1 = zip_1.read(n)
bytes_2 = zip_2.read(n)
if bytes_1 != bytes_2:
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['n']]
return True |
def satisfies_constraints(self, possible_solution):
    """Return True if the given solution is satisfied by all the constraints.

    A constraint rejects the solution either by failing to extract the
    values it needs (``extract_values`` returns None) or by evaluating to
    a falsy result on the extracted values. An empty constraint list
    therefore yields True.
    """
    def _holds(constraint):
        extracted = constraint.extract_values(possible_solution)
        return extracted is not None and constraint(*extracted)

    return all(_holds(constraint) for constraint in self._constraints)
return True | def function[satisfies_constraints, parameter[self, possible_solution]]:
constant[Return True if the given solution is satisfied by all the constraints.]
for taget[name[c]] in starred[name[self]._constraints] begin[:]
variable[values] assign[=] call[name[c].extract_values, parameter[name[possible_solution]]]
if <ast.BoolOp object at 0x7da18fe91930> begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[satisfies_constraints] ( identifier[self] , identifier[possible_solution] ):
literal[string]
keyword[for] identifier[c] keyword[in] identifier[self] . identifier[_constraints] :
identifier[values] = identifier[c] . identifier[extract_values] ( identifier[possible_solution] )
keyword[if] identifier[values] keyword[is] keyword[None] keyword[or] keyword[not] identifier[c] (* identifier[values] ):
keyword[return] keyword[False]
keyword[return] keyword[True] | def satisfies_constraints(self, possible_solution):
"""Return True if the given solution is satisfied by all the constraints."""
for c in self._constraints:
values = c.extract_values(possible_solution)
if values is None or not c(*values):
return False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['c']]
return True |
def force_plot(base_value, shap_values, features=None, feature_names=None, out_names=None, link="identity",
               plot_cmap="RdBu", matplotlib=False, show=True, figsize=(20,3), ordering_keys=None, ordering_keys_time_format=None,
               text_rotation=0):
    """ Visualize the given SHAP values with an additive force layout.

    Parameters
    ----------
    base_value : float
        This is the reference value that the feature contributions start from. For SHAP values it should
        be the value of explainer.expected_value.
    shap_values : numpy.array
        Matrix of SHAP values (# features) or (# samples x # features). If this is a 1D array then a single
        force plot will be drawn, if it is a 2D array then a stacked force plot will be drawn.
    features : numpy.array
        Matrix of feature values (# features) or (# samples x # features). This provides the values of all the
        features, and should be the same shape as the shap_values argument.
    feature_names : list
        List of feature names (# features).
    out_names : str
        The name of the output of the model (plural to support multi-output plotting in the future).
    link : "identity" or "logit"
        The transformation used when drawing the tick mark labels. Using logit will change log-odds numbers
        into probabilities.
    plot_cmap : str
        Color map name forwarded to the visualizer.
    matplotlib : bool
        Whether to use the default Javascript output, or the (less developed) matplotlib output. Using matplotlib
        can be helpful in scenarios where rendering Javascript/HTML is inconvenient. Only supported for
        single-sample plots (see the explicit Exception raised in the stacked branch below).
    show : bool
        Forwarded to visualize() on the single-sample path.
    figsize : tuple
        Figure size forwarded to visualize() on the single-sample path.
    ordering_keys, ordering_keys_time_format
        Forwarded to visualize() on the stacked (multi-sample) path.
        NOTE(review): presumably control how samples are ordered along the x axis — confirm in visualize().
    text_rotation : float or int
        Rotation of the text labels; forwarded to visualize() on both paths.
    """
    # auto unwrap the base_value when it arrives as a 1-element array
    if type(base_value) == np.ndarray and len(base_value) == 1:
        base_value = base_value[0]

    # a list/array base_value means the caller passed multi-output results; that is an
    # API misuse for this function, so fail with a pointer to the correct call form
    if (type(base_value) == np.ndarray or type(base_value) == list):
        if type(shap_values) != list or len(shap_values) != len(base_value):
            raise Exception("In v0.20 force_plot now requires the base value as the first parameter! " \
                            "Try shap.force_plot(explainer.expected_value, shap_values) or " \
                            "for multi-output models try " \
                            "shap.force_plot(explainer.expected_value[0], shap_values[0]).")

    assert not type(shap_values) == list, "The shap_values arg looks looks multi output, try shap_values[i]."

    link = convert_to_link(link)

    # anything that is not a raw ndarray is assumed to be already visualizable
    if type(shap_values) != np.ndarray:
        return visualize(shap_values)

    # convert from a DataFrame or other types; pandas objects are matched by their
    # type string so that pandas never has to be imported by this function
    if str(type(features)) == "<class 'pandas.core.frame.DataFrame'>":
        if feature_names is None:
            feature_names = list(features.columns)
        features = features.values
    elif str(type(features)) == "<class 'pandas.core.series.Series'>":
        if feature_names is None:
            feature_names = list(features.index)
        features = features.values
    elif isinstance(features, list):
        # a plain list is taken to be the feature names, not feature values
        if feature_names is None:
            feature_names = features
        features = None
    elif features is not None and len(features.shape) == 1 and feature_names is None:
        # a 1D array without names is likewise treated as the names
        feature_names = features
        features = None

    # normalize a single sample to a 1 x (# features) matrix
    if len(shap_values.shape) == 1:
        shap_values = np.reshape(shap_values, (1, len(shap_values)))

    if out_names is None:
        out_names = ["output value"]
    elif type(out_names) == str:
        out_names = [out_names]

    if shap_values.shape[0] == 1:
        # single-sample force plot
        if feature_names is None:
            feature_names = [labels['FEATURE'] % str(i) for i in range(shap_values.shape[1])]
        if features is None:
            features = ["" for _ in range(len(feature_names))]
        if type(features) == np.ndarray:
            features = features.flatten()

        # check that the shape of the shap_values and features match
        if len(features) != shap_values.shape[1]:
            msg = "Length of features is not equal to the length of shap_values!"
            if len(features) == shap_values.shape[1] - 1:
                msg += " You might be using an old format shap_values array with the base value " \
                       "as the last column. In this case just pass the array without the last column."
            raise Exception(msg)

        instance = Instance(np.zeros((1, len(feature_names))), features)
        e = AdditiveExplanation(
            base_value,
            np.sum(shap_values[0, :]) + base_value,  # model output = base value + sum of contributions
            shap_values[0, :],
            None,
            instance,
            link,
            Model(None, out_names),
            DenseData(np.zeros((1, len(feature_names))), list(feature_names))
        )
        return visualize(e, plot_cmap, matplotlib, figsize=figsize, show=show, text_rotation=text_rotation)

    else:
        # stacked force plot over multiple samples
        if matplotlib:
            raise Exception("matplotlib = True is not yet supported for force plots with multiple samples!")

        if shap_values.shape[0] > 3000:
            warnings.warn("shap.force_plot is slow for many thousands of rows, try subsampling your data.")

        exps = []
        # build one AdditiveExplanation per row of shap_values
        for i in range(shap_values.shape[0]):
            if feature_names is None:
                feature_names = [labels['FEATURE'] % str(i) for i in range(shap_values.shape[1])]
            if features is None:
                display_features = ["" for i in range(len(feature_names))]
            else:
                display_features = features[i, :]

            instance = Instance(np.ones((1, len(feature_names))), display_features)
            e = AdditiveExplanation(
                base_value,
                np.sum(shap_values[i, :]) + base_value,
                shap_values[i, :],
                None,
                instance,
                link,
                Model(None, out_names),
                DenseData(np.ones((1, len(feature_names))), list(feature_names))
            )
            exps.append(e)

        return visualize(
            exps,
            plot_cmap=plot_cmap,
            ordering_keys=ordering_keys,
            ordering_keys_time_format=ordering_keys_time_format,
            text_rotation=text_rotation
        )
constant[ Visualize the given SHAP values with an additive force layout.
Parameters
----------
base_value : float
This is the reference value that the feature contributions start from. For SHAP values it should
be the value of explainer.expected_value.
shap_values : numpy.array
Matrix of SHAP values (# features) or (# samples x # features). If this is a 1D array then a single
force plot will be drawn, if it is a 2D array then a stacked force plot will be drawn.
features : numpy.array
Matrix of feature values (# features) or (# samples x # features). This provides the values of all the
features, and should be the same shape as the shap_values argument.
feature_names : list
List of feature names (# features).
out_names : str
The name of the outout of the model (plural to support multi-output plotting in the future).
link : "identity" or "logit"
The transformation used when drawing the tick mark labels. Using logit will change log-odds numbers
into probabilities.
matplotlib : bool
Whether to use the default Javascript output, or the (less developed) matplotlib output. Using matplotlib
can be helpful in scenarios where rendering Javascript/HTML is inconvenient.
]
if <ast.BoolOp object at 0x7da18f58dc60> begin[:]
variable[base_value] assign[=] call[name[base_value]][constant[0]]
if <ast.BoolOp object at 0x7da18f58fca0> begin[:]
if <ast.BoolOp object at 0x7da18f58c2b0> begin[:]
<ast.Raise object at 0x7da18f58fe80>
assert[<ast.UnaryOp object at 0x7da18f58f610>]
variable[link] assign[=] call[name[convert_to_link], parameter[name[link]]]
if compare[call[name[type], parameter[name[shap_values]]] not_equal[!=] name[np].ndarray] begin[:]
return[call[name[visualize], parameter[name[shap_values]]]]
if compare[call[name[str], parameter[call[name[type], parameter[name[features]]]]] equal[==] constant[<class 'pandas.core.frame.DataFrame'>]] begin[:]
if compare[name[feature_names] is constant[None]] begin[:]
variable[feature_names] assign[=] call[name[list], parameter[name[features].columns]]
variable[features] assign[=] name[features].values
if compare[call[name[len], parameter[name[shap_values].shape]] equal[==] constant[1]] begin[:]
variable[shap_values] assign[=] call[name[np].reshape, parameter[name[shap_values], tuple[[<ast.Constant object at 0x7da18f58cb20>, <ast.Call object at 0x7da18f58e410>]]]]
if compare[name[out_names] is constant[None]] begin[:]
variable[out_names] assign[=] list[[<ast.Constant object at 0x7da18f58ca60>]]
if compare[call[name[shap_values].shape][constant[0]] equal[==] constant[1]] begin[:]
if compare[name[feature_names] is constant[None]] begin[:]
variable[feature_names] assign[=] <ast.ListComp object at 0x7da18f58c760>
if compare[name[features] is constant[None]] begin[:]
variable[features] assign[=] <ast.ListComp object at 0x7da20e955450>
if compare[call[name[type], parameter[name[features]]] equal[==] name[np].ndarray] begin[:]
variable[features] assign[=] call[name[features].flatten, parameter[]]
if compare[call[name[len], parameter[name[features]]] not_equal[!=] call[name[shap_values].shape][constant[1]]] begin[:]
variable[msg] assign[=] constant[Length of features is not equal to the length of shap_values!]
if compare[call[name[len], parameter[name[features]]] equal[==] binary_operation[call[name[shap_values].shape][constant[1]] - constant[1]]] begin[:]
<ast.AugAssign object at 0x7da20e956f80>
<ast.Raise object at 0x7da20e956e60>
variable[instance] assign[=] call[name[Instance], parameter[call[name[np].zeros, parameter[tuple[[<ast.Constant object at 0x7da20e9554b0>, <ast.Call object at 0x7da20e956230>]]]], name[features]]]
variable[e] assign[=] call[name[AdditiveExplanation], parameter[name[base_value], binary_operation[call[name[np].sum, parameter[call[name[shap_values]][tuple[[<ast.Constant object at 0x7da20e9569b0>, <ast.Slice object at 0x7da20e9561d0>]]]]] + name[base_value]], call[name[shap_values]][tuple[[<ast.Constant object at 0x7da20e957910>, <ast.Slice object at 0x7da20e9542b0>]]], constant[None], name[instance], name[link], call[name[Model], parameter[constant[None], name[out_names]]], call[name[DenseData], parameter[call[name[np].zeros, parameter[tuple[[<ast.Constant object at 0x7da20e954ee0>, <ast.Call object at 0x7da20e955ba0>]]]], call[name[list], parameter[name[feature_names]]]]]]]
return[call[name[visualize], parameter[name[e], name[plot_cmap], name[matplotlib]]]] | keyword[def] identifier[force_plot] ( identifier[base_value] , identifier[shap_values] , identifier[features] = keyword[None] , identifier[feature_names] = keyword[None] , identifier[out_names] = keyword[None] , identifier[link] = literal[string] ,
identifier[plot_cmap] = literal[string] , identifier[matplotlib] = keyword[False] , identifier[show] = keyword[True] , identifier[figsize] =( literal[int] , literal[int] ), identifier[ordering_keys] = keyword[None] , identifier[ordering_keys_time_format] = keyword[None] ,
identifier[text_rotation] = literal[int] ):
literal[string]
keyword[if] identifier[type] ( identifier[base_value] )== identifier[np] . identifier[ndarray] keyword[and] identifier[len] ( identifier[base_value] )== literal[int] :
identifier[base_value] = identifier[base_value] [ literal[int] ]
keyword[if] ( identifier[type] ( identifier[base_value] )== identifier[np] . identifier[ndarray] keyword[or] identifier[type] ( identifier[base_value] )== identifier[list] ):
keyword[if] identifier[type] ( identifier[shap_values] )!= identifier[list] keyword[or] identifier[len] ( identifier[shap_values] )!= identifier[len] ( identifier[base_value] ):
keyword[raise] identifier[Exception] ( literal[string] literal[string] literal[string] literal[string] )
keyword[assert] keyword[not] identifier[type] ( identifier[shap_values] )== identifier[list] , literal[string]
identifier[link] = identifier[convert_to_link] ( identifier[link] )
keyword[if] identifier[type] ( identifier[shap_values] )!= identifier[np] . identifier[ndarray] :
keyword[return] identifier[visualize] ( identifier[shap_values] )
keyword[if] identifier[str] ( identifier[type] ( identifier[features] ))== literal[string] :
keyword[if] identifier[feature_names] keyword[is] keyword[None] :
identifier[feature_names] = identifier[list] ( identifier[features] . identifier[columns] )
identifier[features] = identifier[features] . identifier[values]
keyword[elif] identifier[str] ( identifier[type] ( identifier[features] ))== literal[string] :
keyword[if] identifier[feature_names] keyword[is] keyword[None] :
identifier[feature_names] = identifier[list] ( identifier[features] . identifier[index] )
identifier[features] = identifier[features] . identifier[values]
keyword[elif] identifier[isinstance] ( identifier[features] , identifier[list] ):
keyword[if] identifier[feature_names] keyword[is] keyword[None] :
identifier[feature_names] = identifier[features]
identifier[features] = keyword[None]
keyword[elif] identifier[features] keyword[is] keyword[not] keyword[None] keyword[and] identifier[len] ( identifier[features] . identifier[shape] )== literal[int] keyword[and] identifier[feature_names] keyword[is] keyword[None] :
identifier[feature_names] = identifier[features]
identifier[features] = keyword[None]
keyword[if] identifier[len] ( identifier[shap_values] . identifier[shape] )== literal[int] :
identifier[shap_values] = identifier[np] . identifier[reshape] ( identifier[shap_values] ,( literal[int] , identifier[len] ( identifier[shap_values] )))
keyword[if] identifier[out_names] keyword[is] keyword[None] :
identifier[out_names] =[ literal[string] ]
keyword[elif] identifier[type] ( identifier[out_names] )== identifier[str] :
identifier[out_names] =[ identifier[out_names] ]
keyword[if] identifier[shap_values] . identifier[shape] [ literal[int] ]== literal[int] :
keyword[if] identifier[feature_names] keyword[is] keyword[None] :
identifier[feature_names] =[ identifier[labels] [ literal[string] ]% identifier[str] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[shap_values] . identifier[shape] [ literal[int] ])]
keyword[if] identifier[features] keyword[is] keyword[None] :
identifier[features] =[ literal[string] keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[len] ( identifier[feature_names] ))]
keyword[if] identifier[type] ( identifier[features] )== identifier[np] . identifier[ndarray] :
identifier[features] = identifier[features] . identifier[flatten] ()
keyword[if] identifier[len] ( identifier[features] )!= identifier[shap_values] . identifier[shape] [ literal[int] ]:
identifier[msg] = literal[string]
keyword[if] identifier[len] ( identifier[features] )== identifier[shap_values] . identifier[shape] [ literal[int] ]- literal[int] :
identifier[msg] += literal[string] literal[string]
keyword[raise] identifier[Exception] ( identifier[msg] )
identifier[instance] = identifier[Instance] ( identifier[np] . identifier[zeros] (( literal[int] , identifier[len] ( identifier[feature_names] ))), identifier[features] )
identifier[e] = identifier[AdditiveExplanation] (
identifier[base_value] ,
identifier[np] . identifier[sum] ( identifier[shap_values] [ literal[int] ,:])+ identifier[base_value] ,
identifier[shap_values] [ literal[int] ,:],
keyword[None] ,
identifier[instance] ,
identifier[link] ,
identifier[Model] ( keyword[None] , identifier[out_names] ),
identifier[DenseData] ( identifier[np] . identifier[zeros] (( literal[int] , identifier[len] ( identifier[feature_names] ))), identifier[list] ( identifier[feature_names] ))
)
keyword[return] identifier[visualize] ( identifier[e] , identifier[plot_cmap] , identifier[matplotlib] , identifier[figsize] = identifier[figsize] , identifier[show] = identifier[show] , identifier[text_rotation] = identifier[text_rotation] )
keyword[else] :
keyword[if] identifier[matplotlib] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[if] identifier[shap_values] . identifier[shape] [ literal[int] ]> literal[int] :
identifier[warnings] . identifier[warn] ( literal[string] )
identifier[exps] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[shap_values] . identifier[shape] [ literal[int] ]):
keyword[if] identifier[feature_names] keyword[is] keyword[None] :
identifier[feature_names] =[ identifier[labels] [ literal[string] ]% identifier[str] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[shap_values] . identifier[shape] [ literal[int] ])]
keyword[if] identifier[features] keyword[is] keyword[None] :
identifier[display_features] =[ literal[string] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[feature_names] ))]
keyword[else] :
identifier[display_features] = identifier[features] [ identifier[i] ,:]
identifier[instance] = identifier[Instance] ( identifier[np] . identifier[ones] (( literal[int] , identifier[len] ( identifier[feature_names] ))), identifier[display_features] )
identifier[e] = identifier[AdditiveExplanation] (
identifier[base_value] ,
identifier[np] . identifier[sum] ( identifier[shap_values] [ identifier[i] ,:])+ identifier[base_value] ,
identifier[shap_values] [ identifier[i] ,:],
keyword[None] ,
identifier[instance] ,
identifier[link] ,
identifier[Model] ( keyword[None] , identifier[out_names] ),
identifier[DenseData] ( identifier[np] . identifier[ones] (( literal[int] , identifier[len] ( identifier[feature_names] ))), identifier[list] ( identifier[feature_names] ))
)
identifier[exps] . identifier[append] ( identifier[e] )
keyword[return] identifier[visualize] (
identifier[exps] ,
identifier[plot_cmap] = identifier[plot_cmap] ,
identifier[ordering_keys] = identifier[ordering_keys] ,
identifier[ordering_keys_time_format] = identifier[ordering_keys_time_format] ,
identifier[text_rotation] = identifier[text_rotation]
) | def force_plot(base_value, shap_values, features=None, feature_names=None, out_names=None, link='identity', plot_cmap='RdBu', matplotlib=False, show=True, figsize=(20, 3), ordering_keys=None, ordering_keys_time_format=None, text_rotation=0):
""" Visualize the given SHAP values with an additive force layout.
Parameters
----------
base_value : float
This is the reference value that the feature contributions start from. For SHAP values it should
be the value of explainer.expected_value.
shap_values : numpy.array
Matrix of SHAP values (# features) or (# samples x # features). If this is a 1D array then a single
force plot will be drawn, if it is a 2D array then a stacked force plot will be drawn.
features : numpy.array
Matrix of feature values (# features) or (# samples x # features). This provides the values of all the
features, and should be the same shape as the shap_values argument.
feature_names : list
List of feature names (# features).
out_names : str
The name of the outout of the model (plural to support multi-output plotting in the future).
link : "identity" or "logit"
The transformation used when drawing the tick mark labels. Using logit will change log-odds numbers
into probabilities.
matplotlib : bool
Whether to use the default Javascript output, or the (less developed) matplotlib output. Using matplotlib
can be helpful in scenarios where rendering Javascript/HTML is inconvenient.
"""
# auto unwrap the base_value
if type(base_value) == np.ndarray and len(base_value) == 1:
base_value = base_value[0] # depends on [control=['if'], data=[]]
if type(base_value) == np.ndarray or type(base_value) == list:
if type(shap_values) != list or len(shap_values) != len(base_value):
raise Exception('In v0.20 force_plot now requires the base value as the first parameter! Try shap.force_plot(explainer.expected_value, shap_values) or for multi-output models try shap.force_plot(explainer.expected_value[0], shap_values[0]).') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
assert not type(shap_values) == list, 'The shap_values arg looks looks multi output, try shap_values[i].'
link = convert_to_link(link)
if type(shap_values) != np.ndarray:
return visualize(shap_values) # depends on [control=['if'], data=[]]
# convert from a DataFrame or other types
if str(type(features)) == "<class 'pandas.core.frame.DataFrame'>":
if feature_names is None:
feature_names = list(features.columns) # depends on [control=['if'], data=['feature_names']]
features = features.values # depends on [control=['if'], data=[]]
elif str(type(features)) == "<class 'pandas.core.series.Series'>":
if feature_names is None:
feature_names = list(features.index) # depends on [control=['if'], data=['feature_names']]
features = features.values # depends on [control=['if'], data=[]]
elif isinstance(features, list):
if feature_names is None:
feature_names = features # depends on [control=['if'], data=['feature_names']]
features = None # depends on [control=['if'], data=[]]
elif features is not None and len(features.shape) == 1 and (feature_names is None):
feature_names = features
features = None # depends on [control=['if'], data=[]]
if len(shap_values.shape) == 1:
shap_values = np.reshape(shap_values, (1, len(shap_values))) # depends on [control=['if'], data=[]]
if out_names is None:
out_names = ['output value'] # depends on [control=['if'], data=['out_names']]
elif type(out_names) == str:
out_names = [out_names] # depends on [control=['if'], data=[]]
if shap_values.shape[0] == 1:
if feature_names is None:
feature_names = [labels['FEATURE'] % str(i) for i in range(shap_values.shape[1])] # depends on [control=['if'], data=['feature_names']]
if features is None:
features = ['' for _ in range(len(feature_names))] # depends on [control=['if'], data=['features']]
if type(features) == np.ndarray:
features = features.flatten() # depends on [control=['if'], data=[]]
# check that the shape of the shap_values and features match
if len(features) != shap_values.shape[1]:
msg = 'Length of features is not equal to the length of shap_values!'
if len(features) == shap_values.shape[1] - 1:
msg += ' You might be using an old format shap_values array with the base value as the last column. In this case just pass the array without the last column.' # depends on [control=['if'], data=[]]
raise Exception(msg) # depends on [control=['if'], data=[]]
instance = Instance(np.zeros((1, len(feature_names))), features)
e = AdditiveExplanation(base_value, np.sum(shap_values[0, :]) + base_value, shap_values[0, :], None, instance, link, Model(None, out_names), DenseData(np.zeros((1, len(feature_names))), list(feature_names)))
return visualize(e, plot_cmap, matplotlib, figsize=figsize, show=show, text_rotation=text_rotation) # depends on [control=['if'], data=[]]
else:
if matplotlib:
raise Exception('matplotlib = True is not yet supported for force plots with multiple samples!') # depends on [control=['if'], data=[]]
if shap_values.shape[0] > 3000:
warnings.warn('shap.force_plot is slow for many thousands of rows, try subsampling your data.') # depends on [control=['if'], data=[]]
exps = []
for i in range(shap_values.shape[0]):
if feature_names is None:
feature_names = [labels['FEATURE'] % str(i) for i in range(shap_values.shape[1])] # depends on [control=['if'], data=['feature_names']]
if features is None:
display_features = ['' for i in range(len(feature_names))] # depends on [control=['if'], data=[]]
else:
display_features = features[i, :]
instance = Instance(np.ones((1, len(feature_names))), display_features)
e = AdditiveExplanation(base_value, np.sum(shap_values[i, :]) + base_value, shap_values[i, :], None, instance, link, Model(None, out_names), DenseData(np.ones((1, len(feature_names))), list(feature_names)))
exps.append(e) # depends on [control=['for'], data=['i']]
return visualize(exps, plot_cmap=plot_cmap, ordering_keys=ordering_keys, ordering_keys_time_format=ordering_keys_time_format, text_rotation=text_rotation) |
def make_deny_response(self):
    """Build the "deny" counterpart for a subscription-related presence
    stanza ("subscribe" / "subscribed" / "unsubscribe" / "unsubscribed").

    The reply reverses the direction of this stanza (from/to swapped) and
    keeps its stanza id; the reply type is looked up in ``DENY_RESPONSES``.

    :return: new presence stanza.
    :returntype: `Presence`

    :raise ValueError: if this stanza is not one of the four
        subscription-related presence types.
    """
    if self.stanza_type not in ("subscribe", "subscribed", "unsubscribe", "unsubscribed"):
        raise ValueError("Results may only be generated for 'subscribe',"
                         "'subscribed','unsubscribe' or 'unsubscribed' presence")
    return Presence(stanza_type = DENY_RESPONSES[self.stanza_type],
                    from_jid = self.to_jid, to_jid = self.from_jid,
                    stanza_id = self.stanza_id)
return stanza | def function[make_deny_response, parameter[self]]:
constant[Create "deny" response for the "subscribe" / "subscribed" /
"unsubscribe" / "unsubscribed" presence stanza.
:return: new presence stanza.
:returntype: `Presence`
]
if compare[name[self].stanza_type <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da1b00e7ac0>, <ast.Constant object at 0x7da1b00e6e90>, <ast.Constant object at 0x7da1b00e72b0>, <ast.Constant object at 0x7da1b00e7790>]]] begin[:]
<ast.Raise object at 0x7da1b00e6fe0>
variable[stanza] assign[=] call[name[Presence], parameter[]]
return[name[stanza]] | keyword[def] identifier[make_deny_response] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[stanza_type] keyword[not] keyword[in] ( literal[string] , literal[string] ,
literal[string] , literal[string] ):
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
identifier[stanza] = identifier[Presence] ( identifier[stanza_type] = identifier[DENY_RESPONSES] [ identifier[self] . identifier[stanza_type] ],
identifier[from_jid] = identifier[self] . identifier[to_jid] , identifier[to_jid] = identifier[self] . identifier[from_jid] ,
identifier[stanza_id] = identifier[self] . identifier[stanza_id] )
keyword[return] identifier[stanza] | def make_deny_response(self):
"""Create "deny" response for the "subscribe" / "subscribed" /
"unsubscribe" / "unsubscribed" presence stanza.
:return: new presence stanza.
:returntype: `Presence`
"""
if self.stanza_type not in ('subscribe', 'subscribed', 'unsubscribe', 'unsubscribed'):
raise ValueError("Results may only be generated for 'subscribe','subscribed','unsubscribe' or 'unsubscribed' presence") # depends on [control=['if'], data=[]]
stanza = Presence(stanza_type=DENY_RESPONSES[self.stanza_type], from_jid=self.to_jid, to_jid=self.from_jid, stanza_id=self.stanza_id)
return stanza |
def validate_packet(packet):
    """Ensure ``packet`` is a structurally valid OpenFlow packet.

    A valid packet is a ``bytes`` object of 8 to 2**16 bytes whose
    declared length (big-endian bytes 2-3 of the header) matches its real
    length, and whose version byte is in the open interval (0, 128).

    Raises:
        UnpackException: If the packet is invalid.
    """
    ok = isinstance(packet, bytes)
    if ok:
        size = len(packet)
        ok = 8 <= size <= 2 ** 16
    if ok:
        declared_length = int.from_bytes(packet[2:4], byteorder='big')
        ok = size == declared_length
    if ok:
        ok = 0 < packet[0] < 128
    if not ok:
        raise UnpackException('invalid packet')
constant[Check if packet is valid OF packet.
Raises:
UnpackException: If the packet is invalid.
]
if <ast.UnaryOp object at 0x7da20c7c8280> begin[:]
<ast.Raise object at 0x7da20c7c8070>
variable[packet_length] assign[=] call[name[len], parameter[name[packet]]]
if <ast.BoolOp object at 0x7da20c7c83a0> begin[:]
<ast.Raise object at 0x7da20c7c8310>
if compare[name[packet_length] not_equal[!=] call[name[int].from_bytes, parameter[call[name[packet]][<ast.Slice object at 0x7da20c7c89a0>]]]] begin[:]
<ast.Raise object at 0x7da20c7ca770>
variable[version] assign[=] call[name[packet]][constant[0]]
if <ast.BoolOp object at 0x7da20c7c8fd0> begin[:]
<ast.Raise object at 0x7da20c7c8820> | keyword[def] identifier[validate_packet] ( identifier[packet] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[packet] , identifier[bytes] ):
keyword[raise] identifier[UnpackException] ( literal[string] )
identifier[packet_length] = identifier[len] ( identifier[packet] )
keyword[if] identifier[packet_length] < literal[int] keyword[or] identifier[packet_length] > literal[int] ** literal[int] :
keyword[raise] identifier[UnpackException] ( literal[string] )
keyword[if] identifier[packet_length] != identifier[int] . identifier[from_bytes] ( identifier[packet] [ literal[int] : literal[int] ], identifier[byteorder] = literal[string] ):
keyword[raise] identifier[UnpackException] ( literal[string] )
identifier[version] = identifier[packet] [ literal[int] ]
keyword[if] identifier[version] == literal[int] keyword[or] identifier[version] >= literal[int] :
keyword[raise] identifier[UnpackException] ( literal[string] ) | def validate_packet(packet):
"""Check if packet is valid OF packet.
Raises:
UnpackException: If the packet is invalid.
"""
if not isinstance(packet, bytes):
raise UnpackException('invalid packet') # depends on [control=['if'], data=[]]
packet_length = len(packet)
if packet_length < 8 or packet_length > 2 ** 16:
raise UnpackException('invalid packet') # depends on [control=['if'], data=[]]
if packet_length != int.from_bytes(packet[2:4], byteorder='big'):
raise UnpackException('invalid packet') # depends on [control=['if'], data=[]]
version = packet[0]
if version == 0 or version >= 128:
raise UnpackException('invalid packet') # depends on [control=['if'], data=[]] |
def checkgrad_block(self, analytic_hess, numeric_hess, verbose=False, step=1e-6, tolerance=1e-3, block_indices=None, plot=False):
    """
    Checkgrad a block matrix.

    Compares an analytically computed hessian against a numerically
    estimated one, either for a single block (``block_indices`` given) or
    for the whole matrix. The check passes when every elementwise ratio is
    within ``tolerance`` of 1, or when the two hessians are elementwise
    close (``numpy.allclose`` with ``atol=tolerance``).

    :param analytic_hess: analytic hessian; a plain 2D array, or a block
        matrix (object-dtype array of sub-arrays).
    :param numeric_hess: numeric hessian; plain 2D array, or a block
        matrix matching ``analytic_hess``.
    :param verbose: if True, print a one-row report table.
    :param step: unused here; kept for interface compatibility with the
        sibling checkgrad helpers.
    :param tolerance: allowed deviation of the ratio from 1.
    :param block_indices: if given, only this block is checked.
    :param plot: if True, matshow both hessians plus their ratio and
        difference.
    :returns: True if the check passed.
    """
    if analytic_hess.dtype is np.dtype('object'):
        # Make numeric hessian also into a block matrix
        real_size = get_block_shapes(analytic_hess)
        num_elements = np.sum(real_size)
        if (num_elements, num_elements) == numeric_hess.shape:
            # If the sizes are the same we assume they are the same
            # (we have not fixed any values so the numeric is the whole hessian)
            numeric_hess = get_blocks(numeric_hess, real_size)
        else:
            # Make a fake empty matrix and fill out the correct block
            tmp_numeric_hess = get_blocks(np.zeros((num_elements, num_elements)), real_size)
            tmp_numeric_hess[block_indices] = numeric_hess.copy()
            numeric_hess = tmp_numeric_hess

    if block_indices is not None:
        # Extract the right block
        analytic_hess = analytic_hess[block_indices]
        numeric_hess = numeric_hess[block_indices]
    else:
        # Unblock them if they are in blocks and you aren't checking a
        # single block (checking whole hessian)
        if analytic_hess.dtype is np.dtype('object'):
            analytic_hess = unblock(analytic_hess)
            numeric_hess = unblock(numeric_hess)

    # Replace exact zeros in the analytic hessian before dividing, so the
    # ratio never divides by zero.
    ratio = numeric_hess / (numpy.where(analytic_hess == 0, 1e-10, analytic_hess))
    difference = numpy.abs(analytic_hess - numeric_hess)

    check_passed = numpy.all((numpy.abs(1 - ratio)) < tolerance) or numpy.allclose(numeric_hess, analytic_hess, atol=tolerance)

    if verbose:
        if block_indices:
            print("\nBlock {}".format(block_indices))
        else:
            print("\nAll blocks")

        header = ['Checked', 'Max-Ratio', 'Min-Ratio', 'Min-Difference', 'Max-Difference']
        # BUGFIX: the original built this via map() and then indexed the
        # result; on Python 3 map() returns an iterator, so verbose=True
        # raised TypeError. Join the header once instead.
        header_string = ' | '.join(header)
        separator = '-' * len(header_string)
        print('\n'.join([header_string, separator]))

        min_r = '%.6f' % float(numpy.min(ratio))
        max_r = '%.6f' % float(numpy.max(ratio))
        max_d = '%.6f' % float(numpy.max(difference))
        min_d = '%.6f' % float(numpy.min(difference))
        cols = [max_r, min_r, min_d, max_d]

        if check_passed:
            checked = "\033[92m True \033[0m"   # green
        else:
            checked = "\033[91m False \033[0m"  # red

        grad_string = "{} | {} | {} | {} | {} ".format(checked, cols[0], cols[1], cols[2], cols[3])
        print(grad_string)

    if plot:
        from matplotlib import pyplot as pb
        fig, axes = pb.subplots(2, 2)
        max_lim = numpy.max(numpy.vstack((analytic_hess, numeric_hess)))
        min_lim = numpy.min(numpy.vstack((analytic_hess, numeric_hess)))
        # Shared color limits so the two hessian plots are comparable.
        axes[0, 0].matshow(analytic_hess, vmin=min_lim, vmax=max_lim)
        axes[0, 0].set_title('Analytic hessian')
        axes[0, 0].xaxis.set_ticklabels([None])
        axes[0, 0].yaxis.set_ticklabels([None])
        axes[0, 0].xaxis.set_ticks([None])
        axes[0, 0].yaxis.set_ticks([None])
        msn = axes[0, 1].matshow(numeric_hess, vmin=min_lim, vmax=max_lim)
        pb.colorbar(msn, ax=axes[0, 1])
        axes[0, 1].set_title('Numeric hessian')
        axes[0, 1].xaxis.set_ticklabels([None])
        axes[0, 1].yaxis.set_ticklabels([None])
        axes[0, 1].xaxis.set_ticks([None])
        axes[0, 1].yaxis.set_ticks([None])
        msr = axes[1, 0].matshow(ratio)
        pb.colorbar(msr, ax=axes[1, 0])
        axes[1, 0].set_title('Ratio')
        axes[1, 0].xaxis.set_ticklabels([None])
        axes[1, 0].yaxis.set_ticklabels([None])
        axes[1, 0].xaxis.set_ticks([None])
        axes[1, 0].yaxis.set_ticks([None])
        msd = axes[1, 1].matshow(difference)
        pb.colorbar(msd, ax=axes[1, 1])
        axes[1, 1].set_title('difference')
        axes[1, 1].xaxis.set_ticklabels([None])
        axes[1, 1].yaxis.set_ticklabels([None])
        axes[1, 1].xaxis.set_ticks([None])
        axes[1, 1].yaxis.set_ticks([None])
        if block_indices:
            fig.suptitle("Block: {}".format(block_indices))
        pb.show()

    return check_passed
constant[
Checkgrad a block matrix
]
if compare[name[analytic_hess].dtype is call[name[np].dtype, parameter[constant[object]]]] begin[:]
variable[real_size] assign[=] call[name[get_block_shapes], parameter[name[analytic_hess]]]
variable[num_elements] assign[=] call[name[np].sum, parameter[name[real_size]]]
if compare[tuple[[<ast.Name object at 0x7da1b1c68b20>, <ast.Name object at 0x7da1b1c68af0>]] equal[==] name[numeric_hess].shape] begin[:]
variable[numeric_hess] assign[=] call[name[get_blocks], parameter[name[numeric_hess], name[real_size]]]
if compare[name[block_indices] is_not constant[None]] begin[:]
variable[analytic_hess] assign[=] call[name[analytic_hess]][name[block_indices]]
variable[numeric_hess] assign[=] call[name[numeric_hess]][name[block_indices]]
variable[ratio] assign[=] binary_operation[name[numeric_hess] / call[name[numpy].where, parameter[compare[name[analytic_hess] equal[==] constant[0]], constant[1e-10], name[analytic_hess]]]]
variable[difference] assign[=] call[name[numpy].abs, parameter[binary_operation[name[analytic_hess] - name[numeric_hess]]]]
variable[check_passed] assign[=] <ast.BoolOp object at 0x7da1b1c6a5c0>
if name[verbose] begin[:]
if name[block_indices] begin[:]
call[name[print], parameter[call[constant[
Block {}].format, parameter[name[block_indices]]]]]
variable[header] assign[=] list[[<ast.Constant object at 0x7da1b1c605b0>, <ast.Constant object at 0x7da1b1c60490>, <ast.Constant object at 0x7da1b1c600d0>, <ast.Constant object at 0x7da1b1c604c0>, <ast.Constant object at 0x7da1b1c60130>]]
variable[header_string] assign[=] call[name[map], parameter[<ast.Lambda object at 0x7da1b1c69150>, list[[<ast.Name object at 0x7da1b1c68100>]]]]
variable[separator] assign[=] binary_operation[constant[-] * call[name[len], parameter[call[name[header_string]][constant[0]]]]]
call[name[print], parameter[call[constant[
].join, parameter[list[[<ast.Subscript object at 0x7da1b1c6b040>, <ast.Name object at 0x7da1b1c69330>]]]]]]
variable[min_r] assign[=] binary_operation[constant[%.6f] <ast.Mod object at 0x7da2590d6920> call[name[float], parameter[call[name[numpy].min, parameter[name[ratio]]]]]]
variable[max_r] assign[=] binary_operation[constant[%.6f] <ast.Mod object at 0x7da2590d6920> call[name[float], parameter[call[name[numpy].max, parameter[name[ratio]]]]]]
variable[max_d] assign[=] binary_operation[constant[%.6f] <ast.Mod object at 0x7da2590d6920> call[name[float], parameter[call[name[numpy].max, parameter[name[difference]]]]]]
variable[min_d] assign[=] binary_operation[constant[%.6f] <ast.Mod object at 0x7da2590d6920> call[name[float], parameter[call[name[numpy].min, parameter[name[difference]]]]]]
variable[cols] assign[=] list[[<ast.Name object at 0x7da1b1c68970>, <ast.Name object at 0x7da1b1c69210>, <ast.Name object at 0x7da1b1c68910>, <ast.Name object at 0x7da1b1c6bd00>]]
if name[check_passed] begin[:]
variable[checked] assign[=] constant[[92m True [0m]
variable[grad_string] assign[=] call[constant[{} | {} | {} | {} | {} ].format, parameter[name[checked], call[name[cols]][constant[0]], call[name[cols]][constant[1]], call[name[cols]][constant[2]], call[name[cols]][constant[3]]]]
call[name[print], parameter[name[grad_string]]]
if name[plot] begin[:]
from relative_module[matplotlib] import module[pyplot]
<ast.Tuple object at 0x7da1b1c7fd30> assign[=] call[name[pb].subplots, parameter[constant[2], constant[2]]]
variable[max_lim] assign[=] call[name[numpy].max, parameter[call[name[numpy].vstack, parameter[tuple[[<ast.Name object at 0x7da1b1c7eb30>, <ast.Name object at 0x7da1b1c7e620>]]]]]]
variable[min_lim] assign[=] call[name[numpy].min, parameter[call[name[numpy].vstack, parameter[tuple[[<ast.Name object at 0x7da1b1c7e110>, <ast.Name object at 0x7da1b1c7e650>]]]]]]
variable[msa] assign[=] call[call[name[axes]][tuple[[<ast.Constant object at 0x7da1b1c7eb00>, <ast.Constant object at 0x7da1b1c7ea70>]]].matshow, parameter[name[analytic_hess]]]
call[call[name[axes]][tuple[[<ast.Constant object at 0x7da1b1c7edd0>, <ast.Constant object at 0x7da1b1c7c0a0>]]].set_title, parameter[constant[Analytic hessian]]]
call[call[name[axes]][tuple[[<ast.Constant object at 0x7da1b1c7cdc0>, <ast.Constant object at 0x7da1b1c7fc10>]]].xaxis.set_ticklabels, parameter[list[[<ast.Constant object at 0x7da1b1c7e0e0>]]]]
call[call[name[axes]][tuple[[<ast.Constant object at 0x7da1b1c7e2c0>, <ast.Constant object at 0x7da1b1c7df00>]]].yaxis.set_ticklabels, parameter[list[[<ast.Constant object at 0x7da1b1c7ca90>]]]]
call[call[name[axes]][tuple[[<ast.Constant object at 0x7da1b1c7df60>, <ast.Constant object at 0x7da1b1c7c1f0>]]].xaxis.set_ticks, parameter[list[[<ast.Constant object at 0x7da1b1c7e350>]]]]
call[call[name[axes]][tuple[[<ast.Constant object at 0x7da1b1c7efb0>, <ast.Constant object at 0x7da1b1c7cc70>]]].yaxis.set_ticks, parameter[list[[<ast.Constant object at 0x7da1b1c7f220>]]]]
variable[msn] assign[=] call[call[name[axes]][tuple[[<ast.Constant object at 0x7da1b1c7e500>, <ast.Constant object at 0x7da1b1c7c0d0>]]].matshow, parameter[name[numeric_hess]]]
call[name[pb].colorbar, parameter[name[msn]]]
call[call[name[axes]][tuple[[<ast.Constant object at 0x7da1b1c7da20>, <ast.Constant object at 0x7da1b1c7db40>]]].set_title, parameter[constant[Numeric hessian]]]
call[call[name[axes]][tuple[[<ast.Constant object at 0x7da1b1c7db10>, <ast.Constant object at 0x7da1b1c7ca30>]]].xaxis.set_ticklabels, parameter[list[[<ast.Constant object at 0x7da1b1c7f4c0>]]]]
call[call[name[axes]][tuple[[<ast.Constant object at 0x7da1b1c7f3a0>, <ast.Constant object at 0x7da1b1c7f190>]]].yaxis.set_ticklabels, parameter[list[[<ast.Constant object at 0x7da1b1c7c9a0>]]]]
call[call[name[axes]][tuple[[<ast.Constant object at 0x7da1b1c7d210>, <ast.Constant object at 0x7da1b1c7ece0>]]].xaxis.set_ticks, parameter[list[[<ast.Constant object at 0x7da1b1c7d570>]]]]
call[call[name[axes]][tuple[[<ast.Constant object at 0x7da1b1c7d2d0>, <ast.Constant object at 0x7da1b1c7d390>]]].yaxis.set_ticks, parameter[list[[<ast.Constant object at 0x7da1b1c7d360>]]]]
variable[msr] assign[=] call[call[name[axes]][tuple[[<ast.Constant object at 0x7da1b1c7c7c0>, <ast.Constant object at 0x7da1b1c7ec50>]]].matshow, parameter[name[ratio]]]
call[name[pb].colorbar, parameter[name[msr]]]
call[call[name[axes]][tuple[[<ast.Constant object at 0x7da1b1c7ff10>, <ast.Constant object at 0x7da1b1c7d930>]]].set_title, parameter[constant[Ratio]]]
call[call[name[axes]][tuple[[<ast.Constant object at 0x7da1b1c7d7e0>, <ast.Constant object at 0x7da1b1c7d810>]]].xaxis.set_ticklabels, parameter[list[[<ast.Constant object at 0x7da1b1c7d300>]]]]
call[call[name[axes]][tuple[[<ast.Constant object at 0x7da1b1c7c670>, <ast.Constant object at 0x7da1b1c7dc60>]]].yaxis.set_ticklabels, parameter[list[[<ast.Constant object at 0x7da1b1c7fa00>]]]]
call[call[name[axes]][tuple[[<ast.Constant object at 0x7da1b1c7cca0>, <ast.Constant object at 0x7da1b1c7cbe0>]]].xaxis.set_ticks, parameter[list[[<ast.Constant object at 0x7da1b1c7c070>]]]]
call[call[name[axes]][tuple[[<ast.Constant object at 0x7da1b1c7faf0>, <ast.Constant object at 0x7da1b1cf4790>]]].yaxis.set_ticks, parameter[list[[<ast.Constant object at 0x7da1b1cf47f0>]]]]
variable[msd] assign[=] call[call[name[axes]][tuple[[<ast.Constant object at 0x7da1b1cf49a0>, <ast.Constant object at 0x7da1b1cf49d0>]]].matshow, parameter[name[difference]]]
call[name[pb].colorbar, parameter[name[msd]]]
call[call[name[axes]][tuple[[<ast.Constant object at 0x7da1b1cf4d90>, <ast.Constant object at 0x7da1b1cf4dc0>]]].set_title, parameter[constant[difference]]]
call[call[name[axes]][tuple[[<ast.Constant object at 0x7da1b1cf4fa0>, <ast.Constant object at 0x7da1b1cf4fd0>]]].xaxis.set_ticklabels, parameter[list[[<ast.Constant object at 0x7da1b1cf5030>]]]]
call[call[name[axes]][tuple[[<ast.Constant object at 0x7da1b1cf51e0>, <ast.Constant object at 0x7da1b1cf5210>]]].yaxis.set_ticklabels, parameter[list[[<ast.Constant object at 0x7da1b1cf5270>]]]]
call[call[name[axes]][tuple[[<ast.Constant object at 0x7da1b1cf5420>, <ast.Constant object at 0x7da1b1cf5450>]]].xaxis.set_ticks, parameter[list[[<ast.Constant object at 0x7da1b1cf54b0>]]]]
call[call[name[axes]][tuple[[<ast.Constant object at 0x7da1b1cf5660>, <ast.Constant object at 0x7da1b1cf5690>]]].yaxis.set_ticks, parameter[list[[<ast.Constant object at 0x7da1b1cf56f0>]]]]
if name[block_indices] begin[:]
call[name[fig].suptitle, parameter[call[constant[Block: {}].format, parameter[name[block_indices]]]]]
call[name[pb].show, parameter[]]
return[name[check_passed]] | keyword[def] identifier[checkgrad_block] ( identifier[self] , identifier[analytic_hess] , identifier[numeric_hess] , identifier[verbose] = keyword[False] , identifier[step] = literal[int] , identifier[tolerance] = literal[int] , identifier[block_indices] = keyword[None] , identifier[plot] = keyword[False] ):
literal[string]
keyword[if] identifier[analytic_hess] . identifier[dtype] keyword[is] identifier[np] . identifier[dtype] ( literal[string] ):
identifier[real_size] = identifier[get_block_shapes] ( identifier[analytic_hess] )
identifier[num_elements] = identifier[np] . identifier[sum] ( identifier[real_size] )
keyword[if] ( identifier[num_elements] , identifier[num_elements] )== identifier[numeric_hess] . identifier[shape] :
identifier[numeric_hess] = identifier[get_blocks] ( identifier[numeric_hess] , identifier[real_size] )
keyword[else] :
identifier[tmp_numeric_hess] = identifier[get_blocks] ( identifier[np] . identifier[zeros] (( identifier[num_elements] , identifier[num_elements] )), identifier[real_size] )
identifier[tmp_numeric_hess] [ identifier[block_indices] ]= identifier[numeric_hess] . identifier[copy] ()
identifier[numeric_hess] = identifier[tmp_numeric_hess]
keyword[if] identifier[block_indices] keyword[is] keyword[not] keyword[None] :
identifier[analytic_hess] = identifier[analytic_hess] [ identifier[block_indices] ]
identifier[numeric_hess] = identifier[numeric_hess] [ identifier[block_indices] ]
keyword[else] :
keyword[if] identifier[analytic_hess] . identifier[dtype] keyword[is] identifier[np] . identifier[dtype] ( literal[string] ):
identifier[analytic_hess] = identifier[unblock] ( identifier[analytic_hess] )
identifier[numeric_hess] = identifier[unblock] ( identifier[numeric_hess] )
identifier[ratio] = identifier[numeric_hess] /( identifier[numpy] . identifier[where] ( identifier[analytic_hess] == literal[int] , literal[int] , identifier[analytic_hess] ))
identifier[difference] = identifier[numpy] . identifier[abs] ( identifier[analytic_hess] - identifier[numeric_hess] )
identifier[check_passed] = identifier[numpy] . identifier[all] (( identifier[numpy] . identifier[abs] ( literal[int] - identifier[ratio] ))< identifier[tolerance] ) keyword[or] identifier[numpy] . identifier[allclose] ( identifier[numeric_hess] , identifier[analytic_hess] , identifier[atol] = identifier[tolerance] )
keyword[if] identifier[verbose] :
keyword[if] identifier[block_indices] :
identifier[print] ( literal[string] . identifier[format] ( identifier[block_indices] ))
keyword[else] :
identifier[print] ( literal[string] )
identifier[header] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]
identifier[header_string] = identifier[map] ( keyword[lambda] identifier[x] : literal[string] . identifier[join] ( identifier[header] ),[ identifier[header] ])
identifier[separator] = literal[string] * identifier[len] ( identifier[header_string] [ literal[int] ])
identifier[print] ( literal[string] . identifier[join] ([ identifier[header_string] [ literal[int] ], identifier[separator] ]))
identifier[min_r] = literal[string] % identifier[float] ( identifier[numpy] . identifier[min] ( identifier[ratio] ))
identifier[max_r] = literal[string] % identifier[float] ( identifier[numpy] . identifier[max] ( identifier[ratio] ))
identifier[max_d] = literal[string] % identifier[float] ( identifier[numpy] . identifier[max] ( identifier[difference] ))
identifier[min_d] = literal[string] % identifier[float] ( identifier[numpy] . identifier[min] ( identifier[difference] ))
identifier[cols] =[ identifier[max_r] , identifier[min_r] , identifier[min_d] , identifier[max_d] ]
keyword[if] identifier[check_passed] :
identifier[checked] = literal[string]
keyword[else] :
identifier[checked] = literal[string]
identifier[grad_string] = literal[string] . identifier[format] ( identifier[checked] , identifier[cols] [ literal[int] ], identifier[cols] [ literal[int] ], identifier[cols] [ literal[int] ], identifier[cols] [ literal[int] ])
identifier[print] ( identifier[grad_string] )
keyword[if] identifier[plot] :
keyword[from] identifier[matplotlib] keyword[import] identifier[pyplot] keyword[as] identifier[pb]
identifier[fig] , identifier[axes] = identifier[pb] . identifier[subplots] ( literal[int] , literal[int] )
identifier[max_lim] = identifier[numpy] . identifier[max] ( identifier[numpy] . identifier[vstack] (( identifier[analytic_hess] , identifier[numeric_hess] )))
identifier[min_lim] = identifier[numpy] . identifier[min] ( identifier[numpy] . identifier[vstack] (( identifier[analytic_hess] , identifier[numeric_hess] )))
identifier[msa] = identifier[axes] [ literal[int] , literal[int] ]. identifier[matshow] ( identifier[analytic_hess] , identifier[vmin] = identifier[min_lim] , identifier[vmax] = identifier[max_lim] )
identifier[axes] [ literal[int] , literal[int] ]. identifier[set_title] ( literal[string] )
identifier[axes] [ literal[int] , literal[int] ]. identifier[xaxis] . identifier[set_ticklabels] ([ keyword[None] ])
identifier[axes] [ literal[int] , literal[int] ]. identifier[yaxis] . identifier[set_ticklabels] ([ keyword[None] ])
identifier[axes] [ literal[int] , literal[int] ]. identifier[xaxis] . identifier[set_ticks] ([ keyword[None] ])
identifier[axes] [ literal[int] , literal[int] ]. identifier[yaxis] . identifier[set_ticks] ([ keyword[None] ])
identifier[msn] = identifier[axes] [ literal[int] , literal[int] ]. identifier[matshow] ( identifier[numeric_hess] , identifier[vmin] = identifier[min_lim] , identifier[vmax] = identifier[max_lim] )
identifier[pb] . identifier[colorbar] ( identifier[msn] , identifier[ax] = identifier[axes] [ literal[int] , literal[int] ])
identifier[axes] [ literal[int] , literal[int] ]. identifier[set_title] ( literal[string] )
identifier[axes] [ literal[int] , literal[int] ]. identifier[xaxis] . identifier[set_ticklabels] ([ keyword[None] ])
identifier[axes] [ literal[int] , literal[int] ]. identifier[yaxis] . identifier[set_ticklabels] ([ keyword[None] ])
identifier[axes] [ literal[int] , literal[int] ]. identifier[xaxis] . identifier[set_ticks] ([ keyword[None] ])
identifier[axes] [ literal[int] , literal[int] ]. identifier[yaxis] . identifier[set_ticks] ([ keyword[None] ])
identifier[msr] = identifier[axes] [ literal[int] , literal[int] ]. identifier[matshow] ( identifier[ratio] )
identifier[pb] . identifier[colorbar] ( identifier[msr] , identifier[ax] = identifier[axes] [ literal[int] , literal[int] ])
identifier[axes] [ literal[int] , literal[int] ]. identifier[set_title] ( literal[string] )
identifier[axes] [ literal[int] , literal[int] ]. identifier[xaxis] . identifier[set_ticklabels] ([ keyword[None] ])
identifier[axes] [ literal[int] , literal[int] ]. identifier[yaxis] . identifier[set_ticklabels] ([ keyword[None] ])
identifier[axes] [ literal[int] , literal[int] ]. identifier[xaxis] . identifier[set_ticks] ([ keyword[None] ])
identifier[axes] [ literal[int] , literal[int] ]. identifier[yaxis] . identifier[set_ticks] ([ keyword[None] ])
identifier[msd] = identifier[axes] [ literal[int] , literal[int] ]. identifier[matshow] ( identifier[difference] )
identifier[pb] . identifier[colorbar] ( identifier[msd] , identifier[ax] = identifier[axes] [ literal[int] , literal[int] ])
identifier[axes] [ literal[int] , literal[int] ]. identifier[set_title] ( literal[string] )
identifier[axes] [ literal[int] , literal[int] ]. identifier[xaxis] . identifier[set_ticklabels] ([ keyword[None] ])
identifier[axes] [ literal[int] , literal[int] ]. identifier[yaxis] . identifier[set_ticklabels] ([ keyword[None] ])
identifier[axes] [ literal[int] , literal[int] ]. identifier[xaxis] . identifier[set_ticks] ([ keyword[None] ])
identifier[axes] [ literal[int] , literal[int] ]. identifier[yaxis] . identifier[set_ticks] ([ keyword[None] ])
keyword[if] identifier[block_indices] :
identifier[fig] . identifier[suptitle] ( literal[string] . identifier[format] ( identifier[block_indices] ))
identifier[pb] . identifier[show] ()
keyword[return] identifier[check_passed] | def checkgrad_block(self, analytic_hess, numeric_hess, verbose=False, step=1e-06, tolerance=0.001, block_indices=None, plot=False):
"""
Checkgrad a block matrix
"""
if analytic_hess.dtype is np.dtype('object'):
#Make numeric hessian also into a block matrix
real_size = get_block_shapes(analytic_hess)
num_elements = np.sum(real_size)
if (num_elements, num_elements) == numeric_hess.shape:
#If the sizes are the same we assume they are the same
#(we have not fixed any values so the numeric is the whole hessian)
numeric_hess = get_blocks(numeric_hess, real_size) # depends on [control=['if'], data=[]]
else:
#Make a fake empty matrix and fill out the correct block
tmp_numeric_hess = get_blocks(np.zeros((num_elements, num_elements)), real_size)
tmp_numeric_hess[block_indices] = numeric_hess.copy()
numeric_hess = tmp_numeric_hess # depends on [control=['if'], data=[]]
if block_indices is not None:
#Extract the right block
analytic_hess = analytic_hess[block_indices]
numeric_hess = numeric_hess[block_indices] # depends on [control=['if'], data=['block_indices']]
#Unblock them if they are in blocks and you aren't checking a single block (checking whole hessian)
elif analytic_hess.dtype is np.dtype('object'):
analytic_hess = unblock(analytic_hess)
numeric_hess = unblock(numeric_hess) # depends on [control=['if'], data=[]]
ratio = numeric_hess / numpy.where(analytic_hess == 0, 1e-10, analytic_hess)
difference = numpy.abs(analytic_hess - numeric_hess)
check_passed = numpy.all(numpy.abs(1 - ratio) < tolerance) or numpy.allclose(numeric_hess, analytic_hess, atol=tolerance)
if verbose:
if block_indices:
print('\nBlock {}'.format(block_indices)) # depends on [control=['if'], data=[]]
else:
print('\nAll blocks')
header = ['Checked', 'Max-Ratio', 'Min-Ratio', 'Min-Difference', 'Max-Difference']
header_string = map(lambda x: ' | '.join(header), [header])
separator = '-' * len(header_string[0])
print('\n'.join([header_string[0], separator]))
min_r = '%.6f' % float(numpy.min(ratio))
max_r = '%.6f' % float(numpy.max(ratio))
max_d = '%.6f' % float(numpy.max(difference))
min_d = '%.6f' % float(numpy.min(difference))
cols = [max_r, min_r, min_d, max_d]
if check_passed:
checked = '\x1b[92m True \x1b[0m' # depends on [control=['if'], data=[]]
else:
checked = '\x1b[91m False \x1b[0m'
grad_string = '{} | {} | {} | {} | {} '.format(checked, cols[0], cols[1], cols[2], cols[3])
print(grad_string)
if plot:
from matplotlib import pyplot as pb
(fig, axes) = pb.subplots(2, 2)
max_lim = numpy.max(numpy.vstack((analytic_hess, numeric_hess)))
min_lim = numpy.min(numpy.vstack((analytic_hess, numeric_hess)))
msa = axes[0, 0].matshow(analytic_hess, vmin=min_lim, vmax=max_lim)
axes[0, 0].set_title('Analytic hessian')
axes[0, 0].xaxis.set_ticklabels([None])
axes[0, 0].yaxis.set_ticklabels([None])
axes[0, 0].xaxis.set_ticks([None])
axes[0, 0].yaxis.set_ticks([None])
msn = axes[0, 1].matshow(numeric_hess, vmin=min_lim, vmax=max_lim)
pb.colorbar(msn, ax=axes[0, 1])
axes[0, 1].set_title('Numeric hessian')
axes[0, 1].xaxis.set_ticklabels([None])
axes[0, 1].yaxis.set_ticklabels([None])
axes[0, 1].xaxis.set_ticks([None])
axes[0, 1].yaxis.set_ticks([None])
msr = axes[1, 0].matshow(ratio)
pb.colorbar(msr, ax=axes[1, 0])
axes[1, 0].set_title('Ratio')
axes[1, 0].xaxis.set_ticklabels([None])
axes[1, 0].yaxis.set_ticklabels([None])
axes[1, 0].xaxis.set_ticks([None])
axes[1, 0].yaxis.set_ticks([None])
msd = axes[1, 1].matshow(difference)
pb.colorbar(msd, ax=axes[1, 1])
axes[1, 1].set_title('difference')
axes[1, 1].xaxis.set_ticklabels([None])
axes[1, 1].yaxis.set_ticklabels([None])
axes[1, 1].xaxis.set_ticks([None])
axes[1, 1].yaxis.set_ticks([None])
if block_indices:
fig.suptitle('Block: {}'.format(block_indices)) # depends on [control=['if'], data=[]]
pb.show() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return check_passed |
def spawn(
        cls, argv, cwd=None, env=None, echo=True, preexec_fn=None,
        dimensions=(24, 80)):
    '''Start the given command in a child process in a pseudo terminal.

    This does all the fork/exec type of stuff for a pty, and returns an
    instance of PtyProcess.

    If preexec_fn is supplied, it will be called with no arguments in the
    child process before exec-ing the specified command.
    It may, for instance, set signal handlers to SIG_DFL or SIG_IGN.

    Dimensions of the psuedoterminal used for the subprocess can be
    specified as a tuple (rows, cols), or the default (24, 80) will be used.

    :param argv: command and arguments as a list or tuple (argv[0] is the
        command; it is resolved through ``which()`` before exec).
    :param cwd: working directory for the child, or None to inherit.
    :param env: environment mapping for the child; None inherits the
        parent's environment (and uses ``os.execv`` instead of ``execvpe``).
    :param echo: when False, terminal echo is disabled on the child's tty.
    :param preexec_fn: callable run in the child just before exec.
    :param dimensions: (rows, cols) of the pseudoterminal.
    :raises TypeError: if argv is not a list or tuple.
    :raises FileNotFoundError: if the command cannot be resolved.
    '''
    # Note that it is difficult for this method to fail.
    # You cannot detect if the child process cannot start.
    # So the only way you can tell if the child process started
    # or not is to try to read from the file descriptor. If you get
    # EOF immediately then it means that the child is already dead.
    # That may not necessarily be bad because you may have spawned a child
    # that performs some task; creates no stdout output; and then dies.
    if not isinstance(argv, (list, tuple)):
        raise TypeError("Expected a list or tuple for argv, got %r" % argv)

    # Shallow copy of argv so we can modify it
    argv = argv[:]
    command = argv[0]
    command_with_path = which(command)
    if command_with_path is None:
        raise FileNotFoundError('The command was not found or was not ' +
                                'executable: %s.' % command)
    command = command_with_path
    # exec the resolved absolute path, not the bare command name
    argv[0] = command

    # [issue #119] To prevent the case where exec fails and the user is
    # stuck interacting with a python child process instead of whatever
    # was expected, we implement the solution from
    # http://stackoverflow.com/a/3703179 to pass the exception to the
    # parent process

    # [issue #119] 1. Before forking, open a pipe in the parent process.
    exec_err_pipe_read, exec_err_pipe_write = os.pipe()

    if use_native_pty_fork:
        pid, fd = pty.fork()
    else:
        # Use internal fork_pty, for Solaris
        pid, fd = _fork_pty.fork_pty()

    # Some platforms must call setwinsize() and setecho() from the
    # child process, and others from the master process. We do both,
    # allowing IOError for either.

    if pid == CHILD:
        # --- child process: everything below until the exec runs here ---
        # set window size
        try:
            _setwinsize(STDIN_FILENO, *dimensions)
        except IOError as err:
            # EINVAL/ENOTTY mean this platform wants it done from the
            # master side instead; anything else is a real error.
            if err.args[0] not in (errno.EINVAL, errno.ENOTTY):
                raise

        # disable echo if spawn argument echo was unset
        if not echo:
            try:
                _setecho(STDIN_FILENO, False)
            except (IOError, termios.error) as err:
                if err.args[0] not in (errno.EINVAL, errno.ENOTTY):
                    raise

        # [issue #119] 3. The child closes the reading end and sets the
        # close-on-exec flag for the writing end.
        os.close(exec_err_pipe_read)
        fcntl.fcntl(exec_err_pipe_write, fcntl.F_SETFD, fcntl.FD_CLOEXEC)

        # Do not allow child to inherit open file descriptors from parent,
        # with the exception of the exec_err_pipe_write of the pipe
        # Impose ceiling on max_fd: AIX bugfix for users with unlimited
        # nofiles where resource.RLIMIT_NOFILE is 2^63-1 and os.closerange()
        # occasionally raises out of range error
        # (fds 0-2, the child's stdio on the pty, are deliberately kept open)
        max_fd = min(1048576, resource.getrlimit(resource.RLIMIT_NOFILE)[0])
        os.closerange(3, exec_err_pipe_write)
        os.closerange(exec_err_pipe_write+1, max_fd)

        if cwd is not None:
            os.chdir(cwd)

        if preexec_fn is not None:
            try:
                preexec_fn()
            except Exception as e:
                # Report 'ExcName:0:message' back to the parent; the middle
                # field is a dummy errno so the format matches the OSError
                # case below.
                ename = type(e).__name__
                tosend = '{}:0:{}'.format(ename, str(e))
                if PY3:
                    tosend = tosend.encode('utf-8')

                os.write(exec_err_pipe_write, tosend)
                os.close(exec_err_pipe_write)
                os._exit(1)

        try:
            if env is None:
                os.execv(command, argv)
            else:
                os.execvpe(command, argv, env)
        except OSError as err:
            # [issue #119] 5. If exec fails, the child writes the error
            # code back to the parent using the pipe, then exits.
            tosend = 'OSError:{}:{}'.format(err.errno, str(err))
            if PY3:
                tosend = tosend.encode('utf-8')
            os.write(exec_err_pipe_write, tosend)
            os.close(exec_err_pipe_write)
            os._exit(os.EX_OSERR)

    # Parent
    inst = cls(pid, fd)

    # Set some informational attributes
    inst.argv = argv
    if env is not None:
        inst.env = env
    if cwd is not None:
        inst.launch_dir = cwd

    # [issue #119] 2. After forking, the parent closes the writing end
    # of the pipe and reads from the reading end.
    os.close(exec_err_pipe_write)
    exec_err_data = os.read(exec_err_pipe_read, 4096)
    os.close(exec_err_pipe_read)

    # [issue #119] 6. The parent reads eof (a zero-length read) if the
    # child successfully performed exec, since close-on-exec made
    # successful exec close the writing end of the pipe. Or, if exec
    # failed, the parent reads the error code and can proceed
    # accordingly. Either way, the parent blocks until the child calls
    # exec.
    if len(exec_err_data) != 0:
        try:
            # Payload format is 'ExcClassName:errno:message' (see child side).
            errclass, errno_s, errmsg = exec_err_data.split(b':', 2)
            exctype = getattr(builtins, errclass.decode('ascii'), Exception)

            exception = exctype(errmsg.decode('utf-8', 'replace'))
            if exctype is OSError:
                exception.errno = int(errno_s)
        except:
            # Malformed payload: surface the raw bytes rather than guess.
            raise Exception('Subprocess failed, got bad error data: %r'
                            % exec_err_data)
        else:
            raise exception

    # Also set the window size from the master side; some platforms only
    # honor one of the two (see comment above the child-side call).
    try:
        inst.setwinsize(*dimensions)
    except IOError as err:
        if err.args[0] not in (errno.EINVAL, errno.ENOTTY, errno.ENXIO):
            raise
return inst | def function[spawn, parameter[cls, argv, cwd, env, echo, preexec_fn, dimensions]]:
constant[Start the given command in a child process in a pseudo terminal.
This does all the fork/exec type of stuff for a pty, and returns an
instance of PtyProcess.
If preexec_fn is supplied, it will be called with no arguments in the
child process before exec-ing the specified command.
It may, for instance, set signal handlers to SIG_DFL or SIG_IGN.
Dimensions of the psuedoterminal used for the subprocess can be
specified as a tuple (rows, cols), or the default (24, 80) will be used.
]
if <ast.UnaryOp object at 0x7da18dc05ff0> begin[:]
<ast.Raise object at 0x7da18dc06ec0>
variable[argv] assign[=] call[name[argv]][<ast.Slice object at 0x7da18dc04af0>]
variable[command] assign[=] call[name[argv]][constant[0]]
variable[command_with_path] assign[=] call[name[which], parameter[name[command]]]
if compare[name[command_with_path] is constant[None]] begin[:]
<ast.Raise object at 0x7da18dc04b50>
variable[command] assign[=] name[command_with_path]
call[name[argv]][constant[0]] assign[=] name[command]
<ast.Tuple object at 0x7da18dc04c10> assign[=] call[name[os].pipe, parameter[]]
if name[use_native_pty_fork] begin[:]
<ast.Tuple object at 0x7da18dc074f0> assign[=] call[name[pty].fork, parameter[]]
if compare[name[pid] equal[==] name[CHILD]] begin[:]
<ast.Try object at 0x7da18dc04fd0>
if <ast.UnaryOp object at 0x7da18dc06110> begin[:]
<ast.Try object at 0x7da18dc04e20>
call[name[os].close, parameter[name[exec_err_pipe_read]]]
call[name[fcntl].fcntl, parameter[name[exec_err_pipe_write], name[fcntl].F_SETFD, name[fcntl].FD_CLOEXEC]]
variable[max_fd] assign[=] call[name[min], parameter[constant[1048576], call[call[name[resource].getrlimit, parameter[name[resource].RLIMIT_NOFILE]]][constant[0]]]]
call[name[os].closerange, parameter[constant[3], name[exec_err_pipe_write]]]
call[name[os].closerange, parameter[binary_operation[name[exec_err_pipe_write] + constant[1]], name[max_fd]]]
if compare[name[cwd] is_not constant[None]] begin[:]
call[name[os].chdir, parameter[name[cwd]]]
if compare[name[preexec_fn] is_not constant[None]] begin[:]
<ast.Try object at 0x7da18dc05d20>
<ast.Try object at 0x7da18dc06ef0>
variable[inst] assign[=] call[name[cls], parameter[name[pid], name[fd]]]
name[inst].argv assign[=] name[argv]
if compare[name[env] is_not constant[None]] begin[:]
name[inst].env assign[=] name[env]
if compare[name[cwd] is_not constant[None]] begin[:]
name[inst].launch_dir assign[=] name[cwd]
call[name[os].close, parameter[name[exec_err_pipe_write]]]
variable[exec_err_data] assign[=] call[name[os].read, parameter[name[exec_err_pipe_read], constant[4096]]]
call[name[os].close, parameter[name[exec_err_pipe_read]]]
if compare[call[name[len], parameter[name[exec_err_data]]] not_equal[!=] constant[0]] begin[:]
<ast.Try object at 0x7da1b1f82440>
<ast.Try object at 0x7da1b1f818a0>
return[name[inst]] | keyword[def] identifier[spawn] (
identifier[cls] , identifier[argv] , identifier[cwd] = keyword[None] , identifier[env] = keyword[None] , identifier[echo] = keyword[True] , identifier[preexec_fn] = keyword[None] ,
identifier[dimensions] =( literal[int] , literal[int] )):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[argv] ,( identifier[list] , identifier[tuple] )):
keyword[raise] identifier[TypeError] ( literal[string] % identifier[argv] )
identifier[argv] = identifier[argv] [:]
identifier[command] = identifier[argv] [ literal[int] ]
identifier[command_with_path] = identifier[which] ( identifier[command] )
keyword[if] identifier[command_with_path] keyword[is] keyword[None] :
keyword[raise] identifier[FileNotFoundError] ( literal[string] +
literal[string] % identifier[command] )
identifier[command] = identifier[command_with_path]
identifier[argv] [ literal[int] ]= identifier[command]
identifier[exec_err_pipe_read] , identifier[exec_err_pipe_write] = identifier[os] . identifier[pipe] ()
keyword[if] identifier[use_native_pty_fork] :
identifier[pid] , identifier[fd] = identifier[pty] . identifier[fork] ()
keyword[else] :
identifier[pid] , identifier[fd] = identifier[_fork_pty] . identifier[fork_pty] ()
keyword[if] identifier[pid] == identifier[CHILD] :
keyword[try] :
identifier[_setwinsize] ( identifier[STDIN_FILENO] ,* identifier[dimensions] )
keyword[except] identifier[IOError] keyword[as] identifier[err] :
keyword[if] identifier[err] . identifier[args] [ literal[int] ] keyword[not] keyword[in] ( identifier[errno] . identifier[EINVAL] , identifier[errno] . identifier[ENOTTY] ):
keyword[raise]
keyword[if] keyword[not] identifier[echo] :
keyword[try] :
identifier[_setecho] ( identifier[STDIN_FILENO] , keyword[False] )
keyword[except] ( identifier[IOError] , identifier[termios] . identifier[error] ) keyword[as] identifier[err] :
keyword[if] identifier[err] . identifier[args] [ literal[int] ] keyword[not] keyword[in] ( identifier[errno] . identifier[EINVAL] , identifier[errno] . identifier[ENOTTY] ):
keyword[raise]
identifier[os] . identifier[close] ( identifier[exec_err_pipe_read] )
identifier[fcntl] . identifier[fcntl] ( identifier[exec_err_pipe_write] , identifier[fcntl] . identifier[F_SETFD] , identifier[fcntl] . identifier[FD_CLOEXEC] )
identifier[max_fd] = identifier[min] ( literal[int] , identifier[resource] . identifier[getrlimit] ( identifier[resource] . identifier[RLIMIT_NOFILE] )[ literal[int] ])
identifier[os] . identifier[closerange] ( literal[int] , identifier[exec_err_pipe_write] )
identifier[os] . identifier[closerange] ( identifier[exec_err_pipe_write] + literal[int] , identifier[max_fd] )
keyword[if] identifier[cwd] keyword[is] keyword[not] keyword[None] :
identifier[os] . identifier[chdir] ( identifier[cwd] )
keyword[if] identifier[preexec_fn] keyword[is] keyword[not] keyword[None] :
keyword[try] :
identifier[preexec_fn] ()
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[ename] = identifier[type] ( identifier[e] ). identifier[__name__]
identifier[tosend] = literal[string] . identifier[format] ( identifier[ename] , identifier[str] ( identifier[e] ))
keyword[if] identifier[PY3] :
identifier[tosend] = identifier[tosend] . identifier[encode] ( literal[string] )
identifier[os] . identifier[write] ( identifier[exec_err_pipe_write] , identifier[tosend] )
identifier[os] . identifier[close] ( identifier[exec_err_pipe_write] )
identifier[os] . identifier[_exit] ( literal[int] )
keyword[try] :
keyword[if] identifier[env] keyword[is] keyword[None] :
identifier[os] . identifier[execv] ( identifier[command] , identifier[argv] )
keyword[else] :
identifier[os] . identifier[execvpe] ( identifier[command] , identifier[argv] , identifier[env] )
keyword[except] identifier[OSError] keyword[as] identifier[err] :
identifier[tosend] = literal[string] . identifier[format] ( identifier[err] . identifier[errno] , identifier[str] ( identifier[err] ))
keyword[if] identifier[PY3] :
identifier[tosend] = identifier[tosend] . identifier[encode] ( literal[string] )
identifier[os] . identifier[write] ( identifier[exec_err_pipe_write] , identifier[tosend] )
identifier[os] . identifier[close] ( identifier[exec_err_pipe_write] )
identifier[os] . identifier[_exit] ( identifier[os] . identifier[EX_OSERR] )
identifier[inst] = identifier[cls] ( identifier[pid] , identifier[fd] )
identifier[inst] . identifier[argv] = identifier[argv]
keyword[if] identifier[env] keyword[is] keyword[not] keyword[None] :
identifier[inst] . identifier[env] = identifier[env]
keyword[if] identifier[cwd] keyword[is] keyword[not] keyword[None] :
identifier[inst] . identifier[launch_dir] = identifier[cwd]
identifier[os] . identifier[close] ( identifier[exec_err_pipe_write] )
identifier[exec_err_data] = identifier[os] . identifier[read] ( identifier[exec_err_pipe_read] , literal[int] )
identifier[os] . identifier[close] ( identifier[exec_err_pipe_read] )
keyword[if] identifier[len] ( identifier[exec_err_data] )!= literal[int] :
keyword[try] :
identifier[errclass] , identifier[errno_s] , identifier[errmsg] = identifier[exec_err_data] . identifier[split] ( literal[string] , literal[int] )
identifier[exctype] = identifier[getattr] ( identifier[builtins] , identifier[errclass] . identifier[decode] ( literal[string] ), identifier[Exception] )
identifier[exception] = identifier[exctype] ( identifier[errmsg] . identifier[decode] ( literal[string] , literal[string] ))
keyword[if] identifier[exctype] keyword[is] identifier[OSError] :
identifier[exception] . identifier[errno] = identifier[int] ( identifier[errno_s] )
keyword[except] :
keyword[raise] identifier[Exception] ( literal[string]
% identifier[exec_err_data] )
keyword[else] :
keyword[raise] identifier[exception]
keyword[try] :
identifier[inst] . identifier[setwinsize] (* identifier[dimensions] )
keyword[except] identifier[IOError] keyword[as] identifier[err] :
keyword[if] identifier[err] . identifier[args] [ literal[int] ] keyword[not] keyword[in] ( identifier[errno] . identifier[EINVAL] , identifier[errno] . identifier[ENOTTY] , identifier[errno] . identifier[ENXIO] ):
keyword[raise]
keyword[return] identifier[inst] | def spawn(cls, argv, cwd=None, env=None, echo=True, preexec_fn=None, dimensions=(24, 80)):
"""Start the given command in a child process in a pseudo terminal.
This does all the fork/exec type of stuff for a pty, and returns an
instance of PtyProcess.
If preexec_fn is supplied, it will be called with no arguments in the
child process before exec-ing the specified command.
It may, for instance, set signal handlers to SIG_DFL or SIG_IGN.
Dimensions of the psuedoterminal used for the subprocess can be
specified as a tuple (rows, cols), or the default (24, 80) will be used.
"""
# Note that it is difficult for this method to fail.
# You cannot detect if the child process cannot start.
# So the only way you can tell if the child process started
# or not is to try to read from the file descriptor. If you get
# EOF immediately then it means that the child is already dead.
# That may not necessarily be bad because you may have spawned a child
# that performs some task; creates no stdout output; and then dies.
if not isinstance(argv, (list, tuple)):
raise TypeError('Expected a list or tuple for argv, got %r' % argv) # depends on [control=['if'], data=[]]
# Shallow copy of argv so we can modify it
argv = argv[:]
command = argv[0]
command_with_path = which(command)
if command_with_path is None:
raise FileNotFoundError('The command was not found or was not ' + 'executable: %s.' % command) # depends on [control=['if'], data=[]]
command = command_with_path
argv[0] = command
# [issue #119] To prevent the case where exec fails and the user is
# stuck interacting with a python child process instead of whatever
# was expected, we implement the solution from
# http://stackoverflow.com/a/3703179 to pass the exception to the
# parent process
# [issue #119] 1. Before forking, open a pipe in the parent process.
(exec_err_pipe_read, exec_err_pipe_write) = os.pipe()
if use_native_pty_fork:
(pid, fd) = pty.fork() # depends on [control=['if'], data=[]]
else:
# Use internal fork_pty, for Solaris
(pid, fd) = _fork_pty.fork_pty()
# Some platforms must call setwinsize() and setecho() from the
# child process, and others from the master process. We do both,
# allowing IOError for either.
if pid == CHILD:
# set window size
try:
_setwinsize(STDIN_FILENO, *dimensions) # depends on [control=['try'], data=[]]
except IOError as err:
if err.args[0] not in (errno.EINVAL, errno.ENOTTY):
raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['err']]
# disable echo if spawn argument echo was unset
if not echo:
try:
_setecho(STDIN_FILENO, False) # depends on [control=['try'], data=[]]
except (IOError, termios.error) as err:
if err.args[0] not in (errno.EINVAL, errno.ENOTTY):
raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['err']] # depends on [control=['if'], data=[]]
# [issue #119] 3. The child closes the reading end and sets the
# close-on-exec flag for the writing end.
os.close(exec_err_pipe_read)
fcntl.fcntl(exec_err_pipe_write, fcntl.F_SETFD, fcntl.FD_CLOEXEC)
# Do not allow child to inherit open file descriptors from parent,
# with the exception of the exec_err_pipe_write of the pipe
# Impose ceiling on max_fd: AIX bugfix for users with unlimited
# nofiles where resource.RLIMIT_NOFILE is 2^63-1 and os.closerange()
# occasionally raises out of range error
max_fd = min(1048576, resource.getrlimit(resource.RLIMIT_NOFILE)[0])
os.closerange(3, exec_err_pipe_write)
os.closerange(exec_err_pipe_write + 1, max_fd)
if cwd is not None:
os.chdir(cwd) # depends on [control=['if'], data=['cwd']]
if preexec_fn is not None:
try:
preexec_fn() # depends on [control=['try'], data=[]]
except Exception as e:
ename = type(e).__name__
tosend = '{}:0:{}'.format(ename, str(e))
if PY3:
tosend = tosend.encode('utf-8') # depends on [control=['if'], data=[]]
os.write(exec_err_pipe_write, tosend)
os.close(exec_err_pipe_write)
os._exit(1) # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=['preexec_fn']]
try:
if env is None:
os.execv(command, argv) # depends on [control=['if'], data=[]]
else:
os.execvpe(command, argv, env) # depends on [control=['try'], data=[]]
except OSError as err:
# [issue #119] 5. If exec fails, the child writes the error
# code back to the parent using the pipe, then exits.
tosend = 'OSError:{}:{}'.format(err.errno, str(err))
if PY3:
tosend = tosend.encode('utf-8') # depends on [control=['if'], data=[]]
os.write(exec_err_pipe_write, tosend)
os.close(exec_err_pipe_write)
os._exit(os.EX_OSERR) # depends on [control=['except'], data=['err']] # depends on [control=['if'], data=[]]
# Parent
inst = cls(pid, fd)
# Set some informational attributes
inst.argv = argv
if env is not None:
inst.env = env # depends on [control=['if'], data=['env']]
if cwd is not None:
inst.launch_dir = cwd # depends on [control=['if'], data=['cwd']]
# [issue #119] 2. After forking, the parent closes the writing end
# of the pipe and reads from the reading end.
os.close(exec_err_pipe_write)
exec_err_data = os.read(exec_err_pipe_read, 4096)
os.close(exec_err_pipe_read)
# [issue #119] 6. The parent reads eof (a zero-length read) if the
# child successfully performed exec, since close-on-exec made
# successful exec close the writing end of the pipe. Or, if exec
# failed, the parent reads the error code and can proceed
# accordingly. Either way, the parent blocks until the child calls
# exec.
if len(exec_err_data) != 0:
try:
(errclass, errno_s, errmsg) = exec_err_data.split(b':', 2)
exctype = getattr(builtins, errclass.decode('ascii'), Exception)
exception = exctype(errmsg.decode('utf-8', 'replace'))
if exctype is OSError:
exception.errno = int(errno_s) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except:
raise Exception('Subprocess failed, got bad error data: %r' % exec_err_data) # depends on [control=['except'], data=[]]
else:
raise exception # depends on [control=['if'], data=[]]
try:
inst.setwinsize(*dimensions) # depends on [control=['try'], data=[]]
except IOError as err:
if err.args[0] not in (errno.EINVAL, errno.ENOTTY, errno.ENXIO):
raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['err']]
return inst |
def three_hours_forecast_at_coords(self, lat, lon):
"""
Queries the OWM Weather API for three hours weather forecast for the
specified geographic coordinate (eg: latitude: 51.5073509,
longitude: -0.1277583). A *Forecaster* object is returned,
containing a *Forecast* instance covering a global streak of
five days: this instance encapsulates *Weather* objects, with a time
interval of three hours one from each other
:param lat: location's latitude, must be between -90.0 and 90.0
:type lat: int/float
:param lon: location's longitude, must be between -180.0 and 180.0
:type lon: int/float
:returns: a *Forecaster* instance or ``None`` if forecast data is not
available for the specified location
:raises: *ParseResponseException* when OWM Weather API responses' data
cannot be parsed, *APICallException* when OWM Weather API can not be
reached
"""
geo.assert_is_lon(lon)
geo.assert_is_lat(lat)
params = {'lon': lon, 'lat': lat, 'lang': self._language}
uri = http_client.HttpClient.to_url(THREE_HOURS_FORECAST_URL,
self._API_key,
self._subscription_type,
self._use_ssl)
_, json_data = self._wapi.cacheable_get_json(uri, params=params)
forecast = self._parsers['forecast'].parse_JSON(json_data)
if forecast is not None:
forecast.set_interval("3h")
return forecaster.Forecaster(forecast)
else:
return None | def function[three_hours_forecast_at_coords, parameter[self, lat, lon]]:
constant[
Queries the OWM Weather API for three hours weather forecast for the
specified geographic coordinate (eg: latitude: 51.5073509,
longitude: -0.1277583). A *Forecaster* object is returned,
containing a *Forecast* instance covering a global streak of
five days: this instance encapsulates *Weather* objects, with a time
interval of three hours one from each other
:param lat: location's latitude, must be between -90.0 and 90.0
:type lat: int/float
:param lon: location's longitude, must be between -180.0 and 180.0
:type lon: int/float
:returns: a *Forecaster* instance or ``None`` if forecast data is not
available for the specified location
:raises: *ParseResponseException* when OWM Weather API responses' data
cannot be parsed, *APICallException* when OWM Weather API can not be
reached
]
call[name[geo].assert_is_lon, parameter[name[lon]]]
call[name[geo].assert_is_lat, parameter[name[lat]]]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da20c6e7e20>, <ast.Constant object at 0x7da20c6e56c0>, <ast.Constant object at 0x7da20c6e61a0>], [<ast.Name object at 0x7da20c6e53c0>, <ast.Name object at 0x7da20c6e5570>, <ast.Attribute object at 0x7da20c6e4ac0>]]
variable[uri] assign[=] call[name[http_client].HttpClient.to_url, parameter[name[THREE_HOURS_FORECAST_URL], name[self]._API_key, name[self]._subscription_type, name[self]._use_ssl]]
<ast.Tuple object at 0x7da20c6e74c0> assign[=] call[name[self]._wapi.cacheable_get_json, parameter[name[uri]]]
variable[forecast] assign[=] call[call[name[self]._parsers][constant[forecast]].parse_JSON, parameter[name[json_data]]]
if compare[name[forecast] is_not constant[None]] begin[:]
call[name[forecast].set_interval, parameter[constant[3h]]]
return[call[name[forecaster].Forecaster, parameter[name[forecast]]]] | keyword[def] identifier[three_hours_forecast_at_coords] ( identifier[self] , identifier[lat] , identifier[lon] ):
literal[string]
identifier[geo] . identifier[assert_is_lon] ( identifier[lon] )
identifier[geo] . identifier[assert_is_lat] ( identifier[lat] )
identifier[params] ={ literal[string] : identifier[lon] , literal[string] : identifier[lat] , literal[string] : identifier[self] . identifier[_language] }
identifier[uri] = identifier[http_client] . identifier[HttpClient] . identifier[to_url] ( identifier[THREE_HOURS_FORECAST_URL] ,
identifier[self] . identifier[_API_key] ,
identifier[self] . identifier[_subscription_type] ,
identifier[self] . identifier[_use_ssl] )
identifier[_] , identifier[json_data] = identifier[self] . identifier[_wapi] . identifier[cacheable_get_json] ( identifier[uri] , identifier[params] = identifier[params] )
identifier[forecast] = identifier[self] . identifier[_parsers] [ literal[string] ]. identifier[parse_JSON] ( identifier[json_data] )
keyword[if] identifier[forecast] keyword[is] keyword[not] keyword[None] :
identifier[forecast] . identifier[set_interval] ( literal[string] )
keyword[return] identifier[forecaster] . identifier[Forecaster] ( identifier[forecast] )
keyword[else] :
keyword[return] keyword[None] | def three_hours_forecast_at_coords(self, lat, lon):
"""
Queries the OWM Weather API for three hours weather forecast for the
specified geographic coordinate (eg: latitude: 51.5073509,
longitude: -0.1277583). A *Forecaster* object is returned,
containing a *Forecast* instance covering a global streak of
five days: this instance encapsulates *Weather* objects, with a time
interval of three hours one from each other
:param lat: location's latitude, must be between -90.0 and 90.0
:type lat: int/float
:param lon: location's longitude, must be between -180.0 and 180.0
:type lon: int/float
:returns: a *Forecaster* instance or ``None`` if forecast data is not
available for the specified location
:raises: *ParseResponseException* when OWM Weather API responses' data
cannot be parsed, *APICallException* when OWM Weather API can not be
reached
"""
geo.assert_is_lon(lon)
geo.assert_is_lat(lat)
params = {'lon': lon, 'lat': lat, 'lang': self._language}
uri = http_client.HttpClient.to_url(THREE_HOURS_FORECAST_URL, self._API_key, self._subscription_type, self._use_ssl)
(_, json_data) = self._wapi.cacheable_get_json(uri, params=params)
forecast = self._parsers['forecast'].parse_JSON(json_data)
if forecast is not None:
forecast.set_interval('3h')
return forecaster.Forecaster(forecast) # depends on [control=['if'], data=['forecast']]
else:
return None |
def remove_file(path, conn=None):
'''
Remove a single file from the file system
'''
if conn is None:
conn = init()
log.debug('Removing package file %s', path)
os.remove(path) | def function[remove_file, parameter[path, conn]]:
constant[
Remove a single file from the file system
]
if compare[name[conn] is constant[None]] begin[:]
variable[conn] assign[=] call[name[init], parameter[]]
call[name[log].debug, parameter[constant[Removing package file %s], name[path]]]
call[name[os].remove, parameter[name[path]]] | keyword[def] identifier[remove_file] ( identifier[path] , identifier[conn] = keyword[None] ):
literal[string]
keyword[if] identifier[conn] keyword[is] keyword[None] :
identifier[conn] = identifier[init] ()
identifier[log] . identifier[debug] ( literal[string] , identifier[path] )
identifier[os] . identifier[remove] ( identifier[path] ) | def remove_file(path, conn=None):
"""
Remove a single file from the file system
"""
if conn is None:
conn = init() # depends on [control=['if'], data=['conn']]
log.debug('Removing package file %s', path)
os.remove(path) |
def user_log_list(self, userid, cur_p=''):
'''
View the list of the Log.
'''
if cur_p == '':
current_page_number = 1
else:
current_page_number = int(cur_p)
current_page_number = 1 if current_page_number < 1 else current_page_number
pager_num = int(MLog.total_number() / CMS_CFG['list_num'])
kwd = {
'pager': '',
'title': '',
'current_page': current_page_number,
'user_id': userid,
}
if self.is_p:
self.render('admin/log_ajax/user_log_list.html',
kwd=kwd,
infos=MLog.query_pager_by_user(
userid,
current_page_num=current_page_number
),
format_date=tools.format_date,
userinfo=self.userinfo)
else:
self.render('misc/log/user_log_list.html',
kwd=kwd,
infos=MLog.query_pager_by_user(
userid,
current_page_num=current_page_number
),
format_date=tools.format_date,
userinfo=self.userinfo) | def function[user_log_list, parameter[self, userid, cur_p]]:
constant[
View the list of the Log.
]
if compare[name[cur_p] equal[==] constant[]] begin[:]
variable[current_page_number] assign[=] constant[1]
variable[current_page_number] assign[=] <ast.IfExp object at 0x7da1b0416d70>
variable[pager_num] assign[=] call[name[int], parameter[binary_operation[call[name[MLog].total_number, parameter[]] / call[name[CMS_CFG]][constant[list_num]]]]]
variable[kwd] assign[=] dictionary[[<ast.Constant object at 0x7da1b0414d60>, <ast.Constant object at 0x7da1b0417ee0>, <ast.Constant object at 0x7da1b0417f10>, <ast.Constant object at 0x7da1b0416320>], [<ast.Constant object at 0x7da1b04162c0>, <ast.Constant object at 0x7da1b0417550>, <ast.Name object at 0x7da1b0417520>, <ast.Name object at 0x7da1b0417580>]]
if name[self].is_p begin[:]
call[name[self].render, parameter[constant[admin/log_ajax/user_log_list.html]]] | keyword[def] identifier[user_log_list] ( identifier[self] , identifier[userid] , identifier[cur_p] = literal[string] ):
literal[string]
keyword[if] identifier[cur_p] == literal[string] :
identifier[current_page_number] = literal[int]
keyword[else] :
identifier[current_page_number] = identifier[int] ( identifier[cur_p] )
identifier[current_page_number] = literal[int] keyword[if] identifier[current_page_number] < literal[int] keyword[else] identifier[current_page_number]
identifier[pager_num] = identifier[int] ( identifier[MLog] . identifier[total_number] ()/ identifier[CMS_CFG] [ literal[string] ])
identifier[kwd] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : identifier[current_page_number] ,
literal[string] : identifier[userid] ,
}
keyword[if] identifier[self] . identifier[is_p] :
identifier[self] . identifier[render] ( literal[string] ,
identifier[kwd] = identifier[kwd] ,
identifier[infos] = identifier[MLog] . identifier[query_pager_by_user] (
identifier[userid] ,
identifier[current_page_num] = identifier[current_page_number]
),
identifier[format_date] = identifier[tools] . identifier[format_date] ,
identifier[userinfo] = identifier[self] . identifier[userinfo] )
keyword[else] :
identifier[self] . identifier[render] ( literal[string] ,
identifier[kwd] = identifier[kwd] ,
identifier[infos] = identifier[MLog] . identifier[query_pager_by_user] (
identifier[userid] ,
identifier[current_page_num] = identifier[current_page_number]
),
identifier[format_date] = identifier[tools] . identifier[format_date] ,
identifier[userinfo] = identifier[self] . identifier[userinfo] ) | def user_log_list(self, userid, cur_p=''):
"""
View the list of the Log.
"""
if cur_p == '':
current_page_number = 1 # depends on [control=['if'], data=[]]
else:
current_page_number = int(cur_p)
current_page_number = 1 if current_page_number < 1 else current_page_number
pager_num = int(MLog.total_number() / CMS_CFG['list_num'])
kwd = {'pager': '', 'title': '', 'current_page': current_page_number, 'user_id': userid}
if self.is_p:
self.render('admin/log_ajax/user_log_list.html', kwd=kwd, infos=MLog.query_pager_by_user(userid, current_page_num=current_page_number), format_date=tools.format_date, userinfo=self.userinfo) # depends on [control=['if'], data=[]]
else:
self.render('misc/log/user_log_list.html', kwd=kwd, infos=MLog.query_pager_by_user(userid, current_page_num=current_page_number), format_date=tools.format_date, userinfo=self.userinfo) |
def _init_options(self, kwargs):
""" Initializes self.options """
self.options = self.task_config.options
if self.options is None:
self.options = {}
if kwargs:
self.options.update(kwargs)
# Handle dynamic lookup of project_config values via $project_config.attr
for option, value in list(self.options.items()):
try:
if value.startswith("$project_config."):
attr = value.replace("$project_config.", "", 1)
self.options[option] = getattr(self.project_config, attr, None)
except AttributeError:
pass | def function[_init_options, parameter[self, kwargs]]:
constant[ Initializes self.options ]
name[self].options assign[=] name[self].task_config.options
if compare[name[self].options is constant[None]] begin[:]
name[self].options assign[=] dictionary[[], []]
if name[kwargs] begin[:]
call[name[self].options.update, parameter[name[kwargs]]]
for taget[tuple[[<ast.Name object at 0x7da1b1504c40>, <ast.Name object at 0x7da1b1504790>]]] in starred[call[name[list], parameter[call[name[self].options.items, parameter[]]]]] begin[:]
<ast.Try object at 0x7da1b15070a0> | keyword[def] identifier[_init_options] ( identifier[self] , identifier[kwargs] ):
literal[string]
identifier[self] . identifier[options] = identifier[self] . identifier[task_config] . identifier[options]
keyword[if] identifier[self] . identifier[options] keyword[is] keyword[None] :
identifier[self] . identifier[options] ={}
keyword[if] identifier[kwargs] :
identifier[self] . identifier[options] . identifier[update] ( identifier[kwargs] )
keyword[for] identifier[option] , identifier[value] keyword[in] identifier[list] ( identifier[self] . identifier[options] . identifier[items] ()):
keyword[try] :
keyword[if] identifier[value] . identifier[startswith] ( literal[string] ):
identifier[attr] = identifier[value] . identifier[replace] ( literal[string] , literal[string] , literal[int] )
identifier[self] . identifier[options] [ identifier[option] ]= identifier[getattr] ( identifier[self] . identifier[project_config] , identifier[attr] , keyword[None] )
keyword[except] identifier[AttributeError] :
keyword[pass] | def _init_options(self, kwargs):
""" Initializes self.options """
self.options = self.task_config.options
if self.options is None:
self.options = {} # depends on [control=['if'], data=[]]
if kwargs:
self.options.update(kwargs) # depends on [control=['if'], data=[]]
# Handle dynamic lookup of project_config values via $project_config.attr
for (option, value) in list(self.options.items()):
try:
if value.startswith('$project_config.'):
attr = value.replace('$project_config.', '', 1)
self.options[option] = getattr(self.project_config, attr, None) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]] |
def downsample_grid(a, b, samples, ret_idx=False):
    """Content-based downsampling for faster visualization

    The arrays `a` and `b` make up a 2D scatter plot with high
    and low density values. This method takes out points at
    indices with high density.

    Parameters
    ----------
    a, b: 1d ndarrays
        The input arrays to downsample
    samples: int
        The desired number of samples
    ret_idx: bool
        Also return a boolean array that corresponds to the
        downsampled indices in `a` and `b`.

    Returns
    -------
    dsa, dsb: 1d ndarrays of shape (samples,)
        The arrays `a` and `b` downsampled by evenly selecting
        points and pseudo-randomly adding or removing points
        to match `samples`.
    idx: 1d boolean array with same shape as `a`
        Only returned if `ret_idx` is True.
        A boolean array such that `a[idx] == dsa`

    Notes
    -----
    Invalid (nan/inf) values are not removed here; they are simply
    never selected by the grid pass (``valid`` filters them), though
    the random up-sampling step may re-add such indices.
    """
    # fixed random state so repeated calls yield the same selection
    rs = np.random.RandomState(seed=47).get_state()

    samples = int(samples)
    if samples and samples < a.size:
        # The events to keep
        keep = np.zeros_like(a, dtype=bool)

        # 1. Produce evenly distributed samples
        # Choosing grid-size:
        #  - large numbers tend to show actual structures of the sample,
        #    which is not desired for plotting
        #  - small numbers tend will not result in too few samples and,
        #    in order to reach the desired samples, the data must be
        #    upsampled again.
        # 300 is about the size of the plot in marker sizes and yields
        # good results.
        grid_size = 300
        # map both axes onto [0, grid_size] pixel coordinates
        xpx = norm(a, a, b) * grid_size
        ypx = norm(b, b, a) * grid_size
        # The events on the grid to process; each grid cell accepts at
        # most one event, which thins out dense regions.
        toproc = np.ones((grid_size, grid_size), dtype=bool)

        for ii in range(xpx.size):
            xi = xpx[ii]
            yi = ypx[ii]
            # filter for overlapping events
            # NOTE(review): for xi == 0, int(xi-1) is -1 and wraps to the
            # last grid cell; presumably harmless for plotting — confirm.
            if valid(xi, yi) and toproc[int(xi - 1), int(yi - 1)]:
                toproc[int(xi - 1), int(yi - 1)] = False
                # include event
                keep[ii] = True

        # 2. Make sure that we reach `samples` by adding or
        # removing events.
        diff = np.sum(keep) - samples
        if diff > 0:
            # Too many samples; remove surplus pseudo-randomly
            rem_indices = np.where(keep)[0]
            np.random.set_state(rs)
            rem = np.random.choice(rem_indices,
                                   size=diff,
                                   replace=False)
            keep[rem] = False
        elif diff < 0:
            # Not enough samples; add more pseudo-randomly
            add_indices = np.where(~keep)[0]
            np.random.set_state(rs)
            add = np.random.choice(add_indices,
                                   size=abs(diff),
                                   replace=False)
            keep[add] = True

        assert np.sum(keep) == samples, "sanity check"
        asd = a[keep]
        bsd = b[keep]
        assert np.allclose(a[keep], asd, equal_nan=True), "sanity check"
        assert np.allclose(b[keep], bsd, equal_nan=True), "sanity check"
    else:
        # fewer points than requested (or samples == 0): keep everything
        keep = np.ones_like(a, dtype=bool)
        asd = a
        bsd = b

    if ret_idx:
        return asd, bsd, keep
    else:
        return asd, bsd
constant[Content-based downsampling for faster visualization
The arrays `a` and `b` make up a 2D scatter plot with high
and low density values. This method takes out points at
indices with high density.
Parameters
----------
a, b: 1d ndarrays
The input arrays to downsample
samples: int
The desired number of samples
remove_invalid: bool
Remove nan and inf values before downsampling
ret_idx: bool
Also return a boolean array that corresponds to the
downsampled indices in `a` and `b`.
Returns
-------
dsa, dsb: 1d ndarrays of shape (samples,)
The arrays `a` and `b` downsampled by evenly selecting
points and pseudo-randomly adding or removing points
to match `samples`.
idx: 1d boolean array with same shape as `a`
Only returned if `ret_idx` is True.
A boolean array such that `a[idx] == dsa`
]
variable[rs] assign[=] call[call[name[np].random.RandomState, parameter[]].get_state, parameter[]]
variable[samples] assign[=] call[name[int], parameter[name[samples]]]
if <ast.BoolOp object at 0x7da1b1933d90> begin[:]
variable[keep] assign[=] call[name[np].zeros_like, parameter[name[a]]]
variable[grid_size] assign[=] constant[300]
variable[xpx] assign[=] binary_operation[call[name[norm], parameter[name[a], name[a], name[b]]] * name[grid_size]]
variable[ypx] assign[=] binary_operation[call[name[norm], parameter[name[b], name[b], name[a]]] * name[grid_size]]
variable[toproc] assign[=] call[name[np].ones, parameter[tuple[[<ast.Name object at 0x7da1b1933610>, <ast.Name object at 0x7da1b1933a90>]]]]
for taget[name[ii]] in starred[call[name[range], parameter[name[xpx].size]]] begin[:]
variable[xi] assign[=] call[name[xpx]][name[ii]]
variable[yi] assign[=] call[name[ypx]][name[ii]]
if <ast.BoolOp object at 0x7da1b1a77040> begin[:]
call[name[toproc]][tuple[[<ast.Call object at 0x7da1b1a76650>, <ast.Call object at 0x7da1b1a77250>]]] assign[=] constant[False]
call[name[keep]][name[ii]] assign[=] constant[True]
variable[diff] assign[=] binary_operation[call[name[np].sum, parameter[name[keep]]] - name[samples]]
if compare[name[diff] greater[>] constant[0]] begin[:]
variable[rem_indices] assign[=] call[call[name[np].where, parameter[name[keep]]]][constant[0]]
call[name[np].random.set_state, parameter[name[rs]]]
variable[rem] assign[=] call[name[np].random.choice, parameter[name[rem_indices]]]
call[name[keep]][name[rem]] assign[=] constant[False]
assert[compare[call[name[np].sum, parameter[name[keep]]] equal[==] name[samples]]]
variable[asd] assign[=] call[name[a]][name[keep]]
variable[bsd] assign[=] call[name[b]][name[keep]]
assert[call[name[np].allclose, parameter[call[name[a]][name[keep]], name[asd]]]]
assert[call[name[np].allclose, parameter[call[name[b]][name[keep]], name[bsd]]]]
if name[ret_idx] begin[:]
return[tuple[[<ast.Name object at 0x7da1b1a3cdc0>, <ast.Name object at 0x7da1b1a3c4f0>, <ast.Name object at 0x7da1b1a3e980>]]] | keyword[def] identifier[downsample_grid] ( identifier[a] , identifier[b] , identifier[samples] , identifier[ret_idx] = keyword[False] ):
literal[string]
identifier[rs] = identifier[np] . identifier[random] . identifier[RandomState] ( identifier[seed] = literal[int] ). identifier[get_state] ()
identifier[samples] = identifier[int] ( identifier[samples] )
keyword[if] identifier[samples] keyword[and] identifier[samples] < identifier[a] . identifier[size] :
identifier[keep] = identifier[np] . identifier[zeros_like] ( identifier[a] , identifier[dtype] = identifier[bool] )
identifier[grid_size] = literal[int]
identifier[xpx] = identifier[norm] ( identifier[a] , identifier[a] , identifier[b] )* identifier[grid_size]
identifier[ypx] = identifier[norm] ( identifier[b] , identifier[b] , identifier[a] )* identifier[grid_size]
identifier[toproc] = identifier[np] . identifier[ones] (( identifier[grid_size] , identifier[grid_size] ), identifier[dtype] = identifier[bool] )
keyword[for] identifier[ii] keyword[in] identifier[range] ( identifier[xpx] . identifier[size] ):
identifier[xi] = identifier[xpx] [ identifier[ii] ]
identifier[yi] = identifier[ypx] [ identifier[ii] ]
keyword[if] identifier[valid] ( identifier[xi] , identifier[yi] ) keyword[and] identifier[toproc] [ identifier[int] ( identifier[xi] - literal[int] ), identifier[int] ( identifier[yi] - literal[int] )]:
identifier[toproc] [ identifier[int] ( identifier[xi] - literal[int] ), identifier[int] ( identifier[yi] - literal[int] )]= keyword[False]
identifier[keep] [ identifier[ii] ]= keyword[True]
identifier[diff] = identifier[np] . identifier[sum] ( identifier[keep] )- identifier[samples]
keyword[if] identifier[diff] > literal[int] :
identifier[rem_indices] = identifier[np] . identifier[where] ( identifier[keep] )[ literal[int] ]
identifier[np] . identifier[random] . identifier[set_state] ( identifier[rs] )
identifier[rem] = identifier[np] . identifier[random] . identifier[choice] ( identifier[rem_indices] ,
identifier[size] = identifier[diff] ,
identifier[replace] = keyword[False] )
identifier[keep] [ identifier[rem] ]= keyword[False]
keyword[elif] identifier[diff] < literal[int] :
identifier[add_indices] = identifier[np] . identifier[where] (~ identifier[keep] )[ literal[int] ]
identifier[np] . identifier[random] . identifier[set_state] ( identifier[rs] )
identifier[add] = identifier[np] . identifier[random] . identifier[choice] ( identifier[add_indices] ,
identifier[size] = identifier[abs] ( identifier[diff] ),
identifier[replace] = keyword[False] )
identifier[keep] [ identifier[add] ]= keyword[True]
keyword[assert] identifier[np] . identifier[sum] ( identifier[keep] )== identifier[samples] , literal[string]
identifier[asd] = identifier[a] [ identifier[keep] ]
identifier[bsd] = identifier[b] [ identifier[keep] ]
keyword[assert] identifier[np] . identifier[allclose] ( identifier[a] [ identifier[keep] ], identifier[asd] , identifier[equal_nan] = keyword[True] ), literal[string]
keyword[assert] identifier[np] . identifier[allclose] ( identifier[b] [ identifier[keep] ], identifier[bsd] , identifier[equal_nan] = keyword[True] ), literal[string]
keyword[else] :
identifier[keep] = identifier[np] . identifier[ones_like] ( identifier[a] , identifier[dtype] = identifier[bool] )
identifier[asd] = identifier[a]
identifier[bsd] = identifier[b]
keyword[if] identifier[ret_idx] :
keyword[return] identifier[asd] , identifier[bsd] , identifier[keep]
keyword[else] :
keyword[return] identifier[asd] , identifier[bsd] | def downsample_grid(a, b, samples, ret_idx=False):
"""Content-based downsampling for faster visualization
The arrays `a` and `b` make up a 2D scatter plot with high
and low density values. This method takes out points at
indices with high density.
Parameters
----------
a, b: 1d ndarrays
The input arrays to downsample
samples: int
The desired number of samples
remove_invalid: bool
Remove nan and inf values before downsampling
ret_idx: bool
Also return a boolean array that corresponds to the
downsampled indices in `a` and `b`.
Returns
-------
dsa, dsb: 1d ndarrays of shape (samples,)
The arrays `a` and `b` downsampled by evenly selecting
points and pseudo-randomly adding or removing points
to match `samples`.
idx: 1d boolean array with same shape as `a`
Only returned if `ret_idx` is True.
A boolean array such that `a[idx] == dsa`
"""
# fixed random state for this method
rs = np.random.RandomState(seed=47).get_state()
samples = int(samples)
if samples and samples < a.size:
# The events to keep
keep = np.zeros_like(a, dtype=bool)
# 1. Produce evenly distributed samples
# Choosing grid-size:
# - large numbers tend to show actual structures of the sample,
# which is not desired for plotting
# - small numbers tend will not result in too few samples and,
# in order to reach the desired samples, the data must be
# upsampled again.
# 300 is about the size of the plot in marker sizes and yields
# good results.
grid_size = 300
xpx = norm(a, a, b) * grid_size
ypx = norm(b, b, a) * grid_size
# The events on the grid to process
toproc = np.ones((grid_size, grid_size), dtype=bool)
for ii in range(xpx.size):
xi = xpx[ii]
yi = ypx[ii]
# filter for overlapping events
if valid(xi, yi) and toproc[int(xi - 1), int(yi - 1)]:
toproc[int(xi - 1), int(yi - 1)] = False
# include event
keep[ii] = True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['ii']]
# 2. Make sure that we reach `samples` by adding or
# removing events.
diff = np.sum(keep) - samples
if diff > 0:
# Too many samples
rem_indices = np.where(keep)[0]
np.random.set_state(rs)
rem = np.random.choice(rem_indices, size=diff, replace=False)
keep[rem] = False # depends on [control=['if'], data=['diff']]
elif diff < 0:
# Not enough samples
add_indices = np.where(~keep)[0]
np.random.set_state(rs)
add = np.random.choice(add_indices, size=abs(diff), replace=False)
keep[add] = True # depends on [control=['if'], data=['diff']]
assert np.sum(keep) == samples, 'sanity check'
asd = a[keep]
bsd = b[keep]
assert np.allclose(a[keep], asd, equal_nan=True), 'sanity check'
assert np.allclose(b[keep], bsd, equal_nan=True), 'sanity check' # depends on [control=['if'], data=[]]
else:
keep = np.ones_like(a, dtype=bool)
asd = a
bsd = b
if ret_idx:
return (asd, bsd, keep) # depends on [control=['if'], data=[]]
else:
return (asd, bsd) |
def parse_url(self):
    """ Parses a URL of the form:

    - ws://host[:port][path]
    - wss://host[:port][path]
    - ws+unix:///path/to/my.socket

    Populates ``self.scheme``, ``self.host``, ``self.port`` and
    ``self.resource`` from ``self.url`` (and ``self.unix_socket_path``
    for the ``+unix`` schemes).  Raises ``ValueError`` for a missing
    hostname or an unsupported scheme.
    """
    self.scheme = None
    self.resource = None
    self.host = None
    self.port = None
    if self.url is None:
        return

    scheme, url = self.url.split(":", 1)

    parsed = urlsplit(url, scheme="http")
    if parsed.hostname:
        self.host = parsed.hostname
    elif '+unix' in scheme:
        # unix-domain sockets carry no hostname in the URL
        self.host = 'localhost'
    else:
        # Bug fix: use %-formatting. The original passed ``self.url`` as a
        # second constructor argument, making the exception message a tuple.
        raise ValueError("Invalid hostname from: %s" % self.url)

    if parsed.port:
        self.port = parsed.port

    if scheme == "ws":
        if not self.port:
            self.port = 8080
    elif scheme == "wss":
        if not self.port:
            self.port = 443
    elif scheme in ('ws+unix', 'wss+unix'):
        pass
    else:
        raise ValueError("Invalid scheme: %s" % scheme)

    if parsed.path:
        resource = parsed.path
    else:
        resource = "/"

    if '+unix' in scheme:
        # the "path" is actually the socket location; the websocket
        # resource defaults to the root
        self.unix_socket_path = resource
        resource = '/'

    if parsed.query:
        resource += "?" + parsed.query

    self.scheme = scheme
    self.resource = resource
constant[ Parses a URL of the form:
- ws://host[:port][path]
- wss://host[:port][path]
- ws+unix:///path/to/my.socket
]
name[self].scheme assign[=] constant[None]
name[self].resource assign[=] constant[None]
name[self].host assign[=] constant[None]
name[self].port assign[=] constant[None]
if compare[name[self].url is constant[None]] begin[:]
return[None]
<ast.Tuple object at 0x7da2044c2080> assign[=] call[name[self].url.split, parameter[constant[:], constant[1]]]
variable[parsed] assign[=] call[name[urlsplit], parameter[name[url]]]
if name[parsed].hostname begin[:]
name[self].host assign[=] name[parsed].hostname
if name[parsed].port begin[:]
name[self].port assign[=] name[parsed].port
if compare[name[scheme] equal[==] constant[ws]] begin[:]
if <ast.UnaryOp object at 0x7da2044c0160> begin[:]
name[self].port assign[=] constant[8080]
if name[parsed].path begin[:]
variable[resource] assign[=] name[parsed].path
if compare[constant[+unix] in name[scheme]] begin[:]
name[self].unix_socket_path assign[=] name[resource]
variable[resource] assign[=] constant[/]
if name[parsed].query begin[:]
<ast.AugAssign object at 0x7da18fe90d30>
name[self].scheme assign[=] name[scheme]
name[self].resource assign[=] name[resource] | keyword[def] identifier[parse_url] ( identifier[self] ):
literal[string]
identifier[self] . identifier[scheme] = keyword[None]
identifier[self] . identifier[resource] = keyword[None]
identifier[self] . identifier[host] = keyword[None]
identifier[self] . identifier[port] = keyword[None]
keyword[if] identifier[self] . identifier[url] keyword[is] keyword[None] :
keyword[return]
identifier[scheme] , identifier[url] = identifier[self] . identifier[url] . identifier[split] ( literal[string] , literal[int] )
identifier[parsed] = identifier[urlsplit] ( identifier[url] , identifier[scheme] = literal[string] )
keyword[if] identifier[parsed] . identifier[hostname] :
identifier[self] . identifier[host] = identifier[parsed] . identifier[hostname]
keyword[elif] literal[string] keyword[in] identifier[scheme] :
identifier[self] . identifier[host] = literal[string]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] , identifier[self] . identifier[url] )
keyword[if] identifier[parsed] . identifier[port] :
identifier[self] . identifier[port] = identifier[parsed] . identifier[port]
keyword[if] identifier[scheme] == literal[string] :
keyword[if] keyword[not] identifier[self] . identifier[port] :
identifier[self] . identifier[port] = literal[int]
keyword[elif] identifier[scheme] == literal[string] :
keyword[if] keyword[not] identifier[self] . identifier[port] :
identifier[self] . identifier[port] = literal[int]
keyword[elif] identifier[scheme] keyword[in] ( literal[string] , literal[string] ):
keyword[pass]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[scheme] )
keyword[if] identifier[parsed] . identifier[path] :
identifier[resource] = identifier[parsed] . identifier[path]
keyword[else] :
identifier[resource] = literal[string]
keyword[if] literal[string] keyword[in] identifier[scheme] :
identifier[self] . identifier[unix_socket_path] = identifier[resource]
identifier[resource] = literal[string]
keyword[if] identifier[parsed] . identifier[query] :
identifier[resource] += literal[string] + identifier[parsed] . identifier[query]
identifier[self] . identifier[scheme] = identifier[scheme]
identifier[self] . identifier[resource] = identifier[resource] | def parse_url(self):
""" Parses a URL of the form:
- ws://host[:port][path]
- wss://host[:port][path]
- ws+unix:///path/to/my.socket
"""
self.scheme = None
self.resource = None
self.host = None
self.port = None
if self.url is None:
return # depends on [control=['if'], data=[]]
(scheme, url) = self.url.split(':', 1)
parsed = urlsplit(url, scheme='http')
if parsed.hostname:
self.host = parsed.hostname # depends on [control=['if'], data=[]]
elif '+unix' in scheme:
self.host = 'localhost' # depends on [control=['if'], data=[]]
else:
raise ValueError('Invalid hostname from: %s', self.url)
if parsed.port:
self.port = parsed.port # depends on [control=['if'], data=[]]
if scheme == 'ws':
if not self.port:
self.port = 8080 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif scheme == 'wss':
if not self.port:
self.port = 443 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif scheme in ('ws+unix', 'wss+unix'):
pass # depends on [control=['if'], data=[]]
else:
raise ValueError('Invalid scheme: %s' % scheme)
if parsed.path:
resource = parsed.path # depends on [control=['if'], data=[]]
else:
resource = '/'
if '+unix' in scheme:
self.unix_socket_path = resource
resource = '/' # depends on [control=['if'], data=[]]
if parsed.query:
resource += '?' + parsed.query # depends on [control=['if'], data=[]]
self.scheme = scheme
self.resource = resource |
def _pre_process_line(line, comment_markers=_COMMENT_MARKERS):
    """
    Preprocess a line in properties; drop empty and comment-only lines.

    :param line:
        A string not starting w/ any white spaces and ending w/ line breaks.
        It may be empty. see also: :func:`load`.
    :param comment_markers: Comment markers, e.g. '#' (hash)
    :return: The line unchanged, or None if it is empty or a comment line

    >>> _pre_process_line('') is None
    True
    >>> s0 = "calendar.japanese.type: LocalGregorianCalendar"
    >>> _pre_process_line("# " + s0) is None
    True
    >>> _pre_process_line("! " + s0) is None
    True
    >>> _pre_process_line(s0 + "# comment")
    'calendar.japanese.type: LocalGregorianCalendar# comment'
    """
    if not line:
        return None
    # A line is discarded only when it *starts* with a comment marker;
    # inline (trailing) comments are preserved, as the last doctest shows.
    # (The original additionally pre-checked ``any(c in line ...)``, which
    # is implied by ``startswith`` and therefore redundant.)
    if line.startswith(comment_markers):
        return None
    return line
constant[
Preprocess a line in properties; strip comments, etc.
:param line:
A string not starting w/ any white spaces and ending w/ line breaks.
It may be empty. see also: :func:`load`.
:param comment_markers: Comment markers, e.g. '#' (hash)
>>> _pre_process_line('') is None
True
>>> s0 = "calendar.japanese.type: LocalGregorianCalendar"
>>> _pre_process_line("# " + s0) is None
True
>>> _pre_process_line("! " + s0) is None
True
>>> _pre_process_line(s0 + "# comment")
'calendar.japanese.type: LocalGregorianCalendar# comment'
]
if <ast.UnaryOp object at 0x7da18fe90a00> begin[:]
return[constant[None]]
if call[name[any], parameter[<ast.GeneratorExp object at 0x7da18fe911b0>]] begin[:]
if call[name[line].startswith, parameter[name[comment_markers]]] begin[:]
return[constant[None]]
return[name[line]] | keyword[def] identifier[_pre_process_line] ( identifier[line] , identifier[comment_markers] = identifier[_COMMENT_MARKERS] ):
literal[string]
keyword[if] keyword[not] identifier[line] :
keyword[return] keyword[None]
keyword[if] identifier[any] ( identifier[c] keyword[in] identifier[line] keyword[for] identifier[c] keyword[in] identifier[comment_markers] ):
keyword[if] identifier[line] . identifier[startswith] ( identifier[comment_markers] ):
keyword[return] keyword[None]
keyword[return] identifier[line] | def _pre_process_line(line, comment_markers=_COMMENT_MARKERS):
"""
Preprocess a line in properties; strip comments, etc.
:param line:
A string not starting w/ any white spaces and ending w/ line breaks.
It may be empty. see also: :func:`load`.
:param comment_markers: Comment markers, e.g. '#' (hash)
>>> _pre_process_line('') is None
True
>>> s0 = "calendar.japanese.type: LocalGregorianCalendar"
>>> _pre_process_line("# " + s0) is None
True
>>> _pre_process_line("! " + s0) is None
True
>>> _pre_process_line(s0 + "# comment")
'calendar.japanese.type: LocalGregorianCalendar# comment'
"""
if not line:
return None # depends on [control=['if'], data=[]]
if any((c in line for c in comment_markers)):
if line.startswith(comment_markers):
return None # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return line |
def eeg_add_events(raw, events_channel, conditions=None, treshold="auto", cut="higher", time_index=None, number="all", after=0, before=None, min_duration=1):
    """
    Find events on a channel, convert them into an MNE compatible format, and add them to the raw data.

    Parameters
    ----------
    raw : mne.io.Raw
        Raw EEG data.
    events_channel : str or array
        Name of the trigger channel if in the raw, or array of equal length if externally supplied.
    conditions : list
        List containing the stimuli types/conditions.
    treshold : float
        The treshold value by which to select the events. If "auto", takes the value between the max and the min.
        (Note: the parameter name is kept misspelled for backward compatibility.)
    cut : str
        "higher" or "lower", define the events as above or under the treshold. For photosensors, a white screen corresponds usually to higher values. Therefore, if your events were signalled by a black colour, events values would be the lower ones, and you should set the cut to "lower".
    time_index : array or None
        Add a corresponding datetime index, will return an additional array with the onsets as datetimes.
    number : str or int
        How many events should it select.
    after : int
        If number different than "all", then at what time should it start selecting the events.
    before : int
        If number different than "all", before what time should it select the events.
    min_duration : int
        The minimum duration of an event (in timepoints).

    Returns
    ----------
    (raw, events, event_id) : tuple
        The raw file with events, the mne-formatted events and event_id.

    Example
    ----------
    >>> import neurokit as nk
    >>>
    >>> raw, events, event_id = nk.eeg_add_events(raw, events_channel, conditions)

    Notes
    ----------
    *Authors*

    - `Dominique Makowski <https://dominiquemakowski.github.io/>`_

    *Dependencies*

    - pandas

    *See Also*

    - mne: http://martinos.org/mne/dev/index.html

    References
    -----------
    - None
    """
    # Extract the events_channel from raw if needed
    if isinstance(events_channel, str):
        try:
            events_channel = eeg_select_channels(raw, events_channel)
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit are not swallowed; the library's convention is to
            # print a diagnostic rather than raise.
            print("NeuroKit error: eeg_add_events(): Wrong events_channel name provided.")

    # Find event onsets
    events = find_events(events_channel, treshold=treshold, cut=cut, time_index=time_index, number=number, after=after, before=before, min_duration=min_duration)

    # Create mne compatible events
    events, event_id = eeg_create_mne_events(events["onsets"], conditions)

    # Add them
    raw.add_events(events)

    return(raw, events, event_id)
constant[
Find events on a channel, convert them into an MNE compatible format, and add them to the raw data.
Parameters
----------
raw : mne.io.Raw
Raw EEG data.
events_channel : str or array
Name of the trigger channel if in the raw, or array of equal length if externally supplied.
conditions : list
List containing the stimuli types/conditions.
treshold : float
The treshold value by which to select the events. If "auto", takes the value between the max and the min.
cut : str
"higher" or "lower", define the events as above or under the treshold. For photosensors, a white screen corresponds usually to higher values. Therefore, if your events were signalled by a black colour, events values would be the lower ones, and you should set the cut to "lower".
Add a corresponding datetime index, will return an addional array with the onsets as datetimes.
number : str or int
How many events should it select.
after : int
If number different than "all", then at what time should it start selecting the events.
before : int
If number different than "all", before what time should it select the events.
min_duration : int
The minimum duration of an event (in timepoints).
Returns
----------
(raw, events, event_id) : tuple
The raw file with events, the mne-formatted events and event_id.
Example
----------
>>> import neurokit as nk
>>>
>>> raw, events, event_id = nk.eeg_add_events(raw, events_channel, conditions)
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- pandas
*See Also*
- mne: http://martinos.org/mne/dev/index.html
References
-----------
- None
]
if call[name[isinstance], parameter[name[events_channel], name[str]]] begin[:]
<ast.Try object at 0x7da18f8110f0>
variable[events] assign[=] call[name[find_events], parameter[name[events_channel]]]
<ast.Tuple object at 0x7da18f8101c0> assign[=] call[name[eeg_create_mne_events], parameter[call[name[events]][constant[onsets]], name[conditions]]]
call[name[raw].add_events, parameter[name[events]]]
return[tuple[[<ast.Name object at 0x7da2041d9d80>, <ast.Name object at 0x7da2041db280>, <ast.Name object at 0x7da2041da8f0>]]] | keyword[def] identifier[eeg_add_events] ( identifier[raw] , identifier[events_channel] , identifier[conditions] = keyword[None] , identifier[treshold] = literal[string] , identifier[cut] = literal[string] , identifier[time_index] = keyword[None] , identifier[number] = literal[string] , identifier[after] = literal[int] , identifier[before] = keyword[None] , identifier[min_duration] = literal[int] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[events_channel] , identifier[str] ):
keyword[try] :
identifier[events_channel] = identifier[eeg_select_channels] ( identifier[raw] , identifier[events_channel] )
keyword[except] :
identifier[print] ( literal[string] )
identifier[events] = identifier[find_events] ( identifier[events_channel] , identifier[treshold] = identifier[treshold] , identifier[cut] = identifier[cut] , identifier[time_index] = identifier[time_index] , identifier[number] = identifier[number] , identifier[after] = identifier[after] , identifier[before] = identifier[before] , identifier[min_duration] = identifier[min_duration] )
identifier[events] , identifier[event_id] = identifier[eeg_create_mne_events] ( identifier[events] [ literal[string] ], identifier[conditions] )
identifier[raw] . identifier[add_events] ( identifier[events] )
keyword[return] ( identifier[raw] , identifier[events] , identifier[event_id] ) | def eeg_add_events(raw, events_channel, conditions=None, treshold='auto', cut='higher', time_index=None, number='all', after=0, before=None, min_duration=1):
"""
Find events on a channel, convert them into an MNE compatible format, and add them to the raw data.
Parameters
----------
raw : mne.io.Raw
Raw EEG data.
events_channel : str or array
Name of the trigger channel if in the raw, or array of equal length if externally supplied.
conditions : list
List containing the stimuli types/conditions.
treshold : float
The treshold value by which to select the events. If "auto", takes the value between the max and the min.
cut : str
"higher" or "lower", define the events as above or under the treshold. For photosensors, a white screen corresponds usually to higher values. Therefore, if your events were signalled by a black colour, events values would be the lower ones, and you should set the cut to "lower".
Add a corresponding datetime index, will return an addional array with the onsets as datetimes.
number : str or int
How many events should it select.
after : int
If number different than "all", then at what time should it start selecting the events.
before : int
If number different than "all", before what time should it select the events.
min_duration : int
The minimum duration of an event (in timepoints).
Returns
----------
(raw, events, event_id) : tuple
The raw file with events, the mne-formatted events and event_id.
Example
----------
>>> import neurokit as nk
>>>
>>> raw, events, event_id = nk.eeg_add_events(raw, events_channel, conditions)
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- pandas
*See Also*
- mne: http://martinos.org/mne/dev/index.html
References
-----------
- None
"""
# Extract the events_channel from raw if needed
if isinstance(events_channel, str):
try:
events_channel = eeg_select_channels(raw, events_channel) # depends on [control=['try'], data=[]]
except:
print('NeuroKit error: eeg_add_events(): Wrong events_channel name provided.') # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
# Find event onsets
events = find_events(events_channel, treshold=treshold, cut=cut, time_index=time_index, number=number, after=after, before=before, min_duration=min_duration)
# Create mne compatible events
(events, event_id) = eeg_create_mne_events(events['onsets'], conditions)
# Add them
raw.add_events(events)
return (raw, events, event_id) |
def load_resources(bucket, prefix, region, account_config, accounts,
                   assume, start, end, resources, store, db, verbose, debug):
    """load resources into resource database.

    Downloads per-account/per-resource record files in parallel and
    indexes them into ``db``.  ``assume`` is accepted for CLI
    compatibility but not used here.
    """
    logging.basicConfig(level=(verbose and logging.DEBUG or logging.INFO))
    logging.getLogger('botocore').setLevel(logging.WARNING)
    logging.getLogger('s3transfer').setLevel(logging.WARNING)

    start = date_parse(start)
    end = date_parse(end)

    if not resources:
        resources = ['NetworkInterface', 'Instance', 'LoadBalancer']

    # Map account_id -> account config, optionally filtered by name or id.
    account_map = {}
    data = yaml.safe_load(account_config.read())
    for a in data.get('accounts', ()):
        if accounts and (a['name'] in accounts or a['account_id'] in accounts):
            account_map[a['account_id']] = a
        elif not accounts:
            account_map[a['account_id']] = a
    account_ids = list(account_map)

    executor = ProcessPoolExecutor
    if debug:
        # run in-process for easier debugging
        from c7n.executor import MainThreadExecutor
        MainThreadExecutor.c7n_async = False
        executor = MainThreadExecutor

    stats = Counter()
    t = time.time()
    with executor(max_workers=multiprocessing.cpu_count()) as w:
        futures = {}
        for a in account_ids:
            for r in resources:
                futures[w.submit(
                    process_account_resources, a, bucket, prefix,
                    region, store, start, end, r)] = (a, r)

        for f in as_completed(futures):
            a, r = futures[f]
            if f.exception():
                log.error("account:%s error:%s", a, f.exception())
                continue
            files, dl_stats = f.result()
            # Bug fix: resolve the indexer for *this* future's resource
            # type.  Previously the indexer was bound inside the submission
            # loop, so the indexer of the last submitted resource type was
            # applied to every completed future.
            indexer = RESOURCE_FILE_INDEXERS[r]
            idx_stats = indexer(db, resource_config_iter(files))
            log.info(
                "loaded account:%s files:%d bytes:%s events:%d resources:%d idx-time:%d dl-time:%d",
                account_map[a]['name'], len(files),
                human_size(dl_stats['DownloadSize'] + dl_stats['CacheSize']),
                idx_stats['Records'],
                idx_stats['RowCount'],
                idx_stats['IndexTime'],
                dl_stats['FetchTime'])
            stats.update(dl_stats)
            stats.update(idx_stats)

    log.info("Loaded %d resources across %d accounts in %0.2f",
             stats['RowCount'], len(account_ids), time.time() - t)
constant[load resources into resource database.]
call[name[logging].basicConfig, parameter[]]
call[call[name[logging].getLogger, parameter[constant[botocore]]].setLevel, parameter[name[logging].WARNING]]
call[call[name[logging].getLogger, parameter[constant[s3transfer]]].setLevel, parameter[name[logging].WARNING]]
variable[start] assign[=] call[name[date_parse], parameter[name[start]]]
variable[end] assign[=] call[name[date_parse], parameter[name[end]]]
if <ast.UnaryOp object at 0x7da1b1f385b0> begin[:]
variable[resources] assign[=] list[[<ast.Constant object at 0x7da1b1f39fc0>, <ast.Constant object at 0x7da1b1f3b4c0>, <ast.Constant object at 0x7da1b1f39480>]]
variable[account_map] assign[=] dictionary[[], []]
variable[data] assign[=] call[name[yaml].safe_load, parameter[call[name[account_config].read, parameter[]]]]
for taget[name[a]] in starred[call[name[data].get, parameter[constant[accounts], tuple[[]]]]] begin[:]
if <ast.BoolOp object at 0x7da1b1f39f30> begin[:]
call[name[account_map]][call[name[a]][constant[account_id]]] assign[=] name[a]
variable[account_ids] assign[=] call[name[list], parameter[name[account_map]]]
variable[executor] assign[=] name[ProcessPoolExecutor]
if name[debug] begin[:]
from relative_module[c7n.executor] import module[MainThreadExecutor]
name[MainThreadExecutor].c7n_async assign[=] constant[False]
variable[executor] assign[=] name[MainThreadExecutor]
variable[stats] assign[=] call[name[Counter], parameter[]]
variable[t] assign[=] call[name[time].time, parameter[]]
with call[name[executor], parameter[]] begin[:]
variable[futures] assign[=] dictionary[[], []]
for taget[name[a]] in starred[name[account_ids]] begin[:]
for taget[name[r]] in starred[name[resources]] begin[:]
call[name[futures]][call[name[w].submit, parameter[name[process_account_resources], name[a], name[bucket], name[prefix], name[region], name[store], name[start], name[end], name[r]]]] assign[=] tuple[[<ast.Name object at 0x7da2044c1bd0>, <ast.Name object at 0x7da2044c3070>]]
variable[indexer] assign[=] call[name[RESOURCE_FILE_INDEXERS]][name[r]]
for taget[name[f]] in starred[call[name[as_completed], parameter[name[futures]]]] begin[:]
<ast.Tuple object at 0x7da2044c02b0> assign[=] call[name[futures]][name[f]]
if call[name[f].exception, parameter[]] begin[:]
call[name[log].error, parameter[constant[account:%s error:%s], name[a], call[name[f].exception, parameter[]]]]
continue
<ast.Tuple object at 0x7da18f58d240> assign[=] call[name[f].result, parameter[]]
variable[idx_stats] assign[=] call[name[indexer], parameter[name[db], call[name[resource_config_iter], parameter[name[files]]]]]
call[name[log].info, parameter[constant[loaded account:%s files:%d bytes:%s events:%d resources:%d idx-time:%d dl-time:%d], call[call[name[account_map]][name[a]]][constant[name]], call[name[len], parameter[name[files]]], call[name[human_size], parameter[binary_operation[call[name[dl_stats]][constant[DownloadSize]] + call[name[dl_stats]][constant[CacheSize]]]]], call[name[idx_stats]][constant[Records]], call[name[idx_stats]][constant[RowCount]], call[name[idx_stats]][constant[IndexTime]], call[name[dl_stats]][constant[FetchTime]]]]
call[name[stats].update, parameter[name[dl_stats]]]
call[name[stats].update, parameter[name[idx_stats]]]
call[name[log].info, parameter[constant[Loaded %d resources across %d accounts in %0.2f], call[name[stats]][constant[RowCount]], call[name[len], parameter[name[account_ids]]], binary_operation[call[name[time].time, parameter[]] - name[t]]]] | keyword[def] identifier[load_resources] ( identifier[bucket] , identifier[prefix] , identifier[region] , identifier[account_config] , identifier[accounts] ,
identifier[assume] , identifier[start] , identifier[end] , identifier[resources] , identifier[store] , identifier[db] , identifier[verbose] , identifier[debug] ):
literal[string]
identifier[logging] . identifier[basicConfig] ( identifier[level] =( identifier[verbose] keyword[and] identifier[logging] . identifier[DEBUG] keyword[or] identifier[logging] . identifier[INFO] ))
identifier[logging] . identifier[getLogger] ( literal[string] ). identifier[setLevel] ( identifier[logging] . identifier[WARNING] )
identifier[logging] . identifier[getLogger] ( literal[string] ). identifier[setLevel] ( identifier[logging] . identifier[WARNING] )
identifier[start] = identifier[date_parse] ( identifier[start] )
identifier[end] = identifier[date_parse] ( identifier[end] )
keyword[if] keyword[not] identifier[resources] :
identifier[resources] =[ literal[string] , literal[string] , literal[string] ]
identifier[account_map] ={}
identifier[data] = identifier[yaml] . identifier[safe_load] ( identifier[account_config] . identifier[read] ())
keyword[for] identifier[a] keyword[in] identifier[data] . identifier[get] ( literal[string] ,()):
keyword[if] identifier[accounts] keyword[and] ( identifier[a] [ literal[string] ] keyword[in] identifier[accounts] keyword[or] identifier[a] [ literal[string] ] keyword[in] identifier[accounts] ):
identifier[account_map] [ identifier[a] [ literal[string] ]]= identifier[a]
keyword[elif] keyword[not] identifier[accounts] :
identifier[account_map] [ identifier[a] [ literal[string] ]]= identifier[a]
identifier[account_ids] = identifier[list] ( identifier[account_map] )
identifier[executor] = identifier[ProcessPoolExecutor]
keyword[if] identifier[debug] :
keyword[from] identifier[c7n] . identifier[executor] keyword[import] identifier[MainThreadExecutor]
identifier[MainThreadExecutor] . identifier[c7n_async] = keyword[False]
identifier[executor] = identifier[MainThreadExecutor]
identifier[stats] = identifier[Counter] ()
identifier[t] = identifier[time] . identifier[time] ()
keyword[with] identifier[executor] ( identifier[max_workers] = identifier[multiprocessing] . identifier[cpu_count] ()) keyword[as] identifier[w] :
identifier[futures] ={}
keyword[for] identifier[a] keyword[in] identifier[account_ids] :
keyword[for] identifier[r] keyword[in] identifier[resources] :
identifier[futures] [ identifier[w] . identifier[submit] (
identifier[process_account_resources] , identifier[a] , identifier[bucket] , identifier[prefix] ,
identifier[region] , identifier[store] , identifier[start] , identifier[end] , identifier[r] )]=( identifier[a] , identifier[r] )
identifier[indexer] = identifier[RESOURCE_FILE_INDEXERS] [ identifier[r] ]
keyword[for] identifier[f] keyword[in] identifier[as_completed] ( identifier[futures] ):
identifier[a] , identifier[r] = identifier[futures] [ identifier[f] ]
keyword[if] identifier[f] . identifier[exception] ():
identifier[log] . identifier[error] ( literal[string] , identifier[a] , identifier[f] . identifier[exception] ())
keyword[continue]
identifier[files] , identifier[dl_stats] = identifier[f] . identifier[result] ()
identifier[idx_stats] = identifier[indexer] ( identifier[db] , identifier[resource_config_iter] ( identifier[files] ))
identifier[log] . identifier[info] (
literal[string] ,
identifier[account_map] [ identifier[a] ][ literal[string] ], identifier[len] ( identifier[files] ),
identifier[human_size] ( identifier[dl_stats] [ literal[string] ]+ identifier[dl_stats] [ literal[string] ]),
identifier[idx_stats] [ literal[string] ],
identifier[idx_stats] [ literal[string] ],
identifier[idx_stats] [ literal[string] ],
identifier[dl_stats] [ literal[string] ])
identifier[stats] . identifier[update] ( identifier[dl_stats] )
identifier[stats] . identifier[update] ( identifier[idx_stats] )
identifier[log] . identifier[info] ( literal[string] ,
identifier[stats] [ literal[string] ], identifier[len] ( identifier[account_ids] ), identifier[time] . identifier[time] ()- identifier[t] ) | def load_resources(bucket, prefix, region, account_config, accounts, assume, start, end, resources, store, db, verbose, debug):
"""load resources into resource database."""
logging.basicConfig(level=verbose and logging.DEBUG or logging.INFO)
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('s3transfer').setLevel(logging.WARNING)
start = date_parse(start)
end = date_parse(end)
if not resources:
resources = ['NetworkInterface', 'Instance', 'LoadBalancer'] # depends on [control=['if'], data=[]]
account_map = {}
data = yaml.safe_load(account_config.read())
for a in data.get('accounts', ()):
if accounts and (a['name'] in accounts or a['account_id'] in accounts):
account_map[a['account_id']] = a # depends on [control=['if'], data=[]]
elif not accounts:
account_map[a['account_id']] = a # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['a']]
account_ids = list(account_map)
executor = ProcessPoolExecutor
if debug:
from c7n.executor import MainThreadExecutor
MainThreadExecutor.c7n_async = False
executor = MainThreadExecutor # depends on [control=['if'], data=[]]
stats = Counter()
t = time.time()
with executor(max_workers=multiprocessing.cpu_count()) as w:
futures = {}
for a in account_ids:
for r in resources:
futures[w.submit(process_account_resources, a, bucket, prefix, region, store, start, end, r)] = (a, r) # depends on [control=['for'], data=['r']] # depends on [control=['for'], data=['a']]
indexer = RESOURCE_FILE_INDEXERS[r]
for f in as_completed(futures):
(a, r) = futures[f]
if f.exception():
log.error('account:%s error:%s', a, f.exception())
continue # depends on [control=['if'], data=[]]
(files, dl_stats) = f.result()
idx_stats = indexer(db, resource_config_iter(files))
log.info('loaded account:%s files:%d bytes:%s events:%d resources:%d idx-time:%d dl-time:%d', account_map[a]['name'], len(files), human_size(dl_stats['DownloadSize'] + dl_stats['CacheSize']), idx_stats['Records'], idx_stats['RowCount'], idx_stats['IndexTime'], dl_stats['FetchTime'])
stats.update(dl_stats)
stats.update(idx_stats) # depends on [control=['for'], data=['f']] # depends on [control=['with'], data=['w']]
log.info('Loaded %d resources across %d accounts in %0.2f', stats['RowCount'], len(account_ids), time.time() - t) |
def _create_and_add_parameters(params):
    '''
    Parses the configuration and creates Parameter instances.
    '''
    global _current_parameter
    if _is_simple_type(params):
        # A single simple value maps straight to one SimpleParameter.
        _current_parameter = SimpleParameter(params)
        _current_option.add_parameter(_current_parameter)
        return
    # Otherwise params must be a list of parameter definitions.
    for item in params:
        if _is_simple_type(item):
            _current_parameter = SimpleParameter(item)
        else:
            # Complex entries become TypedParameter objects that are
            # filled in by the typed-parameter parser.
            _current_parameter = TypedParameter()
            _parse_typed_parameter(item)
        _current_option.add_parameter(_current_parameter)
constant[
Parses the configuration and creates Parameter instances.
]
<ast.Global object at 0x7da18f00fb80>
if call[name[_is_simple_type], parameter[name[params]]] begin[:]
variable[_current_parameter] assign[=] call[name[SimpleParameter], parameter[name[params]]]
call[name[_current_option].add_parameter, parameter[name[_current_parameter]]] | keyword[def] identifier[_create_and_add_parameters] ( identifier[params] ):
literal[string]
keyword[global] identifier[_current_parameter]
keyword[if] identifier[_is_simple_type] ( identifier[params] ):
identifier[_current_parameter] = identifier[SimpleParameter] ( identifier[params] )
identifier[_current_option] . identifier[add_parameter] ( identifier[_current_parameter] )
keyword[else] :
keyword[for] identifier[i] keyword[in] identifier[params] :
keyword[if] identifier[_is_simple_type] ( identifier[i] ):
identifier[_current_parameter] = identifier[SimpleParameter] ( identifier[i] )
keyword[else] :
identifier[_current_parameter] = identifier[TypedParameter] ()
identifier[_parse_typed_parameter] ( identifier[i] )
identifier[_current_option] . identifier[add_parameter] ( identifier[_current_parameter] ) | def _create_and_add_parameters(params):
"""
Parses the configuration and creates Parameter instances.
"""
global _current_parameter
if _is_simple_type(params):
_current_parameter = SimpleParameter(params)
_current_option.add_parameter(_current_parameter) # depends on [control=['if'], data=[]]
else:
# must be a list
for i in params:
if _is_simple_type(i):
_current_parameter = SimpleParameter(i) # depends on [control=['if'], data=[]]
else:
_current_parameter = TypedParameter()
_parse_typed_parameter(i)
_current_option.add_parameter(_current_parameter) # depends on [control=['for'], data=['i']] |
def _get_phenotypicseries_parents(entry, graph):
    """
    Extract the phenotypic series parent relationship out of the entry
    :param entry:
    :return:
    """
    model = Model(graph)
    omim_curie = 'OMIM:' + str(entry['mimNumber'])
    # Collect every phenotypic series number referenced by this entry.
    series_numbers = []
    if entry.get('phenotypicSeriesExists') is True:
        for pheno in entry.get('phenotypeMapList', []):
            series_numbers.extend(
                pheno['phenotypeMap']['phenotypicSeriesNumber'].split(','))
        gene_map = entry.get('geneMap', {})
        for pheno in gene_map.get('phenotypeMapList', []):
            pheno_map = pheno['phenotypeMap']
            if 'phenotypicSeriesNumber' in pheno_map:
                series_numbers.extend(
                    pheno_map['phenotypicSeriesNumber'].split(','))
    # Declare each series class and link this OMIM entry beneath it.
    for number in series_numbers:
        series_curie = 'OMIMPS:' + number
        model.addClassToGraph(series_curie, None)
        model.addSubClass(omim_curie, series_curie)
constant[
Extract the phenotypic series parent relationship out of the entry
:param entry:
:return:
]
variable[model] assign[=] call[name[Model], parameter[name[graph]]]
variable[omim_num] assign[=] call[name[str], parameter[call[name[entry]][constant[mimNumber]]]]
variable[omim_curie] assign[=] binary_operation[constant[OMIM:] + name[omim_num]]
variable[serieslist] assign[=] list[[]]
if compare[constant[phenotypicSeriesExists] in name[entry]] begin[:]
if compare[call[name[entry]][constant[phenotypicSeriesExists]] is constant[True]] begin[:]
if compare[constant[phenotypeMapList] in name[entry]] begin[:]
variable[phenolist] assign[=] call[name[entry]][constant[phenotypeMapList]]
for taget[name[p]] in starred[name[phenolist]] begin[:]
for taget[name[q]] in starred[call[call[call[name[p]][constant[phenotypeMap]]][constant[phenotypicSeriesNumber]].split, parameter[constant[,]]]] begin[:]
call[name[serieslist].append, parameter[name[q]]]
if <ast.BoolOp object at 0x7da20c76d840> begin[:]
variable[phenolist] assign[=] call[call[name[entry]][constant[geneMap]]][constant[phenotypeMapList]]
for taget[name[p]] in starred[name[phenolist]] begin[:]
if compare[constant[phenotypicSeriesNumber] in call[name[p]][constant[phenotypeMap]]] begin[:]
for taget[name[q]] in starred[call[call[call[name[p]][constant[phenotypeMap]]][constant[phenotypicSeriesNumber]].split, parameter[constant[,]]]] begin[:]
call[name[serieslist].append, parameter[name[q]]]
for taget[name[ser]] in starred[name[serieslist]] begin[:]
variable[series_id] assign[=] binary_operation[constant[OMIMPS:] + name[ser]]
call[name[model].addClassToGraph, parameter[name[series_id], constant[None]]]
call[name[model].addSubClass, parameter[name[omim_curie], name[series_id]]] | keyword[def] identifier[_get_phenotypicseries_parents] ( identifier[entry] , identifier[graph] ):
literal[string]
identifier[model] = identifier[Model] ( identifier[graph] )
identifier[omim_num] = identifier[str] ( identifier[entry] [ literal[string] ])
identifier[omim_curie] = literal[string] + identifier[omim_num]
identifier[serieslist] =[]
keyword[if] literal[string] keyword[in] identifier[entry] :
keyword[if] identifier[entry] [ literal[string] ] keyword[is] keyword[True] :
keyword[if] literal[string] keyword[in] identifier[entry] :
identifier[phenolist] = identifier[entry] [ literal[string] ]
keyword[for] identifier[p] keyword[in] identifier[phenolist] :
keyword[for] identifier[q] keyword[in] identifier[p] [ literal[string] ][ literal[string] ]. identifier[split] ( literal[string] ):
identifier[serieslist] . identifier[append] ( identifier[q] )
keyword[if] literal[string] keyword[in] identifier[entry] keyword[and] literal[string] keyword[in] identifier[entry] [ literal[string] ]:
identifier[phenolist] = identifier[entry] [ literal[string] ][ literal[string] ]
keyword[for] identifier[p] keyword[in] identifier[phenolist] :
keyword[if] literal[string] keyword[in] identifier[p] [ literal[string] ]:
keyword[for] identifier[q] keyword[in] identifier[p] [ literal[string] ][ literal[string] ]. identifier[split] (
literal[string] ):
identifier[serieslist] . identifier[append] ( identifier[q] )
keyword[for] identifier[ser] keyword[in] identifier[serieslist] :
identifier[series_id] = literal[string] + identifier[ser]
identifier[model] . identifier[addClassToGraph] ( identifier[series_id] , keyword[None] )
identifier[model] . identifier[addSubClass] ( identifier[omim_curie] , identifier[series_id] ) | def _get_phenotypicseries_parents(entry, graph):
"""
Extract the phenotypic series parent relationship out of the entry
:param entry:
:return:
"""
model = Model(graph)
omim_num = str(entry['mimNumber'])
omim_curie = 'OMIM:' + omim_num
# the phenotypic series mappings
serieslist = []
if 'phenotypicSeriesExists' in entry:
if entry['phenotypicSeriesExists'] is True:
if 'phenotypeMapList' in entry:
phenolist = entry['phenotypeMapList']
for p in phenolist:
for q in p['phenotypeMap']['phenotypicSeriesNumber'].split(','):
serieslist.append(q) # depends on [control=['for'], data=['q']] # depends on [control=['for'], data=['p']] # depends on [control=['if'], data=['entry']]
if 'geneMap' in entry and 'phenotypeMapList' in entry['geneMap']:
phenolist = entry['geneMap']['phenotypeMapList']
for p in phenolist:
if 'phenotypicSeriesNumber' in p['phenotypeMap']:
for q in p['phenotypeMap']['phenotypicSeriesNumber'].split(','):
serieslist.append(q) # depends on [control=['for'], data=['q']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['p']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['entry']]
# add this entry as a subclass of the series entry
for ser in serieslist:
series_id = 'OMIMPS:' + ser
model.addClassToGraph(series_id, None)
model.addSubClass(omim_curie, series_id) # depends on [control=['for'], data=['ser']] |
def add(name,
        uid=None,
        gid=None,
        groups=None,
        home=None,
        shell=None,
        unique=True,
        fullname='',
        roomnumber='',
        workphone='',
        homephone='',
        createhome=True,
        loginclass=None,
        **kwargs):
    '''
    Add a user to the minion
    CLI Example:
    .. code-block:: bash
        salt '*' user.add name <uid> <gid> <groups> <home> <shell>
    '''
    kwargs = salt.utils.args.clean_kwargs(**kwargs)
    # 'system' is accepted for cross-platform parity but has no effect here.
    if salt.utils.data.is_true(kwargs.pop('system', False)):
        log.warning('pw_user module does not support the \'system\' argument')
    if kwargs:
        log.warning('Invalid kwargs passed to user.add')
    # Allow groups to be given as a comma-separated string.
    if isinstance(groups, six.string_types):
        groups = groups.split(',')
    cmd = ['pw', 'useradd']
    # Simple truthy-valued flags share one loop; order matters for pw(8).
    for flag, value in (('-u', uid), ('-g', gid)):
        if value:
            cmd += [flag, value]
    if groups:
        cmd += ['-G', ','.join(groups)]
    if home is not None:
        cmd += ['-d', home]
    if createhome is True:
        cmd += ['-m']
    if loginclass:
        cmd += ['-L', loginclass]
    if shell:
        cmd += ['-s', shell]
    if not salt.utils.data.is_true(unique):
        # -o permits a non-unique (duplicate) UID.
        cmd += ['-o']
    cmd += ['-c', _build_gecos({'fullname': fullname,
                                'roomnumber': roomnumber,
                                'workphone': workphone,
                                'homephone': homephone})]
    cmd += ['-n', name]
    return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
constant[
Add a user to the minion
CLI Example:
.. code-block:: bash
salt '*' user.add name <uid> <gid> <groups> <home> <shell>
]
variable[kwargs] assign[=] call[name[salt].utils.args.clean_kwargs, parameter[]]
if call[name[salt].utils.data.is_true, parameter[call[name[kwargs].pop, parameter[constant[system], constant[False]]]]] begin[:]
call[name[log].warning, parameter[constant[pw_user module does not support the 'system' argument]]]
if name[kwargs] begin[:]
call[name[log].warning, parameter[constant[Invalid kwargs passed to user.add]]]
if call[name[isinstance], parameter[name[groups], name[six].string_types]] begin[:]
variable[groups] assign[=] call[name[groups].split, parameter[constant[,]]]
variable[cmd] assign[=] list[[<ast.Constant object at 0x7da1b20228f0>, <ast.Constant object at 0x7da1b20229b0>]]
if name[uid] begin[:]
call[name[cmd].extend, parameter[list[[<ast.Constant object at 0x7da1b2022aa0>, <ast.Name object at 0x7da1b2022860>]]]]
if name[gid] begin[:]
call[name[cmd].extend, parameter[list[[<ast.Constant object at 0x7da1b2021ff0>, <ast.Name object at 0x7da1b2022680>]]]]
if name[groups] begin[:]
call[name[cmd].extend, parameter[list[[<ast.Constant object at 0x7da1b2022560>, <ast.Call object at 0x7da1b20224a0>]]]]
if compare[name[home] is_not constant[None]] begin[:]
call[name[cmd].extend, parameter[list[[<ast.Constant object at 0x7da1b20221d0>, <ast.Name object at 0x7da1b20226e0>]]]]
if compare[name[createhome] is constant[True]] begin[:]
call[name[cmd].append, parameter[constant[-m]]]
if name[loginclass] begin[:]
call[name[cmd].extend, parameter[list[[<ast.Constant object at 0x7da1b2021bd0>, <ast.Name object at 0x7da1b2021d20>]]]]
if name[shell] begin[:]
call[name[cmd].extend, parameter[list[[<ast.Constant object at 0x7da1b2021e40>, <ast.Name object at 0x7da1b20207c0>]]]]
if <ast.UnaryOp object at 0x7da1b20bbd60> begin[:]
call[name[cmd].append, parameter[constant[-o]]]
variable[gecos_field] assign[=] call[name[_build_gecos], parameter[dictionary[[<ast.Constant object at 0x7da1b209bca0>, <ast.Constant object at 0x7da1b209be80>, <ast.Constant object at 0x7da1b209bfa0>, <ast.Constant object at 0x7da2044c1750>], [<ast.Name object at 0x7da2044c2fb0>, <ast.Name object at 0x7da2044c0460>, <ast.Name object at 0x7da2044c0a60>, <ast.Name object at 0x7da2044c3b50>]]]]
call[name[cmd].extend, parameter[list[[<ast.Constant object at 0x7da2044c0880>, <ast.Name object at 0x7da2044c18d0>]]]]
call[name[cmd].extend, parameter[list[[<ast.Constant object at 0x7da1b2020580>, <ast.Name object at 0x7da1b20205b0>]]]]
return[compare[call[call[name[__salt__]][constant[cmd.retcode]], parameter[name[cmd]]] equal[==] constant[0]]] | keyword[def] identifier[add] ( identifier[name] ,
identifier[uid] = keyword[None] ,
identifier[gid] = keyword[None] ,
identifier[groups] = keyword[None] ,
identifier[home] = keyword[None] ,
identifier[shell] = keyword[None] ,
identifier[unique] = keyword[True] ,
identifier[fullname] = literal[string] ,
identifier[roomnumber] = literal[string] ,
identifier[workphone] = literal[string] ,
identifier[homephone] = literal[string] ,
identifier[createhome] = keyword[True] ,
identifier[loginclass] = keyword[None] ,
** identifier[kwargs] ):
literal[string]
identifier[kwargs] = identifier[salt] . identifier[utils] . identifier[args] . identifier[clean_kwargs] (** identifier[kwargs] )
keyword[if] identifier[salt] . identifier[utils] . identifier[data] . identifier[is_true] ( identifier[kwargs] . identifier[pop] ( literal[string] , keyword[False] )):
identifier[log] . identifier[warning] ( literal[string] )
keyword[if] identifier[kwargs] :
identifier[log] . identifier[warning] ( literal[string] )
keyword[if] identifier[isinstance] ( identifier[groups] , identifier[six] . identifier[string_types] ):
identifier[groups] = identifier[groups] . identifier[split] ( literal[string] )
identifier[cmd] =[ literal[string] , literal[string] ]
keyword[if] identifier[uid] :
identifier[cmd] . identifier[extend] ([ literal[string] , identifier[uid] ])
keyword[if] identifier[gid] :
identifier[cmd] . identifier[extend] ([ literal[string] , identifier[gid] ])
keyword[if] identifier[groups] :
identifier[cmd] . identifier[extend] ([ literal[string] , literal[string] . identifier[join] ( identifier[groups] )])
keyword[if] identifier[home] keyword[is] keyword[not] keyword[None] :
identifier[cmd] . identifier[extend] ([ literal[string] , identifier[home] ])
keyword[if] identifier[createhome] keyword[is] keyword[True] :
identifier[cmd] . identifier[append] ( literal[string] )
keyword[if] identifier[loginclass] :
identifier[cmd] . identifier[extend] ([ literal[string] , identifier[loginclass] ])
keyword[if] identifier[shell] :
identifier[cmd] . identifier[extend] ([ literal[string] , identifier[shell] ])
keyword[if] keyword[not] identifier[salt] . identifier[utils] . identifier[data] . identifier[is_true] ( identifier[unique] ):
identifier[cmd] . identifier[append] ( literal[string] )
identifier[gecos_field] = identifier[_build_gecos] ({ literal[string] : identifier[fullname] ,
literal[string] : identifier[roomnumber] ,
literal[string] : identifier[workphone] ,
literal[string] : identifier[homephone] })
identifier[cmd] . identifier[extend] ([ literal[string] , identifier[gecos_field] ])
identifier[cmd] . identifier[extend] ([ literal[string] , identifier[name] ])
keyword[return] identifier[__salt__] [ literal[string] ]( identifier[cmd] , identifier[python_shell] = keyword[False] )== literal[int] | def add(name, uid=None, gid=None, groups=None, home=None, shell=None, unique=True, fullname='', roomnumber='', workphone='', homephone='', createhome=True, loginclass=None, **kwargs):
"""
Add a user to the minion
CLI Example:
.. code-block:: bash
salt '*' user.add name <uid> <gid> <groups> <home> <shell>
"""
kwargs = salt.utils.args.clean_kwargs(**kwargs)
if salt.utils.data.is_true(kwargs.pop('system', False)):
log.warning("pw_user module does not support the 'system' argument") # depends on [control=['if'], data=[]]
if kwargs:
log.warning('Invalid kwargs passed to user.add') # depends on [control=['if'], data=[]]
if isinstance(groups, six.string_types):
groups = groups.split(',') # depends on [control=['if'], data=[]]
cmd = ['pw', 'useradd']
if uid:
cmd.extend(['-u', uid]) # depends on [control=['if'], data=[]]
if gid:
cmd.extend(['-g', gid]) # depends on [control=['if'], data=[]]
if groups:
cmd.extend(['-G', ','.join(groups)]) # depends on [control=['if'], data=[]]
if home is not None:
cmd.extend(['-d', home]) # depends on [control=['if'], data=['home']]
if createhome is True:
cmd.append('-m') # depends on [control=['if'], data=[]]
if loginclass:
cmd.extend(['-L', loginclass]) # depends on [control=['if'], data=[]]
if shell:
cmd.extend(['-s', shell]) # depends on [control=['if'], data=[]]
if not salt.utils.data.is_true(unique):
cmd.append('-o') # depends on [control=['if'], data=[]]
gecos_field = _build_gecos({'fullname': fullname, 'roomnumber': roomnumber, 'workphone': workphone, 'homephone': homephone})
cmd.extend(['-c', gecos_field])
cmd.extend(['-n', name])
return __salt__['cmd.retcode'](cmd, python_shell=False) == 0 |
def _is_version_greater(self):
    """
    Check whether the current version is greater than the recorded one.
    """
    comparison = Version(True).check_versions(
        self.current_version[0], self.version_yaml)
    if comparison is None:
        # No comparison could be made.
        return False
    # check_versions() reports falsy when the current version is the
    # greater one, so invert it for the answer.
    return not comparison
constant[
Check if the current version is greater as the older older one.
]
variable[checked] assign[=] call[call[name[Version], parameter[constant[True]]].check_versions, parameter[call[name[self].current_version][constant[0]], name[self].version_yaml]]
if <ast.BoolOp object at 0x7da20e9567a0> begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[_is_version_greater] ( identifier[self] ):
literal[string]
identifier[checked] = identifier[Version] ( keyword[True] ). identifier[check_versions] (
identifier[self] . identifier[current_version] [ literal[int] ], identifier[self] . identifier[version_yaml]
)
keyword[if] identifier[checked] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[checked] :
keyword[return] keyword[True]
keyword[return] keyword[False] | def _is_version_greater(self):
"""
Check if the current version is greater as the older older one.
"""
# we compare the 2 versions.
checked = Version(True).check_versions(self.current_version[0], self.version_yaml)
if checked is not None and (not checked):
# The current version is greater as the older one.
# We return True.
return True # depends on [control=['if'], data=[]]
# We return False
return False |
def get_content(self, start=None, end=None):
    """
    Yield the content of the file at ``self.filepath`` as byte chunks.

    Reading begins at offset ``start`` (default: start of file) and
    stops at offset ``end`` (default: end of file).  Streaming chunks
    instead of returning one big byte string keeps memory use bounded
    for large files.
    """
    CHUNK = 64 * 1024
    with open(self.filepath, "rb") as stream:
        if start is not None:
            stream.seek(start)
        # Bytes still to deliver; None means "read until EOF".
        remaining = None if end is None else end - (start or 0)
        while True:
            size = CHUNK
            if remaining is not None and remaining < size:
                size = remaining
            data = stream.read(size)
            if not data:
                if remaining is not None:
                    assert remaining == 0
                return
            if remaining is not None:
                remaining -= len(data)
            yield data
constant[
Retrieve the content of the requested resource which is located
at the given absolute path.
This method should either return a byte string or an iterator
of byte strings. The latter is preferred for large files
as it helps reduce memory fragmentation.
]
with call[name[open], parameter[name[self].filepath, constant[rb]]] begin[:]
if compare[name[start] is_not constant[None]] begin[:]
call[name[file].seek, parameter[name[start]]]
if compare[name[end] is_not constant[None]] begin[:]
variable[remaining] assign[=] binary_operation[name[end] - <ast.BoolOp object at 0x7da2054a4850>]
while constant[True] begin[:]
variable[chunk_size] assign[=] binary_operation[constant[64] * constant[1024]]
if <ast.BoolOp object at 0x7da2054a6c80> begin[:]
variable[chunk_size] assign[=] name[remaining]
variable[chunk] assign[=] call[name[file].read, parameter[name[chunk_size]]]
if name[chunk] begin[:]
if compare[name[remaining] is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da2054a5150>
<ast.Yield object at 0x7da2054a6c20> | keyword[def] identifier[get_content] ( identifier[self] , identifier[start] = keyword[None] , identifier[end] = keyword[None] ):
literal[string]
keyword[with] identifier[open] ( identifier[self] . identifier[filepath] , literal[string] ) keyword[as] identifier[file] :
keyword[if] identifier[start] keyword[is] keyword[not] keyword[None] :
identifier[file] . identifier[seek] ( identifier[start] )
keyword[if] identifier[end] keyword[is] keyword[not] keyword[None] :
identifier[remaining] = identifier[end] -( identifier[start] keyword[or] literal[int] )
keyword[else] :
identifier[remaining] = keyword[None]
keyword[while] keyword[True] :
identifier[chunk_size] = literal[int] * literal[int]
keyword[if] identifier[remaining] keyword[is] keyword[not] keyword[None] keyword[and] identifier[remaining] < identifier[chunk_size] :
identifier[chunk_size] = identifier[remaining]
identifier[chunk] = identifier[file] . identifier[read] ( identifier[chunk_size] )
keyword[if] identifier[chunk] :
keyword[if] identifier[remaining] keyword[is] keyword[not] keyword[None] :
identifier[remaining] -= identifier[len] ( identifier[chunk] )
keyword[yield] identifier[chunk]
keyword[else] :
keyword[if] identifier[remaining] keyword[is] keyword[not] keyword[None] :
keyword[assert] identifier[remaining] == literal[int]
keyword[return] | def get_content(self, start=None, end=None):
"""
Retrieve the content of the requested resource which is located
at the given absolute path.
This method should either return a byte string or an iterator
of byte strings. The latter is preferred for large files
as it helps reduce memory fragmentation.
"""
with open(self.filepath, 'rb') as file:
if start is not None:
file.seek(start) # depends on [control=['if'], data=['start']]
if end is not None:
remaining = end - (start or 0) # depends on [control=['if'], data=['end']]
else:
remaining = None
while True:
chunk_size = 64 * 1024
if remaining is not None and remaining < chunk_size:
chunk_size = remaining # depends on [control=['if'], data=[]]
chunk = file.read(chunk_size)
if chunk:
if remaining is not None:
remaining -= len(chunk) # depends on [control=['if'], data=['remaining']]
yield chunk # depends on [control=['if'], data=[]]
else:
if remaining is not None:
assert remaining == 0 # depends on [control=['if'], data=['remaining']]
return # depends on [control=['while'], data=[]] # depends on [control=['with'], data=['file']] |
def get_gated_grpc_tensors(self, matching_debug_op=None):
"""Extract all nodes with gated-gRPC debug ops attached.
Uses cached values if available.
This method is thread-safe.
Args:
graph_def: A tf.GraphDef proto.
matching_debug_op: Return tensors and nodes with only matching the
specified debug op name (optional). If `None`, will extract only
`DebugIdentity` debug ops.
Returns:
A list of (node_name, op_type, output_slot, debug_op) tuples.
"""
with self._grpc_gated_lock:
matching_debug_op = matching_debug_op or 'DebugIdentity'
if matching_debug_op not in self._grpc_gated_tensors:
# First, construct a map from node name to op type.
node_name_to_op_type = dict(
(node.name, node.op) for node in self._graph_def.node)
# Second, populate the output list.
gated = []
for node in self._graph_def.node:
if node.op == matching_debug_op:
for attr_key in node.attr:
if attr_key == 'gated_grpc' and node.attr[attr_key].b:
node_name, output_slot, _, debug_op = (
debug_graphs.parse_debug_node_name(node.name))
gated.append(
(node_name, node_name_to_op_type[node_name], output_slot,
debug_op))
break
self._grpc_gated_tensors[matching_debug_op] = gated
return self._grpc_gated_tensors[matching_debug_op] | def function[get_gated_grpc_tensors, parameter[self, matching_debug_op]]:
constant[Extract all nodes with gated-gRPC debug ops attached.
Uses cached values if available.
This method is thread-safe.
Args:
graph_def: A tf.GraphDef proto.
matching_debug_op: Return tensors and nodes with only matching the
specified debug op name (optional). If `None`, will extract only
`DebugIdentity` debug ops.
Returns:
A list of (node_name, op_type, output_slot, debug_op) tuples.
]
with name[self]._grpc_gated_lock begin[:]
variable[matching_debug_op] assign[=] <ast.BoolOp object at 0x7da18c4cdcf0>
if compare[name[matching_debug_op] <ast.NotIn object at 0x7da2590d7190> name[self]._grpc_gated_tensors] begin[:]
variable[node_name_to_op_type] assign[=] call[name[dict], parameter[<ast.GeneratorExp object at 0x7da18c4ce290>]]
variable[gated] assign[=] list[[]]
for taget[name[node]] in starred[name[self]._graph_def.node] begin[:]
if compare[name[node].op equal[==] name[matching_debug_op]] begin[:]
for taget[name[attr_key]] in starred[name[node].attr] begin[:]
if <ast.BoolOp object at 0x7da18f7237f0> begin[:]
<ast.Tuple object at 0x7da18f721180> assign[=] call[name[debug_graphs].parse_debug_node_name, parameter[name[node].name]]
call[name[gated].append, parameter[tuple[[<ast.Name object at 0x7da18f7226e0>, <ast.Subscript object at 0x7da1b21eabc0>, <ast.Name object at 0x7da1b21ebeb0>, <ast.Name object at 0x7da1b21ea0e0>]]]]
break
call[name[self]._grpc_gated_tensors][name[matching_debug_op]] assign[=] name[gated]
return[call[name[self]._grpc_gated_tensors][name[matching_debug_op]]] | keyword[def] identifier[get_gated_grpc_tensors] ( identifier[self] , identifier[matching_debug_op] = keyword[None] ):
literal[string]
keyword[with] identifier[self] . identifier[_grpc_gated_lock] :
identifier[matching_debug_op] = identifier[matching_debug_op] keyword[or] literal[string]
keyword[if] identifier[matching_debug_op] keyword[not] keyword[in] identifier[self] . identifier[_grpc_gated_tensors] :
identifier[node_name_to_op_type] = identifier[dict] (
( identifier[node] . identifier[name] , identifier[node] . identifier[op] ) keyword[for] identifier[node] keyword[in] identifier[self] . identifier[_graph_def] . identifier[node] )
identifier[gated] =[]
keyword[for] identifier[node] keyword[in] identifier[self] . identifier[_graph_def] . identifier[node] :
keyword[if] identifier[node] . identifier[op] == identifier[matching_debug_op] :
keyword[for] identifier[attr_key] keyword[in] identifier[node] . identifier[attr] :
keyword[if] identifier[attr_key] == literal[string] keyword[and] identifier[node] . identifier[attr] [ identifier[attr_key] ]. identifier[b] :
identifier[node_name] , identifier[output_slot] , identifier[_] , identifier[debug_op] =(
identifier[debug_graphs] . identifier[parse_debug_node_name] ( identifier[node] . identifier[name] ))
identifier[gated] . identifier[append] (
( identifier[node_name] , identifier[node_name_to_op_type] [ identifier[node_name] ], identifier[output_slot] ,
identifier[debug_op] ))
keyword[break]
identifier[self] . identifier[_grpc_gated_tensors] [ identifier[matching_debug_op] ]= identifier[gated]
keyword[return] identifier[self] . identifier[_grpc_gated_tensors] [ identifier[matching_debug_op] ] | def get_gated_grpc_tensors(self, matching_debug_op=None):
"""Extract all nodes with gated-gRPC debug ops attached.
Uses cached values if available.
This method is thread-safe.
Args:
graph_def: A tf.GraphDef proto.
matching_debug_op: Return tensors and nodes with only matching the
specified debug op name (optional). If `None`, will extract only
`DebugIdentity` debug ops.
Returns:
A list of (node_name, op_type, output_slot, debug_op) tuples.
"""
with self._grpc_gated_lock:
matching_debug_op = matching_debug_op or 'DebugIdentity'
if matching_debug_op not in self._grpc_gated_tensors:
# First, construct a map from node name to op type.
node_name_to_op_type = dict(((node.name, node.op) for node in self._graph_def.node))
# Second, populate the output list.
gated = []
for node in self._graph_def.node:
if node.op == matching_debug_op:
for attr_key in node.attr:
if attr_key == 'gated_grpc' and node.attr[attr_key].b:
(node_name, output_slot, _, debug_op) = debug_graphs.parse_debug_node_name(node.name)
gated.append((node_name, node_name_to_op_type[node_name], output_slot, debug_op))
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['attr_key']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['node']]
self._grpc_gated_tensors[matching_debug_op] = gated # depends on [control=['if'], data=['matching_debug_op']]
return self._grpc_gated_tensors[matching_debug_op] # depends on [control=['with'], data=[]] |
def need_permissions(object_getter, action, hidden=True):
"""Get permission for buckets or abort.
:param object_getter: The function used to retrieve the object and pass it
to the permission factory.
:param action: The action needed.
:param hidden: Determine which kind of error to return. (Default: ``True``)
"""
def decorator_builder(f):
@wraps(f)
def decorate(*args, **kwargs):
check_permission(current_permission_factory(
object_getter(*args, **kwargs),
action(*args, **kwargs) if callable(action) else action,
), hidden=hidden)
return f(*args, **kwargs)
return decorate
return decorator_builder | def function[need_permissions, parameter[object_getter, action, hidden]]:
constant[Get permission for buckets or abort.
:param object_getter: The function used to retrieve the object and pass it
to the permission factory.
:param action: The action needed.
:param hidden: Determine which kind of error to return. (Default: ``True``)
]
def function[decorator_builder, parameter[f]]:
def function[decorate, parameter[]]:
call[name[check_permission], parameter[call[name[current_permission_factory], parameter[call[name[object_getter], parameter[<ast.Starred object at 0x7da1b199a590>]], <ast.IfExp object at 0x7da1b19992a0>]]]]
return[call[name[f], parameter[<ast.Starred object at 0x7da1b199b460>]]]
return[name[decorate]]
return[name[decorator_builder]] | keyword[def] identifier[need_permissions] ( identifier[object_getter] , identifier[action] , identifier[hidden] = keyword[True] ):
literal[string]
keyword[def] identifier[decorator_builder] ( identifier[f] ):
@ identifier[wraps] ( identifier[f] )
keyword[def] identifier[decorate] (* identifier[args] ,** identifier[kwargs] ):
identifier[check_permission] ( identifier[current_permission_factory] (
identifier[object_getter] (* identifier[args] ,** identifier[kwargs] ),
identifier[action] (* identifier[args] ,** identifier[kwargs] ) keyword[if] identifier[callable] ( identifier[action] ) keyword[else] identifier[action] ,
), identifier[hidden] = identifier[hidden] )
keyword[return] identifier[f] (* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[decorate]
keyword[return] identifier[decorator_builder] | def need_permissions(object_getter, action, hidden=True):
"""Get permission for buckets or abort.
:param object_getter: The function used to retrieve the object and pass it
to the permission factory.
:param action: The action needed.
:param hidden: Determine which kind of error to return. (Default: ``True``)
"""
def decorator_builder(f):
@wraps(f)
def decorate(*args, **kwargs):
check_permission(current_permission_factory(object_getter(*args, **kwargs), action(*args, **kwargs) if callable(action) else action), hidden=hidden)
return f(*args, **kwargs)
return decorate
return decorator_builder |
def einstein_radius_rescaled(self):
"""Rescale the einstein radius by slope and axis_ratio, to reduce its degeneracy with other mass-profiles
parameters"""
return ((3 - self.slope) / (1 + self.axis_ratio)) * self.einstein_radius ** (self.slope - 1) | def function[einstein_radius_rescaled, parameter[self]]:
constant[Rescale the einstein radius by slope and axis_ratio, to reduce its degeneracy with other mass-profiles
parameters]
return[binary_operation[binary_operation[binary_operation[constant[3] - name[self].slope] / binary_operation[constant[1] + name[self].axis_ratio]] * binary_operation[name[self].einstein_radius ** binary_operation[name[self].slope - constant[1]]]]] | keyword[def] identifier[einstein_radius_rescaled] ( identifier[self] ):
literal[string]
keyword[return] (( literal[int] - identifier[self] . identifier[slope] )/( literal[int] + identifier[self] . identifier[axis_ratio] ))* identifier[self] . identifier[einstein_radius] **( identifier[self] . identifier[slope] - literal[int] ) | def einstein_radius_rescaled(self):
"""Rescale the einstein radius by slope and axis_ratio, to reduce its degeneracy with other mass-profiles
parameters"""
return (3 - self.slope) / (1 + self.axis_ratio) * self.einstein_radius ** (self.slope - 1) |
def fix_page_relative_url(rel_url):
"""
Fix page relative url to a standard, uniform format.
Possible input:
- my-page
- my-page/
- my-page/index
- my-page/index.htm
- my-page/index.html
- my-page/specific.file
:param rel_url: relative url to fix
:return: tuple(fixed relative url or FILE PATH if exists else None,
file exists or not)
"""
rel_url = rel_url.lstrip('/') # trim all heading '/'
endswith_slash = rel_url.endswith('/')
rel_url = rel_url.rstrip('/') + (
'/' if endswith_slash else '') # preserve only one trailing '/'
if not rel_url or rel_url == '/':
return None, False
file_path = os.path.join(current_app.instance_path, 'pages',
rel_url.replace('/', os.path.sep))
if rel_url.endswith('/'):
index_html_file_path = os.path.join(file_path, 'index.html')
if os.path.isfile(index_html_file_path):
# index.html exists
return index_html_file_path, True
return rel_url, False
elif os.path.isfile(file_path):
ext = os.path.splitext(file_path)[1][1:]
if get_standard_format_name(ext) is not None:
# is source of custom page
if current_app.config['PAGE_SOURCE_ACCESSIBLE']:
return file_path, True
else:
# is other direct files
return file_path, True
elif os.path.isdir(file_path):
return rel_url + '/', False
sp = rel_url.rsplit('/', 1)
m = re.match(r'(.+)\.html?', sp[-1])
if m:
sp[-1] = m.group(1) + '.html'
else:
sp[-1] += '.html'
return '/'.join(sp), False | def function[fix_page_relative_url, parameter[rel_url]]:
constant[
Fix page relative url to a standard, uniform format.
Possible input:
- my-page
- my-page/
- my-page/index
- my-page/index.htm
- my-page/index.html
- my-page/specific.file
:param rel_url: relative url to fix
:return: tuple(fixed relative url or FILE PATH if exists else None,
file exists or not)
]
variable[rel_url] assign[=] call[name[rel_url].lstrip, parameter[constant[/]]]
variable[endswith_slash] assign[=] call[name[rel_url].endswith, parameter[constant[/]]]
variable[rel_url] assign[=] binary_operation[call[name[rel_url].rstrip, parameter[constant[/]]] + <ast.IfExp object at 0x7da18f00d060>]
if <ast.BoolOp object at 0x7da18f58f820> begin[:]
return[tuple[[<ast.Constant object at 0x7da18f58fb50>, <ast.Constant object at 0x7da18f58f2e0>]]]
variable[file_path] assign[=] call[name[os].path.join, parameter[name[current_app].instance_path, constant[pages], call[name[rel_url].replace, parameter[constant[/], name[os].path.sep]]]]
if call[name[rel_url].endswith, parameter[constant[/]]] begin[:]
variable[index_html_file_path] assign[=] call[name[os].path.join, parameter[name[file_path], constant[index.html]]]
if call[name[os].path.isfile, parameter[name[index_html_file_path]]] begin[:]
return[tuple[[<ast.Name object at 0x7da18f58cc40>, <ast.Constant object at 0x7da18f58cb50>]]]
return[tuple[[<ast.Name object at 0x7da18f58f9d0>, <ast.Constant object at 0x7da18f58d840>]]]
variable[sp] assign[=] call[name[rel_url].rsplit, parameter[constant[/], constant[1]]]
variable[m] assign[=] call[name[re].match, parameter[constant[(.+)\.html?], call[name[sp]][<ast.UnaryOp object at 0x7da18f58c070>]]]
if name[m] begin[:]
call[name[sp]][<ast.UnaryOp object at 0x7da18ede6500>] assign[=] binary_operation[call[name[m].group, parameter[constant[1]]] + constant[.html]]
return[tuple[[<ast.Call object at 0x7da18ede64d0>, <ast.Constant object at 0x7da18ede48b0>]]] | keyword[def] identifier[fix_page_relative_url] ( identifier[rel_url] ):
literal[string]
identifier[rel_url] = identifier[rel_url] . identifier[lstrip] ( literal[string] )
identifier[endswith_slash] = identifier[rel_url] . identifier[endswith] ( literal[string] )
identifier[rel_url] = identifier[rel_url] . identifier[rstrip] ( literal[string] )+(
literal[string] keyword[if] identifier[endswith_slash] keyword[else] literal[string] )
keyword[if] keyword[not] identifier[rel_url] keyword[or] identifier[rel_url] == literal[string] :
keyword[return] keyword[None] , keyword[False]
identifier[file_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[current_app] . identifier[instance_path] , literal[string] ,
identifier[rel_url] . identifier[replace] ( literal[string] , identifier[os] . identifier[path] . identifier[sep] ))
keyword[if] identifier[rel_url] . identifier[endswith] ( literal[string] ):
identifier[index_html_file_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[file_path] , literal[string] )
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[index_html_file_path] ):
keyword[return] identifier[index_html_file_path] , keyword[True]
keyword[return] identifier[rel_url] , keyword[False]
keyword[elif] identifier[os] . identifier[path] . identifier[isfile] ( identifier[file_path] ):
identifier[ext] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[file_path] )[ literal[int] ][ literal[int] :]
keyword[if] identifier[get_standard_format_name] ( identifier[ext] ) keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[current_app] . identifier[config] [ literal[string] ]:
keyword[return] identifier[file_path] , keyword[True]
keyword[else] :
keyword[return] identifier[file_path] , keyword[True]
keyword[elif] identifier[os] . identifier[path] . identifier[isdir] ( identifier[file_path] ):
keyword[return] identifier[rel_url] + literal[string] , keyword[False]
identifier[sp] = identifier[rel_url] . identifier[rsplit] ( literal[string] , literal[int] )
identifier[m] = identifier[re] . identifier[match] ( literal[string] , identifier[sp] [- literal[int] ])
keyword[if] identifier[m] :
identifier[sp] [- literal[int] ]= identifier[m] . identifier[group] ( literal[int] )+ literal[string]
keyword[else] :
identifier[sp] [- literal[int] ]+= literal[string]
keyword[return] literal[string] . identifier[join] ( identifier[sp] ), keyword[False] | def fix_page_relative_url(rel_url):
"""
Fix page relative url to a standard, uniform format.
Possible input:
- my-page
- my-page/
- my-page/index
- my-page/index.htm
- my-page/index.html
- my-page/specific.file
:param rel_url: relative url to fix
:return: tuple(fixed relative url or FILE PATH if exists else None,
file exists or not)
"""
rel_url = rel_url.lstrip('/') # trim all heading '/'
endswith_slash = rel_url.endswith('/')
rel_url = rel_url.rstrip('/') + ('/' if endswith_slash else '') # preserve only one trailing '/'
if not rel_url or rel_url == '/':
return (None, False) # depends on [control=['if'], data=[]]
file_path = os.path.join(current_app.instance_path, 'pages', rel_url.replace('/', os.path.sep))
if rel_url.endswith('/'):
index_html_file_path = os.path.join(file_path, 'index.html')
if os.path.isfile(index_html_file_path):
# index.html exists
return (index_html_file_path, True) # depends on [control=['if'], data=[]]
return (rel_url, False) # depends on [control=['if'], data=[]]
elif os.path.isfile(file_path):
ext = os.path.splitext(file_path)[1][1:]
if get_standard_format_name(ext) is not None:
# is source of custom page
if current_app.config['PAGE_SOURCE_ACCESSIBLE']:
return (file_path, True) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
# is other direct files
return (file_path, True) # depends on [control=['if'], data=[]]
elif os.path.isdir(file_path):
return (rel_url + '/', False) # depends on [control=['if'], data=[]]
sp = rel_url.rsplit('/', 1)
m = re.match('(.+)\\.html?', sp[-1])
if m:
sp[-1] = m.group(1) + '.html' # depends on [control=['if'], data=[]]
else:
sp[-1] += '.html'
return ('/'.join(sp), False) |
def point(self):
""" Return the point for the public key """
string = unhexlify(self.unCompressed())
return ecdsa.VerifyingKey.from_string(
string[1:], curve=ecdsa.SECP256k1
).pubkey.point | def function[point, parameter[self]]:
constant[ Return the point for the public key ]
variable[string] assign[=] call[name[unhexlify], parameter[call[name[self].unCompressed, parameter[]]]]
return[call[name[ecdsa].VerifyingKey.from_string, parameter[call[name[string]][<ast.Slice object at 0x7da1b007df30>]]].pubkey.point] | keyword[def] identifier[point] ( identifier[self] ):
literal[string]
identifier[string] = identifier[unhexlify] ( identifier[self] . identifier[unCompressed] ())
keyword[return] identifier[ecdsa] . identifier[VerifyingKey] . identifier[from_string] (
identifier[string] [ literal[int] :], identifier[curve] = identifier[ecdsa] . identifier[SECP256k1]
). identifier[pubkey] . identifier[point] | def point(self):
""" Return the point for the public key """
string = unhexlify(self.unCompressed())
return ecdsa.VerifyingKey.from_string(string[1:], curve=ecdsa.SECP256k1).pubkey.point |
def Nu_Mokry(Re, Pr, rho_w=None, rho_b=None):
r'''Calculates internal convection Nusselt number for turbulent vertical
upward flow in a pipe under supercritical conditions according to [1]_,
and reviewed in [2]_.
.. math::
Nu_b = 0.0061 Re_b^{0.904} \bar{Pr}_b^{0.684}
\left(\frac{\rho_w}{\rho_b}\right)^{0.564}
\bar{Cp} = \frac{H_w-H_b}{T_w-T_b}
Parameters
----------
Re : float
Reynolds number with bulk fluid properties, [-]
Pr : float
Prandtl number with bulk fluid properties and an average heat capacity
between the wall and bulk temperatures [-]
rho_w : float, optional
Density at the wall temperature, [kg/m^3]
rho_b : float, optional
Density at the bulk temperature, [kg/m^3]
Returns
-------
Nu : float
Nusselt number with bulk fluid properties, [-]
Notes
-----
For the data used to develop the correlation, P was set at 20 MPa, and D
was 10 mm. G varied from 200-1500 kg/m^2/s and q varied from 0 to 1250
kW/m^2.
Cp used in the calculation of Prandtl number should be the average value
of those at the wall and the bulk temperatures.
For deteriorated heat transfer, this was the four most accurate correlation
in [2]_ with a MAD of 24.0%. It was also the 7th most accurate against
enhanced heat transfer, with a MAD of 14.7%, and the most accurate for the
normal heat transfer database as well as the top correlation in all
categories combined.
If the extra density information is not provided, it will not be used.
Examples
--------
>>> Nu_Mokry(1E5, 1.2, 330, 290.)
246.1156319156992
References
----------
.. [1] Mokry, Sarah, Igor Pioro, Amjad Farah, Krysten King, Sahil Gupta,
Wargha Peiman, and Pavel Kirillov. "Development of Supercritical Water
Heat-Transfer Correlation for Vertical Bare Tubes." Nuclear Engineering
and Design, International Conference on Nuclear Energy for New Europe
2009, 241, no. 4 (April 2011): 1126-36.
doi:10.1016/j.nucengdes.2010.06.012.
.. [2] Chen, Weiwei, Xiande Fang, Yu Xu, and Xianghui Su. "An Assessment of
Correlations of Forced Convection Heat Transfer to Water at
Supercritical Pressure." Annals of Nuclear Energy 76 (February 2015):
451-60. doi:10.1016/j.anucene.2014.10.027.
'''
Nu = 0.0061*Re**0.904*Pr**0.684
if rho_w and rho_b:
Nu *= (rho_w/rho_b)**0.564
return Nu | def function[Nu_Mokry, parameter[Re, Pr, rho_w, rho_b]]:
constant[Calculates internal convection Nusselt number for turbulent vertical
upward flow in a pipe under supercritical conditions according to [1]_,
and reviewed in [2]_.
.. math::
Nu_b = 0.0061 Re_b^{0.904} \bar{Pr}_b^{0.684}
\left(\frac{\rho_w}{\rho_b}\right)^{0.564}
\bar{Cp} = \frac{H_w-H_b}{T_w-T_b}
Parameters
----------
Re : float
Reynolds number with bulk fluid properties, [-]
Pr : float
Prandtl number with bulk fluid properties and an average heat capacity
between the wall and bulk temperatures [-]
rho_w : float, optional
Density at the wall temperature, [kg/m^3]
rho_b : float, optional
Density at the bulk temperature, [kg/m^3]
Returns
-------
Nu : float
Nusselt number with bulk fluid properties, [-]
Notes
-----
For the data used to develop the correlation, P was set at 20 MPa, and D
was 10 mm. G varied from 200-1500 kg/m^2/s and q varied from 0 to 1250
kW/m^2.
Cp used in the calculation of Prandtl number should be the average value
of those at the wall and the bulk temperatures.
For deteriorated heat transfer, this was the four most accurate correlation
in [2]_ with a MAD of 24.0%. It was also the 7th most accurate against
enhanced heat transfer, with a MAD of 14.7%, and the most accurate for the
normal heat transfer database as well as the top correlation in all
categories combined.
If the extra density information is not provided, it will not be used.
Examples
--------
>>> Nu_Mokry(1E5, 1.2, 330, 290.)
246.1156319156992
References
----------
.. [1] Mokry, Sarah, Igor Pioro, Amjad Farah, Krysten King, Sahil Gupta,
Wargha Peiman, and Pavel Kirillov. "Development of Supercritical Water
Heat-Transfer Correlation for Vertical Bare Tubes." Nuclear Engineering
and Design, International Conference on Nuclear Energy for New Europe
2009, 241, no. 4 (April 2011): 1126-36.
doi:10.1016/j.nucengdes.2010.06.012.
.. [2] Chen, Weiwei, Xiande Fang, Yu Xu, and Xianghui Su. "An Assessment of
Correlations of Forced Convection Heat Transfer to Water at
Supercritical Pressure." Annals of Nuclear Energy 76 (February 2015):
451-60. doi:10.1016/j.anucene.2014.10.027.
]
variable[Nu] assign[=] binary_operation[binary_operation[constant[0.0061] * binary_operation[name[Re] ** constant[0.904]]] * binary_operation[name[Pr] ** constant[0.684]]]
if <ast.BoolOp object at 0x7da2047e9210> begin[:]
<ast.AugAssign object at 0x7da2047eb5e0>
return[name[Nu]] | keyword[def] identifier[Nu_Mokry] ( identifier[Re] , identifier[Pr] , identifier[rho_w] = keyword[None] , identifier[rho_b] = keyword[None] ):
literal[string]
identifier[Nu] = literal[int] * identifier[Re] ** literal[int] * identifier[Pr] ** literal[int]
keyword[if] identifier[rho_w] keyword[and] identifier[rho_b] :
identifier[Nu] *=( identifier[rho_w] / identifier[rho_b] )** literal[int]
keyword[return] identifier[Nu] | def Nu_Mokry(Re, Pr, rho_w=None, rho_b=None):
"""Calculates internal convection Nusselt number for turbulent vertical
upward flow in a pipe under supercritical conditions according to [1]_,
and reviewed in [2]_.
.. math::
Nu_b = 0.0061 Re_b^{0.904} \\bar{Pr}_b^{0.684}
\\left(\\frac{\\rho_w}{\\rho_b}\\right)^{0.564}
\\bar{Cp} = \\frac{H_w-H_b}{T_w-T_b}
Parameters
----------
Re : float
Reynolds number with bulk fluid properties, [-]
Pr : float
Prandtl number with bulk fluid properties and an average heat capacity
between the wall and bulk temperatures [-]
rho_w : float, optional
Density at the wall temperature, [kg/m^3]
rho_b : float, optional
Density at the bulk temperature, [kg/m^3]
Returns
-------
Nu : float
Nusselt number with bulk fluid properties, [-]
Notes
-----
For the data used to develop the correlation, P was set at 20 MPa, and D
was 10 mm. G varied from 200-1500 kg/m^2/s and q varied from 0 to 1250
kW/m^2.
Cp used in the calculation of Prandtl number should be the average value
of those at the wall and the bulk temperatures.
For deteriorated heat transfer, this was the four most accurate correlation
in [2]_ with a MAD of 24.0%. It was also the 7th most accurate against
enhanced heat transfer, with a MAD of 14.7%, and the most accurate for the
normal heat transfer database as well as the top correlation in all
categories combined.
If the extra density information is not provided, it will not be used.
Examples
--------
>>> Nu_Mokry(1E5, 1.2, 330, 290.)
246.1156319156992
References
----------
.. [1] Mokry, Sarah, Igor Pioro, Amjad Farah, Krysten King, Sahil Gupta,
Wargha Peiman, and Pavel Kirillov. "Development of Supercritical Water
Heat-Transfer Correlation for Vertical Bare Tubes." Nuclear Engineering
and Design, International Conference on Nuclear Energy for New Europe
2009, 241, no. 4 (April 2011): 1126-36.
doi:10.1016/j.nucengdes.2010.06.012.
.. [2] Chen, Weiwei, Xiande Fang, Yu Xu, and Xianghui Su. "An Assessment of
Correlations of Forced Convection Heat Transfer to Water at
Supercritical Pressure." Annals of Nuclear Energy 76 (February 2015):
451-60. doi:10.1016/j.anucene.2014.10.027.
"""
Nu = 0.0061 * Re ** 0.904 * Pr ** 0.684
if rho_w and rho_b:
Nu *= (rho_w / rho_b) ** 0.564 # depends on [control=['if'], data=[]]
return Nu |
def makeRetweetNetwork(tweets):
"""Receives tweets, returns directed retweet networks.
Without and with isolated nodes.
"""
G=x.DiGraph()
G_=x.DiGraph()
for tweet in tweets:
text=tweet["text"]
us=tweet["user"]["screen_name"]
if text.startswith("RT @"):
prev_us=text.split(":")[0].split("@")[1]
#print(us,prev_us,text)
if G.has_edge(prev_us,us):
G[prev_us][us]["weight"]+=1
G_[prev_us][us]["weight"]+=1
else:
G.add_edge(prev_us, us, weight=1.)
G_.add_edge(prev_us, us, weight=1.)
if us not in G_.nodes():
G_.add_node(us)
return G,G_ | def function[makeRetweetNetwork, parameter[tweets]]:
constant[Receives tweets, returns directed retweet networks.
Without and with isolated nodes.
]
variable[G] assign[=] call[name[x].DiGraph, parameter[]]
variable[G_] assign[=] call[name[x].DiGraph, parameter[]]
for taget[name[tweet]] in starred[name[tweets]] begin[:]
variable[text] assign[=] call[name[tweet]][constant[text]]
variable[us] assign[=] call[call[name[tweet]][constant[user]]][constant[screen_name]]
if call[name[text].startswith, parameter[constant[RT @]]] begin[:]
variable[prev_us] assign[=] call[call[call[call[name[text].split, parameter[constant[:]]]][constant[0]].split, parameter[constant[@]]]][constant[1]]
if call[name[G].has_edge, parameter[name[prev_us], name[us]]] begin[:]
<ast.AugAssign object at 0x7da20c6e4be0>
<ast.AugAssign object at 0x7da20c6e6320>
if compare[name[us] <ast.NotIn object at 0x7da2590d7190> call[name[G_].nodes, parameter[]]] begin[:]
call[name[G_].add_node, parameter[name[us]]]
return[tuple[[<ast.Name object at 0x7da20c6e6830>, <ast.Name object at 0x7da20c6e7970>]]] | keyword[def] identifier[makeRetweetNetwork] ( identifier[tweets] ):
literal[string]
identifier[G] = identifier[x] . identifier[DiGraph] ()
identifier[G_] = identifier[x] . identifier[DiGraph] ()
keyword[for] identifier[tweet] keyword[in] identifier[tweets] :
identifier[text] = identifier[tweet] [ literal[string] ]
identifier[us] = identifier[tweet] [ literal[string] ][ literal[string] ]
keyword[if] identifier[text] . identifier[startswith] ( literal[string] ):
identifier[prev_us] = identifier[text] . identifier[split] ( literal[string] )[ literal[int] ]. identifier[split] ( literal[string] )[ literal[int] ]
keyword[if] identifier[G] . identifier[has_edge] ( identifier[prev_us] , identifier[us] ):
identifier[G] [ identifier[prev_us] ][ identifier[us] ][ literal[string] ]+= literal[int]
identifier[G_] [ identifier[prev_us] ][ identifier[us] ][ literal[string] ]+= literal[int]
keyword[else] :
identifier[G] . identifier[add_edge] ( identifier[prev_us] , identifier[us] , identifier[weight] = literal[int] )
identifier[G_] . identifier[add_edge] ( identifier[prev_us] , identifier[us] , identifier[weight] = literal[int] )
keyword[if] identifier[us] keyword[not] keyword[in] identifier[G_] . identifier[nodes] ():
identifier[G_] . identifier[add_node] ( identifier[us] )
keyword[return] identifier[G] , identifier[G_] | def makeRetweetNetwork(tweets):
"""Receives tweets, returns directed retweet networks.
Without and with isolated nodes.
"""
G = x.DiGraph()
G_ = x.DiGraph()
for tweet in tweets:
text = tweet['text']
us = tweet['user']['screen_name']
if text.startswith('RT @'):
prev_us = text.split(':')[0].split('@')[1]
#print(us,prev_us,text)
if G.has_edge(prev_us, us):
G[prev_us][us]['weight'] += 1
G_[prev_us][us]['weight'] += 1 # depends on [control=['if'], data=[]]
else:
G.add_edge(prev_us, us, weight=1.0)
G_.add_edge(prev_us, us, weight=1.0) # depends on [control=['if'], data=[]]
if us not in G_.nodes():
G_.add_node(us) # depends on [control=['if'], data=['us']] # depends on [control=['for'], data=['tweet']]
return (G, G_) |
def _get_first_aggregate_text(node_list):
'''
Extract text from the first occurred DOM aggregate.
'''
if not node_list:
return ''
out = []
for node in node_list[0].childNodes:
if node.nodeType == dom.Document.TEXT_NODE:
out.append(node.nodeValue)
return '\n'.join(out) | def function[_get_first_aggregate_text, parameter[node_list]]:
constant[
Extract text from the first occurred DOM aggregate.
]
if <ast.UnaryOp object at 0x7da1b1c144c0> begin[:]
return[constant[]]
variable[out] assign[=] list[[]]
for taget[name[node]] in starred[call[name[node_list]][constant[0]].childNodes] begin[:]
if compare[name[node].nodeType equal[==] name[dom].Document.TEXT_NODE] begin[:]
call[name[out].append, parameter[name[node].nodeValue]]
return[call[constant[
].join, parameter[name[out]]]] | keyword[def] identifier[_get_first_aggregate_text] ( identifier[node_list] ):
literal[string]
keyword[if] keyword[not] identifier[node_list] :
keyword[return] literal[string]
identifier[out] =[]
keyword[for] identifier[node] keyword[in] identifier[node_list] [ literal[int] ]. identifier[childNodes] :
keyword[if] identifier[node] . identifier[nodeType] == identifier[dom] . identifier[Document] . identifier[TEXT_NODE] :
identifier[out] . identifier[append] ( identifier[node] . identifier[nodeValue] )
keyword[return] literal[string] . identifier[join] ( identifier[out] ) | def _get_first_aggregate_text(node_list):
"""
Extract text from the first occurred DOM aggregate.
"""
if not node_list:
return '' # depends on [control=['if'], data=[]]
out = []
for node in node_list[0].childNodes:
if node.nodeType == dom.Document.TEXT_NODE:
out.append(node.nodeValue) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['node']]
return '\n'.join(out) |
def extract(filename_url_or_filelike):
"""A more precise algorithm over the original eatiht algorithm
"""
pars_tnodes = get_parent_xpaths_and_textnodes(filename_url_or_filelike)
#[iterable, cardinality, ttl across iterable, avg across iterable.])
calc_across_paths_textnodes(pars_tnodes)
avg, _, _ = calc_avgstrlen_pathstextnodes(pars_tnodes)
filtered = [parpath_tnodes for parpath_tnodes in pars_tnodes
if parpath_tnodes[1][2] > avg]
paths = [path for path, tnode in filtered]
hist = get_xpath_frequencydistribution(paths)
try:
target_tnodes = [tnode for par, tnode in pars_tnodes if hist[0][0] in par]
target_text = '\n\n'.join([' '.join(tnode[0]) for tnode in target_tnodes])
return target_text
except IndexError:
return "" | def function[extract, parameter[filename_url_or_filelike]]:
constant[A more precise algorithm over the original eatiht algorithm
]
variable[pars_tnodes] assign[=] call[name[get_parent_xpaths_and_textnodes], parameter[name[filename_url_or_filelike]]]
call[name[calc_across_paths_textnodes], parameter[name[pars_tnodes]]]
<ast.Tuple object at 0x7da2046201c0> assign[=] call[name[calc_avgstrlen_pathstextnodes], parameter[name[pars_tnodes]]]
variable[filtered] assign[=] <ast.ListComp object at 0x7da2046230d0>
variable[paths] assign[=] <ast.ListComp object at 0x7da204621060>
variable[hist] assign[=] call[name[get_xpath_frequencydistribution], parameter[name[paths]]]
<ast.Try object at 0x7da2046203a0> | keyword[def] identifier[extract] ( identifier[filename_url_or_filelike] ):
literal[string]
identifier[pars_tnodes] = identifier[get_parent_xpaths_and_textnodes] ( identifier[filename_url_or_filelike] )
identifier[calc_across_paths_textnodes] ( identifier[pars_tnodes] )
identifier[avg] , identifier[_] , identifier[_] = identifier[calc_avgstrlen_pathstextnodes] ( identifier[pars_tnodes] )
identifier[filtered] =[ identifier[parpath_tnodes] keyword[for] identifier[parpath_tnodes] keyword[in] identifier[pars_tnodes]
keyword[if] identifier[parpath_tnodes] [ literal[int] ][ literal[int] ]> identifier[avg] ]
identifier[paths] =[ identifier[path] keyword[for] identifier[path] , identifier[tnode] keyword[in] identifier[filtered] ]
identifier[hist] = identifier[get_xpath_frequencydistribution] ( identifier[paths] )
keyword[try] :
identifier[target_tnodes] =[ identifier[tnode] keyword[for] identifier[par] , identifier[tnode] keyword[in] identifier[pars_tnodes] keyword[if] identifier[hist] [ literal[int] ][ literal[int] ] keyword[in] identifier[par] ]
identifier[target_text] = literal[string] . identifier[join] ([ literal[string] . identifier[join] ( identifier[tnode] [ literal[int] ]) keyword[for] identifier[tnode] keyword[in] identifier[target_tnodes] ])
keyword[return] identifier[target_text]
keyword[except] identifier[IndexError] :
keyword[return] literal[string] | def extract(filename_url_or_filelike):
"""A more precise algorithm over the original eatiht algorithm
"""
pars_tnodes = get_parent_xpaths_and_textnodes(filename_url_or_filelike) #[iterable, cardinality, ttl across iterable, avg across iterable.])
calc_across_paths_textnodes(pars_tnodes)
(avg, _, _) = calc_avgstrlen_pathstextnodes(pars_tnodes)
filtered = [parpath_tnodes for parpath_tnodes in pars_tnodes if parpath_tnodes[1][2] > avg]
paths = [path for (path, tnode) in filtered]
hist = get_xpath_frequencydistribution(paths)
try:
target_tnodes = [tnode for (par, tnode) in pars_tnodes if hist[0][0] in par]
target_text = '\n\n'.join([' '.join(tnode[0]) for tnode in target_tnodes])
return target_text # depends on [control=['try'], data=[]]
except IndexError:
return '' # depends on [control=['except'], data=[]] |
def get_active_queue(self):
"""Get name of first active job queue"""
# Get dict of active queues keyed by name
queues = {q['jobQueueName']: q for q in self._client.describe_job_queues()['jobQueues']
if q['state'] == 'ENABLED' and q['status'] == 'VALID'}
if not queues:
raise Exception('No job queues with state=ENABLED and status=VALID')
# Pick the first queue as default
return list(queues.keys())[0] | def function[get_active_queue, parameter[self]]:
constant[Get name of first active job queue]
variable[queues] assign[=] <ast.DictComp object at 0x7da18dc05a80>
if <ast.UnaryOp object at 0x7da18c4cf1c0> begin[:]
<ast.Raise object at 0x7da18c4ce530>
return[call[call[name[list], parameter[call[name[queues].keys, parameter[]]]]][constant[0]]] | keyword[def] identifier[get_active_queue] ( identifier[self] ):
literal[string]
identifier[queues] ={ identifier[q] [ literal[string] ]: identifier[q] keyword[for] identifier[q] keyword[in] identifier[self] . identifier[_client] . identifier[describe_job_queues] ()[ literal[string] ]
keyword[if] identifier[q] [ literal[string] ]== literal[string] keyword[and] identifier[q] [ literal[string] ]== literal[string] }
keyword[if] keyword[not] identifier[queues] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[return] identifier[list] ( identifier[queues] . identifier[keys] ())[ literal[int] ] | def get_active_queue(self):
"""Get name of first active job queue"""
# Get dict of active queues keyed by name
queues = {q['jobQueueName']: q for q in self._client.describe_job_queues()['jobQueues'] if q['state'] == 'ENABLED' and q['status'] == 'VALID'}
if not queues:
raise Exception('No job queues with state=ENABLED and status=VALID') # depends on [control=['if'], data=[]]
# Pick the first queue as default
return list(queues.keys())[0] |
def qgis_composer_extractor(impact_report, component_metadata):
"""Extract composer context.
This method extract necessary context for a given impact report and
component metadata and save the context so it can be used in composer
rendering phase
:param impact_report: the impact report that acts as a proxy to fetch
all the data that extractor needed
:type impact_report: safe.report.impact_report.ImpactReport
:param component_metadata: the component metadata. Used to obtain
information about the component we want to render
:type component_metadata: safe.report.report_metadata.
ReportComponentsMetadata
:return: context for rendering phase
:rtype: dict
.. versionadded:: 4.0
"""
# QGIS Composer needed certain context to generate the output
# - Map Settings
# - Substitution maps
# - Element settings, such as icon for picture file or image source
# Generate map settings
qgis_context = impact_report.qgis_composition_context
inasafe_context = impact_report.inasafe_context
provenance = impact_report.impact_function.provenance
extra_args = component_metadata.extra_args
context = QGISComposerContext()
# Set default image elements to replace
image_elements = [
{
'id': 'safe-logo',
'path': inasafe_context.inasafe_logo
},
{
'id': 'black-inasafe-logo',
'path': inasafe_context.black_inasafe_logo
},
{
'id': 'white-inasafe-logo',
'path': inasafe_context.white_inasafe_logo
},
{
'id': 'north-arrow',
'path': inasafe_context.north_arrow
},
{
'id': 'organisation-logo',
'path': inasafe_context.organisation_logo
},
{
'id': 'supporters_logo',
'path': inasafe_context.supporters_logo
}
]
context.image_elements = image_elements
# Set default HTML Frame elements to replace
html_frame_elements = [
{
'id': 'impact-report',
'mode': 'text', # another mode is url
'text': '', # TODO: get impact summary table
}
]
context.html_frame_elements = html_frame_elements
"""Define the layers for the impact map."""
project = QgsProject.instance()
layers = []
exposure_summary_layers = []
if impact_report.multi_exposure_impact_function:
for impact_function in (
impact_report.multi_exposure_impact_function.impact_functions):
impact_layer = impact_function.exposure_summary or (
impact_function.aggregate_hazard_impacted)
exposure_summary_layers.append(impact_layer)
# use custom ordered layer if any
if impact_report.ordered_layers:
for layer in impact_report.ordered_layers:
layers.append(layer)
# We are keeping this if we want to enable below behaviour again.
# Currently realtime might have layer order without impact layer in it.
# # make sure at least there is an impact layer
# if impact_report.multi_exposure_impact_function:
# additional_layers = [] # for exposure summary layers
# impact_layer_found = False
# impact_functions = (
# impact_report.multi_exposure_impact_function.impact_functions)
# # check for impact layer occurrences
# for analysis in impact_functions:
# impact_layer = analysis.exposure_summary or (
# analysis.aggregate_hazard_impacted)
# for index, layer in enumerate(layers):
# if impact_layer.source() == layer.source():
# add_impact_layers_to_canvas(analysis)
# layers[index] = impact_layer
# impact_layer_found = True
# if not impact_layer_found:
# for analysis in impact_functions:
# add_impact_layers_to_canvas(analysis)
# impact_layer = analysis.exposure_summary or (
# analysis.aggregate_hazard_impacted)
# layer_uri = full_layer_uri(impact_layer)
# layer = load_layer_from_registry(layer_uri)
# additional_layers.append(layer)
# layers = additional_layers + layers
# else:
# impact_layer = (
# impact_report.impact_function.exposure_summary or (
# impact_report.impact_function.aggregate_hazard_impacted))
# if impact_layer not in layers:
# layers.insert(0, impact_layer)
# use default layer order if no custom ordered layer found
else:
if not impact_report.multi_exposure_impact_function: # single IF
layers = [impact_report.impact] + impact_report.extra_layers
else: # multi-exposure IF
layers = [] + impact_report.extra_layers
add_supplementary_layers = (
not impact_report.multi_exposure_impact_function or not (
impact_report.multi_exposure_impact_function.
output_layers_ordered)
)
if add_supplementary_layers:
# Check show only impact.
show_only_impact = setting(
'set_show_only_impact_on_report', expected_type=bool)
if not show_only_impact:
hazard_layer = project.mapLayers().get(
provenance['hazard_layer_id'], None)
aggregation_layer_id = provenance['aggregation_layer_id']
if aggregation_layer_id:
aggregation_layer = project.mapLayers().get(
aggregation_layer_id, None)
layers.append(aggregation_layer)
layers.append(hazard_layer)
# check hide exposure settings
hide_exposure_flag = setting(
'setHideExposureFlag', expected_type=bool)
if not hide_exposure_flag:
exposure_layers_id = []
if provenance.get(
provenance_exposure_layer_id['provenance_key']):
exposure_layers_id.append(
provenance.get(
provenance_exposure_layer_id['provenance_key']))
elif provenance.get(
provenance_multi_exposure_layers_id['provenance_key']):
exposure_layers_id = provenance.get(
provenance_multi_exposure_layers_id['provenance_key'])
# place exposure at the bottom
for layer_id in exposure_layers_id:
exposure_layer = project.mapLayers().get(layer_id)
layers.append(exposure_layer)
# default extent is analysis extent
if not qgis_context.extent:
qgis_context.extent = impact_report.impact_function.analysis_extent
map_elements = [
{
'id': 'impact-map',
'extent': qgis_context.extent,
'grid_split_count': 5,
'layers': layers,
}
]
context.map_elements = map_elements
# calculate map_legends, only show the legend for impact layer
if impact_report.legend_layers: # use requested legend if any
layers = impact_report.legend_layers
elif impact_report.multi_exposure_impact_function: # multi-exposure IF
layers = exposure_summary_layers
else: # single IF
layers = [impact_report.impact]
symbol_count = 0
for l in layers:
layer = l
""":type: qgis.core.QgsMapLayer"""
try:
symbol_count += len(layer.legendSymbologyItems())
continue
except Exception: # pylint: disable=broad-except
pass
try:
symbol_count += len(layer.renderer().legendSymbolItems())
continue
except Exception: # pylint: disable=broad-except
pass
symbol_count += 1
legend_title = provenance.get('map_legend_title') or ''
map_legends = [
{
'id': 'impact-legend',
'title': legend_title,
'layers': layers,
'symbol_count': symbol_count,
# 'column_count': 2, # the number of column in legend display
}
]
context.map_legends = map_legends
# process substitution map
start_datetime = provenance['start_datetime']
""":type: datetime.datetime"""
date_format = resolve_from_dictionary(extra_args, 'date-format')
time_format = resolve_from_dictionary(extra_args, 'time-format')
if isinstance(start_datetime, datetime.datetime):
date = start_datetime.strftime(date_format)
time = start_datetime.strftime(time_format)
else:
date = ''
time = ''
long_version = get_version()
tokens = long_version.split('.')
version = '%s.%s.%s' % (tokens[0], tokens[1], tokens[2])
# Get title of the layer
title = provenance.get('map_title') or ''
# Set source
unknown_source_text = resolve_from_dictionary(
extra_args, ['defaults', 'unknown_source'])
aggregation_not_used = resolve_from_dictionary(
extra_args, ['defaults', 'aggregation_not_used'])
hazard_source = (
provenance.get(
'hazard_keywords', {}).get('source') or unknown_source_text)
exposure_source = (
provenance.get(
'exposure_keywords', {}).get('source') or unknown_source_text)
if provenance['aggregation_layer']:
aggregation_source = (
provenance['aggregation_keywords'].get('source')
or unknown_source_text)
else:
aggregation_source = aggregation_not_used
spatial_reference_format = resolve_from_dictionary(
extra_args, 'spatial-reference-format')
reference_name = spatial_reference_format.format(
crs=impact_report.impact_function.crs.authid())
analysis_layer = impact_report.analysis
analysis_name = value_from_field_name(
analysis_name_field['field_name'], analysis_layer)
# Prepare the substitution map
version_title = resolve_from_dictionary(extra_args, 'version-title')
disclaimer_title = resolve_from_dictionary(extra_args, 'disclaimer-title')
date_title = resolve_from_dictionary(extra_args, 'date-title')
time_title = resolve_from_dictionary(extra_args, 'time-title')
caution_title = resolve_from_dictionary(extra_args, 'caution-title')
caution_text = resolve_from_dictionary(extra_args, 'caution-text')
version_text = resolve_from_dictionary(extra_args, 'version-text')
legend_section_title = resolve_from_dictionary(
extra_args, 'legend-title')
information_title = resolve_from_dictionary(
extra_args, 'information-title')
supporters_title = resolve_from_dictionary(
extra_args, 'supporters-title')
source_title = resolve_from_dictionary(extra_args, 'source-title')
analysis_title = resolve_from_dictionary(extra_args, 'analysis-title')
reference_title = resolve_from_dictionary(
extra_args, 'spatial-reference-title')
substitution_map = {
'impact-title': title,
'date': date,
'time': time,
'safe-version': version, # deprecated
'disclaimer': inasafe_context.disclaimer,
# These added in 3.2
'version-title': version_title,
'inasafe-version': version,
'disclaimer-title': disclaimer_title,
'date-title': date_title,
'time-title': time_title,
'caution-title': caution_title,
'caution-text': caution_text,
'version-text': version_text.format(version=version),
'legend-title': legend_section_title,
'information-title': information_title,
'supporters-title': supporters_title,
'source-title': source_title,
'analysis-title': analysis_title,
'analysis-name': analysis_name,
'reference-title': reference_title,
'reference-name': reference_name,
'hazard-source': hazard_source,
'exposure-source': exposure_source,
'aggregation-source': aggregation_source,
}
context.substitution_map = substitution_map
return context | def function[qgis_composer_extractor, parameter[impact_report, component_metadata]]:
constant[Extract composer context.
This method extract necessary context for a given impact report and
component metadata and save the context so it can be used in composer
rendering phase
:param impact_report: the impact report that acts as a proxy to fetch
all the data that extractor needed
:type impact_report: safe.report.impact_report.ImpactReport
:param component_metadata: the component metadata. Used to obtain
information about the component we want to render
:type component_metadata: safe.report.report_metadata.
ReportComponentsMetadata
:return: context for rendering phase
:rtype: dict
.. versionadded:: 4.0
]
variable[qgis_context] assign[=] name[impact_report].qgis_composition_context
variable[inasafe_context] assign[=] name[impact_report].inasafe_context
variable[provenance] assign[=] name[impact_report].impact_function.provenance
variable[extra_args] assign[=] name[component_metadata].extra_args
variable[context] assign[=] call[name[QGISComposerContext], parameter[]]
variable[image_elements] assign[=] list[[<ast.Dict object at 0x7da20e9b1de0>, <ast.Dict object at 0x7da20e9b2f80>, <ast.Dict object at 0x7da20e9b2f50>, <ast.Dict object at 0x7da20e9b1bd0>, <ast.Dict object at 0x7da20e9b2110>, <ast.Dict object at 0x7da20e9b2320>]]
name[context].image_elements assign[=] name[image_elements]
variable[html_frame_elements] assign[=] list[[<ast.Dict object at 0x7da20e9b09a0>]]
name[context].html_frame_elements assign[=] name[html_frame_elements]
constant[Define the layers for the impact map.]
variable[project] assign[=] call[name[QgsProject].instance, parameter[]]
variable[layers] assign[=] list[[]]
variable[exposure_summary_layers] assign[=] list[[]]
if name[impact_report].multi_exposure_impact_function begin[:]
for taget[name[impact_function]] in starred[name[impact_report].multi_exposure_impact_function.impact_functions] begin[:]
variable[impact_layer] assign[=] <ast.BoolOp object at 0x7da20e9b0580>
call[name[exposure_summary_layers].append, parameter[name[impact_layer]]]
if name[impact_report].ordered_layers begin[:]
for taget[name[layer]] in starred[name[impact_report].ordered_layers] begin[:]
call[name[layers].append, parameter[name[layer]]]
if <ast.UnaryOp object at 0x7da204961a80> begin[:]
name[qgis_context].extent assign[=] name[impact_report].impact_function.analysis_extent
variable[map_elements] assign[=] list[[<ast.Dict object at 0x7da204963e50>]]
name[context].map_elements assign[=] name[map_elements]
if name[impact_report].legend_layers begin[:]
variable[layers] assign[=] name[impact_report].legend_layers
variable[symbol_count] assign[=] constant[0]
for taget[name[l]] in starred[name[layers]] begin[:]
variable[layer] assign[=] name[l]
constant[:type: qgis.core.QgsMapLayer]
<ast.Try object at 0x7da20c6ab4c0>
<ast.Try object at 0x7da20c6a8a60>
<ast.AugAssign object at 0x7da20c6a97e0>
variable[legend_title] assign[=] <ast.BoolOp object at 0x7da20c6ab340>
variable[map_legends] assign[=] list[[<ast.Dict object at 0x7da20c6a89a0>]]
name[context].map_legends assign[=] name[map_legends]
variable[start_datetime] assign[=] call[name[provenance]][constant[start_datetime]]
constant[:type: datetime.datetime]
variable[date_format] assign[=] call[name[resolve_from_dictionary], parameter[name[extra_args], constant[date-format]]]
variable[time_format] assign[=] call[name[resolve_from_dictionary], parameter[name[extra_args], constant[time-format]]]
if call[name[isinstance], parameter[name[start_datetime], name[datetime].datetime]] begin[:]
variable[date] assign[=] call[name[start_datetime].strftime, parameter[name[date_format]]]
variable[time] assign[=] call[name[start_datetime].strftime, parameter[name[time_format]]]
variable[long_version] assign[=] call[name[get_version], parameter[]]
variable[tokens] assign[=] call[name[long_version].split, parameter[constant[.]]]
variable[version] assign[=] binary_operation[constant[%s.%s.%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da20c6abd30>, <ast.Subscript object at 0x7da20c6aa260>, <ast.Subscript object at 0x7da20c6a9690>]]]
variable[title] assign[=] <ast.BoolOp object at 0x7da20c6a8130>
variable[unknown_source_text] assign[=] call[name[resolve_from_dictionary], parameter[name[extra_args], list[[<ast.Constant object at 0x7da20c6a82e0>, <ast.Constant object at 0x7da20c6aa1a0>]]]]
variable[aggregation_not_used] assign[=] call[name[resolve_from_dictionary], parameter[name[extra_args], list[[<ast.Constant object at 0x7da20c6a8220>, <ast.Constant object at 0x7da20c6abbe0>]]]]
variable[hazard_source] assign[=] <ast.BoolOp object at 0x7da20c6a9e10>
variable[exposure_source] assign[=] <ast.BoolOp object at 0x7da20c6aa320>
if call[name[provenance]][constant[aggregation_layer]] begin[:]
variable[aggregation_source] assign[=] <ast.BoolOp object at 0x7da20c6a8e50>
variable[spatial_reference_format] assign[=] call[name[resolve_from_dictionary], parameter[name[extra_args], constant[spatial-reference-format]]]
variable[reference_name] assign[=] call[name[spatial_reference_format].format, parameter[]]
variable[analysis_layer] assign[=] name[impact_report].analysis
variable[analysis_name] assign[=] call[name[value_from_field_name], parameter[call[name[analysis_name_field]][constant[field_name]], name[analysis_layer]]]
variable[version_title] assign[=] call[name[resolve_from_dictionary], parameter[name[extra_args], constant[version-title]]]
variable[disclaimer_title] assign[=] call[name[resolve_from_dictionary], parameter[name[extra_args], constant[disclaimer-title]]]
variable[date_title] assign[=] call[name[resolve_from_dictionary], parameter[name[extra_args], constant[date-title]]]
variable[time_title] assign[=] call[name[resolve_from_dictionary], parameter[name[extra_args], constant[time-title]]]
variable[caution_title] assign[=] call[name[resolve_from_dictionary], parameter[name[extra_args], constant[caution-title]]]
variable[caution_text] assign[=] call[name[resolve_from_dictionary], parameter[name[extra_args], constant[caution-text]]]
variable[version_text] assign[=] call[name[resolve_from_dictionary], parameter[name[extra_args], constant[version-text]]]
variable[legend_section_title] assign[=] call[name[resolve_from_dictionary], parameter[name[extra_args], constant[legend-title]]]
variable[information_title] assign[=] call[name[resolve_from_dictionary], parameter[name[extra_args], constant[information-title]]]
variable[supporters_title] assign[=] call[name[resolve_from_dictionary], parameter[name[extra_args], constant[supporters-title]]]
variable[source_title] assign[=] call[name[resolve_from_dictionary], parameter[name[extra_args], constant[source-title]]]
variable[analysis_title] assign[=] call[name[resolve_from_dictionary], parameter[name[extra_args], constant[analysis-title]]]
variable[reference_title] assign[=] call[name[resolve_from_dictionary], parameter[name[extra_args], constant[spatial-reference-title]]]
variable[substitution_map] assign[=] dictionary[[<ast.Constant object at 0x7da20c6aa860>, <ast.Constant object at 0x7da20c6a91e0>, <ast.Constant object at 0x7da20c6a81c0>, <ast.Constant object at 0x7da20c6abe80>, <ast.Constant object at 0x7da20c6ab730>, <ast.Constant object at 0x7da20c6a9e40>, <ast.Constant object at 0x7da20c6a98d0>, <ast.Constant object at 0x7da20c6a9660>, <ast.Constant object at 0x7da20c6a92a0>, <ast.Constant object at 0x7da20c6aabf0>, <ast.Constant object at 0x7da20c6aa800>, <ast.Constant object at 0x7da20c6a8dc0>, <ast.Constant object at 0x7da20c6a99c0>, <ast.Constant object at 0x7da20c6a8a00>, <ast.Constant object at 0x7da20c6ab550>, <ast.Constant object at 0x7da20c6aa5f0>, <ast.Constant object at 0x7da20c6ab370>, <ast.Constant object at 0x7da20c6a8280>, <ast.Constant object at 0x7da20c6aba00>, <ast.Constant object at 0x7da20c6aacb0>, <ast.Constant object at 0x7da20c6aa680>, <ast.Constant object at 0x7da20c6aa1d0>, <ast.Constant object at 0x7da20c6aa440>, <ast.Constant object at 0x7da20c6aaa40>], [<ast.Name object at 0x7da20c6a9600>, <ast.Name object at 0x7da20c6a9ed0>, <ast.Name object at 0x7da20c6ab9d0>, <ast.Name object at 0x7da20c6aaf80>, <ast.Attribute object at 0x7da20c6a8fd0>, <ast.Name object at 0x7da20c6a9870>, <ast.Name object at 0x7da20c6a9000>, <ast.Name object at 0x7da20c6abc10>, <ast.Name object at 0x7da20c6ab0d0>, <ast.Name object at 0x7da204622bf0>, <ast.Name object at 0x7da204621c60>, <ast.Name object at 0x7da204623490>, <ast.Call object at 0x7da204623fa0>, <ast.Name object at 0x7da204620e80>, <ast.Name object at 0x7da2046230d0>, <ast.Name object at 0x7da204620c40>, <ast.Name object at 0x7da204622500>, <ast.Name object at 0x7da2046232e0>, <ast.Name object at 0x7da2046220e0>, <ast.Name object at 0x7da204620a00>, <ast.Name object at 0x7da204621510>, <ast.Name object at 0x7da204622530>, <ast.Name object at 0x7da204622c20>, <ast.Name object at 0x7da204620730>]]
name[context].substitution_map assign[=] name[substitution_map]
return[name[context]] | keyword[def] identifier[qgis_composer_extractor] ( identifier[impact_report] , identifier[component_metadata] ):
literal[string]
identifier[qgis_context] = identifier[impact_report] . identifier[qgis_composition_context]
identifier[inasafe_context] = identifier[impact_report] . identifier[inasafe_context]
identifier[provenance] = identifier[impact_report] . identifier[impact_function] . identifier[provenance]
identifier[extra_args] = identifier[component_metadata] . identifier[extra_args]
identifier[context] = identifier[QGISComposerContext] ()
identifier[image_elements] =[
{
literal[string] : literal[string] ,
literal[string] : identifier[inasafe_context] . identifier[inasafe_logo]
},
{
literal[string] : literal[string] ,
literal[string] : identifier[inasafe_context] . identifier[black_inasafe_logo]
},
{
literal[string] : literal[string] ,
literal[string] : identifier[inasafe_context] . identifier[white_inasafe_logo]
},
{
literal[string] : literal[string] ,
literal[string] : identifier[inasafe_context] . identifier[north_arrow]
},
{
literal[string] : literal[string] ,
literal[string] : identifier[inasafe_context] . identifier[organisation_logo]
},
{
literal[string] : literal[string] ,
literal[string] : identifier[inasafe_context] . identifier[supporters_logo]
}
]
identifier[context] . identifier[image_elements] = identifier[image_elements]
identifier[html_frame_elements] =[
{
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
}
]
identifier[context] . identifier[html_frame_elements] = identifier[html_frame_elements]
literal[string]
identifier[project] = identifier[QgsProject] . identifier[instance] ()
identifier[layers] =[]
identifier[exposure_summary_layers] =[]
keyword[if] identifier[impact_report] . identifier[multi_exposure_impact_function] :
keyword[for] identifier[impact_function] keyword[in] (
identifier[impact_report] . identifier[multi_exposure_impact_function] . identifier[impact_functions] ):
identifier[impact_layer] = identifier[impact_function] . identifier[exposure_summary] keyword[or] (
identifier[impact_function] . identifier[aggregate_hazard_impacted] )
identifier[exposure_summary_layers] . identifier[append] ( identifier[impact_layer] )
keyword[if] identifier[impact_report] . identifier[ordered_layers] :
keyword[for] identifier[layer] keyword[in] identifier[impact_report] . identifier[ordered_layers] :
identifier[layers] . identifier[append] ( identifier[layer] )
keyword[else] :
keyword[if] keyword[not] identifier[impact_report] . identifier[multi_exposure_impact_function] :
identifier[layers] =[ identifier[impact_report] . identifier[impact] ]+ identifier[impact_report] . identifier[extra_layers]
keyword[else] :
identifier[layers] =[]+ identifier[impact_report] . identifier[extra_layers]
identifier[add_supplementary_layers] =(
keyword[not] identifier[impact_report] . identifier[multi_exposure_impact_function] keyword[or] keyword[not] (
identifier[impact_report] . identifier[multi_exposure_impact_function] .
identifier[output_layers_ordered] )
)
keyword[if] identifier[add_supplementary_layers] :
identifier[show_only_impact] = identifier[setting] (
literal[string] , identifier[expected_type] = identifier[bool] )
keyword[if] keyword[not] identifier[show_only_impact] :
identifier[hazard_layer] = identifier[project] . identifier[mapLayers] (). identifier[get] (
identifier[provenance] [ literal[string] ], keyword[None] )
identifier[aggregation_layer_id] = identifier[provenance] [ literal[string] ]
keyword[if] identifier[aggregation_layer_id] :
identifier[aggregation_layer] = identifier[project] . identifier[mapLayers] (). identifier[get] (
identifier[aggregation_layer_id] , keyword[None] )
identifier[layers] . identifier[append] ( identifier[aggregation_layer] )
identifier[layers] . identifier[append] ( identifier[hazard_layer] )
identifier[hide_exposure_flag] = identifier[setting] (
literal[string] , identifier[expected_type] = identifier[bool] )
keyword[if] keyword[not] identifier[hide_exposure_flag] :
identifier[exposure_layers_id] =[]
keyword[if] identifier[provenance] . identifier[get] (
identifier[provenance_exposure_layer_id] [ literal[string] ]):
identifier[exposure_layers_id] . identifier[append] (
identifier[provenance] . identifier[get] (
identifier[provenance_exposure_layer_id] [ literal[string] ]))
keyword[elif] identifier[provenance] . identifier[get] (
identifier[provenance_multi_exposure_layers_id] [ literal[string] ]):
identifier[exposure_layers_id] = identifier[provenance] . identifier[get] (
identifier[provenance_multi_exposure_layers_id] [ literal[string] ])
keyword[for] identifier[layer_id] keyword[in] identifier[exposure_layers_id] :
identifier[exposure_layer] = identifier[project] . identifier[mapLayers] (). identifier[get] ( identifier[layer_id] )
identifier[layers] . identifier[append] ( identifier[exposure_layer] )
keyword[if] keyword[not] identifier[qgis_context] . identifier[extent] :
identifier[qgis_context] . identifier[extent] = identifier[impact_report] . identifier[impact_function] . identifier[analysis_extent]
identifier[map_elements] =[
{
literal[string] : literal[string] ,
literal[string] : identifier[qgis_context] . identifier[extent] ,
literal[string] : literal[int] ,
literal[string] : identifier[layers] ,
}
]
identifier[context] . identifier[map_elements] = identifier[map_elements]
keyword[if] identifier[impact_report] . identifier[legend_layers] :
identifier[layers] = identifier[impact_report] . identifier[legend_layers]
keyword[elif] identifier[impact_report] . identifier[multi_exposure_impact_function] :
identifier[layers] = identifier[exposure_summary_layers]
keyword[else] :
identifier[layers] =[ identifier[impact_report] . identifier[impact] ]
identifier[symbol_count] = literal[int]
keyword[for] identifier[l] keyword[in] identifier[layers] :
identifier[layer] = identifier[l]
literal[string]
keyword[try] :
identifier[symbol_count] += identifier[len] ( identifier[layer] . identifier[legendSymbologyItems] ())
keyword[continue]
keyword[except] identifier[Exception] :
keyword[pass]
keyword[try] :
identifier[symbol_count] += identifier[len] ( identifier[layer] . identifier[renderer] (). identifier[legendSymbolItems] ())
keyword[continue]
keyword[except] identifier[Exception] :
keyword[pass]
identifier[symbol_count] += literal[int]
identifier[legend_title] = identifier[provenance] . identifier[get] ( literal[string] ) keyword[or] literal[string]
identifier[map_legends] =[
{
literal[string] : literal[string] ,
literal[string] : identifier[legend_title] ,
literal[string] : identifier[layers] ,
literal[string] : identifier[symbol_count] ,
}
]
identifier[context] . identifier[map_legends] = identifier[map_legends]
identifier[start_datetime] = identifier[provenance] [ literal[string] ]
literal[string]
identifier[date_format] = identifier[resolve_from_dictionary] ( identifier[extra_args] , literal[string] )
identifier[time_format] = identifier[resolve_from_dictionary] ( identifier[extra_args] , literal[string] )
keyword[if] identifier[isinstance] ( identifier[start_datetime] , identifier[datetime] . identifier[datetime] ):
identifier[date] = identifier[start_datetime] . identifier[strftime] ( identifier[date_format] )
identifier[time] = identifier[start_datetime] . identifier[strftime] ( identifier[time_format] )
keyword[else] :
identifier[date] = literal[string]
identifier[time] = literal[string]
identifier[long_version] = identifier[get_version] ()
identifier[tokens] = identifier[long_version] . identifier[split] ( literal[string] )
identifier[version] = literal[string] %( identifier[tokens] [ literal[int] ], identifier[tokens] [ literal[int] ], identifier[tokens] [ literal[int] ])
identifier[title] = identifier[provenance] . identifier[get] ( literal[string] ) keyword[or] literal[string]
identifier[unknown_source_text] = identifier[resolve_from_dictionary] (
identifier[extra_args] ,[ literal[string] , literal[string] ])
identifier[aggregation_not_used] = identifier[resolve_from_dictionary] (
identifier[extra_args] ,[ literal[string] , literal[string] ])
identifier[hazard_source] =(
identifier[provenance] . identifier[get] (
literal[string] ,{}). identifier[get] ( literal[string] ) keyword[or] identifier[unknown_source_text] )
identifier[exposure_source] =(
identifier[provenance] . identifier[get] (
literal[string] ,{}). identifier[get] ( literal[string] ) keyword[or] identifier[unknown_source_text] )
keyword[if] identifier[provenance] [ literal[string] ]:
identifier[aggregation_source] =(
identifier[provenance] [ literal[string] ]. identifier[get] ( literal[string] )
keyword[or] identifier[unknown_source_text] )
keyword[else] :
identifier[aggregation_source] = identifier[aggregation_not_used]
identifier[spatial_reference_format] = identifier[resolve_from_dictionary] (
identifier[extra_args] , literal[string] )
identifier[reference_name] = identifier[spatial_reference_format] . identifier[format] (
identifier[crs] = identifier[impact_report] . identifier[impact_function] . identifier[crs] . identifier[authid] ())
identifier[analysis_layer] = identifier[impact_report] . identifier[analysis]
identifier[analysis_name] = identifier[value_from_field_name] (
identifier[analysis_name_field] [ literal[string] ], identifier[analysis_layer] )
identifier[version_title] = identifier[resolve_from_dictionary] ( identifier[extra_args] , literal[string] )
identifier[disclaimer_title] = identifier[resolve_from_dictionary] ( identifier[extra_args] , literal[string] )
identifier[date_title] = identifier[resolve_from_dictionary] ( identifier[extra_args] , literal[string] )
identifier[time_title] = identifier[resolve_from_dictionary] ( identifier[extra_args] , literal[string] )
identifier[caution_title] = identifier[resolve_from_dictionary] ( identifier[extra_args] , literal[string] )
identifier[caution_text] = identifier[resolve_from_dictionary] ( identifier[extra_args] , literal[string] )
identifier[version_text] = identifier[resolve_from_dictionary] ( identifier[extra_args] , literal[string] )
identifier[legend_section_title] = identifier[resolve_from_dictionary] (
identifier[extra_args] , literal[string] )
identifier[information_title] = identifier[resolve_from_dictionary] (
identifier[extra_args] , literal[string] )
identifier[supporters_title] = identifier[resolve_from_dictionary] (
identifier[extra_args] , literal[string] )
identifier[source_title] = identifier[resolve_from_dictionary] ( identifier[extra_args] , literal[string] )
identifier[analysis_title] = identifier[resolve_from_dictionary] ( identifier[extra_args] , literal[string] )
identifier[reference_title] = identifier[resolve_from_dictionary] (
identifier[extra_args] , literal[string] )
identifier[substitution_map] ={
literal[string] : identifier[title] ,
literal[string] : identifier[date] ,
literal[string] : identifier[time] ,
literal[string] : identifier[version] ,
literal[string] : identifier[inasafe_context] . identifier[disclaimer] ,
literal[string] : identifier[version_title] ,
literal[string] : identifier[version] ,
literal[string] : identifier[disclaimer_title] ,
literal[string] : identifier[date_title] ,
literal[string] : identifier[time_title] ,
literal[string] : identifier[caution_title] ,
literal[string] : identifier[caution_text] ,
literal[string] : identifier[version_text] . identifier[format] ( identifier[version] = identifier[version] ),
literal[string] : identifier[legend_section_title] ,
literal[string] : identifier[information_title] ,
literal[string] : identifier[supporters_title] ,
literal[string] : identifier[source_title] ,
literal[string] : identifier[analysis_title] ,
literal[string] : identifier[analysis_name] ,
literal[string] : identifier[reference_title] ,
literal[string] : identifier[reference_name] ,
literal[string] : identifier[hazard_source] ,
literal[string] : identifier[exposure_source] ,
literal[string] : identifier[aggregation_source] ,
}
identifier[context] . identifier[substitution_map] = identifier[substitution_map]
keyword[return] identifier[context] | def qgis_composer_extractor(impact_report, component_metadata):
"""Extract composer context.
This method extract necessary context for a given impact report and
component metadata and save the context so it can be used in composer
rendering phase
:param impact_report: the impact report that acts as a proxy to fetch
all the data that extractor needed
:type impact_report: safe.report.impact_report.ImpactReport
:param component_metadata: the component metadata. Used to obtain
information about the component we want to render
:type component_metadata: safe.report.report_metadata.
ReportComponentsMetadata
:return: context for rendering phase
:rtype: dict
.. versionadded:: 4.0
"""
# QGIS Composer needed certain context to generate the output
# - Map Settings
# - Substitution maps
# - Element settings, such as icon for picture file or image source
# Generate map settings
qgis_context = impact_report.qgis_composition_context
inasafe_context = impact_report.inasafe_context
provenance = impact_report.impact_function.provenance
extra_args = component_metadata.extra_args
context = QGISComposerContext()
# Set default image elements to replace
image_elements = [{'id': 'safe-logo', 'path': inasafe_context.inasafe_logo}, {'id': 'black-inasafe-logo', 'path': inasafe_context.black_inasafe_logo}, {'id': 'white-inasafe-logo', 'path': inasafe_context.white_inasafe_logo}, {'id': 'north-arrow', 'path': inasafe_context.north_arrow}, {'id': 'organisation-logo', 'path': inasafe_context.organisation_logo}, {'id': 'supporters_logo', 'path': inasafe_context.supporters_logo}]
context.image_elements = image_elements
# Set default HTML Frame elements to replace
# another mode is url
# TODO: get impact summary table
html_frame_elements = [{'id': 'impact-report', 'mode': 'text', 'text': ''}]
context.html_frame_elements = html_frame_elements
'Define the layers for the impact map.'
project = QgsProject.instance()
layers = []
exposure_summary_layers = []
if impact_report.multi_exposure_impact_function:
for impact_function in impact_report.multi_exposure_impact_function.impact_functions:
impact_layer = impact_function.exposure_summary or impact_function.aggregate_hazard_impacted
exposure_summary_layers.append(impact_layer) # depends on [control=['for'], data=['impact_function']] # depends on [control=['if'], data=[]]
# use custom ordered layer if any
if impact_report.ordered_layers:
for layer in impact_report.ordered_layers:
layers.append(layer) # depends on [control=['for'], data=['layer']] # depends on [control=['if'], data=[]]
else:
# We are keeping this if we want to enable below behaviour again.
# Currently realtime might have layer order without impact layer in it.
# # make sure at least there is an impact layer
# if impact_report.multi_exposure_impact_function:
# additional_layers = [] # for exposure summary layers
# impact_layer_found = False
# impact_functions = (
# impact_report.multi_exposure_impact_function.impact_functions)
# # check for impact layer occurrences
# for analysis in impact_functions:
# impact_layer = analysis.exposure_summary or (
# analysis.aggregate_hazard_impacted)
# for index, layer in enumerate(layers):
# if impact_layer.source() == layer.source():
# add_impact_layers_to_canvas(analysis)
# layers[index] = impact_layer
# impact_layer_found = True
# if not impact_layer_found:
# for analysis in impact_functions:
# add_impact_layers_to_canvas(analysis)
# impact_layer = analysis.exposure_summary or (
# analysis.aggregate_hazard_impacted)
# layer_uri = full_layer_uri(impact_layer)
# layer = load_layer_from_registry(layer_uri)
# additional_layers.append(layer)
# layers = additional_layers + layers
# else:
# impact_layer = (
# impact_report.impact_function.exposure_summary or (
# impact_report.impact_function.aggregate_hazard_impacted))
# if impact_layer not in layers:
# layers.insert(0, impact_layer)
# use default layer order if no custom ordered layer found
if not impact_report.multi_exposure_impact_function: # single IF
layers = [impact_report.impact] + impact_report.extra_layers # depends on [control=['if'], data=[]]
else: # multi-exposure IF
layers = [] + impact_report.extra_layers
add_supplementary_layers = not impact_report.multi_exposure_impact_function or not impact_report.multi_exposure_impact_function.output_layers_ordered
if add_supplementary_layers:
# Check show only impact.
show_only_impact = setting('set_show_only_impact_on_report', expected_type=bool)
if not show_only_impact:
hazard_layer = project.mapLayers().get(provenance['hazard_layer_id'], None)
aggregation_layer_id = provenance['aggregation_layer_id']
if aggregation_layer_id:
aggregation_layer = project.mapLayers().get(aggregation_layer_id, None)
layers.append(aggregation_layer) # depends on [control=['if'], data=[]]
layers.append(hazard_layer) # depends on [control=['if'], data=[]]
# check hide exposure settings
hide_exposure_flag = setting('setHideExposureFlag', expected_type=bool)
if not hide_exposure_flag:
exposure_layers_id = []
if provenance.get(provenance_exposure_layer_id['provenance_key']):
exposure_layers_id.append(provenance.get(provenance_exposure_layer_id['provenance_key'])) # depends on [control=['if'], data=[]]
elif provenance.get(provenance_multi_exposure_layers_id['provenance_key']):
exposure_layers_id = provenance.get(provenance_multi_exposure_layers_id['provenance_key']) # depends on [control=['if'], data=[]]
# place exposure at the bottom
for layer_id in exposure_layers_id:
exposure_layer = project.mapLayers().get(layer_id)
layers.append(exposure_layer) # depends on [control=['for'], data=['layer_id']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# default extent is analysis extent
if not qgis_context.extent:
qgis_context.extent = impact_report.impact_function.analysis_extent # depends on [control=['if'], data=[]]
map_elements = [{'id': 'impact-map', 'extent': qgis_context.extent, 'grid_split_count': 5, 'layers': layers}]
context.map_elements = map_elements
# calculate map_legends, only show the legend for impact layer
if impact_report.legend_layers: # use requested legend if any
layers = impact_report.legend_layers # depends on [control=['if'], data=[]]
elif impact_report.multi_exposure_impact_function: # multi-exposure IF
layers = exposure_summary_layers # depends on [control=['if'], data=[]]
else: # single IF
layers = [impact_report.impact]
symbol_count = 0
for l in layers:
layer = l
':type: qgis.core.QgsMapLayer'
try:
symbol_count += len(layer.legendSymbologyItems())
continue # depends on [control=['try'], data=[]]
except Exception: # pylint: disable=broad-except
pass # depends on [control=['except'], data=[]]
try:
symbol_count += len(layer.renderer().legendSymbolItems())
continue # depends on [control=['try'], data=[]]
except Exception: # pylint: disable=broad-except
pass # depends on [control=['except'], data=[]]
symbol_count += 1 # depends on [control=['for'], data=['l']]
legend_title = provenance.get('map_legend_title') or ''
# 'column_count': 2, # the number of column in legend display
map_legends = [{'id': 'impact-legend', 'title': legend_title, 'layers': layers, 'symbol_count': symbol_count}]
context.map_legends = map_legends
# process substitution map
start_datetime = provenance['start_datetime']
':type: datetime.datetime'
date_format = resolve_from_dictionary(extra_args, 'date-format')
time_format = resolve_from_dictionary(extra_args, 'time-format')
if isinstance(start_datetime, datetime.datetime):
date = start_datetime.strftime(date_format)
time = start_datetime.strftime(time_format) # depends on [control=['if'], data=[]]
else:
date = ''
time = ''
long_version = get_version()
tokens = long_version.split('.')
version = '%s.%s.%s' % (tokens[0], tokens[1], tokens[2])
# Get title of the layer
title = provenance.get('map_title') or ''
# Set source
unknown_source_text = resolve_from_dictionary(extra_args, ['defaults', 'unknown_source'])
aggregation_not_used = resolve_from_dictionary(extra_args, ['defaults', 'aggregation_not_used'])
hazard_source = provenance.get('hazard_keywords', {}).get('source') or unknown_source_text
exposure_source = provenance.get('exposure_keywords', {}).get('source') or unknown_source_text
if provenance['aggregation_layer']:
aggregation_source = provenance['aggregation_keywords'].get('source') or unknown_source_text # depends on [control=['if'], data=[]]
else:
aggregation_source = aggregation_not_used
spatial_reference_format = resolve_from_dictionary(extra_args, 'spatial-reference-format')
reference_name = spatial_reference_format.format(crs=impact_report.impact_function.crs.authid())
analysis_layer = impact_report.analysis
analysis_name = value_from_field_name(analysis_name_field['field_name'], analysis_layer)
# Prepare the substitution map
version_title = resolve_from_dictionary(extra_args, 'version-title')
disclaimer_title = resolve_from_dictionary(extra_args, 'disclaimer-title')
date_title = resolve_from_dictionary(extra_args, 'date-title')
time_title = resolve_from_dictionary(extra_args, 'time-title')
caution_title = resolve_from_dictionary(extra_args, 'caution-title')
caution_text = resolve_from_dictionary(extra_args, 'caution-text')
version_text = resolve_from_dictionary(extra_args, 'version-text')
legend_section_title = resolve_from_dictionary(extra_args, 'legend-title')
information_title = resolve_from_dictionary(extra_args, 'information-title')
supporters_title = resolve_from_dictionary(extra_args, 'supporters-title')
source_title = resolve_from_dictionary(extra_args, 'source-title')
analysis_title = resolve_from_dictionary(extra_args, 'analysis-title')
reference_title = resolve_from_dictionary(extra_args, 'spatial-reference-title') # deprecated
# These added in 3.2
substitution_map = {'impact-title': title, 'date': date, 'time': time, 'safe-version': version, 'disclaimer': inasafe_context.disclaimer, 'version-title': version_title, 'inasafe-version': version, 'disclaimer-title': disclaimer_title, 'date-title': date_title, 'time-title': time_title, 'caution-title': caution_title, 'caution-text': caution_text, 'version-text': version_text.format(version=version), 'legend-title': legend_section_title, 'information-title': information_title, 'supporters-title': supporters_title, 'source-title': source_title, 'analysis-title': analysis_title, 'analysis-name': analysis_name, 'reference-title': reference_title, 'reference-name': reference_name, 'hazard-source': hazard_source, 'exposure-source': exposure_source, 'aggregation-source': aggregation_source}
context.substitution_map = substitution_map
return context |
def main():
    """
    Prototype to see how an RPG simulation might be used
    in the AIKIF framework.

    Builds two simple characters and runs a battle simulation
    to see how each succeeds in a random world against the other
    player's character (character stats, world, locations).
    """
    player_one = Character('Albogh', str=4, int=7, sta=50)
    player_two = Character('Zoltor', str=6, int=6, sta=70)
    print('PLAYER1 [start]:', player_one)
    print('PLAYER2 [start]:', player_two)
    battle = Battle(player_one, player_two)
    print(battle)
    print('PLAYER1 [end]:', player_one)
    print('PLAYER2 [end]:', player_two)
constant[
Prototype to see how an RPG simulation might be used
in the AIKIF framework.
The idea is to build a simple character and run a simulation
to see how it succeeds in a random world against another players
character
character
stats
world
locations
]
variable[character1] assign[=] call[name[Character], parameter[constant[Albogh]]]
variable[character2] assign[=] call[name[Character], parameter[constant[Zoltor]]]
call[name[print], parameter[constant[PLAYER1 [start]:], name[character1]]]
call[name[print], parameter[constant[PLAYER2 [start]:], name[character2]]]
variable[b] assign[=] call[name[Battle], parameter[name[character1], name[character2]]]
call[name[print], parameter[name[b]]]
call[name[print], parameter[constant[PLAYER1 [end]:], name[character1]]]
call[name[print], parameter[constant[PLAYER2 [end]:], name[character2]]] | keyword[def] identifier[main] ():
literal[string]
identifier[character1] = identifier[Character] ( literal[string] , identifier[str] = literal[int] , identifier[int] = literal[int] , identifier[sta] = literal[int] )
identifier[character2] = identifier[Character] ( literal[string] , identifier[str] = literal[int] , identifier[int] = literal[int] , identifier[sta] = literal[int] )
identifier[print] ( literal[string] , identifier[character1] )
identifier[print] ( literal[string] , identifier[character2] )
identifier[b] = identifier[Battle] ( identifier[character1] , identifier[character2] )
identifier[print] ( identifier[b] )
identifier[print] ( literal[string] , identifier[character1] )
identifier[print] ( literal[string] , identifier[character2] ) | def main():
"""
Prototype to see how an RPG simulation might be used
in the AIKIF framework.
The idea is to build a simple character and run a simulation
to see how it succeeds in a random world against another players
character
character
stats
world
locations
"""
character1 = Character('Albogh', str=4, int=7, sta=50)
character2 = Character('Zoltor', str=6, int=6, sta=70)
print('PLAYER1 [start]:', character1)
print('PLAYER2 [start]:', character2)
b = Battle(character1, character2)
print(b)
print('PLAYER1 [end]:', character1)
print('PLAYER2 [end]:', character2) |
def end_headers(self):
    """Send the blank line ending the MIME headers."""
    # HTTP/0.9 responses carry no headers, so no terminator is emitted.
    emits_headers = self.request_version != 'HTTP/0.9'
    if emits_headers:
        self._headers_buffer.append(b"\r\n")
    self.flush_headers()
constant[Send the blank line ending the MIME headers.]
if compare[name[self].request_version not_equal[!=] constant[HTTP/0.9]] begin[:]
call[name[self]._headers_buffer.append, parameter[constant[b'\r\n']]]
call[name[self].flush_headers, parameter[]] | keyword[def] identifier[end_headers] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[request_version] != literal[string] :
identifier[self] . identifier[_headers_buffer] . identifier[append] ( literal[string] )
identifier[self] . identifier[flush_headers] () | def end_headers(self):
"""Send the blank line ending the MIME headers."""
if self.request_version != 'HTTP/0.9':
self._headers_buffer.append(b'\r\n')
self.flush_headers() # depends on [control=['if'], data=[]] |
def load(self, dump_fn='', prep_only=0, force_upload=0, from_local=0, name=None, site=None, dest_dir=None):
    """
    Restores a database snapshot onto the target database server.
    If prep_only=1, commands for preparing the load will be generated,
    but not the command to finally load the snapshot.

    dump_fn: optional filename template for locating the snapshot.
    name/site: forwarded to the database renderer to select the target.
    dest_dir: directory searched for the default snapshot filename.

    NOTE(review): force_upload is accepted but never read below, and
    from_local is converted to int but also never used — presumably kept
    for interface compatibility with other backends; confirm.
    """
    r = self.database_renderer(name=name, site=site)
    # Render the snapshot filename.
    r.env.dump_fn = self.get_default_db_fn(fn_template=dump_fn, dest_dir=dest_dir)
    from_local = int(from_local)
    prep_only = int(prep_only)
    missing_local_dump_error = r.format('Database dump file {dump_fn} does not exist.')
    # Copy snapshot file to target.
    if self.is_local:
        # Target is this host: the dump can be loaded in place.
        r.env.remote_dump_fn = dump_fn
    else:
        # Remote target: stage the dump under /tmp on the remote host.
        r.env.remote_dump_fn = '/tmp/' + os.path.split(r.env.dump_fn)[-1]
    if not prep_only and not self.is_local:
        if not self.dryrun:
            # Fail early if the local snapshot to upload is missing.
            assert os.path.isfile(r.env.dump_fn), missing_local_dump_error
        r.pc('Uploading MongoDB database snapshot...')
        # r.put(
        #     local_path=r.env.dump_fn,
        #     remote_path=r.env.remote_dump_fn)
        # rsync is used instead of put() for progress reporting and resume.
        r.local('rsync -rvz --progress --no-p --no-g '
            '--rsh "ssh -o StrictHostKeyChecking=no -i {key_filename}" '
            '{dump_fn} {user}@{host_string}:{remote_dump_fn}')
    if self.is_local and not prep_only and not self.dryrun:
        assert os.path.isfile(r.env.dump_fn), missing_local_dump_error
    # Runs the backend-specific load command (skipped output when prep_only
    # semantics are handled by the renderer's command template).
    r.run_or_local(r.env.load_command)
constant[
Restores a database snapshot onto the target database server.
If prep_only=1, commands for preparing the load will be generated,
but not the command to finally load the snapshot.
]
variable[r] assign[=] call[name[self].database_renderer, parameter[]]
name[r].env.dump_fn assign[=] call[name[self].get_default_db_fn, parameter[]]
variable[from_local] assign[=] call[name[int], parameter[name[from_local]]]
variable[prep_only] assign[=] call[name[int], parameter[name[prep_only]]]
variable[missing_local_dump_error] assign[=] call[name[r].format, parameter[constant[Database dump file {dump_fn} does not exist.]]]
if name[self].is_local begin[:]
name[r].env.remote_dump_fn assign[=] name[dump_fn]
if <ast.BoolOp object at 0x7da1b003c310> begin[:]
if <ast.UnaryOp object at 0x7da1b003d420> begin[:]
assert[call[name[os].path.isfile, parameter[name[r].env.dump_fn]]]
call[name[r].pc, parameter[constant[Uploading MongoDB database snapshot...]]]
call[name[r].local, parameter[constant[rsync -rvz --progress --no-p --no-g --rsh "ssh -o StrictHostKeyChecking=no -i {key_filename}" {dump_fn} {user}@{host_string}:{remote_dump_fn}]]]
if <ast.BoolOp object at 0x7da1b003c820> begin[:]
assert[call[name[os].path.isfile, parameter[name[r].env.dump_fn]]]
call[name[r].run_or_local, parameter[name[r].env.load_command]] | keyword[def] identifier[load] ( identifier[self] , identifier[dump_fn] = literal[string] , identifier[prep_only] = literal[int] , identifier[force_upload] = literal[int] , identifier[from_local] = literal[int] , identifier[name] = keyword[None] , identifier[site] = keyword[None] , identifier[dest_dir] = keyword[None] ):
literal[string]
identifier[r] = identifier[self] . identifier[database_renderer] ( identifier[name] = identifier[name] , identifier[site] = identifier[site] )
identifier[r] . identifier[env] . identifier[dump_fn] = identifier[self] . identifier[get_default_db_fn] ( identifier[fn_template] = identifier[dump_fn] , identifier[dest_dir] = identifier[dest_dir] )
identifier[from_local] = identifier[int] ( identifier[from_local] )
identifier[prep_only] = identifier[int] ( identifier[prep_only] )
identifier[missing_local_dump_error] = identifier[r] . identifier[format] ( literal[string] )
keyword[if] identifier[self] . identifier[is_local] :
identifier[r] . identifier[env] . identifier[remote_dump_fn] = identifier[dump_fn]
keyword[else] :
identifier[r] . identifier[env] . identifier[remote_dump_fn] = literal[string] + identifier[os] . identifier[path] . identifier[split] ( identifier[r] . identifier[env] . identifier[dump_fn] )[- literal[int] ]
keyword[if] keyword[not] identifier[prep_only] keyword[and] keyword[not] identifier[self] . identifier[is_local] :
keyword[if] keyword[not] identifier[self] . identifier[dryrun] :
keyword[assert] identifier[os] . identifier[path] . identifier[isfile] ( identifier[r] . identifier[env] . identifier[dump_fn] ), identifier[missing_local_dump_error]
identifier[r] . identifier[pc] ( literal[string] )
identifier[r] . identifier[local] ( literal[string]
literal[string]
literal[string] )
keyword[if] identifier[self] . identifier[is_local] keyword[and] keyword[not] identifier[prep_only] keyword[and] keyword[not] identifier[self] . identifier[dryrun] :
keyword[assert] identifier[os] . identifier[path] . identifier[isfile] ( identifier[r] . identifier[env] . identifier[dump_fn] ), identifier[missing_local_dump_error]
identifier[r] . identifier[run_or_local] ( identifier[r] . identifier[env] . identifier[load_command] ) | def load(self, dump_fn='', prep_only=0, force_upload=0, from_local=0, name=None, site=None, dest_dir=None):
"""
Restores a database snapshot onto the target database server.
If prep_only=1, commands for preparing the load will be generated,
but not the command to finally load the snapshot.
"""
r = self.database_renderer(name=name, site=site)
# Render the snapshot filename.
r.env.dump_fn = self.get_default_db_fn(fn_template=dump_fn, dest_dir=dest_dir)
from_local = int(from_local)
prep_only = int(prep_only)
missing_local_dump_error = r.format('Database dump file {dump_fn} does not exist.')
# Copy snapshot file to target.
if self.is_local:
r.env.remote_dump_fn = dump_fn # depends on [control=['if'], data=[]]
else:
r.env.remote_dump_fn = '/tmp/' + os.path.split(r.env.dump_fn)[-1]
if not prep_only and (not self.is_local):
if not self.dryrun:
assert os.path.isfile(r.env.dump_fn), missing_local_dump_error # depends on [control=['if'], data=[]]
r.pc('Uploading MongoDB database snapshot...')
# r.put(
# local_path=r.env.dump_fn,
# remote_path=r.env.remote_dump_fn)
r.local('rsync -rvz --progress --no-p --no-g --rsh "ssh -o StrictHostKeyChecking=no -i {key_filename}" {dump_fn} {user}@{host_string}:{remote_dump_fn}') # depends on [control=['if'], data=[]]
if self.is_local and (not prep_only) and (not self.dryrun):
assert os.path.isfile(r.env.dump_fn), missing_local_dump_error # depends on [control=['if'], data=[]]
r.run_or_local(r.env.load_command) |
def dbRestore(self, db_value, context=None):
    """
    Converts a stored database value to Python.

    :param db_value: <variant> raw value from the database backend
    :param context: <orb.Context>

    :return: <variant> an orb.Query restored from JSON, or the
             original value when it is None
    """
    if db_value is None:
        return db_value
    jdata = super(QueryColumn, self).dbRestore(db_value, context=context)
    return orb.Query.fromJSON(jdata)
constant[
Converts a stored database value to Python.
:param py_value: <variant>
:param context: <orb.Context>
:return: <variant>
]
if compare[name[db_value] is_not constant[None]] begin[:]
variable[jdata] assign[=] call[call[name[super], parameter[name[QueryColumn], name[self]]].dbRestore, parameter[name[db_value]]]
return[call[name[orb].Query.fromJSON, parameter[name[jdata]]]] | keyword[def] identifier[dbRestore] ( identifier[self] , identifier[db_value] , identifier[context] = keyword[None] ):
literal[string]
keyword[if] identifier[db_value] keyword[is] keyword[not] keyword[None] :
identifier[jdata] = identifier[super] ( identifier[QueryColumn] , identifier[self] ). identifier[dbRestore] ( identifier[db_value] , identifier[context] = identifier[context] )
keyword[return] identifier[orb] . identifier[Query] . identifier[fromJSON] ( identifier[jdata] )
keyword[else] :
keyword[return] identifier[db_value] | def dbRestore(self, db_value, context=None):
"""
Converts a stored database value to Python.
:param py_value: <variant>
:param context: <orb.Context>
:return: <variant>
"""
if db_value is not None:
jdata = super(QueryColumn, self).dbRestore(db_value, context=context)
return orb.Query.fromJSON(jdata) # depends on [control=['if'], data=['db_value']]
else:
return db_value |
def show(self, id):
    """ Print the profile stats to stdout, id is the RDD id """
    stats = self.stats()
    if not stats:
        return
    banner = "=" * 60
    print(banner)
    print("Profile of RDD<id=%d>" % id)
    print(banner)
    stats.sort_stats("time", "cumulative").print_stats()
constant[ Print the profile stats to stdout, id is the RDD id ]
variable[stats] assign[=] call[name[self].stats, parameter[]]
if name[stats] begin[:]
call[name[print], parameter[binary_operation[constant[=] * constant[60]]]]
call[name[print], parameter[binary_operation[constant[Profile of RDD<id=%d>] <ast.Mod object at 0x7da2590d6920> name[id]]]]
call[name[print], parameter[binary_operation[constant[=] * constant[60]]]]
call[call[name[stats].sort_stats, parameter[constant[time], constant[cumulative]]].print_stats, parameter[]] | keyword[def] identifier[show] ( identifier[self] , identifier[id] ):
literal[string]
identifier[stats] = identifier[self] . identifier[stats] ()
keyword[if] identifier[stats] :
identifier[print] ( literal[string] * literal[int] )
identifier[print] ( literal[string] % identifier[id] )
identifier[print] ( literal[string] * literal[int] )
identifier[stats] . identifier[sort_stats] ( literal[string] , literal[string] ). identifier[print_stats] () | def show(self, id):
""" Print the profile stats to stdout, id is the RDD id """
stats = self.stats()
if stats:
print('=' * 60)
print('Profile of RDD<id=%d>' % id)
print('=' * 60)
stats.sort_stats('time', 'cumulative').print_stats() # depends on [control=['if'], data=[]] |
def get_comp_instance_metrics(cluster, environ, topology, component,
                              metrics, instances, time_range, role=None):
  '''
  Get the metrics for some instances of a topology from tracker
  :param cluster: cluster name
  :param environ: environment name
  :param topology: topology name
  :param component: component name
  :param metrics: dict of display name to cuckoo name
  :param instances: a single instance id, or a list of them
  :param time_range: 2-tuple consisting of start and end of range
  :param role: optional role forwarded to the tracker
  :return: JSON response from the tracker, via tornado.gen.Return
  '''
  params = dict(
      cluster=cluster,
      environ=environ,
      topology=topology,
      component=component)
  if role is not None:
    params['role'] = role
  # form the fetch url
  request_url = tornado.httputil.url_concat(
      create_url(METRICS_URL_FMT), params)
  # convert a single instance to a list, if needed
  all_instances = instances if isinstance(instances, list) else [instances]
  # append each metric to the url; only the cuckoo names (dict values) are
  # needed, so iterate values() instead of items() with a discarded key
  for metric_name in metrics.values():
    request_url = tornado.httputil.url_concat(request_url, dict(metricname=metric_name[0]))
  # append each instance to the url
  for i in all_instances:
    request_url = tornado.httputil.url_concat(request_url, dict(instance=i))
  # append the time interval to the url
  # NOTE(review): only the end of time_range is sent as the interval —
  # confirm this matches the tracker API's expectation.
  request_url = tornado.httputil.url_concat(request_url, dict(interval=time_range[1]))
  raise tornado.gen.Return((yield fetch_url_as_json(request_url)))
constant[
Get the metrics for some instances of a topology from tracker
:param cluster:
:param environ:
:param topology:
:param component:
:param metrics: dict of display name to cuckoo name
:param instances:
:param time_range: 2-tuple consisting of start and end of range
:param role:
:return:
]
variable[params] assign[=] call[name[dict], parameter[]]
if compare[name[role] is_not constant[None]] begin[:]
call[name[params]][constant[role]] assign[=] name[role]
variable[request_url] assign[=] call[name[tornado].httputil.url_concat, parameter[call[name[create_url], parameter[name[METRICS_URL_FMT]]], name[params]]]
variable[all_instances] assign[=] <ast.IfExp object at 0x7da20c6e5bd0>
for taget[tuple[[<ast.Name object at 0x7da20c6e6770>, <ast.Name object at 0x7da20c6e4e50>]]] in starred[call[name[metrics].items, parameter[]]] begin[:]
variable[request_url] assign[=] call[name[tornado].httputil.url_concat, parameter[name[request_url], call[name[dict], parameter[]]]]
for taget[name[i]] in starred[name[all_instances]] begin[:]
variable[request_url] assign[=] call[name[tornado].httputil.url_concat, parameter[name[request_url], call[name[dict], parameter[]]]]
variable[request_url] assign[=] call[name[tornado].httputil.url_concat, parameter[name[request_url], call[name[dict], parameter[]]]]
<ast.Raise object at 0x7da20c6e7550> | keyword[def] identifier[get_comp_instance_metrics] ( identifier[cluster] , identifier[environ] , identifier[topology] , identifier[component] ,
identifier[metrics] , identifier[instances] , identifier[time_range] , identifier[role] = keyword[None] ):
literal[string]
identifier[params] = identifier[dict] (
identifier[cluster] = identifier[cluster] ,
identifier[environ] = identifier[environ] ,
identifier[topology] = identifier[topology] ,
identifier[component] = identifier[component] )
keyword[if] identifier[role] keyword[is] keyword[not] keyword[None] :
identifier[params] [ literal[string] ]= identifier[role]
identifier[request_url] = identifier[tornado] . identifier[httputil] . identifier[url_concat] (
identifier[create_url] ( identifier[METRICS_URL_FMT] ), identifier[params] )
identifier[all_instances] = identifier[instances] keyword[if] identifier[isinstance] ( identifier[instances] , identifier[list] ) keyword[else] [ identifier[instances] ]
keyword[for] identifier[_] , identifier[metric_name] keyword[in] identifier[metrics] . identifier[items] ():
identifier[request_url] = identifier[tornado] . identifier[httputil] . identifier[url_concat] ( identifier[request_url] , identifier[dict] ( identifier[metricname] = identifier[metric_name] [ literal[int] ]))
keyword[for] identifier[i] keyword[in] identifier[all_instances] :
identifier[request_url] = identifier[tornado] . identifier[httputil] . identifier[url_concat] ( identifier[request_url] , identifier[dict] ( identifier[instance] = identifier[i] ))
identifier[request_url] = identifier[tornado] . identifier[httputil] . identifier[url_concat] ( identifier[request_url] , identifier[dict] ( identifier[interval] = identifier[time_range] [ literal[int] ]))
keyword[raise] identifier[tornado] . identifier[gen] . identifier[Return] (( keyword[yield] identifier[fetch_url_as_json] ( identifier[request_url] ))) | def get_comp_instance_metrics(cluster, environ, topology, component, metrics, instances, time_range, role=None):
"""
Get the metrics for some instances of a topology from tracker
:param cluster:
:param environ:
:param topology:
:param component:
:param metrics: dict of display name to cuckoo name
:param instances:
:param time_range: 2-tuple consisting of start and end of range
:param role:
:return:
"""
params = dict(cluster=cluster, environ=environ, topology=topology, component=component)
if role is not None:
params['role'] = role # depends on [control=['if'], data=['role']]
# form the fetch url
request_url = tornado.httputil.url_concat(create_url(METRICS_URL_FMT), params)
# convert a single instance to a list, if needed
all_instances = instances if isinstance(instances, list) else [instances]
# append each metric to the url
for (_, metric_name) in metrics.items():
request_url = tornado.httputil.url_concat(request_url, dict(metricname=metric_name[0])) # depends on [control=['for'], data=[]]
# append each instance to the url
for i in all_instances:
request_url = tornado.httputil.url_concat(request_url, dict(instance=i)) # depends on [control=['for'], data=['i']]
# append the time interval to the url
request_url = tornado.httputil.url_concat(request_url, dict(interval=time_range[1]))
raise tornado.gen.Return((yield fetch_url_as_json(request_url))) |
def gather_file_data(config):
""" Gather policy information from files
"""
file_regex = re.compile(config['file_regex'])
category_regex = re.compile(config['category_regex'])
policies = {}
for root, dirs, files in os.walk(config['c7n_policy_directory']):
for file in files:
if file_regex.match(file):
file_path = root + '/' + file
logging.debug('Processing file %s', file_path)
with open(file_path, 'r') as stream:
try:
if category_regex.search(file_path):
category = 'Security & Governance'
else:
category = 'Cost Controls'
policies = yaml.load(stream)
for policy in policies['policies']:
logging.debug(
'Processing policy %s', policy['name'])
policy['file_url'] = get_file_url(
file_path, config)
resource_type = policy['resource']
if category not in c7n_data:
c7n_data[category] = {}
if resource_type not in c7n_data[category]:
c7n_data[category][resource_type] = []
c7n_data[category][resource_type].append(policy)
except yaml.YAMLError as exc:
logging.error(exc) | def function[gather_file_data, parameter[config]]:
constant[ Gather policy information from files
]
variable[file_regex] assign[=] call[name[re].compile, parameter[call[name[config]][constant[file_regex]]]]
variable[category_regex] assign[=] call[name[re].compile, parameter[call[name[config]][constant[category_regex]]]]
variable[policies] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da204564d00>, <ast.Name object at 0x7da204567d30>, <ast.Name object at 0x7da2045655a0>]]] in starred[call[name[os].walk, parameter[call[name[config]][constant[c7n_policy_directory]]]]] begin[:]
for taget[name[file]] in starred[name[files]] begin[:]
if call[name[file_regex].match, parameter[name[file]]] begin[:]
variable[file_path] assign[=] binary_operation[binary_operation[name[root] + constant[/]] + name[file]]
call[name[logging].debug, parameter[constant[Processing file %s], name[file_path]]]
with call[name[open], parameter[name[file_path], constant[r]]] begin[:]
<ast.Try object at 0x7da2041d8940> | keyword[def] identifier[gather_file_data] ( identifier[config] ):
literal[string]
identifier[file_regex] = identifier[re] . identifier[compile] ( identifier[config] [ literal[string] ])
identifier[category_regex] = identifier[re] . identifier[compile] ( identifier[config] [ literal[string] ])
identifier[policies] ={}
keyword[for] identifier[root] , identifier[dirs] , identifier[files] keyword[in] identifier[os] . identifier[walk] ( identifier[config] [ literal[string] ]):
keyword[for] identifier[file] keyword[in] identifier[files] :
keyword[if] identifier[file_regex] . identifier[match] ( identifier[file] ):
identifier[file_path] = identifier[root] + literal[string] + identifier[file]
identifier[logging] . identifier[debug] ( literal[string] , identifier[file_path] )
keyword[with] identifier[open] ( identifier[file_path] , literal[string] ) keyword[as] identifier[stream] :
keyword[try] :
keyword[if] identifier[category_regex] . identifier[search] ( identifier[file_path] ):
identifier[category] = literal[string]
keyword[else] :
identifier[category] = literal[string]
identifier[policies] = identifier[yaml] . identifier[load] ( identifier[stream] )
keyword[for] identifier[policy] keyword[in] identifier[policies] [ literal[string] ]:
identifier[logging] . identifier[debug] (
literal[string] , identifier[policy] [ literal[string] ])
identifier[policy] [ literal[string] ]= identifier[get_file_url] (
identifier[file_path] , identifier[config] )
identifier[resource_type] = identifier[policy] [ literal[string] ]
keyword[if] identifier[category] keyword[not] keyword[in] identifier[c7n_data] :
identifier[c7n_data] [ identifier[category] ]={}
keyword[if] identifier[resource_type] keyword[not] keyword[in] identifier[c7n_data] [ identifier[category] ]:
identifier[c7n_data] [ identifier[category] ][ identifier[resource_type] ]=[]
identifier[c7n_data] [ identifier[category] ][ identifier[resource_type] ]. identifier[append] ( identifier[policy] )
keyword[except] identifier[yaml] . identifier[YAMLError] keyword[as] identifier[exc] :
identifier[logging] . identifier[error] ( identifier[exc] ) | def gather_file_data(config):
""" Gather policy information from files
"""
file_regex = re.compile(config['file_regex'])
category_regex = re.compile(config['category_regex'])
policies = {}
for (root, dirs, files) in os.walk(config['c7n_policy_directory']):
for file in files:
if file_regex.match(file):
file_path = root + '/' + file
logging.debug('Processing file %s', file_path)
with open(file_path, 'r') as stream:
try:
if category_regex.search(file_path):
category = 'Security & Governance' # depends on [control=['if'], data=[]]
else:
category = 'Cost Controls'
policies = yaml.load(stream)
for policy in policies['policies']:
logging.debug('Processing policy %s', policy['name'])
policy['file_url'] = get_file_url(file_path, config)
resource_type = policy['resource']
if category not in c7n_data:
c7n_data[category] = {} # depends on [control=['if'], data=['category', 'c7n_data']]
if resource_type not in c7n_data[category]:
c7n_data[category][resource_type] = [] # depends on [control=['if'], data=['resource_type']]
c7n_data[category][resource_type].append(policy) # depends on [control=['for'], data=['policy']] # depends on [control=['try'], data=[]]
except yaml.YAMLError as exc:
logging.error(exc) # depends on [control=['except'], data=['exc']] # depends on [control=['with'], data=['stream']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['file']] # depends on [control=['for'], data=[]] |
def update_stream(self):
"""
Restarts the stream with the current list of tracking terms.
"""
need_to_restart = False
# If we think we are running, but something has gone wrong in the streaming thread
# Restart it.
if self.stream is not None and not self.stream.running:
logger.warning("Stream exists but isn't running")
self.listener.error = False
self.listener.streaming_exception = None
need_to_restart = True
# Check if the tracking list has changed
if self.term_checker.check():
logger.info("Terms have changed")
need_to_restart = True
# If we aren't running and we are allowing unfiltered streams
if self.stream is None and self.unfiltered:
need_to_restart = True
if not need_to_restart:
return
logger.info("Restarting stream...")
# Stop any old stream
self.stop_stream()
# Start a new stream
self.start_stream() | def function[update_stream, parameter[self]]:
constant[
Restarts the stream with the current list of tracking terms.
]
variable[need_to_restart] assign[=] constant[False]
if <ast.BoolOp object at 0x7da1b1041cc0> begin[:]
call[name[logger].warning, parameter[constant[Stream exists but isn't running]]]
name[self].listener.error assign[=] constant[False]
name[self].listener.streaming_exception assign[=] constant[None]
variable[need_to_restart] assign[=] constant[True]
if call[name[self].term_checker.check, parameter[]] begin[:]
call[name[logger].info, parameter[constant[Terms have changed]]]
variable[need_to_restart] assign[=] constant[True]
if <ast.BoolOp object at 0x7da1b1041e40> begin[:]
variable[need_to_restart] assign[=] constant[True]
if <ast.UnaryOp object at 0x7da1b1042380> begin[:]
return[None]
call[name[logger].info, parameter[constant[Restarting stream...]]]
call[name[self].stop_stream, parameter[]]
call[name[self].start_stream, parameter[]] | keyword[def] identifier[update_stream] ( identifier[self] ):
literal[string]
identifier[need_to_restart] = keyword[False]
keyword[if] identifier[self] . identifier[stream] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[self] . identifier[stream] . identifier[running] :
identifier[logger] . identifier[warning] ( literal[string] )
identifier[self] . identifier[listener] . identifier[error] = keyword[False]
identifier[self] . identifier[listener] . identifier[streaming_exception] = keyword[None]
identifier[need_to_restart] = keyword[True]
keyword[if] identifier[self] . identifier[term_checker] . identifier[check] ():
identifier[logger] . identifier[info] ( literal[string] )
identifier[need_to_restart] = keyword[True]
keyword[if] identifier[self] . identifier[stream] keyword[is] keyword[None] keyword[and] identifier[self] . identifier[unfiltered] :
identifier[need_to_restart] = keyword[True]
keyword[if] keyword[not] identifier[need_to_restart] :
keyword[return]
identifier[logger] . identifier[info] ( literal[string] )
identifier[self] . identifier[stop_stream] ()
identifier[self] . identifier[start_stream] () | def update_stream(self):
"""
Restarts the stream with the current list of tracking terms.
"""
need_to_restart = False
# If we think we are running, but something has gone wrong in the streaming thread
# Restart it.
if self.stream is not None and (not self.stream.running):
logger.warning("Stream exists but isn't running")
self.listener.error = False
self.listener.streaming_exception = None
need_to_restart = True # depends on [control=['if'], data=[]]
# Check if the tracking list has changed
if self.term_checker.check():
logger.info('Terms have changed')
need_to_restart = True # depends on [control=['if'], data=[]]
# If we aren't running and we are allowing unfiltered streams
if self.stream is None and self.unfiltered:
need_to_restart = True # depends on [control=['if'], data=[]]
if not need_to_restart:
return # depends on [control=['if'], data=[]]
logger.info('Restarting stream...')
# Stop any old stream
self.stop_stream()
# Start a new stream
self.start_stream() |
def get_pistacking_frequency(self,analysis_cutoff):
"""Calculates the frequency of pi-pi interactions throughout simulations. If the frequency exceeds the
analysis cutoff, this interaction will be plotted in the final figure.
Takes:
* analysis_cutoff * - fraction of simulation time a feature has to be present for to be plotted
Output:
* self.pi_contacts_for_drawing * - dictionary of pi-pi interactions to be plotted """
self.frequency = defaultdict(int)
for traj in self.pistacking_by_type:
for contact in self.pistacking_by_type[traj]:
self.frequency[contact["ligand_ring_ids"],contact["type"],contact["resid"],contact["resname"],contact["segid"]]+=contact["frequency"]
draw_frequency = {i:self.frequency[i] for i in self.frequency if self.frequency[i]>(int(len(self.trajectory))*float(analysis_cutoff))}
self.pi_contacts_for_drawing = {}
for contact in draw_frequency:
self.pi_contacts_for_drawing[contact]=draw_frequency[contact] | def function[get_pistacking_frequency, parameter[self, analysis_cutoff]]:
constant[Calculates the frequency of pi-pi interactions throughout simulations. If the frequency exceeds the
analysis cutoff, this interaction will be plotted in the final figure.
Takes:
* analysis_cutoff * - fraction of simulation time a feature has to be present for to be plotted
Output:
* self.pi_contacts_for_drawing * - dictionary of pi-pi interactions to be plotted ]
name[self].frequency assign[=] call[name[defaultdict], parameter[name[int]]]
for taget[name[traj]] in starred[name[self].pistacking_by_type] begin[:]
for taget[name[contact]] in starred[call[name[self].pistacking_by_type][name[traj]]] begin[:]
<ast.AugAssign object at 0x7da1b246a8c0>
variable[draw_frequency] assign[=] <ast.DictComp object at 0x7da1b246aa40>
name[self].pi_contacts_for_drawing assign[=] dictionary[[], []]
for taget[name[contact]] in starred[name[draw_frequency]] begin[:]
call[name[self].pi_contacts_for_drawing][name[contact]] assign[=] call[name[draw_frequency]][name[contact]] | keyword[def] identifier[get_pistacking_frequency] ( identifier[self] , identifier[analysis_cutoff] ):
literal[string]
identifier[self] . identifier[frequency] = identifier[defaultdict] ( identifier[int] )
keyword[for] identifier[traj] keyword[in] identifier[self] . identifier[pistacking_by_type] :
keyword[for] identifier[contact] keyword[in] identifier[self] . identifier[pistacking_by_type] [ identifier[traj] ]:
identifier[self] . identifier[frequency] [ identifier[contact] [ literal[string] ], identifier[contact] [ literal[string] ], identifier[contact] [ literal[string] ], identifier[contact] [ literal[string] ], identifier[contact] [ literal[string] ]]+= identifier[contact] [ literal[string] ]
identifier[draw_frequency] ={ identifier[i] : identifier[self] . identifier[frequency] [ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[self] . identifier[frequency] keyword[if] identifier[self] . identifier[frequency] [ identifier[i] ]>( identifier[int] ( identifier[len] ( identifier[self] . identifier[trajectory] ))* identifier[float] ( identifier[analysis_cutoff] ))}
identifier[self] . identifier[pi_contacts_for_drawing] ={}
keyword[for] identifier[contact] keyword[in] identifier[draw_frequency] :
identifier[self] . identifier[pi_contacts_for_drawing] [ identifier[contact] ]= identifier[draw_frequency] [ identifier[contact] ] | def get_pistacking_frequency(self, analysis_cutoff):
"""Calculates the frequency of pi-pi interactions throughout simulations. If the frequency exceeds the
analysis cutoff, this interaction will be plotted in the final figure.
Takes:
* analysis_cutoff * - fraction of simulation time a feature has to be present for to be plotted
Output:
* self.pi_contacts_for_drawing * - dictionary of pi-pi interactions to be plotted """
self.frequency = defaultdict(int)
for traj in self.pistacking_by_type:
for contact in self.pistacking_by_type[traj]:
self.frequency[contact['ligand_ring_ids'], contact['type'], contact['resid'], contact['resname'], contact['segid']] += contact['frequency'] # depends on [control=['for'], data=['contact']] # depends on [control=['for'], data=['traj']]
draw_frequency = {i: self.frequency[i] for i in self.frequency if self.frequency[i] > int(len(self.trajectory)) * float(analysis_cutoff)}
self.pi_contacts_for_drawing = {}
for contact in draw_frequency:
self.pi_contacts_for_drawing[contact] = draw_frequency[contact] # depends on [control=['for'], data=['contact']] |
def nslookup(cls):
"""
Implementation of UNIX nslookup.
"""
try:
# We try to get the addresse information of the given domain or IP.
if "current_test_data" in PyFunceble.INTERN: # pragma: no cover
# The end-user want more information whith his test.
if not Check().is_ip_valid():
# The element we are testing is not an IP.
# We request the address informations.
request = PyFunceble.socket.getaddrinfo(
PyFunceble.INTERN["to_test"],
80,
0,
0,
PyFunceble.socket.IPPROTO_TCP,
)
for sequence in request:
# We loop through the sequence returned by the request.
# We append the NS informations into the nslookup index.
PyFunceble.INTERN["current_test_data"]["nslookup"].append(
sequence[-1][0]
)
else:
# The element we are testing is an IP.
request = PyFunceble.socket.gethostbyaddr(
PyFunceble.INTERN["to_test"]
)
# We append the NS informations into the nslookup index.
PyFunceble.INTERN["current_test_data"]["nslookup"][
"hostname"
] = request[0]
PyFunceble.INTERN["current_test_data"]["nslookup"][
"aliases"
] = request[1]
PyFunceble.INTERN["current_test_data"]["nslookup"]["ips"] = request[
2
]
else:
if not Check().is_ip_valid():
# The element we are testing is not an IP.
PyFunceble.socket.getaddrinfo(
PyFunceble.INTERN["to_test"],
80,
0,
0,
PyFunceble.socket.IPPROTO_TCP,
)
else:
# The element we are testing is an IP.
PyFunceble.socket.gethostbyaddr(PyFunceble.INTERN["to_test"])
# It was done successfuly, we return True.
# Note: we don't need to read the addresses so we consider as successful
# as long as there is no error.
return True
except (OSError, PyFunceble.socket.herror, PyFunceble.socket.gaierror):
# One of the listed exception is matched.
# It was done unsuccesfuly, we return False.
return False | def function[nslookup, parameter[cls]]:
constant[
Implementation of UNIX nslookup.
]
<ast.Try object at 0x7da20c7cabf0> | keyword[def] identifier[nslookup] ( identifier[cls] ):
literal[string]
keyword[try] :
keyword[if] literal[string] keyword[in] identifier[PyFunceble] . identifier[INTERN] :
keyword[if] keyword[not] identifier[Check] (). identifier[is_ip_valid] ():
identifier[request] = identifier[PyFunceble] . identifier[socket] . identifier[getaddrinfo] (
identifier[PyFunceble] . identifier[INTERN] [ literal[string] ],
literal[int] ,
literal[int] ,
literal[int] ,
identifier[PyFunceble] . identifier[socket] . identifier[IPPROTO_TCP] ,
)
keyword[for] identifier[sequence] keyword[in] identifier[request] :
identifier[PyFunceble] . identifier[INTERN] [ literal[string] ][ literal[string] ]. identifier[append] (
identifier[sequence] [- literal[int] ][ literal[int] ]
)
keyword[else] :
identifier[request] = identifier[PyFunceble] . identifier[socket] . identifier[gethostbyaddr] (
identifier[PyFunceble] . identifier[INTERN] [ literal[string] ]
)
identifier[PyFunceble] . identifier[INTERN] [ literal[string] ][ literal[string] ][
literal[string]
]= identifier[request] [ literal[int] ]
identifier[PyFunceble] . identifier[INTERN] [ literal[string] ][ literal[string] ][
literal[string]
]= identifier[request] [ literal[int] ]
identifier[PyFunceble] . identifier[INTERN] [ literal[string] ][ literal[string] ][ literal[string] ]= identifier[request] [
literal[int]
]
keyword[else] :
keyword[if] keyword[not] identifier[Check] (). identifier[is_ip_valid] ():
identifier[PyFunceble] . identifier[socket] . identifier[getaddrinfo] (
identifier[PyFunceble] . identifier[INTERN] [ literal[string] ],
literal[int] ,
literal[int] ,
literal[int] ,
identifier[PyFunceble] . identifier[socket] . identifier[IPPROTO_TCP] ,
)
keyword[else] :
identifier[PyFunceble] . identifier[socket] . identifier[gethostbyaddr] ( identifier[PyFunceble] . identifier[INTERN] [ literal[string] ])
keyword[return] keyword[True]
keyword[except] ( identifier[OSError] , identifier[PyFunceble] . identifier[socket] . identifier[herror] , identifier[PyFunceble] . identifier[socket] . identifier[gaierror] ):
keyword[return] keyword[False] | def nslookup(cls):
"""
Implementation of UNIX nslookup.
"""
try:
# We try to get the addresse information of the given domain or IP.
if 'current_test_data' in PyFunceble.INTERN: # pragma: no cover
# The end-user want more information whith his test.
if not Check().is_ip_valid():
# The element we are testing is not an IP.
# We request the address informations.
request = PyFunceble.socket.getaddrinfo(PyFunceble.INTERN['to_test'], 80, 0, 0, PyFunceble.socket.IPPROTO_TCP)
for sequence in request:
# We loop through the sequence returned by the request.
# We append the NS informations into the nslookup index.
PyFunceble.INTERN['current_test_data']['nslookup'].append(sequence[-1][0]) # depends on [control=['for'], data=['sequence']] # depends on [control=['if'], data=[]]
else:
# The element we are testing is an IP.
request = PyFunceble.socket.gethostbyaddr(PyFunceble.INTERN['to_test'])
# We append the NS informations into the nslookup index.
PyFunceble.INTERN['current_test_data']['nslookup']['hostname'] = request[0]
PyFunceble.INTERN['current_test_data']['nslookup']['aliases'] = request[1]
PyFunceble.INTERN['current_test_data']['nslookup']['ips'] = request[2] # depends on [control=['if'], data=[]]
elif not Check().is_ip_valid():
# The element we are testing is not an IP.
PyFunceble.socket.getaddrinfo(PyFunceble.INTERN['to_test'], 80, 0, 0, PyFunceble.socket.IPPROTO_TCP) # depends on [control=['if'], data=[]]
else:
# The element we are testing is an IP.
PyFunceble.socket.gethostbyaddr(PyFunceble.INTERN['to_test'])
# It was done successfuly, we return True.
# Note: we don't need to read the addresses so we consider as successful
# as long as there is no error.
return True # depends on [control=['try'], data=[]]
except (OSError, PyFunceble.socket.herror, PyFunceble.socket.gaierror):
# One of the listed exception is matched.
# It was done unsuccesfuly, we return False.
return False # depends on [control=['except'], data=[]] |
def update_many(self, context, request):
"""/@@API/update_many: Update existing object values
This is a wrapper around the update method, allowing multiple updates
to be combined into a single request.
required parameters:
- input_values: A json-encoded dictionary.
Each key is an obj_path, and each value is a dictionary
containing key/value pairs to be set on the object.
Return value:
{
runtime: Function running time.
error: true or string(message) if error. false if no error.
success: true or string(message) if success. false if no success.
updates: return values from update
}
"""
self.context = context
self.request = request
self.unused = [x for x in self.request.form.keys()]
self.used("form.submitted")
self.used("__ac_name")
self.used("__ac_password")
ret = {
"url": router.url_for("update_many", force_external=True),
"success": False,
"error": True,
"updates": [],
}
input_values = json.loads(request.get('input_values', '[]'))
if not input_values:
raise BadRequest("missing input_values")
site_path = request['PATH_INFO'].replace("/@@API/update_many", "")
for obj_path, i in input_values.items():
savepoint = transaction.savepoint()
if not obj_path.startswith("/"):
obj_path = "/" + obj_path
if obj_path.startswith(site_path):
obj_path = obj_path[len(site_path):]
obj = context.restrictedTraverse(str(site_path + obj_path))
this_ret = {
"url": router.url_for("update_many", force_external=True),
"success": False,
"error": True,
}
try:
fields = set_fields_from_request(obj, i)
if not fields:
this_ret['success'] = False
this_ret['error'] = True
else:
this_ret['success'] = True
this_ret['error'] = False
except:
savepoint.rollback()
raise
ret['updates'].append(this_ret)
ret['success'] = True
ret['error'] = False
return ret | def function[update_many, parameter[self, context, request]]:
constant[/@@API/update_many: Update existing object values
This is a wrapper around the update method, allowing multiple updates
to be combined into a single request.
required parameters:
- input_values: A json-encoded dictionary.
Each key is an obj_path, and each value is a dictionary
containing key/value pairs to be set on the object.
Return value:
{
runtime: Function running time.
error: true or string(message) if error. false if no error.
success: true or string(message) if success. false if no success.
updates: return values from update
}
]
name[self].context assign[=] name[context]
name[self].request assign[=] name[request]
name[self].unused assign[=] <ast.ListComp object at 0x7da18bc73070>
call[name[self].used, parameter[constant[form.submitted]]]
call[name[self].used, parameter[constant[__ac_name]]]
call[name[self].used, parameter[constant[__ac_password]]]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da18c4cdd50>, <ast.Constant object at 0x7da18c4ccaf0>, <ast.Constant object at 0x7da18c4cf070>, <ast.Constant object at 0x7da18c4cf790>], [<ast.Call object at 0x7da18c4cf340>, <ast.Constant object at 0x7da18c4cff10>, <ast.Constant object at 0x7da18c4cfd90>, <ast.List object at 0x7da20c76d3f0>]]
variable[input_values] assign[=] call[name[json].loads, parameter[call[name[request].get, parameter[constant[input_values], constant[[]]]]]]
if <ast.UnaryOp object at 0x7da204960fa0> begin[:]
<ast.Raise object at 0x7da204960880>
variable[site_path] assign[=] call[call[name[request]][constant[PATH_INFO]].replace, parameter[constant[/@@API/update_many], constant[]]]
for taget[tuple[[<ast.Name object at 0x7da18c4cc490>, <ast.Name object at 0x7da18c4cd8a0>]]] in starred[call[name[input_values].items, parameter[]]] begin[:]
variable[savepoint] assign[=] call[name[transaction].savepoint, parameter[]]
if <ast.UnaryOp object at 0x7da18c4cfa60> begin[:]
variable[obj_path] assign[=] binary_operation[constant[/] + name[obj_path]]
if call[name[obj_path].startswith, parameter[name[site_path]]] begin[:]
variable[obj_path] assign[=] call[name[obj_path]][<ast.Slice object at 0x7da18c4cded0>]
variable[obj] assign[=] call[name[context].restrictedTraverse, parameter[call[name[str], parameter[binary_operation[name[site_path] + name[obj_path]]]]]]
variable[this_ret] assign[=] dictionary[[<ast.Constant object at 0x7da18c4cebf0>, <ast.Constant object at 0x7da18c4cd7e0>, <ast.Constant object at 0x7da18c4cfac0>], [<ast.Call object at 0x7da18c4cf7f0>, <ast.Constant object at 0x7da18c4cfbe0>, <ast.Constant object at 0x7da18c4cd030>]]
<ast.Try object at 0x7da18c4ce2f0>
call[call[name[ret]][constant[updates]].append, parameter[name[this_ret]]]
call[name[ret]][constant[success]] assign[=] constant[True]
call[name[ret]][constant[error]] assign[=] constant[False]
return[name[ret]] | keyword[def] identifier[update_many] ( identifier[self] , identifier[context] , identifier[request] ):
literal[string]
identifier[self] . identifier[context] = identifier[context]
identifier[self] . identifier[request] = identifier[request]
identifier[self] . identifier[unused] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[self] . identifier[request] . identifier[form] . identifier[keys] ()]
identifier[self] . identifier[used] ( literal[string] )
identifier[self] . identifier[used] ( literal[string] )
identifier[self] . identifier[used] ( literal[string] )
identifier[ret] ={
literal[string] : identifier[router] . identifier[url_for] ( literal[string] , identifier[force_external] = keyword[True] ),
literal[string] : keyword[False] ,
literal[string] : keyword[True] ,
literal[string] :[],
}
identifier[input_values] = identifier[json] . identifier[loads] ( identifier[request] . identifier[get] ( literal[string] , literal[string] ))
keyword[if] keyword[not] identifier[input_values] :
keyword[raise] identifier[BadRequest] ( literal[string] )
identifier[site_path] = identifier[request] [ literal[string] ]. identifier[replace] ( literal[string] , literal[string] )
keyword[for] identifier[obj_path] , identifier[i] keyword[in] identifier[input_values] . identifier[items] ():
identifier[savepoint] = identifier[transaction] . identifier[savepoint] ()
keyword[if] keyword[not] identifier[obj_path] . identifier[startswith] ( literal[string] ):
identifier[obj_path] = literal[string] + identifier[obj_path]
keyword[if] identifier[obj_path] . identifier[startswith] ( identifier[site_path] ):
identifier[obj_path] = identifier[obj_path] [ identifier[len] ( identifier[site_path] ):]
identifier[obj] = identifier[context] . identifier[restrictedTraverse] ( identifier[str] ( identifier[site_path] + identifier[obj_path] ))
identifier[this_ret] ={
literal[string] : identifier[router] . identifier[url_for] ( literal[string] , identifier[force_external] = keyword[True] ),
literal[string] : keyword[False] ,
literal[string] : keyword[True] ,
}
keyword[try] :
identifier[fields] = identifier[set_fields_from_request] ( identifier[obj] , identifier[i] )
keyword[if] keyword[not] identifier[fields] :
identifier[this_ret] [ literal[string] ]= keyword[False]
identifier[this_ret] [ literal[string] ]= keyword[True]
keyword[else] :
identifier[this_ret] [ literal[string] ]= keyword[True]
identifier[this_ret] [ literal[string] ]= keyword[False]
keyword[except] :
identifier[savepoint] . identifier[rollback] ()
keyword[raise]
identifier[ret] [ literal[string] ]. identifier[append] ( identifier[this_ret] )
identifier[ret] [ literal[string] ]= keyword[True]
identifier[ret] [ literal[string] ]= keyword[False]
keyword[return] identifier[ret] | def update_many(self, context, request):
"""/@@API/update_many: Update existing object values
This is a wrapper around the update method, allowing multiple updates
to be combined into a single request.
required parameters:
- input_values: A json-encoded dictionary.
Each key is an obj_path, and each value is a dictionary
containing key/value pairs to be set on the object.
Return value:
{
runtime: Function running time.
error: true or string(message) if error. false if no error.
success: true or string(message) if success. false if no success.
updates: return values from update
}
"""
self.context = context
self.request = request
self.unused = [x for x in self.request.form.keys()]
self.used('form.submitted')
self.used('__ac_name')
self.used('__ac_password')
ret = {'url': router.url_for('update_many', force_external=True), 'success': False, 'error': True, 'updates': []}
input_values = json.loads(request.get('input_values', '[]'))
if not input_values:
raise BadRequest('missing input_values') # depends on [control=['if'], data=[]]
site_path = request['PATH_INFO'].replace('/@@API/update_many', '')
for (obj_path, i) in input_values.items():
savepoint = transaction.savepoint()
if not obj_path.startswith('/'):
obj_path = '/' + obj_path # depends on [control=['if'], data=[]]
if obj_path.startswith(site_path):
obj_path = obj_path[len(site_path):] # depends on [control=['if'], data=[]]
obj = context.restrictedTraverse(str(site_path + obj_path))
this_ret = {'url': router.url_for('update_many', force_external=True), 'success': False, 'error': True}
try:
fields = set_fields_from_request(obj, i)
if not fields:
this_ret['success'] = False
this_ret['error'] = True # depends on [control=['if'], data=[]]
else:
this_ret['success'] = True
this_ret['error'] = False # depends on [control=['try'], data=[]]
except:
savepoint.rollback()
raise # depends on [control=['except'], data=[]]
ret['updates'].append(this_ret) # depends on [control=['for'], data=[]]
ret['success'] = True
ret['error'] = False
return ret |
def clearDevice(self):
"""Remove the current stream
"""
print(self.pre, "clearDevice: ")
self.report("clearDevice")
if not self.device:
return
if (self.mvision_process==None):
return
self.filterchain.delViewPort(self.viewport)
self.filterchain.releaseShmem(self.shmem_name)
self.mvision_process.deactivate() # put process back to sleep ..
self.main_layout.removeWidget(self.mvision_widget)
self.filterchain = None
self.device = None
self.video.update() | def function[clearDevice, parameter[self]]:
constant[Remove the current stream
]
call[name[print], parameter[name[self].pre, constant[clearDevice: ]]]
call[name[self].report, parameter[constant[clearDevice]]]
if <ast.UnaryOp object at 0x7da18bcc8700> begin[:]
return[None]
if compare[name[self].mvision_process equal[==] constant[None]] begin[:]
return[None]
call[name[self].filterchain.delViewPort, parameter[name[self].viewport]]
call[name[self].filterchain.releaseShmem, parameter[name[self].shmem_name]]
call[name[self].mvision_process.deactivate, parameter[]]
call[name[self].main_layout.removeWidget, parameter[name[self].mvision_widget]]
name[self].filterchain assign[=] constant[None]
name[self].device assign[=] constant[None]
call[name[self].video.update, parameter[]] | keyword[def] identifier[clearDevice] ( identifier[self] ):
literal[string]
identifier[print] ( identifier[self] . identifier[pre] , literal[string] )
identifier[self] . identifier[report] ( literal[string] )
keyword[if] keyword[not] identifier[self] . identifier[device] :
keyword[return]
keyword[if] ( identifier[self] . identifier[mvision_process] == keyword[None] ):
keyword[return]
identifier[self] . identifier[filterchain] . identifier[delViewPort] ( identifier[self] . identifier[viewport] )
identifier[self] . identifier[filterchain] . identifier[releaseShmem] ( identifier[self] . identifier[shmem_name] )
identifier[self] . identifier[mvision_process] . identifier[deactivate] ()
identifier[self] . identifier[main_layout] . identifier[removeWidget] ( identifier[self] . identifier[mvision_widget] )
identifier[self] . identifier[filterchain] = keyword[None]
identifier[self] . identifier[device] = keyword[None]
identifier[self] . identifier[video] . identifier[update] () | def clearDevice(self):
"""Remove the current stream
"""
print(self.pre, 'clearDevice: ')
self.report('clearDevice')
if not self.device:
return # depends on [control=['if'], data=[]]
if self.mvision_process == None:
return # depends on [control=['if'], data=[]]
self.filterchain.delViewPort(self.viewport)
self.filterchain.releaseShmem(self.shmem_name)
self.mvision_process.deactivate() # put process back to sleep ..
self.main_layout.removeWidget(self.mvision_widget)
self.filterchain = None
self.device = None
self.video.update() |
def set_(uri, value):
'''
Set a value in a db, using a uri in the form of ``sdb://<profile>/<key>``.
If the uri provided does not start with ``sdb://`` or the value is not
successfully set, return ``False``.
CLI Example:
.. code-block:: bash
salt '*' sdb.set sdb://mymemcached/foo bar
'''
return salt.utils.sdb.sdb_set(uri, value, __opts__, __utils__) | def function[set_, parameter[uri, value]]:
constant[
Set a value in a db, using a uri in the form of ``sdb://<profile>/<key>``.
If the uri provided does not start with ``sdb://`` or the value is not
successfully set, return ``False``.
CLI Example:
.. code-block:: bash
salt '*' sdb.set sdb://mymemcached/foo bar
]
return[call[name[salt].utils.sdb.sdb_set, parameter[name[uri], name[value], name[__opts__], name[__utils__]]]] | keyword[def] identifier[set_] ( identifier[uri] , identifier[value] ):
literal[string]
keyword[return] identifier[salt] . identifier[utils] . identifier[sdb] . identifier[sdb_set] ( identifier[uri] , identifier[value] , identifier[__opts__] , identifier[__utils__] ) | def set_(uri, value):
"""
Set a value in a db, using a uri in the form of ``sdb://<profile>/<key>``.
If the uri provided does not start with ``sdb://`` or the value is not
successfully set, return ``False``.
CLI Example:
.. code-block:: bash
salt '*' sdb.set sdb://mymemcached/foo bar
"""
return salt.utils.sdb.sdb_set(uri, value, __opts__, __utils__) |
def apply_cats(df, trn):
"""Changes any columns of strings in df into categorical variables using trn as
a template for the category codes.
Parameters:
-----------
df: A pandas dataframe. Any columns of strings will be changed to
categorical values. The category codes are determined by trn.
trn: A pandas dataframe. When creating a category for df, it looks up the
what the category's code were in trn and makes those the category codes
for df.
Examples:
---------
>>> df = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['a', 'b', 'a']})
>>> df
col1 col2
0 1 a
1 2 b
2 3 a
note the type of col2 is string
>>> train_cats(df)
>>> df
col1 col2
0 1 a
1 2 b
2 3 a
now the type of col2 is category {a : 1, b : 2}
>>> df2 = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['b', 'a', 'a']})
>>> apply_cats(df2, df)
col1 col2
0 1 b
1 2 a
2 3 a
now the type of col is category {a : 1, b : 2}
"""
for n,c in df.items():
if (n in trn.columns) and (trn[n].dtype.name=='category'):
df[n] = c.astype('category').cat.as_ordered()
df[n].cat.set_categories(trn[n].cat.categories, ordered=True, inplace=True) | def function[apply_cats, parameter[df, trn]]:
constant[Changes any columns of strings in df into categorical variables using trn as
a template for the category codes.
Parameters:
-----------
df: A pandas dataframe. Any columns of strings will be changed to
categorical values. The category codes are determined by trn.
trn: A pandas dataframe. When creating a category for df, it looks up the
what the category's code were in trn and makes those the category codes
for df.
Examples:
---------
>>> df = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['a', 'b', 'a']})
>>> df
col1 col2
0 1 a
1 2 b
2 3 a
note the type of col2 is string
>>> train_cats(df)
>>> df
col1 col2
0 1 a
1 2 b
2 3 a
now the type of col2 is category {a : 1, b : 2}
>>> df2 = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['b', 'a', 'a']})
>>> apply_cats(df2, df)
col1 col2
0 1 b
1 2 a
2 3 a
now the type of col is category {a : 1, b : 2}
]
for taget[tuple[[<ast.Name object at 0x7da1b1e9b970>, <ast.Name object at 0x7da1b1e99ff0>]]] in starred[call[name[df].items, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da1b1e986d0> begin[:]
call[name[df]][name[n]] assign[=] call[call[name[c].astype, parameter[constant[category]]].cat.as_ordered, parameter[]]
call[call[name[df]][name[n]].cat.set_categories, parameter[call[name[trn]][name[n]].cat.categories]] | keyword[def] identifier[apply_cats] ( identifier[df] , identifier[trn] ):
literal[string]
keyword[for] identifier[n] , identifier[c] keyword[in] identifier[df] . identifier[items] ():
keyword[if] ( identifier[n] keyword[in] identifier[trn] . identifier[columns] ) keyword[and] ( identifier[trn] [ identifier[n] ]. identifier[dtype] . identifier[name] == literal[string] ):
identifier[df] [ identifier[n] ]= identifier[c] . identifier[astype] ( literal[string] ). identifier[cat] . identifier[as_ordered] ()
identifier[df] [ identifier[n] ]. identifier[cat] . identifier[set_categories] ( identifier[trn] [ identifier[n] ]. identifier[cat] . identifier[categories] , identifier[ordered] = keyword[True] , identifier[inplace] = keyword[True] ) | def apply_cats(df, trn):
"""Changes any columns of strings in df into categorical variables using trn as
a template for the category codes.
Parameters:
-----------
df: A pandas dataframe. Any columns of strings will be changed to
categorical values. The category codes are determined by trn.
trn: A pandas dataframe. When creating a category for df, it looks up the
what the category's code were in trn and makes those the category codes
for df.
Examples:
---------
>>> df = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['a', 'b', 'a']})
>>> df
col1 col2
0 1 a
1 2 b
2 3 a
note the type of col2 is string
>>> train_cats(df)
>>> df
col1 col2
0 1 a
1 2 b
2 3 a
now the type of col2 is category {a : 1, b : 2}
>>> df2 = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['b', 'a', 'a']})
>>> apply_cats(df2, df)
col1 col2
0 1 b
1 2 a
2 3 a
now the type of col is category {a : 1, b : 2}
"""
for (n, c) in df.items():
if n in trn.columns and trn[n].dtype.name == 'category':
df[n] = c.astype('category').cat.as_ordered()
df[n].cat.set_categories(trn[n].cat.categories, ordered=True, inplace=True) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] |
def get_score_df(self, correction_method=None):
'''
Computes Mann Whitney corrected p, z-values. Falls back to normal approximation when numerical limits are reached.
:param correction_method: str or None, correction method from statsmodels.stats.multitest.multipletests
'fdr_bh' is recommended.
:return: pd.DataFrame
'''
X = self._get_X().astype(np.float64)
X = X / X.sum(axis=1)
cat_X, ncat_X = self._get_cat_and_ncat(X)
def normal_apx(u, x, y):
# from https://stats.stackexchange.com/questions/116315/problem-with-mann-whitney-u-test-in-scipy
m_u = len(x) * len(y) / 2
sigma_u = np.sqrt(len(x) * len(y) * (len(x) + len(y) + 1) / 12)
z = (u - m_u) / sigma_u
return 2*norm.cdf(z)
scores = []
for i in range(cat_X.shape[1]):
cat_list = cat_X.T[i].A1
ncat_list = ncat_X.T[i].A1
try:
if cat_list.mean() > ncat_list.mean():
mw = mannwhitneyu(cat_list, ncat_list, alternative='greater')
if mw.pvalue in (0, 1):
mw.pvalue = normal_apx(mw.staistic, cat_list, ncat_list)
scores.append({'mwu': mw.statistic, 'mwu_p': mw.pvalue, 'mwu_z': norm.isf(float(mw.pvalue)), 'valid':True})
else:
mw = mannwhitneyu(ncat_list, cat_list, alternative='greater')
if mw.pvalue in (0, 1):
mw.pvalue = normal_apx(mw.staistic, ncat_list, cat_list)
scores.append({'mwu': -mw.statistic, 'mwu_p': 1 - mw.pvalue, 'mwu_z': 1. - norm.isf(float(mw.pvalue)), 'valid':True})
except:
scores.append({'mwu': 0, 'mwu_p': 0, 'mwu_z': 0, 'valid':False})
score_df = pd.DataFrame(scores, index=self.corpus_.get_terms()).fillna(0)
if correction_method is not None:
from statsmodels.stats.multitest import multipletests
for method in ['mwu']:
valid_pvals = score_df[score_df.valid].mwu_p
valid_pvals_abs = np.min([valid_pvals, 1-valid_pvals], axis=0)
valid_pvals_abs_corr = multipletests(valid_pvals_abs, method=correction_method)[1]
score_df[method + '_p_corr'] = 0.5
valid_pvals_abs_corr[valid_pvals > 0.5] = 1. - valid_pvals_abs_corr[valid_pvals > 0.5]
valid_pvals_abs_corr[valid_pvals < 0.5] = valid_pvals_abs_corr[valid_pvals < 0.5]
score_df.loc[score_df.valid, method + '_p_corr'] = valid_pvals_abs_corr
score_df[method + '_z'] = -norm.ppf(score_df[method + '_p_corr'])
return score_df | def function[get_score_df, parameter[self, correction_method]]:
constant[
Computes Mann Whitney corrected p, z-values. Falls back to normal approximation when numerical limits are reached.
:param correction_method: str or None, correction method from statsmodels.stats.multitest.multipletests
'fdr_bh' is recommended.
:return: pd.DataFrame
]
variable[X] assign[=] call[call[name[self]._get_X, parameter[]].astype, parameter[name[np].float64]]
variable[X] assign[=] binary_operation[name[X] / call[name[X].sum, parameter[]]]
<ast.Tuple object at 0x7da1b1a20370> assign[=] call[name[self]._get_cat_and_ncat, parameter[name[X]]]
def function[normal_apx, parameter[u, x, y]]:
variable[m_u] assign[=] binary_operation[binary_operation[call[name[len], parameter[name[x]]] * call[name[len], parameter[name[y]]]] / constant[2]]
variable[sigma_u] assign[=] call[name[np].sqrt, parameter[binary_operation[binary_operation[binary_operation[call[name[len], parameter[name[x]]] * call[name[len], parameter[name[y]]]] * binary_operation[binary_operation[call[name[len], parameter[name[x]]] + call[name[len], parameter[name[y]]]] + constant[1]]] / constant[12]]]]
variable[z] assign[=] binary_operation[binary_operation[name[u] - name[m_u]] / name[sigma_u]]
return[binary_operation[constant[2] * call[name[norm].cdf, parameter[name[z]]]]]
variable[scores] assign[=] list[[]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[cat_X].shape][constant[1]]]]] begin[:]
variable[cat_list] assign[=] call[name[cat_X].T][name[i]].A1
variable[ncat_list] assign[=] call[name[ncat_X].T][name[i]].A1
<ast.Try object at 0x7da1b1a211e0>
variable[score_df] assign[=] call[call[name[pd].DataFrame, parameter[name[scores]]].fillna, parameter[constant[0]]]
if compare[name[correction_method] is_not constant[None]] begin[:]
from relative_module[statsmodels.stats.multitest] import module[multipletests]
for taget[name[method]] in starred[list[[<ast.Constant object at 0x7da1b1a21f90>]]] begin[:]
variable[valid_pvals] assign[=] call[name[score_df]][name[score_df].valid].mwu_p
variable[valid_pvals_abs] assign[=] call[name[np].min, parameter[list[[<ast.Name object at 0x7da1b1a22890>, <ast.BinOp object at 0x7da1b1a227d0>]]]]
variable[valid_pvals_abs_corr] assign[=] call[call[name[multipletests], parameter[name[valid_pvals_abs]]]][constant[1]]
call[name[score_df]][binary_operation[name[method] + constant[_p_corr]]] assign[=] constant[0.5]
call[name[valid_pvals_abs_corr]][compare[name[valid_pvals] greater[>] constant[0.5]]] assign[=] binary_operation[constant[1.0] - call[name[valid_pvals_abs_corr]][compare[name[valid_pvals] greater[>] constant[0.5]]]]
call[name[valid_pvals_abs_corr]][compare[name[valid_pvals] less[<] constant[0.5]]] assign[=] call[name[valid_pvals_abs_corr]][compare[name[valid_pvals] less[<] constant[0.5]]]
call[name[score_df].loc][tuple[[<ast.Attribute object at 0x7da1b1a23ac0>, <ast.BinOp object at 0x7da1b1a22230>]]] assign[=] name[valid_pvals_abs_corr]
call[name[score_df]][binary_operation[name[method] + constant[_z]]] assign[=] <ast.UnaryOp object at 0x7da1b1a20700>
return[name[score_df]] | keyword[def] identifier[get_score_df] ( identifier[self] , identifier[correction_method] = keyword[None] ):
literal[string]
identifier[X] = identifier[self] . identifier[_get_X] (). identifier[astype] ( identifier[np] . identifier[float64] )
identifier[X] = identifier[X] / identifier[X] . identifier[sum] ( identifier[axis] = literal[int] )
identifier[cat_X] , identifier[ncat_X] = identifier[self] . identifier[_get_cat_and_ncat] ( identifier[X] )
keyword[def] identifier[normal_apx] ( identifier[u] , identifier[x] , identifier[y] ):
identifier[m_u] = identifier[len] ( identifier[x] )* identifier[len] ( identifier[y] )/ literal[int]
identifier[sigma_u] = identifier[np] . identifier[sqrt] ( identifier[len] ( identifier[x] )* identifier[len] ( identifier[y] )*( identifier[len] ( identifier[x] )+ identifier[len] ( identifier[y] )+ literal[int] )/ literal[int] )
identifier[z] =( identifier[u] - identifier[m_u] )/ identifier[sigma_u]
keyword[return] literal[int] * identifier[norm] . identifier[cdf] ( identifier[z] )
identifier[scores] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[cat_X] . identifier[shape] [ literal[int] ]):
identifier[cat_list] = identifier[cat_X] . identifier[T] [ identifier[i] ]. identifier[A1]
identifier[ncat_list] = identifier[ncat_X] . identifier[T] [ identifier[i] ]. identifier[A1]
keyword[try] :
keyword[if] identifier[cat_list] . identifier[mean] ()> identifier[ncat_list] . identifier[mean] ():
identifier[mw] = identifier[mannwhitneyu] ( identifier[cat_list] , identifier[ncat_list] , identifier[alternative] = literal[string] )
keyword[if] identifier[mw] . identifier[pvalue] keyword[in] ( literal[int] , literal[int] ):
identifier[mw] . identifier[pvalue] = identifier[normal_apx] ( identifier[mw] . identifier[staistic] , identifier[cat_list] , identifier[ncat_list] )
identifier[scores] . identifier[append] ({ literal[string] : identifier[mw] . identifier[statistic] , literal[string] : identifier[mw] . identifier[pvalue] , literal[string] : identifier[norm] . identifier[isf] ( identifier[float] ( identifier[mw] . identifier[pvalue] )), literal[string] : keyword[True] })
keyword[else] :
identifier[mw] = identifier[mannwhitneyu] ( identifier[ncat_list] , identifier[cat_list] , identifier[alternative] = literal[string] )
keyword[if] identifier[mw] . identifier[pvalue] keyword[in] ( literal[int] , literal[int] ):
identifier[mw] . identifier[pvalue] = identifier[normal_apx] ( identifier[mw] . identifier[staistic] , identifier[ncat_list] , identifier[cat_list] )
identifier[scores] . identifier[append] ({ literal[string] :- identifier[mw] . identifier[statistic] , literal[string] : literal[int] - identifier[mw] . identifier[pvalue] , literal[string] : literal[int] - identifier[norm] . identifier[isf] ( identifier[float] ( identifier[mw] . identifier[pvalue] )), literal[string] : keyword[True] })
keyword[except] :
identifier[scores] . identifier[append] ({ literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : keyword[False] })
identifier[score_df] = identifier[pd] . identifier[DataFrame] ( identifier[scores] , identifier[index] = identifier[self] . identifier[corpus_] . identifier[get_terms] ()). identifier[fillna] ( literal[int] )
keyword[if] identifier[correction_method] keyword[is] keyword[not] keyword[None] :
keyword[from] identifier[statsmodels] . identifier[stats] . identifier[multitest] keyword[import] identifier[multipletests]
keyword[for] identifier[method] keyword[in] [ literal[string] ]:
identifier[valid_pvals] = identifier[score_df] [ identifier[score_df] . identifier[valid] ]. identifier[mwu_p]
identifier[valid_pvals_abs] = identifier[np] . identifier[min] ([ identifier[valid_pvals] , literal[int] - identifier[valid_pvals] ], identifier[axis] = literal[int] )
identifier[valid_pvals_abs_corr] = identifier[multipletests] ( identifier[valid_pvals_abs] , identifier[method] = identifier[correction_method] )[ literal[int] ]
identifier[score_df] [ identifier[method] + literal[string] ]= literal[int]
identifier[valid_pvals_abs_corr] [ identifier[valid_pvals] > literal[int] ]= literal[int] - identifier[valid_pvals_abs_corr] [ identifier[valid_pvals] > literal[int] ]
identifier[valid_pvals_abs_corr] [ identifier[valid_pvals] < literal[int] ]= identifier[valid_pvals_abs_corr] [ identifier[valid_pvals] < literal[int] ]
identifier[score_df] . identifier[loc] [ identifier[score_df] . identifier[valid] , identifier[method] + literal[string] ]= identifier[valid_pvals_abs_corr]
identifier[score_df] [ identifier[method] + literal[string] ]=- identifier[norm] . identifier[ppf] ( identifier[score_df] [ identifier[method] + literal[string] ])
keyword[return] identifier[score_df] | def get_score_df(self, correction_method=None):
"""
Computes Mann Whitney corrected p, z-values. Falls back to normal approximation when numerical limits are reached.
:param correction_method: str or None, correction method from statsmodels.stats.multitest.multipletests
'fdr_bh' is recommended.
:return: pd.DataFrame
"""
X = self._get_X().astype(np.float64)
X = X / X.sum(axis=1)
(cat_X, ncat_X) = self._get_cat_and_ncat(X)
def normal_apx(u, x, y):
# from https://stats.stackexchange.com/questions/116315/problem-with-mann-whitney-u-test-in-scipy
m_u = len(x) * len(y) / 2
sigma_u = np.sqrt(len(x) * len(y) * (len(x) + len(y) + 1) / 12)
z = (u - m_u) / sigma_u
return 2 * norm.cdf(z)
scores = []
for i in range(cat_X.shape[1]):
cat_list = cat_X.T[i].A1
ncat_list = ncat_X.T[i].A1
try:
if cat_list.mean() > ncat_list.mean():
mw = mannwhitneyu(cat_list, ncat_list, alternative='greater')
if mw.pvalue in (0, 1):
mw.pvalue = normal_apx(mw.staistic, cat_list, ncat_list) # depends on [control=['if'], data=[]]
scores.append({'mwu': mw.statistic, 'mwu_p': mw.pvalue, 'mwu_z': norm.isf(float(mw.pvalue)), 'valid': True}) # depends on [control=['if'], data=[]]
else:
mw = mannwhitneyu(ncat_list, cat_list, alternative='greater')
if mw.pvalue in (0, 1):
mw.pvalue = normal_apx(mw.staistic, ncat_list, cat_list) # depends on [control=['if'], data=[]]
scores.append({'mwu': -mw.statistic, 'mwu_p': 1 - mw.pvalue, 'mwu_z': 1.0 - norm.isf(float(mw.pvalue)), 'valid': True}) # depends on [control=['try'], data=[]]
except:
scores.append({'mwu': 0, 'mwu_p': 0, 'mwu_z': 0, 'valid': False}) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['i']]
score_df = pd.DataFrame(scores, index=self.corpus_.get_terms()).fillna(0)
if correction_method is not None:
from statsmodels.stats.multitest import multipletests
for method in ['mwu']:
valid_pvals = score_df[score_df.valid].mwu_p
valid_pvals_abs = np.min([valid_pvals, 1 - valid_pvals], axis=0)
valid_pvals_abs_corr = multipletests(valid_pvals_abs, method=correction_method)[1]
score_df[method + '_p_corr'] = 0.5
valid_pvals_abs_corr[valid_pvals > 0.5] = 1.0 - valid_pvals_abs_corr[valid_pvals > 0.5]
valid_pvals_abs_corr[valid_pvals < 0.5] = valid_pvals_abs_corr[valid_pvals < 0.5]
score_df.loc[score_df.valid, method + '_p_corr'] = valid_pvals_abs_corr
score_df[method + '_z'] = -norm.ppf(score_df[method + '_p_corr']) # depends on [control=['for'], data=['method']] # depends on [control=['if'], data=['correction_method']]
return score_df |
def load_name(self, name):
"""
Implementation of the LOAD_NAME operation
"""
if name in self.globals_:
return self.globals_[name]
b = self.globals_['__builtins__']
if isinstance(b, dict):
return b[name]
else:
return getattr(b, name) | def function[load_name, parameter[self, name]]:
constant[
Implementation of the LOAD_NAME operation
]
if compare[name[name] in name[self].globals_] begin[:]
return[call[name[self].globals_][name[name]]]
variable[b] assign[=] call[name[self].globals_][constant[__builtins__]]
if call[name[isinstance], parameter[name[b], name[dict]]] begin[:]
return[call[name[b]][name[name]]] | keyword[def] identifier[load_name] ( identifier[self] , identifier[name] ):
literal[string]
keyword[if] identifier[name] keyword[in] identifier[self] . identifier[globals_] :
keyword[return] identifier[self] . identifier[globals_] [ identifier[name] ]
identifier[b] = identifier[self] . identifier[globals_] [ literal[string] ]
keyword[if] identifier[isinstance] ( identifier[b] , identifier[dict] ):
keyword[return] identifier[b] [ identifier[name] ]
keyword[else] :
keyword[return] identifier[getattr] ( identifier[b] , identifier[name] ) | def load_name(self, name):
"""
Implementation of the LOAD_NAME operation
"""
if name in self.globals_:
return self.globals_[name] # depends on [control=['if'], data=['name']]
b = self.globals_['__builtins__']
if isinstance(b, dict):
return b[name] # depends on [control=['if'], data=[]]
else:
return getattr(b, name) |
def conf(self, key=None, defval=None):
'''return YunpianConf if key=None, else return value in YunpianConf'''
if key is None:
return self._ypconf
val = self._ypconf.conf(key)
return defval if val is None else val | def function[conf, parameter[self, key, defval]]:
constant[return YunpianConf if key=None, else return value in YunpianConf]
if compare[name[key] is constant[None]] begin[:]
return[name[self]._ypconf]
variable[val] assign[=] call[name[self]._ypconf.conf, parameter[name[key]]]
return[<ast.IfExp object at 0x7da20c6c5600>] | keyword[def] identifier[conf] ( identifier[self] , identifier[key] = keyword[None] , identifier[defval] = keyword[None] ):
literal[string]
keyword[if] identifier[key] keyword[is] keyword[None] :
keyword[return] identifier[self] . identifier[_ypconf]
identifier[val] = identifier[self] . identifier[_ypconf] . identifier[conf] ( identifier[key] )
keyword[return] identifier[defval] keyword[if] identifier[val] keyword[is] keyword[None] keyword[else] identifier[val] | def conf(self, key=None, defval=None):
"""return YunpianConf if key=None, else return value in YunpianConf"""
if key is None:
return self._ypconf # depends on [control=['if'], data=[]]
val = self._ypconf.conf(key)
return defval if val is None else val |
def stacks_2_eqns(self,stacks):
"""returns equation strings from stacks"""
if stacks:
return list(map(lambda p: self.stack_2_eqn(p), stacks))
else:
return [] | def function[stacks_2_eqns, parameter[self, stacks]]:
constant[returns equation strings from stacks]
if name[stacks] begin[:]
return[call[name[list], parameter[call[name[map], parameter[<ast.Lambda object at 0x7da1b1972ec0>, name[stacks]]]]]] | keyword[def] identifier[stacks_2_eqns] ( identifier[self] , identifier[stacks] ):
literal[string]
keyword[if] identifier[stacks] :
keyword[return] identifier[list] ( identifier[map] ( keyword[lambda] identifier[p] : identifier[self] . identifier[stack_2_eqn] ( identifier[p] ), identifier[stacks] ))
keyword[else] :
keyword[return] [] | def stacks_2_eqns(self, stacks):
"""returns equation strings from stacks"""
if stacks:
return list(map(lambda p: self.stack_2_eqn(p), stacks)) # depends on [control=['if'], data=[]]
else:
return [] |
def from_text(cls, fname, vocabUnicodeSize=78, desired_vocab=None, encoding="utf-8"):
"""
Create a WordVectors class based on a word2vec text file
Parameters
----------
fname : path to file
vocabUnicodeSize: the maximum string length (78, by default)
desired_vocab: if set, this will ignore any word and vector that
doesn't fall inside desired_vocab.
Returns
-------
WordVectors instance
"""
with open(fname, "rb") as fin:
header = fin.readline()
vocab_size, vector_size = list(map(int, header.split()))
vocab = np.empty(vocab_size, dtype="<U%s" % vocabUnicodeSize)
vectors = np.empty((vocab_size, vector_size), dtype=np.float)
for i, line in enumerate(fin):
line = line.decode(encoding).rstrip()
parts = line.split(" ")
word = parts[0]
include = desired_vocab is None or word in desired_vocab
if include:
vector = np.array(parts[1:], dtype=np.float)
vocab[i] = word
vectors[i] = unitvec(vector)
if desired_vocab is not None:
vectors = vectors[vocab != "", :]
vocab = vocab[vocab != ""]
return cls(vocab=vocab, vectors=vectors) | def function[from_text, parameter[cls, fname, vocabUnicodeSize, desired_vocab, encoding]]:
constant[
Create a WordVectors class based on a word2vec text file
Parameters
----------
fname : path to file
vocabUnicodeSize: the maximum string length (78, by default)
desired_vocab: if set, this will ignore any word and vector that
doesn't fall inside desired_vocab.
Returns
-------
WordVectors instance
]
with call[name[open], parameter[name[fname], constant[rb]]] begin[:]
variable[header] assign[=] call[name[fin].readline, parameter[]]
<ast.Tuple object at 0x7da1b17a9e10> assign[=] call[name[list], parameter[call[name[map], parameter[name[int], call[name[header].split, parameter[]]]]]]
variable[vocab] assign[=] call[name[np].empty, parameter[name[vocab_size]]]
variable[vectors] assign[=] call[name[np].empty, parameter[tuple[[<ast.Name object at 0x7da1b17a8f10>, <ast.Name object at 0x7da1b17a8910>]]]]
for taget[tuple[[<ast.Name object at 0x7da1b17aaaa0>, <ast.Name object at 0x7da1b17a9390>]]] in starred[call[name[enumerate], parameter[name[fin]]]] begin[:]
variable[line] assign[=] call[call[name[line].decode, parameter[name[encoding]]].rstrip, parameter[]]
variable[parts] assign[=] call[name[line].split, parameter[constant[ ]]]
variable[word] assign[=] call[name[parts]][constant[0]]
variable[include] assign[=] <ast.BoolOp object at 0x7da1b17a8370>
if name[include] begin[:]
variable[vector] assign[=] call[name[np].array, parameter[call[name[parts]][<ast.Slice object at 0x7da20c6a9330>]]]
call[name[vocab]][name[i]] assign[=] name[word]
call[name[vectors]][name[i]] assign[=] call[name[unitvec], parameter[name[vector]]]
if compare[name[desired_vocab] is_not constant[None]] begin[:]
variable[vectors] assign[=] call[name[vectors]][tuple[[<ast.Compare object at 0x7da20eb2a110>, <ast.Slice object at 0x7da20eb29fc0>]]]
variable[vocab] assign[=] call[name[vocab]][compare[name[vocab] not_equal[!=] constant[]]]
return[call[name[cls], parameter[]]] | keyword[def] identifier[from_text] ( identifier[cls] , identifier[fname] , identifier[vocabUnicodeSize] = literal[int] , identifier[desired_vocab] = keyword[None] , identifier[encoding] = literal[string] ):
literal[string]
keyword[with] identifier[open] ( identifier[fname] , literal[string] ) keyword[as] identifier[fin] :
identifier[header] = identifier[fin] . identifier[readline] ()
identifier[vocab_size] , identifier[vector_size] = identifier[list] ( identifier[map] ( identifier[int] , identifier[header] . identifier[split] ()))
identifier[vocab] = identifier[np] . identifier[empty] ( identifier[vocab_size] , identifier[dtype] = literal[string] % identifier[vocabUnicodeSize] )
identifier[vectors] = identifier[np] . identifier[empty] (( identifier[vocab_size] , identifier[vector_size] ), identifier[dtype] = identifier[np] . identifier[float] )
keyword[for] identifier[i] , identifier[line] keyword[in] identifier[enumerate] ( identifier[fin] ):
identifier[line] = identifier[line] . identifier[decode] ( identifier[encoding] ). identifier[rstrip] ()
identifier[parts] = identifier[line] . identifier[split] ( literal[string] )
identifier[word] = identifier[parts] [ literal[int] ]
identifier[include] = identifier[desired_vocab] keyword[is] keyword[None] keyword[or] identifier[word] keyword[in] identifier[desired_vocab]
keyword[if] identifier[include] :
identifier[vector] = identifier[np] . identifier[array] ( identifier[parts] [ literal[int] :], identifier[dtype] = identifier[np] . identifier[float] )
identifier[vocab] [ identifier[i] ]= identifier[word]
identifier[vectors] [ identifier[i] ]= identifier[unitvec] ( identifier[vector] )
keyword[if] identifier[desired_vocab] keyword[is] keyword[not] keyword[None] :
identifier[vectors] = identifier[vectors] [ identifier[vocab] != literal[string] ,:]
identifier[vocab] = identifier[vocab] [ identifier[vocab] != literal[string] ]
keyword[return] identifier[cls] ( identifier[vocab] = identifier[vocab] , identifier[vectors] = identifier[vectors] ) | def from_text(cls, fname, vocabUnicodeSize=78, desired_vocab=None, encoding='utf-8'):
"""
Create a WordVectors class based on a word2vec text file
Parameters
----------
fname : path to file
vocabUnicodeSize: the maximum string length (78, by default)
desired_vocab: if set, this will ignore any word and vector that
doesn't fall inside desired_vocab.
Returns
-------
WordVectors instance
"""
with open(fname, 'rb') as fin:
header = fin.readline()
(vocab_size, vector_size) = list(map(int, header.split()))
vocab = np.empty(vocab_size, dtype='<U%s' % vocabUnicodeSize)
vectors = np.empty((vocab_size, vector_size), dtype=np.float)
for (i, line) in enumerate(fin):
line = line.decode(encoding).rstrip()
parts = line.split(' ')
word = parts[0]
include = desired_vocab is None or word in desired_vocab
if include:
vector = np.array(parts[1:], dtype=np.float)
vocab[i] = word
vectors[i] = unitvec(vector) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if desired_vocab is not None:
vectors = vectors[vocab != '', :]
vocab = vocab[vocab != ''] # depends on [control=['if'], data=[]] # depends on [control=['with'], data=['fin']]
return cls(vocab=vocab, vectors=vectors) |
def __set_style_sheet(self):
"""
Sets the Widget stylesheet.
"""
colors = map(
lambda x: "rgb({0}, {1}, {2}, {3})".format(x.red(), x.green(), x.blue(), int(self.__opacity * 255)),
(self.__color, self.__background_color, self.__border_color))
self.setStyleSheet(self.__style.format(*colors)) | def function[__set_style_sheet, parameter[self]]:
constant[
Sets the Widget stylesheet.
]
variable[colors] assign[=] call[name[map], parameter[<ast.Lambda object at 0x7da1b09bd3f0>, tuple[[<ast.Attribute object at 0x7da1b09bc730>, <ast.Attribute object at 0x7da1b09bda20>, <ast.Attribute object at 0x7da1b09bfb50>]]]]
call[name[self].setStyleSheet, parameter[call[name[self].__style.format, parameter[<ast.Starred object at 0x7da1b09bdfc0>]]]] | keyword[def] identifier[__set_style_sheet] ( identifier[self] ):
literal[string]
identifier[colors] = identifier[map] (
keyword[lambda] identifier[x] : literal[string] . identifier[format] ( identifier[x] . identifier[red] (), identifier[x] . identifier[green] (), identifier[x] . identifier[blue] (), identifier[int] ( identifier[self] . identifier[__opacity] * literal[int] )),
( identifier[self] . identifier[__color] , identifier[self] . identifier[__background_color] , identifier[self] . identifier[__border_color] ))
identifier[self] . identifier[setStyleSheet] ( identifier[self] . identifier[__style] . identifier[format] (* identifier[colors] )) | def __set_style_sheet(self):
"""
Sets the Widget stylesheet.
"""
colors = map(lambda x: 'rgb({0}, {1}, {2}, {3})'.format(x.red(), x.green(), x.blue(), int(self.__opacity * 255)), (self.__color, self.__background_color, self.__border_color))
self.setStyleSheet(self.__style.format(*colors)) |
def validate(self):
"""Ensure that the QueryRoot block is valid."""
if not (isinstance(self.start_class, set) and
all(isinstance(x, six.string_types) for x in self.start_class)):
raise TypeError(u'Expected set of string start_class, got: {} {}'.format(
type(self.start_class).__name__, self.start_class))
for cls in self.start_class:
validate_safe_string(cls) | def function[validate, parameter[self]]:
constant[Ensure that the QueryRoot block is valid.]
if <ast.UnaryOp object at 0x7da1b1726e00> begin[:]
<ast.Raise object at 0x7da1b1724c70>
for taget[name[cls]] in starred[name[self].start_class] begin[:]
call[name[validate_safe_string], parameter[name[cls]]] | keyword[def] identifier[validate] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] ( identifier[isinstance] ( identifier[self] . identifier[start_class] , identifier[set] ) keyword[and]
identifier[all] ( identifier[isinstance] ( identifier[x] , identifier[six] . identifier[string_types] ) keyword[for] identifier[x] keyword[in] identifier[self] . identifier[start_class] )):
keyword[raise] identifier[TypeError] ( literal[string] . identifier[format] (
identifier[type] ( identifier[self] . identifier[start_class] ). identifier[__name__] , identifier[self] . identifier[start_class] ))
keyword[for] identifier[cls] keyword[in] identifier[self] . identifier[start_class] :
identifier[validate_safe_string] ( identifier[cls] ) | def validate(self):
"""Ensure that the QueryRoot block is valid."""
if not (isinstance(self.start_class, set) and all((isinstance(x, six.string_types) for x in self.start_class))):
raise TypeError(u'Expected set of string start_class, got: {} {}'.format(type(self.start_class).__name__, self.start_class)) # depends on [control=['if'], data=[]]
for cls in self.start_class:
validate_safe_string(cls) # depends on [control=['for'], data=['cls']] |
def _set_data(self, action):
"""
capture Wikidata API response data
"""
if action == 'siteinfo':
self._set_siteinfo()
elif action == 'sitematrix':
self._set_sitematrix()
elif action == 'sitevisitors':
self._set_sitevisitors() | def function[_set_data, parameter[self, action]]:
constant[
capture Wikidata API response data
]
if compare[name[action] equal[==] constant[siteinfo]] begin[:]
call[name[self]._set_siteinfo, parameter[]] | keyword[def] identifier[_set_data] ( identifier[self] , identifier[action] ):
literal[string]
keyword[if] identifier[action] == literal[string] :
identifier[self] . identifier[_set_siteinfo] ()
keyword[elif] identifier[action] == literal[string] :
identifier[self] . identifier[_set_sitematrix] ()
keyword[elif] identifier[action] == literal[string] :
identifier[self] . identifier[_set_sitevisitors] () | def _set_data(self, action):
"""
capture Wikidata API response data
"""
if action == 'siteinfo':
self._set_siteinfo() # depends on [control=['if'], data=[]]
elif action == 'sitematrix':
self._set_sitematrix() # depends on [control=['if'], data=[]]
elif action == 'sitevisitors':
self._set_sitevisitors() # depends on [control=['if'], data=[]] |
def optimizeAngle(angle):
"""
Because any rotation can be expressed within 360 degrees
of any given number, and since negative angles sometimes
are one character longer than corresponding positive angle,
we shorten the number to one in the range to [-90, 270[.
"""
# First, we put the new angle in the range ]-360, 360[.
# The modulo operator yields results with the sign of the
# divisor, so for negative dividends, we preserve the sign
# of the angle.
if angle < 0:
angle %= -360
else:
angle %= 360
# 720 degrees is unnecessary, as 360 covers all angles.
# As "-x" is shorter than "35x" and "-xxx" one character
# longer than positive angles <= 260, we constrain angle
# range to [-90, 270[ (or, equally valid: ]-100, 260]).
if angle >= 270:
angle -= 360
elif angle < -90:
angle += 360
return angle | def function[optimizeAngle, parameter[angle]]:
constant[
Because any rotation can be expressed within 360 degrees
of any given number, and since negative angles sometimes
are one character longer than corresponding positive angle,
we shorten the number to one in the range to [-90, 270[.
]
if compare[name[angle] less[<] constant[0]] begin[:]
<ast.AugAssign object at 0x7da20c6c41f0>
if compare[name[angle] greater_or_equal[>=] constant[270]] begin[:]
<ast.AugAssign object at 0x7da20c6c5240>
return[name[angle]] | keyword[def] identifier[optimizeAngle] ( identifier[angle] ):
literal[string]
keyword[if] identifier[angle] < literal[int] :
identifier[angle] %=- literal[int]
keyword[else] :
identifier[angle] %= literal[int]
keyword[if] identifier[angle] >= literal[int] :
identifier[angle] -= literal[int]
keyword[elif] identifier[angle] <- literal[int] :
identifier[angle] += literal[int]
keyword[return] identifier[angle] | def optimizeAngle(angle):
"""
Because any rotation can be expressed within 360 degrees
of any given number, and since negative angles sometimes
are one character longer than corresponding positive angle,
we shorten the number to one in the range to [-90, 270[.
"""
# First, we put the new angle in the range ]-360, 360[.
# The modulo operator yields results with the sign of the
# divisor, so for negative dividends, we preserve the sign
# of the angle.
if angle < 0:
angle %= -360 # depends on [control=['if'], data=['angle']]
else:
angle %= 360
# 720 degrees is unnecessary, as 360 covers all angles.
# As "-x" is shorter than "35x" and "-xxx" one character
# longer than positive angles <= 260, we constrain angle
# range to [-90, 270[ (or, equally valid: ]-100, 260]).
if angle >= 270:
angle -= 360 # depends on [control=['if'], data=['angle']]
elif angle < -90:
angle += 360 # depends on [control=['if'], data=['angle']]
return angle |
def resnet_imagenet_34_td_unit_05_05():
"""Set of hyperparameters."""
hp = resnet_imagenet_34()
hp.use_td = "unit"
hp.targeting_rate = 0.5
hp.keep_prob = 0.5
return hp | def function[resnet_imagenet_34_td_unit_05_05, parameter[]]:
constant[Set of hyperparameters.]
variable[hp] assign[=] call[name[resnet_imagenet_34], parameter[]]
name[hp].use_td assign[=] constant[unit]
name[hp].targeting_rate assign[=] constant[0.5]
name[hp].keep_prob assign[=] constant[0.5]
return[name[hp]] | keyword[def] identifier[resnet_imagenet_34_td_unit_05_05] ():
literal[string]
identifier[hp] = identifier[resnet_imagenet_34] ()
identifier[hp] . identifier[use_td] = literal[string]
identifier[hp] . identifier[targeting_rate] = literal[int]
identifier[hp] . identifier[keep_prob] = literal[int]
keyword[return] identifier[hp] | def resnet_imagenet_34_td_unit_05_05():
"""Set of hyperparameters."""
hp = resnet_imagenet_34()
hp.use_td = 'unit'
hp.targeting_rate = 0.5
hp.keep_prob = 0.5
return hp |
def next_object(self):
"""Get next GridOut object from cursor."""
grid_out = super(self.__class__, self).next_object()
if grid_out:
grid_out_class = create_class_with_framework(
AgnosticGridOut, self._framework, self.__module__)
return grid_out_class(self.collection, delegate=grid_out)
else:
# Exhausted.
return None | def function[next_object, parameter[self]]:
constant[Get next GridOut object from cursor.]
variable[grid_out] assign[=] call[call[name[super], parameter[name[self].__class__, name[self]]].next_object, parameter[]]
if name[grid_out] begin[:]
variable[grid_out_class] assign[=] call[name[create_class_with_framework], parameter[name[AgnosticGridOut], name[self]._framework, name[self].__module__]]
return[call[name[grid_out_class], parameter[name[self].collection]]] | keyword[def] identifier[next_object] ( identifier[self] ):
literal[string]
identifier[grid_out] = identifier[super] ( identifier[self] . identifier[__class__] , identifier[self] ). identifier[next_object] ()
keyword[if] identifier[grid_out] :
identifier[grid_out_class] = identifier[create_class_with_framework] (
identifier[AgnosticGridOut] , identifier[self] . identifier[_framework] , identifier[self] . identifier[__module__] )
keyword[return] identifier[grid_out_class] ( identifier[self] . identifier[collection] , identifier[delegate] = identifier[grid_out] )
keyword[else] :
keyword[return] keyword[None] | def next_object(self):
"""Get next GridOut object from cursor."""
grid_out = super(self.__class__, self).next_object()
if grid_out:
grid_out_class = create_class_with_framework(AgnosticGridOut, self._framework, self.__module__)
return grid_out_class(self.collection, delegate=grid_out) # depends on [control=['if'], data=[]]
else:
# Exhausted.
return None |
def _parse_list(cls, value, separator=','):
"""Represents value as a list.
Value is split either by separator (defaults to comma) or by lines.
:param value:
:param separator: List items separator character.
:rtype: list
"""
if isinstance(value, list): # _get_parser_compound case
return value
if '\n' in value:
value = value.splitlines()
else:
value = value.split(separator)
return [chunk.strip() for chunk in value if chunk.strip()] | def function[_parse_list, parameter[cls, value, separator]]:
constant[Represents value as a list.
Value is split either by separator (defaults to comma) or by lines.
:param value:
:param separator: List items separator character.
:rtype: list
]
if call[name[isinstance], parameter[name[value], name[list]]] begin[:]
return[name[value]]
if compare[constant[
] in name[value]] begin[:]
variable[value] assign[=] call[name[value].splitlines, parameter[]]
return[<ast.ListComp object at 0x7da1b1b13e20>] | keyword[def] identifier[_parse_list] ( identifier[cls] , identifier[value] , identifier[separator] = literal[string] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[list] ):
keyword[return] identifier[value]
keyword[if] literal[string] keyword[in] identifier[value] :
identifier[value] = identifier[value] . identifier[splitlines] ()
keyword[else] :
identifier[value] = identifier[value] . identifier[split] ( identifier[separator] )
keyword[return] [ identifier[chunk] . identifier[strip] () keyword[for] identifier[chunk] keyword[in] identifier[value] keyword[if] identifier[chunk] . identifier[strip] ()] | def _parse_list(cls, value, separator=','):
"""Represents value as a list.
Value is split either by separator (defaults to comma) or by lines.
:param value:
:param separator: List items separator character.
:rtype: list
"""
if isinstance(value, list): # _get_parser_compound case
return value # depends on [control=['if'], data=[]]
if '\n' in value:
value = value.splitlines() # depends on [control=['if'], data=['value']]
else:
value = value.split(separator)
return [chunk.strip() for chunk in value if chunk.strip()] |
def QA_fetch_get_macroindex_list(ip=None, port=None):
"""宏观指标列表
Keyword Arguments:
ip {[type]} -- [description] (default: {None})
port {[type]} -- [description] (default: {None})
38 10 宏观指标 HG
"""
global extension_market_list
extension_market_list = QA_fetch_get_extensionmarket_list(
) if extension_market_list is None else extension_market_list
return extension_market_list.query('market==38') | def function[QA_fetch_get_macroindex_list, parameter[ip, port]]:
constant[宏观指标列表
Keyword Arguments:
ip {[type]} -- [description] (default: {None})
port {[type]} -- [description] (default: {None})
38 10 宏观指标 HG
]
<ast.Global object at 0x7da1b1ff1de0>
variable[extension_market_list] assign[=] <ast.IfExp object at 0x7da1b1ff0580>
return[call[name[extension_market_list].query, parameter[constant[market==38]]]] | keyword[def] identifier[QA_fetch_get_macroindex_list] ( identifier[ip] = keyword[None] , identifier[port] = keyword[None] ):
literal[string]
keyword[global] identifier[extension_market_list]
identifier[extension_market_list] = identifier[QA_fetch_get_extensionmarket_list] (
) keyword[if] identifier[extension_market_list] keyword[is] keyword[None] keyword[else] identifier[extension_market_list]
keyword[return] identifier[extension_market_list] . identifier[query] ( literal[string] ) | def QA_fetch_get_macroindex_list(ip=None, port=None):
"""宏观指标列表
Keyword Arguments:
ip {[type]} -- [description] (default: {None})
port {[type]} -- [description] (default: {None})
38 10 宏观指标 HG
"""
global extension_market_list
extension_market_list = QA_fetch_get_extensionmarket_list() if extension_market_list is None else extension_market_list
return extension_market_list.query('market==38') |
def _poly_eval(self, u, ids, der=0):
"""Evaluate internal polynomial."""
if der == 0:
return self._poly_eval_0(u, ids)
elif der == 1:
return self._poly_eval_1(u, ids)
elif der == 2:
return self._poly_eval_2(u, ids)
elif der == 3:
return self._poly_eval_3(u, ids)
elif der >= 4:
return self._poly_eval_4(u, ids)
else:
raise ValueError("der={} is impossible".format(der)) | def function[_poly_eval, parameter[self, u, ids, der]]:
constant[Evaluate internal polynomial.]
if compare[name[der] equal[==] constant[0]] begin[:]
return[call[name[self]._poly_eval_0, parameter[name[u], name[ids]]]] | keyword[def] identifier[_poly_eval] ( identifier[self] , identifier[u] , identifier[ids] , identifier[der] = literal[int] ):
literal[string]
keyword[if] identifier[der] == literal[int] :
keyword[return] identifier[self] . identifier[_poly_eval_0] ( identifier[u] , identifier[ids] )
keyword[elif] identifier[der] == literal[int] :
keyword[return] identifier[self] . identifier[_poly_eval_1] ( identifier[u] , identifier[ids] )
keyword[elif] identifier[der] == literal[int] :
keyword[return] identifier[self] . identifier[_poly_eval_2] ( identifier[u] , identifier[ids] )
keyword[elif] identifier[der] == literal[int] :
keyword[return] identifier[self] . identifier[_poly_eval_3] ( identifier[u] , identifier[ids] )
keyword[elif] identifier[der] >= literal[int] :
keyword[return] identifier[self] . identifier[_poly_eval_4] ( identifier[u] , identifier[ids] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[der] )) | def _poly_eval(self, u, ids, der=0):
"""Evaluate internal polynomial."""
if der == 0:
return self._poly_eval_0(u, ids) # depends on [control=['if'], data=[]]
elif der == 1:
return self._poly_eval_1(u, ids) # depends on [control=['if'], data=[]]
elif der == 2:
return self._poly_eval_2(u, ids) # depends on [control=['if'], data=[]]
elif der == 3:
return self._poly_eval_3(u, ids) # depends on [control=['if'], data=[]]
elif der >= 4:
return self._poly_eval_4(u, ids) # depends on [control=['if'], data=[]]
else:
raise ValueError('der={} is impossible'.format(der)) |
def _add_phrase(cls, phrase, phrase_id, dictionary=None, word_list=None):
"""
Adds a phrase to a phrase dictionary.
:param phrase: list of tokens
:param phrase_id: phrase id
:param dictionary: phrase dictionary
:return: new phrase dictionary
"""
# @Note: this function is called recursively
if dictionary is None:
dictionary = {}
if len(phrase):
current_word = phrase[0]
# if word_list is not None and current_word not in word_list: #@TODO remove, words should already be ids
# word_list.add(current_word)
if current_word not in dictionary:
dictionary[current_word] = {}
cls._add_phrase(phrase[1:], phrase_id, dictionary[current_word], word_list)
else:
dictionary[None] = phrase_id
return dictionary | def function[_add_phrase, parameter[cls, phrase, phrase_id, dictionary, word_list]]:
constant[
Adds a phrase to a phrase dictionary.
:param phrase: list of tokens
:param phrase_id: phrase id
:param dictionary: phrase dictionary
:return: new phrase dictionary
]
if compare[name[dictionary] is constant[None]] begin[:]
variable[dictionary] assign[=] dictionary[[], []]
if call[name[len], parameter[name[phrase]]] begin[:]
variable[current_word] assign[=] call[name[phrase]][constant[0]]
if compare[name[current_word] <ast.NotIn object at 0x7da2590d7190> name[dictionary]] begin[:]
call[name[dictionary]][name[current_word]] assign[=] dictionary[[], []]
call[name[cls]._add_phrase, parameter[call[name[phrase]][<ast.Slice object at 0x7da20e957370>], name[phrase_id], call[name[dictionary]][name[current_word]], name[word_list]]]
return[name[dictionary]] | keyword[def] identifier[_add_phrase] ( identifier[cls] , identifier[phrase] , identifier[phrase_id] , identifier[dictionary] = keyword[None] , identifier[word_list] = keyword[None] ):
literal[string]
keyword[if] identifier[dictionary] keyword[is] keyword[None] :
identifier[dictionary] ={}
keyword[if] identifier[len] ( identifier[phrase] ):
identifier[current_word] = identifier[phrase] [ literal[int] ]
keyword[if] identifier[current_word] keyword[not] keyword[in] identifier[dictionary] :
identifier[dictionary] [ identifier[current_word] ]={}
identifier[cls] . identifier[_add_phrase] ( identifier[phrase] [ literal[int] :], identifier[phrase_id] , identifier[dictionary] [ identifier[current_word] ], identifier[word_list] )
keyword[else] :
identifier[dictionary] [ keyword[None] ]= identifier[phrase_id]
keyword[return] identifier[dictionary] | def _add_phrase(cls, phrase, phrase_id, dictionary=None, word_list=None):
"""
Adds a phrase to a phrase dictionary.
:param phrase: list of tokens
:param phrase_id: phrase id
:param dictionary: phrase dictionary
:return: new phrase dictionary
"""
# @Note: this function is called recursively
if dictionary is None:
dictionary = {} # depends on [control=['if'], data=['dictionary']]
if len(phrase):
current_word = phrase[0]
# if word_list is not None and current_word not in word_list: #@TODO remove, words should already be ids
# word_list.add(current_word)
if current_word not in dictionary:
dictionary[current_word] = {} # depends on [control=['if'], data=['current_word', 'dictionary']]
cls._add_phrase(phrase[1:], phrase_id, dictionary[current_word], word_list) # depends on [control=['if'], data=[]]
else:
dictionary[None] = phrase_id
return dictionary |
def demo_login(self, auth=None, url=None):
"""Authenticate with a "Share Your Class" URL using a demo user.
You may provide either the entire ``url`` or simply the ``auth``
parameter.
:param url: Example - "https://piazza.com/demo_login?nid=hbj11a1gcvl1s6&auth=06c111b"
:param auth: Example - "06c111b"
"""
assert all([
auth or url, # Must provide at least one
not (auth and url) # Cannot provide more than one
])
if url is None:
url = "https://piazza.com/demo_login"
params = dict(nid=self._nid, auth=auth)
res = self.session.get(url, params=params)
else:
res = self.session.get(url) | def function[demo_login, parameter[self, auth, url]]:
constant[Authenticate with a "Share Your Class" URL using a demo user.
You may provide either the entire ``url`` or simply the ``auth``
parameter.
:param url: Example - "https://piazza.com/demo_login?nid=hbj11a1gcvl1s6&auth=06c111b"
:param auth: Example - "06c111b"
]
assert[call[name[all], parameter[list[[<ast.BoolOp object at 0x7da1b0fee590>, <ast.UnaryOp object at 0x7da1b0fef640>]]]]]
if compare[name[url] is constant[None]] begin[:]
variable[url] assign[=] constant[https://piazza.com/demo_login]
variable[params] assign[=] call[name[dict], parameter[]]
variable[res] assign[=] call[name[self].session.get, parameter[name[url]]] | keyword[def] identifier[demo_login] ( identifier[self] , identifier[auth] = keyword[None] , identifier[url] = keyword[None] ):
literal[string]
keyword[assert] identifier[all] ([
identifier[auth] keyword[or] identifier[url] ,
keyword[not] ( identifier[auth] keyword[and] identifier[url] )
])
keyword[if] identifier[url] keyword[is] keyword[None] :
identifier[url] = literal[string]
identifier[params] = identifier[dict] ( identifier[nid] = identifier[self] . identifier[_nid] , identifier[auth] = identifier[auth] )
identifier[res] = identifier[self] . identifier[session] . identifier[get] ( identifier[url] , identifier[params] = identifier[params] )
keyword[else] :
identifier[res] = identifier[self] . identifier[session] . identifier[get] ( identifier[url] ) | def demo_login(self, auth=None, url=None):
"""Authenticate with a "Share Your Class" URL using a demo user.
You may provide either the entire ``url`` or simply the ``auth``
parameter.
:param url: Example - "https://piazza.com/demo_login?nid=hbj11a1gcvl1s6&auth=06c111b"
:param auth: Example - "06c111b"
""" # Must provide at least one
# Cannot provide more than one
assert all([auth or url, not (auth and url)])
if url is None:
url = 'https://piazza.com/demo_login'
params = dict(nid=self._nid, auth=auth)
res = self.session.get(url, params=params) # depends on [control=['if'], data=['url']]
else:
res = self.session.get(url) |
def run(self, *args):
"""Autocomplete profile information."""
params = self.parser.parse_args(args)
sources = params.source
code = self.autocomplete(sources)
return code | def function[run, parameter[self]]:
constant[Autocomplete profile information.]
variable[params] assign[=] call[name[self].parser.parse_args, parameter[name[args]]]
variable[sources] assign[=] name[params].source
variable[code] assign[=] call[name[self].autocomplete, parameter[name[sources]]]
return[name[code]] | keyword[def] identifier[run] ( identifier[self] ,* identifier[args] ):
literal[string]
identifier[params] = identifier[self] . identifier[parser] . identifier[parse_args] ( identifier[args] )
identifier[sources] = identifier[params] . identifier[source]
identifier[code] = identifier[self] . identifier[autocomplete] ( identifier[sources] )
keyword[return] identifier[code] | def run(self, *args):
"""Autocomplete profile information."""
params = self.parser.parse_args(args)
sources = params.source
code = self.autocomplete(sources)
return code |
def delete_channel_cb(self, gshell, chinfo):
"""Called when a channel is deleted from the main interface.
Parameter is chinfo (a bunch)."""
chname = chinfo.name
if chname not in self.name_dict:
return
del self.name_dict[chname]
self.logger.debug('{0} removed from ChangeHistory'.format(chname))
if not self.gui_up:
return False
self.clear_selected_history()
self.recreate_toc() | def function[delete_channel_cb, parameter[self, gshell, chinfo]]:
constant[Called when a channel is deleted from the main interface.
Parameter is chinfo (a bunch).]
variable[chname] assign[=] name[chinfo].name
if compare[name[chname] <ast.NotIn object at 0x7da2590d7190> name[self].name_dict] begin[:]
return[None]
<ast.Delete object at 0x7da1b0dbfd90>
call[name[self].logger.debug, parameter[call[constant[{0} removed from ChangeHistory].format, parameter[name[chname]]]]]
if <ast.UnaryOp object at 0x7da1b0d55870> begin[:]
return[constant[False]]
call[name[self].clear_selected_history, parameter[]]
call[name[self].recreate_toc, parameter[]] | keyword[def] identifier[delete_channel_cb] ( identifier[self] , identifier[gshell] , identifier[chinfo] ):
literal[string]
identifier[chname] = identifier[chinfo] . identifier[name]
keyword[if] identifier[chname] keyword[not] keyword[in] identifier[self] . identifier[name_dict] :
keyword[return]
keyword[del] identifier[self] . identifier[name_dict] [ identifier[chname] ]
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[chname] ))
keyword[if] keyword[not] identifier[self] . identifier[gui_up] :
keyword[return] keyword[False]
identifier[self] . identifier[clear_selected_history] ()
identifier[self] . identifier[recreate_toc] () | def delete_channel_cb(self, gshell, chinfo):
"""Called when a channel is deleted from the main interface.
Parameter is chinfo (a bunch)."""
chname = chinfo.name
if chname not in self.name_dict:
return # depends on [control=['if'], data=[]]
del self.name_dict[chname]
self.logger.debug('{0} removed from ChangeHistory'.format(chname))
if not self.gui_up:
return False # depends on [control=['if'], data=[]]
self.clear_selected_history()
self.recreate_toc() |
def find_videos_by_ids(self, video_ids):
"""doc: http://open.youku.com/docs/doc?id=45
"""
url = 'https://openapi.youku.com/v2/videos/show_basic_batch.json'
params = {
'client_id': self.client_id,
'video_ids': video_ids
}
r = requests.get(url, params=params)
check_error(r)
return r.json() | def function[find_videos_by_ids, parameter[self, video_ids]]:
constant[doc: http://open.youku.com/docs/doc?id=45
]
variable[url] assign[=] constant[https://openapi.youku.com/v2/videos/show_basic_batch.json]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b2606980>, <ast.Constant object at 0x7da1b2606950>], [<ast.Attribute object at 0x7da1b2607a60>, <ast.Name object at 0x7da1b2606740>]]
variable[r] assign[=] call[name[requests].get, parameter[name[url]]]
call[name[check_error], parameter[name[r]]]
return[call[name[r].json, parameter[]]] | keyword[def] identifier[find_videos_by_ids] ( identifier[self] , identifier[video_ids] ):
literal[string]
identifier[url] = literal[string]
identifier[params] ={
literal[string] : identifier[self] . identifier[client_id] ,
literal[string] : identifier[video_ids]
}
identifier[r] = identifier[requests] . identifier[get] ( identifier[url] , identifier[params] = identifier[params] )
identifier[check_error] ( identifier[r] )
keyword[return] identifier[r] . identifier[json] () | def find_videos_by_ids(self, video_ids):
"""doc: http://open.youku.com/docs/doc?id=45
"""
url = 'https://openapi.youku.com/v2/videos/show_basic_batch.json'
params = {'client_id': self.client_id, 'video_ids': video_ids}
r = requests.get(url, params=params)
check_error(r)
return r.json() |
def _read_para_seq(self, code, cbit, clen, *, desc, length, version):
"""Read HIP SEQ parameter.
Structure of HIP SEQ parameter [RFC 7401]:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Update ID |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 seq.type Parameter Type
1 15 seq.critical Critical Bit
2 16 seq.length Length of Contents
4 32 seq.id Update ID
"""
if clen != 4:
raise ProtocolError(f'HIPv{version}: [Parano {code}] invalid format')
_upid = self._read_unpack(4)
seq = dict(
type=desc,
critical=cbit,
length=clen,
id=_upid,
)
return seq | def function[_read_para_seq, parameter[self, code, cbit, clen]]:
constant[Read HIP SEQ parameter.
Structure of HIP SEQ parameter [RFC 7401]:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Update ID |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 seq.type Parameter Type
1 15 seq.critical Critical Bit
2 16 seq.length Length of Contents
4 32 seq.id Update ID
]
if compare[name[clen] not_equal[!=] constant[4]] begin[:]
<ast.Raise object at 0x7da1b2344e50>
variable[_upid] assign[=] call[name[self]._read_unpack, parameter[constant[4]]]
variable[seq] assign[=] call[name[dict], parameter[]]
return[name[seq]] | keyword[def] identifier[_read_para_seq] ( identifier[self] , identifier[code] , identifier[cbit] , identifier[clen] ,*, identifier[desc] , identifier[length] , identifier[version] ):
literal[string]
keyword[if] identifier[clen] != literal[int] :
keyword[raise] identifier[ProtocolError] ( literal[string] )
identifier[_upid] = identifier[self] . identifier[_read_unpack] ( literal[int] )
identifier[seq] = identifier[dict] (
identifier[type] = identifier[desc] ,
identifier[critical] = identifier[cbit] ,
identifier[length] = identifier[clen] ,
identifier[id] = identifier[_upid] ,
)
keyword[return] identifier[seq] | def _read_para_seq(self, code, cbit, clen, *, desc, length, version):
"""Read HIP SEQ parameter.
Structure of HIP SEQ parameter [RFC 7401]:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Update ID |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 seq.type Parameter Type
1 15 seq.critical Critical Bit
2 16 seq.length Length of Contents
4 32 seq.id Update ID
"""
if clen != 4:
raise ProtocolError(f'HIPv{version}: [Parano {code}] invalid format') # depends on [control=['if'], data=[]]
_upid = self._read_unpack(4)
seq = dict(type=desc, critical=cbit, length=clen, id=_upid)
return seq |
def execute(self):
"""Resolves the specified confs for the configured targets and returns an iterator over
tuples of (conf, jar path).
"""
if JvmResolveSubsystem.global_instance().get_options().resolver != 'ivy':
return
compile_classpath = self.context.products.get_data('compile_classpath',
init_func=ClasspathProducts.init_func(self.get_options().pants_workdir))
targets = self.context.targets()
if all(not isinstance(target, JarLibrary) for target in targets):
if self._report:
self.context.log.info("Not generating a report. No resolution performed.")
return
executor = self.create_java_executor()
results = self.resolve(executor=executor,
targets=targets,
classpath_products=compile_classpath,
confs=self.get_options().confs,
extra_args=self._args)
if self._report:
results_with_resolved_artifacts = [r for r in results if r.has_resolved_artifacts]
if not results_with_resolved_artifacts:
self.context.log.info("Not generating a report. No resolution performed.")
else:
for result in results_with_resolved_artifacts:
self._generate_ivy_report(result) | def function[execute, parameter[self]]:
constant[Resolves the specified confs for the configured targets and returns an iterator over
tuples of (conf, jar path).
]
if compare[call[call[name[JvmResolveSubsystem].global_instance, parameter[]].get_options, parameter[]].resolver not_equal[!=] constant[ivy]] begin[:]
return[None]
variable[compile_classpath] assign[=] call[name[self].context.products.get_data, parameter[constant[compile_classpath]]]
variable[targets] assign[=] call[name[self].context.targets, parameter[]]
if call[name[all], parameter[<ast.GeneratorExp object at 0x7da1b22799c0>]] begin[:]
if name[self]._report begin[:]
call[name[self].context.log.info, parameter[constant[Not generating a report. No resolution performed.]]]
return[None]
variable[executor] assign[=] call[name[self].create_java_executor, parameter[]]
variable[results] assign[=] call[name[self].resolve, parameter[]]
if name[self]._report begin[:]
variable[results_with_resolved_artifacts] assign[=] <ast.ListComp object at 0x7da1b22a72e0>
if <ast.UnaryOp object at 0x7da1b22a7b50> begin[:]
call[name[self].context.log.info, parameter[constant[Not generating a report. No resolution performed.]]] | keyword[def] identifier[execute] ( identifier[self] ):
literal[string]
keyword[if] identifier[JvmResolveSubsystem] . identifier[global_instance] (). identifier[get_options] (). identifier[resolver] != literal[string] :
keyword[return]
identifier[compile_classpath] = identifier[self] . identifier[context] . identifier[products] . identifier[get_data] ( literal[string] ,
identifier[init_func] = identifier[ClasspathProducts] . identifier[init_func] ( identifier[self] . identifier[get_options] (). identifier[pants_workdir] ))
identifier[targets] = identifier[self] . identifier[context] . identifier[targets] ()
keyword[if] identifier[all] ( keyword[not] identifier[isinstance] ( identifier[target] , identifier[JarLibrary] ) keyword[for] identifier[target] keyword[in] identifier[targets] ):
keyword[if] identifier[self] . identifier[_report] :
identifier[self] . identifier[context] . identifier[log] . identifier[info] ( literal[string] )
keyword[return]
identifier[executor] = identifier[self] . identifier[create_java_executor] ()
identifier[results] = identifier[self] . identifier[resolve] ( identifier[executor] = identifier[executor] ,
identifier[targets] = identifier[targets] ,
identifier[classpath_products] = identifier[compile_classpath] ,
identifier[confs] = identifier[self] . identifier[get_options] (). identifier[confs] ,
identifier[extra_args] = identifier[self] . identifier[_args] )
keyword[if] identifier[self] . identifier[_report] :
identifier[results_with_resolved_artifacts] =[ identifier[r] keyword[for] identifier[r] keyword[in] identifier[results] keyword[if] identifier[r] . identifier[has_resolved_artifacts] ]
keyword[if] keyword[not] identifier[results_with_resolved_artifacts] :
identifier[self] . identifier[context] . identifier[log] . identifier[info] ( literal[string] )
keyword[else] :
keyword[for] identifier[result] keyword[in] identifier[results_with_resolved_artifacts] :
identifier[self] . identifier[_generate_ivy_report] ( identifier[result] ) | def execute(self):
"""Resolves the specified confs for the configured targets and returns an iterator over
tuples of (conf, jar path).
"""
if JvmResolveSubsystem.global_instance().get_options().resolver != 'ivy':
return # depends on [control=['if'], data=[]]
compile_classpath = self.context.products.get_data('compile_classpath', init_func=ClasspathProducts.init_func(self.get_options().pants_workdir))
targets = self.context.targets()
if all((not isinstance(target, JarLibrary) for target in targets)):
if self._report:
self.context.log.info('Not generating a report. No resolution performed.') # depends on [control=['if'], data=[]]
return # depends on [control=['if'], data=[]]
executor = self.create_java_executor()
results = self.resolve(executor=executor, targets=targets, classpath_products=compile_classpath, confs=self.get_options().confs, extra_args=self._args)
if self._report:
results_with_resolved_artifacts = [r for r in results if r.has_resolved_artifacts]
if not results_with_resolved_artifacts:
self.context.log.info('Not generating a report. No resolution performed.') # depends on [control=['if'], data=[]]
else:
for result in results_with_resolved_artifacts:
self._generate_ivy_report(result) # depends on [control=['for'], data=['result']] # depends on [control=['if'], data=[]] |
def maybe_call_closing_deferred(self):
    """Fire the pending ``_closing_deferred``, if one is set.

    Internal helper: invokes the deferred's callback with this object,
    then drops the reference so the deferred can only ever fire once.
    No-op when no closing deferred is pending.
    """
    deferred = self._closing_deferred
    if deferred:
        deferred.callback(self)
        self._closing_deferred = None
constant[
Used internally to callback on the _closing_deferred if it
exists.
]
if name[self]._closing_deferred begin[:]
call[name[self]._closing_deferred.callback, parameter[name[self]]]
name[self]._closing_deferred assign[=] constant[None] | keyword[def] identifier[maybe_call_closing_deferred] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_closing_deferred] :
identifier[self] . identifier[_closing_deferred] . identifier[callback] ( identifier[self] )
identifier[self] . identifier[_closing_deferred] = keyword[None] | def maybe_call_closing_deferred(self):
"""
Used internally to callback on the _closing_deferred if it
exists.
"""
if self._closing_deferred:
self._closing_deferred.callback(self)
self._closing_deferred = None # depends on [control=['if'], data=[]] |
def meth_delete(args):
    """Remove (redact) a method from the method repository.

    Unless ``args.yes`` is set, asks the user to confirm before telling
    FireCloud to redact the given snapshot of the method.

    Returns 0 on success; returns None when the user declines the prompt.
    """
    message = "WARNING: this will delete workflow \n\t{0}/{1}:{2}".format(
        args.namespace, args.method, args.snapshot_id)
    # args.yes short-circuits the interactive confirmation.
    proceed = args.yes or _confirm_prompt(message)
    if not proceed:
        return
    response = fapi.delete_repository_method(args.namespace, args.method,
                                             args.snapshot_id)
    fapi._check_response_code(response, 200)
    if fcconfig.verbosity:
        print("Method %s removed from project %s" % (args.method, args.namespace))
    return 0
constant[ Remove (redact) a method from the method repository ]
variable[message] assign[=] call[constant[WARNING: this will delete workflow
{0}/{1}:{2}].format, parameter[name[args].namespace, name[args].method, name[args].snapshot_id]]
if <ast.BoolOp object at 0x7da1b1a29450> begin[:]
return[None]
variable[r] assign[=] call[name[fapi].delete_repository_method, parameter[name[args].namespace, name[args].method, name[args].snapshot_id]]
call[name[fapi]._check_response_code, parameter[name[r], constant[200]]]
if name[fcconfig].verbosity begin[:]
call[name[print], parameter[binary_operation[constant[Method %s removed from project %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b1a29300>, <ast.Attribute object at 0x7da1b1a28340>]]]]]
return[constant[0]] | keyword[def] identifier[meth_delete] ( identifier[args] ):
literal[string]
identifier[message] = literal[string] . identifier[format] (
identifier[args] . identifier[namespace] , identifier[args] . identifier[method] , identifier[args] . identifier[snapshot_id] )
keyword[if] keyword[not] identifier[args] . identifier[yes] keyword[and] keyword[not] identifier[_confirm_prompt] ( identifier[message] ):
keyword[return]
identifier[r] = identifier[fapi] . identifier[delete_repository_method] ( identifier[args] . identifier[namespace] , identifier[args] . identifier[method] ,
identifier[args] . identifier[snapshot_id] )
identifier[fapi] . identifier[_check_response_code] ( identifier[r] , literal[int] )
keyword[if] identifier[fcconfig] . identifier[verbosity] :
identifier[print] ( literal[string] %( identifier[args] . identifier[method] , identifier[args] . identifier[namespace] ))
keyword[return] literal[int] | def meth_delete(args):
""" Remove (redact) a method from the method repository """
message = 'WARNING: this will delete workflow \n\t{0}/{1}:{2}'.format(args.namespace, args.method, args.snapshot_id)
if not args.yes and (not _confirm_prompt(message)):
return # depends on [control=['if'], data=[]]
r = fapi.delete_repository_method(args.namespace, args.method, args.snapshot_id)
fapi._check_response_code(r, 200)
if fcconfig.verbosity:
print('Method %s removed from project %s' % (args.method, args.namespace)) # depends on [control=['if'], data=[]]
return 0 |
def rle_1d(arr):
    """Run-length encode a 1-D sequence.

    Parameters
    ----------
    arr : sequence
        Array of values to be parsed.

    Returns
    -------
    (values, run lengths, start positions)
    values : np.array
        The values taken by arr over each run
    run lengths : np.array
        The length of each run
    start position : np.array
        The starting index of each run

    Examples
    --------
    >>> a = [1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3]
    >>> rle_1d(a)
    (array([1, 2, 3]), array([2, 4, 6]), array([0, 2, 6]))
    """
    values = np.asarray(arr)
    size = len(values)
    if size == 0:
        warn('run length array empty')
        return None, None, None
    # True wherever the value changes between neighbours (string safe).
    changed = np.array(values[1:] != values[:-1])
    # Index of the last element of each run; the final element always ends one.
    run_ends = np.append(np.where(changed), size - 1)
    run_lengths = np.diff(np.append(-1, run_ends))
    # Start of each run = cumulative length of all preceding runs.
    run_starts = np.cumsum(np.append(0, run_lengths))[:-1]
    return values[run_ends], run_lengths, run_starts
constant[Return the length, starting position and value of consecutive identical values.
Parameters
----------
arr : sequence
Array of values to be parsed.
Returns
-------
(values, run lengths, start positions)
values : np.array
The values taken by arr over each run
run lengths : np.array
The length of each run
start position : np.array
The starting index of each run
Examples
--------
>>> a = [1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3]
>>> rle_1d(a)
(array([1, 2, 3]), array([2, 4, 6]), array([0, 2, 6]))
]
variable[ia] assign[=] call[name[np].asarray, parameter[name[arr]]]
variable[n] assign[=] call[name[len], parameter[name[ia]]]
if compare[name[n] equal[==] constant[0]] begin[:]
variable[e] assign[=] constant[run length array empty]
call[name[warn], parameter[name[e]]]
return[tuple[[<ast.Constant object at 0x7da2044c3a90>, <ast.Constant object at 0x7da2044c1ff0>, <ast.Constant object at 0x7da2044c3400>]]]
variable[y] assign[=] call[name[np].array, parameter[compare[call[name[ia]][<ast.Slice object at 0x7da18dc05840>] not_equal[!=] call[name[ia]][<ast.Slice object at 0x7da18dc07700>]]]]
variable[i] assign[=] call[name[np].append, parameter[call[name[np].where, parameter[name[y]]], binary_operation[name[n] - constant[1]]]]
variable[rl] assign[=] call[name[np].diff, parameter[call[name[np].append, parameter[<ast.UnaryOp object at 0x7da18dc06500>, name[i]]]]]
variable[pos] assign[=] call[call[name[np].cumsum, parameter[call[name[np].append, parameter[constant[0], name[rl]]]]]][<ast.Slice object at 0x7da18dc079d0>]
return[tuple[[<ast.Subscript object at 0x7da18dc06020>, <ast.Name object at 0x7da18dc05a80>, <ast.Name object at 0x7da18dc066e0>]]] | keyword[def] identifier[rle_1d] ( identifier[arr] ):
literal[string]
identifier[ia] = identifier[np] . identifier[asarray] ( identifier[arr] )
identifier[n] = identifier[len] ( identifier[ia] )
keyword[if] identifier[n] == literal[int] :
identifier[e] = literal[string]
identifier[warn] ( identifier[e] )
keyword[return] keyword[None] , keyword[None] , keyword[None]
identifier[y] = identifier[np] . identifier[array] ( identifier[ia] [ literal[int] :]!= identifier[ia] [:- literal[int] ])
identifier[i] = identifier[np] . identifier[append] ( identifier[np] . identifier[where] ( identifier[y] ), identifier[n] - literal[int] )
identifier[rl] = identifier[np] . identifier[diff] ( identifier[np] . identifier[append] (- literal[int] , identifier[i] ))
identifier[pos] = identifier[np] . identifier[cumsum] ( identifier[np] . identifier[append] ( literal[int] , identifier[rl] ))[:- literal[int] ]
keyword[return] identifier[ia] [ identifier[i] ], identifier[rl] , identifier[pos] | def rle_1d(arr):
"""Return the length, starting position and value of consecutive identical values.
Parameters
----------
arr : sequence
Array of values to be parsed.
Returns
-------
(values, run lengths, start positions)
values : np.array
The values taken by arr over each run
run lengths : np.array
The length of each run
start position : np.array
The starting index of each run
Examples
--------
>>> a = [1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3]
>>> rle_1d(a)
(array([1, 2, 3]), array([2, 4, 6]), array([0, 2, 6]))
"""
ia = np.asarray(arr)
n = len(ia)
if n == 0:
e = 'run length array empty'
warn(e)
return (None, None, None) # depends on [control=['if'], data=[]]
y = np.array(ia[1:] != ia[:-1]) # pairwise unequal (string safe)
i = np.append(np.where(y), n - 1) # must include last element position
rl = np.diff(np.append(-1, i)) # run lengths
pos = np.cumsum(np.append(0, rl))[:-1] # positions
return (ia[i], rl, pos) |
def nmin(wave, indep_min=None, indep_max=None):
    r"""
    Return the minimum of a waveform's dependent variable vector.
    :param wave: Waveform
    :type wave: :py:class:`peng.eng.Waveform`
    :param indep_min: Independent vector start point of computation
    :type indep_min: integer or float
    :param indep_max: Independent vector stop point of computation
    :type indep_max: integer or float
    :rtype: float
    .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
    .. Auto-generated exceptions documentation for
    .. peng.wave_functions.nmin
    :raises:
     * RuntimeError (Argument \`indep_max\` is not valid)
     * RuntimeError (Argument \`indep_min\` is not valid)
     * RuntimeError (Argument \`wave\` is not valid)
     * RuntimeError (Incongruent \`indep_min\` and \`indep_max\`
       arguments)
    .. [[[end]]]
    """
    # Work on a shallow copy so the caller's waveform is left untouched
    # when we clip it to the requested independent-variable range.
    clipped = copy.copy(wave)
    _bound_waveform(clipped, indep_min, indep_max)
    return np.min(clipped._dep_vector)
constant[
Return the minimum of a waveform's dependent variable vector.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:param indep_min: Independent vector start point of computation
:type indep_min: integer or float
:param indep_max: Independent vector stop point of computation
:type indep_max: integer or float
:rtype: float
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.nmin
:raises:
* RuntimeError (Argument \`indep_max\` is not valid)
* RuntimeError (Argument \`indep_min\` is not valid)
* RuntimeError (Argument \`wave\` is not valid)
* RuntimeError (Incongruent \`indep_min\` and \`indep_max\`
arguments)
.. [[[end]]]
]
variable[ret] assign[=] call[name[copy].copy, parameter[name[wave]]]
call[name[_bound_waveform], parameter[name[ret], name[indep_min], name[indep_max]]]
return[call[name[np].min, parameter[name[ret]._dep_vector]]] | keyword[def] identifier[nmin] ( identifier[wave] , identifier[indep_min] = keyword[None] , identifier[indep_max] = keyword[None] ):
literal[string]
identifier[ret] = identifier[copy] . identifier[copy] ( identifier[wave] )
identifier[_bound_waveform] ( identifier[ret] , identifier[indep_min] , identifier[indep_max] )
keyword[return] identifier[np] . identifier[min] ( identifier[ret] . identifier[_dep_vector] ) | def nmin(wave, indep_min=None, indep_max=None):
"""
Return the minimum of a waveform's dependent variable vector.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:param indep_min: Independent vector start point of computation
:type indep_min: integer or float
:param indep_max: Independent vector stop point of computation
:type indep_max: integer or float
:rtype: float
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.nmin
:raises:
* RuntimeError (Argument \\`indep_max\\` is not valid)
* RuntimeError (Argument \\`indep_min\\` is not valid)
* RuntimeError (Argument \\`wave\\` is not valid)
* RuntimeError (Incongruent \\`indep_min\\` and \\`indep_max\\`
arguments)
.. [[[end]]]
"""
ret = copy.copy(wave)
_bound_waveform(ret, indep_min, indep_max)
return np.min(ret._dep_vector) |
def cf_decoder(variables, attributes,
               concat_characters=True, mask_and_scale=True,
               decode_times=True):
    """
    Decode a set of CF encoded variables and attributes.
    See Also, decode_cf_variable
    Parameters
    ----------
    variables : dict
        A dictionary mapping from variable name to xarray.Variable
    attributes : dict
        A dictionary mapping from attribute name to value
    concat_characters : bool
        Should character arrays be concatenated to strings, for
        example: ['h', 'e', 'l', 'l', 'o'] -> 'hello'
    mask_and_scale: bool
        Lazily scale (using scale_factor and add_offset) and mask
        (using _FillValue).
    decode_times : bool
        Decode cf times ('hours since 2000-01-01') to np.datetime64.
    Returns
    -------
    decoded_variables : dict
        A dictionary mapping from variable name to xarray.Variable objects.
    decoded_attributes : dict
        A dictionary mapping from attribute name to values.
    """
    # decode_cf_variables also returns the set of coordinate names,
    # which this thin wrapper deliberately discards.
    decoded_vars, decoded_attrs, _ = decode_cf_variables(
        variables, attributes, concat_characters, mask_and_scale,
        decode_times)
    return decoded_vars, decoded_attrs
constant[
Decode a set of CF encoded variables and attributes.
See Also, decode_cf_variable
Parameters
----------
variables : dict
A dictionary mapping from variable name to xarray.Variable
attributes : dict
A dictionary mapping from attribute name to value
concat_characters : bool
Should character arrays be concatenated to strings, for
example: ['h', 'e', 'l', 'l', 'o'] -> 'hello'
mask_and_scale: bool
Lazily scale (using scale_factor and add_offset) and mask
(using _FillValue).
decode_times : bool
Decode cf times ('hours since 2000-01-01') to np.datetime64.
Returns
-------
decoded_variables : dict
A dictionary mapping from variable name to xarray.Variable objects.
decoded_attributes : dict
A dictionary mapping from attribute name to values.
]
<ast.Tuple object at 0x7da20c7c9cc0> assign[=] call[name[decode_cf_variables], parameter[name[variables], name[attributes], name[concat_characters], name[mask_and_scale], name[decode_times]]]
return[tuple[[<ast.Name object at 0x7da20c7c8dc0>, <ast.Name object at 0x7da20c7c9900>]]] | keyword[def] identifier[cf_decoder] ( identifier[variables] , identifier[attributes] ,
identifier[concat_characters] = keyword[True] , identifier[mask_and_scale] = keyword[True] ,
identifier[decode_times] = keyword[True] ):
literal[string]
identifier[variables] , identifier[attributes] , identifier[_] = identifier[decode_cf_variables] (
identifier[variables] , identifier[attributes] , identifier[concat_characters] , identifier[mask_and_scale] , identifier[decode_times] )
keyword[return] identifier[variables] , identifier[attributes] | def cf_decoder(variables, attributes, concat_characters=True, mask_and_scale=True, decode_times=True):
"""
Decode a set of CF encoded variables and attributes.
See Also, decode_cf_variable
Parameters
----------
variables : dict
A dictionary mapping from variable name to xarray.Variable
attributes : dict
A dictionary mapping from attribute name to value
concat_characters : bool
Should character arrays be concatenated to strings, for
example: ['h', 'e', 'l', 'l', 'o'] -> 'hello'
mask_and_scale: bool
Lazily scale (using scale_factor and add_offset) and mask
(using _FillValue).
decode_times : bool
Decode cf times ('hours since 2000-01-01') to np.datetime64.
Returns
-------
decoded_variables : dict
A dictionary mapping from variable name to xarray.Variable objects.
decoded_attributes : dict
A dictionary mapping from attribute name to values.
"""
(variables, attributes, _) = decode_cf_variables(variables, attributes, concat_characters, mask_and_scale, decode_times)
return (variables, attributes) |
def read_msr(address):
    """
    Read the contents of the specified MSR (Machine Specific Register).
    @type  address: int
    @param address: MSR to read.
    @rtype:  int
    @return: Value of the specified MSR.
    @raise WindowsError:
        Raises an exception on error.
    @raise NotImplementedError:
        Current architecture is not C{i386} or C{amd64}.
    @warning:
        It could potentially brick your machine.
        It works on my machine, but your mileage may vary.
    """
    supported_archs = (win32.ARCH_I386, win32.ARCH_AMD64)
    if win32.arch not in supported_archs:
        raise NotImplementedError(
            "MSR reading is only supported on i386 or amd64 processors.")
    # The same SYSDBG_MSR struct carries the request (Address) in and
    # the register contents (Data) back out.
    request = win32.SYSDBG_MSR()
    request.Address = address
    request.Data = 0
    win32.NtSystemDebugControl(win32.SysDbgReadMsr,
                               InputBuffer=request,
                               OutputBuffer=request)
    return request.Data
constant[
Read the contents of the specified MSR (Machine Specific Register).
@type address: int
@param address: MSR to read.
@rtype: int
@return: Value of the specified MSR.
@raise WindowsError:
Raises an exception on error.
@raise NotImplementedError:
Current architecture is not C{i386} or C{amd64}.
@warning:
It could potentially brick your machine.
It works on my machine, but your mileage may vary.
]
if compare[name[win32].arch <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Attribute object at 0x7da1b08da1d0>, <ast.Attribute object at 0x7da1b08dbbb0>]]] begin[:]
<ast.Raise object at 0x7da1b08db220>
variable[msr] assign[=] call[name[win32].SYSDBG_MSR, parameter[]]
name[msr].Address assign[=] name[address]
name[msr].Data assign[=] constant[0]
call[name[win32].NtSystemDebugControl, parameter[name[win32].SysDbgReadMsr]]
return[name[msr].Data] | keyword[def] identifier[read_msr] ( identifier[address] ):
literal[string]
keyword[if] identifier[win32] . identifier[arch] keyword[not] keyword[in] ( identifier[win32] . identifier[ARCH_I386] , identifier[win32] . identifier[ARCH_AMD64] ):
keyword[raise] identifier[NotImplementedError] (
literal[string] )
identifier[msr] = identifier[win32] . identifier[SYSDBG_MSR] ()
identifier[msr] . identifier[Address] = identifier[address]
identifier[msr] . identifier[Data] = literal[int]
identifier[win32] . identifier[NtSystemDebugControl] ( identifier[win32] . identifier[SysDbgReadMsr] ,
identifier[InputBuffer] = identifier[msr] ,
identifier[OutputBuffer] = identifier[msr] )
keyword[return] identifier[msr] . identifier[Data] | def read_msr(address):
"""
Read the contents of the specified MSR (Machine Specific Register).
@type address: int
@param address: MSR to read.
@rtype: int
@return: Value of the specified MSR.
@raise WindowsError:
Raises an exception on error.
@raise NotImplementedError:
Current architecture is not C{i386} or C{amd64}.
@warning:
It could potentially brick your machine.
It works on my machine, but your mileage may vary.
"""
if win32.arch not in (win32.ARCH_I386, win32.ARCH_AMD64):
raise NotImplementedError('MSR reading is only supported on i386 or amd64 processors.') # depends on [control=['if'], data=[]]
msr = win32.SYSDBG_MSR()
msr.Address = address
msr.Data = 0
win32.NtSystemDebugControl(win32.SysDbgReadMsr, InputBuffer=msr, OutputBuffer=msr)
return msr.Data |
def write_chunks(out, chunks):
    """Write a complete PNG stream to *out*.

    Emits the PNG signature first, then each chunk in order; every
    element of *chunks* is a tuple of arguments forwarded to
    ``write_chunk``.
    """
    out.write(signature)
    for chunk_args in chunks:
        write_chunk(out, *chunk_args)
constant[Create a PNG file by writing out the chunks.]
call[name[out].write, parameter[name[signature]]]
for taget[name[chunk]] in starred[name[chunks]] begin[:]
call[name[write_chunk], parameter[name[out], <ast.Starred object at 0x7da1b0508fa0>]] | keyword[def] identifier[write_chunks] ( identifier[out] , identifier[chunks] ):
literal[string]
identifier[out] . identifier[write] ( identifier[signature] )
keyword[for] identifier[chunk] keyword[in] identifier[chunks] :
identifier[write_chunk] ( identifier[out] ,* identifier[chunk] ) | def write_chunks(out, chunks):
"""Create a PNG file by writing out the chunks."""
out.write(signature)
for chunk in chunks:
write_chunk(out, *chunk) # depends on [control=['for'], data=['chunk']] |
def _set_rho_grids(self):
""" Set the grids and weights for rho used in numerical integration
of AR(1) parameters.
"""
rho_grids = np.arange(self.rho_bins) * 2 / self.rho_bins - 1 \
+ 1 / self.rho_bins
rho_weights = np.ones(self.rho_bins) / self.rho_bins
return rho_grids, rho_weights | def function[_set_rho_grids, parameter[self]]:
constant[ Set the grids and weights for rho used in numerical integration
of AR(1) parameters.
]
variable[rho_grids] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[call[name[np].arange, parameter[name[self].rho_bins]] * constant[2]] / name[self].rho_bins] - constant[1]] + binary_operation[constant[1] / name[self].rho_bins]]
variable[rho_weights] assign[=] binary_operation[call[name[np].ones, parameter[name[self].rho_bins]] / name[self].rho_bins]
return[tuple[[<ast.Name object at 0x7da1b083ea10>, <ast.Name object at 0x7da1b083e7d0>]]] | keyword[def] identifier[_set_rho_grids] ( identifier[self] ):
literal[string]
identifier[rho_grids] = identifier[np] . identifier[arange] ( identifier[self] . identifier[rho_bins] )* literal[int] / identifier[self] . identifier[rho_bins] - literal[int] + literal[int] / identifier[self] . identifier[rho_bins]
identifier[rho_weights] = identifier[np] . identifier[ones] ( identifier[self] . identifier[rho_bins] )/ identifier[self] . identifier[rho_bins]
keyword[return] identifier[rho_grids] , identifier[rho_weights] | def _set_rho_grids(self):
""" Set the grids and weights for rho used in numerical integration
of AR(1) parameters.
"""
rho_grids = np.arange(self.rho_bins) * 2 / self.rho_bins - 1 + 1 / self.rho_bins
rho_weights = np.ones(self.rho_bins) / self.rho_bins
return (rho_grids, rho_weights) |
def calc_missingremoterelease_v1(self):
    """Calculate the portion of the required remote demand that could not
    be met by the actual discharge release.
    Required flux sequences:
      |RequiredRemoteRelease|
      |ActualRelease|
    Calculated flux sequence:
      |MissingRemoteRelease|
    Basic equation:
      :math:`MissingRemoteRelease = max(
      RequiredRemoteRelease-ActualRelease, 0)`
    Example:
        >>> from hydpy.models.dam import *
        >>> parameterstep()
        >>> fluxes.requiredremoterelease = 2.0
        >>> fluxes.actualrelease = 1.0
        >>> model.calc_missingremoterelease_v1()
        >>> fluxes.missingremoterelease
        missingremoterelease(1.0)
        >>> fluxes.actualrelease = 3.0
        >>> model.calc_missingremoterelease_v1()
        >>> fluxes.missingremoterelease
        missingremoterelease(0.0)
    """
    flu = self.sequences.fluxes.fastaccess
    # Demand not covered by the actual release, clipped at zero so an
    # over-fulfilled demand never yields a negative shortfall.
    shortfall = flu.requiredremoterelease - flu.actualrelease
    flu.missingremoterelease = shortfall if shortfall > 0. else 0.
constant[Calculate the portion of the required remote demand that could not
be met by the actual discharge release.
Required flux sequences:
|RequiredRemoteRelease|
|ActualRelease|
Calculated flux sequence:
|MissingRemoteRelease|
Basic equation:
:math:`MissingRemoteRelease = max(
RequiredRemoteRelease-ActualRelease, 0)`
Example:
>>> from hydpy.models.dam import *
>>> parameterstep()
>>> fluxes.requiredremoterelease = 2.0
>>> fluxes.actualrelease = 1.0
>>> model.calc_missingremoterelease_v1()
>>> fluxes.missingremoterelease
missingremoterelease(1.0)
>>> fluxes.actualrelease = 3.0
>>> model.calc_missingremoterelease_v1()
>>> fluxes.missingremoterelease
missingremoterelease(0.0)
]
variable[flu] assign[=] name[self].sequences.fluxes.fastaccess
name[flu].missingremoterelease assign[=] call[name[max], parameter[binary_operation[name[flu].requiredremoterelease - name[flu].actualrelease], constant[0.0]]] | keyword[def] identifier[calc_missingremoterelease_v1] ( identifier[self] ):
literal[string]
identifier[flu] = identifier[self] . identifier[sequences] . identifier[fluxes] . identifier[fastaccess]
identifier[flu] . identifier[missingremoterelease] = identifier[max] (
identifier[flu] . identifier[requiredremoterelease] - identifier[flu] . identifier[actualrelease] , literal[int] ) | def calc_missingremoterelease_v1(self):
"""Calculate the portion of the required remote demand that could not
be met by the actual discharge release.
Required flux sequences:
|RequiredRemoteRelease|
|ActualRelease|
Calculated flux sequence:
|MissingRemoteRelease|
Basic equation:
:math:`MissingRemoteRelease = max(
RequiredRemoteRelease-ActualRelease, 0)`
Example:
>>> from hydpy.models.dam import *
>>> parameterstep()
>>> fluxes.requiredremoterelease = 2.0
>>> fluxes.actualrelease = 1.0
>>> model.calc_missingremoterelease_v1()
>>> fluxes.missingremoterelease
missingremoterelease(1.0)
>>> fluxes.actualrelease = 3.0
>>> model.calc_missingremoterelease_v1()
>>> fluxes.missingremoterelease
missingremoterelease(0.0)
"""
flu = self.sequences.fluxes.fastaccess
flu.missingremoterelease = max(flu.requiredremoterelease - flu.actualrelease, 0.0) |
def stack(xs, dim_name, axis=0, name=None):
    """Stack multiple Tensors to make a new dimension.

    Args:
      xs: a list of Tensors with identical shapes.
      dim_name: a string (name of the new dimension)
      axis: an integer (index of the new dimension in the output shape)
      name: an optional string

    Returns:
      a Tensor
    """
    # StackOperation produces exactly one output: the stacked tensor.
    return StackOperation(xs, dim_name, axis, name).outputs[0]
constant[Stack multiple Tensors to make a new dimension.
Args:
xs: a list of Tensors with identical shapes.
dim_name: a string (name of the new dimension)
axis: an integer (index of the new dimension in the output shape)
name: an optional string
Returns:
a Tensor
]
variable[ret] assign[=] call[call[name[StackOperation], parameter[name[xs], name[dim_name], name[axis], name[name]]].outputs][constant[0]]
return[name[ret]] | keyword[def] identifier[stack] ( identifier[xs] , identifier[dim_name] , identifier[axis] = literal[int] , identifier[name] = keyword[None] ):
literal[string]
identifier[ret] = identifier[StackOperation] ( identifier[xs] , identifier[dim_name] , identifier[axis] , identifier[name] ). identifier[outputs] [ literal[int] ]
keyword[return] identifier[ret] | def stack(xs, dim_name, axis=0, name=None):
"""Stack multiple Tensors to make a new dimension.
Args:
xs: a list of Tensors with identical shapes.
dim_name: a string (name of the new dimension)
axis: an integer (index of the new dimension in the output shape)
name: an optional string
Returns:
a Tensor
"""
ret = StackOperation(xs, dim_name, axis, name).outputs[0]
return ret |
def regressfile(filename):
    """
    Run all stories in filename 'filename' in python 2 and 3.
    """
    py2_params = {"python version": "2.7.14"}
    py3_params = {"python version": "3.7.0"}
    # Python 2 run: skip stories explicitly marked as failing on 2.x.
    _storybook({"rewrite": False}).in_filename(filename).with_params(
        **py2_params
    ).filter(
        lambda story: not story.info.get("fails_on_python_2")
    ).ordered_by_name().play()
    # Python 3 run: play every story in the file.
    _storybook({"rewrite": False}).with_params(
        **py3_params
    ).in_filename(filename).ordered_by_name().play()
constant[
Run all stories in filename 'filename' in python 2 and 3.
]
call[call[call[call[call[call[name[_storybook], parameter[dictionary[[<ast.Constant object at 0x7da20c6e5720>], [<ast.Constant object at 0x7da20c6e7820>]]]].in_filename, parameter[name[filename]]].with_params, parameter[]].filter, parameter[<ast.Lambda object at 0x7da18fe923e0>]].ordered_by_name, parameter[]].play, parameter[]]
call[call[call[call[call[name[_storybook], parameter[dictionary[[<ast.Constant object at 0x7da20c6e7fa0>], [<ast.Constant object at 0x7da20c6e5b40>]]]].with_params, parameter[]].in_filename, parameter[name[filename]]].ordered_by_name, parameter[]].play, parameter[]] | keyword[def] identifier[regressfile] ( identifier[filename] ):
literal[string]
identifier[_storybook] ({ literal[string] : keyword[False] }). identifier[in_filename] ( identifier[filename] ). identifier[with_params] (
**{ literal[string] : literal[string] }
). identifier[filter] (
keyword[lambda] identifier[story] : keyword[not] identifier[story] . identifier[info] . identifier[get] ( literal[string] )
). identifier[ordered_by_name] (). identifier[play] ()
identifier[_storybook] ({ literal[string] : keyword[False] }). identifier[with_params] (
**{ literal[string] : literal[string] }
). identifier[in_filename] ( identifier[filename] ). identifier[ordered_by_name] (). identifier[play] () | def regressfile(filename):
"""
Run all stories in filename 'filename' in python 2 and 3.
"""
_storybook({'rewrite': False}).in_filename(filename).with_params(**{'python version': '2.7.14'}).filter(lambda story: not story.info.get('fails_on_python_2')).ordered_by_name().play()
_storybook({'rewrite': False}).with_params(**{'python version': '3.7.0'}).in_filename(filename).ordered_by_name().play() |
def msg_body_for_event(event, context):
    """
    Generate the JSON-serialized message body for an event.
    :param event: Lambda event that triggered the handler
    :type event: dict
    :param context: Lambda function context - see
      http://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html
    :return: JSON-serialized success response
    :rtype: str
    """
    # GET requests carry their input in the querystring; anything else
    # (POST) carries it in the JSON body.
    if event.get('context', {}).get('http-method', None) == 'GET':
        data = event.get('params', {}).get('querystring', {})
    else:
        data = event.get('body-json', {})
    # Assemble and serialize the message to enqueue; sort_keys keeps the
    # output deterministic.
    msg = json.dumps(
        {
            'data': serializable_dict(data),
            'event': serializable_dict(event),
            'context': serializable_dict(vars(context))
        },
        sort_keys=True
    )
    logger.debug('Message to enqueue: %s', msg)
    return msg
constant[
Generate the JSON-serialized message body for an event.
:param event: Lambda event that triggered the handler
:type event: dict
:param context: Lambda function context - see
http://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html
:return: JSON-serialized success response
:rtype: str
]
variable[http_method] assign[=] call[call[name[event].get, parameter[constant[context], dictionary[[], []]]].get, parameter[constant[http-method], constant[None]]]
if compare[name[http_method] equal[==] constant[GET]] begin[:]
variable[data] assign[=] call[call[name[event].get, parameter[constant[params], dictionary[[], []]]].get, parameter[constant[querystring], dictionary[[], []]]]
variable[msg_dict] assign[=] dictionary[[<ast.Constant object at 0x7da1b0a05090>, <ast.Constant object at 0x7da1b0a05810>, <ast.Constant object at 0x7da1b0a06a10>], [<ast.Call object at 0x7da1b0a05e70>, <ast.Call object at 0x7da1b0a05fc0>, <ast.Call object at 0x7da1b0a06620>]]
variable[msg] assign[=] call[name[json].dumps, parameter[name[msg_dict]]]
call[name[logger].debug, parameter[constant[Message to enqueue: %s], name[msg]]]
return[name[msg]] | keyword[def] identifier[msg_body_for_event] ( identifier[event] , identifier[context] ):
literal[string]
identifier[http_method] = identifier[event] . identifier[get] ( literal[string] ,{}). identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[http_method] == literal[string] :
identifier[data] = identifier[event] . identifier[get] ( literal[string] ,{}). identifier[get] ( literal[string] ,{})
keyword[else] :
identifier[data] = identifier[event] . identifier[get] ( literal[string] ,{})
identifier[msg_dict] ={
literal[string] : identifier[serializable_dict] ( identifier[data] ),
literal[string] : identifier[serializable_dict] ( identifier[event] ),
literal[string] : identifier[serializable_dict] ( identifier[vars] ( identifier[context] ))
}
identifier[msg] = identifier[json] . identifier[dumps] ( identifier[msg_dict] , identifier[sort_keys] = keyword[True] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[msg] )
keyword[return] identifier[msg] | def msg_body_for_event(event, context):
"""
Generate the JSON-serialized message body for an event.
:param event: Lambda event that triggered the handler
:type event: dict
:param context: Lambda function context - see
http://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html
:return: JSON-serialized success response
:rtype: str
"""
# find the actual input data - this differs between GET and POST
http_method = event.get('context', {}).get('http-method', None)
if http_method == 'GET':
data = event.get('params', {}).get('querystring', {}) # depends on [control=['if'], data=[]]
else: # POST
data = event.get('body-json', {})
# build the message to enqueue
msg_dict = {'data': serializable_dict(data), 'event': serializable_dict(event), 'context': serializable_dict(vars(context))}
msg = json.dumps(msg_dict, sort_keys=True)
logger.debug('Message to enqueue: %s', msg)
return msg |
def create_logger(name, level='INFO'):
"""
Creates a new ready-to-use logger.
:param name: new logger's name
:type name: str
:param level: default logging level.
:type level: :class:`str` or :class:`int`
:return: new logger.
:rtype: :class:`logging.Logger`
"""
formatter = ColorFormatter(LOG_FORMAT, DATE_FORMAT)
if not isinstance(logging.getLevelName(level), int):
level = 'INFO'
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger = logging.getLogger(name)
logger.setLevel(level)
logger.addHandler(handler)
return logger | def function[create_logger, parameter[name, level]]:
constant[
Creates a new ready-to-use logger.
:param name: new logger's name
:type name: str
:param level: default logging level.
:type level: :class:`str` or :class:`int`
:return: new logger.
:rtype: :class:`logging.Logger`
]
variable[formatter] assign[=] call[name[ColorFormatter], parameter[name[LOG_FORMAT], name[DATE_FORMAT]]]
if <ast.UnaryOp object at 0x7da18f8136d0> begin[:]
variable[level] assign[=] constant[INFO]
variable[handler] assign[=] call[name[logging].StreamHandler, parameter[]]
call[name[handler].setFormatter, parameter[name[formatter]]]
variable[logger] assign[=] call[name[logging].getLogger, parameter[name[name]]]
call[name[logger].setLevel, parameter[name[level]]]
call[name[logger].addHandler, parameter[name[handler]]]
return[name[logger]] | keyword[def] identifier[create_logger] ( identifier[name] , identifier[level] = literal[string] ):
literal[string]
identifier[formatter] = identifier[ColorFormatter] ( identifier[LOG_FORMAT] , identifier[DATE_FORMAT] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[logging] . identifier[getLevelName] ( identifier[level] ), identifier[int] ):
identifier[level] = literal[string]
identifier[handler] = identifier[logging] . identifier[StreamHandler] ()
identifier[handler] . identifier[setFormatter] ( identifier[formatter] )
identifier[logger] = identifier[logging] . identifier[getLogger] ( identifier[name] )
identifier[logger] . identifier[setLevel] ( identifier[level] )
identifier[logger] . identifier[addHandler] ( identifier[handler] )
keyword[return] identifier[logger] | def create_logger(name, level='INFO'):
"""
Creates a new ready-to-use logger.
:param name: new logger's name
:type name: str
:param level: default logging level.
:type level: :class:`str` or :class:`int`
:return: new logger.
:rtype: :class:`logging.Logger`
"""
formatter = ColorFormatter(LOG_FORMAT, DATE_FORMAT)
if not isinstance(logging.getLevelName(level), int):
level = 'INFO' # depends on [control=['if'], data=[]]
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger = logging.getLogger(name)
logger.setLevel(level)
logger.addHandler(handler)
return logger |
def season(self):
"""
Returns the year ID of the season in which this game took place.
Useful for week 17 January games.
:returns: An int representing the year of the season.
"""
date = self.date()
return date.year - 1 if date.month <= 3 else date.year | def function[season, parameter[self]]:
constant[
Returns the year ID of the season in which this game took place.
Useful for week 17 January games.
:returns: An int representing the year of the season.
]
variable[date] assign[=] call[name[self].date, parameter[]]
return[<ast.IfExp object at 0x7da1b01c28c0>] | keyword[def] identifier[season] ( identifier[self] ):
literal[string]
identifier[date] = identifier[self] . identifier[date] ()
keyword[return] identifier[date] . identifier[year] - literal[int] keyword[if] identifier[date] . identifier[month] <= literal[int] keyword[else] identifier[date] . identifier[year] | def season(self):
"""
Returns the year ID of the season in which this game took place.
Useful for week 17 January games.
:returns: An int representing the year of the season.
"""
date = self.date()
return date.year - 1 if date.month <= 3 else date.year |
def GetMessages(self, soft_size_limit=None):
"""Retrieves and removes the messages from the queue.
Args:
soft_size_limit: int If there is more data in the queue than
soft_size_limit bytes, the returned list of messages will be
approximately this large. If None (default), returns all messages
currently on the queue.
Returns:
rdf_flows.MessageList A list of messages that were .Put on the queue
earlier.
"""
with self._lock:
ret = rdf_flows.MessageList()
ret_size = 0
for message in self._Generate():
self._total_size -= len(message)
ret.job.append(rdf_flows.GrrMessage.FromSerializedString(message))
ret_size += len(message)
if soft_size_limit is not None and ret_size > soft_size_limit:
break
return ret | def function[GetMessages, parameter[self, soft_size_limit]]:
constant[Retrieves and removes the messages from the queue.
Args:
soft_size_limit: int If there is more data in the queue than
soft_size_limit bytes, the returned list of messages will be
approximately this large. If None (default), returns all messages
currently on the queue.
Returns:
rdf_flows.MessageList A list of messages that were .Put on the queue
earlier.
]
with name[self]._lock begin[:]
variable[ret] assign[=] call[name[rdf_flows].MessageList, parameter[]]
variable[ret_size] assign[=] constant[0]
for taget[name[message]] in starred[call[name[self]._Generate, parameter[]]] begin[:]
<ast.AugAssign object at 0x7da1b1b45840>
call[name[ret].job.append, parameter[call[name[rdf_flows].GrrMessage.FromSerializedString, parameter[name[message]]]]]
<ast.AugAssign object at 0x7da1b1b476a0>
if <ast.BoolOp object at 0x7da1b1b45870> begin[:]
break
return[name[ret]] | keyword[def] identifier[GetMessages] ( identifier[self] , identifier[soft_size_limit] = keyword[None] ):
literal[string]
keyword[with] identifier[self] . identifier[_lock] :
identifier[ret] = identifier[rdf_flows] . identifier[MessageList] ()
identifier[ret_size] = literal[int]
keyword[for] identifier[message] keyword[in] identifier[self] . identifier[_Generate] ():
identifier[self] . identifier[_total_size] -= identifier[len] ( identifier[message] )
identifier[ret] . identifier[job] . identifier[append] ( identifier[rdf_flows] . identifier[GrrMessage] . identifier[FromSerializedString] ( identifier[message] ))
identifier[ret_size] += identifier[len] ( identifier[message] )
keyword[if] identifier[soft_size_limit] keyword[is] keyword[not] keyword[None] keyword[and] identifier[ret_size] > identifier[soft_size_limit] :
keyword[break]
keyword[return] identifier[ret] | def GetMessages(self, soft_size_limit=None):
"""Retrieves and removes the messages from the queue.
Args:
soft_size_limit: int If there is more data in the queue than
soft_size_limit bytes, the returned list of messages will be
approximately this large. If None (default), returns all messages
currently on the queue.
Returns:
rdf_flows.MessageList A list of messages that were .Put on the queue
earlier.
"""
with self._lock:
ret = rdf_flows.MessageList()
ret_size = 0
for message in self._Generate():
self._total_size -= len(message)
ret.job.append(rdf_flows.GrrMessage.FromSerializedString(message))
ret_size += len(message)
if soft_size_limit is not None and ret_size > soft_size_limit:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['message']]
return ret # depends on [control=['with'], data=[]] |
def _print_stats(cls, stats: Statistics, human_format_speed: bool=True):
'''Log the final statistics to the user.'''
time_length = datetime.timedelta(
seconds=int(stats.stop_time - stats.start_time)
)
file_size = wpull.string.format_size(stats.size)
if stats.bandwidth_meter.num_samples:
speed = stats.bandwidth_meter.speed()
if human_format_speed:
speed_size_str = wpull.string.format_size(speed)
else:
speed_size_str = '{:.1f} b'.format(speed * 8)
else:
speed_size_str = _('-- B')
_logger.info(_('FINISHED.'))
_logger.info(__(
_(
'Duration: {preformatted_timedelta}. '
'Speed: {preformatted_speed_size}/s.'
),
preformatted_timedelta=time_length,
preformatted_speed_size=speed_size_str,
))
_logger.info(__(
gettext.ngettext(
'Downloaded: {num_files} file, {preformatted_file_size}.',
'Downloaded: {num_files} files, {preformatted_file_size}.',
stats.files
),
num_files=stats.files,
preformatted_file_size=file_size
))
if stats.is_quota_exceeded:
_logger.info(_('Download quota exceeded.')) | def function[_print_stats, parameter[cls, stats, human_format_speed]]:
constant[Log the final statistics to the user.]
variable[time_length] assign[=] call[name[datetime].timedelta, parameter[]]
variable[file_size] assign[=] call[name[wpull].string.format_size, parameter[name[stats].size]]
if name[stats].bandwidth_meter.num_samples begin[:]
variable[speed] assign[=] call[name[stats].bandwidth_meter.speed, parameter[]]
if name[human_format_speed] begin[:]
variable[speed_size_str] assign[=] call[name[wpull].string.format_size, parameter[name[speed]]]
call[name[_logger].info, parameter[call[name[_], parameter[constant[FINISHED.]]]]]
call[name[_logger].info, parameter[call[name[__], parameter[call[name[_], parameter[constant[Duration: {preformatted_timedelta}. Speed: {preformatted_speed_size}/s.]]]]]]]
call[name[_logger].info, parameter[call[name[__], parameter[call[name[gettext].ngettext, parameter[constant[Downloaded: {num_files} file, {preformatted_file_size}.], constant[Downloaded: {num_files} files, {preformatted_file_size}.], name[stats].files]]]]]]
if name[stats].is_quota_exceeded begin[:]
call[name[_logger].info, parameter[call[name[_], parameter[constant[Download quota exceeded.]]]]] | keyword[def] identifier[_print_stats] ( identifier[cls] , identifier[stats] : identifier[Statistics] , identifier[human_format_speed] : identifier[bool] = keyword[True] ):
literal[string]
identifier[time_length] = identifier[datetime] . identifier[timedelta] (
identifier[seconds] = identifier[int] ( identifier[stats] . identifier[stop_time] - identifier[stats] . identifier[start_time] )
)
identifier[file_size] = identifier[wpull] . identifier[string] . identifier[format_size] ( identifier[stats] . identifier[size] )
keyword[if] identifier[stats] . identifier[bandwidth_meter] . identifier[num_samples] :
identifier[speed] = identifier[stats] . identifier[bandwidth_meter] . identifier[speed] ()
keyword[if] identifier[human_format_speed] :
identifier[speed_size_str] = identifier[wpull] . identifier[string] . identifier[format_size] ( identifier[speed] )
keyword[else] :
identifier[speed_size_str] = literal[string] . identifier[format] ( identifier[speed] * literal[int] )
keyword[else] :
identifier[speed_size_str] = identifier[_] ( literal[string] )
identifier[_logger] . identifier[info] ( identifier[_] ( literal[string] ))
identifier[_logger] . identifier[info] ( identifier[__] (
identifier[_] (
literal[string]
literal[string]
),
identifier[preformatted_timedelta] = identifier[time_length] ,
identifier[preformatted_speed_size] = identifier[speed_size_str] ,
))
identifier[_logger] . identifier[info] ( identifier[__] (
identifier[gettext] . identifier[ngettext] (
literal[string] ,
literal[string] ,
identifier[stats] . identifier[files]
),
identifier[num_files] = identifier[stats] . identifier[files] ,
identifier[preformatted_file_size] = identifier[file_size]
))
keyword[if] identifier[stats] . identifier[is_quota_exceeded] :
identifier[_logger] . identifier[info] ( identifier[_] ( literal[string] )) | def _print_stats(cls, stats: Statistics, human_format_speed: bool=True):
"""Log the final statistics to the user."""
time_length = datetime.timedelta(seconds=int(stats.stop_time - stats.start_time))
file_size = wpull.string.format_size(stats.size)
if stats.bandwidth_meter.num_samples:
speed = stats.bandwidth_meter.speed()
if human_format_speed:
speed_size_str = wpull.string.format_size(speed) # depends on [control=['if'], data=[]]
else:
speed_size_str = '{:.1f} b'.format(speed * 8) # depends on [control=['if'], data=[]]
else:
speed_size_str = _('-- B')
_logger.info(_('FINISHED.'))
_logger.info(__(_('Duration: {preformatted_timedelta}. Speed: {preformatted_speed_size}/s.'), preformatted_timedelta=time_length, preformatted_speed_size=speed_size_str))
_logger.info(__(gettext.ngettext('Downloaded: {num_files} file, {preformatted_file_size}.', 'Downloaded: {num_files} files, {preformatted_file_size}.', stats.files), num_files=stats.files, preformatted_file_size=file_size))
if stats.is_quota_exceeded:
_logger.info(_('Download quota exceeded.')) # depends on [control=['if'], data=[]] |
def find_by_conversation(cls, conversation_id, limit=None, reversed=None, before_time=None, before_message_id=None):
# type: (str, Optional[int], Optional[bool], Optional[Union[datetime, float]], Optional[str]) -> List[Message]
"""获取某个对话中的聊天记录
:param conversation_id: 对话 id
:param limit: 返回条数限制,可选,服务端默认 100 条,最大 1000 条
:param reversed: 以默认排序(查找更老的历史消息)相反的方向返回结果(也即从某条消息记录开始查找更新的消息),服务端默认为 False
如果 reversed = True,则 before_time/before_message_id 转变成最老的消息的时间戳和 message_id,
否则还是指最新的消息的时间戳和 message_id。
:param before_time: 查询起始的时间戳,返回小于这个时间(不包含)的记录,服务端默认是当前时间
:param before_message_id: 起始的消息 id,使用时必须加上对应消息的时间 before_time 参数,一起作为查询的起点
:return: 符合条件的聊天记录
"""
query_params = {} # type: Dict[str, Any]
query_params['convid'] = conversation_id
if limit is not None:
query_params['limit'] = limit
if reversed is not None:
query_params['reversed'] = reversed
if isinstance(before_time, datetime):
query_params['max_ts'] = round(before_time.timestamp() * 1000)
elif isinstance(before_time, six.integer_types) or isinstance(before_time, float):
query_params['max_ts'] = round(before_time * 1000)
if before_message_id is not None:
query_params['msgid'] = before_message_id
return list(cls._find(query_params)) | def function[find_by_conversation, parameter[cls, conversation_id, limit, reversed, before_time, before_message_id]]:
constant[获取某个对话中的聊天记录
:param conversation_id: 对话 id
:param limit: 返回条数限制,可选,服务端默认 100 条,最大 1000 条
:param reversed: 以默认排序(查找更老的历史消息)相反的方向返回结果(也即从某条消息记录开始查找更新的消息),服务端默认为 False
如果 reversed = True,则 before_time/before_message_id 转变成最老的消息的时间戳和 message_id,
否则还是指最新的消息的时间戳和 message_id。
:param before_time: 查询起始的时间戳,返回小于这个时间(不包含)的记录,服务端默认是当前时间
:param before_message_id: 起始的消息 id,使用时必须加上对应消息的时间 before_time 参数,一起作为查询的起点
:return: 符合条件的聊天记录
]
variable[query_params] assign[=] dictionary[[], []]
call[name[query_params]][constant[convid]] assign[=] name[conversation_id]
if compare[name[limit] is_not constant[None]] begin[:]
call[name[query_params]][constant[limit]] assign[=] name[limit]
if compare[name[reversed] is_not constant[None]] begin[:]
call[name[query_params]][constant[reversed]] assign[=] name[reversed]
if call[name[isinstance], parameter[name[before_time], name[datetime]]] begin[:]
call[name[query_params]][constant[max_ts]] assign[=] call[name[round], parameter[binary_operation[call[name[before_time].timestamp, parameter[]] * constant[1000]]]]
if compare[name[before_message_id] is_not constant[None]] begin[:]
call[name[query_params]][constant[msgid]] assign[=] name[before_message_id]
return[call[name[list], parameter[call[name[cls]._find, parameter[name[query_params]]]]]] | keyword[def] identifier[find_by_conversation] ( identifier[cls] , identifier[conversation_id] , identifier[limit] = keyword[None] , identifier[reversed] = keyword[None] , identifier[before_time] = keyword[None] , identifier[before_message_id] = keyword[None] ):
literal[string]
identifier[query_params] ={}
identifier[query_params] [ literal[string] ]= identifier[conversation_id]
keyword[if] identifier[limit] keyword[is] keyword[not] keyword[None] :
identifier[query_params] [ literal[string] ]= identifier[limit]
keyword[if] identifier[reversed] keyword[is] keyword[not] keyword[None] :
identifier[query_params] [ literal[string] ]= identifier[reversed]
keyword[if] identifier[isinstance] ( identifier[before_time] , identifier[datetime] ):
identifier[query_params] [ literal[string] ]= identifier[round] ( identifier[before_time] . identifier[timestamp] ()* literal[int] )
keyword[elif] identifier[isinstance] ( identifier[before_time] , identifier[six] . identifier[integer_types] ) keyword[or] identifier[isinstance] ( identifier[before_time] , identifier[float] ):
identifier[query_params] [ literal[string] ]= identifier[round] ( identifier[before_time] * literal[int] )
keyword[if] identifier[before_message_id] keyword[is] keyword[not] keyword[None] :
identifier[query_params] [ literal[string] ]= identifier[before_message_id]
keyword[return] identifier[list] ( identifier[cls] . identifier[_find] ( identifier[query_params] )) | def find_by_conversation(cls, conversation_id, limit=None, reversed=None, before_time=None, before_message_id=None):
# type: (str, Optional[int], Optional[bool], Optional[Union[datetime, float]], Optional[str]) -> List[Message]
'获取某个对话中的聊天记录\n\n :param conversation_id: 对话 id\n :param limit: 返回条数限制,可选,服务端默认 100 条,最大 1000 条\n :param reversed: 以默认排序(查找更老的历史消息)相反的方向返回结果(也即从某条消息记录开始查找更新的消息),服务端默认为 False\n 如果 reversed = True,则 before_time/before_message_id 转变成最老的消息的时间戳和 message_id,\n 否则还是指最新的消息的时间戳和 message_id。\n :param before_time: 查询起始的时间戳,返回小于这个时间(不包含)的记录,服务端默认是当前时间\n :param before_message_id: 起始的消息 id,使用时必须加上对应消息的时间 before_time 参数,一起作为查询的起点\n :return: 符合条件的聊天记录\n '
query_params = {} # type: Dict[str, Any]
query_params['convid'] = conversation_id
if limit is not None:
query_params['limit'] = limit # depends on [control=['if'], data=['limit']]
if reversed is not None:
query_params['reversed'] = reversed # depends on [control=['if'], data=['reversed']]
if isinstance(before_time, datetime):
query_params['max_ts'] = round(before_time.timestamp() * 1000) # depends on [control=['if'], data=[]]
elif isinstance(before_time, six.integer_types) or isinstance(before_time, float):
query_params['max_ts'] = round(before_time * 1000) # depends on [control=['if'], data=[]]
if before_message_id is not None:
query_params['msgid'] = before_message_id # depends on [control=['if'], data=['before_message_id']]
return list(cls._find(query_params)) |
def extend_rows(self, list_or_dict):
""" Add multiple rows at once
:param list_or_dict: a 2 dimensional structure for adding multiple rows at once
:return:
"""
if isinstance(list_or_dict, list):
for r in list_or_dict:
self.add_row(r)
else:
for k,r in list_or_dict.iteritems():
self.add_row(r, k) | def function[extend_rows, parameter[self, list_or_dict]]:
constant[ Add multiple rows at once
:param list_or_dict: a 2 dimensional structure for adding multiple rows at once
:return:
]
if call[name[isinstance], parameter[name[list_or_dict], name[list]]] begin[:]
for taget[name[r]] in starred[name[list_or_dict]] begin[:]
call[name[self].add_row, parameter[name[r]]] | keyword[def] identifier[extend_rows] ( identifier[self] , identifier[list_or_dict] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[list_or_dict] , identifier[list] ):
keyword[for] identifier[r] keyword[in] identifier[list_or_dict] :
identifier[self] . identifier[add_row] ( identifier[r] )
keyword[else] :
keyword[for] identifier[k] , identifier[r] keyword[in] identifier[list_or_dict] . identifier[iteritems] ():
identifier[self] . identifier[add_row] ( identifier[r] , identifier[k] ) | def extend_rows(self, list_or_dict):
""" Add multiple rows at once
:param list_or_dict: a 2 dimensional structure for adding multiple rows at once
:return:
"""
if isinstance(list_or_dict, list):
for r in list_or_dict:
self.add_row(r) # depends on [control=['for'], data=['r']] # depends on [control=['if'], data=[]]
else:
for (k, r) in list_or_dict.iteritems():
self.add_row(r, k) # depends on [control=['for'], data=[]] |
def get_klass_parents(gi_name):
'''
Returns a sorted list of qualified symbols representing
the parents of the klass-like symbol named gi_name
'''
res = []
parents = __HIERARCHY_GRAPH.predecessors(gi_name)
if not parents:
return []
__get_parent_link_recurse(parents[0], res)
return res | def function[get_klass_parents, parameter[gi_name]]:
constant[
Returns a sorted list of qualified symbols representing
the parents of the klass-like symbol named gi_name
]
variable[res] assign[=] list[[]]
variable[parents] assign[=] call[name[__HIERARCHY_GRAPH].predecessors, parameter[name[gi_name]]]
if <ast.UnaryOp object at 0x7da20c6abc40> begin[:]
return[list[[]]]
call[name[__get_parent_link_recurse], parameter[call[name[parents]][constant[0]], name[res]]]
return[name[res]] | keyword[def] identifier[get_klass_parents] ( identifier[gi_name] ):
literal[string]
identifier[res] =[]
identifier[parents] = identifier[__HIERARCHY_GRAPH] . identifier[predecessors] ( identifier[gi_name] )
keyword[if] keyword[not] identifier[parents] :
keyword[return] []
identifier[__get_parent_link_recurse] ( identifier[parents] [ literal[int] ], identifier[res] )
keyword[return] identifier[res] | def get_klass_parents(gi_name):
"""
Returns a sorted list of qualified symbols representing
the parents of the klass-like symbol named gi_name
"""
res = []
parents = __HIERARCHY_GRAPH.predecessors(gi_name)
if not parents:
return [] # depends on [control=['if'], data=[]]
__get_parent_link_recurse(parents[0], res)
return res |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.