code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def resolve_attr(obj, name):
"""A custom attrgetter that operates both on dictionaries and objects"""
# TODO: setup some hinting, so we can go directly to the correct
# Maybe it's a dict ? Let's try dict lookup, it's the fastest
try:
return obj[name]
except TypeError:
pass
except KeyError:
raise exceptions.MissingField('Dict {0} has no attribute or key "{1}"'.format(obj, name))
# Okay, it's not a dict, what if we try to access the value as for a regular object attribute?
try:
# Slight hack for better speed, since accessing dict is fast
return obj.__dict__[name]
except (KeyError, AttributeError):
pass
try:
# Lookup using regular attribute
return getattr(obj, name)
except AttributeError:
pass
# Last possible choice, it's an iterable
if isinstance(obj, collections.Iterable):
return IterableAttr(obj, name)
raise exceptions.MissingField('Object {0} has no attribute or key "{1}"'.format(obj, name)) | def function[resolve_attr, parameter[obj, name]]:
constant[A custom attrgetter that operates both on dictionaries and objects]
<ast.Try object at 0x7da1b1952830>
<ast.Try object at 0x7da1b1952cb0>
<ast.Try object at 0x7da1b1952fe0>
if call[name[isinstance], parameter[name[obj], name[collections].Iterable]] begin[:]
return[call[name[IterableAttr], parameter[name[obj], name[name]]]]
<ast.Raise object at 0x7da1b19536d0> | keyword[def] identifier[resolve_attr] ( identifier[obj] , identifier[name] ):
literal[string]
keyword[try] :
keyword[return] identifier[obj] [ identifier[name] ]
keyword[except] identifier[TypeError] :
keyword[pass]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[exceptions] . identifier[MissingField] ( literal[string] . identifier[format] ( identifier[obj] , identifier[name] ))
keyword[try] :
keyword[return] identifier[obj] . identifier[__dict__] [ identifier[name] ]
keyword[except] ( identifier[KeyError] , identifier[AttributeError] ):
keyword[pass]
keyword[try] :
keyword[return] identifier[getattr] ( identifier[obj] , identifier[name] )
keyword[except] identifier[AttributeError] :
keyword[pass]
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[collections] . identifier[Iterable] ):
keyword[return] identifier[IterableAttr] ( identifier[obj] , identifier[name] )
keyword[raise] identifier[exceptions] . identifier[MissingField] ( literal[string] . identifier[format] ( identifier[obj] , identifier[name] )) | def resolve_attr(obj, name):
"""A custom attrgetter that operates both on dictionaries and objects"""
# TODO: setup some hinting, so we can go directly to the correct
# Maybe it's a dict ? Let's try dict lookup, it's the fastest
try:
return obj[name] # depends on [control=['try'], data=[]]
except TypeError:
pass # depends on [control=['except'], data=[]]
except KeyError:
raise exceptions.MissingField('Dict {0} has no attribute or key "{1}"'.format(obj, name)) # depends on [control=['except'], data=[]]
# Okay, it's not a dict, what if we try to access the value as for a regular object attribute?
try:
# Slight hack for better speed, since accessing dict is fast
return obj.__dict__[name] # depends on [control=['try'], data=[]]
except (KeyError, AttributeError):
pass # depends on [control=['except'], data=[]]
try:
# Lookup using regular attribute
return getattr(obj, name) # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]]
# Last possible choice, it's an iterable
if isinstance(obj, collections.Iterable):
return IterableAttr(obj, name) # depends on [control=['if'], data=[]]
raise exceptions.MissingField('Object {0} has no attribute or key "{1}"'.format(obj, name)) |
def F_beta(self, beta):
"""
Calculate FBeta score.
:param beta: beta parameter
:type beta : float
:return: FBeta score for classes as dict
"""
try:
F_dict = {}
for i in self.TP.keys():
F_dict[i] = F_calc(
TP=self.TP[i],
FP=self.FP[i],
FN=self.FN[i],
beta=beta)
return F_dict
except Exception:
return {} | def function[F_beta, parameter[self, beta]]:
constant[
Calculate FBeta score.
:param beta: beta parameter
:type beta : float
:return: FBeta score for classes as dict
]
<ast.Try object at 0x7da1b2345540> | keyword[def] identifier[F_beta] ( identifier[self] , identifier[beta] ):
literal[string]
keyword[try] :
identifier[F_dict] ={}
keyword[for] identifier[i] keyword[in] identifier[self] . identifier[TP] . identifier[keys] ():
identifier[F_dict] [ identifier[i] ]= identifier[F_calc] (
identifier[TP] = identifier[self] . identifier[TP] [ identifier[i] ],
identifier[FP] = identifier[self] . identifier[FP] [ identifier[i] ],
identifier[FN] = identifier[self] . identifier[FN] [ identifier[i] ],
identifier[beta] = identifier[beta] )
keyword[return] identifier[F_dict]
keyword[except] identifier[Exception] :
keyword[return] {} | def F_beta(self, beta):
"""
Calculate FBeta score.
:param beta: beta parameter
:type beta : float
:return: FBeta score for classes as dict
"""
try:
F_dict = {}
for i in self.TP.keys():
F_dict[i] = F_calc(TP=self.TP[i], FP=self.FP[i], FN=self.FN[i], beta=beta) # depends on [control=['for'], data=['i']]
return F_dict # depends on [control=['try'], data=[]]
except Exception:
return {} # depends on [control=['except'], data=[]] |
def amalgamate(colcount, snode, snptr, snpar, snpost, merge_function):
"""
Supernodal amalgamation.
colcount, snode, snptr, snpar, snpost = ...
amalgamate(colcount, snode, snptr, snpar, snpost, merge_function)
PURPOSE
Iterates over the clique tree in topological order and greedily
merges a supernode with its parent if
merge_function(|J_{par(k)}|, |J_k|, |N_{par(k)}|, |N_k|)
returns True.
ARGUMENTS
colcount vector with column counts
snode vector with supernodes
snptr vector with offsets
snpar vector with supernodal parent indices
snpost vector with supernodal post ordering
merge_function
function
RETURNS
colcount vector with amalgamated column counts
snode vector with amalgamated supernodes
snptr vector with amalgamated offsets
snpar vector with amalgamated supernodal parent indices
snpost vector with amalgamated supernodal post ordering
"""
N = len(snpost)
ch = {}
for j in snpost:
if snpar[j] in ch: ch[snpar[j]].append(j)
else: ch[snpar[j]] = [j]
snlist = [snode[snptr[k]:snptr[k+1]] for k in range(N)]
snpar_ = +snpar
colcount_ = +colcount
Ns = N
for k in snpost:
if snpar_[k] != k:
colk = colcount_[snlist[k][0]]
colp = colcount_[snlist[snpar_[k]][0]]
nk = len(snlist[k])
np = len(snlist[snpar_[k]])
if merge_function and merge_function(colp,colk,np,nk):
# merge supernode k and snpar[k]
snlist[snpar_[k]] = matrix(sorted(list(snlist[k]) + list(snlist[snpar_[k]])))
snlist[k] = None
colcount_[snlist[snpar_[k]][0]] = colp + nk
Ns -= 1
if k in ch:
for c in ch[k]:
snpar_[c] = snpar_[k]
ch[snpar_[k]] += ch[k]
snpar_[k] = k
L = [i for i,s in enumerate(snlist) if s is not None]
snptr_ = matrix(0,(len(L)+1,1))
snode_ = +snode
for i,l in enumerate(L):
snptr_[i+1] = snptr_[i] + len(snlist[l])
snode_[snptr_[i]:snptr_[i+1]] = snlist[l]
snpar_ = snpar_[L]
for i in range(len(snpar_)):
snpar_[i] = L.index(snpar_[i])
snpost_ = post_order(snpar_)
return colcount_, snode_, snptr_, snpar_, snpost_ | def function[amalgamate, parameter[colcount, snode, snptr, snpar, snpost, merge_function]]:
constant[
Supernodal amalgamation.
colcount, snode, snptr, snpar, snpost = ...
amalgamate(colcount, snode, snptr, snpar, snpost, merge_function)
PURPOSE
Iterates over the clique tree in topological order and greedily
merges a supernode with its parent if
merge_function(|J_{par(k)}|, |J_k|, |N_{par(k)}|, |N_k|)
returns True.
ARGUMENTS
colcount vector with column counts
snode vector with supernodes
snptr vector with offsets
snpar vector with supernodal parent indices
snpost vector with supernodal post ordering
merge_function
function
RETURNS
colcount vector with amalgamated column counts
snode vector with amalgamated supernodes
snptr vector with amalgamated offsets
snpar vector with amalgamated supernodal parent indices
snpost vector with amalgamated supernodal post ordering
]
variable[N] assign[=] call[name[len], parameter[name[snpost]]]
variable[ch] assign[=] dictionary[[], []]
for taget[name[j]] in starred[name[snpost]] begin[:]
if compare[call[name[snpar]][name[j]] in name[ch]] begin[:]
call[call[name[ch]][call[name[snpar]][name[j]]].append, parameter[name[j]]]
variable[snlist] assign[=] <ast.ListComp object at 0x7da1b26770d0>
variable[snpar_] assign[=] <ast.UnaryOp object at 0x7da1b2676ce0>
variable[colcount_] assign[=] <ast.UnaryOp object at 0x7da1b2676590>
variable[Ns] assign[=] name[N]
for taget[name[k]] in starred[name[snpost]] begin[:]
if compare[call[name[snpar_]][name[k]] not_equal[!=] name[k]] begin[:]
variable[colk] assign[=] call[name[colcount_]][call[call[name[snlist]][name[k]]][constant[0]]]
variable[colp] assign[=] call[name[colcount_]][call[call[name[snlist]][call[name[snpar_]][name[k]]]][constant[0]]]
variable[nk] assign[=] call[name[len], parameter[call[name[snlist]][name[k]]]]
variable[np] assign[=] call[name[len], parameter[call[name[snlist]][call[name[snpar_]][name[k]]]]]
if <ast.BoolOp object at 0x7da1b26776d0> begin[:]
call[name[snlist]][call[name[snpar_]][name[k]]] assign[=] call[name[matrix], parameter[call[name[sorted], parameter[binary_operation[call[name[list], parameter[call[name[snlist]][name[k]]]] + call[name[list], parameter[call[name[snlist]][call[name[snpar_]][name[k]]]]]]]]]]
call[name[snlist]][name[k]] assign[=] constant[None]
call[name[colcount_]][call[call[name[snlist]][call[name[snpar_]][name[k]]]][constant[0]]] assign[=] binary_operation[name[colp] + name[nk]]
<ast.AugAssign object at 0x7da1b2675d20>
if compare[name[k] in name[ch]] begin[:]
for taget[name[c]] in starred[call[name[ch]][name[k]]] begin[:]
call[name[snpar_]][name[c]] assign[=] call[name[snpar_]][name[k]]
<ast.AugAssign object at 0x7da1b26769b0>
call[name[snpar_]][name[k]] assign[=] name[k]
variable[L] assign[=] <ast.ListComp object at 0x7da1b2674c40>
variable[snptr_] assign[=] call[name[matrix], parameter[constant[0], tuple[[<ast.BinOp object at 0x7da1b2674160>, <ast.Constant object at 0x7da1b2674070>]]]]
variable[snode_] assign[=] <ast.UnaryOp object at 0x7da1b2677be0>
for taget[tuple[[<ast.Name object at 0x7da1b2674880>, <ast.Name object at 0x7da1b26748b0>]]] in starred[call[name[enumerate], parameter[name[L]]]] begin[:]
call[name[snptr_]][binary_operation[name[i] + constant[1]]] assign[=] binary_operation[call[name[snptr_]][name[i]] + call[name[len], parameter[call[name[snlist]][name[l]]]]]
call[name[snode_]][<ast.Slice object at 0x7da1b25837f0>] assign[=] call[name[snlist]][name[l]]
variable[snpar_] assign[=] call[name[snpar_]][name[L]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[snpar_]]]]]] begin[:]
call[name[snpar_]][name[i]] assign[=] call[name[L].index, parameter[call[name[snpar_]][name[i]]]]
variable[snpost_] assign[=] call[name[post_order], parameter[name[snpar_]]]
return[tuple[[<ast.Name object at 0x7da1b2583a30>, <ast.Name object at 0x7da1b25839a0>, <ast.Name object at 0x7da1b25812a0>, <ast.Name object at 0x7da1b2582d70>, <ast.Name object at 0x7da1b2583af0>]]] | keyword[def] identifier[amalgamate] ( identifier[colcount] , identifier[snode] , identifier[snptr] , identifier[snpar] , identifier[snpost] , identifier[merge_function] ):
literal[string]
identifier[N] = identifier[len] ( identifier[snpost] )
identifier[ch] ={}
keyword[for] identifier[j] keyword[in] identifier[snpost] :
keyword[if] identifier[snpar] [ identifier[j] ] keyword[in] identifier[ch] : identifier[ch] [ identifier[snpar] [ identifier[j] ]]. identifier[append] ( identifier[j] )
keyword[else] : identifier[ch] [ identifier[snpar] [ identifier[j] ]]=[ identifier[j] ]
identifier[snlist] =[ identifier[snode] [ identifier[snptr] [ identifier[k] ]: identifier[snptr] [ identifier[k] + literal[int] ]] keyword[for] identifier[k] keyword[in] identifier[range] ( identifier[N] )]
identifier[snpar_] =+ identifier[snpar]
identifier[colcount_] =+ identifier[colcount]
identifier[Ns] = identifier[N]
keyword[for] identifier[k] keyword[in] identifier[snpost] :
keyword[if] identifier[snpar_] [ identifier[k] ]!= identifier[k] :
identifier[colk] = identifier[colcount_] [ identifier[snlist] [ identifier[k] ][ literal[int] ]]
identifier[colp] = identifier[colcount_] [ identifier[snlist] [ identifier[snpar_] [ identifier[k] ]][ literal[int] ]]
identifier[nk] = identifier[len] ( identifier[snlist] [ identifier[k] ])
identifier[np] = identifier[len] ( identifier[snlist] [ identifier[snpar_] [ identifier[k] ]])
keyword[if] identifier[merge_function] keyword[and] identifier[merge_function] ( identifier[colp] , identifier[colk] , identifier[np] , identifier[nk] ):
identifier[snlist] [ identifier[snpar_] [ identifier[k] ]]= identifier[matrix] ( identifier[sorted] ( identifier[list] ( identifier[snlist] [ identifier[k] ])+ identifier[list] ( identifier[snlist] [ identifier[snpar_] [ identifier[k] ]])))
identifier[snlist] [ identifier[k] ]= keyword[None]
identifier[colcount_] [ identifier[snlist] [ identifier[snpar_] [ identifier[k] ]][ literal[int] ]]= identifier[colp] + identifier[nk]
identifier[Ns] -= literal[int]
keyword[if] identifier[k] keyword[in] identifier[ch] :
keyword[for] identifier[c] keyword[in] identifier[ch] [ identifier[k] ]:
identifier[snpar_] [ identifier[c] ]= identifier[snpar_] [ identifier[k] ]
identifier[ch] [ identifier[snpar_] [ identifier[k] ]]+= identifier[ch] [ identifier[k] ]
identifier[snpar_] [ identifier[k] ]= identifier[k]
identifier[L] =[ identifier[i] keyword[for] identifier[i] , identifier[s] keyword[in] identifier[enumerate] ( identifier[snlist] ) keyword[if] identifier[s] keyword[is] keyword[not] keyword[None] ]
identifier[snptr_] = identifier[matrix] ( literal[int] ,( identifier[len] ( identifier[L] )+ literal[int] , literal[int] ))
identifier[snode_] =+ identifier[snode]
keyword[for] identifier[i] , identifier[l] keyword[in] identifier[enumerate] ( identifier[L] ):
identifier[snptr_] [ identifier[i] + literal[int] ]= identifier[snptr_] [ identifier[i] ]+ identifier[len] ( identifier[snlist] [ identifier[l] ])
identifier[snode_] [ identifier[snptr_] [ identifier[i] ]: identifier[snptr_] [ identifier[i] + literal[int] ]]= identifier[snlist] [ identifier[l] ]
identifier[snpar_] = identifier[snpar_] [ identifier[L] ]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[snpar_] )):
identifier[snpar_] [ identifier[i] ]= identifier[L] . identifier[index] ( identifier[snpar_] [ identifier[i] ])
identifier[snpost_] = identifier[post_order] ( identifier[snpar_] )
keyword[return] identifier[colcount_] , identifier[snode_] , identifier[snptr_] , identifier[snpar_] , identifier[snpost_] | def amalgamate(colcount, snode, snptr, snpar, snpost, merge_function):
"""
Supernodal amalgamation.
colcount, snode, snptr, snpar, snpost = ...
amalgamate(colcount, snode, snptr, snpar, snpost, merge_function)
PURPOSE
Iterates over the clique tree in topological order and greedily
merges a supernode with its parent if
merge_function(|J_{par(k)}|, |J_k|, |N_{par(k)}|, |N_k|)
returns True.
ARGUMENTS
colcount vector with column counts
snode vector with supernodes
snptr vector with offsets
snpar vector with supernodal parent indices
snpost vector with supernodal post ordering
merge_function
function
RETURNS
colcount vector with amalgamated column counts
snode vector with amalgamated supernodes
snptr vector with amalgamated offsets
snpar vector with amalgamated supernodal parent indices
snpost vector with amalgamated supernodal post ordering
"""
N = len(snpost)
ch = {}
for j in snpost:
if snpar[j] in ch:
ch[snpar[j]].append(j) # depends on [control=['if'], data=['ch']]
else:
ch[snpar[j]] = [j] # depends on [control=['for'], data=['j']]
snlist = [snode[snptr[k]:snptr[k + 1]] for k in range(N)]
snpar_ = +snpar
colcount_ = +colcount
Ns = N
for k in snpost:
if snpar_[k] != k:
colk = colcount_[snlist[k][0]]
colp = colcount_[snlist[snpar_[k]][0]]
nk = len(snlist[k])
np = len(snlist[snpar_[k]])
if merge_function and merge_function(colp, colk, np, nk):
# merge supernode k and snpar[k]
snlist[snpar_[k]] = matrix(sorted(list(snlist[k]) + list(snlist[snpar_[k]])))
snlist[k] = None
colcount_[snlist[snpar_[k]][0]] = colp + nk
Ns -= 1
if k in ch:
for c in ch[k]:
snpar_[c] = snpar_[k] # depends on [control=['for'], data=['c']]
ch[snpar_[k]] += ch[k] # depends on [control=['if'], data=['k', 'ch']]
snpar_[k] = k # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['k']] # depends on [control=['for'], data=['k']]
L = [i for (i, s) in enumerate(snlist) if s is not None]
snptr_ = matrix(0, (len(L) + 1, 1))
snode_ = +snode
for (i, l) in enumerate(L):
snptr_[i + 1] = snptr_[i] + len(snlist[l])
snode_[snptr_[i]:snptr_[i + 1]] = snlist[l] # depends on [control=['for'], data=[]]
snpar_ = snpar_[L]
for i in range(len(snpar_)):
snpar_[i] = L.index(snpar_[i]) # depends on [control=['for'], data=['i']]
snpost_ = post_order(snpar_)
return (colcount_, snode_, snptr_, snpar_, snpost_) |
def configureLogging(self):
"""Configure logging for nose, or optionally other packages. Any logger
name may be set with the debug option, and that logger will be set to
debug level and be assigned the same handler as the nose loggers, unless
it already has a handler.
"""
if self.loggingConfig:
from logging.config import fileConfig
fileConfig(self.loggingConfig)
return
format = logging.Formatter('%(name)s: %(levelname)s: %(message)s')
if self.debugLog:
handler = logging.FileHandler(self.debugLog)
else:
handler = logging.StreamHandler(self.logStream)
handler.setFormatter(format)
logger = logging.getLogger('nose')
logger.propagate = 0
# only add our default handler if there isn't already one there
# this avoids annoying duplicate log messages.
if handler not in logger.handlers:
logger.addHandler(handler)
# default level
lvl = logging.WARNING
if self.verbosity >= 5:
lvl = 0
elif self.verbosity >= 4:
lvl = logging.DEBUG
elif self.verbosity >= 3:
lvl = logging.INFO
logger.setLevel(lvl)
# individual overrides
if self.debug:
# no blanks
debug_loggers = [ name for name in self.debug.split(',')
if name ]
for logger_name in debug_loggers:
l = logging.getLogger(logger_name)
l.setLevel(logging.DEBUG)
if not l.handlers and not logger_name.startswith('nose'):
l.addHandler(handler) | def function[configureLogging, parameter[self]]:
constant[Configure logging for nose, or optionally other packages. Any logger
name may be set with the debug option, and that logger will be set to
debug level and be assigned the same handler as the nose loggers, unless
it already has a handler.
]
if name[self].loggingConfig begin[:]
from relative_module[logging.config] import module[fileConfig]
call[name[fileConfig], parameter[name[self].loggingConfig]]
return[None]
variable[format] assign[=] call[name[logging].Formatter, parameter[constant[%(name)s: %(levelname)s: %(message)s]]]
if name[self].debugLog begin[:]
variable[handler] assign[=] call[name[logging].FileHandler, parameter[name[self].debugLog]]
call[name[handler].setFormatter, parameter[name[format]]]
variable[logger] assign[=] call[name[logging].getLogger, parameter[constant[nose]]]
name[logger].propagate assign[=] constant[0]
if compare[name[handler] <ast.NotIn object at 0x7da2590d7190> name[logger].handlers] begin[:]
call[name[logger].addHandler, parameter[name[handler]]]
variable[lvl] assign[=] name[logging].WARNING
if compare[name[self].verbosity greater_or_equal[>=] constant[5]] begin[:]
variable[lvl] assign[=] constant[0]
call[name[logger].setLevel, parameter[name[lvl]]]
if name[self].debug begin[:]
variable[debug_loggers] assign[=] <ast.ListComp object at 0x7da18ede6b90>
for taget[name[logger_name]] in starred[name[debug_loggers]] begin[:]
variable[l] assign[=] call[name[logging].getLogger, parameter[name[logger_name]]]
call[name[l].setLevel, parameter[name[logging].DEBUG]]
if <ast.BoolOp object at 0x7da18ede5e40> begin[:]
call[name[l].addHandler, parameter[name[handler]]] | keyword[def] identifier[configureLogging] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[loggingConfig] :
keyword[from] identifier[logging] . identifier[config] keyword[import] identifier[fileConfig]
identifier[fileConfig] ( identifier[self] . identifier[loggingConfig] )
keyword[return]
identifier[format] = identifier[logging] . identifier[Formatter] ( literal[string] )
keyword[if] identifier[self] . identifier[debugLog] :
identifier[handler] = identifier[logging] . identifier[FileHandler] ( identifier[self] . identifier[debugLog] )
keyword[else] :
identifier[handler] = identifier[logging] . identifier[StreamHandler] ( identifier[self] . identifier[logStream] )
identifier[handler] . identifier[setFormatter] ( identifier[format] )
identifier[logger] = identifier[logging] . identifier[getLogger] ( literal[string] )
identifier[logger] . identifier[propagate] = literal[int]
keyword[if] identifier[handler] keyword[not] keyword[in] identifier[logger] . identifier[handlers] :
identifier[logger] . identifier[addHandler] ( identifier[handler] )
identifier[lvl] = identifier[logging] . identifier[WARNING]
keyword[if] identifier[self] . identifier[verbosity] >= literal[int] :
identifier[lvl] = literal[int]
keyword[elif] identifier[self] . identifier[verbosity] >= literal[int] :
identifier[lvl] = identifier[logging] . identifier[DEBUG]
keyword[elif] identifier[self] . identifier[verbosity] >= literal[int] :
identifier[lvl] = identifier[logging] . identifier[INFO]
identifier[logger] . identifier[setLevel] ( identifier[lvl] )
keyword[if] identifier[self] . identifier[debug] :
identifier[debug_loggers] =[ identifier[name] keyword[for] identifier[name] keyword[in] identifier[self] . identifier[debug] . identifier[split] ( literal[string] )
keyword[if] identifier[name] ]
keyword[for] identifier[logger_name] keyword[in] identifier[debug_loggers] :
identifier[l] = identifier[logging] . identifier[getLogger] ( identifier[logger_name] )
identifier[l] . identifier[setLevel] ( identifier[logging] . identifier[DEBUG] )
keyword[if] keyword[not] identifier[l] . identifier[handlers] keyword[and] keyword[not] identifier[logger_name] . identifier[startswith] ( literal[string] ):
identifier[l] . identifier[addHandler] ( identifier[handler] ) | def configureLogging(self):
"""Configure logging for nose, or optionally other packages. Any logger
name may be set with the debug option, and that logger will be set to
debug level and be assigned the same handler as the nose loggers, unless
it already has a handler.
"""
if self.loggingConfig:
from logging.config import fileConfig
fileConfig(self.loggingConfig)
return # depends on [control=['if'], data=[]]
format = logging.Formatter('%(name)s: %(levelname)s: %(message)s')
if self.debugLog:
handler = logging.FileHandler(self.debugLog) # depends on [control=['if'], data=[]]
else:
handler = logging.StreamHandler(self.logStream)
handler.setFormatter(format)
logger = logging.getLogger('nose')
logger.propagate = 0
# only add our default handler if there isn't already one there
# this avoids annoying duplicate log messages.
if handler not in logger.handlers:
logger.addHandler(handler) # depends on [control=['if'], data=['handler']]
# default level
lvl = logging.WARNING
if self.verbosity >= 5:
lvl = 0 # depends on [control=['if'], data=[]]
elif self.verbosity >= 4:
lvl = logging.DEBUG # depends on [control=['if'], data=[]]
elif self.verbosity >= 3:
lvl = logging.INFO # depends on [control=['if'], data=[]]
logger.setLevel(lvl)
# individual overrides
if self.debug:
# no blanks
debug_loggers = [name for name in self.debug.split(',') if name]
for logger_name in debug_loggers:
l = logging.getLogger(logger_name)
l.setLevel(logging.DEBUG)
if not l.handlers and (not logger_name.startswith('nose')):
l.addHandler(handler) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['logger_name']] # depends on [control=['if'], data=[]] |
def flat_list(input_list):
r"""
Given a list of nested lists of arbitrary depth, returns a single level or
'flat' list.
"""
x = input_list
if isinstance(x, list):
return [a for i in x for a in flat_list(i)]
else:
return [x] | def function[flat_list, parameter[input_list]]:
constant[
Given a list of nested lists of arbitrary depth, returns a single level or
'flat' list.
]
variable[x] assign[=] name[input_list]
if call[name[isinstance], parameter[name[x], name[list]]] begin[:]
return[<ast.ListComp object at 0x7da18eb567a0>] | keyword[def] identifier[flat_list] ( identifier[input_list] ):
literal[string]
identifier[x] = identifier[input_list]
keyword[if] identifier[isinstance] ( identifier[x] , identifier[list] ):
keyword[return] [ identifier[a] keyword[for] identifier[i] keyword[in] identifier[x] keyword[for] identifier[a] keyword[in] identifier[flat_list] ( identifier[i] )]
keyword[else] :
keyword[return] [ identifier[x] ] | def flat_list(input_list):
"""
Given a list of nested lists of arbitrary depth, returns a single level or
'flat' list.
"""
x = input_list
if isinstance(x, list):
return [a for i in x for a in flat_list(i)] # depends on [control=['if'], data=[]]
else:
return [x] |
def bank_chisq_from_filters(tmplt_snr, tmplt_norm, bank_snrs, bank_norms,
tmplt_bank_matches, indices=None):
""" This function calculates and returns a TimeSeries object containing the
bank veto calculated over a segment.
Parameters
----------
tmplt_snr: TimeSeries
The SNR time series from filtering the segment against the current
search template
tmplt_norm: float
The normalization factor for the search template
bank_snrs: list of TimeSeries
The precomputed list of SNR time series between each of the bank veto
templates and the segment
bank_norms: list of floats
The normalization factors for the list of bank veto templates
(usually this will be the same for all bank veto templates)
tmplt_bank_matches: list of floats
The complex overlap between the search template and each
of the bank templates
indices: {None, Array}, optional
Array of indices into the snr time series. If given, the bank chisq
will only be calculated at these values.
Returns
-------
bank_chisq: TimeSeries of the bank vetos
"""
if indices is not None:
tmplt_snr = Array(tmplt_snr, copy=False)
bank_snrs_tmp = []
for bank_snr in bank_snrs:
bank_snrs_tmp.append(bank_snr.take(indices))
bank_snrs=bank_snrs_tmp
# Initialise bank_chisq as 0s everywhere
bank_chisq = zeros(len(tmplt_snr), dtype=real_same_precision_as(tmplt_snr))
# Loop over all the bank templates
for i in range(len(bank_snrs)):
bank_match = tmplt_bank_matches[i]
if (abs(bank_match) > 0.99):
# Not much point calculating bank_chisquared if the bank template
# is very close to the filter template. Can also hit numerical
# error due to approximations made in this calculation.
# The value of 2 is the expected addition to the chisq for this
# template
bank_chisq += 2.
continue
bank_norm = sqrt((1 - bank_match*bank_match.conj()).real)
bank_SNR = bank_snrs[i] * (bank_norms[i] / bank_norm)
tmplt_SNR = tmplt_snr * (bank_match.conj() * tmplt_norm / bank_norm)
bank_SNR = Array(bank_SNR, copy=False)
tmplt_SNR = Array(tmplt_SNR, copy=False)
bank_chisq += (bank_SNR - tmplt_SNR).squared_norm()
if indices is not None:
return bank_chisq
else:
return TimeSeries(bank_chisq, delta_t=tmplt_snr.delta_t,
epoch=tmplt_snr.start_time, copy=False) | def function[bank_chisq_from_filters, parameter[tmplt_snr, tmplt_norm, bank_snrs, bank_norms, tmplt_bank_matches, indices]]:
constant[ This function calculates and returns a TimeSeries object containing the
bank veto calculated over a segment.
Parameters
----------
tmplt_snr: TimeSeries
The SNR time series from filtering the segment against the current
search template
tmplt_norm: float
The normalization factor for the search template
bank_snrs: list of TimeSeries
The precomputed list of SNR time series between each of the bank veto
templates and the segment
bank_norms: list of floats
The normalization factors for the list of bank veto templates
(usually this will be the same for all bank veto templates)
tmplt_bank_matches: list of floats
The complex overlap between the search template and each
of the bank templates
indices: {None, Array}, optional
Array of indices into the snr time series. If given, the bank chisq
will only be calculated at these values.
Returns
-------
bank_chisq: TimeSeries of the bank vetos
]
if compare[name[indices] is_not constant[None]] begin[:]
variable[tmplt_snr] assign[=] call[name[Array], parameter[name[tmplt_snr]]]
variable[bank_snrs_tmp] assign[=] list[[]]
for taget[name[bank_snr]] in starred[name[bank_snrs]] begin[:]
call[name[bank_snrs_tmp].append, parameter[call[name[bank_snr].take, parameter[name[indices]]]]]
variable[bank_snrs] assign[=] name[bank_snrs_tmp]
variable[bank_chisq] assign[=] call[name[zeros], parameter[call[name[len], parameter[name[tmplt_snr]]]]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[bank_snrs]]]]]] begin[:]
variable[bank_match] assign[=] call[name[tmplt_bank_matches]][name[i]]
if compare[call[name[abs], parameter[name[bank_match]]] greater[>] constant[0.99]] begin[:]
<ast.AugAssign object at 0x7da20c7cae30>
continue
variable[bank_norm] assign[=] call[name[sqrt], parameter[binary_operation[constant[1] - binary_operation[name[bank_match] * call[name[bank_match].conj, parameter[]]]].real]]
variable[bank_SNR] assign[=] binary_operation[call[name[bank_snrs]][name[i]] * binary_operation[call[name[bank_norms]][name[i]] / name[bank_norm]]]
variable[tmplt_SNR] assign[=] binary_operation[name[tmplt_snr] * binary_operation[binary_operation[call[name[bank_match].conj, parameter[]] * name[tmplt_norm]] / name[bank_norm]]]
variable[bank_SNR] assign[=] call[name[Array], parameter[name[bank_SNR]]]
variable[tmplt_SNR] assign[=] call[name[Array], parameter[name[tmplt_SNR]]]
<ast.AugAssign object at 0x7da20c7cb490>
if compare[name[indices] is_not constant[None]] begin[:]
return[name[bank_chisq]] | keyword[def] identifier[bank_chisq_from_filters] ( identifier[tmplt_snr] , identifier[tmplt_norm] , identifier[bank_snrs] , identifier[bank_norms] ,
identifier[tmplt_bank_matches] , identifier[indices] = keyword[None] ):
literal[string]
keyword[if] identifier[indices] keyword[is] keyword[not] keyword[None] :
identifier[tmplt_snr] = identifier[Array] ( identifier[tmplt_snr] , identifier[copy] = keyword[False] )
identifier[bank_snrs_tmp] =[]
keyword[for] identifier[bank_snr] keyword[in] identifier[bank_snrs] :
identifier[bank_snrs_tmp] . identifier[append] ( identifier[bank_snr] . identifier[take] ( identifier[indices] ))
identifier[bank_snrs] = identifier[bank_snrs_tmp]
identifier[bank_chisq] = identifier[zeros] ( identifier[len] ( identifier[tmplt_snr] ), identifier[dtype] = identifier[real_same_precision_as] ( identifier[tmplt_snr] ))
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[bank_snrs] )):
identifier[bank_match] = identifier[tmplt_bank_matches] [ identifier[i] ]
keyword[if] ( identifier[abs] ( identifier[bank_match] )> literal[int] ):
identifier[bank_chisq] += literal[int]
keyword[continue]
identifier[bank_norm] = identifier[sqrt] (( literal[int] - identifier[bank_match] * identifier[bank_match] . identifier[conj] ()). identifier[real] )
identifier[bank_SNR] = identifier[bank_snrs] [ identifier[i] ]*( identifier[bank_norms] [ identifier[i] ]/ identifier[bank_norm] )
identifier[tmplt_SNR] = identifier[tmplt_snr] *( identifier[bank_match] . identifier[conj] ()* identifier[tmplt_norm] / identifier[bank_norm] )
identifier[bank_SNR] = identifier[Array] ( identifier[bank_SNR] , identifier[copy] = keyword[False] )
identifier[tmplt_SNR] = identifier[Array] ( identifier[tmplt_SNR] , identifier[copy] = keyword[False] )
identifier[bank_chisq] +=( identifier[bank_SNR] - identifier[tmplt_SNR] ). identifier[squared_norm] ()
keyword[if] identifier[indices] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[bank_chisq]
keyword[else] :
keyword[return] identifier[TimeSeries] ( identifier[bank_chisq] , identifier[delta_t] = identifier[tmplt_snr] . identifier[delta_t] ,
identifier[epoch] = identifier[tmplt_snr] . identifier[start_time] , identifier[copy] = keyword[False] ) | def bank_chisq_from_filters(tmplt_snr, tmplt_norm, bank_snrs, bank_norms, tmplt_bank_matches, indices=None):
""" This function calculates and returns a TimeSeries object containing the
bank veto calculated over a segment.
Parameters
----------
tmplt_snr: TimeSeries
The SNR time series from filtering the segment against the current
search template
tmplt_norm: float
The normalization factor for the search template
bank_snrs: list of TimeSeries
The precomputed list of SNR time series between each of the bank veto
templates and the segment
bank_norms: list of floats
The normalization factors for the list of bank veto templates
(usually this will be the same for all bank veto templates)
tmplt_bank_matches: list of floats
The complex overlap between the search template and each
of the bank templates
indices: {None, Array}, optional
Array of indices into the snr time series. If given, the bank chisq
will only be calculated at these values.
Returns
-------
bank_chisq: TimeSeries of the bank vetos
"""
if indices is not None:
tmplt_snr = Array(tmplt_snr, copy=False)
bank_snrs_tmp = []
for bank_snr in bank_snrs:
bank_snrs_tmp.append(bank_snr.take(indices)) # depends on [control=['for'], data=['bank_snr']]
bank_snrs = bank_snrs_tmp # depends on [control=['if'], data=['indices']]
# Initialise bank_chisq as 0s everywhere
bank_chisq = zeros(len(tmplt_snr), dtype=real_same_precision_as(tmplt_snr))
# Loop over all the bank templates
for i in range(len(bank_snrs)):
bank_match = tmplt_bank_matches[i]
if abs(bank_match) > 0.99:
# Not much point calculating bank_chisquared if the bank template
# is very close to the filter template. Can also hit numerical
# error due to approximations made in this calculation.
# The value of 2 is the expected addition to the chisq for this
# template
bank_chisq += 2.0
continue # depends on [control=['if'], data=[]]
bank_norm = sqrt((1 - bank_match * bank_match.conj()).real)
bank_SNR = bank_snrs[i] * (bank_norms[i] / bank_norm)
tmplt_SNR = tmplt_snr * (bank_match.conj() * tmplt_norm / bank_norm)
bank_SNR = Array(bank_SNR, copy=False)
tmplt_SNR = Array(tmplt_SNR, copy=False)
bank_chisq += (bank_SNR - tmplt_SNR).squared_norm() # depends on [control=['for'], data=['i']]
if indices is not None:
return bank_chisq # depends on [control=['if'], data=[]]
else:
return TimeSeries(bank_chisq, delta_t=tmplt_snr.delta_t, epoch=tmplt_snr.start_time, copy=False) |
def synset_signatures(ss: "wn.Synset", hyperhypo=True, adapted=False,
                      remove_stopwords=True, to_lemmatize=True, remove_numbers=True,
                      lowercase=True, original_lesk=False, from_cache=True) -> set:
    """
    Takes a Synset and returns its signature words.

    :param ss: An instance of wn.Synset.
    :param hyperhypo: If True, add lemma names of hyper-/hyponyms.
    :param adapted: If True, add lemma names of related senses
        (holonyms, meronyms, similar_tos) as in Adapted Lesk.
    :param remove_stopwords: If True, remove English stopwords from the signature.
    :param to_lemmatize: If True, lemmatize every signature token.
    :param remove_numbers: If True, drop purely numeric tokens.
    :param lowercase: If True, lowercase all signature tokens.
    :param original_lesk: If True, return only the definition tokens
        (the original Lesk signature), skipping all other sources.
    :param from_cache: If True, return a precomputed signature from cache.
    :return: A set of signature strings
    """
    if from_cache:
        return synset_signatures_from_cache(ss, hyperhypo, adapted, original_lesk)
    # Collects the signatures from WordNet.
    signature = []
    # Adds the definition tokens.
    signature += word_tokenize(ss.definition())
    # If the original lesk signature is requested, skip the other signatures.
    if original_lesk:
        return set(signature)
    # Adds the examples and lemma names.
    # chain.from_iterable avoids materializing an intermediate list of lists.
    signature += chain.from_iterable(word_tokenize(eg) for eg in ss.examples())
    signature += ss.lemma_names()
    # Includes lemma_names of hyper-/hyponyms.
    if hyperhypo:
        hyperhyponyms = set(ss.hyponyms() + ss.hypernyms() +
                            ss.instance_hyponyms() + ss.instance_hypernyms())
        signature += set(chain.from_iterable(i.lemma_names() for i in hyperhyponyms))
    # Includes signatures from related senses as in Adapted Lesk.
    if adapted:
        # Includes lemma_names from holonyms, meronyms and similar_tos.
        related_senses = set(ss.member_holonyms() + ss.part_holonyms() +
                             ss.substance_holonyms() + ss.member_meronyms() +
                             ss.part_meronyms() + ss.substance_meronyms() +
                             ss.similar_tos())
        signature += set(chain.from_iterable(i.lemma_names() for i in related_senses))
    # Lowercase.
    signature = set(s.lower() for s in signature) if lowercase else signature
    # Removes stopwords.
    signature = set(signature).difference(EN_STOPWORDS) if remove_stopwords else signature
    # Lemmatized context is preferred over stemmed context.
    if to_lemmatize:
        # BUG FIX: the original conditional `lemmatize(s) if lowercase else
        # lemmatize(s)` had two identical branches (lowercasing is already
        # handled above), so a single call suffices.
        # We only throw away a token when both remove_numbers and s.isdigit() hold.
        signature = [lemmatize(s) for s in signature
                     if not (remove_numbers and s.isdigit())]
    # Keep only the unique bag-of-words.
    return set(signature)
constant[
Takes a Synset and returns its signature words.
:param ss: An instance of wn.Synset.
:return: A set of signature strings
]
if name[from_cache] begin[:]
return[call[name[synset_signatures_from_cache], parameter[name[ss], name[hyperhypo], name[adapted], name[original_lesk]]]]
variable[signature] assign[=] list[[]]
<ast.AugAssign object at 0x7da1b2273940>
if name[original_lesk] begin[:]
return[call[name[set], parameter[name[signature]]]]
<ast.AugAssign object at 0x7da1b2272b60>
<ast.AugAssign object at 0x7da1b2272320>
if name[hyperhypo] begin[:]
variable[hyperhyponyms] assign[=] call[name[set], parameter[binary_operation[binary_operation[binary_operation[call[name[ss].hyponyms, parameter[]] + call[name[ss].hypernyms, parameter[]]] + call[name[ss].instance_hyponyms, parameter[]]] + call[name[ss].instance_hypernyms, parameter[]]]]]
<ast.AugAssign object at 0x7da1b2273fa0>
if name[adapted] begin[:]
variable[related_senses] assign[=] call[name[set], parameter[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[call[name[ss].member_holonyms, parameter[]] + call[name[ss].part_holonyms, parameter[]]] + call[name[ss].substance_holonyms, parameter[]]] + call[name[ss].member_meronyms, parameter[]]] + call[name[ss].part_meronyms, parameter[]]] + call[name[ss].substance_meronyms, parameter[]]] + call[name[ss].similar_tos, parameter[]]]]]
<ast.AugAssign object at 0x7da1b2271720>
variable[signature] assign[=] <ast.IfExp object at 0x7da1b2273ca0>
variable[signature] assign[=] <ast.IfExp object at 0x7da1b2273520>
if name[to_lemmatize] begin[:]
variable[signature] assign[=] <ast.ListComp object at 0x7da1b2272d40>
return[call[name[set], parameter[name[signature]]]] | keyword[def] identifier[synset_signatures] ( identifier[ss] : literal[string] , identifier[hyperhypo] = keyword[True] , identifier[adapted] = keyword[False] ,
identifier[remove_stopwords] = keyword[True] , identifier[to_lemmatize] = keyword[True] , identifier[remove_numbers] = keyword[True] ,
identifier[lowercase] = keyword[True] , identifier[original_lesk] = keyword[False] , identifier[from_cache] = keyword[True] )-> identifier[set] :
literal[string]
keyword[if] identifier[from_cache] :
keyword[return] identifier[synset_signatures_from_cache] ( identifier[ss] , identifier[hyperhypo] , identifier[adapted] , identifier[original_lesk] )
identifier[signature] =[]
identifier[signature] += identifier[word_tokenize] ( identifier[ss] . identifier[definition] ())
keyword[if] identifier[original_lesk] :
keyword[return] identifier[set] ( identifier[signature] )
identifier[signature] += identifier[chain] (*[ identifier[word_tokenize] ( identifier[eg] ) keyword[for] identifier[eg] keyword[in] identifier[ss] . identifier[examples] ()])
identifier[signature] += identifier[ss] . identifier[lemma_names] ()
keyword[if] identifier[hyperhypo] :
identifier[hyperhyponyms] = identifier[set] ( identifier[ss] . identifier[hyponyms] ()+ identifier[ss] . identifier[hypernyms] ()+ identifier[ss] . identifier[instance_hyponyms] ()+ identifier[ss] . identifier[instance_hypernyms] ())
identifier[signature] += identifier[set] ( identifier[chain] (*[ identifier[i] . identifier[lemma_names] () keyword[for] identifier[i] keyword[in] identifier[hyperhyponyms] ]))
keyword[if] identifier[adapted] :
identifier[related_senses] = identifier[set] ( identifier[ss] . identifier[member_holonyms] ()+ identifier[ss] . identifier[part_holonyms] ()+ identifier[ss] . identifier[substance_holonyms] ()+ identifier[ss] . identifier[member_meronyms] ()+ identifier[ss] . identifier[part_meronyms] ()+ identifier[ss] . identifier[substance_meronyms] ()+ identifier[ss] . identifier[similar_tos] ())
identifier[signature] += identifier[set] ( identifier[chain] (*[ identifier[i] . identifier[lemma_names] () keyword[for] identifier[i] keyword[in] identifier[related_senses] ]))
identifier[signature] = identifier[set] ( identifier[s] . identifier[lower] () keyword[for] identifier[s] keyword[in] identifier[signature] ) keyword[if] identifier[lowercase] keyword[else] identifier[signature]
identifier[signature] = identifier[set] ( identifier[signature] ). identifier[difference] ( identifier[EN_STOPWORDS] ) keyword[if] identifier[remove_stopwords] keyword[else] identifier[signature]
keyword[if] identifier[to_lemmatize] :
identifier[signature] =[ identifier[lemmatize] ( identifier[s] ) keyword[if] identifier[lowercase] keyword[else] identifier[lemmatize] ( identifier[s] )
keyword[for] identifier[s] keyword[in] identifier[signature]
keyword[if] keyword[not] ( identifier[remove_numbers] keyword[and] identifier[s] . identifier[isdigit] ())]
keyword[return] identifier[set] ( identifier[signature] ) | def synset_signatures(ss: 'wn.Synset', hyperhypo=True, adapted=False, remove_stopwords=True, to_lemmatize=True, remove_numbers=True, lowercase=True, original_lesk=False, from_cache=True) -> set:
"""
Takes a Synset and returns its signature words.
:param ss: An instance of wn.Synset.
:return: A set of signature strings
"""
if from_cache:
return synset_signatures_from_cache(ss, hyperhypo, adapted, original_lesk) # depends on [control=['if'], data=[]]
# Collects the signatures from WordNet.
signature = []
# Adds the definition, example sentences and lemma_names.
signature += word_tokenize(ss.definition())
# If the original lesk signature is requested, skip the other signatures.
if original_lesk:
return set(signature) # depends on [control=['if'], data=[]]
# Adds the examples and lemma names.
signature += chain(*[word_tokenize(eg) for eg in ss.examples()])
signature += ss.lemma_names()
# Includes lemma_names of hyper-/hyponyms.
if hyperhypo:
hyperhyponyms = set(ss.hyponyms() + ss.hypernyms() + ss.instance_hyponyms() + ss.instance_hypernyms())
signature += set(chain(*[i.lemma_names() for i in hyperhyponyms])) # depends on [control=['if'], data=[]]
# Includes signatures from related senses as in Adapted Lesk.
if adapted:
# Includes lemma_names from holonyms, meronyms and similar_tos
related_senses = set(ss.member_holonyms() + ss.part_holonyms() + ss.substance_holonyms() + ss.member_meronyms() + ss.part_meronyms() + ss.substance_meronyms() + ss.similar_tos())
signature += set(chain(*[i.lemma_names() for i in related_senses])) # depends on [control=['if'], data=[]]
# Lowercase.
signature = set((s.lower() for s in signature)) if lowercase else signature
# Removes stopwords.
signature = set(signature).difference(EN_STOPWORDS) if remove_stopwords else signature
# Lemmatized context is preferred over stemmed context.
if to_lemmatize: # Lowercasing checks here.
# We only throw away if both remove_numbers and s is a digit are true.
signature = [lemmatize(s) if lowercase else lemmatize(s) for s in signature if not (remove_numbers and s.isdigit())] # depends on [control=['if'], data=[]]
# Keep only the unique bag-of-words
return set(signature) |
def compute_bin_edges(features, num_bins, edge_range, trim_outliers, trim_percentile, use_orig_distr=False):
    """Compute the edges for the histogram bins to keep it the same for all nodes.

    Returns None when the raw distribution is to be used (no histogram);
    otherwise returns `num_bins` evenly spaced edge values as a numpy array.
    """
    if use_orig_distr:
        # No histogram: edge weights will be computed on the raw distribution.
        print('Using original distribution (without histogram) to compute edge weights!')
        return None

    # Determine the [lower, upper] span the edges must cover.
    if edge_range is not None:
        lower, upper = edge_range[0], edge_range[1]
    elif trim_outliers:
        # Clip the span at the requested percentiles to suppress outliers.
        lower = np.percentile(features, trim_percentile)
        upper = np.percentile(features, 100 - trim_percentile)
    else:
        lower, upper = np.min(features), np.max(features)

    # Edges computed using data from all nodes, in order to establish correspondence.
    return np.linspace(lower, upper, num=num_bins, endpoint=True)
constant[Compute the edges for the histogram bins to keep it the same for all nodes.]
if name[use_orig_distr] begin[:]
call[name[print], parameter[constant[Using original distribution (without histogram) to compute edge weights!]]]
variable[edges] assign[=] constant[None]
return[name[edges]]
if compare[name[edge_range] is constant[None]] begin[:]
if name[trim_outliers] begin[:]
variable[edges_of_edges] assign[=] call[name[np].array, parameter[list[[<ast.Call object at 0x7da18dc06770>, <ast.Call object at 0x7da18dc04d60>]]]]
variable[edges] assign[=] call[name[np].linspace, parameter[call[name[edges_of_edges]][constant[0]], call[name[edges_of_edges]][constant[1]]]]
return[name[edges]] | keyword[def] identifier[compute_bin_edges] ( identifier[features] , identifier[num_bins] , identifier[edge_range] , identifier[trim_outliers] , identifier[trim_percentile] , identifier[use_orig_distr] = keyword[False] ):
literal[string]
keyword[if] identifier[use_orig_distr] :
identifier[print] ( literal[string] )
identifier[edges] = keyword[None]
keyword[return] identifier[edges]
keyword[if] identifier[edge_range] keyword[is] keyword[None] :
keyword[if] identifier[trim_outliers] :
identifier[edges_of_edges] = identifier[np] . identifier[array] ([ identifier[np] . identifier[percentile] ( identifier[features] , identifier[trim_percentile] ),
identifier[np] . identifier[percentile] ( identifier[features] , literal[int] - identifier[trim_percentile] )])
keyword[else] :
identifier[edges_of_edges] = identifier[np] . identifier[array] ([ identifier[np] . identifier[min] ( identifier[features] ), identifier[np] . identifier[max] ( identifier[features] )])
keyword[else] :
identifier[edges_of_edges] = identifier[edge_range]
identifier[edges] = identifier[np] . identifier[linspace] ( identifier[edges_of_edges] [ literal[int] ], identifier[edges_of_edges] [ literal[int] ], identifier[num] = identifier[num_bins] , identifier[endpoint] = keyword[True] )
keyword[return] identifier[edges] | def compute_bin_edges(features, num_bins, edge_range, trim_outliers, trim_percentile, use_orig_distr=False):
"""Compute the edges for the histogram bins to keep it the same for all nodes."""
if use_orig_distr:
print('Using original distribution (without histogram) to compute edge weights!')
edges = None
return edges # depends on [control=['if'], data=[]]
if edge_range is None:
if trim_outliers:
# percentiles_to_keep = [ trim_percentile, 1.0-trim_percentile] # [0.05, 0.95]
edges_of_edges = np.array([np.percentile(features, trim_percentile), np.percentile(features, 100 - trim_percentile)]) # depends on [control=['if'], data=[]]
else:
edges_of_edges = np.array([np.min(features), np.max(features)]) # depends on [control=['if'], data=[]]
else:
edges_of_edges = edge_range
# Edges computed using data from all nodes, in order to establish correspondence
edges = np.linspace(edges_of_edges[0], edges_of_edges[1], num=num_bins, endpoint=True)
return edges |
def plot_property(self, zs, ws, Tmin=None, Tmax=None, Pmin=1E5, Pmax=1E6,
                  methods=[], pts=15, only_valid=True):  # pragma: no cover
    r'''Method to create a plot of the property vs temperature and pressure
    according to either a specified list of methods, or user methods (if
    set), or all methods. User-selectable number of points for each
    variable. If only_valid is set, `test_method_validity` will be used to
    check if each condition in the specified range is valid, and
    `test_property_validity` will be used to test the answer, and the
    method is allowed to fail; only the valid points will be plotted.
    Otherwise, the result will be calculated and displayed as-is. This will
    not succeed if any method fails for any point.

    Parameters
    ----------
    zs : list of float
        Mole fractions of the mixture components, [-]
    ws : list of float
        Mass fractions of the mixture components, [-]
    Tmin : float
        Minimum temperature, to begin calculating the property, [K]
    Tmax : float
        Maximum temperature, to stop calculating the property, [K]
    Pmin : float
        Minimum pressure, to begin calculating the property, [Pa]
    Pmax : float
        Maximum pressure, to stop calculating the property, [Pa]
    methods : list, optional
        List of methods to consider
    pts : int, optional
        A list of points to calculate the property at for both temperature
        and pressure; pts^2 points will be calculated.
    only_valid : bool
        If True, only plot successful methods and calculated properties,
        and handle errors; if False, attempt calculation without any
        checking and use methods outside their bounds
    '''
    if not has_matplotlib:
        raise Exception('Optional dependency matplotlib is required for plotting')
    from mpl_toolkits.mplot3d import axes3d
    from matplotlib.ticker import FormatStrFormatter
    import numpy.ma as ma
    # Fall back to the object's own stored limits when bounds are not given.
    if Pmin is None:
        if self.Pmin is not None:
            Pmin = self.Pmin
        else:
            raise Exception('Minimum pressure could not be auto-detected; please provide it')
    if Pmax is None:
        if self.Pmax is not None:
            Pmax = self.Pmax
        else:
            raise Exception('Maximum pressure could not be auto-detected; please provide it')
    if Tmin is None:
        if self.Tmin is not None:
            Tmin = self.Tmin
        else:
            # BUG FIX: this message previously said "pressure".
            raise Exception('Minimum temperature could not be auto-detected; please provide it')
    if Tmax is None:
        if self.Tmax is not None:
            Tmax = self.Tmax
        else:
            # BUG FIX: this message previously said "pressure".
            raise Exception('Maximum temperature could not be auto-detected; please provide it')
    if not methods:
        methods = self.user_methods if self.user_methods else self.all_methods
    Ps = np.linspace(Pmin, Pmax, pts)
    Ts = np.linspace(Tmin, Tmax, pts)
    Ts_mesh, Ps_mesh = np.meshgrid(Ts, Ps)
    fig = plt.figure()
    ax = fig.gca(projection='3d')

    handles = []
    for method in methods:
        if only_valid:
            properties = []
            for T in Ts:
                T_props = []
                for P in Ps:
                    if self.test_method_validity(T, P, zs, ws, method):
                        try:
                            p = self.calculate(T, P, zs, ws, method)
                            if self.test_property_validity(p):
                                T_props.append(p)
                            else:
                                T_props.append(None)
                        except:
                            # The method is allowed to fail; only valid
                            # points are plotted.
                            T_props.append(None)
                    else:
                        T_props.append(None)
                properties.append(T_props)
            # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in
            # 1.24; the builtin float is the documented replacement.
            properties = ma.masked_invalid(np.array(properties, dtype=float).T)
            handles.append(ax.plot_surface(Ts_mesh, Ps_mesh, properties, cstride=1, rstride=1, alpha=0.5))
        else:
            properties = [[self.calculate(T, P, zs, ws, method) for P in Ps] for T in Ts]
            handles.append(ax.plot_surface(Ts_mesh, Ps_mesh, properties, cstride=1, rstride=1, alpha=0.5))
    ax.yaxis.set_major_formatter(FormatStrFormatter('%.4g'))
    ax.zaxis.set_major_formatter(FormatStrFormatter('%.4g'))
    ax.xaxis.set_major_formatter(FormatStrFormatter('%.4g'))
    ax.set_xlabel('Temperature, K')
    ax.set_ylabel('Pressure, Pa')
    ax.set_zlabel(self.name + ', ' + self.units)
    plt.title(self.name + ' of a mixture of ' + ', '.join(self.CASs)
              + ' at mole fractions of ' + ', '.join(str(round(i, 4)) for i in zs) + '.')
    plt.show(block=False)
    # The below is a workaround for a matplotlib bug
    ax.legend(handles, methods)
    plt.show(block=False)
constant[Method to create a plot of the property vs temperature and pressure
according to either a specified list of methods, or user methods (if
set), or all methods. User-selectable number of points for each
variable. If only_valid is set,`test_method_validity` will be used to
check if each condition in the specified range is valid, and
`test_property_validity` will be used to test the answer, and the
method is allowed to fail; only the valid points will be plotted.
Otherwise, the result will be calculated and displayed as-is. This will
not suceed if the any method fails for any point.
Parameters
----------
Tmin : float
Minimum temperature, to begin calculating the property, [K]
Tmax : float
Maximum temperature, to stop calculating the property, [K]
Pmin : float
Minimum pressure, to begin calculating the property, [Pa]
Pmax : float
Maximum pressure, to stop calculating the property, [Pa]
methods : list, optional
List of methods to consider
pts : int, optional
A list of points to calculate the property at for both temperature
and pressure; pts^2 points will be calculated.
only_valid : bool
If True, only plot successful methods and calculated properties,
and handle errors; if False, attempt calculation without any
checking and use methods outside their bounds
]
if <ast.UnaryOp object at 0x7da20c9900a0> begin[:]
<ast.Raise object at 0x7da20c991cf0>
from relative_module[mpl_toolkits.mplot3d] import module[axes3d]
from relative_module[matplotlib.ticker] import module[FormatStrFormatter]
import module[numpy.ma] as alias[ma]
if compare[name[Pmin] is constant[None]] begin[:]
if compare[name[self].Pmin is_not constant[None]] begin[:]
variable[Pmin] assign[=] name[self].Pmin
if compare[name[Pmax] is constant[None]] begin[:]
if compare[name[self].Pmax is_not constant[None]] begin[:]
variable[Pmax] assign[=] name[self].Pmax
if compare[name[Tmin] is constant[None]] begin[:]
if compare[name[self].Tmin is_not constant[None]] begin[:]
variable[Tmin] assign[=] name[self].Tmin
if compare[name[Tmax] is constant[None]] begin[:]
if compare[name[self].Tmax is_not constant[None]] begin[:]
variable[Tmax] assign[=] name[self].Tmax
if <ast.UnaryOp object at 0x7da20c9936d0> begin[:]
variable[methods] assign[=] <ast.IfExp object at 0x7da20c992050>
variable[Ps] assign[=] call[name[np].linspace, parameter[name[Pmin], name[Pmax], name[pts]]]
variable[Ts] assign[=] call[name[np].linspace, parameter[name[Tmin], name[Tmax], name[pts]]]
<ast.Tuple object at 0x7da20c9927a0> assign[=] call[name[np].meshgrid, parameter[name[Ts], name[Ps]]]
variable[fig] assign[=] call[name[plt].figure, parameter[]]
variable[ax] assign[=] call[name[fig].gca, parameter[]]
variable[handles] assign[=] list[[]]
for taget[name[method]] in starred[name[methods]] begin[:]
if name[only_valid] begin[:]
variable[properties] assign[=] list[[]]
for taget[name[T]] in starred[name[Ts]] begin[:]
variable[T_props] assign[=] list[[]]
for taget[name[P]] in starred[name[Ps]] begin[:]
if call[name[self].test_method_validity, parameter[name[T], name[P], name[zs], name[ws], name[method]]] begin[:]
<ast.Try object at 0x7da20c993580>
call[name[properties].append, parameter[name[T_props]]]
variable[properties] assign[=] call[name[ma].masked_invalid, parameter[call[name[np].array, parameter[name[properties]]].T]]
call[name[handles].append, parameter[call[name[ax].plot_surface, parameter[name[Ts_mesh], name[Ps_mesh], name[properties]]]]]
call[name[ax].yaxis.set_major_formatter, parameter[call[name[FormatStrFormatter], parameter[constant[%.4g]]]]]
call[name[ax].zaxis.set_major_formatter, parameter[call[name[FormatStrFormatter], parameter[constant[%.4g]]]]]
call[name[ax].xaxis.set_major_formatter, parameter[call[name[FormatStrFormatter], parameter[constant[%.4g]]]]]
call[name[ax].set_xlabel, parameter[constant[Temperature, K]]]
call[name[ax].set_ylabel, parameter[constant[Pressure, Pa]]]
call[name[ax].set_zlabel, parameter[binary_operation[binary_operation[name[self].name + constant[, ]] + name[self].units]]]
call[name[plt].title, parameter[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[name[self].name + constant[ of a mixture of ]] + call[constant[, ].join, parameter[name[self].CASs]]] + constant[ at mole fractions of ]] + call[constant[, ].join, parameter[<ast.GeneratorExp object at 0x7da1b021ef20>]]] + constant[.]]]]
call[name[plt].show, parameter[]]
call[name[ax].legend, parameter[name[handles], name[methods]]]
call[name[plt].show, parameter[]] | keyword[def] identifier[plot_property] ( identifier[self] , identifier[zs] , identifier[ws] , identifier[Tmin] = keyword[None] , identifier[Tmax] = keyword[None] , identifier[Pmin] = literal[int] , identifier[Pmax] = literal[int] ,
identifier[methods] =[], identifier[pts] = literal[int] , identifier[only_valid] = keyword[True] ):
literal[string]
keyword[if] keyword[not] identifier[has_matplotlib] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[from] identifier[mpl_toolkits] . identifier[mplot3d] keyword[import] identifier[axes3d]
keyword[from] identifier[matplotlib] . identifier[ticker] keyword[import] identifier[FormatStrFormatter]
keyword[import] identifier[numpy] . identifier[ma] keyword[as] identifier[ma]
keyword[if] identifier[Pmin] keyword[is] keyword[None] :
keyword[if] identifier[self] . identifier[Pmin] keyword[is] keyword[not] keyword[None] :
identifier[Pmin] = identifier[self] . identifier[Pmin]
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[if] identifier[Pmax] keyword[is] keyword[None] :
keyword[if] identifier[self] . identifier[Pmax] keyword[is] keyword[not] keyword[None] :
identifier[Pmax] = identifier[self] . identifier[Pmax]
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[if] identifier[Tmin] keyword[is] keyword[None] :
keyword[if] identifier[self] . identifier[Tmin] keyword[is] keyword[not] keyword[None] :
identifier[Tmin] = identifier[self] . identifier[Tmin]
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[if] identifier[Tmax] keyword[is] keyword[None] :
keyword[if] identifier[self] . identifier[Tmax] keyword[is] keyword[not] keyword[None] :
identifier[Tmax] = identifier[self] . identifier[Tmax]
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[if] keyword[not] identifier[methods] :
identifier[methods] = identifier[self] . identifier[user_methods] keyword[if] identifier[self] . identifier[user_methods] keyword[else] identifier[self] . identifier[all_methods]
identifier[Ps] = identifier[np] . identifier[linspace] ( identifier[Pmin] , identifier[Pmax] , identifier[pts] )
identifier[Ts] = identifier[np] . identifier[linspace] ( identifier[Tmin] , identifier[Tmax] , identifier[pts] )
identifier[Ts_mesh] , identifier[Ps_mesh] = identifier[np] . identifier[meshgrid] ( identifier[Ts] , identifier[Ps] )
identifier[fig] = identifier[plt] . identifier[figure] ()
identifier[ax] = identifier[fig] . identifier[gca] ( identifier[projection] = literal[string] )
identifier[handles] =[]
keyword[for] identifier[method] keyword[in] identifier[methods] :
keyword[if] identifier[only_valid] :
identifier[properties] =[]
keyword[for] identifier[T] keyword[in] identifier[Ts] :
identifier[T_props] =[]
keyword[for] identifier[P] keyword[in] identifier[Ps] :
keyword[if] identifier[self] . identifier[test_method_validity] ( identifier[T] , identifier[P] , identifier[zs] , identifier[ws] , identifier[method] ):
keyword[try] :
identifier[p] = identifier[self] . identifier[calculate] ( identifier[T] , identifier[P] , identifier[zs] , identifier[ws] , identifier[method] )
keyword[if] identifier[self] . identifier[test_property_validity] ( identifier[p] ):
identifier[T_props] . identifier[append] ( identifier[p] )
keyword[else] :
identifier[T_props] . identifier[append] ( keyword[None] )
keyword[except] :
identifier[T_props] . identifier[append] ( keyword[None] )
keyword[else] :
identifier[T_props] . identifier[append] ( keyword[None] )
identifier[properties] . identifier[append] ( identifier[T_props] )
identifier[properties] = identifier[ma] . identifier[masked_invalid] ( identifier[np] . identifier[array] ( identifier[properties] , identifier[dtype] = identifier[np] . identifier[float] ). identifier[T] )
identifier[handles] . identifier[append] ( identifier[ax] . identifier[plot_surface] ( identifier[Ts_mesh] , identifier[Ps_mesh] , identifier[properties] , identifier[cstride] = literal[int] , identifier[rstride] = literal[int] , identifier[alpha] = literal[int] ))
keyword[else] :
identifier[properties] =[[ identifier[self] . identifier[calculate] ( identifier[T] , identifier[P] , identifier[zs] , identifier[ws] , identifier[method] ) keyword[for] identifier[P] keyword[in] identifier[Ps] ] keyword[for] identifier[T] keyword[in] identifier[Ts] ]
identifier[handles] . identifier[append] ( identifier[ax] . identifier[plot_surface] ( identifier[Ts_mesh] , identifier[Ps_mesh] , identifier[properties] , identifier[cstride] = literal[int] , identifier[rstride] = literal[int] , identifier[alpha] = literal[int] ))
identifier[ax] . identifier[yaxis] . identifier[set_major_formatter] ( identifier[FormatStrFormatter] ( literal[string] ))
identifier[ax] . identifier[zaxis] . identifier[set_major_formatter] ( identifier[FormatStrFormatter] ( literal[string] ))
identifier[ax] . identifier[xaxis] . identifier[set_major_formatter] ( identifier[FormatStrFormatter] ( literal[string] ))
identifier[ax] . identifier[set_xlabel] ( literal[string] )
identifier[ax] . identifier[set_ylabel] ( literal[string] )
identifier[ax] . identifier[set_zlabel] ( identifier[self] . identifier[name] + literal[string] + identifier[self] . identifier[units] )
identifier[plt] . identifier[title] ( identifier[self] . identifier[name] + literal[string] + literal[string] . identifier[join] ( identifier[self] . identifier[CASs] )
+ literal[string] + literal[string] . identifier[join] ( identifier[str] ( identifier[round] ( identifier[i] , literal[int] )) keyword[for] identifier[i] keyword[in] identifier[zs] )+ literal[string] )
identifier[plt] . identifier[show] ( identifier[block] = keyword[False] )
identifier[ax] . identifier[legend] ( identifier[handles] , identifier[methods] )
identifier[plt] . identifier[show] ( identifier[block] = keyword[False] ) | def plot_property(self, zs, ws, Tmin=None, Tmax=None, Pmin=100000.0, Pmax=1000000.0, methods=[], pts=15, only_valid=True): # pragma: no cover
'Method to create a plot of the property vs temperature and pressure \n according to either a specified list of methods, or user methods (if \n set), or all methods. User-selectable number of points for each \n variable. If only_valid is set,`test_method_validity` will be used to\n check if each condition in the specified range is valid, and\n `test_property_validity` will be used to test the answer, and the\n method is allowed to fail; only the valid points will be plotted.\n Otherwise, the result will be calculated and displayed as-is. This will\n not suceed if the any method fails for any point.\n\n Parameters\n ----------\n Tmin : float\n Minimum temperature, to begin calculating the property, [K]\n Tmax : float\n Maximum temperature, to stop calculating the property, [K]\n Pmin : float\n Minimum pressure, to begin calculating the property, [Pa]\n Pmax : float\n Maximum pressure, to stop calculating the property, [Pa]\n methods : list, optional\n List of methods to consider\n pts : int, optional\n A list of points to calculate the property at for both temperature \n and pressure; pts^2 points will be calculated.\n only_valid : bool\n If True, only plot successful methods and calculated properties,\n and handle errors; if False, attempt calculation without any\n checking and use methods outside their bounds\n '
if not has_matplotlib:
raise Exception('Optional dependency matplotlib is required for plotting') # depends on [control=['if'], data=[]]
from mpl_toolkits.mplot3d import axes3d
from matplotlib.ticker import FormatStrFormatter
import numpy.ma as ma
if Pmin is None:
if self.Pmin is not None:
Pmin = self.Pmin # depends on [control=['if'], data=[]]
else:
raise Exception('Minimum pressure could not be auto-detected; please provide it') # depends on [control=['if'], data=['Pmin']]
if Pmax is None:
if self.Pmax is not None:
Pmax = self.Pmax # depends on [control=['if'], data=[]]
else:
raise Exception('Maximum pressure could not be auto-detected; please provide it') # depends on [control=['if'], data=['Pmax']]
if Tmin is None:
if self.Tmin is not None:
Tmin = self.Tmin # depends on [control=['if'], data=[]]
else:
raise Exception('Minimum pressure could not be auto-detected; please provide it') # depends on [control=['if'], data=['Tmin']]
if Tmax is None:
if self.Tmax is not None:
Tmax = self.Tmax # depends on [control=['if'], data=[]]
else:
raise Exception('Maximum pressure could not be auto-detected; please provide it') # depends on [control=['if'], data=['Tmax']]
if not methods:
methods = self.user_methods if self.user_methods else self.all_methods # depends on [control=['if'], data=[]]
Ps = np.linspace(Pmin, Pmax, pts)
Ts = np.linspace(Tmin, Tmax, pts)
(Ts_mesh, Ps_mesh) = np.meshgrid(Ts, Ps)
fig = plt.figure()
ax = fig.gca(projection='3d')
handles = []
for method in methods:
if only_valid:
properties = []
for T in Ts:
T_props = []
for P in Ps:
if self.test_method_validity(T, P, zs, ws, method):
try:
p = self.calculate(T, P, zs, ws, method)
if self.test_property_validity(p):
T_props.append(p) # depends on [control=['if'], data=[]]
else:
T_props.append(None) # depends on [control=['try'], data=[]]
except:
T_props.append(None) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
T_props.append(None) # depends on [control=['for'], data=['P']]
properties.append(T_props) # depends on [control=['for'], data=['T']]
properties = ma.masked_invalid(np.array(properties, dtype=np.float).T)
handles.append(ax.plot_surface(Ts_mesh, Ps_mesh, properties, cstride=1, rstride=1, alpha=0.5)) # depends on [control=['if'], data=[]]
else:
properties = [[self.calculate(T, P, zs, ws, method) for P in Ps] for T in Ts]
handles.append(ax.plot_surface(Ts_mesh, Ps_mesh, properties, cstride=1, rstride=1, alpha=0.5)) # depends on [control=['for'], data=['method']]
ax.yaxis.set_major_formatter(FormatStrFormatter('%.4g'))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.4g'))
ax.xaxis.set_major_formatter(FormatStrFormatter('%.4g'))
ax.set_xlabel('Temperature, K')
ax.set_ylabel('Pressure, Pa')
ax.set_zlabel(self.name + ', ' + self.units)
plt.title(self.name + ' of a mixture of ' + ', '.join(self.CASs) + ' at mole fractions of ' + ', '.join((str(round(i, 4)) for i in zs)) + '.')
plt.show(block=False)
# The below is a workaround for a matplotlib bug
ax.legend(handles, methods)
plt.show(block=False) |
def __validInputs(self):
"""Validates the inputs of the constructor."""
#if not isinstance(self.__column, Column):
# raise Sitools2Exception("column must be an instance of Column")
if self.__type not in self.__TYPE:
raise Sitools2Exception("Type must be one of these values : numeric, string, boolean")
if self.__comparison not in self.__COMPARISON:
raise Sitools2Exception("Comparison must be one of these values : LT, GT, EQ, LIKE, IN, NOTIN") | def function[__validInputs, parameter[self]]:
constant[Validates the inputs of the constructor.]
if compare[name[self].__type <ast.NotIn object at 0x7da2590d7190> name[self].__TYPE] begin[:]
<ast.Raise object at 0x7da1b0911f60>
if compare[name[self].__comparison <ast.NotIn object at 0x7da2590d7190> name[self].__COMPARISON] begin[:]
<ast.Raise object at 0x7da1b0947fd0> | keyword[def] identifier[__validInputs] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[__type] keyword[not] keyword[in] identifier[self] . identifier[__TYPE] :
keyword[raise] identifier[Sitools2Exception] ( literal[string] )
keyword[if] identifier[self] . identifier[__comparison] keyword[not] keyword[in] identifier[self] . identifier[__COMPARISON] :
keyword[raise] identifier[Sitools2Exception] ( literal[string] ) | def __validInputs(self):
"""Validates the inputs of the constructor."""
#if not isinstance(self.__column, Column):
# raise Sitools2Exception("column must be an instance of Column")
if self.__type not in self.__TYPE:
raise Sitools2Exception('Type must be one of these values : numeric, string, boolean') # depends on [control=['if'], data=[]]
if self.__comparison not in self.__COMPARISON:
raise Sitools2Exception('Comparison must be one of these values : LT, GT, EQ, LIKE, IN, NOTIN') # depends on [control=['if'], data=[]] |
def set_rich_text_font(self, font):
"""Set rich text mode font"""
self.rich_text.set_font(font, fixed_font=self.get_plugin_font()) | def function[set_rich_text_font, parameter[self, font]]:
constant[Set rich text mode font]
call[name[self].rich_text.set_font, parameter[name[font]]] | keyword[def] identifier[set_rich_text_font] ( identifier[self] , identifier[font] ):
literal[string]
identifier[self] . identifier[rich_text] . identifier[set_font] ( identifier[font] , identifier[fixed_font] = identifier[self] . identifier[get_plugin_font] ()) | def set_rich_text_font(self, font):
"""Set rich text mode font"""
self.rich_text.set_font(font, fixed_font=self.get_plugin_font()) |
def parse_fn_docstring(fn):
"""Get parameter and return types from function's docstring.
Docstrings must use this format::
:param foo: What is foo
:type foo: int
:return: What is returned
:rtype: double
:return: A map of names, each with keys 'type' and 'desc'.
:rtype: tuple(dict)
"""
doc = fn.__doc__
params, return_ = {}, {}
param_order = []
for line in doc.split("\n"):
line = line.strip()
if line.startswith(":param"):
_, name, desc = line.split(":", 2)
name = name[6:].strip() # skip 'param '
params[name] = {'desc': desc.strip()}
param_order.append(name)
elif line.startswith(":type"):
_, name, desc = line.split(":", 2)
name = name[5:].strip() # skip 'type '
if not name in params:
raise ValueError("'type' without 'param' for {}".format(name))
params[name]['type'] = desc.strip()
elif line.startswith(":return"):
_1, _2, desc = line.split(":", 2)
return_['desc'] = desc
elif line.startswith(":rtype"):
_1, _2, desc = line.split(":", 2)
return_['type'] = desc.strip()
return params | def function[parse_fn_docstring, parameter[fn]]:
constant[Get parameter and return types from function's docstring.
Docstrings must use this format::
:param foo: What is foo
:type foo: int
:return: What is returned
:rtype: double
:return: A map of names, each with keys 'type' and 'desc'.
:rtype: tuple(dict)
]
variable[doc] assign[=] name[fn].__doc__
<ast.Tuple object at 0x7da18f810b20> assign[=] tuple[[<ast.Dict object at 0x7da18f8102e0>, <ast.Dict object at 0x7da18f813af0>]]
variable[param_order] assign[=] list[[]]
for taget[name[line]] in starred[call[name[doc].split, parameter[constant[
]]]] begin[:]
variable[line] assign[=] call[name[line].strip, parameter[]]
if call[name[line].startswith, parameter[constant[:param]]] begin[:]
<ast.Tuple object at 0x7da18f813ac0> assign[=] call[name[line].split, parameter[constant[:], constant[2]]]
variable[name] assign[=] call[call[name[name]][<ast.Slice object at 0x7da18f811360>].strip, parameter[]]
call[name[params]][name[name]] assign[=] dictionary[[<ast.Constant object at 0x7da18f810250>], [<ast.Call object at 0x7da18f811c90>]]
call[name[param_order].append, parameter[name[name]]]
return[name[params]] | keyword[def] identifier[parse_fn_docstring] ( identifier[fn] ):
literal[string]
identifier[doc] = identifier[fn] . identifier[__doc__]
identifier[params] , identifier[return_] ={},{}
identifier[param_order] =[]
keyword[for] identifier[line] keyword[in] identifier[doc] . identifier[split] ( literal[string] ):
identifier[line] = identifier[line] . identifier[strip] ()
keyword[if] identifier[line] . identifier[startswith] ( literal[string] ):
identifier[_] , identifier[name] , identifier[desc] = identifier[line] . identifier[split] ( literal[string] , literal[int] )
identifier[name] = identifier[name] [ literal[int] :]. identifier[strip] ()
identifier[params] [ identifier[name] ]={ literal[string] : identifier[desc] . identifier[strip] ()}
identifier[param_order] . identifier[append] ( identifier[name] )
keyword[elif] identifier[line] . identifier[startswith] ( literal[string] ):
identifier[_] , identifier[name] , identifier[desc] = identifier[line] . identifier[split] ( literal[string] , literal[int] )
identifier[name] = identifier[name] [ literal[int] :]. identifier[strip] ()
keyword[if] keyword[not] identifier[name] keyword[in] identifier[params] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[name] ))
identifier[params] [ identifier[name] ][ literal[string] ]= identifier[desc] . identifier[strip] ()
keyword[elif] identifier[line] . identifier[startswith] ( literal[string] ):
identifier[_1] , identifier[_2] , identifier[desc] = identifier[line] . identifier[split] ( literal[string] , literal[int] )
identifier[return_] [ literal[string] ]= identifier[desc]
keyword[elif] identifier[line] . identifier[startswith] ( literal[string] ):
identifier[_1] , identifier[_2] , identifier[desc] = identifier[line] . identifier[split] ( literal[string] , literal[int] )
identifier[return_] [ literal[string] ]= identifier[desc] . identifier[strip] ()
keyword[return] identifier[params] | def parse_fn_docstring(fn):
"""Get parameter and return types from function's docstring.
Docstrings must use this format::
:param foo: What is foo
:type foo: int
:return: What is returned
:rtype: double
:return: A map of names, each with keys 'type' and 'desc'.
:rtype: tuple(dict)
"""
doc = fn.__doc__
(params, return_) = ({}, {})
param_order = []
for line in doc.split('\n'):
line = line.strip()
if line.startswith(':param'):
(_, name, desc) = line.split(':', 2)
name = name[6:].strip() # skip 'param '
params[name] = {'desc': desc.strip()}
param_order.append(name) # depends on [control=['if'], data=[]]
elif line.startswith(':type'):
(_, name, desc) = line.split(':', 2)
name = name[5:].strip() # skip 'type '
if not name in params:
raise ValueError("'type' without 'param' for {}".format(name)) # depends on [control=['if'], data=[]]
params[name]['type'] = desc.strip() # depends on [control=['if'], data=[]]
elif line.startswith(':return'):
(_1, _2, desc) = line.split(':', 2)
return_['desc'] = desc # depends on [control=['if'], data=[]]
elif line.startswith(':rtype'):
(_1, _2, desc) = line.split(':', 2)
return_['type'] = desc.strip() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']]
return params |
def process_npdu(self, npdu):
"""encode NPDUs from the service access point and send them downstream."""
if _debug: RouterToRouterService._debug("process_npdu %r", npdu)
# encode the npdu as if it was about to be delivered to the network
pdu = PDU()
npdu.encode(pdu)
if _debug: RouterToRouterService._debug(" - pdu: %r", pdu)
# broadcast messages go to everyone
if pdu.pduDestination.addrType == Address.localBroadcastAddr:
destList = self.connections.keys()
else:
conn = self.connections.get(pdu.pduDestination, None)
if not conn:
if _debug: RouterToRouterService._debug(" - not a connected client")
# start a connection attempt
conn = self.connect(pdu.pduDestination)
if not conn.connected:
# keep a reference to this pdu to send after the ack comes back
conn.pendingNPDU.append(pdu)
return
destList = [pdu.pduDestination]
if _debug: RouterToRouterService._debug(" - destList: %r", destList)
for dest in destList:
# make a router-to-router NPDU
xpdu = RouterToRouterNPDU(pdu)
xpdu.pduDestination = dest
# send it to the multiplexer
self.service_request(xpdu) | def function[process_npdu, parameter[self, npdu]]:
constant[encode NPDUs from the service access point and send them downstream.]
if name[_debug] begin[:]
call[name[RouterToRouterService]._debug, parameter[constant[process_npdu %r], name[npdu]]]
variable[pdu] assign[=] call[name[PDU], parameter[]]
call[name[npdu].encode, parameter[name[pdu]]]
if name[_debug] begin[:]
call[name[RouterToRouterService]._debug, parameter[constant[ - pdu: %r], name[pdu]]]
if compare[name[pdu].pduDestination.addrType equal[==] name[Address].localBroadcastAddr] begin[:]
variable[destList] assign[=] call[name[self].connections.keys, parameter[]]
if name[_debug] begin[:]
call[name[RouterToRouterService]._debug, parameter[constant[ - destList: %r], name[destList]]]
for taget[name[dest]] in starred[name[destList]] begin[:]
variable[xpdu] assign[=] call[name[RouterToRouterNPDU], parameter[name[pdu]]]
name[xpdu].pduDestination assign[=] name[dest]
call[name[self].service_request, parameter[name[xpdu]]] | keyword[def] identifier[process_npdu] ( identifier[self] , identifier[npdu] ):
literal[string]
keyword[if] identifier[_debug] : identifier[RouterToRouterService] . identifier[_debug] ( literal[string] , identifier[npdu] )
identifier[pdu] = identifier[PDU] ()
identifier[npdu] . identifier[encode] ( identifier[pdu] )
keyword[if] identifier[_debug] : identifier[RouterToRouterService] . identifier[_debug] ( literal[string] , identifier[pdu] )
keyword[if] identifier[pdu] . identifier[pduDestination] . identifier[addrType] == identifier[Address] . identifier[localBroadcastAddr] :
identifier[destList] = identifier[self] . identifier[connections] . identifier[keys] ()
keyword[else] :
identifier[conn] = identifier[self] . identifier[connections] . identifier[get] ( identifier[pdu] . identifier[pduDestination] , keyword[None] )
keyword[if] keyword[not] identifier[conn] :
keyword[if] identifier[_debug] : identifier[RouterToRouterService] . identifier[_debug] ( literal[string] )
identifier[conn] = identifier[self] . identifier[connect] ( identifier[pdu] . identifier[pduDestination] )
keyword[if] keyword[not] identifier[conn] . identifier[connected] :
identifier[conn] . identifier[pendingNPDU] . identifier[append] ( identifier[pdu] )
keyword[return]
identifier[destList] =[ identifier[pdu] . identifier[pduDestination] ]
keyword[if] identifier[_debug] : identifier[RouterToRouterService] . identifier[_debug] ( literal[string] , identifier[destList] )
keyword[for] identifier[dest] keyword[in] identifier[destList] :
identifier[xpdu] = identifier[RouterToRouterNPDU] ( identifier[pdu] )
identifier[xpdu] . identifier[pduDestination] = identifier[dest]
identifier[self] . identifier[service_request] ( identifier[xpdu] ) | def process_npdu(self, npdu):
"""encode NPDUs from the service access point and send them downstream."""
if _debug:
RouterToRouterService._debug('process_npdu %r', npdu) # depends on [control=['if'], data=[]]
# encode the npdu as if it was about to be delivered to the network
pdu = PDU()
npdu.encode(pdu)
if _debug:
RouterToRouterService._debug(' - pdu: %r', pdu) # depends on [control=['if'], data=[]]
# broadcast messages go to everyone
if pdu.pduDestination.addrType == Address.localBroadcastAddr:
destList = self.connections.keys() # depends on [control=['if'], data=[]]
else:
conn = self.connections.get(pdu.pduDestination, None)
if not conn:
if _debug:
RouterToRouterService._debug(' - not a connected client') # depends on [control=['if'], data=[]]
# start a connection attempt
conn = self.connect(pdu.pduDestination) # depends on [control=['if'], data=[]]
if not conn.connected:
# keep a reference to this pdu to send after the ack comes back
conn.pendingNPDU.append(pdu)
return # depends on [control=['if'], data=[]]
destList = [pdu.pduDestination]
if _debug:
RouterToRouterService._debug(' - destList: %r', destList) # depends on [control=['if'], data=[]]
for dest in destList:
# make a router-to-router NPDU
xpdu = RouterToRouterNPDU(pdu)
xpdu.pduDestination = dest
# send it to the multiplexer
self.service_request(xpdu) # depends on [control=['for'], data=['dest']] |
def start(self):
"""Schedule the fiber to be started in the next iteration of the
event loop."""
target = getattr(self._target, '__qualname__', self._target.__name__)
self._log.debug('starting fiber {}, target {}', self.name, target)
self._hub.run_callback(self.switch) | def function[start, parameter[self]]:
constant[Schedule the fiber to be started in the next iteration of the
event loop.]
variable[target] assign[=] call[name[getattr], parameter[name[self]._target, constant[__qualname__], name[self]._target.__name__]]
call[name[self]._log.debug, parameter[constant[starting fiber {}, target {}], name[self].name, name[target]]]
call[name[self]._hub.run_callback, parameter[name[self].switch]] | keyword[def] identifier[start] ( identifier[self] ):
literal[string]
identifier[target] = identifier[getattr] ( identifier[self] . identifier[_target] , literal[string] , identifier[self] . identifier[_target] . identifier[__name__] )
identifier[self] . identifier[_log] . identifier[debug] ( literal[string] , identifier[self] . identifier[name] , identifier[target] )
identifier[self] . identifier[_hub] . identifier[run_callback] ( identifier[self] . identifier[switch] ) | def start(self):
"""Schedule the fiber to be started in the next iteration of the
event loop."""
target = getattr(self._target, '__qualname__', self._target.__name__)
self._log.debug('starting fiber {}, target {}', self.name, target)
self._hub.run_callback(self.switch) |
def after_config_matches(ini, envlist):
"""Determine if this job should wait for the others."""
section = ini.sections.get('travis:after', {})
if not section:
return False # Never wait if it's not configured
if 'envlist' in section or 'toxenv' in section:
if 'toxenv' in section:
print('The "toxenv" key of the [travis:after] section is '
'deprecated in favor of the "envlist" key.', file=sys.stderr)
toxenv = section.get('toxenv')
required = set(split_env(section.get('envlist', toxenv) or ''))
actual = set(envlist)
if required - actual:
return False
# Translate travis requirements to env requirements
env_requirements = [
(TRAVIS_FACTORS[factor], value) for factor, value
in parse_dict(section.get('travis', '')).items()
if factor in TRAVIS_FACTORS
] + [
(name, value) for name, value
in parse_dict(section.get('env', '')).items()
]
return all([
os.environ.get(name) == value
for name, value in env_requirements
]) | def function[after_config_matches, parameter[ini, envlist]]:
constant[Determine if this job should wait for the others.]
variable[section] assign[=] call[name[ini].sections.get, parameter[constant[travis:after], dictionary[[], []]]]
if <ast.UnaryOp object at 0x7da1b101a7a0> begin[:]
return[constant[False]]
if <ast.BoolOp object at 0x7da1b101bf40> begin[:]
if compare[constant[toxenv] in name[section]] begin[:]
call[name[print], parameter[constant[The "toxenv" key of the [travis:after] section is deprecated in favor of the "envlist" key.]]]
variable[toxenv] assign[=] call[name[section].get, parameter[constant[toxenv]]]
variable[required] assign[=] call[name[set], parameter[call[name[split_env], parameter[<ast.BoolOp object at 0x7da1b101ab60>]]]]
variable[actual] assign[=] call[name[set], parameter[name[envlist]]]
if binary_operation[name[required] - name[actual]] begin[:]
return[constant[False]]
variable[env_requirements] assign[=] binary_operation[<ast.ListComp object at 0x7da1b101af50> + <ast.ListComp object at 0x7da1b101a200>]
return[call[name[all], parameter[<ast.ListComp object at 0x7da1b101b940>]]] | keyword[def] identifier[after_config_matches] ( identifier[ini] , identifier[envlist] ):
literal[string]
identifier[section] = identifier[ini] . identifier[sections] . identifier[get] ( literal[string] ,{})
keyword[if] keyword[not] identifier[section] :
keyword[return] keyword[False]
keyword[if] literal[string] keyword[in] identifier[section] keyword[or] literal[string] keyword[in] identifier[section] :
keyword[if] literal[string] keyword[in] identifier[section] :
identifier[print] ( literal[string]
literal[string] , identifier[file] = identifier[sys] . identifier[stderr] )
identifier[toxenv] = identifier[section] . identifier[get] ( literal[string] )
identifier[required] = identifier[set] ( identifier[split_env] ( identifier[section] . identifier[get] ( literal[string] , identifier[toxenv] ) keyword[or] literal[string] ))
identifier[actual] = identifier[set] ( identifier[envlist] )
keyword[if] identifier[required] - identifier[actual] :
keyword[return] keyword[False]
identifier[env_requirements] =[
( identifier[TRAVIS_FACTORS] [ identifier[factor] ], identifier[value] ) keyword[for] identifier[factor] , identifier[value]
keyword[in] identifier[parse_dict] ( identifier[section] . identifier[get] ( literal[string] , literal[string] )). identifier[items] ()
keyword[if] identifier[factor] keyword[in] identifier[TRAVIS_FACTORS]
]+[
( identifier[name] , identifier[value] ) keyword[for] identifier[name] , identifier[value]
keyword[in] identifier[parse_dict] ( identifier[section] . identifier[get] ( literal[string] , literal[string] )). identifier[items] ()
]
keyword[return] identifier[all] ([
identifier[os] . identifier[environ] . identifier[get] ( identifier[name] )== identifier[value]
keyword[for] identifier[name] , identifier[value] keyword[in] identifier[env_requirements]
]) | def after_config_matches(ini, envlist):
"""Determine if this job should wait for the others."""
section = ini.sections.get('travis:after', {})
if not section:
return False # Never wait if it's not configured # depends on [control=['if'], data=[]]
if 'envlist' in section or 'toxenv' in section:
if 'toxenv' in section:
print('The "toxenv" key of the [travis:after] section is deprecated in favor of the "envlist" key.', file=sys.stderr) # depends on [control=['if'], data=[]]
toxenv = section.get('toxenv')
required = set(split_env(section.get('envlist', toxenv) or ''))
actual = set(envlist)
if required - actual:
return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Translate travis requirements to env requirements
env_requirements = [(TRAVIS_FACTORS[factor], value) for (factor, value) in parse_dict(section.get('travis', '')).items() if factor in TRAVIS_FACTORS] + [(name, value) for (name, value) in parse_dict(section.get('env', '')).items()]
return all([os.environ.get(name) == value for (name, value) in env_requirements]) |
def option(*args, **kwargs):
"""Decorator to add an option to the optparser argument of a Cmdln
subcommand.
Example:
class MyShell(cmdln.Cmdln):
@cmdln.option("-f", "--force", help="force removal")
def do_remove(self, subcmd, opts, *args):
#...
"""
#XXX Is there a possible optimization for many options to not have a
# large stack depth here?
def decorate(f):
if not hasattr(f, "optparser"):
f.optparser = SubCmdOptionParser()
f.optparser.add_option(*args, **kwargs)
return f
return decorate | def function[option, parameter[]]:
constant[Decorator to add an option to the optparser argument of a Cmdln
subcommand.
Example:
class MyShell(cmdln.Cmdln):
@cmdln.option("-f", "--force", help="force removal")
def do_remove(self, subcmd, opts, *args):
#...
]
def function[decorate, parameter[f]]:
if <ast.UnaryOp object at 0x7da1b056baf0> begin[:]
name[f].optparser assign[=] call[name[SubCmdOptionParser], parameter[]]
call[name[f].optparser.add_option, parameter[<ast.Starred object at 0x7da1b0569ba0>]]
return[name[f]]
return[name[decorate]] | keyword[def] identifier[option] (* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[def] identifier[decorate] ( identifier[f] ):
keyword[if] keyword[not] identifier[hasattr] ( identifier[f] , literal[string] ):
identifier[f] . identifier[optparser] = identifier[SubCmdOptionParser] ()
identifier[f] . identifier[optparser] . identifier[add_option] (* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[f]
keyword[return] identifier[decorate] | def option(*args, **kwargs):
"""Decorator to add an option to the optparser argument of a Cmdln
subcommand.
Example:
class MyShell(cmdln.Cmdln):
@cmdln.option("-f", "--force", help="force removal")
def do_remove(self, subcmd, opts, *args):
#...
"""
#XXX Is there a possible optimization for many options to not have a
# large stack depth here?
def decorate(f):
if not hasattr(f, 'optparser'):
f.optparser = SubCmdOptionParser() # depends on [control=['if'], data=[]]
f.optparser.add_option(*args, **kwargs)
return f
return decorate |
def get_gaps_and_overlaps2(self, tier1, tier2, maxlen=-1):
"""Faster variant of :func:`get_gaps_and_overlaps`. Faster in this case
means almost 100 times faster...
:param str tier1: Name of the first tier.
:param str tier2: Name of the second tier.
:param int maxlen: Maximum length of gaps (skip longer ones), if ``-1``
no maximum will be used.
:yields: Tuples of the form ``[(start, end, type)]``.
:raises KeyError: If a tier is non existent.
"""
ad = sorted(((a, i+1) for i, t in enumerate([tier1, tier2]) for a in
self.get_annotation_data_for_tier(t)), reverse=True)
if ad:
last = (lambda x: (x[0][0], x[0][1], x[1]))(ad.pop())
def thr(x, y):
return maxlen == -1 or abs(x-y) < maxlen
while ad:
(begin, end, _), current = ad.pop()
if last[2] == current and thr(begin, last[1]):
yield (last[1], begin, 'P{}'.format(current))
elif last[0] < begin and last[1] > end:
yield (begin, end, 'W{}{}'.format(last[2], current))
continue
elif last[1] > begin:
yield (begin, last[1], 'O{}{}'.format(last[2], current))
elif last[1] < begin and thr(begin, last[1]):
yield (last[1], begin, 'G{}{}'.format(last[2], current))
last = (begin, end, current) | def function[get_gaps_and_overlaps2, parameter[self, tier1, tier2, maxlen]]:
constant[Faster variant of :func:`get_gaps_and_overlaps`. Faster in this case
means almost 100 times faster...
:param str tier1: Name of the first tier.
:param str tier2: Name of the second tier.
:param int maxlen: Maximum length of gaps (skip longer ones), if ``-1``
no maximum will be used.
:yields: Tuples of the form ``[(start, end, type)]``.
:raises KeyError: If a tier is non existent.
]
variable[ad] assign[=] call[name[sorted], parameter[<ast.GeneratorExp object at 0x7da1b0287e20>]]
if name[ad] begin[:]
variable[last] assign[=] call[<ast.Lambda object at 0x7da1b02874f0>, parameter[call[name[ad].pop, parameter[]]]]
def function[thr, parameter[x, y]]:
return[<ast.BoolOp object at 0x7da1b02878b0>]
while name[ad] begin[:]
<ast.Tuple object at 0x7da1b0286920> assign[=] call[name[ad].pop, parameter[]]
if <ast.BoolOp object at 0x7da18f810520> begin[:]
<ast.Yield object at 0x7da18f8136a0>
variable[last] assign[=] tuple[[<ast.Name object at 0x7da1b0216290>, <ast.Name object at 0x7da1b02154b0>, <ast.Name object at 0x7da1b0215c00>]] | keyword[def] identifier[get_gaps_and_overlaps2] ( identifier[self] , identifier[tier1] , identifier[tier2] , identifier[maxlen] =- literal[int] ):
literal[string]
identifier[ad] = identifier[sorted] ((( identifier[a] , identifier[i] + literal[int] ) keyword[for] identifier[i] , identifier[t] keyword[in] identifier[enumerate] ([ identifier[tier1] , identifier[tier2] ]) keyword[for] identifier[a] keyword[in]
identifier[self] . identifier[get_annotation_data_for_tier] ( identifier[t] )), identifier[reverse] = keyword[True] )
keyword[if] identifier[ad] :
identifier[last] =( keyword[lambda] identifier[x] :( identifier[x] [ literal[int] ][ literal[int] ], identifier[x] [ literal[int] ][ literal[int] ], identifier[x] [ literal[int] ]))( identifier[ad] . identifier[pop] ())
keyword[def] identifier[thr] ( identifier[x] , identifier[y] ):
keyword[return] identifier[maxlen] ==- literal[int] keyword[or] identifier[abs] ( identifier[x] - identifier[y] )< identifier[maxlen]
keyword[while] identifier[ad] :
( identifier[begin] , identifier[end] , identifier[_] ), identifier[current] = identifier[ad] . identifier[pop] ()
keyword[if] identifier[last] [ literal[int] ]== identifier[current] keyword[and] identifier[thr] ( identifier[begin] , identifier[last] [ literal[int] ]):
keyword[yield] ( identifier[last] [ literal[int] ], identifier[begin] , literal[string] . identifier[format] ( identifier[current] ))
keyword[elif] identifier[last] [ literal[int] ]< identifier[begin] keyword[and] identifier[last] [ literal[int] ]> identifier[end] :
keyword[yield] ( identifier[begin] , identifier[end] , literal[string] . identifier[format] ( identifier[last] [ literal[int] ], identifier[current] ))
keyword[continue]
keyword[elif] identifier[last] [ literal[int] ]> identifier[begin] :
keyword[yield] ( identifier[begin] , identifier[last] [ literal[int] ], literal[string] . identifier[format] ( identifier[last] [ literal[int] ], identifier[current] ))
keyword[elif] identifier[last] [ literal[int] ]< identifier[begin] keyword[and] identifier[thr] ( identifier[begin] , identifier[last] [ literal[int] ]):
keyword[yield] ( identifier[last] [ literal[int] ], identifier[begin] , literal[string] . identifier[format] ( identifier[last] [ literal[int] ], identifier[current] ))
identifier[last] =( identifier[begin] , identifier[end] , identifier[current] ) | def get_gaps_and_overlaps2(self, tier1, tier2, maxlen=-1):
"""Faster variant of :func:`get_gaps_and_overlaps`. Faster in this case
means almost 100 times faster...
:param str tier1: Name of the first tier.
:param str tier2: Name of the second tier.
:param int maxlen: Maximum length of gaps (skip longer ones), if ``-1``
no maximum will be used.
:yields: Tuples of the form ``[(start, end, type)]``.
:raises KeyError: If a tier is non existent.
"""
ad = sorted(((a, i + 1) for (i, t) in enumerate([tier1, tier2]) for a in self.get_annotation_data_for_tier(t)), reverse=True)
if ad:
last = (lambda x: (x[0][0], x[0][1], x[1]))(ad.pop())
def thr(x, y):
return maxlen == -1 or abs(x - y) < maxlen
while ad:
((begin, end, _), current) = ad.pop()
if last[2] == current and thr(begin, last[1]):
yield (last[1], begin, 'P{}'.format(current)) # depends on [control=['if'], data=[]]
elif last[0] < begin and last[1] > end:
yield (begin, end, 'W{}{}'.format(last[2], current))
continue # depends on [control=['if'], data=[]]
elif last[1] > begin:
yield (begin, last[1], 'O{}{}'.format(last[2], current)) # depends on [control=['if'], data=['begin']]
elif last[1] < begin and thr(begin, last[1]):
yield (last[1], begin, 'G{}{}'.format(last[2], current)) # depends on [control=['if'], data=[]]
last = (begin, end, current) # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]] |
def get_commits_and_names_iter(self, path):
'''
Get all commits including a given path following renames
'''
log_result = self.git.log(
'--pretty=%H',
'--follow',
'--name-only',
'--',
path).splitlines()
for commit_sha, _, filename in grouper(log_result, 3):
yield self.repo.commit(commit_sha), filename | def function[get_commits_and_names_iter, parameter[self, path]]:
constant[
Get all commits including a given path following renames
]
variable[log_result] assign[=] call[call[name[self].git.log, parameter[constant[--pretty=%H], constant[--follow], constant[--name-only], constant[--], name[path]]].splitlines, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b1d07430>, <ast.Name object at 0x7da1b1d07640>, <ast.Name object at 0x7da1b1d04ca0>]]] in starred[call[name[grouper], parameter[name[log_result], constant[3]]]] begin[:]
<ast.Yield object at 0x7da1b1d06260> | keyword[def] identifier[get_commits_and_names_iter] ( identifier[self] , identifier[path] ):
literal[string]
identifier[log_result] = identifier[self] . identifier[git] . identifier[log] (
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
identifier[path] ). identifier[splitlines] ()
keyword[for] identifier[commit_sha] , identifier[_] , identifier[filename] keyword[in] identifier[grouper] ( identifier[log_result] , literal[int] ):
keyword[yield] identifier[self] . identifier[repo] . identifier[commit] ( identifier[commit_sha] ), identifier[filename] | def get_commits_and_names_iter(self, path):
"""
Get all commits including a given path following renames
"""
log_result = self.git.log('--pretty=%H', '--follow', '--name-only', '--', path).splitlines()
for (commit_sha, _, filename) in grouper(log_result, 3):
yield (self.repo.commit(commit_sha), filename) # depends on [control=['for'], data=[]] |
def _calculate_channels(records, sections, split_interval, channel_funcs, user, c_start=0):
"""
Used to group a list of records across a week as defined by the supplied
sections. Outputs a list containing records in each section and a list with
info to identify those sections.
Parameters
----------
records : list
The week of records to calculate the channels for.
sections : list
The list of sections for grouping. Each section will have an integer
value stating the minutes away from midnight between Sunday and Monday.
split_interval : int
The interval in minutes for which each indicator is computed.
channel_funcs : list
Indicator functions that generate the values for the week-matrix.
user : object
The user to calculate channels for.
c_start : num
Start numbering of channels from this value. Optional parameter. Default
value of 0. Used when adding channels to the same user using different
lists of records.
"""
week_matrix = []
if len(records) == 0:
return week_matrix
if not isinstance(records, list):
records = [records]
year_week = str(records[0].datetime.isocalendar()[
0]) + '-' + str(records[0].datetime.isocalendar()[1])
section_lists, section_id = _weekmatrix_grouping(records, sections, split_interval)
for c, fun in enumerate(channel_funcs):
for b, section_records in enumerate(section_lists):
indicator_fun, return_type = fun
# _records is used to avoid recomputing home
user._records = section_records
user._cache = {}
output = list(indicator_fun(user)['allweek']['allday'].values())[0]
if return_type == 'scalar':
indicator = sum(d for d in output if d is not None)
elif return_type == 'summarystats':
indicator = sum(d for group in output for d in group if d is not None)
if indicator != 0:
week_matrix.append(
[year_week, c + c_start, section_id[b][0], section_id[b][1], float(indicator)])
return week_matrix | def function[_calculate_channels, parameter[records, sections, split_interval, channel_funcs, user, c_start]]:
constant[
Used to group a list of records across a week as defined by the supplied
sections. Outputs a list containing records in each section and a list with
info to identify those sections.
Parameters
----------
records : list
The week of records to calculate the channels for.
sections : list
The list of sections for grouping. Each section will have an integer
value stating the minutes away from midnight between Sunday and Monday.
split_interval : int
The interval in minutes for which each indicator is computed.
channel_funcs : list
Indicator functions that generate the values for the week-matrix.
user : object
The user to calculate channels for.
c_start : num
Start numbering of channels from this value. Optional parameter. Default
value of 0. Used when adding channels to the same user using different
lists of records.
]
variable[week_matrix] assign[=] list[[]]
if compare[call[name[len], parameter[name[records]]] equal[==] constant[0]] begin[:]
return[name[week_matrix]]
if <ast.UnaryOp object at 0x7da1b0d3ef50> begin[:]
variable[records] assign[=] list[[<ast.Name object at 0x7da1b0d3db40>]]
variable[year_week] assign[=] binary_operation[binary_operation[call[name[str], parameter[call[call[call[name[records]][constant[0]].datetime.isocalendar, parameter[]]][constant[0]]]] + constant[-]] + call[name[str], parameter[call[call[call[name[records]][constant[0]].datetime.isocalendar, parameter[]]][constant[1]]]]]
<ast.Tuple object at 0x7da1b0d3d120> assign[=] call[name[_weekmatrix_grouping], parameter[name[records], name[sections], name[split_interval]]]
for taget[tuple[[<ast.Name object at 0x7da1b0d3c700>, <ast.Name object at 0x7da1b0d3c9d0>]]] in starred[call[name[enumerate], parameter[name[channel_funcs]]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b0d3e6b0>, <ast.Name object at 0x7da1b0d3c340>]]] in starred[call[name[enumerate], parameter[name[section_lists]]]] begin[:]
<ast.Tuple object at 0x7da1b0d3e2c0> assign[=] name[fun]
name[user]._records assign[=] name[section_records]
name[user]._cache assign[=] dictionary[[], []]
variable[output] assign[=] call[call[name[list], parameter[call[call[call[call[name[indicator_fun], parameter[name[user]]]][constant[allweek]]][constant[allday]].values, parameter[]]]]][constant[0]]
if compare[name[return_type] equal[==] constant[scalar]] begin[:]
variable[indicator] assign[=] call[name[sum], parameter[<ast.GeneratorExp object at 0x7da1b0d3d180>]]
if compare[name[indicator] not_equal[!=] constant[0]] begin[:]
call[name[week_matrix].append, parameter[list[[<ast.Name object at 0x7da1b0d3cbe0>, <ast.BinOp object at 0x7da1b0d3e980>, <ast.Subscript object at 0x7da1b0d3d5a0>, <ast.Subscript object at 0x7da1b0d3c0d0>, <ast.Call object at 0x7da1b0d3d8d0>]]]]
return[name[week_matrix]] | keyword[def] identifier[_calculate_channels] ( identifier[records] , identifier[sections] , identifier[split_interval] , identifier[channel_funcs] , identifier[user] , identifier[c_start] = literal[int] ):
literal[string]
identifier[week_matrix] =[]
keyword[if] identifier[len] ( identifier[records] )== literal[int] :
keyword[return] identifier[week_matrix]
keyword[if] keyword[not] identifier[isinstance] ( identifier[records] , identifier[list] ):
identifier[records] =[ identifier[records] ]
identifier[year_week] = identifier[str] ( identifier[records] [ literal[int] ]. identifier[datetime] . identifier[isocalendar] ()[
literal[int] ])+ literal[string] + identifier[str] ( identifier[records] [ literal[int] ]. identifier[datetime] . identifier[isocalendar] ()[ literal[int] ])
identifier[section_lists] , identifier[section_id] = identifier[_weekmatrix_grouping] ( identifier[records] , identifier[sections] , identifier[split_interval] )
keyword[for] identifier[c] , identifier[fun] keyword[in] identifier[enumerate] ( identifier[channel_funcs] ):
keyword[for] identifier[b] , identifier[section_records] keyword[in] identifier[enumerate] ( identifier[section_lists] ):
identifier[indicator_fun] , identifier[return_type] = identifier[fun]
identifier[user] . identifier[_records] = identifier[section_records]
identifier[user] . identifier[_cache] ={}
identifier[output] = identifier[list] ( identifier[indicator_fun] ( identifier[user] )[ literal[string] ][ literal[string] ]. identifier[values] ())[ literal[int] ]
keyword[if] identifier[return_type] == literal[string] :
identifier[indicator] = identifier[sum] ( identifier[d] keyword[for] identifier[d] keyword[in] identifier[output] keyword[if] identifier[d] keyword[is] keyword[not] keyword[None] )
keyword[elif] identifier[return_type] == literal[string] :
identifier[indicator] = identifier[sum] ( identifier[d] keyword[for] identifier[group] keyword[in] identifier[output] keyword[for] identifier[d] keyword[in] identifier[group] keyword[if] identifier[d] keyword[is] keyword[not] keyword[None] )
keyword[if] identifier[indicator] != literal[int] :
identifier[week_matrix] . identifier[append] (
[ identifier[year_week] , identifier[c] + identifier[c_start] , identifier[section_id] [ identifier[b] ][ literal[int] ], identifier[section_id] [ identifier[b] ][ literal[int] ], identifier[float] ( identifier[indicator] )])
keyword[return] identifier[week_matrix] | def _calculate_channels(records, sections, split_interval, channel_funcs, user, c_start=0):
"""
Used to group a list of records across a week as defined by the supplied
sections. Outputs a list containing records in each section and a list with
info to identify those sections.
Parameters
----------
records : list
The week of records to calculate the channels for.
sections : list
The list of sections for grouping. Each section will have an integer
value stating the minutes away from midnight between Sunday and Monday.
split_interval : int
The interval in minutes for which each indicator is computed.
channel_funcs : list
Indicator functions that generate the values for the week-matrix.
user : object
The user to calculate channels for.
c_start : num
Start numbering of channels from this value. Optional parameter. Default
value of 0. Used when adding channels to the same user using different
lists of records.
"""
week_matrix = []
if len(records) == 0:
return week_matrix # depends on [control=['if'], data=[]]
if not isinstance(records, list):
records = [records] # depends on [control=['if'], data=[]]
year_week = str(records[0].datetime.isocalendar()[0]) + '-' + str(records[0].datetime.isocalendar()[1])
(section_lists, section_id) = _weekmatrix_grouping(records, sections, split_interval)
for (c, fun) in enumerate(channel_funcs):
for (b, section_records) in enumerate(section_lists):
(indicator_fun, return_type) = fun
# _records is used to avoid recomputing home
user._records = section_records
user._cache = {}
output = list(indicator_fun(user)['allweek']['allday'].values())[0]
if return_type == 'scalar':
indicator = sum((d for d in output if d is not None)) # depends on [control=['if'], data=[]]
elif return_type == 'summarystats':
indicator = sum((d for group in output for d in group if d is not None)) # depends on [control=['if'], data=[]]
if indicator != 0:
week_matrix.append([year_week, c + c_start, section_id[b][0], section_id[b][1], float(indicator)]) # depends on [control=['if'], data=['indicator']] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
return week_matrix |
def sort_ordered_objects(items, getter=lambda x: x):
"""Sort an iterable of OrderedBase instances.
Args:
items (iterable): the objects to sort
getter (callable or None): a function to extract the OrderedBase instance from an object.
Examples:
>>> sort_ordered_objects([x, y, z])
>>> sort_ordered_objects(v.items(), getter=lambda e: e[1])
"""
return sorted(items, key=lambda x: getattr(getter(x), OrderedBase.CREATION_COUNTER_FIELD, -1)) | def function[sort_ordered_objects, parameter[items, getter]]:
constant[Sort an iterable of OrderedBase instances.
Args:
items (iterable): the objects to sort
getter (callable or None): a function to extract the OrderedBase instance from an object.
Examples:
>>> sort_ordered_objects([x, y, z])
>>> sort_ordered_objects(v.items(), getter=lambda e: e[1])
]
return[call[name[sorted], parameter[name[items]]]] | keyword[def] identifier[sort_ordered_objects] ( identifier[items] , identifier[getter] = keyword[lambda] identifier[x] : identifier[x] ):
literal[string]
keyword[return] identifier[sorted] ( identifier[items] , identifier[key] = keyword[lambda] identifier[x] : identifier[getattr] ( identifier[getter] ( identifier[x] ), identifier[OrderedBase] . identifier[CREATION_COUNTER_FIELD] ,- literal[int] )) | def sort_ordered_objects(items, getter=lambda x: x):
"""Sort an iterable of OrderedBase instances.
Args:
items (iterable): the objects to sort
getter (callable or None): a function to extract the OrderedBase instance from an object.
Examples:
>>> sort_ordered_objects([x, y, z])
>>> sort_ordered_objects(v.items(), getter=lambda e: e[1])
"""
return sorted(items, key=lambda x: getattr(getter(x), OrderedBase.CREATION_COUNTER_FIELD, -1)) |
def delete_all(self, filter=None, timeout=-1):
"""
Delete an SNMPv3 User based on User name specified in filter. The user will be deleted only if it has no associated destinations.
Args:
username: ID or URI of SNMPv3 user.
filter: A general filter/query string to narrow the list of items returned.
The default is no filter - all resources are returned.
Returns:
bool: Indicates if the resource was successfully deleted.
"""
return self._client.delete_all(filter=filter, timeout=timeout) | def function[delete_all, parameter[self, filter, timeout]]:
constant[
Delete an SNMPv3 User based on User name specified in filter. The user will be deleted only if it has no associated destinations.
Args:
username: ID or URI of SNMPv3 user.
filter: A general filter/query string to narrow the list of items returned.
The default is no filter - all resources are returned.
Returns:
bool: Indicates if the resource was successfully deleted.
]
return[call[name[self]._client.delete_all, parameter[]]] | keyword[def] identifier[delete_all] ( identifier[self] , identifier[filter] = keyword[None] , identifier[timeout] =- literal[int] ):
literal[string]
keyword[return] identifier[self] . identifier[_client] . identifier[delete_all] ( identifier[filter] = identifier[filter] , identifier[timeout] = identifier[timeout] ) | def delete_all(self, filter=None, timeout=-1):
"""
Delete an SNMPv3 User based on User name specified in filter. The user will be deleted only if it has no associated destinations.
Args:
username: ID or URI of SNMPv3 user.
filter: A general filter/query string to narrow the list of items returned.
The default is no filter - all resources are returned.
Returns:
bool: Indicates if the resource was successfully deleted.
"""
return self._client.delete_all(filter=filter, timeout=timeout) |
def _expect_inline_link(text, start):
"""(link_dest "link_title")"""
if start >= len(text) or text[start] != '(':
return None
i = start + 1
m = patterns.whitespace.match(text, i)
if m:
i = m.end()
m = patterns.link_dest_1.match(text, i)
if m:
link_dest = m.start(), m.end(), m.group()
i = m.end()
else:
open_num = 0
escaped = False
prev = i
while i < len(text):
c = text[i]
if escaped:
escaped = False
elif c == '\\':
escaped = True
elif c == '(':
open_num += 1
elif c in string.whitespace:
break
elif c == ')':
if open_num > 0:
open_num -= 1
else:
break
i += 1
if open_num != 0:
return None
link_dest = prev, i, text[prev:i]
link_title = i, i, None
tail_re = re.compile(r'(?:\s+%s)?\s*\)' % patterns.link_title, flags=re.UNICODE)
m = tail_re.match(text, i)
if not m:
return None
if m.group('title'):
link_title = m.start('title'), m.end('title'), m.group('title')
return (link_dest, link_title, m.end()) | def function[_expect_inline_link, parameter[text, start]]:
constant[(link_dest "link_title")]
if <ast.BoolOp object at 0x7da1b26aebc0> begin[:]
return[constant[None]]
variable[i] assign[=] binary_operation[name[start] + constant[1]]
variable[m] assign[=] call[name[patterns].whitespace.match, parameter[name[text], name[i]]]
if name[m] begin[:]
variable[i] assign[=] call[name[m].end, parameter[]]
variable[m] assign[=] call[name[patterns].link_dest_1.match, parameter[name[text], name[i]]]
if name[m] begin[:]
variable[link_dest] assign[=] tuple[[<ast.Call object at 0x7da2047e8400>, <ast.Call object at 0x7da2047e88b0>, <ast.Call object at 0x7da2047e8c40>]]
variable[i] assign[=] call[name[m].end, parameter[]]
variable[link_title] assign[=] tuple[[<ast.Name object at 0x7da20c990ac0>, <ast.Name object at 0x7da20c990760>, <ast.Constant object at 0x7da20c9924a0>]]
variable[tail_re] assign[=] call[name[re].compile, parameter[binary_operation[constant[(?:\s+%s)?\s*\)] <ast.Mod object at 0x7da2590d6920> name[patterns].link_title]]]
variable[m] assign[=] call[name[tail_re].match, parameter[name[text], name[i]]]
if <ast.UnaryOp object at 0x7da20c9934c0> begin[:]
return[constant[None]]
if call[name[m].group, parameter[constant[title]]] begin[:]
variable[link_title] assign[=] tuple[[<ast.Call object at 0x7da18ede5f00>, <ast.Call object at 0x7da18ede6020>, <ast.Call object at 0x7da18ede5a20>]]
return[tuple[[<ast.Name object at 0x7da18ede4460>, <ast.Name object at 0x7da18ede6c50>, <ast.Call object at 0x7da18ede5d20>]]] | keyword[def] identifier[_expect_inline_link] ( identifier[text] , identifier[start] ):
literal[string]
keyword[if] identifier[start] >= identifier[len] ( identifier[text] ) keyword[or] identifier[text] [ identifier[start] ]!= literal[string] :
keyword[return] keyword[None]
identifier[i] = identifier[start] + literal[int]
identifier[m] = identifier[patterns] . identifier[whitespace] . identifier[match] ( identifier[text] , identifier[i] )
keyword[if] identifier[m] :
identifier[i] = identifier[m] . identifier[end] ()
identifier[m] = identifier[patterns] . identifier[link_dest_1] . identifier[match] ( identifier[text] , identifier[i] )
keyword[if] identifier[m] :
identifier[link_dest] = identifier[m] . identifier[start] (), identifier[m] . identifier[end] (), identifier[m] . identifier[group] ()
identifier[i] = identifier[m] . identifier[end] ()
keyword[else] :
identifier[open_num] = literal[int]
identifier[escaped] = keyword[False]
identifier[prev] = identifier[i]
keyword[while] identifier[i] < identifier[len] ( identifier[text] ):
identifier[c] = identifier[text] [ identifier[i] ]
keyword[if] identifier[escaped] :
identifier[escaped] = keyword[False]
keyword[elif] identifier[c] == literal[string] :
identifier[escaped] = keyword[True]
keyword[elif] identifier[c] == literal[string] :
identifier[open_num] += literal[int]
keyword[elif] identifier[c] keyword[in] identifier[string] . identifier[whitespace] :
keyword[break]
keyword[elif] identifier[c] == literal[string] :
keyword[if] identifier[open_num] > literal[int] :
identifier[open_num] -= literal[int]
keyword[else] :
keyword[break]
identifier[i] += literal[int]
keyword[if] identifier[open_num] != literal[int] :
keyword[return] keyword[None]
identifier[link_dest] = identifier[prev] , identifier[i] , identifier[text] [ identifier[prev] : identifier[i] ]
identifier[link_title] = identifier[i] , identifier[i] , keyword[None]
identifier[tail_re] = identifier[re] . identifier[compile] ( literal[string] % identifier[patterns] . identifier[link_title] , identifier[flags] = identifier[re] . identifier[UNICODE] )
identifier[m] = identifier[tail_re] . identifier[match] ( identifier[text] , identifier[i] )
keyword[if] keyword[not] identifier[m] :
keyword[return] keyword[None]
keyword[if] identifier[m] . identifier[group] ( literal[string] ):
identifier[link_title] = identifier[m] . identifier[start] ( literal[string] ), identifier[m] . identifier[end] ( literal[string] ), identifier[m] . identifier[group] ( literal[string] )
keyword[return] ( identifier[link_dest] , identifier[link_title] , identifier[m] . identifier[end] ()) | def _expect_inline_link(text, start):
"""(link_dest "link_title")"""
if start >= len(text) or text[start] != '(':
return None # depends on [control=['if'], data=[]]
i = start + 1
m = patterns.whitespace.match(text, i)
if m:
i = m.end() # depends on [control=['if'], data=[]]
m = patterns.link_dest_1.match(text, i)
if m:
link_dest = (m.start(), m.end(), m.group())
i = m.end() # depends on [control=['if'], data=[]]
else:
open_num = 0
escaped = False
prev = i
while i < len(text):
c = text[i]
if escaped:
escaped = False # depends on [control=['if'], data=[]]
elif c == '\\':
escaped = True # depends on [control=['if'], data=[]]
elif c == '(':
open_num += 1 # depends on [control=['if'], data=[]]
elif c in string.whitespace:
break # depends on [control=['if'], data=[]]
elif c == ')':
if open_num > 0:
open_num -= 1 # depends on [control=['if'], data=['open_num']]
else:
break # depends on [control=['if'], data=[]]
i += 1 # depends on [control=['while'], data=['i']]
if open_num != 0:
return None # depends on [control=['if'], data=[]]
link_dest = (prev, i, text[prev:i])
link_title = (i, i, None)
tail_re = re.compile('(?:\\s+%s)?\\s*\\)' % patterns.link_title, flags=re.UNICODE)
m = tail_re.match(text, i)
if not m:
return None # depends on [control=['if'], data=[]]
if m.group('title'):
link_title = (m.start('title'), m.end('title'), m.group('title')) # depends on [control=['if'], data=[]]
return (link_dest, link_title, m.end()) |
def _get_nic_attachements(self, maximum_adapters):
"""
Returns NIC attachements.
:param maximum_adapters: maximum number of supported adapters
:returns: list of adapters with their Attachment setting (NAT, bridged etc.)
"""
nics = []
vm_info = yield from self._get_vm_info()
for adapter_number in range(0, maximum_adapters):
entry = "nic{}".format(adapter_number + 1)
if entry in vm_info:
value = vm_info[entry]
nics.append(value.lower())
else:
nics.append(None)
return nics | def function[_get_nic_attachements, parameter[self, maximum_adapters]]:
constant[
Returns NIC attachements.
:param maximum_adapters: maximum number of supported adapters
:returns: list of adapters with their Attachment setting (NAT, bridged etc.)
]
variable[nics] assign[=] list[[]]
variable[vm_info] assign[=] <ast.YieldFrom object at 0x7da18ede60e0>
for taget[name[adapter_number]] in starred[call[name[range], parameter[constant[0], name[maximum_adapters]]]] begin[:]
variable[entry] assign[=] call[constant[nic{}].format, parameter[binary_operation[name[adapter_number] + constant[1]]]]
if compare[name[entry] in name[vm_info]] begin[:]
variable[value] assign[=] call[name[vm_info]][name[entry]]
call[name[nics].append, parameter[call[name[value].lower, parameter[]]]]
return[name[nics]] | keyword[def] identifier[_get_nic_attachements] ( identifier[self] , identifier[maximum_adapters] ):
literal[string]
identifier[nics] =[]
identifier[vm_info] = keyword[yield] keyword[from] identifier[self] . identifier[_get_vm_info] ()
keyword[for] identifier[adapter_number] keyword[in] identifier[range] ( literal[int] , identifier[maximum_adapters] ):
identifier[entry] = literal[string] . identifier[format] ( identifier[adapter_number] + literal[int] )
keyword[if] identifier[entry] keyword[in] identifier[vm_info] :
identifier[value] = identifier[vm_info] [ identifier[entry] ]
identifier[nics] . identifier[append] ( identifier[value] . identifier[lower] ())
keyword[else] :
identifier[nics] . identifier[append] ( keyword[None] )
keyword[return] identifier[nics] | def _get_nic_attachements(self, maximum_adapters):
"""
Returns NIC attachements.
:param maximum_adapters: maximum number of supported adapters
:returns: list of adapters with their Attachment setting (NAT, bridged etc.)
"""
nics = []
vm_info = (yield from self._get_vm_info())
for adapter_number in range(0, maximum_adapters):
entry = 'nic{}'.format(adapter_number + 1)
if entry in vm_info:
value = vm_info[entry]
nics.append(value.lower()) # depends on [control=['if'], data=['entry', 'vm_info']]
else:
nics.append(None) # depends on [control=['for'], data=['adapter_number']]
return nics |
def get_search_result(self, ddoc_id, index_name, **query_params):
"""
Retrieves the raw JSON content from the remote database based on the
search index on the server, using the query_params provided as query
parameters. A ``query`` parameter containing the Lucene query
syntax is mandatory.
Example for search queries:
.. code-block:: python
# Assuming that 'searchindex001' exists as part of the
# 'ddoc001' design document in the remote database...
# Retrieve documents where the Lucene field name is 'name' and
# the value is 'julia*'
resp = db.get_search_result('ddoc001', 'searchindex001',
query='name:julia*',
include_docs=True)
for row in resp['rows']:
# Process search index data (in JSON format).
Example if the search query requires grouping by using
the ``group_field`` parameter:
.. code-block:: python
# Assuming that 'searchindex001' exists as part of the
# 'ddoc001' design document in the remote database...
# Retrieve JSON response content, limiting response to 10 documents
resp = db.get_search_result('ddoc001', 'searchindex001',
query='name:julia*',
group_field='name',
limit=10)
for group in resp['groups']:
for row in group['rows']:
# Process search index data (in JSON format).
:param str ddoc_id: Design document id used to get the search result.
:param str index_name: Name used in part to identify the index.
:param str bookmark: Optional string that enables you to specify which
page of results you require. Only valid for queries that do not
specify the ``group_field`` query parameter.
:param list counts: Optional JSON array of field names for which
counts should be produced. The response will contain counts for each
unique value of this field name among the documents matching the
search query.
Requires the index to have faceting enabled.
:param list drilldown: Optional list of fields that each define a
pair of a field name and a value. This field can be used several
times. The search will only match documents that have the given
value in the field name. It differs from using
``query=fieldname:value`` only in that the values are not analyzed.
:param str group_field: Optional string field by which to group
search matches. Fields containing other data
(numbers, objects, arrays) can not be used.
:param int group_limit: Optional number with the maximum group count.
This field can only be used if ``group_field`` query parameter
is specified.
:param group_sort: Optional JSON field that defines the order of the
groups in a search using ``group_field``. The default sort order
is relevance. This field can have the same values as the sort field,
so single fields as well as arrays of fields are supported.
:param int limit: Optional number to limit the maximum count of the
returned documents. In case of a grouped search, this parameter
limits the number of documents per group.
:param query/q: A Lucene query in the form of ``name:value``.
If name is omitted, the special value ``default`` is used.
The ``query`` parameter can be abbreviated as ``q``.
:param ranges: Optional JSON facet syntax that reuses the standard
Lucene syntax to return counts of results which fit into each
specified category. Inclusive range queries are denoted by brackets.
Exclusive range queries are denoted by curly brackets.
For example ``ranges={"price":{"cheap":"[0 TO 100]"}}`` has an
inclusive range of 0 to 100.
Requires the index to have faceting enabled.
:param sort: Optional JSON string of the form ``fieldname<type>`` for
ascending or ``-fieldname<type>`` for descending sort order.
Fieldname is the name of a string or number field and type is either
number or string or a JSON array of such strings. The type part is
optional and defaults to number.
:param str stale: Optional string to allow the results from a stale
index to be used. This makes the request return immediately, even
if the index has not been completely built yet.
:param list highlight_fields: Optional list of fields which should be
highlighted.
:param str highlight_pre_tag: Optional string inserted before the
highlighted word in the highlights output. Defaults to ``<em>``.
:param str highlight_post_tag: Optional string inserted after the
highlighted word in the highlights output. Defaults to ``</em>``.
:param int highlight_number: Optional number of fragments returned in
highlights. If the search term occurs less often than the number of
fragments specified, longer fragments are returned. Default is 1.
:param int highlight_size: Optional number of characters in each
fragment for highlights. Defaults to 100 characters.
:param list include_fields: Optional list of field names to include in
search results. Any fields included must have been indexed with the
``store:true`` option.
:returns: Search query result data in JSON format
"""
ddoc = DesignDocument(self, ddoc_id)
return self._get_search_result(
'/'.join((ddoc.document_url, '_search', index_name)),
**query_params
) | def function[get_search_result, parameter[self, ddoc_id, index_name]]:
constant[
Retrieves the raw JSON content from the remote database based on the
search index on the server, using the query_params provided as query
parameters. A ``query`` parameter containing the Lucene query
syntax is mandatory.
Example for search queries:
.. code-block:: python
# Assuming that 'searchindex001' exists as part of the
# 'ddoc001' design document in the remote database...
# Retrieve documents where the Lucene field name is 'name' and
# the value is 'julia*'
resp = db.get_search_result('ddoc001', 'searchindex001',
query='name:julia*',
include_docs=True)
for row in resp['rows']:
# Process search index data (in JSON format).
Example if the search query requires grouping by using
the ``group_field`` parameter:
.. code-block:: python
# Assuming that 'searchindex001' exists as part of the
# 'ddoc001' design document in the remote database...
# Retrieve JSON response content, limiting response to 10 documents
resp = db.get_search_result('ddoc001', 'searchindex001',
query='name:julia*',
group_field='name',
limit=10)
for group in resp['groups']:
for row in group['rows']:
# Process search index data (in JSON format).
:param str ddoc_id: Design document id used to get the search result.
:param str index_name: Name used in part to identify the index.
:param str bookmark: Optional string that enables you to specify which
page of results you require. Only valid for queries that do not
specify the ``group_field`` query parameter.
:param list counts: Optional JSON array of field names for which
counts should be produced. The response will contain counts for each
unique value of this field name among the documents matching the
search query.
Requires the index to have faceting enabled.
:param list drilldown: Optional list of fields that each define a
pair of a field name and a value. This field can be used several
times. The search will only match documents that have the given
value in the field name. It differs from using
``query=fieldname:value`` only in that the values are not analyzed.
:param str group_field: Optional string field by which to group
search matches. Fields containing other data
(numbers, objects, arrays) can not be used.
:param int group_limit: Optional number with the maximum group count.
This field can only be used if ``group_field`` query parameter
is specified.
:param group_sort: Optional JSON field that defines the order of the
groups in a search using ``group_field``. The default sort order
is relevance. This field can have the same values as the sort field,
so single fields as well as arrays of fields are supported.
:param int limit: Optional number to limit the maximum count of the
returned documents. In case of a grouped search, this parameter
limits the number of documents per group.
:param query/q: A Lucene query in the form of ``name:value``.
If name is omitted, the special value ``default`` is used.
The ``query`` parameter can be abbreviated as ``q``.
:param ranges: Optional JSON facet syntax that reuses the standard
Lucene syntax to return counts of results which fit into each
specified category. Inclusive range queries are denoted by brackets.
Exclusive range queries are denoted by curly brackets.
For example ``ranges={"price":{"cheap":"[0 TO 100]"}}`` has an
inclusive range of 0 to 100.
Requires the index to have faceting enabled.
:param sort: Optional JSON string of the form ``fieldname<type>`` for
ascending or ``-fieldname<type>`` for descending sort order.
Fieldname is the name of a string or number field and type is either
number or string or a JSON array of such strings. The type part is
optional and defaults to number.
:param str stale: Optional string to allow the results from a stale
index to be used. This makes the request return immediately, even
if the index has not been completely built yet.
:param list highlight_fields: Optional list of fields which should be
highlighted.
:param str highlight_pre_tag: Optional string inserted before the
highlighted word in the highlights output. Defaults to ``<em>``.
:param str highlight_post_tag: Optional string inserted after the
highlighted word in the highlights output. Defaults to ``</em>``.
:param int highlight_number: Optional number of fragments returned in
highlights. If the search term occurs less often than the number of
fragments specified, longer fragments are returned. Default is 1.
:param int highlight_size: Optional number of characters in each
fragment for highlights. Defaults to 100 characters.
:param list include_fields: Optional list of field names to include in
search results. Any fields included must have been indexed with the
``store:true`` option.
:returns: Search query result data in JSON format
]
variable[ddoc] assign[=] call[name[DesignDocument], parameter[name[self], name[ddoc_id]]]
return[call[name[self]._get_search_result, parameter[call[constant[/].join, parameter[tuple[[<ast.Attribute object at 0x7da18c4cfd90>, <ast.Constant object at 0x7da18c4cd180>, <ast.Name object at 0x7da18c4cfc10>]]]]]]] | keyword[def] identifier[get_search_result] ( identifier[self] , identifier[ddoc_id] , identifier[index_name] ,** identifier[query_params] ):
literal[string]
identifier[ddoc] = identifier[DesignDocument] ( identifier[self] , identifier[ddoc_id] )
keyword[return] identifier[self] . identifier[_get_search_result] (
literal[string] . identifier[join] (( identifier[ddoc] . identifier[document_url] , literal[string] , identifier[index_name] )),
** identifier[query_params]
) | def get_search_result(self, ddoc_id, index_name, **query_params):
"""
Retrieves the raw JSON content from the remote database based on the
search index on the server, using the query_params provided as query
parameters. A ``query`` parameter containing the Lucene query
syntax is mandatory.
Example for search queries:
.. code-block:: python
# Assuming that 'searchindex001' exists as part of the
# 'ddoc001' design document in the remote database...
# Retrieve documents where the Lucene field name is 'name' and
# the value is 'julia*'
resp = db.get_search_result('ddoc001', 'searchindex001',
query='name:julia*',
include_docs=True)
for row in resp['rows']:
# Process search index data (in JSON format).
Example if the search query requires grouping by using
the ``group_field`` parameter:
.. code-block:: python
# Assuming that 'searchindex001' exists as part of the
# 'ddoc001' design document in the remote database...
# Retrieve JSON response content, limiting response to 10 documents
resp = db.get_search_result('ddoc001', 'searchindex001',
query='name:julia*',
group_field='name',
limit=10)
for group in resp['groups']:
for row in group['rows']:
# Process search index data (in JSON format).
:param str ddoc_id: Design document id used to get the search result.
:param str index_name: Name used in part to identify the index.
:param str bookmark: Optional string that enables you to specify which
page of results you require. Only valid for queries that do not
specify the ``group_field`` query parameter.
:param list counts: Optional JSON array of field names for which
counts should be produced. The response will contain counts for each
unique value of this field name among the documents matching the
search query.
Requires the index to have faceting enabled.
:param list drilldown: Optional list of fields that each define a
pair of a field name and a value. This field can be used several
times. The search will only match documents that have the given
value in the field name. It differs from using
``query=fieldname:value`` only in that the values are not analyzed.
:param str group_field: Optional string field by which to group
search matches. Fields containing other data
(numbers, objects, arrays) can not be used.
:param int group_limit: Optional number with the maximum group count.
This field can only be used if ``group_field`` query parameter
is specified.
:param group_sort: Optional JSON field that defines the order of the
groups in a search using ``group_field``. The default sort order
is relevance. This field can have the same values as the sort field,
so single fields as well as arrays of fields are supported.
:param int limit: Optional number to limit the maximum count of the
returned documents. In case of a grouped search, this parameter
limits the number of documents per group.
:param query/q: A Lucene query in the form of ``name:value``.
If name is omitted, the special value ``default`` is used.
The ``query`` parameter can be abbreviated as ``q``.
:param ranges: Optional JSON facet syntax that reuses the standard
Lucene syntax to return counts of results which fit into each
specified category. Inclusive range queries are denoted by brackets.
Exclusive range queries are denoted by curly brackets.
For example ``ranges={"price":{"cheap":"[0 TO 100]"}}`` has an
inclusive range of 0 to 100.
Requires the index to have faceting enabled.
:param sort: Optional JSON string of the form ``fieldname<type>`` for
ascending or ``-fieldname<type>`` for descending sort order.
Fieldname is the name of a string or number field and type is either
number or string or a JSON array of such strings. The type part is
optional and defaults to number.
:param str stale: Optional string to allow the results from a stale
index to be used. This makes the request return immediately, even
if the index has not been completely built yet.
:param list highlight_fields: Optional list of fields which should be
highlighted.
:param str highlight_pre_tag: Optional string inserted before the
highlighted word in the highlights output. Defaults to ``<em>``.
:param str highlight_post_tag: Optional string inserted after the
highlighted word in the highlights output. Defaults to ``</em>``.
:param int highlight_number: Optional number of fragments returned in
highlights. If the search term occurs less often than the number of
fragments specified, longer fragments are returned. Default is 1.
:param int highlight_size: Optional number of characters in each
fragment for highlights. Defaults to 100 characters.
:param list include_fields: Optional list of field names to include in
search results. Any fields included must have been indexed with the
``store:true`` option.
:returns: Search query result data in JSON format
"""
ddoc = DesignDocument(self, ddoc_id)
return self._get_search_result('/'.join((ddoc.document_url, '_search', index_name)), **query_params) |
def fetch_options(self, typ, element):
"""Fetch the options for possible files to
load replace etc for the given element.
This will call :meth:`ReftypeInterface.fetch_options`.
:param typ: the typ of options. E.g. Asset, Alembic, Camera etc
:type typ: str
:param element: The element for which the options should be fetched.
:type element: :class:`jukeboxcore.djadapter.models.Asset` | :class:`jukeboxcore.djadapter.models.Shot`
:returns: the option model and a list with all TaskFileInfos
:rtype: ( :class:`jukeboxcore.gui.treemodel.TreeModel`, list of :class:`TaskFileInfo` )
:raises: None
"""
inter = self.get_typ_interface(typ)
return inter.fetch_options(element) | def function[fetch_options, parameter[self, typ, element]]:
constant[Fetch the options for possible files to
load replace etc for the given element.
This will call :meth:`ReftypeInterface.fetch_options`.
:param typ: the typ of options. E.g. Asset, Alembic, Camera etc
:type typ: str
:param element: The element for which the options should be fetched.
:type element: :class:`jukeboxcore.djadapter.models.Asset` | :class:`jukeboxcore.djadapter.models.Shot`
:returns: the option model and a list with all TaskFileInfos
:rtype: ( :class:`jukeboxcore.gui.treemodel.TreeModel`, list of :class:`TaskFileInfo` )
:raises: None
]
variable[inter] assign[=] call[name[self].get_typ_interface, parameter[name[typ]]]
return[call[name[inter].fetch_options, parameter[name[element]]]] | keyword[def] identifier[fetch_options] ( identifier[self] , identifier[typ] , identifier[element] ):
literal[string]
identifier[inter] = identifier[self] . identifier[get_typ_interface] ( identifier[typ] )
keyword[return] identifier[inter] . identifier[fetch_options] ( identifier[element] ) | def fetch_options(self, typ, element):
"""Fetch the options for possible files to
load replace etc for the given element.
This will call :meth:`ReftypeInterface.fetch_options`.
:param typ: the typ of options. E.g. Asset, Alembic, Camera etc
:type typ: str
:param element: The element for which the options should be fetched.
:type element: :class:`jukeboxcore.djadapter.models.Asset` | :class:`jukeboxcore.djadapter.models.Shot`
:returns: the option model and a list with all TaskFileInfos
:rtype: ( :class:`jukeboxcore.gui.treemodel.TreeModel`, list of :class:`TaskFileInfo` )
:raises: None
"""
inter = self.get_typ_interface(typ)
return inter.fetch_options(element) |
def next_string(min_size, max_size):
"""
Generates a random string, consisting of upper and lower case letters (of the English alphabet),
digits (0-9), and symbols ("_,.:-/.[].{},#-!,$=%.+^.&*-() ").
:param min_size: (optional) minimum string length.
:param max_size: maximum string length.
:return: a random string.
"""
result = ''
max_size = max_size if max_size != None else min_size
length = RandomInteger.next_integer(min_size, max_size)
for i in range(length):
result += random.choice(_chars)
return result | def function[next_string, parameter[min_size, max_size]]:
constant[
Generates a random string, consisting of upper and lower case letters (of the English alphabet),
digits (0-9), and symbols ("_,.:-/.[].{},#-!,$=%.+^.&*-() ").
:param min_size: (optional) minimum string length.
:param max_size: maximum string length.
:return: a random string.
]
variable[result] assign[=] constant[]
variable[max_size] assign[=] <ast.IfExp object at 0x7da1b1414c70>
variable[length] assign[=] call[name[RandomInteger].next_integer, parameter[name[min_size], name[max_size]]]
for taget[name[i]] in starred[call[name[range], parameter[name[length]]]] begin[:]
<ast.AugAssign object at 0x7da1b16d75e0>
return[name[result]] | keyword[def] identifier[next_string] ( identifier[min_size] , identifier[max_size] ):
literal[string]
identifier[result] = literal[string]
identifier[max_size] = identifier[max_size] keyword[if] identifier[max_size] != keyword[None] keyword[else] identifier[min_size]
identifier[length] = identifier[RandomInteger] . identifier[next_integer] ( identifier[min_size] , identifier[max_size] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[length] ):
identifier[result] += identifier[random] . identifier[choice] ( identifier[_chars] )
keyword[return] identifier[result] | def next_string(min_size, max_size):
"""
Generates a random string, consisting of upper and lower case letters (of the English alphabet),
digits (0-9), and symbols ("_,.:-/.[].{},#-!,$=%.+^.&*-() ").
:param min_size: (optional) minimum string length.
:param max_size: maximum string length.
:return: a random string.
"""
result = ''
max_size = max_size if max_size != None else min_size
length = RandomInteger.next_integer(min_size, max_size)
for i in range(length):
result += random.choice(_chars) # depends on [control=['for'], data=[]]
return result |
def rprof(self):
"""Radial profiles data of the time step.
Set to None if no radial profiles data is available for this time step.
"""
if self.istep not in self.sdat.rprof.index.levels[0]:
return None
return self.sdat.rprof.loc[self.istep] | def function[rprof, parameter[self]]:
constant[Radial profiles data of the time step.
Set to None if no radial profiles data is available for this time step.
]
if compare[name[self].istep <ast.NotIn object at 0x7da2590d7190> call[name[self].sdat.rprof.index.levels][constant[0]]] begin[:]
return[constant[None]]
return[call[name[self].sdat.rprof.loc][name[self].istep]] | keyword[def] identifier[rprof] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[istep] keyword[not] keyword[in] identifier[self] . identifier[sdat] . identifier[rprof] . identifier[index] . identifier[levels] [ literal[int] ]:
keyword[return] keyword[None]
keyword[return] identifier[self] . identifier[sdat] . identifier[rprof] . identifier[loc] [ identifier[self] . identifier[istep] ] | def rprof(self):
"""Radial profiles data of the time step.
Set to None if no radial profiles data is available for this time step.
"""
if self.istep not in self.sdat.rprof.index.levels[0]:
return None # depends on [control=['if'], data=[]]
return self.sdat.rprof.loc[self.istep] |
def configure_for_kerberos(self, datanode_transceiver_port=None,
datanode_web_port=None):
"""
Command to configure the cluster to use Kerberos for authentication.
This command will configure all relevant services on a cluster for
Kerberos usage. This command will trigger a GenerateCredentials command
to create Kerberos keytabs for all roles in the cluster.
@param datanode_transceiver_port: The HDFS DataNode transceiver port to use.
This will be applied to all DataNode role configuration groups. If
not specified, this will default to 1004.
@param datanode_web_port: The HDFS DataNode web port to use. This will be
applied to all DataNode role configuration groups. If not specified,
this will default to 1006.
@return: Reference to the submitted command.
@since: API v11
"""
args = dict()
if datanode_transceiver_port:
args['datanodeTransceiverPort'] = datanode_transceiver_port
if datanode_web_port:
args['datanodeWebPort'] = datanode_web_port
return self._cmd('configureForKerberos', data=args, api_version=11) | def function[configure_for_kerberos, parameter[self, datanode_transceiver_port, datanode_web_port]]:
constant[
Command to configure the cluster to use Kerberos for authentication.
This command will configure all relevant services on a cluster for
Kerberos usage. This command will trigger a GenerateCredentials command
to create Kerberos keytabs for all roles in the cluster.
@param datanode_transceiver_port: The HDFS DataNode transceiver port to use.
This will be applied to all DataNode role configuration groups. If
not specified, this will default to 1004.
@param datanode_web_port: The HDFS DataNode web port to use. This will be
applied to all DataNode role configuration groups. If not specified,
this will default to 1006.
@return: Reference to the submitted command.
@since: API v11
]
variable[args] assign[=] call[name[dict], parameter[]]
if name[datanode_transceiver_port] begin[:]
call[name[args]][constant[datanodeTransceiverPort]] assign[=] name[datanode_transceiver_port]
if name[datanode_web_port] begin[:]
call[name[args]][constant[datanodeWebPort]] assign[=] name[datanode_web_port]
return[call[name[self]._cmd, parameter[constant[configureForKerberos]]]] | keyword[def] identifier[configure_for_kerberos] ( identifier[self] , identifier[datanode_transceiver_port] = keyword[None] ,
identifier[datanode_web_port] = keyword[None] ):
literal[string]
identifier[args] = identifier[dict] ()
keyword[if] identifier[datanode_transceiver_port] :
identifier[args] [ literal[string] ]= identifier[datanode_transceiver_port]
keyword[if] identifier[datanode_web_port] :
identifier[args] [ literal[string] ]= identifier[datanode_web_port]
keyword[return] identifier[self] . identifier[_cmd] ( literal[string] , identifier[data] = identifier[args] , identifier[api_version] = literal[int] ) | def configure_for_kerberos(self, datanode_transceiver_port=None, datanode_web_port=None):
"""
Command to configure the cluster to use Kerberos for authentication.
This command will configure all relevant services on a cluster for
Kerberos usage. This command will trigger a GenerateCredentials command
to create Kerberos keytabs for all roles in the cluster.
@param datanode_transceiver_port: The HDFS DataNode transceiver port to use.
This will be applied to all DataNode role configuration groups. If
not specified, this will default to 1004.
@param datanode_web_port: The HDFS DataNode web port to use. This will be
applied to all DataNode role configuration groups. If not specified,
this will default to 1006.
@return: Reference to the submitted command.
@since: API v11
"""
args = dict()
if datanode_transceiver_port:
args['datanodeTransceiverPort'] = datanode_transceiver_port # depends on [control=['if'], data=[]]
if datanode_web_port:
args['datanodeWebPort'] = datanode_web_port # depends on [control=['if'], data=[]]
return self._cmd('configureForKerberos', data=args, api_version=11) |
def viewbox(self):
"""
Return bounding box of the viewport.
:return: (left, top, right, bottom) `tuple`.
"""
return self.left, self.top, self.right, self.bottom | def function[viewbox, parameter[self]]:
constant[
Return bounding box of the viewport.
:return: (left, top, right, bottom) `tuple`.
]
return[tuple[[<ast.Attribute object at 0x7da204621b40>, <ast.Attribute object at 0x7da204620f40>, <ast.Attribute object at 0x7da204621ae0>, <ast.Attribute object at 0x7da204623040>]]] | keyword[def] identifier[viewbox] ( identifier[self] ):
literal[string]
keyword[return] identifier[self] . identifier[left] , identifier[self] . identifier[top] , identifier[self] . identifier[right] , identifier[self] . identifier[bottom] | def viewbox(self):
"""
Return bounding box of the viewport.
:return: (left, top, right, bottom) `tuple`.
"""
return (self.left, self.top, self.right, self.bottom) |
def train(self, net_sizes, epochs, batchsize):
""" Initialize the base trainer """
self.trainer = ClassificationTrainer(self.data, self.targets, net_sizes)
self.trainer.learn(epochs, batchsize)
return self.trainer.evaluate(batchsize) | def function[train, parameter[self, net_sizes, epochs, batchsize]]:
constant[ Initialize the base trainer ]
name[self].trainer assign[=] call[name[ClassificationTrainer], parameter[name[self].data, name[self].targets, name[net_sizes]]]
call[name[self].trainer.learn, parameter[name[epochs], name[batchsize]]]
return[call[name[self].trainer.evaluate, parameter[name[batchsize]]]] | keyword[def] identifier[train] ( identifier[self] , identifier[net_sizes] , identifier[epochs] , identifier[batchsize] ):
literal[string]
identifier[self] . identifier[trainer] = identifier[ClassificationTrainer] ( identifier[self] . identifier[data] , identifier[self] . identifier[targets] , identifier[net_sizes] )
identifier[self] . identifier[trainer] . identifier[learn] ( identifier[epochs] , identifier[batchsize] )
keyword[return] identifier[self] . identifier[trainer] . identifier[evaluate] ( identifier[batchsize] ) | def train(self, net_sizes, epochs, batchsize):
""" Initialize the base trainer """
self.trainer = ClassificationTrainer(self.data, self.targets, net_sizes)
self.trainer.learn(epochs, batchsize)
return self.trainer.evaluate(batchsize) |
def median_fill(adf):
""" Looks at each row, and chooses the median. Honours
the Trump override/failsafe logic. """
ordpt = adf.values[0]
if not pd.isnull(ordpt):
return ordpt
fdmn = adf.iloc[1:-1].median()
if not pd.isnull(fdmn):
return fdmn
flspt = adf.values[-1]
if not pd.isnull(flspt):
return flspt
return nan | def function[median_fill, parameter[adf]]:
constant[ Looks at each row, and chooses the median. Honours
the Trump override/failsafe logic. ]
variable[ordpt] assign[=] call[name[adf].values][constant[0]]
if <ast.UnaryOp object at 0x7da1b274a1a0> begin[:]
return[name[ordpt]]
variable[fdmn] assign[=] call[call[name[adf].iloc][<ast.Slice object at 0x7da1b274a920>].median, parameter[]]
if <ast.UnaryOp object at 0x7da1b27499c0> begin[:]
return[name[fdmn]]
variable[flspt] assign[=] call[name[adf].values][<ast.UnaryOp object at 0x7da1b274abf0>]
if <ast.UnaryOp object at 0x7da1b2748730> begin[:]
return[name[flspt]]
return[name[nan]] | keyword[def] identifier[median_fill] ( identifier[adf] ):
literal[string]
identifier[ordpt] = identifier[adf] . identifier[values] [ literal[int] ]
keyword[if] keyword[not] identifier[pd] . identifier[isnull] ( identifier[ordpt] ):
keyword[return] identifier[ordpt]
identifier[fdmn] = identifier[adf] . identifier[iloc] [ literal[int] :- literal[int] ]. identifier[median] ()
keyword[if] keyword[not] identifier[pd] . identifier[isnull] ( identifier[fdmn] ):
keyword[return] identifier[fdmn]
identifier[flspt] = identifier[adf] . identifier[values] [- literal[int] ]
keyword[if] keyword[not] identifier[pd] . identifier[isnull] ( identifier[flspt] ):
keyword[return] identifier[flspt]
keyword[return] identifier[nan] | def median_fill(adf):
""" Looks at each row, and chooses the median. Honours
the Trump override/failsafe logic. """
ordpt = adf.values[0]
if not pd.isnull(ordpt):
return ordpt # depends on [control=['if'], data=[]]
fdmn = adf.iloc[1:-1].median()
if not pd.isnull(fdmn):
return fdmn # depends on [control=['if'], data=[]]
flspt = adf.values[-1]
if not pd.isnull(flspt):
return flspt # depends on [control=['if'], data=[]]
return nan |
def deploy(app_id, version, promote, quiet):
# type: (str, str, bool, bool) -> None
""" Deploy the app to AppEngine.
Args:
app_id (str):
AppEngine App ID. Overrides config value app_id if given.
version (str):
AppEngine project version. Overrides config values if given.
promote (bool):
If set to **True** promote the current remote app version to the one
that's being deployed.
quiet (bool):
If set to **True** this will pass the ``--quiet`` flag to gcloud
command.
"""
gae_app = GaeApp.for_branch(git.current_branch().name)
if gae_app is None and None in (app_id, version):
msg = (
"Can't find an AppEngine app setup for branch <35>{}<32> and"
"--project and --version were not given."
)
log.err(msg, git.current_branch().name)
sys.exit(1)
if version is not None:
gae_app.version = version
if app_id is not None:
gae_app.app_id = app_id
gae_app.deploy(promote, quiet) | def function[deploy, parameter[app_id, version, promote, quiet]]:
constant[ Deploy the app to AppEngine.
Args:
app_id (str):
AppEngine App ID. Overrides config value app_id if given.
version (str):
AppEngine project version. Overrides config values if given.
promote (bool):
If set to **True** promote the current remote app version to the one
that's being deployed.
quiet (bool):
If set to **True** this will pass the ``--quiet`` flag to gcloud
command.
]
variable[gae_app] assign[=] call[name[GaeApp].for_branch, parameter[call[name[git].current_branch, parameter[]].name]]
if <ast.BoolOp object at 0x7da1b106cac0> begin[:]
variable[msg] assign[=] constant[Can't find an AppEngine app setup for branch <35>{}<32> and--project and --version were not given.]
call[name[log].err, parameter[name[msg], call[name[git].current_branch, parameter[]].name]]
call[name[sys].exit, parameter[constant[1]]]
if compare[name[version] is_not constant[None]] begin[:]
name[gae_app].version assign[=] name[version]
if compare[name[app_id] is_not constant[None]] begin[:]
name[gae_app].app_id assign[=] name[app_id]
call[name[gae_app].deploy, parameter[name[promote], name[quiet]]] | keyword[def] identifier[deploy] ( identifier[app_id] , identifier[version] , identifier[promote] , identifier[quiet] ):
literal[string]
identifier[gae_app] = identifier[GaeApp] . identifier[for_branch] ( identifier[git] . identifier[current_branch] (). identifier[name] )
keyword[if] identifier[gae_app] keyword[is] keyword[None] keyword[and] keyword[None] keyword[in] ( identifier[app_id] , identifier[version] ):
identifier[msg] =(
literal[string]
literal[string]
)
identifier[log] . identifier[err] ( identifier[msg] , identifier[git] . identifier[current_branch] (). identifier[name] )
identifier[sys] . identifier[exit] ( literal[int] )
keyword[if] identifier[version] keyword[is] keyword[not] keyword[None] :
identifier[gae_app] . identifier[version] = identifier[version]
keyword[if] identifier[app_id] keyword[is] keyword[not] keyword[None] :
identifier[gae_app] . identifier[app_id] = identifier[app_id]
identifier[gae_app] . identifier[deploy] ( identifier[promote] , identifier[quiet] ) | def deploy(app_id, version, promote, quiet):
# type: (str, str, bool, bool) -> None
" Deploy the app to AppEngine.\n\n Args:\n app_id (str):\n AppEngine App ID. Overrides config value app_id if given.\n version (str):\n AppEngine project version. Overrides config values if given.\n promote (bool):\n If set to **True** promote the current remote app version to the one\n that's being deployed.\n quiet (bool):\n If set to **True** this will pass the ``--quiet`` flag to gcloud\n command.\n "
gae_app = GaeApp.for_branch(git.current_branch().name)
if gae_app is None and None in (app_id, version):
msg = "Can't find an AppEngine app setup for branch <35>{}<32> and--project and --version were not given."
log.err(msg, git.current_branch().name)
sys.exit(1) # depends on [control=['if'], data=[]]
if version is not None:
gae_app.version = version # depends on [control=['if'], data=['version']]
if app_id is not None:
gae_app.app_id = app_id # depends on [control=['if'], data=['app_id']]
gae_app.deploy(promote, quiet) |
def mark_all_as_unread(self, recipient=None):
"""Mark as unread any read messages in the current queryset.
Optionally, filter these by recipient first.
"""
qset = self.read(True)
if recipient:
qset = qset.filter(recipient=recipient)
return qset.update(unread=True) | def function[mark_all_as_unread, parameter[self, recipient]]:
constant[Mark as unread any read messages in the current queryset.
Optionally, filter these by recipient first.
]
variable[qset] assign[=] call[name[self].read, parameter[constant[True]]]
if name[recipient] begin[:]
variable[qset] assign[=] call[name[qset].filter, parameter[]]
return[call[name[qset].update, parameter[]]] | keyword[def] identifier[mark_all_as_unread] ( identifier[self] , identifier[recipient] = keyword[None] ):
literal[string]
identifier[qset] = identifier[self] . identifier[read] ( keyword[True] )
keyword[if] identifier[recipient] :
identifier[qset] = identifier[qset] . identifier[filter] ( identifier[recipient] = identifier[recipient] )
keyword[return] identifier[qset] . identifier[update] ( identifier[unread] = keyword[True] ) | def mark_all_as_unread(self, recipient=None):
"""Mark as unread any read messages in the current queryset.
Optionally, filter these by recipient first.
"""
qset = self.read(True)
if recipient:
qset = qset.filter(recipient=recipient) # depends on [control=['if'], data=[]]
return qset.update(unread=True) |
async def get_bikes(postcode: PostCodeLike, kilometers=1) -> Optional[List[Bike]]:
"""
Gets stolen bikes from the database within a
certain radius (km) of a given postcode. Selects
a square from the database and then filters out
the corners of the square.
:param postcode: The postcode to look up.
:param kilometers: The radius (km) of the search.
:return: The bikes in that radius or None if the postcode doesn't exist.
"""
try:
postcode_opt = await get_postcode(postcode)
except CachingError as e:
raise e
if postcode_opt is None:
return None
else:
postcode = postcode_opt
# create point and distance
center = Point(postcode.lat, postcode.long)
distance = geodesic(kilometers=kilometers)
# calculate edges of a square and retrieve
lat_end = distance.destination(point=center, bearing=0).latitude
lat_start = distance.destination(point=center, bearing=180).latitude
long_start = distance.destination(point=center, bearing=270).longitude
long_end = distance.destination(point=center, bearing=90).longitude
bikes_in_area = Bike.select().where(
lat_start <= Bike.latitude,
Bike.latitude <= lat_end,
long_start <= Bike.longitude,
Bike.longitude <= long_end
)
# filter out items in square that aren't within the radius and return
return [
bike for bike in bikes_in_area
if geodesic(Point(bike.latitude, bike.longitude), center).kilometers < kilometers
] | <ast.AsyncFunctionDef object at 0x7da1b01a5bd0> | keyword[async] keyword[def] identifier[get_bikes] ( identifier[postcode] : identifier[PostCodeLike] , identifier[kilometers] = literal[int] )-> identifier[Optional] [ identifier[List] [ identifier[Bike] ]]:
literal[string]
keyword[try] :
identifier[postcode_opt] = keyword[await] identifier[get_postcode] ( identifier[postcode] )
keyword[except] identifier[CachingError] keyword[as] identifier[e] :
keyword[raise] identifier[e]
keyword[if] identifier[postcode_opt] keyword[is] keyword[None] :
keyword[return] keyword[None]
keyword[else] :
identifier[postcode] = identifier[postcode_opt]
identifier[center] = identifier[Point] ( identifier[postcode] . identifier[lat] , identifier[postcode] . identifier[long] )
identifier[distance] = identifier[geodesic] ( identifier[kilometers] = identifier[kilometers] )
identifier[lat_end] = identifier[distance] . identifier[destination] ( identifier[point] = identifier[center] , identifier[bearing] = literal[int] ). identifier[latitude]
identifier[lat_start] = identifier[distance] . identifier[destination] ( identifier[point] = identifier[center] , identifier[bearing] = literal[int] ). identifier[latitude]
identifier[long_start] = identifier[distance] . identifier[destination] ( identifier[point] = identifier[center] , identifier[bearing] = literal[int] ). identifier[longitude]
identifier[long_end] = identifier[distance] . identifier[destination] ( identifier[point] = identifier[center] , identifier[bearing] = literal[int] ). identifier[longitude]
identifier[bikes_in_area] = identifier[Bike] . identifier[select] (). identifier[where] (
identifier[lat_start] <= identifier[Bike] . identifier[latitude] ,
identifier[Bike] . identifier[latitude] <= identifier[lat_end] ,
identifier[long_start] <= identifier[Bike] . identifier[longitude] ,
identifier[Bike] . identifier[longitude] <= identifier[long_end]
)
keyword[return] [
identifier[bike] keyword[for] identifier[bike] keyword[in] identifier[bikes_in_area]
keyword[if] identifier[geodesic] ( identifier[Point] ( identifier[bike] . identifier[latitude] , identifier[bike] . identifier[longitude] ), identifier[center] ). identifier[kilometers] < identifier[kilometers]
] | async def get_bikes(postcode: PostCodeLike, kilometers=1) -> Optional[List[Bike]]:
"""
Gets stolen bikes from the database within a
certain radius (km) of a given postcode. Selects
a square from the database and then filters out
the corners of the square.
:param postcode: The postcode to look up.
:param kilometers: The radius (km) of the search.
:return: The bikes in that radius or None if the postcode doesn't exist.
"""
try:
postcode_opt = await get_postcode(postcode) # depends on [control=['try'], data=[]]
except CachingError as e:
raise e # depends on [control=['except'], data=['e']]
if postcode_opt is None:
return None # depends on [control=['if'], data=[]]
else:
postcode = postcode_opt
# create point and distance
center = Point(postcode.lat, postcode.long)
distance = geodesic(kilometers=kilometers)
# calculate edges of a square and retrieve
lat_end = distance.destination(point=center, bearing=0).latitude
lat_start = distance.destination(point=center, bearing=180).latitude
long_start = distance.destination(point=center, bearing=270).longitude
long_end = distance.destination(point=center, bearing=90).longitude
bikes_in_area = Bike.select().where(lat_start <= Bike.latitude, Bike.latitude <= lat_end, long_start <= Bike.longitude, Bike.longitude <= long_end)
# filter out items in square that aren't within the radius and return
return [bike for bike in bikes_in_area if geodesic(Point(bike.latitude, bike.longitude), center).kilometers < kilometers] |
def get_current_fields(self):
    """Return the list of fields to display, honouring any user selection.

    Resolution order: the per-instance cache, then a POSTed selection
    (persisted as a user attribute when it changed), then the stored user
    attribute, then the model config defaults.
    """
    # Reuse the selection already resolved on this instance, if any.
    if hasattr(self, 'current_fields') and self.current_fields:
        return self.current_fields

    field_attribute = 'list_{}_{}_fields'.format(
        self.kwargs.get('app'), self.kwargs.get('model'))
    current_fields = self.request.user.attributes.get_attribute(field_attribute, [])

    request_fields = self.request.POST.get('selected_fields', None)
    if request_fields:
        stored = ','.join(current_fields)
        # TODO validate fields
        current_fields = request_fields.split(',')
        # Persist only when the posted selection differs from the stored one.
        if stored != request_fields:
            self.request.user.attributes.set_attribute(field_attribute, current_fields)

    if not current_fields:
        config = self.get_model_config()
        current_fields = config.list_default_fields if config.list_default_fields else ['created_at', 'id']

    self.current_fields = current_fields
    return current_fields
constant[Get current list to be used]
if <ast.BoolOp object at 0x7da204345a80> begin[:]
return[name[self].current_fields]
variable[field_attribute] assign[=] call[constant[list_{}_{}_fields].format, parameter[call[name[self].kwargs.get, parameter[constant[app]]], call[name[self].kwargs.get, parameter[constant[model]]]]]
variable[current_fields] assign[=] call[name[self].request.user.attributes.get_attribute, parameter[name[field_attribute], list[[]]]]
variable[request_fields] assign[=] call[name[self].request.POST.get, parameter[constant[selected_fields], constant[None]]]
if <ast.BoolOp object at 0x7da18f00e9b0> begin[:]
variable[current_fields] assign[=] call[name[request_fields].split, parameter[constant[,]]]
call[name[self].request.user.attributes.set_attribute, parameter[name[field_attribute], name[current_fields]]]
if <ast.UnaryOp object at 0x7da18bccb7c0> begin[:]
variable[config] assign[=] call[name[self].get_model_config, parameter[]]
variable[current_fields] assign[=] <ast.IfExp object at 0x7da18bcc8d90>
name[self].current_fields assign[=] name[current_fields]
return[name[current_fields]] | keyword[def] identifier[get_current_fields] ( identifier[self] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[current_fields] :
keyword[return] identifier[self] . identifier[current_fields]
identifier[field_attribute] = literal[string] . identifier[format] ( identifier[self] . identifier[kwargs] . identifier[get] ( literal[string] ), identifier[self] . identifier[kwargs] . identifier[get] ( literal[string] ))
identifier[current_fields] = identifier[self] . identifier[request] . identifier[user] . identifier[attributes] . identifier[get_attribute] ( identifier[field_attribute] ,[])
identifier[request_fields] = identifier[self] . identifier[request] . identifier[POST] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[request_fields] keyword[and] literal[string] . identifier[join] ( identifier[current_fields] )!= identifier[request_fields] :
identifier[current_fields] = identifier[request_fields] . identifier[split] ( literal[string] )
identifier[self] . identifier[request] . identifier[user] . identifier[attributes] . identifier[set_attribute] ( identifier[field_attribute] , identifier[current_fields] )
keyword[elif] identifier[request_fields] :
identifier[current_fields] = identifier[request_fields] . identifier[split] ( literal[string] )
keyword[if] keyword[not] identifier[current_fields] :
identifier[config] = identifier[self] . identifier[get_model_config] ()
identifier[current_fields] = identifier[config] . identifier[list_default_fields] keyword[if] identifier[config] . identifier[list_default_fields] keyword[else] [ literal[string] , literal[string] ]
identifier[self] . identifier[current_fields] = identifier[current_fields]
keyword[return] identifier[current_fields] | def get_current_fields(self):
"""Get current list to be used"""
if hasattr(self, 'current_fields') and self.current_fields:
return self.current_fields # depends on [control=['if'], data=[]]
field_attribute = 'list_{}_{}_fields'.format(self.kwargs.get('app'), self.kwargs.get('model'))
current_fields = self.request.user.attributes.get_attribute(field_attribute, [])
request_fields = self.request.POST.get('selected_fields', None)
if request_fields and ','.join(current_fields) != request_fields:
# TODO validate fields
current_fields = request_fields.split(',')
self.request.user.attributes.set_attribute(field_attribute, current_fields) # depends on [control=['if'], data=[]]
elif request_fields:
current_fields = request_fields.split(',') # depends on [control=['if'], data=[]]
if not current_fields:
config = self.get_model_config()
current_fields = config.list_default_fields if config.list_default_fields else ['created_at', 'id'] # depends on [control=['if'], data=[]]
self.current_fields = current_fields
return current_fields |
def getAnalysisRequests(self, **kwargs):
    """Return the Analysis Request objects linked to this Batch.

    Keyword arguments are forwarded verbatim to the catalog query.
    """
    return [brain.getObject() for brain in self.getAnalysisRequestsBrains(**kwargs)]
constant[Return all the Analysis Requests objects linked to the Batch kargs
are passed directly to the catalog.
]
variable[brains] assign[=] call[name[self].getAnalysisRequestsBrains, parameter[]]
return[<ast.ListComp object at 0x7da2047ea620>] | keyword[def] identifier[getAnalysisRequests] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[brains] = identifier[self] . identifier[getAnalysisRequestsBrains] (** identifier[kwargs] )
keyword[return] [ identifier[b] . identifier[getObject] () keyword[for] identifier[b] keyword[in] identifier[brains] ] | def getAnalysisRequests(self, **kwargs):
"""Return all the Analysis Requests objects linked to the Batch kargs
are passed directly to the catalog.
"""
brains = self.getAnalysisRequestsBrains(**kwargs)
return [b.getObject() for b in brains] |
def _collect_vm_info(self):
    """Collect basic information about the VM over SSH and log it."""
    self.logger.info('Collecting basic info about VM')
    ssh_client = self._get_ssh_client()
    # Ask the distro-specific helper for the info and record the output.
    self._write_to_log(self.distro.get_vm_info(ssh_client))
constant[
Gather basic info about VM
]
call[name[self].logger.info, parameter[constant[Collecting basic info about VM]]]
variable[client] assign[=] call[name[self]._get_ssh_client, parameter[]]
variable[out] assign[=] call[name[self].distro.get_vm_info, parameter[name[client]]]
call[name[self]._write_to_log, parameter[name[out]]] | keyword[def] identifier[_collect_vm_info] ( identifier[self] ):
literal[string]
identifier[self] . identifier[logger] . identifier[info] ( literal[string] )
identifier[client] = identifier[self] . identifier[_get_ssh_client] ()
identifier[out] = identifier[self] . identifier[distro] . identifier[get_vm_info] ( identifier[client] )
identifier[self] . identifier[_write_to_log] ( identifier[out] ) | def _collect_vm_info(self):
"""
Gather basic info about VM
"""
self.logger.info('Collecting basic info about VM')
client = self._get_ssh_client()
out = self.distro.get_vm_info(client)
self._write_to_log(out) |
def get_real_layer_path(self, path):
    """
    Get the path to the actual layer file.

    ``path`` may be a local file path, a URL (downloaded into
    ``DATA_DIRECTORY`` and cached), or a ``.zip`` of either kind
    (unzipped once into ``DATA_DIRECTORY/<slug>`` and reused).

    :param path: Local path or URL of the layer file.
    :return: Local filesystem path to the usable layer file/directory.
    :raises Exception: if a local ``path`` does not exist.
    """
    filename = path.split('/')[-1]
    local_path = path
    filetype = os.path.splitext(filename)[1]
    # Url: cache the download under DATA_DIRECTORY.
    if re.match(r'^[a-zA-Z]+://', path):
        local_path = os.path.join(DATA_DIRECTORY, filename)
        if not os.path.exists(local_path):
            sys.stdout.write('* Downloading %s...\n' % filename)
            self.download_file(path, local_path)
        elif self.args.redownload:
            # --redownload forces a fresh copy over the cached file.
            os.remove(local_path)
            sys.stdout.write('* Redownloading %s...\n' % filename)
            self.download_file(path, local_path)
    # Non-existant file
    elif not os.path.exists(local_path):
        raise Exception('%s does not exist' % local_path)
    real_path = path
    # Zip files: unzip once into DATA_DIRECTORY/<slug>; reuse afterwards.
    if filetype == '.zip':
        slug = os.path.splitext(filename)[0]
        real_path = os.path.join(DATA_DIRECTORY, slug)
        if not os.path.exists(real_path):
            sys.stdout.write('* Unzipping...\n')
            self.unzip_file(local_path, real_path)
    return real_path
constant[
Get the path the actual layer file.
]
variable[filename] assign[=] call[call[name[path].split, parameter[constant[/]]]][<ast.UnaryOp object at 0x7da18f58ea10>]
variable[local_path] assign[=] name[path]
variable[filetype] assign[=] call[call[name[os].path.splitext, parameter[name[filename]]]][constant[1]]
if call[name[re].match, parameter[constant[^[a-zA-Z]+://], name[path]]] begin[:]
variable[local_path] assign[=] call[name[os].path.join, parameter[name[DATA_DIRECTORY], name[filename]]]
if <ast.UnaryOp object at 0x7da18f58d930> begin[:]
call[name[sys].stdout.write, parameter[binary_operation[constant[* Downloading %s...
] <ast.Mod object at 0x7da2590d6920> name[filename]]]]
call[name[self].download_file, parameter[name[path], name[local_path]]]
variable[real_path] assign[=] name[path]
if compare[name[filetype] equal[==] constant[.zip]] begin[:]
variable[slug] assign[=] call[call[name[os].path.splitext, parameter[name[filename]]]][constant[0]]
variable[real_path] assign[=] call[name[os].path.join, parameter[name[DATA_DIRECTORY], name[slug]]]
if <ast.UnaryOp object at 0x7da18fe93820> begin[:]
call[name[sys].stdout.write, parameter[constant[* Unzipping...
]]]
call[name[self].unzip_file, parameter[name[local_path], name[real_path]]]
return[name[real_path]] | keyword[def] identifier[get_real_layer_path] ( identifier[self] , identifier[path] ):
literal[string]
identifier[filename] = identifier[path] . identifier[split] ( literal[string] )[- literal[int] ]
identifier[local_path] = identifier[path]
identifier[filetype] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[filename] )[ literal[int] ]
keyword[if] identifier[re] . identifier[match] ( literal[string] , identifier[path] ):
identifier[local_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[DATA_DIRECTORY] , identifier[filename] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[local_path] ):
identifier[sys] . identifier[stdout] . identifier[write] ( literal[string] % identifier[filename] )
identifier[self] . identifier[download_file] ( identifier[path] , identifier[local_path] )
keyword[elif] identifier[self] . identifier[args] . identifier[redownload] :
identifier[os] . identifier[remove] ( identifier[local_path] )
identifier[sys] . identifier[stdout] . identifier[write] ( literal[string] % identifier[filename] )
identifier[self] . identifier[download_file] ( identifier[path] , identifier[local_path] )
keyword[elif] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[local_path] ):
keyword[raise] identifier[Exception] ( literal[string] % identifier[local_path] )
identifier[real_path] = identifier[path]
keyword[if] identifier[filetype] == literal[string] :
identifier[slug] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[filename] )[ literal[int] ]
identifier[real_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[DATA_DIRECTORY] , identifier[slug] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[real_path] ):
identifier[sys] . identifier[stdout] . identifier[write] ( literal[string] )
identifier[self] . identifier[unzip_file] ( identifier[local_path] , identifier[real_path] )
keyword[return] identifier[real_path] | def get_real_layer_path(self, path):
"""
Get the path the actual layer file.
"""
filename = path.split('/')[-1]
local_path = path
filetype = os.path.splitext(filename)[1]
# Url
if re.match('^[a-zA-Z]+://', path):
local_path = os.path.join(DATA_DIRECTORY, filename)
if not os.path.exists(local_path):
sys.stdout.write('* Downloading %s...\n' % filename)
self.download_file(path, local_path) # depends on [control=['if'], data=[]]
elif self.args.redownload:
os.remove(local_path)
sys.stdout.write('* Redownloading %s...\n' % filename)
self.download_file(path, local_path) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Non-existant file
elif not os.path.exists(local_path):
raise Exception('%s does not exist' % local_path) # depends on [control=['if'], data=[]]
real_path = path
# Zip files
if filetype == '.zip':
slug = os.path.splitext(filename)[0]
real_path = os.path.join(DATA_DIRECTORY, slug)
if not os.path.exists(real_path):
sys.stdout.write('* Unzipping...\n')
self.unzip_file(local_path, real_path) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return real_path |
def get_objective_form(self, *args, **kwargs):
    """Pass through to provider ObjectiveAdminSession.get_objective_form_for_update"""
    # Implemented from kitosid template for -
    # osid.resource.ResourceAdminSession.get_resource_form_for_update
    # This method might be a bit sketchy. Time will tell.
    # A trailing list argument (record types) or an explicit keyword means
    # the caller wants the *create* form; anything else gets *update*.
    wants_create = isinstance(args[-1], list) or 'objective_record_types' in kwargs
    handler = (self.get_objective_form_for_create if wants_create
               else self.get_objective_form_for_update)
    return handler(*args, **kwargs)
constant[Pass through to provider ObjectiveAdminSession.get_objective_form_for_update]
if <ast.BoolOp object at 0x7da20c7c83a0> begin[:]
return[call[name[self].get_objective_form_for_create, parameter[<ast.Starred object at 0x7da20c7ca3e0>]]] | keyword[def] identifier[get_objective_form] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[args] [- literal[int] ], identifier[list] ) keyword[or] literal[string] keyword[in] identifier[kwargs] :
keyword[return] identifier[self] . identifier[get_objective_form_for_create] (* identifier[args] ,** identifier[kwargs] )
keyword[else] :
keyword[return] identifier[self] . identifier[get_objective_form_for_update] (* identifier[args] ,** identifier[kwargs] ) | def get_objective_form(self, *args, **kwargs):
"""Pass through to provider ObjectiveAdminSession.get_objective_form_for_update"""
# Implemented from kitosid template for -
# osid.resource.ResourceAdminSession.get_resource_form_for_update
# This method might be a bit sketchy. Time will tell.
if isinstance(args[-1], list) or 'objective_record_types' in kwargs:
return self.get_objective_form_for_create(*args, **kwargs) # depends on [control=['if'], data=[]]
else:
return self.get_objective_form_for_update(*args, **kwargs) |
def __parse_uri(self):
    """Assemble the complete URI string from the stored components."""
    pieces = []
    if self.scheme:
        pieces.append('{}://'.format(self.scheme))
    # user-info part: "user@" or "user:password@" when both are present
    userinfo = self.username or ''
    if userinfo and (self.password or ''):
        userinfo = self.username + ':' + self.password
    if userinfo:
        pieces.append(userinfo + '@')
    # network location, with optional port
    if self.port:
        pieces.append('{}:{}'.format(self.host, self.port))
    else:
        pieces.append(self.host)
    pieces.append(self.path or '')
    if self.query:
        pieces.append('?' + self.query)
    if self.fragment:
        pieces.append('#' + self.fragment)
    return ''.join(pieces)
constant[Parse complete URI from all values]
if name[self].scheme begin[:]
variable[scheme] assign[=] call[constant[{}://].format, parameter[name[self].scheme]]
variable[credentials] assign[=] <ast.BoolOp object at 0x7da1b0aba4a0>
variable[password] assign[=] <ast.BoolOp object at 0x7da1b0ab9570>
if <ast.BoolOp object at 0x7da1b0abb790> begin[:]
variable[credentials] assign[=] binary_operation[binary_operation[name[self].username + constant[:]] + name[self].password]
if name[credentials] begin[:]
<ast.AugAssign object at 0x7da1b0912050>
if name[self].port begin[:]
variable[location] assign[=] call[constant[{}:{}].format, parameter[name[self].host, name[self].port]]
variable[path] assign[=] <ast.BoolOp object at 0x7da1b09109d0>
if name[self].query begin[:]
variable[query] assign[=] binary_operation[constant[?] + name[self].query]
if name[self].fragment begin[:]
variable[fragment] assign[=] binary_operation[constant[#] + name[self].fragment]
return[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[name[scheme] + name[credentials]] + name[location]] + name[path]] + name[query]] + name[fragment]]] | keyword[def] identifier[__parse_uri] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[scheme] :
identifier[scheme] = literal[string] . identifier[format] ( identifier[self] . identifier[scheme] )
keyword[else] :
identifier[scheme] = literal[string]
identifier[credentials] = identifier[self] . identifier[username] keyword[or] literal[string]
identifier[password] = identifier[self] . identifier[password] keyword[or] literal[string]
keyword[if] identifier[credentials] keyword[and] identifier[password] :
identifier[credentials] = identifier[self] . identifier[username] + literal[string] + identifier[self] . identifier[password]
keyword[if] identifier[credentials] :
identifier[credentials] += literal[string]
keyword[if] identifier[self] . identifier[port] :
identifier[location] = literal[string] . identifier[format] ( identifier[self] . identifier[host] , identifier[self] . identifier[port] )
keyword[else] :
identifier[location] = identifier[self] . identifier[host]
identifier[path] = identifier[self] . identifier[path] keyword[or] literal[string]
keyword[if] identifier[self] . identifier[query] :
identifier[query] = literal[string] + identifier[self] . identifier[query]
keyword[else] :
identifier[query] = literal[string]
keyword[if] identifier[self] . identifier[fragment] :
identifier[fragment] = literal[string] + identifier[self] . identifier[fragment]
keyword[else] :
identifier[fragment] = literal[string]
keyword[return] identifier[scheme] + identifier[credentials] + identifier[location] + identifier[path] + identifier[query] + identifier[fragment] | def __parse_uri(self):
"""Parse complete URI from all values"""
if self.scheme:
scheme = '{}://'.format(self.scheme) # depends on [control=['if'], data=[]]
else:
scheme = ''
credentials = self.username or ''
password = self.password or ''
if credentials and password:
credentials = self.username + ':' + self.password # depends on [control=['if'], data=[]]
if credentials:
credentials += '@' # depends on [control=['if'], data=[]]
if self.port:
location = '{}:{}'.format(self.host, self.port) # depends on [control=['if'], data=[]]
else:
location = self.host
path = self.path or ''
if self.query:
query = '?' + self.query # depends on [control=['if'], data=[]]
else:
query = ''
if self.fragment:
fragment = '#' + self.fragment # depends on [control=['if'], data=[]]
else:
fragment = ''
return scheme + credentials + location + path + query + fragment |
def help_content(self):
    """Return the content of help for this step wizard.

    We only needs to re-implement this method in each wizard step.

    :returns: A message object contains help.
    :rtype: m.Message
    """
    help_message = m.Message()
    body = tr(
        'In this wizard step: {step_name}, You can choose a exposure '
        'layer from the list of layers that have been loaded to QGIS and '
        'that matches with the geometry and exposure type you set in the '
        'previous step').format(step_name=self.step_name)
    help_message.add(m.Paragraph(body))
    return help_message
constant[Return the content of help for this step wizard.
We only needs to re-implement this method in each wizard step.
:returns: A message object contains help.
:rtype: m.Message
]
variable[message] assign[=] call[name[m].Message, parameter[]]
call[name[message].add, parameter[call[name[m].Paragraph, parameter[call[call[name[tr], parameter[constant[In this wizard step: {step_name}, You can choose a exposure layer from the list of layers that have been loaded to QGIS and that matches with the geometry and exposure type you set in the previous step]]].format, parameter[]]]]]]
return[name[message]] | keyword[def] identifier[help_content] ( identifier[self] ):
literal[string]
identifier[message] = identifier[m] . identifier[Message] ()
identifier[message] . identifier[add] ( identifier[m] . identifier[Paragraph] ( identifier[tr] (
literal[string]
literal[string]
literal[string]
literal[string] ). identifier[format] ( identifier[step_name] = identifier[self] . identifier[step_name] )))
keyword[return] identifier[message] | def help_content(self):
"""Return the content of help for this step wizard.
We only needs to re-implement this method in each wizard step.
:returns: A message object contains help.
:rtype: m.Message
"""
message = m.Message()
message.add(m.Paragraph(tr('In this wizard step: {step_name}, You can choose a exposure layer from the list of layers that have been loaded to QGIS and that matches with the geometry and exposure type you set in the previous step').format(step_name=self.step_name)))
return message |
def send(self, data):
    """Forward *data* to the child process over its stdin pipe."""
    pipe = self.stdin
    pipe.write(data)
    # Flush so the child sees the bytes immediately.
    pipe.flush()
constant[
Send data to the child process through.
]
call[name[self].stdin.write, parameter[name[data]]]
call[name[self].stdin.flush, parameter[]] | keyword[def] identifier[send] ( identifier[self] , identifier[data] ):
literal[string]
identifier[self] . identifier[stdin] . identifier[write] ( identifier[data] )
identifier[self] . identifier[stdin] . identifier[flush] () | def send(self, data):
"""
Send data to the child process through.
"""
self.stdin.write(data)
self.stdin.flush() |
def select_where(self, table, cols, where, return_type=list):
    """
    Query rows from a table filtered by one or more WHERE clauses.

    ``cols`` may be an iterable (list, set, tuple) of column names or a
    single column-name string.  ``where`` is either one clause or a
    list/set of clauses (joined with AND); each clause is a two-part
    tuple ``(column, value)`` (operator defaults to ``=``) or a
    three-part tuple ``(column, operator, value)``.

    :param table: Name of table
    :param cols: Iterable of columns or string with single column name
    :param where: WHERE clause(s), see above
    :param return_type: Type, type to return values in
    :return: Queried rows
    """
    if isinstance(where, (list, set)):
        # Several clauses: AND them together.
        where_statement = ' AND '.join(
            self._where_clause(clause) for clause in where)
    else:
        where_statement = self._where_clause(where)
    statement = "SELECT {0} FROM {1} WHERE {2}".format(
        join_cols(cols), wrap(table), where_statement)
    return self._return_rows(table, cols, self.fetch(statement), return_type)
constant[
Query certain rows from a table where a particular value is found.
cols parameter can be passed as a iterable (list, set, tuple) or a string if
only querying a single column. where parameter can be passed as a two or three
part tuple. If only two parts are passed the assumed operator is equals(=).
:param table: Name of table
:param cols: List, tuple or set of columns or string with single column name
:param where: WHERE clause, accepts either a two or three part tuple
two-part: (where_column, where_value)
three-part: (where_column, comparison_operator, where_value)
:param return_type: Type, type to return values in
:return: Queried rows
]
if call[name[isinstance], parameter[name[where], tuple[[<ast.Name object at 0x7da1b0bf33d0>, <ast.Name object at 0x7da1b0bf3490>]]]] begin[:]
variable[clauses] assign[=] <ast.ListComp object at 0x7da1b0bf21a0>
variable[where_statement] assign[=] call[constant[ AND ].join, parameter[name[clauses]]]
variable[statement] assign[=] call[constant[SELECT {0} FROM {1} WHERE {2}].format, parameter[call[name[join_cols], parameter[name[cols]]], call[name[wrap], parameter[name[table]]], name[where_statement]]]
variable[values] assign[=] call[name[self].fetch, parameter[name[statement]]]
return[call[name[self]._return_rows, parameter[name[table], name[cols], name[values], name[return_type]]]] | keyword[def] identifier[select_where] ( identifier[self] , identifier[table] , identifier[cols] , identifier[where] , identifier[return_type] = identifier[list] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[where] ,( identifier[list] , identifier[set] )):
identifier[clauses] =[ identifier[self] . identifier[_where_clause] ( identifier[clause] ) keyword[for] identifier[clause] keyword[in] identifier[where] ]
identifier[where_statement] = literal[string] . identifier[join] ( identifier[clauses] )
keyword[else] :
identifier[where_statement] = identifier[self] . identifier[_where_clause] ( identifier[where] )
identifier[statement] = literal[string] . identifier[format] ( identifier[join_cols] ( identifier[cols] ), identifier[wrap] ( identifier[table] ), identifier[where_statement] )
identifier[values] = identifier[self] . identifier[fetch] ( identifier[statement] )
keyword[return] identifier[self] . identifier[_return_rows] ( identifier[table] , identifier[cols] , identifier[values] , identifier[return_type] ) | def select_where(self, table, cols, where, return_type=list):
"""
Query certain rows from a table where a particular value is found.
cols parameter can be passed as a iterable (list, set, tuple) or a string if
only querying a single column. where parameter can be passed as a two or three
part tuple. If only two parts are passed the assumed operator is equals(=).
:param table: Name of table
:param cols: List, tuple or set of columns or string with single column name
:param where: WHERE clause, accepts either a two or three part tuple
two-part: (where_column, where_value)
three-part: (where_column, comparison_operator, where_value)
:param return_type: Type, type to return values in
:return: Queried rows
"""
# Unpack WHERE clause dictionary into tuple
if isinstance(where, (list, set)):
# Multiple WHERE clause's (separate with AND)
clauses = [self._where_clause(clause) for clause in where]
where_statement = ' AND '.join(clauses) # depends on [control=['if'], data=[]]
else:
where_statement = self._where_clause(where)
# Concatenate full statement and execute
statement = 'SELECT {0} FROM {1} WHERE {2}'.format(join_cols(cols), wrap(table), where_statement)
values = self.fetch(statement)
return self._return_rows(table, cols, values, return_type) |
def _update_notification(self, message=None):
    """Show *message* in the notification area; blank it when None."""
    text = '' if message is None else message
    self._parts['notification label'].config(text=text)
    # Force a redraw so the new text appears immediately.
    self._base.update()
constant[Update the message area with blank or a message.]
if compare[name[message] is constant[None]] begin[:]
variable[message] assign[=] constant[]
variable[message_label] assign[=] call[name[self]._parts][constant[notification label]]
call[name[message_label].config, parameter[]]
call[name[self]._base.update, parameter[]] | keyword[def] identifier[_update_notification] ( identifier[self] , identifier[message] = keyword[None] ):
literal[string]
keyword[if] identifier[message] keyword[is] keyword[None] :
identifier[message] = literal[string]
identifier[message_label] = identifier[self] . identifier[_parts] [ literal[string] ]
identifier[message_label] . identifier[config] ( identifier[text] = identifier[message] )
identifier[self] . identifier[_base] . identifier[update] () | def _update_notification(self, message=None):
"""Update the message area with blank or a message."""
if message is None:
message = '' # depends on [control=['if'], data=['message']]
message_label = self._parts['notification label']
message_label.config(text=message)
self._base.update() |
def erase_all_breakpoints(self):
    """
    Erases all breakpoints in all processes.

    @see:
        erase_code_breakpoint,
        erase_page_breakpoint,
        erase_hardware_breakpoint
    """
    # Code and page breakpoints are keyed by process id, hardware
    # breakpoints by thread id; erase each kind through its own API.
    for pid, bp in self.get_all_code_breakpoints():
        self.erase_code_breakpoint(pid, bp.get_address())
    for pid, bp in self.get_all_page_breakpoints():
        self.erase_page_breakpoint(pid, bp.get_address())
    for tid, bp in self.get_all_hardware_breakpoints():
        self.erase_hardware_breakpoint(tid, bp.get_address())
constant[
Erases all breakpoints in all processes.
@see:
erase_code_breakpoint,
erase_page_breakpoint,
erase_hardware_breakpoint
]
for taget[tuple[[<ast.Name object at 0x7da1b08db730>, <ast.Name object at 0x7da1b08d84f0>]]] in starred[call[name[self].get_all_code_breakpoints, parameter[]]] begin[:]
call[name[self].erase_code_breakpoint, parameter[name[pid], call[name[bp].get_address, parameter[]]]]
for taget[tuple[[<ast.Name object at 0x7da1b08dbf10>, <ast.Name object at 0x7da1b08d9cf0>]]] in starred[call[name[self].get_all_page_breakpoints, parameter[]]] begin[:]
call[name[self].erase_page_breakpoint, parameter[name[pid], call[name[bp].get_address, parameter[]]]]
for taget[tuple[[<ast.Name object at 0x7da1b08d8a30>, <ast.Name object at 0x7da1b08d8730>]]] in starred[call[name[self].get_all_hardware_breakpoints, parameter[]]] begin[:]
call[name[self].erase_hardware_breakpoint, parameter[name[tid], call[name[bp].get_address, parameter[]]]] | keyword[def] identifier[erase_all_breakpoints] ( identifier[self] ):
literal[string]
keyword[for] ( identifier[pid] , identifier[bp] ) keyword[in] identifier[self] . identifier[get_all_code_breakpoints] ():
identifier[self] . identifier[erase_code_breakpoint] ( identifier[pid] , identifier[bp] . identifier[get_address] ())
keyword[for] ( identifier[pid] , identifier[bp] ) keyword[in] identifier[self] . identifier[get_all_page_breakpoints] ():
identifier[self] . identifier[erase_page_breakpoint] ( identifier[pid] , identifier[bp] . identifier[get_address] ())
keyword[for] ( identifier[tid] , identifier[bp] ) keyword[in] identifier[self] . identifier[get_all_hardware_breakpoints] ():
identifier[self] . identifier[erase_hardware_breakpoint] ( identifier[tid] , identifier[bp] . identifier[get_address] ()) | def erase_all_breakpoints(self):
"""
Erases all breakpoints in all processes.
@see:
erase_code_breakpoint,
erase_page_breakpoint,
erase_hardware_breakpoint
"""
# This should be faster but let's not trust the GC so much :P
# self.disable_all_breakpoints()
# self.__codeBP = dict()
# self.__pageBP = dict()
# self.__hardwareBP = dict()
# self.__runningBP = dict()
# self.__hook_objects = dict()
## # erase hooks
## for (pid, address, hook) in self.get_all_hooks():
## self.dont_hook_function(pid, address)
# erase code breakpoints
for (pid, bp) in self.get_all_code_breakpoints():
self.erase_code_breakpoint(pid, bp.get_address()) # depends on [control=['for'], data=[]]
# erase page breakpoints
for (pid, bp) in self.get_all_page_breakpoints():
self.erase_page_breakpoint(pid, bp.get_address()) # depends on [control=['for'], data=[]]
# erase hardware breakpoints
for (tid, bp) in self.get_all_hardware_breakpoints():
self.erase_hardware_breakpoint(tid, bp.get_address()) # depends on [control=['for'], data=[]] |
def filter_maxsnp(data, superints, edgearr):
    """
    Filter loci by the maximum number of SNPs allowed per locus.

    Builds the per-site SNP arrays used for the snpsite line of the .loci
    format and flags loci that exceed ``max_SNPs_locus``. The edge filters
    built during trimming are applied to the SNP array here.
    **Loci are not yet filtered** by this function, only flagged.

    Parameters
    ----------
    data :
        Assembly-like object whose ``paramsdict`` provides 'max_SNPs_locus'.
    superints :
        3-d integer sequence array; axis 0 indexes loci and axis 2 indexes
        sites (assumed from the indexing below -- TODO confirm axis 1).
    edgearr :
        Per-locus edge-trim information consumed by ``snpfilter_numba``.

    Returns
    -------
    (snpfilt, snpsarr) :
        Boolean flag per locus, and per-site SNP calls with shape
        (nloci, maxlen, 2).
    """
    # Per-locus boolean flag array for loci failing the max-SNP filter.
    # NOTE: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin bool is the equivalent dtype.
    snpfilt = np.zeros(superints.shape[0], dtype=bool)
    snpsarr = np.zeros((superints.shape[0], superints.shape[2], 2), dtype=bool)
    maxsnps = np.array(data.paramsdict['max_SNPs_locus'], dtype=np.int16)

    # Count SNPs per site | shape=(chunk, maxlen, 2).
    snpsarr = snpcount_numba(superints, snpsarr)
    LOGGER.info("---found the snps: %s", snpsarr.sum())

    # Apply edge filters and the per-locus max-SNP threshold.
    snpfilt, snpsarr = snpfilter_numba(snpsarr, snpfilt, edgearr, maxsnps)
    LOGGER.info("---filtered snps: %s", snpfilt.sum())
    return snpfilt, snpsarr
constant[
Filter max # of SNPs per locus. Do R1 and R2 separately if PE.
Also generate the snpsite line for the .loci format and save in the snp arr
This uses the edge filters that have been built based on trimming, and
saves the snps array with edges filtered. **Loci are not yet filtered.**
]
variable[snpfilt] assign[=] call[name[np].zeros, parameter[call[name[superints].shape][constant[0]]]]
variable[snpsarr] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Subscript object at 0x7da1b0010b50>, <ast.Subscript object at 0x7da1b00101f0>, <ast.Constant object at 0x7da1b0012110>]]]]
variable[maxsnps] assign[=] call[name[np].array, parameter[call[name[data].paramsdict][constant[max_SNPs_locus]]]]
variable[snpsarr] assign[=] call[name[snpcount_numba], parameter[name[superints], name[snpsarr]]]
call[name[LOGGER].info, parameter[constant[---found the snps: %s], call[name[snpsarr].sum, parameter[]]]]
<ast.Tuple object at 0x7da1b0011b40> assign[=] call[name[snpfilter_numba], parameter[name[snpsarr], name[snpfilt], name[edgearr], name[maxsnps]]]
call[name[LOGGER].info, parameter[constant[---filtered snps: %s], call[name[snpfilt].sum, parameter[]]]]
return[tuple[[<ast.Name object at 0x7da1b0012a10>, <ast.Name object at 0x7da1b00120b0>]]] | keyword[def] identifier[filter_maxsnp] ( identifier[data] , identifier[superints] , identifier[edgearr] ):
literal[string]
identifier[snpfilt] = identifier[np] . identifier[zeros] ( identifier[superints] . identifier[shape] [ literal[int] ], identifier[dtype] = identifier[np] . identifier[bool] )
identifier[snpsarr] = identifier[np] . identifier[zeros] (( identifier[superints] . identifier[shape] [ literal[int] ], identifier[superints] . identifier[shape] [ literal[int] ], literal[int] ), identifier[dtype] = identifier[np] . identifier[bool] )
identifier[maxsnps] = identifier[np] . identifier[array] ( identifier[data] . identifier[paramsdict] [ literal[string] ], identifier[dtype] = identifier[np] . identifier[int16] )
identifier[snpsarr] = identifier[snpcount_numba] ( identifier[superints] , identifier[snpsarr] )
identifier[LOGGER] . identifier[info] ( literal[string] , identifier[snpsarr] . identifier[sum] ())
identifier[snpfilt] , identifier[snpsarr] = identifier[snpfilter_numba] ( identifier[snpsarr] , identifier[snpfilt] , identifier[edgearr] , identifier[maxsnps] )
identifier[LOGGER] . identifier[info] ( literal[string] , identifier[snpfilt] . identifier[sum] ())
keyword[return] identifier[snpfilt] , identifier[snpsarr] | def filter_maxsnp(data, superints, edgearr):
"""
Filter max # of SNPs per locus. Do R1 and R2 separately if PE.
Also generate the snpsite line for the .loci format and save in the snp arr
This uses the edge filters that have been built based on trimming, and
saves the snps array with edges filtered. **Loci are not yet filtered.**
"""
## an empty array to count with failed loci
snpfilt = np.zeros(superints.shape[0], dtype=np.bool)
snpsarr = np.zeros((superints.shape[0], superints.shape[2], 2), dtype=np.bool)
maxsnps = np.array(data.paramsdict['max_SNPs_locus'], dtype=np.int16)
## get the per site snp string | shape=(chunk, maxlen)
# snpsarr[:, :, 0] = snps == "-"
# snpsarr[:, :, 1] = snps == "*"
snpsarr = snpcount_numba(superints, snpsarr)
LOGGER.info('---found the snps: %s', snpsarr.sum())
(snpfilt, snpsarr) = snpfilter_numba(snpsarr, snpfilt, edgearr, maxsnps)
LOGGER.info('---filtered snps: %s', snpfilt.sum())
return (snpfilt, snpsarr) |
def _clause(self, pt: parsing.ParserTree) -> [ast.stmt]:
        """Normalize a test expression into a statements list.

        A statements list is passed through unchanged.  A bare expression
        is wrapped in a guard so a failing test aborts the rule::

            if not expr:
                return False
        """
        if not isinstance(pt, list):
            guard = ast.If(ast.UnaryOp(ast.Not(), pt),
                           [self.__exit_scope()],
                           [])
            pt = [guard]
        return pt
constant[Normalize a test expression into a statements list.
Statements list are returned as-is.
Expression is packaged as:
if not expr:
return False
]
if call[name[isinstance], parameter[name[pt], name[list]]] begin[:]
return[name[pt]]
return[list[[<ast.Call object at 0x7da1b0178490>]]] | keyword[def] identifier[_clause] ( identifier[self] , identifier[pt] : identifier[parsing] . identifier[ParserTree] )->[ identifier[ast] . identifier[stmt] ]:
literal[string]
keyword[if] identifier[isinstance] ( identifier[pt] , identifier[list] ):
keyword[return] identifier[pt]
keyword[return] [ identifier[ast] . identifier[If] ( identifier[ast] . identifier[UnaryOp] ( identifier[ast] . identifier[Not] (), identifier[pt] ),
[ identifier[self] . identifier[__exit_scope] ()],
[])] | def _clause(self, pt: parsing.ParserTree) -> [ast.stmt]:
"""Normalize a test expression into a statements list.
Statements list are returned as-is.
Expression is packaged as:
if not expr:
return False
"""
if isinstance(pt, list):
return pt # depends on [control=['if'], data=[]]
return [ast.If(ast.UnaryOp(ast.Not(), pt), [self.__exit_scope()], [])] |
async def exec_subprocess(argv):
	"""
	Run *argv* as a child process and await its completion.

	A Future task that represents a subprocess. If successful, the task's
	result is set to the collected STDOUT of the subprocess.

	:param argv: sequence of program arguments (argv[0] is the executable)
	:returns: the collected STDOUT of the subprocess as utf-8 text
	:raises RuntimeError: if the subprocess did not exit normally
	:raises CalledProcessError: if the subprocess returns a non-zero
		exit code
	"""
	# Bridge GLib's callback-style async API to the awaitable world:
	# gio_callback is expected to resolve this Future when
	# communicate_utf8_async completes.
	future = Future()
	# STDOUT is captured through a pipe; STDIN is inherited from the parent.
	process = Gio.Subprocess.new(
		argv,
		Gio.SubprocessFlags.STDOUT_PIPE |
		Gio.SubprocessFlags.STDIN_INHERIT)
	stdin_buf = None
	cancellable = None
	process.communicate_utf8_async(
		stdin_buf, cancellable, gio_callback, future)
	result = await future
	success, stdout, stderr = process.communicate_utf8_finish(result)
	if not success:
		raise RuntimeError("Subprocess did not exit normally!")
	exit_code = process.get_exit_status()
	if exit_code != 0:
		# NOTE(review): argument order differs from the stdlib
		# subprocess.CalledProcessError(returncode, cmd, output=...);
		# presumably CalledProcessError here is a project-local class --
		# confirm against its definition.
		raise CalledProcessError(
			"Subprocess returned a non-zero exit-status!",
			exit_code,
			stdout)
	return stdout
literal[string]
identifier[future] = identifier[Future] ()
identifier[process] = identifier[Gio] . identifier[Subprocess] . identifier[new] (
identifier[argv] ,
identifier[Gio] . identifier[SubprocessFlags] . identifier[STDOUT_PIPE] |
identifier[Gio] . identifier[SubprocessFlags] . identifier[STDIN_INHERIT] )
identifier[stdin_buf] = keyword[None]
identifier[cancellable] = keyword[None]
identifier[process] . identifier[communicate_utf8_async] (
identifier[stdin_buf] , identifier[cancellable] , identifier[gio_callback] , identifier[future] )
identifier[result] = keyword[await] identifier[future]
identifier[success] , identifier[stdout] , identifier[stderr] = identifier[process] . identifier[communicate_utf8_finish] ( identifier[result] )
keyword[if] keyword[not] identifier[success] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
identifier[exit_code] = identifier[process] . identifier[get_exit_status] ()
keyword[if] identifier[exit_code] != literal[int] :
keyword[raise] identifier[CalledProcessError] (
literal[string] ,
identifier[exit_code] ,
identifier[stdout] )
keyword[return] identifier[stdout] | async def exec_subprocess(argv):
"""
An Future task that represents a subprocess. If successful, the task's
result is set to the collected STDOUT of the subprocess.
:raises subprocess.CalledProcessError: if the subprocess returns a non-zero
exit code
"""
future = Future()
process = Gio.Subprocess.new(argv, Gio.SubprocessFlags.STDOUT_PIPE | Gio.SubprocessFlags.STDIN_INHERIT)
stdin_buf = None
cancellable = None
process.communicate_utf8_async(stdin_buf, cancellable, gio_callback, future)
result = await future
(success, stdout, stderr) = process.communicate_utf8_finish(result)
if not success:
raise RuntimeError('Subprocess did not exit normally!') # depends on [control=['if'], data=[]]
exit_code = process.get_exit_status()
if exit_code != 0:
raise CalledProcessError('Subprocess returned a non-zero exit-status!', exit_code, stdout) # depends on [control=['if'], data=['exit_code']]
return stdout |
def connection(self):
    """Snow connection instance, stores a `pysnow.Client` instance and `pysnow.Resource` instances

    Lazily builds a :class:`pysnow.Client` and caches it on the app slice
    of the context stack; subsequent calls reuse the cached client.

    :returns: :class:`pysnow.Client` object, or None when no app context
        is available
    """
    ctx = stack.top.app
    if ctx is None:
        # No application context: mirror the original fall-through (None).
        return None
    if not hasattr(ctx, 'snow'):
        # First access in this context -- build the client.
        if self._client_type_oauth:
            if not self._token_updater:
                warnings.warn("No token updater has been set. Token refreshes will be ignored.")
            client = self._get_oauth_client()
        else:
            client = self._get_basic_client()
        if self._parameters:
            # Apply parameters passed on app init.
            client.parameters = self._parameters
        ctx.snow = client
    return ctx.snow
constant[Snow connection instance, stores a `pysnow.Client` instance and `pysnow.Resource` instances
Creates a new :class:`pysnow.Client` object if it doesn't exist in the app slice of the context stack
:returns: :class:`pysnow.Client` object
]
variable[ctx] assign[=] name[stack].top.app
if compare[name[ctx] is_not constant[None]] begin[:]
if <ast.UnaryOp object at 0x7da1b0e0ca90> begin[:]
if name[self]._client_type_oauth begin[:]
if <ast.UnaryOp object at 0x7da1b0e0efe0> begin[:]
call[name[warnings].warn, parameter[constant[No token updater has been set. Token refreshes will be ignored.]]]
variable[client] assign[=] call[name[self]._get_oauth_client, parameter[]]
if name[self]._parameters begin[:]
name[client].parameters assign[=] name[self]._parameters
name[ctx].snow assign[=] name[client]
return[name[ctx].snow] | keyword[def] identifier[connection] ( identifier[self] ):
literal[string]
identifier[ctx] = identifier[stack] . identifier[top] . identifier[app]
keyword[if] identifier[ctx] keyword[is] keyword[not] keyword[None] :
keyword[if] keyword[not] identifier[hasattr] ( identifier[ctx] , literal[string] ):
keyword[if] identifier[self] . identifier[_client_type_oauth] :
keyword[if] keyword[not] identifier[self] . identifier[_token_updater] :
identifier[warnings] . identifier[warn] ( literal[string] )
identifier[client] = identifier[self] . identifier[_get_oauth_client] ()
keyword[else] :
identifier[client] = identifier[self] . identifier[_get_basic_client] ()
keyword[if] identifier[self] . identifier[_parameters] :
identifier[client] . identifier[parameters] = identifier[self] . identifier[_parameters]
identifier[ctx] . identifier[snow] = identifier[client]
keyword[return] identifier[ctx] . identifier[snow] | def connection(self):
"""Snow connection instance, stores a `pysnow.Client` instance and `pysnow.Resource` instances
Creates a new :class:`pysnow.Client` object if it doesn't exist in the app slice of the context stack
:returns: :class:`pysnow.Client` object
"""
ctx = stack.top.app
if ctx is not None:
if not hasattr(ctx, 'snow'):
if self._client_type_oauth:
if not self._token_updater:
warnings.warn('No token updater has been set. Token refreshes will be ignored.') # depends on [control=['if'], data=[]]
client = self._get_oauth_client() # depends on [control=['if'], data=[]]
else:
client = self._get_basic_client()
if self._parameters:
# Set parameters passed on app init
client.parameters = self._parameters # depends on [control=['if'], data=[]]
ctx.snow = client # depends on [control=['if'], data=[]]
return ctx.snow # depends on [control=['if'], data=['ctx']] |
def _generateRangeDescription(self, ranges):
    """Return a human-readable description of a list of numeric ranges.

    :param ranges: sequence of indexable pairs; element [0] is the lower
        bound and element [1] the upper bound of each range
    :returns: comma-separated string where a range with equal endpoints
        is rendered as a single number ("3.00") and otherwise as
        "lo-hi" ("1.00-2.00"), each value formatted to two decimals
    """
    # str.join handles the separators, replacing the Python-2-only
    # xrange/index bookkeeping of the original loop.
    parts = []
    for rng in ranges:
        lo, hi = rng[0], rng[1]
        if lo != hi:
            parts.append("%.2f-%.2f" % (lo, hi))
        else:
            parts.append("%.2f" % lo)
    return ", ".join(parts)
constant[generate description from a text description of the ranges]
variable[desc] assign[=] constant[]
variable[numRanges] assign[=] call[name[len], parameter[name[ranges]]]
for taget[name[i]] in starred[call[name[xrange], parameter[name[numRanges]]]] begin[:]
if compare[call[call[name[ranges]][name[i]]][constant[0]] not_equal[!=] call[call[name[ranges]][name[i]]][constant[1]]] begin[:]
<ast.AugAssign object at 0x7da20c6a8280>
if compare[name[i] less[<] binary_operation[name[numRanges] - constant[1]]] begin[:]
<ast.AugAssign object at 0x7da20c6a9d80>
return[name[desc]] | keyword[def] identifier[_generateRangeDescription] ( identifier[self] , identifier[ranges] ):
literal[string]
identifier[desc] = literal[string]
identifier[numRanges] = identifier[len] ( identifier[ranges] )
keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[numRanges] ):
keyword[if] identifier[ranges] [ identifier[i] ][ literal[int] ]!= identifier[ranges] [ identifier[i] ][ literal[int] ]:
identifier[desc] += literal[string] %( identifier[ranges] [ identifier[i] ][ literal[int] ], identifier[ranges] [ identifier[i] ][ literal[int] ])
keyword[else] :
identifier[desc] += literal[string] %( identifier[ranges] [ identifier[i] ][ literal[int] ])
keyword[if] identifier[i] < identifier[numRanges] - literal[int] :
identifier[desc] += literal[string]
keyword[return] identifier[desc] | def _generateRangeDescription(self, ranges):
"""generate description from a text description of the ranges"""
desc = ''
numRanges = len(ranges)
for i in xrange(numRanges):
if ranges[i][0] != ranges[i][1]:
desc += '%.2f-%.2f' % (ranges[i][0], ranges[i][1]) # depends on [control=['if'], data=[]]
else:
desc += '%.2f' % ranges[i][0]
if i < numRanges - 1:
desc += ', ' # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
return desc |
def generate_endpoint_classes(db, generate_pks=False):
    """Generate and register a model class for each reflected database
    table that does not already have one.

    :param db: Flask-SQLAlchemy database instance to reflect
    :param bool generate_pks: when True, synthesize a primary key (via
        ``add_pk_if_required``) for tables that lack one

    Note: nothing is returned (the original docstring claimed a list);
    classes are registered via ``register`` for their side effects.
    """
    # Table names already backed by an explicitly declared model class.
    seen_tables = set(
        cls.__tablename__ for cls in current_app.class_references.values())
    with app.app_context():
        db.metadata.reflect(bind=db.engine)
        for name, table in db.metadata.tables.items():
            if name not in seen_tables:
                seen_tables.add(name)
                if not table.primary_key and generate_pks:
                    cls = add_pk_if_required(db, table, name)
                else:
                    cls = type(
                        str(name),
                        (sandman_model, db.Model),
                        {'__tablename__': name})
                register(cls)
constant[Return a list of model classes generated for each reflected database
table.]
variable[seen_classes] assign[=] call[name[set], parameter[]]
for taget[name[cls]] in starred[call[name[current_app].class_references.values, parameter[]]] begin[:]
call[name[seen_classes].add, parameter[name[cls].__tablename__]]
with call[name[app].app_context, parameter[]] begin[:]
call[name[db].metadata.reflect, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b26ac6d0>, <ast.Name object at 0x7da1b26ac160>]]] in starred[call[name[db].metadata.tables.items, parameter[]]] begin[:]
if <ast.UnaryOp object at 0x7da1b26ad330> begin[:]
call[name[seen_classes].add, parameter[name[name]]]
if <ast.BoolOp object at 0x7da1b26afcd0> begin[:]
variable[cls] assign[=] call[name[add_pk_if_required], parameter[name[db], name[table], name[name]]]
call[name[register], parameter[name[cls]]] | keyword[def] identifier[generate_endpoint_classes] ( identifier[db] , identifier[generate_pks] = keyword[False] ):
literal[string]
identifier[seen_classes] = identifier[set] ()
keyword[for] identifier[cls] keyword[in] identifier[current_app] . identifier[class_references] . identifier[values] ():
identifier[seen_classes] . identifier[add] ( identifier[cls] . identifier[__tablename__] )
keyword[with] identifier[app] . identifier[app_context] ():
identifier[db] . identifier[metadata] . identifier[reflect] ( identifier[bind] = identifier[db] . identifier[engine] )
keyword[for] identifier[name] , identifier[table] keyword[in] identifier[db] . identifier[metadata] . identifier[tables] . identifier[items] ():
keyword[if] keyword[not] identifier[name] keyword[in] identifier[seen_classes] :
identifier[seen_classes] . identifier[add] ( identifier[name] )
keyword[if] keyword[not] identifier[table] . identifier[primary_key] keyword[and] identifier[generate_pks] :
identifier[cls] = identifier[add_pk_if_required] ( identifier[db] , identifier[table] , identifier[name] )
keyword[else] :
identifier[cls] = identifier[type] (
identifier[str] ( identifier[name] ),
( identifier[sandman_model] , identifier[db] . identifier[Model] ),
{ literal[string] : identifier[name] })
identifier[register] ( identifier[cls] ) | def generate_endpoint_classes(db, generate_pks=False):
"""Return a list of model classes generated for each reflected database
table."""
seen_classes = set()
for cls in current_app.class_references.values():
seen_classes.add(cls.__tablename__) # depends on [control=['for'], data=['cls']]
with app.app_context():
db.metadata.reflect(bind=db.engine)
for (name, table) in db.metadata.tables.items():
if not name in seen_classes:
seen_classes.add(name)
if not table.primary_key and generate_pks:
cls = add_pk_if_required(db, table, name) # depends on [control=['if'], data=[]]
else:
cls = type(str(name), (sandman_model, db.Model), {'__tablename__': name})
register(cls) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['with'], data=[]] |
def __set_unkown_effect(self, hgvs_string):
        """Set flags for unknown effect according to HGVS syntax. The
        COSMIC database also uses unconventional question marks to denote
        missing information.

        Args:
            hgvs_string (str): hgvs syntax with "p." removed
        """
        # '?', '(=)' and '=' are HGVS's standard unknown-effect symbols;
        # parentheses elsewhere in the string mark merely *expected*
        # (i.e. unverified) outcomes, which we also treat as unknown.
        unknown_markers = ('?', '(=)', '=')
        self.unknown_effect = (hgvs_string in unknown_markers
                               or "(" in hgvs_string)
        # COSMIC commonly leaves '?' placeholders for missing positions,
        # e.g. insertions p.?_?ins? or deletions ?del.
        self.is_missing_info = "?" in hgvs_string
constant[Sets a flag for unkown effect according to HGVS syntax. The
COSMIC database also uses unconventional questionmarks to denote
missing information.
Args:
hgvs_string (str): hgvs syntax with "p." removed
]
variable[unknown_effect_list] assign[=] list[[<ast.Constant object at 0x7da18f810dc0>, <ast.Constant object at 0x7da18f811810>, <ast.Constant object at 0x7da18f812740>]]
if compare[name[hgvs_string] in name[unknown_effect_list]] begin[:]
name[self].unknown_effect assign[=] constant[True]
if compare[constant[?] in name[hgvs_string]] begin[:]
name[self].is_missing_info assign[=] constant[True] | keyword[def] identifier[__set_unkown_effect] ( identifier[self] , identifier[hgvs_string] ):
literal[string]
identifier[unknown_effect_list] =[ literal[string] , literal[string] , literal[string] ]
keyword[if] identifier[hgvs_string] keyword[in] identifier[unknown_effect_list] :
identifier[self] . identifier[unknown_effect] = keyword[True]
keyword[elif] literal[string] keyword[in] identifier[hgvs_string] :
identifier[self] . identifier[unknown_effect] = keyword[True]
keyword[else] :
identifier[self] . identifier[unknown_effect] = keyword[False]
keyword[if] literal[string] keyword[in] identifier[hgvs_string] :
identifier[self] . identifier[is_missing_info] = keyword[True]
keyword[else] :
identifier[self] . identifier[is_missing_info] = keyword[False] | def __set_unkown_effect(self, hgvs_string):
"""Sets a flag for unkown effect according to HGVS syntax. The
COSMIC database also uses unconventional questionmarks to denote
missing information.
Args:
hgvs_string (str): hgvs syntax with "p." removed
"""
# Standard use by HGVS of indicating unknown effect.
unknown_effect_list = ['?', '(=)', '='] # unknown effect symbols
if hgvs_string in unknown_effect_list:
self.unknown_effect = True # depends on [control=['if'], data=[]]
elif '(' in hgvs_string:
# parethesis in HGVS indicate expected outcomes
self.unknown_effect = True # depends on [control=['if'], data=[]]
else:
self.unknown_effect = False
# detect if there are missing information. commonly COSMIC will
# have insertions with p.?_?ins? or deleteions with ?del indicating
# missing information.
if '?' in hgvs_string:
self.is_missing_info = True # depends on [control=['if'], data=[]]
else:
self.is_missing_info = False |
def Debugger_setPauseOnExceptions(self, state):
	"""
	Function path: Debugger.setPauseOnExceptions
	Domain: Debugger
	Method name: setPauseOnExceptions

	Defines pause on exceptions state. Can be set to stop on all
	exceptions, uncaught exceptions or no exceptions. Initial pause on
	exceptions state is <code>none</code>.

	Parameters:
		Required arguments:
			'state' (type: string) -> Pause on exceptions mode.
	No return value.
	"""
	assert isinstance(state, (str,)), (
		"Argument 'state' must be of type '['str']'. Received type: '%s'"
		% type(state))
	return self.synchronous_command('Debugger.setPauseOnExceptions',
		state=state)
constant[
Function path: Debugger.setPauseOnExceptions
Domain: Debugger
Method name: setPauseOnExceptions
Parameters:
Required arguments:
'state' (type: string) -> Pause on exceptions mode.
No return value.
Description: Defines pause on exceptions state. Can be set to stop on all exceptions, uncaught exceptions or no exceptions. Initial pause on exceptions state is <code>none</code>.
]
assert[call[name[isinstance], parameter[name[state], tuple[[<ast.Name object at 0x7da1b1105150>]]]]]
variable[subdom_funcs] assign[=] call[name[self].synchronous_command, parameter[constant[Debugger.setPauseOnExceptions]]]
return[name[subdom_funcs]] | keyword[def] identifier[Debugger_setPauseOnExceptions] ( identifier[self] , identifier[state] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[state] ,( identifier[str] ,)
), literal[string] % identifier[type] (
identifier[state] )
identifier[subdom_funcs] = identifier[self] . identifier[synchronous_command] ( literal[string] ,
identifier[state] = identifier[state] )
keyword[return] identifier[subdom_funcs] | def Debugger_setPauseOnExceptions(self, state):
"""
Function path: Debugger.setPauseOnExceptions
Domain: Debugger
Method name: setPauseOnExceptions
Parameters:
Required arguments:
'state' (type: string) -> Pause on exceptions mode.
No return value.
Description: Defines pause on exceptions state. Can be set to stop on all exceptions, uncaught exceptions or no exceptions. Initial pause on exceptions state is <code>none</code>.
"""
assert isinstance(state, (str,)), "Argument 'state' must be of type '['str']'. Received type: '%s'" % type(state)
subdom_funcs = self.synchronous_command('Debugger.setPauseOnExceptions', state=state)
return subdom_funcs |
def createCertRequest(pkey, digest="sha256", **name):
    """
    Build and sign an X.509 certificate request.

    Arguments: pkey   - The key to associate with the request
               digest - Digestion method to use for signing, default is sha256
               **name - Subject fields of the request; possible arguments:
                          C  - Country name
                          ST - State or province name
                          L  - Locality name
                          O  - Organization name
                          OU - Organizational unit name
                          CN - Common name
                          emailAddress - E-mail address
    Returns:   The certificate request in an X509Req object
    """
    request = crypto.X509Req()
    subject = request.get_subject()
    # Copy every supplied subject field onto the request's subject.
    for field, value in name.items():
        setattr(subject, field, value)
    request.set_pubkey(pkey)
    request.sign(pkey, digest)
    return request
constant[
Create a certificate request.
Arguments: pkey - The key to associate with the request
digest - Digestion method to use for signing, default is sha256
**name - The name of the subject of the request, possible
arguments are:
C - Country name
ST - State or province name
L - Locality name
O - Organization name
OU - Organizational unit name
CN - Common name
emailAddress - E-mail address
Returns: The certificate request in an X509Req object
]
variable[req] assign[=] call[name[crypto].X509Req, parameter[]]
variable[subj] assign[=] call[name[req].get_subject, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b020f9d0>, <ast.Name object at 0x7da1b020d630>]]] in starred[call[name[name].items, parameter[]]] begin[:]
call[name[setattr], parameter[name[subj], name[key], name[value]]]
call[name[req].set_pubkey, parameter[name[pkey]]]
call[name[req].sign, parameter[name[pkey], name[digest]]]
return[name[req]] | keyword[def] identifier[createCertRequest] ( identifier[pkey] , identifier[digest] = literal[string] ,** identifier[name] ):
literal[string]
identifier[req] = identifier[crypto] . identifier[X509Req] ()
identifier[subj] = identifier[req] . identifier[get_subject] ()
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[name] . identifier[items] ():
identifier[setattr] ( identifier[subj] , identifier[key] , identifier[value] )
identifier[req] . identifier[set_pubkey] ( identifier[pkey] )
identifier[req] . identifier[sign] ( identifier[pkey] , identifier[digest] )
keyword[return] identifier[req] | def createCertRequest(pkey, digest='sha256', **name):
"""
Create a certificate request.
Arguments: pkey - The key to associate with the request
digest - Digestion method to use for signing, default is sha256
**name - The name of the subject of the request, possible
arguments are:
C - Country name
ST - State or province name
L - Locality name
O - Organization name
OU - Organizational unit name
CN - Common name
emailAddress - E-mail address
Returns: The certificate request in an X509Req object
"""
req = crypto.X509Req()
subj = req.get_subject()
for (key, value) in name.items():
setattr(subj, key, value) # depends on [control=['for'], data=[]]
req.set_pubkey(pkey)
req.sign(pkey, digest)
return req |
def joint(self, table, fields,
          join_table, join_fields,
          condition_field, condition_join_field,
          join_method='left_join'):
        """.. :py:method::

        Build and execute a two-table join query.

        Usage::
            >>> joint('user', 'name, id_number', 'medical_card', 'number', 'id', 'user_id', 'inner_join')
            select u.name, u.id_number, v.number from user as u inner join medical_card as v on u.id=v.user_id;

        :param table: left table name (aliased ``u``)
        :param fields: comma-separated columns selected from *table*
        :param join_table: right table name (aliased ``v``)
        :param join_fields: comma-separated columns selected from *join_table*
        :param condition_field: join key column on ``u``
        :param condition_join_field: join key column on ``v``
        :param join_method: SQL join keyword to emit, default 'left_join'
        :returns: query results from the superclass ``execute``
        """
        # str.strip replaces the Python-2-only string.strip function, so
        # this now works on both Python 2 and 3 (and drops the local
        # `import string`).
        select = ', '.join(
            'u.{}'.format(field.strip()) for field in fields.split(','))
        join_select = ', '.join(
            'v.{}'.format(field.strip()) for field in join_fields.split(','))
        # SECURITY: table/column identifiers are interpolated directly into
        # the SQL text (identifiers cannot be bound as parameters); callers
        # must never pass untrusted input to this method.
        sql = "select {select}, {join_select} from {table} as u {join_method}"\
              " {join_table} as v on u.{condition_field}="\
              "v.{condition_join_field};".format(select=select,
                                                 join_select=join_select,
                                                 table=table,
                                                 join_method=join_method,
                                                 join_table=join_table,
                                                 condition_field=condition_field,
                                                 condition_join_field=condition_join_field)
        return super(PGWrapper, self).execute(sql, result=True).results
constant[.. :py:method::
Usage::
>>> joint('user', 'name, id_number', 'medical_card', 'number', 'id', 'user_id', 'inner_join')
select u.name, u.id_number, v.number from user as u inner join medical_card as v on u.id=v.user_id;
]
import module[string]
variable[fields] assign[=] call[name[map], parameter[name[string].strip, call[name[fields].split, parameter[constant[,]]]]]
variable[select] assign[=] call[constant[, ].join, parameter[<ast.ListComp object at 0x7da1b14d5b10>]]
variable[join_fields] assign[=] call[name[map], parameter[name[string].strip, call[name[join_fields].split, parameter[constant[,]]]]]
variable[join_select] assign[=] call[constant[, ].join, parameter[<ast.ListComp object at 0x7da1b14d7640>]]
variable[sql] assign[=] call[constant[select {select}, {join_select} from {table} as u {join_method} {join_table} as v on u.{condition_field}=v.{condition_join_field};].format, parameter[]]
return[call[call[name[super], parameter[name[PGWrapper], name[self]]].execute, parameter[name[sql]]].results] | keyword[def] identifier[joint] ( identifier[self] , identifier[table] , identifier[fields] ,
identifier[join_table] , identifier[join_fields] ,
identifier[condition_field] , identifier[condition_join_field] ,
identifier[join_method] = literal[string] ):
literal[string]
keyword[import] identifier[string]
identifier[fields] = identifier[map] ( identifier[string] . identifier[strip] , identifier[fields] . identifier[split] ( literal[string] ))
identifier[select] = literal[string] . identifier[join] ([ literal[string] . identifier[format] ( identifier[field] ) keyword[for] identifier[field] keyword[in] identifier[fields] ])
identifier[join_fields] = identifier[map] ( identifier[string] . identifier[strip] , identifier[join_fields] . identifier[split] ( literal[string] ))
identifier[join_select] = literal[string] . identifier[join] ([ literal[string] . identifier[format] ( identifier[field] ) keyword[for] identifier[field] keyword[in] identifier[join_fields] ])
identifier[sql] = literal[string] literal[string] literal[string] . identifier[format] ( identifier[select] = identifier[select] ,
identifier[join_select] = identifier[join_select] ,
identifier[table] = identifier[table] ,
identifier[join_method] = identifier[join_method] ,
identifier[join_table] = identifier[join_table] ,
identifier[condition_field] = identifier[condition_field] ,
identifier[condition_join_field] = identifier[condition_join_field] )
keyword[return] identifier[super] ( identifier[PGWrapper] , identifier[self] ). identifier[execute] ( identifier[sql] , identifier[result] = keyword[True] ). identifier[results] | def joint(self, table, fields, join_table, join_fields, condition_field, condition_join_field, join_method='left_join'):
""".. :py:method::
Usage::
>>> joint('user', 'name, id_number', 'medical_card', 'number', 'id', 'user_id', 'inner_join')
select u.name, u.id_number, v.number from user as u inner join medical_card as v on u.id=v.user_id;
"""
import string
fields = map(string.strip, fields.split(','))
select = ', '.join(['u.{}'.format(field) for field in fields])
join_fields = map(string.strip, join_fields.split(','))
join_select = ', '.join(['v.{}'.format(field) for field in join_fields])
sql = 'select {select}, {join_select} from {table} as u {join_method} {join_table} as v on u.{condition_field}=v.{condition_join_field};'.format(select=select, join_select=join_select, table=table, join_method=join_method, join_table=join_table, condition_field=condition_field, condition_join_field=condition_join_field)
return super(PGWrapper, self).execute(sql, result=True).results |
def next(self, times=1):
    """Return a new Range immediately following this one.

    The new range starts at a copy of this range's ``end`` and spans the
    same duration (``self.elapse``), keeping the timezone of
    ``self.start``.

    :param times: number of periods to advance -- currently ignored
        ("times is not supported yet", per the original note)
    :returns: a new ``Range`` instance
    """
    return Range(copy(self.end),
                 self.end + self.elapse, tz=self.start.tz)
constant[Returns a new instance of self
times is not supported yet.
]
return[call[name[Range], parameter[call[name[copy], parameter[name[self].end]], binary_operation[name[self].end + name[self].elapse]]]] | keyword[def] identifier[next] ( identifier[self] , identifier[times] = literal[int] ):
literal[string]
keyword[return] identifier[Range] ( identifier[copy] ( identifier[self] . identifier[end] ),
identifier[self] . identifier[end] + identifier[self] . identifier[elapse] , identifier[tz] = identifier[self] . identifier[start] . identifier[tz] ) | def next(self, times=1):
"""Returns a new instance of self
times is not supported yet.
"""
return Range(copy(self.end), self.end + self.elapse, tz=self.start.tz) |
def readdatacommdct(idfname, iddfile='Energy+.idd', commdct=None):
    """Read the idf file.

    Args:
        idfname: path of the IDF file to read.
        iddfile: path of the IDD file; parsed only when ``commdct`` is
            not supplied. When ``commdct`` is supplied, ``iddfile`` is
            assumed to already be a parsed ``eplusdata.Idd`` instance.
        commdct: previously extracted IDD comment structure, or None.

    Returns:
        ``(data, commdct, idd_index)``. ``idd_index`` is None when a
        pre-parsed ``commdct`` was supplied, since the IDD is not
        re-parsed in that case.
    """
    # BUG FIX: idd_index was previously unbound on the commdct-supplied
    # path, so the return statement raised NameError.
    idd_index = None
    if not commdct:
        block, commlst, commdct, idd_index = parse_idd.extractidddata(iddfile)
        theidd = eplusdata.Idd(block, 2)
    else:
        theidd = iddfile
    data = eplusdata.Eplusdata(theidd, idfname)
    return data, commdct, idd_index
constant[read the idf file]
if <ast.UnaryOp object at 0x7da20c6c7430> begin[:]
<ast.Tuple object at 0x7da20c6c7dc0> assign[=] call[name[parse_idd].extractidddata, parameter[name[iddfile]]]
variable[theidd] assign[=] call[name[eplusdata].Idd, parameter[name[block], constant[2]]]
variable[data] assign[=] call[name[eplusdata].Eplusdata, parameter[name[theidd], name[idfname]]]
return[tuple[[<ast.Name object at 0x7da20c6c4820>, <ast.Name object at 0x7da20c6c4190>, <ast.Name object at 0x7da20c6c48e0>]]] | keyword[def] identifier[readdatacommdct] ( identifier[idfname] , identifier[iddfile] = literal[string] , identifier[commdct] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[commdct] :
identifier[block] , identifier[commlst] , identifier[commdct] , identifier[idd_index] = identifier[parse_idd] . identifier[extractidddata] ( identifier[iddfile] )
identifier[theidd] = identifier[eplusdata] . identifier[Idd] ( identifier[block] , literal[int] )
keyword[else] :
identifier[theidd] = identifier[iddfile]
identifier[data] = identifier[eplusdata] . identifier[Eplusdata] ( identifier[theidd] , identifier[idfname] )
keyword[return] identifier[data] , identifier[commdct] , identifier[idd_index] | def readdatacommdct(idfname, iddfile='Energy+.idd', commdct=None):
"""read the idf file"""
if not commdct:
(block, commlst, commdct, idd_index) = parse_idd.extractidddata(iddfile)
theidd = eplusdata.Idd(block, 2) # depends on [control=['if'], data=[]]
else:
theidd = iddfile
data = eplusdata.Eplusdata(theidd, idfname)
return (data, commdct, idd_index) |
def bestscan(self, seq):
    """
    m.bestscan(seq) -- Return the score of the best match to the motif in the supplied sequence
    """
    # Scan with a very low threshold so every candidate position is scored.
    matches, endpoints, scores = self.scan(seq, -100)
    if not scores:
        return -100  # sentinel: no matches found at all
    # max() is O(n) and avoids mutating the scores list, unlike the
    # previous sort-and-take-last approach.
    return max(scores)
constant[
m.bestscan(seq) -- Return the score of the best match to the motif in the supplied sequence
]
<ast.Tuple object at 0x7da1b2727ee0> assign[=] call[name[self].scan, parameter[name[seq], <ast.UnaryOp object at 0x7da1b27270d0>]]
if <ast.UnaryOp object at 0x7da1b2724760> begin[:]
return[<ast.UnaryOp object at 0x7da1b27249d0>]
call[name[scores].sort, parameter[]]
variable[best] assign[=] call[name[scores]][<ast.UnaryOp object at 0x7da1b2725840>]
return[name[best]] | keyword[def] identifier[bestscan] ( identifier[self] , identifier[seq] ):
literal[string]
identifier[matches] , identifier[endpoints] , identifier[scores] = identifier[self] . identifier[scan] ( identifier[seq] ,- literal[int] )
keyword[if] keyword[not] identifier[scores] : keyword[return] - literal[int]
identifier[scores] . identifier[sort] ()
identifier[best] = identifier[scores] [- literal[int] ]
keyword[return] identifier[best] | def bestscan(self, seq):
"""
m.bestscan(seq) -- Return the score of the best match to the motif in the supplied sequence
"""
(matches, endpoints, scores) = self.scan(seq, -100)
if not scores:
return -100 # depends on [control=['if'], data=[]]
scores.sort()
best = scores[-1]
return best |
def deletePhysicalInterface(self, physicalInterfaceId):
    """
    Delete a physical interface.
    Parameters: physicalInterfaceId (string).
    Throws APIException on failure.
    """
    url = ApiClient.onePhysicalInterfaceUrl % (self.host, "/draft", physicalInterfaceId)
    resp = requests.delete(url, auth=self.credentials, verify=self.verify)
    # 204 No Content is the only success status for this endpoint.
    if resp.status_code != 204:
        raise ibmiotf.APIException(resp.status_code, "HTTP error deleting a physical interface", resp)
    self.logger.debug("physical interface deleted")
    return resp
constant[
Delete a physical interface.
Parameters: physicalInterfaceId (string).
Throws APIException on failure.
]
variable[req] assign[=] binary_operation[name[ApiClient].onePhysicalInterfaceUrl <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da2041d8100>, <ast.Constant object at 0x7da2041dbd30>, <ast.Name object at 0x7da2041db220>]]]
variable[resp] assign[=] call[name[requests].delete, parameter[name[req]]]
if compare[name[resp].status_code equal[==] constant[204]] begin[:]
call[name[self].logger.debug, parameter[constant[physical interface deleted]]]
return[name[resp]] | keyword[def] identifier[deletePhysicalInterface] ( identifier[self] , identifier[physicalInterfaceId] ):
literal[string]
identifier[req] = identifier[ApiClient] . identifier[onePhysicalInterfaceUrl] %( identifier[self] . identifier[host] , literal[string] , identifier[physicalInterfaceId] )
identifier[resp] = identifier[requests] . identifier[delete] ( identifier[req] , identifier[auth] = identifier[self] . identifier[credentials] , identifier[verify] = identifier[self] . identifier[verify] )
keyword[if] identifier[resp] . identifier[status_code] == literal[int] :
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] )
keyword[else] :
keyword[raise] identifier[ibmiotf] . identifier[APIException] ( identifier[resp] . identifier[status_code] , literal[string] , identifier[resp] )
keyword[return] identifier[resp] | def deletePhysicalInterface(self, physicalInterfaceId):
"""
Delete a physical interface.
Parameters: physicalInterfaceId (string).
Throws APIException on failure.
"""
req = ApiClient.onePhysicalInterfaceUrl % (self.host, '/draft', physicalInterfaceId)
resp = requests.delete(req, auth=self.credentials, verify=self.verify)
if resp.status_code == 204:
self.logger.debug('physical interface deleted') # depends on [control=['if'], data=[]]
else:
raise ibmiotf.APIException(resp.status_code, 'HTTP error deleting a physical interface', resp)
return resp |
def get(method, hmc, uri, uri_parms, logon_required):
    """Operation: List Password Rules."""
    query_str = uri_parms[0]
    try:
        console = hmc.consoles.lookup_by_oid(None)
    except KeyError:
        raise InvalidResourceError(method, uri)
    filter_args = parse_query_parms(method, uri, query_str)
    rules = []
    for rule in console.password_rules.list(filter_args):
        # Only the summary properties are exposed in a list result.
        summary = {prop: rule.properties[prop]
                   for prop in rule.properties
                   if prop in ('element-uri', 'name', 'type')}
        rules.append(summary)
    return {'password-rules': rules}
constant[Operation: List Password Rules.]
variable[query_str] assign[=] call[name[uri_parms]][constant[0]]
<ast.Try object at 0x7da1b0383790>
variable[result_password_rules] assign[=] list[[]]
variable[filter_args] assign[=] call[name[parse_query_parms], parameter[name[method], name[uri], name[query_str]]]
for taget[name[password_rule]] in starred[call[name[console].password_rules.list, parameter[name[filter_args]]]] begin[:]
variable[result_password_rule] assign[=] dictionary[[], []]
for taget[name[prop]] in starred[name[password_rule].properties] begin[:]
if compare[name[prop] in tuple[[<ast.Constant object at 0x7da1b0382d40>, <ast.Constant object at 0x7da1b0383a00>, <ast.Constant object at 0x7da1b0383f70>]]] begin[:]
call[name[result_password_rule]][name[prop]] assign[=] call[name[password_rule].properties][name[prop]]
call[name[result_password_rules].append, parameter[name[result_password_rule]]]
return[dictionary[[<ast.Constant object at 0x7da18fe93880>], [<ast.Name object at 0x7da18fe90d30>]]] | keyword[def] identifier[get] ( identifier[method] , identifier[hmc] , identifier[uri] , identifier[uri_parms] , identifier[logon_required] ):
literal[string]
identifier[query_str] = identifier[uri_parms] [ literal[int] ]
keyword[try] :
identifier[console] = identifier[hmc] . identifier[consoles] . identifier[lookup_by_oid] ( keyword[None] )
keyword[except] identifier[KeyError] :
keyword[raise] identifier[InvalidResourceError] ( identifier[method] , identifier[uri] )
identifier[result_password_rules] =[]
identifier[filter_args] = identifier[parse_query_parms] ( identifier[method] , identifier[uri] , identifier[query_str] )
keyword[for] identifier[password_rule] keyword[in] identifier[console] . identifier[password_rules] . identifier[list] ( identifier[filter_args] ):
identifier[result_password_rule] ={}
keyword[for] identifier[prop] keyword[in] identifier[password_rule] . identifier[properties] :
keyword[if] identifier[prop] keyword[in] ( literal[string] , literal[string] , literal[string] ):
identifier[result_password_rule] [ identifier[prop] ]= identifier[password_rule] . identifier[properties] [ identifier[prop] ]
identifier[result_password_rules] . identifier[append] ( identifier[result_password_rule] )
keyword[return] { literal[string] : identifier[result_password_rules] } | def get(method, hmc, uri, uri_parms, logon_required):
"""Operation: List Password Rules."""
query_str = uri_parms[0]
try:
console = hmc.consoles.lookup_by_oid(None) # depends on [control=['try'], data=[]]
except KeyError:
raise InvalidResourceError(method, uri) # depends on [control=['except'], data=[]]
result_password_rules = []
filter_args = parse_query_parms(method, uri, query_str)
for password_rule in console.password_rules.list(filter_args):
result_password_rule = {}
for prop in password_rule.properties:
if prop in ('element-uri', 'name', 'type'):
result_password_rule[prop] = password_rule.properties[prop] # depends on [control=['if'], data=['prop']] # depends on [control=['for'], data=['prop']]
result_password_rules.append(result_password_rule) # depends on [control=['for'], data=['password_rule']]
return {'password-rules': result_password_rules} |
def _WriteFileChunk(self, chunk):
    """Yields binary chunks, respecting archive file headers and footers.

    Args:
      chunk: the StreamedFileChunk to be written
    """
    is_first = chunk.chunk_index == 0
    is_last = chunk.chunk_index == chunk.total_chunks - 1
    if is_first:
        # The original file size must be passed via a synthetic stat
        # result; it's required when output_writer is StreamingTarWriter.
        st = os.stat_result((0o644, 0, 0, 0, 0, 0, chunk.total_size, 0, 0, 0))
        target_path = _ClientPathToString(chunk.client_path, prefix=self.prefix)
        yield self.archive_generator.WriteFileHeader(target_path, st=st)
    yield self.archive_generator.WriteFileChunk(chunk.data)
    if is_last:
        yield self.archive_generator.WriteFileFooter()
        self.archived_files.add(chunk.client_path)
constant[Yields binary chunks, respecting archive file headers and footers.
Args:
chunk: the StreamedFileChunk to be written
]
if compare[name[chunk].chunk_index equal[==] constant[0]] begin[:]
variable[st] assign[=] call[name[os].stat_result, parameter[tuple[[<ast.Constant object at 0x7da18f58e0b0>, <ast.Constant object at 0x7da18f58c610>, <ast.Constant object at 0x7da18f58fd60>, <ast.Constant object at 0x7da18f58d1e0>, <ast.Constant object at 0x7da18f58c550>, <ast.Constant object at 0x7da18f58d300>, <ast.Attribute object at 0x7da18f58e9e0>, <ast.Constant object at 0x7da18f58d5d0>, <ast.Constant object at 0x7da18f58f790>, <ast.Constant object at 0x7da18f58ecb0>]]]]
variable[target_path] assign[=] call[name[_ClientPathToString], parameter[name[chunk].client_path]]
<ast.Yield object at 0x7da18f58cbe0>
<ast.Yield object at 0x7da18f58d780>
if compare[name[chunk].chunk_index equal[==] binary_operation[name[chunk].total_chunks - constant[1]]] begin[:]
<ast.Yield object at 0x7da18f58d0f0>
call[name[self].archived_files.add, parameter[name[chunk].client_path]] | keyword[def] identifier[_WriteFileChunk] ( identifier[self] , identifier[chunk] ):
literal[string]
keyword[if] identifier[chunk] . identifier[chunk_index] == literal[int] :
identifier[st] = identifier[os] . identifier[stat_result] (( literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , identifier[chunk] . identifier[total_size] , literal[int] , literal[int] , literal[int] ))
identifier[target_path] = identifier[_ClientPathToString] ( identifier[chunk] . identifier[client_path] , identifier[prefix] = identifier[self] . identifier[prefix] )
keyword[yield] identifier[self] . identifier[archive_generator] . identifier[WriteFileHeader] ( identifier[target_path] , identifier[st] = identifier[st] )
keyword[yield] identifier[self] . identifier[archive_generator] . identifier[WriteFileChunk] ( identifier[chunk] . identifier[data] )
keyword[if] identifier[chunk] . identifier[chunk_index] == identifier[chunk] . identifier[total_chunks] - literal[int] :
keyword[yield] identifier[self] . identifier[archive_generator] . identifier[WriteFileFooter] ()
identifier[self] . identifier[archived_files] . identifier[add] ( identifier[chunk] . identifier[client_path] ) | def _WriteFileChunk(self, chunk):
"""Yields binary chunks, respecting archive file headers and footers.
Args:
chunk: the StreamedFileChunk to be written
"""
if chunk.chunk_index == 0:
# Make sure size of the original file is passed. It's required
# when output_writer is StreamingTarWriter.
st = os.stat_result((420, 0, 0, 0, 0, 0, chunk.total_size, 0, 0, 0))
target_path = _ClientPathToString(chunk.client_path, prefix=self.prefix)
yield self.archive_generator.WriteFileHeader(target_path, st=st) # depends on [control=['if'], data=[]]
yield self.archive_generator.WriteFileChunk(chunk.data)
if chunk.chunk_index == chunk.total_chunks - 1:
yield self.archive_generator.WriteFileFooter()
self.archived_files.add(chunk.client_path) # depends on [control=['if'], data=[]] |
def GetPatternIdInterface(patternId: int):
    """
    Get pattern COM interface by pattern id.
    patternId: int, a value in class `PatternId`.
    Return comtypes._cominterface_meta.
    """
    global _PatternIdInterfaces
    if not _PatternIdInterfaces:
        # Hoist the repeated _AutomationClient.instance().UIAutomationCore
        # lookup; every interface below comes from the same COM module.
        core = _AutomationClient.instance().UIAutomationCore
        # Patterns present on every supported Windows version.
        _PatternIdInterfaces = {
            PatternId.DockPattern: core.IUIAutomationDockPattern,
            PatternId.ExpandCollapsePattern: core.IUIAutomationExpandCollapsePattern,
            PatternId.GridItemPattern: core.IUIAutomationGridItemPattern,
            PatternId.GridPattern: core.IUIAutomationGridPattern,
            PatternId.InvokePattern: core.IUIAutomationInvokePattern,
            PatternId.ItemContainerPattern: core.IUIAutomationItemContainerPattern,
            PatternId.LegacyIAccessiblePattern: core.IUIAutomationLegacyIAccessiblePattern,
            PatternId.MultipleViewPattern: core.IUIAutomationMultipleViewPattern,
            PatternId.RangeValuePattern: core.IUIAutomationRangeValuePattern,
            PatternId.ScrollItemPattern: core.IUIAutomationScrollItemPattern,
            PatternId.ScrollPattern: core.IUIAutomationScrollPattern,
            PatternId.SelectionItemPattern: core.IUIAutomationSelectionItemPattern,
            PatternId.SelectionPattern: core.IUIAutomationSelectionPattern,
            PatternId.SynchronizedInputPattern: core.IUIAutomationSynchronizedInputPattern,
            PatternId.TableItemPattern: core.IUIAutomationTableItemPattern,
            PatternId.TablePattern: core.IUIAutomationTablePattern,
            PatternId.TextPattern: core.IUIAutomationTextPattern,
            PatternId.TogglePattern: core.IUIAutomationTogglePattern,
            PatternId.TransformPattern: core.IUIAutomationTransformPattern,
            PatternId.ValuePattern: core.IUIAutomationValuePattern,
            PatternId.VirtualizedItemPattern: core.IUIAutomationVirtualizedItemPattern,
            PatternId.WindowPattern: core.IUIAutomationWindowPattern,
        }
        debug = False
        # The following patterns don't exist on Windows 7 or lower; register
        # only the ones this UIAutomationCore build actually exposes. The
        # interface name is always 'IUIAutomation' + the pattern name, which
        # lets a single loop replace twelve copy-pasted try/except stanzas.
        for name in ('AnnotationPattern', 'CustomNavigationPattern',
                     'DragPattern', 'DropTargetPattern', 'ObjectModelPattern',
                     'SpreadsheetItemPattern', 'SpreadsheetPattern',
                     'StylesPattern', 'TextChildPattern', 'TextEditPattern',
                     'TextPattern2', 'TransformPattern2'):
            try:
                # A missing interface surfaces as AttributeError on the
                # comtypes module object (narrower than the old bare except).
                _PatternIdInterfaces[getattr(PatternId, name)] = getattr(core, 'IUIAutomation' + name)
            except AttributeError:
                if debug:
                    Logger.WriteLine('UIAutomationCore does not have {}.'.format(name), ConsoleColor.Yellow)
    return _PatternIdInterfaces[patternId]
constant[
Get pattern COM interface by pattern id.
patternId: int, a value in class `PatternId`.
Return comtypes._cominterface_meta.
]
<ast.Global object at 0x7da18c4ce7a0>
if <ast.UnaryOp object at 0x7da18c4cead0> begin[:]
variable[_PatternIdInterfaces] assign[=] dictionary[[<ast.Attribute object at 0x7da18c4ccca0>, <ast.Attribute object at 0x7da18c4cf280>, <ast.Attribute object at 0x7da18c4cd420>, <ast.Attribute object at 0x7da18c4cfdc0>, <ast.Attribute object at 0x7da18c4cc460>, <ast.Attribute object at 0x7da18c4cd0f0>, <ast.Attribute object at 0x7da18c4ce200>, <ast.Attribute object at 0x7da18c4cf460>, <ast.Attribute object at 0x7da18c4cc1f0>, <ast.Attribute object at 0x7da18c4cdc60>, <ast.Attribute object at 0x7da18c4ceda0>, <ast.Attribute object at 0x7da18c4ce290>, <ast.Attribute object at 0x7da18c4cfb20>, <ast.Attribute object at 0x7da18c4cc580>, <ast.Attribute object at 0x7da18c4cfe50>, <ast.Attribute object at 0x7da18c4ced70>, <ast.Attribute object at 0x7da18c4ce470>, <ast.Attribute object at 0x7da18c4cf580>, <ast.Attribute object at 0x7da18c4cdbd0>, <ast.Attribute object at 0x7da18c4cf250>, <ast.Attribute object at 0x7da18c4ccfd0>, <ast.Attribute object at 0x7da18c4ccd60>], [<ast.Attribute object at 0x7da18c4cca00>, <ast.Attribute object at 0x7da18c4cc7c0>, <ast.Attribute object at 0x7da18c4cc8b0>, <ast.Attribute object at 0x7da18c4cd4e0>, <ast.Attribute object at 0x7da1b2346860>, <ast.Attribute object at 0x7da1b2346530>, <ast.Attribute object at 0x7da1b2344ac0>, <ast.Attribute object at 0x7da1b2346560>, <ast.Attribute object at 0x7da1b2347c70>, <ast.Attribute object at 0x7da1b2345120>, <ast.Attribute object at 0x7da1b2346920>, <ast.Attribute object at 0x7da1b2346500>, <ast.Attribute object at 0x7da1b23441f0>, <ast.Attribute object at 0x7da1b2347400>, <ast.Attribute object at 0x7da1b23456f0>, <ast.Attribute object at 0x7da1b2346140>, <ast.Attribute object at 0x7da1b23455a0>, <ast.Attribute object at 0x7da1b2347340>, <ast.Attribute object at 0x7da1b23473a0>, <ast.Attribute object at 0x7da1b23459c0>, <ast.Attribute object at 0x7da1b2344be0>, <ast.Attribute object at 0x7da1b2344970>]]
variable[debug] assign[=] constant[False]
<ast.Try object at 0x7da1b2345150>
<ast.Try object at 0x7da1b2347c40>
<ast.Try object at 0x7da1b23477c0>
<ast.Try object at 0x7da1b2344a90>
<ast.Try object at 0x7da18dc99d80>
<ast.Try object at 0x7da18dc99e40>
<ast.Try object at 0x7da18dc98760>
<ast.Try object at 0x7da18dc9ab60>
<ast.Try object at 0x7da18dc9aef0>
<ast.Try object at 0x7da18dc9afb0>
<ast.Try object at 0x7da18dc98f70>
<ast.Try object at 0x7da18dc9bdf0>
return[call[name[_PatternIdInterfaces]][name[patternId]]] | keyword[def] identifier[GetPatternIdInterface] ( identifier[patternId] : identifier[int] ):
literal[string]
keyword[global] identifier[_PatternIdInterfaces]
keyword[if] keyword[not] identifier[_PatternIdInterfaces] :
identifier[_PatternIdInterfaces] ={
identifier[PatternId] . identifier[DockPattern] : identifier[_AutomationClient] . identifier[instance] (). identifier[UIAutomationCore] . identifier[IUIAutomationDockPattern] ,
identifier[PatternId] . identifier[ExpandCollapsePattern] : identifier[_AutomationClient] . identifier[instance] (). identifier[UIAutomationCore] . identifier[IUIAutomationExpandCollapsePattern] ,
identifier[PatternId] . identifier[GridItemPattern] : identifier[_AutomationClient] . identifier[instance] (). identifier[UIAutomationCore] . identifier[IUIAutomationGridItemPattern] ,
identifier[PatternId] . identifier[GridPattern] : identifier[_AutomationClient] . identifier[instance] (). identifier[UIAutomationCore] . identifier[IUIAutomationGridPattern] ,
identifier[PatternId] . identifier[InvokePattern] : identifier[_AutomationClient] . identifier[instance] (). identifier[UIAutomationCore] . identifier[IUIAutomationInvokePattern] ,
identifier[PatternId] . identifier[ItemContainerPattern] : identifier[_AutomationClient] . identifier[instance] (). identifier[UIAutomationCore] . identifier[IUIAutomationItemContainerPattern] ,
identifier[PatternId] . identifier[LegacyIAccessiblePattern] : identifier[_AutomationClient] . identifier[instance] (). identifier[UIAutomationCore] . identifier[IUIAutomationLegacyIAccessiblePattern] ,
identifier[PatternId] . identifier[MultipleViewPattern] : identifier[_AutomationClient] . identifier[instance] (). identifier[UIAutomationCore] . identifier[IUIAutomationMultipleViewPattern] ,
identifier[PatternId] . identifier[RangeValuePattern] : identifier[_AutomationClient] . identifier[instance] (). identifier[UIAutomationCore] . identifier[IUIAutomationRangeValuePattern] ,
identifier[PatternId] . identifier[ScrollItemPattern] : identifier[_AutomationClient] . identifier[instance] (). identifier[UIAutomationCore] . identifier[IUIAutomationScrollItemPattern] ,
identifier[PatternId] . identifier[ScrollPattern] : identifier[_AutomationClient] . identifier[instance] (). identifier[UIAutomationCore] . identifier[IUIAutomationScrollPattern] ,
identifier[PatternId] . identifier[SelectionItemPattern] : identifier[_AutomationClient] . identifier[instance] (). identifier[UIAutomationCore] . identifier[IUIAutomationSelectionItemPattern] ,
identifier[PatternId] . identifier[SelectionPattern] : identifier[_AutomationClient] . identifier[instance] (). identifier[UIAutomationCore] . identifier[IUIAutomationSelectionPattern] ,
identifier[PatternId] . identifier[SynchronizedInputPattern] : identifier[_AutomationClient] . identifier[instance] (). identifier[UIAutomationCore] . identifier[IUIAutomationSynchronizedInputPattern] ,
identifier[PatternId] . identifier[TableItemPattern] : identifier[_AutomationClient] . identifier[instance] (). identifier[UIAutomationCore] . identifier[IUIAutomationTableItemPattern] ,
identifier[PatternId] . identifier[TablePattern] : identifier[_AutomationClient] . identifier[instance] (). identifier[UIAutomationCore] . identifier[IUIAutomationTablePattern] ,
identifier[PatternId] . identifier[TextPattern] : identifier[_AutomationClient] . identifier[instance] (). identifier[UIAutomationCore] . identifier[IUIAutomationTextPattern] ,
identifier[PatternId] . identifier[TogglePattern] : identifier[_AutomationClient] . identifier[instance] (). identifier[UIAutomationCore] . identifier[IUIAutomationTogglePattern] ,
identifier[PatternId] . identifier[TransformPattern] : identifier[_AutomationClient] . identifier[instance] (). identifier[UIAutomationCore] . identifier[IUIAutomationTransformPattern] ,
identifier[PatternId] . identifier[ValuePattern] : identifier[_AutomationClient] . identifier[instance] (). identifier[UIAutomationCore] . identifier[IUIAutomationValuePattern] ,
identifier[PatternId] . identifier[VirtualizedItemPattern] : identifier[_AutomationClient] . identifier[instance] (). identifier[UIAutomationCore] . identifier[IUIAutomationVirtualizedItemPattern] ,
identifier[PatternId] . identifier[WindowPattern] : identifier[_AutomationClient] . identifier[instance] (). identifier[UIAutomationCore] . identifier[IUIAutomationWindowPattern] ,
}
identifier[debug] = keyword[False]
keyword[try] :
identifier[_PatternIdInterfaces] [ identifier[PatternId] . identifier[AnnotationPattern] ]= identifier[_AutomationClient] . identifier[instance] (). identifier[UIAutomationCore] . identifier[IUIAutomationAnnotationPattern]
keyword[except] :
keyword[if] identifier[debug] : identifier[Logger] . identifier[WriteLine] ( literal[string] , identifier[ConsoleColor] . identifier[Yellow] )
keyword[try] :
identifier[_PatternIdInterfaces] [ identifier[PatternId] . identifier[CustomNavigationPattern] ]= identifier[_AutomationClient] . identifier[instance] (). identifier[UIAutomationCore] . identifier[IUIAutomationCustomNavigationPattern]
keyword[except] :
keyword[if] identifier[debug] : identifier[Logger] . identifier[WriteLine] ( literal[string] , identifier[ConsoleColor] . identifier[Yellow] )
keyword[try] :
identifier[_PatternIdInterfaces] [ identifier[PatternId] . identifier[DragPattern] ]= identifier[_AutomationClient] . identifier[instance] (). identifier[UIAutomationCore] . identifier[IUIAutomationDragPattern]
keyword[except] :
keyword[if] identifier[debug] : identifier[Logger] . identifier[WriteLine] ( literal[string] , identifier[ConsoleColor] . identifier[Yellow] )
keyword[try] :
identifier[_PatternIdInterfaces] [ identifier[PatternId] . identifier[DropTargetPattern] ]= identifier[_AutomationClient] . identifier[instance] (). identifier[UIAutomationCore] . identifier[IUIAutomationDropTargetPattern]
keyword[except] :
keyword[if] identifier[debug] : identifier[Logger] . identifier[WriteLine] ( literal[string] , identifier[ConsoleColor] . identifier[Yellow] )
keyword[try] :
identifier[_PatternIdInterfaces] [ identifier[PatternId] . identifier[ObjectModelPattern] ]= identifier[_AutomationClient] . identifier[instance] (). identifier[UIAutomationCore] . identifier[IUIAutomationObjectModelPattern]
keyword[except] :
keyword[if] identifier[debug] : identifier[Logger] . identifier[WriteLine] ( literal[string] , identifier[ConsoleColor] . identifier[Yellow] )
keyword[try] :
identifier[_PatternIdInterfaces] [ identifier[PatternId] . identifier[SpreadsheetItemPattern] ]= identifier[_AutomationClient] . identifier[instance] (). identifier[UIAutomationCore] . identifier[IUIAutomationSpreadsheetItemPattern]
keyword[except] :
keyword[if] identifier[debug] : identifier[Logger] . identifier[WriteLine] ( literal[string] , identifier[ConsoleColor] . identifier[Yellow] )
keyword[try] :
identifier[_PatternIdInterfaces] [ identifier[PatternId] . identifier[SpreadsheetPattern] ]= identifier[_AutomationClient] . identifier[instance] (). identifier[UIAutomationCore] . identifier[IUIAutomationSpreadsheetPattern]
keyword[except] :
keyword[if] identifier[debug] : identifier[Logger] . identifier[WriteLine] ( literal[string] , identifier[ConsoleColor] . identifier[Yellow] )
keyword[try] :
identifier[_PatternIdInterfaces] [ identifier[PatternId] . identifier[StylesPattern] ]= identifier[_AutomationClient] . identifier[instance] (). identifier[UIAutomationCore] . identifier[IUIAutomationStylesPattern]
keyword[except] :
keyword[if] identifier[debug] : identifier[Logger] . identifier[WriteLine] ( literal[string] , identifier[ConsoleColor] . identifier[Yellow] )
keyword[try] :
identifier[_PatternIdInterfaces] [ identifier[PatternId] . identifier[TextChildPattern] ]= identifier[_AutomationClient] . identifier[instance] (). identifier[UIAutomationCore] . identifier[IUIAutomationTextChildPattern]
keyword[except] :
keyword[if] identifier[debug] : identifier[Logger] . identifier[WriteLine] ( literal[string] , identifier[ConsoleColor] . identifier[Yellow] )
keyword[try] :
identifier[_PatternIdInterfaces] [ identifier[PatternId] . identifier[TextEditPattern] ]= identifier[_AutomationClient] . identifier[instance] (). identifier[UIAutomationCore] . identifier[IUIAutomationTextEditPattern]
keyword[except] :
keyword[if] identifier[debug] : identifier[Logger] . identifier[WriteLine] ( literal[string] , identifier[ConsoleColor] . identifier[Yellow] )
keyword[try] :
identifier[_PatternIdInterfaces] [ identifier[PatternId] . identifier[TextPattern2] ]= identifier[_AutomationClient] . identifier[instance] (). identifier[UIAutomationCore] . identifier[IUIAutomationTextPattern2]
keyword[except] :
keyword[if] identifier[debug] : identifier[Logger] . identifier[WriteLine] ( literal[string] , identifier[ConsoleColor] . identifier[Yellow] )
keyword[try] :
identifier[_PatternIdInterfaces] [ identifier[PatternId] . identifier[TransformPattern2] ]= identifier[_AutomationClient] . identifier[instance] (). identifier[UIAutomationCore] . identifier[IUIAutomationTransformPattern2]
keyword[except] :
keyword[if] identifier[debug] : identifier[Logger] . identifier[WriteLine] ( literal[string] , identifier[ConsoleColor] . identifier[Yellow] )
keyword[return] identifier[_PatternIdInterfaces] [ identifier[patternId] ] | def GetPatternIdInterface(patternId: int):
"""
Get pattern COM interface by pattern id.
patternId: int, a value in class `PatternId`.
Return comtypes._cominterface_meta.
"""
global _PatternIdInterfaces
if not _PatternIdInterfaces:
# PatternId.AnnotationPattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationAnnotationPattern,
# PatternId.CustomNavigationPattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationCustomNavigationPattern,
# PatternId.DragPattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationDragPattern,
# PatternId.DropTargetPattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationDropTargetPattern,
# PatternId.ObjectModelPattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationObjectModelPattern,
# PatternId.SpreadsheetItemPattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationSpreadsheetItemPattern,
# PatternId.SpreadsheetPattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationSpreadsheetPattern,
# PatternId.StylesPattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationStylesPattern,
# PatternId.TextChildPattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationTextChildPattern,
# PatternId.TextEditPattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationTextEditPattern,
# PatternId.TextPattern2: _AutomationClient.instance().UIAutomationCore.IUIAutomationTextPattern2,
# PatternId.TransformPattern2: _AutomationClient.instance().UIAutomationCore.IUIAutomationTransformPattern2,
_PatternIdInterfaces = {PatternId.DockPattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationDockPattern, PatternId.ExpandCollapsePattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationExpandCollapsePattern, PatternId.GridItemPattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationGridItemPattern, PatternId.GridPattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationGridPattern, PatternId.InvokePattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationInvokePattern, PatternId.ItemContainerPattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationItemContainerPattern, PatternId.LegacyIAccessiblePattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationLegacyIAccessiblePattern, PatternId.MultipleViewPattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationMultipleViewPattern, PatternId.RangeValuePattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationRangeValuePattern, PatternId.ScrollItemPattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationScrollItemPattern, PatternId.ScrollPattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationScrollPattern, PatternId.SelectionItemPattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationSelectionItemPattern, PatternId.SelectionPattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationSelectionPattern, PatternId.SynchronizedInputPattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationSynchronizedInputPattern, PatternId.TableItemPattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationTableItemPattern, PatternId.TablePattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationTablePattern, PatternId.TextPattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationTextPattern, PatternId.TogglePattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationTogglePattern, PatternId.TransformPattern: 
_AutomationClient.instance().UIAutomationCore.IUIAutomationTransformPattern, PatternId.ValuePattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationValuePattern, PatternId.VirtualizedItemPattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationVirtualizedItemPattern, PatternId.WindowPattern: _AutomationClient.instance().UIAutomationCore.IUIAutomationWindowPattern}
debug = False
#the following patterns dosn't exist on Windows 7 or lower
try:
_PatternIdInterfaces[PatternId.AnnotationPattern] = _AutomationClient.instance().UIAutomationCore.IUIAutomationAnnotationPattern # depends on [control=['try'], data=[]]
except:
if debug:
Logger.WriteLine('UIAutomationCore does not have AnnotationPattern.', ConsoleColor.Yellow) # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]]
try:
_PatternIdInterfaces[PatternId.CustomNavigationPattern] = _AutomationClient.instance().UIAutomationCore.IUIAutomationCustomNavigationPattern # depends on [control=['try'], data=[]]
except:
if debug:
Logger.WriteLine('UIAutomationCore does not have CustomNavigationPattern.', ConsoleColor.Yellow) # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]]
try:
_PatternIdInterfaces[PatternId.DragPattern] = _AutomationClient.instance().UIAutomationCore.IUIAutomationDragPattern # depends on [control=['try'], data=[]]
except:
if debug:
Logger.WriteLine('UIAutomationCore does not have DragPattern.', ConsoleColor.Yellow) # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]]
try:
_PatternIdInterfaces[PatternId.DropTargetPattern] = _AutomationClient.instance().UIAutomationCore.IUIAutomationDropTargetPattern # depends on [control=['try'], data=[]]
except:
if debug:
Logger.WriteLine('UIAutomationCore does not have DropTargetPattern.', ConsoleColor.Yellow) # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]]
try:
_PatternIdInterfaces[PatternId.ObjectModelPattern] = _AutomationClient.instance().UIAutomationCore.IUIAutomationObjectModelPattern # depends on [control=['try'], data=[]]
except:
if debug:
Logger.WriteLine('UIAutomationCore does not have ObjectModelPattern.', ConsoleColor.Yellow) # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]]
try:
_PatternIdInterfaces[PatternId.SpreadsheetItemPattern] = _AutomationClient.instance().UIAutomationCore.IUIAutomationSpreadsheetItemPattern # depends on [control=['try'], data=[]]
except:
if debug:
Logger.WriteLine('UIAutomationCore does not have SpreadsheetItemPattern.', ConsoleColor.Yellow) # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]]
try:
_PatternIdInterfaces[PatternId.SpreadsheetPattern] = _AutomationClient.instance().UIAutomationCore.IUIAutomationSpreadsheetPattern # depends on [control=['try'], data=[]]
except:
if debug:
Logger.WriteLine('UIAutomationCore does not have SpreadsheetPattern.', ConsoleColor.Yellow) # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]]
try:
_PatternIdInterfaces[PatternId.StylesPattern] = _AutomationClient.instance().UIAutomationCore.IUIAutomationStylesPattern # depends on [control=['try'], data=[]]
except:
if debug:
Logger.WriteLine('UIAutomationCore does not have StylesPattern.', ConsoleColor.Yellow) # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]]
try:
_PatternIdInterfaces[PatternId.TextChildPattern] = _AutomationClient.instance().UIAutomationCore.IUIAutomationTextChildPattern # depends on [control=['try'], data=[]]
except:
if debug:
Logger.WriteLine('UIAutomationCore does not have TextChildPattern.', ConsoleColor.Yellow) # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]]
try:
_PatternIdInterfaces[PatternId.TextEditPattern] = _AutomationClient.instance().UIAutomationCore.IUIAutomationTextEditPattern # depends on [control=['try'], data=[]]
except:
if debug:
Logger.WriteLine('UIAutomationCore does not have TextEditPattern.', ConsoleColor.Yellow) # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]]
try:
_PatternIdInterfaces[PatternId.TextPattern2] = _AutomationClient.instance().UIAutomationCore.IUIAutomationTextPattern2 # depends on [control=['try'], data=[]]
except:
if debug:
Logger.WriteLine('UIAutomationCore does not have TextPattern2.', ConsoleColor.Yellow) # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]]
try:
_PatternIdInterfaces[PatternId.TransformPattern2] = _AutomationClient.instance().UIAutomationCore.IUIAutomationTransformPattern2 # depends on [control=['try'], data=[]]
except:
if debug:
Logger.WriteLine('UIAutomationCore does not have TransformPattern2.', ConsoleColor.Yellow) # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
return _PatternIdInterfaces[patternId] |
def close_list(ctx, root):
"""Close already opened list if needed.
This will try to see if it is needed to close already opened list.
:Args:
- ctx (:class:`Context`): Context object
- root (Element): lxml element representing current position.
:Returns:
lxml element where future content should be placed.
"""
try:
n = len(ctx.in_list)
if n <= 0:
return root
elem = root
while n > 0:
while True:
if elem.tag in ['ul', 'ol', 'td']:
elem = elem.getparent()
break
elem = elem.getparent()
n -= 1
ctx.in_list = []
return elem
except:
return None | def function[close_list, parameter[ctx, root]]:
constant[Close already opened list if needed.
This will try to see if it is needed to close already opened list.
:Args:
- ctx (:class:`Context`): Context object
- root (Element): lxml element representing current position.
:Returns:
lxml element where future content should be placed.
]
<ast.Try object at 0x7da18f812ce0> | keyword[def] identifier[close_list] ( identifier[ctx] , identifier[root] ):
literal[string]
keyword[try] :
identifier[n] = identifier[len] ( identifier[ctx] . identifier[in_list] )
keyword[if] identifier[n] <= literal[int] :
keyword[return] identifier[root]
identifier[elem] = identifier[root]
keyword[while] identifier[n] > literal[int] :
keyword[while] keyword[True] :
keyword[if] identifier[elem] . identifier[tag] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
identifier[elem] = identifier[elem] . identifier[getparent] ()
keyword[break]
identifier[elem] = identifier[elem] . identifier[getparent] ()
identifier[n] -= literal[int]
identifier[ctx] . identifier[in_list] =[]
keyword[return] identifier[elem]
keyword[except] :
keyword[return] keyword[None] | def close_list(ctx, root):
"""Close already opened list if needed.
This will try to see if it is needed to close already opened list.
:Args:
- ctx (:class:`Context`): Context object
- root (Element): lxml element representing current position.
:Returns:
lxml element where future content should be placed.
"""
try:
n = len(ctx.in_list)
if n <= 0:
return root # depends on [control=['if'], data=[]]
elem = root
while n > 0:
while True:
if elem.tag in ['ul', 'ol', 'td']:
elem = elem.getparent()
break # depends on [control=['if'], data=[]]
elem = elem.getparent() # depends on [control=['while'], data=[]]
n -= 1 # depends on [control=['while'], data=['n']]
ctx.in_list = []
return elem # depends on [control=['try'], data=[]]
except:
return None # depends on [control=['except'], data=[]] |
def __draw_clusters(self):
        """!
        @brief Display clusters and outliers using different colors.

        Each cluster is drawn with a color chosen by its index ('.' marker);
        noise points are drawn in gray with an 'x' marker.
        """
        data = self.__directory.get_data()
        # enumerate avoids range(len(...)) indexing; reusing `data` avoids
        # a redundant second get_data() call for the noise pass.
        for index_cluster, cluster in enumerate(self.__clusters):
            color = color_list.get_color(index_cluster)
            self.__draw_cluster(data, cluster, color, '.')
        self.__draw_cluster(data, self.__noise, 'gray', 'x')
constant[!
@brief Display clusters and outliers using different colors.
]
variable[data] assign[=] call[name[self].__directory.get_data, parameter[]]
for taget[name[index_cluster]] in starred[call[name[range], parameter[call[name[len], parameter[name[self].__clusters]]]]] begin[:]
variable[color] assign[=] call[name[color_list].get_color, parameter[name[index_cluster]]]
call[name[self].__draw_cluster, parameter[name[data], call[name[self].__clusters][name[index_cluster]], name[color], constant[.]]]
call[name[self].__draw_cluster, parameter[call[name[self].__directory.get_data, parameter[]], name[self].__noise, constant[gray], constant[x]]] | keyword[def] identifier[__draw_clusters] ( identifier[self] ):
literal[string]
identifier[data] = identifier[self] . identifier[__directory] . identifier[get_data] ()
keyword[for] identifier[index_cluster] keyword[in] identifier[range] ( identifier[len] ( identifier[self] . identifier[__clusters] )):
identifier[color] = identifier[color_list] . identifier[get_color] ( identifier[index_cluster] )
identifier[self] . identifier[__draw_cluster] ( identifier[data] , identifier[self] . identifier[__clusters] [ identifier[index_cluster] ], identifier[color] , literal[string] )
identifier[self] . identifier[__draw_cluster] ( identifier[self] . identifier[__directory] . identifier[get_data] (), identifier[self] . identifier[__noise] , literal[string] , literal[string] ) | def __draw_clusters(self):
"""!
@brief Display clusters and outliers using different colors.
"""
data = self.__directory.get_data()
for index_cluster in range(len(self.__clusters)):
color = color_list.get_color(index_cluster)
self.__draw_cluster(data, self.__clusters[index_cluster], color, '.') # depends on [control=['for'], data=['index_cluster']]
self.__draw_cluster(self.__directory.get_data(), self.__noise, 'gray', 'x') |
def dnld_assc(assc_name, go2obj=None, prt=sys.stdout):
    """Download association from http://geneontology.org/gene-associations.

    Args:
        assc_name: path (or bare filename, e.g. "tair.gaf") of the association file.
        go2obj: optional mapping of GO id -> GO term; when given, associations
            are filtered to GO ids present in this DAG.
        prt: stream for progress messages.

    Returns:
        dict mapping gene -> set of GO ids (filtered by go2obj when provided).
    """
    dirloc, assc_base = os.path.split(assc_name)
    # BUGFIX: the original reassigned dirloc to os.getcwd() *before* testing
    # "if not dirloc", so the join branch was unreachable and assc_locfile was
    # always assc_name. Anchor bare filenames in the cwd as intended.
    assc_locfile = assc_name if dirloc else os.path.join(os.getcwd(), assc_base)
    dnld_annotation(assc_locfile, prt)
    # Read the downloaded association
    assc_orig = read_gaf(assc_locfile, prt)
    if go2obj is None:
        return assc_orig
    # If a GO DAG is provided, use only GO IDs present in the GO DAG
    goids_dag = set(go2obj.keys())
    return {gene: goids_cur.intersection(goids_dag)
            for gene, goids_cur in assc_orig.items()}
constant[Download association from http://geneontology.org/gene-associations.]
<ast.Tuple object at 0x7da1b2346980> assign[=] call[name[os].path.split, parameter[name[assc_name]]]
if <ast.UnaryOp object at 0x7da1b2347cd0> begin[:]
variable[dirloc] assign[=] call[name[os].getcwd, parameter[]]
variable[assc_locfile] assign[=] <ast.IfExp object at 0x7da1b2346620>
call[name[dnld_annotation], parameter[name[assc_locfile], name[prt]]]
variable[assc_orig] assign[=] call[name[read_gaf], parameter[name[assc_locfile], name[prt]]]
if compare[name[go2obj] is constant[None]] begin[:]
return[name[assc_orig]]
variable[assc] assign[=] dictionary[[], []]
variable[goids_dag] assign[=] call[name[set], parameter[call[name[go2obj].keys, parameter[]]]]
for taget[tuple[[<ast.Name object at 0x7da1b23472b0>, <ast.Name object at 0x7da1b23476d0>]]] in starred[call[name[assc_orig].items, parameter[]]] begin[:]
call[name[assc]][name[gene]] assign[=] call[name[goids_cur].intersection, parameter[name[goids_dag]]]
return[name[assc]] | keyword[def] identifier[dnld_assc] ( identifier[assc_name] , identifier[go2obj] = keyword[None] , identifier[prt] = identifier[sys] . identifier[stdout] ):
literal[string]
identifier[dirloc] , identifier[assc_base] = identifier[os] . identifier[path] . identifier[split] ( identifier[assc_name] )
keyword[if] keyword[not] identifier[dirloc] :
identifier[dirloc] = identifier[os] . identifier[getcwd] ()
identifier[assc_locfile] = identifier[os] . identifier[path] . identifier[join] ( identifier[dirloc] , identifier[assc_base] ) keyword[if] keyword[not] identifier[dirloc] keyword[else] identifier[assc_name]
identifier[dnld_annotation] ( identifier[assc_locfile] , identifier[prt] )
identifier[assc_orig] = identifier[read_gaf] ( identifier[assc_locfile] , identifier[prt] )
keyword[if] identifier[go2obj] keyword[is] keyword[None] :
keyword[return] identifier[assc_orig]
identifier[assc] ={}
identifier[goids_dag] = identifier[set] ( identifier[go2obj] . identifier[keys] ())
keyword[for] identifier[gene] , identifier[goids_cur] keyword[in] identifier[assc_orig] . identifier[items] ():
identifier[assc] [ identifier[gene] ]= identifier[goids_cur] . identifier[intersection] ( identifier[goids_dag] )
keyword[return] identifier[assc] | def dnld_assc(assc_name, go2obj=None, prt=sys.stdout):
"""Download association from http://geneontology.org/gene-associations."""
# Example assc_name: "tair.gaf"
# Download the Association
(dirloc, assc_base) = os.path.split(assc_name)
if not dirloc:
dirloc = os.getcwd() # depends on [control=['if'], data=[]]
assc_locfile = os.path.join(dirloc, assc_base) if not dirloc else assc_name
dnld_annotation(assc_locfile, prt)
# Read the downloaded association
assc_orig = read_gaf(assc_locfile, prt)
if go2obj is None:
return assc_orig # depends on [control=['if'], data=[]]
# If a GO DAG is provided, use only GO IDs present in the GO DAG
assc = {}
goids_dag = set(go2obj.keys())
for (gene, goids_cur) in assc_orig.items():
assc[gene] = goids_cur.intersection(goids_dag) # depends on [control=['for'], data=[]]
return assc |
def _maybe_download_corpus(tmp_dir, vocab_type):
  """Download and unpack the corpus.

  Args:
    tmp_dir: directory containing dataset.
    vocab_type: which vocabulary are we using.

  Returns:
    The list of names of files.
  """
  filename = os.path.basename(PTB_URL)
  archive_path = generator_utils.maybe_download(tmp_dir, filename, PTB_URL)
  word_files = []
  char_files = []
  with tarfile.open(archive_path, "r:gz") as archive:
    relevant_members = []
    # Keep only the PTB .txt files; char-level files are tracked separately.
    for member in archive.getmembers():
      if "ptb" not in member.name or ".txt" not in member.name:
        continue
      if "char" in member.name:
        char_files.append(member.name)
      else:
        word_files.append(member.name)
      relevant_members.append(member)
    archive.extractall(tmp_dir, members=relevant_members)
  if vocab_type == text_problems.VocabType.CHARACTER:
    return char_files
  return word_files
constant[Download and unpack the corpus.
Args:
tmp_dir: directory containing dataset.
vocab_type: which vocabulary are we using.
Returns:
The list of names of files.
]
variable[filename] assign[=] call[name[os].path.basename, parameter[name[PTB_URL]]]
variable[compressed_filepath] assign[=] call[name[generator_utils].maybe_download, parameter[name[tmp_dir], name[filename], name[PTB_URL]]]
variable[ptb_files] assign[=] list[[]]
variable[ptb_char_files] assign[=] list[[]]
with call[name[tarfile].open, parameter[name[compressed_filepath], constant[r:gz]]] begin[:]
variable[files] assign[=] list[[]]
for taget[name[m]] in starred[call[name[tgz].getmembers, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da1b20e61a0> begin[:]
if compare[constant[char] in name[m].name] begin[:]
<ast.AugAssign object at 0x7da1b20e4f70>
<ast.AugAssign object at 0x7da1b20e43d0>
call[name[tgz].extractall, parameter[name[tmp_dir]]]
if compare[name[vocab_type] equal[==] name[text_problems].VocabType.CHARACTER] begin[:]
return[name[ptb_char_files]] | keyword[def] identifier[_maybe_download_corpus] ( identifier[tmp_dir] , identifier[vocab_type] ):
literal[string]
identifier[filename] = identifier[os] . identifier[path] . identifier[basename] ( identifier[PTB_URL] )
identifier[compressed_filepath] = identifier[generator_utils] . identifier[maybe_download] (
identifier[tmp_dir] , identifier[filename] , identifier[PTB_URL] )
identifier[ptb_files] =[]
identifier[ptb_char_files] =[]
keyword[with] identifier[tarfile] . identifier[open] ( identifier[compressed_filepath] , literal[string] ) keyword[as] identifier[tgz] :
identifier[files] =[]
keyword[for] identifier[m] keyword[in] identifier[tgz] . identifier[getmembers] ():
keyword[if] literal[string] keyword[in] identifier[m] . identifier[name] keyword[and] literal[string] keyword[in] identifier[m] . identifier[name] :
keyword[if] literal[string] keyword[in] identifier[m] . identifier[name] :
identifier[ptb_char_files] +=[ identifier[m] . identifier[name] ]
keyword[else] :
identifier[ptb_files] +=[ identifier[m] . identifier[name] ]
identifier[files] +=[ identifier[m] ]
identifier[tgz] . identifier[extractall] ( identifier[tmp_dir] , identifier[members] = identifier[files] )
keyword[if] identifier[vocab_type] == identifier[text_problems] . identifier[VocabType] . identifier[CHARACTER] :
keyword[return] identifier[ptb_char_files]
keyword[else] :
keyword[return] identifier[ptb_files] | def _maybe_download_corpus(tmp_dir, vocab_type):
"""Download and unpack the corpus.
Args:
tmp_dir: directory containing dataset.
vocab_type: which vocabulary are we using.
Returns:
The list of names of files.
"""
filename = os.path.basename(PTB_URL)
compressed_filepath = generator_utils.maybe_download(tmp_dir, filename, PTB_URL)
ptb_files = []
ptb_char_files = []
with tarfile.open(compressed_filepath, 'r:gz') as tgz:
files = []
# Selecting only relevant files.
for m in tgz.getmembers():
if 'ptb' in m.name and '.txt' in m.name:
if 'char' in m.name:
ptb_char_files += [m.name] # depends on [control=['if'], data=[]]
else:
ptb_files += [m.name]
files += [m] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['m']]
tgz.extractall(tmp_dir, members=files) # depends on [control=['with'], data=['tgz']]
if vocab_type == text_problems.VocabType.CHARACTER:
return ptb_char_files # depends on [control=['if'], data=[]]
else:
return ptb_files |
def run(self):
        """Periodically sends buffered operations and/or commit.

        Loops until ``self._stopped`` becomes true, sleeping
        ``self._sleep_interval`` seconds per pass while accumulating the
        time since the last send and the last commit. A commit fires once
        ``last_commit`` exceeds ``self._commit_interval``; a send fires
        once ``last_send`` exceeds ``self._send_interval``. Commit also
        flushes sends, so both counters reset after a commit.
        """
        if not self._should_auto_commit and not self._should_auto_send:
            # Neither periodic task is enabled; nothing to do.
            return
        last_send, last_commit = 0, 0
        while not self._stopped:
            if self._should_auto_commit:
                if last_commit > self._commit_interval:
                    self._docman.commit()
                    # commit also sends so reset both
                    last_send, last_commit = 0, 0
                # Give a chance to exit the loop
                if self._stopped:
                    break
            if self._should_auto_send:
                if last_send > self._send_interval:
                    self._docman.send_buffered_operations()
                    last_send = 0
            time.sleep(self._sleep_interval)
            last_send += self._sleep_interval
            last_commit += self._sleep_interval
constant[Periodically sends buffered operations and/or commit.
]
if <ast.BoolOp object at 0x7da18ede4580> begin[:]
return[None]
<ast.Tuple object at 0x7da18ede4c70> assign[=] tuple[[<ast.Constant object at 0x7da18ede73a0>, <ast.Constant object at 0x7da18ede55d0>]]
while <ast.UnaryOp object at 0x7da18ede7af0> begin[:]
if name[self]._should_auto_commit begin[:]
if compare[name[last_commit] greater[>] name[self]._commit_interval] begin[:]
call[name[self]._docman.commit, parameter[]]
<ast.Tuple object at 0x7da18ede41f0> assign[=] tuple[[<ast.Constant object at 0x7da18ede5570>, <ast.Constant object at 0x7da18ede4bb0>]]
if name[self]._stopped begin[:]
break
if name[self]._should_auto_send begin[:]
if compare[name[last_send] greater[>] name[self]._send_interval] begin[:]
call[name[self]._docman.send_buffered_operations, parameter[]]
variable[last_send] assign[=] constant[0]
call[name[time].sleep, parameter[name[self]._sleep_interval]]
<ast.AugAssign object at 0x7da18ede63b0>
<ast.AugAssign object at 0x7da18ede4dc0> | keyword[def] identifier[run] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_should_auto_commit] keyword[and] keyword[not] identifier[self] . identifier[_should_auto_send] :
keyword[return]
identifier[last_send] , identifier[last_commit] = literal[int] , literal[int]
keyword[while] keyword[not] identifier[self] . identifier[_stopped] :
keyword[if] identifier[self] . identifier[_should_auto_commit] :
keyword[if] identifier[last_commit] > identifier[self] . identifier[_commit_interval] :
identifier[self] . identifier[_docman] . identifier[commit] ()
identifier[last_send] , identifier[last_commit] = literal[int] , literal[int]
keyword[if] identifier[self] . identifier[_stopped] :
keyword[break]
keyword[if] identifier[self] . identifier[_should_auto_send] :
keyword[if] identifier[last_send] > identifier[self] . identifier[_send_interval] :
identifier[self] . identifier[_docman] . identifier[send_buffered_operations] ()
identifier[last_send] = literal[int]
identifier[time] . identifier[sleep] ( identifier[self] . identifier[_sleep_interval] )
identifier[last_send] += identifier[self] . identifier[_sleep_interval]
identifier[last_commit] += identifier[self] . identifier[_sleep_interval] | def run(self):
"""Periodically sends buffered operations and/or commit.
"""
if not self._should_auto_commit and (not self._should_auto_send):
return # depends on [control=['if'], data=[]]
(last_send, last_commit) = (0, 0)
while not self._stopped:
if self._should_auto_commit:
if last_commit > self._commit_interval:
self._docman.commit()
# commit also sends so reset both
(last_send, last_commit) = (0, 0)
# Give a chance to exit the loop
if self._stopped:
break # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['last_commit']] # depends on [control=['if'], data=[]]
if self._should_auto_send:
if last_send > self._send_interval:
self._docman.send_buffered_operations()
last_send = 0 # depends on [control=['if'], data=['last_send']] # depends on [control=['if'], data=[]]
time.sleep(self._sleep_interval)
last_send += self._sleep_interval
last_commit += self._sleep_interval # depends on [control=['while'], data=[]] |
def clean_gff(gff, cleaned, add_chr=False, chroms_to_ignore=None,
              featuretypes_to_ignore=None):
    """
    Cleans a GFF file by removing features on unwanted chromosomes and of
    unwanted featuretypes. Optionally adds "chr" to chrom names.
    """
    logger.info("Cleaning GFF")
    skip_chroms = chroms_to_ignore or []
    skip_featuretypes = featuretypes_to_ignore or []
    with open(cleaned, 'w') as out:
        for feature in gffutils.iterators.DataIterator(gff):
            # Prefixing happens before the chromosome filter, so ignore
            # lists are matched against the "chr"-prefixed name.
            if add_chr:
                feature.chrom = "chr" + feature.chrom
            if feature.chrom in skip_chroms:
                continue
            if feature.featuretype in skip_featuretypes:
                continue
            out.write(str(feature) + '\n')
    return cleaned
constant[
Cleans a GFF file by removing features on unwanted chromosomes and of
unwanted featuretypes. Optionally adds "chr" to chrom names.
]
call[name[logger].info, parameter[constant[Cleaning GFF]]]
variable[chroms_to_ignore] assign[=] <ast.BoolOp object at 0x7da20c9900a0>
variable[featuretypes_to_ignore] assign[=] <ast.BoolOp object at 0x7da20c993700>
with call[name[open], parameter[name[cleaned], constant[w]]] begin[:]
for taget[name[i]] in starred[call[name[gffutils].iterators.DataIterator, parameter[name[gff]]]] begin[:]
if name[add_chr] begin[:]
name[i].chrom assign[=] binary_operation[constant[chr] + name[i].chrom]
if compare[name[i].chrom in name[chroms_to_ignore]] begin[:]
continue
if compare[name[i].featuretype in name[featuretypes_to_ignore]] begin[:]
continue
call[name[fout].write, parameter[binary_operation[call[name[str], parameter[name[i]]] + constant[
]]]]
return[name[cleaned]] | keyword[def] identifier[clean_gff] ( identifier[gff] , identifier[cleaned] , identifier[add_chr] = keyword[False] , identifier[chroms_to_ignore] = keyword[None] ,
identifier[featuretypes_to_ignore] = keyword[None] ):
literal[string]
identifier[logger] . identifier[info] ( literal[string] )
identifier[chroms_to_ignore] = identifier[chroms_to_ignore] keyword[or] []
identifier[featuretypes_to_ignore] = identifier[featuretypes_to_ignore] keyword[or] []
keyword[with] identifier[open] ( identifier[cleaned] , literal[string] ) keyword[as] identifier[fout] :
keyword[for] identifier[i] keyword[in] identifier[gffutils] . identifier[iterators] . identifier[DataIterator] ( identifier[gff] ):
keyword[if] identifier[add_chr] :
identifier[i] . identifier[chrom] = literal[string] + identifier[i] . identifier[chrom]
keyword[if] identifier[i] . identifier[chrom] keyword[in] identifier[chroms_to_ignore] :
keyword[continue]
keyword[if] identifier[i] . identifier[featuretype] keyword[in] identifier[featuretypes_to_ignore] :
keyword[continue]
identifier[fout] . identifier[write] ( identifier[str] ( identifier[i] )+ literal[string] )
keyword[return] identifier[cleaned] | def clean_gff(gff, cleaned, add_chr=False, chroms_to_ignore=None, featuretypes_to_ignore=None):
"""
Cleans a GFF file by removing features on unwanted chromosomes and of
unwanted featuretypes. Optionally adds "chr" to chrom names.
"""
logger.info('Cleaning GFF')
chroms_to_ignore = chroms_to_ignore or []
featuretypes_to_ignore = featuretypes_to_ignore or []
with open(cleaned, 'w') as fout:
for i in gffutils.iterators.DataIterator(gff):
if add_chr:
i.chrom = 'chr' + i.chrom # depends on [control=['if'], data=[]]
if i.chrom in chroms_to_ignore:
continue # depends on [control=['if'], data=[]]
if i.featuretype in featuretypes_to_ignore:
continue # depends on [control=['if'], data=[]]
fout.write(str(i) + '\n') # depends on [control=['for'], data=['i']] # depends on [control=['with'], data=['fout']]
return cleaned |
def _get_bgzip_version(exe):
    """return bgzip version as string"""
    # bgzip prints its usage/version banner on "-h"; merge stderr into
    # stdout since different builds write the banner to either stream.
    proc = subprocess.run([exe, "-h"], stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT, universal_newlines=True)
    banner_second_line = proc.stdout.splitlines()[1]
    version_pattern = re.compile(
        r"(?:Version:|bgzip \(htslib\))\s+(\d+\.\d+(\.\d+)?)")
    return version_pattern.match(banner_second_line).group(1)
constant[return bgzip version as string]
variable[p] assign[=] call[name[subprocess].Popen, parameter[list[[<ast.Name object at 0x7da1b0465930>, <ast.Constant object at 0x7da1b0466230>]]]]
variable[output] assign[=] call[name[p].communicate, parameter[]]
variable[version_line] assign[=] call[call[call[name[output]][constant[0]].splitlines, parameter[]]][constant[1]]
variable[version] assign[=] call[call[name[re].match, parameter[constant[(?:Version:|bgzip \(htslib\))\s+(\d+\.\d+(\.\d+)?)], name[version_line]]].group, parameter[constant[1]]]
return[name[version]] | keyword[def] identifier[_get_bgzip_version] ( identifier[exe] ):
literal[string]
identifier[p] = identifier[subprocess] . identifier[Popen] ([ identifier[exe] , literal[string] ], identifier[stdout] = identifier[subprocess] . identifier[PIPE] , identifier[stderr] = identifier[subprocess] . identifier[STDOUT] , identifier[universal_newlines] = keyword[True] )
identifier[output] = identifier[p] . identifier[communicate] ()
identifier[version_line] = identifier[output] [ literal[int] ]. identifier[splitlines] ()[ literal[int] ]
identifier[version] = identifier[re] . identifier[match] ( literal[string] , identifier[version_line] ). identifier[group] ( literal[int] )
keyword[return] identifier[version] | def _get_bgzip_version(exe):
"""return bgzip version as string"""
p = subprocess.Popen([exe, '-h'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
output = p.communicate()
version_line = output[0].splitlines()[1]
version = re.match('(?:Version:|bgzip \\(htslib\\))\\s+(\\d+\\.\\d+(\\.\\d+)?)', version_line).group(1)
return version |
def ec2_vpc_availabilityzones(self, lookup, default=None):
    """Resolve a VPC friendly name to the availability zones it uses.

    Args:
      lookup: the friendly name of a VPC to look up
      default: the optional value to return if lookup failed; returns None if not set
    Returns:
      The AZs in use in the named VPC, joined with '", "' (the caller's
      template supplies the outer quotes), or default/None if no match
    """
    vpc_id = self.ec2_vpc_vpc_id(lookup)
    if vpc_id is None:
        return default
    # AZs are derived from the subnets attached to the VPC.
    subnet_filter = [{'Name': 'vpc-id', 'Values': [vpc_id]}]
    response = EFAwsResolver.__CLIENTS["ec2"].describe_subnets(Filters=subnet_filter)
    subnet_records = response["Subnets"]
    if not subnet_records:
        return default
    zones = [record["AvailabilityZone"] for record in subnet_records]
    # Internal ", " only — CF template needs the outer quotes.
    return "\", \"".join(zones)
constant[
Args:
lookup: the friendly name of a VPC to look up
default: the optional value to return if lookup failed; returns None if not set
Returns:
A comma-separated list of availability zones in use in the named VPC or default/None if no match
]
variable[vpc_id] assign[=] call[name[self].ec2_vpc_vpc_id, parameter[name[lookup]]]
if compare[name[vpc_id] is constant[None]] begin[:]
return[name[default]]
variable[subnets] assign[=] call[call[name[EFAwsResolver].__CLIENTS][constant[ec2]].describe_subnets, parameter[]]
if compare[call[name[len], parameter[call[name[subnets]][constant[Subnets]]]] greater[>] constant[0]] begin[:]
variable[az_list] assign[=] <ast.ListComp object at 0x7da1b1b00760>
return[call[constant[", "].join, parameter[name[az_list]]]] | keyword[def] identifier[ec2_vpc_availabilityzones] ( identifier[self] , identifier[lookup] , identifier[default] = keyword[None] ):
literal[string]
identifier[vpc_id] = identifier[self] . identifier[ec2_vpc_vpc_id] ( identifier[lookup] )
keyword[if] identifier[vpc_id] keyword[is] keyword[None] :
keyword[return] identifier[default]
identifier[subnets] = identifier[EFAwsResolver] . identifier[__CLIENTS] [ literal[string] ]. identifier[describe_subnets] ( identifier[Filters] =[{
literal[string] : literal[string] ,
literal[string] :[ identifier[vpc_id] ]
}])
keyword[if] identifier[len] ( identifier[subnets] [ literal[string] ])> literal[int] :
identifier[az_list] =[ identifier[s] [ literal[string] ] keyword[for] identifier[s] keyword[in] identifier[subnets] [ literal[string] ]]
keyword[return] literal[string] . identifier[join] ( identifier[az_list] )
keyword[else] :
keyword[return] identifier[default] | def ec2_vpc_availabilityzones(self, lookup, default=None):
"""
Args:
lookup: the friendly name of a VPC to look up
default: the optional value to return if lookup failed; returns None if not set
Returns:
A comma-separated list of availability zones in use in the named VPC or default/None if no match
"""
vpc_id = self.ec2_vpc_vpc_id(lookup)
if vpc_id is None:
return default # depends on [control=['if'], data=[]]
subnets = EFAwsResolver.__CLIENTS['ec2'].describe_subnets(Filters=[{'Name': 'vpc-id', 'Values': [vpc_id]}])
if len(subnets['Subnets']) > 0:
# Strip the metadata section (subnets["Subnets"])
az_list = [s['AvailabilityZone'] for s in subnets['Subnets']]
# Add internal ", " only. This is called literally from: "{{aws...}}" - CF template needs the outer quotes
return '", "'.join(az_list) # depends on [control=['if'], data=[]]
else:
return default |
def follow_path(file_path, buffering=-1, encoding=None, errors='strict'):
    """
    Similar to follow, but also looks up if inode of file is changed
    e.g. if it was re-created.
    Returned generator yields strings encoded by using encoding.
    If encoding is not specified, it defaults to locale.getpreferredencoding()
    >>> import io
    >>> import os
    >>> f = io.open('test_follow_path.txt', 'w+')
    >>> generator = follow_path('test_follow_path.txt')
    >>> _ = f.write('Line 1\\n')
    >>> f.flush()
    >>> print(next(generator))
    Line 1
    >>> _ = f.write('Line 2\\n')
    >>> f.flush()
    >>> print(next(generator))
    Line 2
    >>> _ = f.truncate(0)
    >>> _ = f.seek(0)
    >>> _ = f.write('Line 3\\n')
    >>> f.flush()
    >>> print(next(generator))
    Line 3
    >>> f.close()
    >>> os.remove('test_follow_path.txt')
    >>> f = io.open('test_follow_path.txt', 'w+')
    >>> _ = f.write('Line 4\\n')
    >>> f.flush()
    >>> print(next(generator))
    Line 4
    >>> print(next(generator))
    None
    >>> f.close()
    >>> os.remove('test_follow_path.txt')
    """
    if encoding is None:
        encoding = locale.getpreferredencoding()
    # Stateful iterator object rather than a plain generator: it must be able
    # to drop and re-open the underlying file when the path is re-created.
    class FollowPathGenerator(object):
        def __init__(self):
            if os.path.isfile(file_path):
                # File already exists: open it and tail from its current end.
                self.following_file = io.open(file_path, 'rb', buffering)
                self.follow_generator = Tailer(self.following_file, end=True).follow()
                self.follow_from_end_on_open = False
            else:
                # File is absent for now; once it appears, read it from the
                # beginning so its early lines are not lost.
                self.following_file = None
                self.follow_generator = None
                self.follow_from_end_on_open = True
        def next(self):
            # Returns the next decoded line, or None when no line is
            # currently available. NOTE: the `while True` body always reaches
            # the `return` below, so the loop runs at most one pass per call.
            while True:
                if self.follow_generator:
                    line = next(self.follow_generator)
                else:
                    line = None
                if line is None:
                    # No data available: check whether the file we are
                    # following was deleted or replaced (different inode).
                    if self.follow_generator:
                        try:
                            is_file_changed = not os.path.isfile(file_path) or os.stat(file_path).st_ino != os.fstat(self.following_file.fileno()).st_ino
                        except OSError:
                            # File could be deleted between isfile and stat invocations, which will make the latter to fail.
                            is_file_changed = True
                        if is_file_changed:
                            # File was deleted or re-created.
                            self.following_file.close()
                            self.following_file = None
                            self.follow_generator = None
                    if not self.follow_generator and os.path.isfile(file_path):
                        # New file is available. Open it.
                        try:
                            self.following_file = io.open(file_path, 'rb', buffering)
                            self.follow_generator = Tailer(self.following_file, end=self.follow_from_end_on_open).follow()
                            self.follow_from_end_on_open = False # something could be written before we noticed change of file
                        except (IOError, OSError) as e:
                            # Open raced with another delete/rotate; report and
                            # reset so the next call retries from scratch.
                            LOG.info("Unable to tail file: %s", e)
                            if self.following_file:
                                self.following_file.close()
                            self.following_file= None
                            self.follow_generator = None
                            line = None
                        else:
                            line = next(self.follow_generator)
                # Decode only real lines; a bare None signals "nothing yet".
                return line.decode(encoding, errors) if line is not None else line
        def __iter__(self):
            return self
        def __next__(self):
            # Python 3 iterator protocol delegates to the Python 2-style next().
            return self.next()
    return FollowPathGenerator() | def function[follow_path, parameter[file_path, buffering, encoding, errors]]:
constant[
Similar to follow, but also looks up if inode of file is changed
e.g. if it was re-created.
Returned generator yields strings encoded by using encoding.
If encoding is not specified, it defaults to locale.getpreferredencoding()
>>> import io
>>> import os
>>> f = io.open('test_follow_path.txt', 'w+')
>>> generator = follow_path('test_follow_path.txt')
>>> _ = f.write('Line 1\n')
>>> f.flush()
>>> print(next(generator))
Line 1
>>> _ = f.write('Line 2\n')
>>> f.flush()
>>> print(next(generator))
Line 2
>>> _ = f.truncate(0)
>>> _ = f.seek(0)
>>> _ = f.write('Line 3\n')
>>> f.flush()
>>> print(next(generator))
Line 3
>>> f.close()
>>> os.remove('test_follow_path.txt')
>>> f = io.open('test_follow_path.txt', 'w+')
>>> _ = f.write('Line 4\n')
>>> f.flush()
>>> print(next(generator))
Line 4
>>> print(next(generator))
None
>>> f.close()
>>> os.remove('test_follow_path.txt')
]
if compare[name[encoding] is constant[None]] begin[:]
variable[encoding] assign[=] call[name[locale].getpreferredencoding, parameter[]]
class class[FollowPathGenerator, parameter[]] begin[:]
def function[__init__, parameter[self]]:
if call[name[os].path.isfile, parameter[name[file_path]]] begin[:]
name[self].following_file assign[=] call[name[io].open, parameter[name[file_path], constant[rb], name[buffering]]]
name[self].follow_generator assign[=] call[call[name[Tailer], parameter[name[self].following_file]].follow, parameter[]]
name[self].follow_from_end_on_open assign[=] constant[False]
def function[next, parameter[self]]:
while constant[True] begin[:]
if name[self].follow_generator begin[:]
variable[line] assign[=] call[name[next], parameter[name[self].follow_generator]]
if compare[name[line] is constant[None]] begin[:]
if name[self].follow_generator begin[:]
<ast.Try object at 0x7da2044c2050>
if name[is_file_changed] begin[:]
call[name[self].following_file.close, parameter[]]
name[self].following_file assign[=] constant[None]
name[self].follow_generator assign[=] constant[None]
if <ast.BoolOp object at 0x7da2044c05e0> begin[:]
<ast.Try object at 0x7da2044c2380>
return[<ast.IfExp object at 0x7da20c990730>]
def function[__iter__, parameter[self]]:
return[name[self]]
def function[__next__, parameter[self]]:
return[call[name[self].next, parameter[]]]
return[call[name[FollowPathGenerator], parameter[]]] | keyword[def] identifier[follow_path] ( identifier[file_path] , identifier[buffering] =- literal[int] , identifier[encoding] = keyword[None] , identifier[errors] = literal[string] ):
literal[string]
keyword[if] identifier[encoding] keyword[is] keyword[None] :
identifier[encoding] = identifier[locale] . identifier[getpreferredencoding] ()
keyword[class] identifier[FollowPathGenerator] ( identifier[object] ):
keyword[def] identifier[__init__] ( identifier[self] ):
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[file_path] ):
identifier[self] . identifier[following_file] = identifier[io] . identifier[open] ( identifier[file_path] , literal[string] , identifier[buffering] )
identifier[self] . identifier[follow_generator] = identifier[Tailer] ( identifier[self] . identifier[following_file] , identifier[end] = keyword[True] ). identifier[follow] ()
identifier[self] . identifier[follow_from_end_on_open] = keyword[False]
keyword[else] :
identifier[self] . identifier[following_file] = keyword[None]
identifier[self] . identifier[follow_generator] = keyword[None]
identifier[self] . identifier[follow_from_end_on_open] = keyword[True]
keyword[def] identifier[next] ( identifier[self] ):
keyword[while] keyword[True] :
keyword[if] identifier[self] . identifier[follow_generator] :
identifier[line] = identifier[next] ( identifier[self] . identifier[follow_generator] )
keyword[else] :
identifier[line] = keyword[None]
keyword[if] identifier[line] keyword[is] keyword[None] :
keyword[if] identifier[self] . identifier[follow_generator] :
keyword[try] :
identifier[is_file_changed] = keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[file_path] ) keyword[or] identifier[os] . identifier[stat] ( identifier[file_path] ). identifier[st_ino] != identifier[os] . identifier[fstat] ( identifier[self] . identifier[following_file] . identifier[fileno] ()). identifier[st_ino]
keyword[except] identifier[OSError] :
identifier[is_file_changed] = keyword[True]
keyword[if] identifier[is_file_changed] :
identifier[self] . identifier[following_file] . identifier[close] ()
identifier[self] . identifier[following_file] = keyword[None]
identifier[self] . identifier[follow_generator] = keyword[None]
keyword[if] keyword[not] identifier[self] . identifier[follow_generator] keyword[and] identifier[os] . identifier[path] . identifier[isfile] ( identifier[file_path] ):
keyword[try] :
identifier[self] . identifier[following_file] = identifier[io] . identifier[open] ( identifier[file_path] , literal[string] , identifier[buffering] )
identifier[self] . identifier[follow_generator] = identifier[Tailer] ( identifier[self] . identifier[following_file] , identifier[end] = identifier[self] . identifier[follow_from_end_on_open] ). identifier[follow] ()
identifier[self] . identifier[follow_from_end_on_open] = keyword[False]
keyword[except] ( identifier[IOError] , identifier[OSError] ) keyword[as] identifier[e] :
identifier[LOG] . identifier[info] ( literal[string] , identifier[e] )
keyword[if] identifier[self] . identifier[following_file] :
identifier[self] . identifier[following_file] . identifier[close] ()
identifier[self] . identifier[following_file] = keyword[None]
identifier[self] . identifier[follow_generator] = keyword[None]
identifier[line] = keyword[None]
keyword[else] :
identifier[line] = identifier[next] ( identifier[self] . identifier[follow_generator] )
keyword[return] identifier[line] . identifier[decode] ( identifier[encoding] , identifier[errors] ) keyword[if] identifier[line] keyword[is] keyword[not] keyword[None] keyword[else] identifier[line]
keyword[def] identifier[__iter__] ( identifier[self] ):
keyword[return] identifier[self]
keyword[def] identifier[__next__] ( identifier[self] ):
keyword[return] identifier[self] . identifier[next] ()
keyword[return] identifier[FollowPathGenerator] () | def follow_path(file_path, buffering=-1, encoding=None, errors='strict'):
"""
Similar to follow, but also looks up if inode of file is changed
e.g. if it was re-created.
Returned generator yields strings encoded by using encoding.
If encoding is not specified, it defaults to locale.getpreferredencoding()
>>> import io
>>> import os
>>> f = io.open('test_follow_path.txt', 'w+')
>>> generator = follow_path('test_follow_path.txt')
>>> _ = f.write('Line 1\\n')
>>> f.flush()
>>> print(next(generator))
Line 1
>>> _ = f.write('Line 2\\n')
>>> f.flush()
>>> print(next(generator))
Line 2
>>> _ = f.truncate(0)
>>> _ = f.seek(0)
>>> _ = f.write('Line 3\\n')
>>> f.flush()
>>> print(next(generator))
Line 3
>>> f.close()
>>> os.remove('test_follow_path.txt')
>>> f = io.open('test_follow_path.txt', 'w+')
>>> _ = f.write('Line 4\\n')
>>> f.flush()
>>> print(next(generator))
Line 4
>>> print(next(generator))
None
>>> f.close()
>>> os.remove('test_follow_path.txt')
"""
if encoding is None:
encoding = locale.getpreferredencoding() # depends on [control=['if'], data=['encoding']]
class FollowPathGenerator(object):
def __init__(self):
if os.path.isfile(file_path):
self.following_file = io.open(file_path, 'rb', buffering)
self.follow_generator = Tailer(self.following_file, end=True).follow()
self.follow_from_end_on_open = False # depends on [control=['if'], data=[]]
else:
self.following_file = None
self.follow_generator = None
self.follow_from_end_on_open = True
def next(self):
while True:
if self.follow_generator:
line = next(self.follow_generator) # depends on [control=['if'], data=[]]
else:
line = None
if line is None:
if self.follow_generator:
try:
is_file_changed = not os.path.isfile(file_path) or os.stat(file_path).st_ino != os.fstat(self.following_file.fileno()).st_ino # depends on [control=['try'], data=[]]
except OSError:
# File could be deleted between isfile and stat invocations, which will make the latter to fail.
is_file_changed = True # depends on [control=['except'], data=[]]
if is_file_changed:
# File was deleted or re-created.
self.following_file.close()
self.following_file = None
self.follow_generator = None # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if not self.follow_generator and os.path.isfile(file_path):
# New file is available. Open it.
try:
self.following_file = io.open(file_path, 'rb', buffering)
self.follow_generator = Tailer(self.following_file, end=self.follow_from_end_on_open).follow()
self.follow_from_end_on_open = False # something could be written before we noticed change of file # depends on [control=['try'], data=[]]
except (IOError, OSError) as e:
LOG.info('Unable to tail file: %s', e)
if self.following_file:
self.following_file.close() # depends on [control=['if'], data=[]]
self.following_file = None
self.follow_generator = None
line = None # depends on [control=['except'], data=['e']]
else:
line = next(self.follow_generator) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['line']]
return line.decode(encoding, errors) if line is not None else line # depends on [control=['while'], data=[]]
def __iter__(self):
return self
def __next__(self):
return self.next()
return FollowPathGenerator() |
def contains_key(self, key):
    """Determine whether this map has an entry whose key equals ``key``.

    **Warning: the lookup compares the *binary* (serialized) form of the key —
    i.e. ``__hash__``/``__eq__`` of the serialized bytes — not the
    implementations defined on the key's class.**

    :param key: (object), the specified key.
    :return: (bool), ``true`` if this map contains an entry for the specified key.
    """
    check_not_none(key, "key can't be None")
    return self._contains_key_internal(self._to_data(key))
constant[
Determines whether this map contains an entry with the key.
**Warning: This method uses __hash__ and __eq__ methods of binary form of the key, not the actual implementations
of __hash__ and __eq__ defined in key's class.**
:param key: (object), the specified key.
:return: (bool), ``true`` if this map contains an entry for the specified key.
]
call[name[check_not_none], parameter[name[key], constant[key can't be None]]]
variable[key_data] assign[=] call[name[self]._to_data, parameter[name[key]]]
return[call[name[self]._contains_key_internal, parameter[name[key_data]]]] | keyword[def] identifier[contains_key] ( identifier[self] , identifier[key] ):
literal[string]
identifier[check_not_none] ( identifier[key] , literal[string] )
identifier[key_data] = identifier[self] . identifier[_to_data] ( identifier[key] )
keyword[return] identifier[self] . identifier[_contains_key_internal] ( identifier[key_data] ) | def contains_key(self, key):
"""
Determines whether this map contains an entry with the key.
**Warning: This method uses __hash__ and __eq__ methods of binary form of the key, not the actual implementations
of __hash__ and __eq__ defined in key's class.**
:param key: (object), the specified key.
:return: (bool), ``true`` if this map contains an entry for the specified key.
"""
check_not_none(key, "key can't be None")
key_data = self._to_data(key)
return self._contains_key_internal(key_data) |
def after_reject(analysis):
    """Event handler fired after the "reject" transition of ``analysis``.

    Detaches the analysis from its worksheet, rejects every analysis that
    depends on it and, for request-bound analyses, pushes the parent request
    through whichever follow-up transition now applies.
    """
    # The rejected analysis no longer belongs on its worksheet
    remove_analysis_from_worksheet(analysis)
    # Analyses depending on this one can no longer be resolved: reject them too
    cascade_to_dependents(analysis, "reject")
    if not IRequestAnalysis.providedBy(analysis):
        return
    request = analysis.getRequest()
    # Try verify (for when remaining analyses are in 'verified')
    doActionFor(request, "verify")
    # Try submit (remaining analyses are in 'to_be_verified')
    doActionFor(request, "submit")
    # Try rollback (no remaining analyses or some not submitted)
    doActionFor(request, "rollback_to_receive")
    reindex_request(analysis)
constant[Function triggered after the "reject" transition for the analysis passed
in is performed.]
call[name[remove_analysis_from_worksheet], parameter[name[analysis]]]
call[name[cascade_to_dependents], parameter[name[analysis], constant[reject]]]
if call[name[IRequestAnalysis].providedBy, parameter[name[analysis]]] begin[:]
call[name[doActionFor], parameter[call[name[analysis].getRequest, parameter[]], constant[verify]]]
call[name[doActionFor], parameter[call[name[analysis].getRequest, parameter[]], constant[submit]]]
call[name[doActionFor], parameter[call[name[analysis].getRequest, parameter[]], constant[rollback_to_receive]]]
call[name[reindex_request], parameter[name[analysis]]] | keyword[def] identifier[after_reject] ( identifier[analysis] ):
literal[string]
identifier[remove_analysis_from_worksheet] ( identifier[analysis] )
identifier[cascade_to_dependents] ( identifier[analysis] , literal[string] )
keyword[if] identifier[IRequestAnalysis] . identifier[providedBy] ( identifier[analysis] ):
identifier[doActionFor] ( identifier[analysis] . identifier[getRequest] (), literal[string] )
identifier[doActionFor] ( identifier[analysis] . identifier[getRequest] (), literal[string] )
identifier[doActionFor] ( identifier[analysis] . identifier[getRequest] (), literal[string] )
identifier[reindex_request] ( identifier[analysis] ) | def after_reject(analysis):
"""Function triggered after the "reject" transition for the analysis passed
in is performed."""
# Remove from the worksheet
remove_analysis_from_worksheet(analysis)
# Reject our dependents (analyses that depend on this analysis)
cascade_to_dependents(analysis, 'reject')
if IRequestAnalysis.providedBy(analysis):
# Try verify (for when remaining analyses are in 'verified')
doActionFor(analysis.getRequest(), 'verify')
# Try submit (remaining analyses are in 'to_be_verified')
doActionFor(analysis.getRequest(), 'submit')
# Try rollback (no remaining analyses or some not submitted)
doActionFor(analysis.getRequest(), 'rollback_to_receive')
reindex_request(analysis) # depends on [control=['if'], data=[]] |
def drag_and_drop(self, source, target):
    """Drag ``source`` onto ``target``.

    Presses and holds the left mouse button on the source element, moves the
    pointer to the target element and releases the button there.

    :Args:
     - source: The element to mouse down.
     - target: The element to mouse up.
    """
    # Composed from the two primitive actions; returning self keeps the
    # fluent, chainable interface.
    self.click_and_hold(source)
    self.release(target)
    return self
constant[
Holds down the left mouse button on the source element,
then moves to the target element and releases the mouse button.
:Args:
- source: The element to mouse down.
- target: The element to mouse up.
]
call[name[self].click_and_hold, parameter[name[source]]]
call[name[self].release, parameter[name[target]]]
return[name[self]] | keyword[def] identifier[drag_and_drop] ( identifier[self] , identifier[source] , identifier[target] ):
literal[string]
identifier[self] . identifier[click_and_hold] ( identifier[source] )
identifier[self] . identifier[release] ( identifier[target] )
keyword[return] identifier[self] | def drag_and_drop(self, source, target):
"""
Holds down the left mouse button on the source element,
then moves to the target element and releases the mouse button.
:Args:
- source: The element to mouse down.
- target: The element to mouse up.
"""
self.click_and_hold(source)
self.release(target)
return self |
def init_epoch(self):
    """Set up the batch generator for a new epoch."""
    if not self._restored_from_state:
        # Fresh epoch: snapshot the shuffler's RNG state so it can be
        # serialized and later restored for a reproducible batch order.
        self._random_state_this_epoch = self.random_shuffler.random_state
    else:
        # Resuming from a serialized state: re-install the RNG state that
        # was captured when the state was saved.
        self.random_shuffler.random_state = self._random_state_this_epoch
    self.create_batches()
    if not self._restored_from_state:
        self._iterations_this_epoch = 0
    else:
        # Keep the restored per-epoch iteration counter; just clear the flag
        # now that the epoch has been rebuilt.
        self._restored_from_state = False
    if not self.repeat:
        self.iterations = 0
constant[Set up the batch generator for a new epoch.]
if name[self]._restored_from_state begin[:]
name[self].random_shuffler.random_state assign[=] name[self]._random_state_this_epoch
call[name[self].create_batches, parameter[]]
if name[self]._restored_from_state begin[:]
name[self]._restored_from_state assign[=] constant[False]
if <ast.UnaryOp object at 0x7da1b216da20> begin[:]
name[self].iterations assign[=] constant[0] | keyword[def] identifier[init_epoch] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_restored_from_state] :
identifier[self] . identifier[random_shuffler] . identifier[random_state] = identifier[self] . identifier[_random_state_this_epoch]
keyword[else] :
identifier[self] . identifier[_random_state_this_epoch] = identifier[self] . identifier[random_shuffler] . identifier[random_state]
identifier[self] . identifier[create_batches] ()
keyword[if] identifier[self] . identifier[_restored_from_state] :
identifier[self] . identifier[_restored_from_state] = keyword[False]
keyword[else] :
identifier[self] . identifier[_iterations_this_epoch] = literal[int]
keyword[if] keyword[not] identifier[self] . identifier[repeat] :
identifier[self] . identifier[iterations] = literal[int] | def init_epoch(self):
"""Set up the batch generator for a new epoch."""
if self._restored_from_state:
self.random_shuffler.random_state = self._random_state_this_epoch # depends on [control=['if'], data=[]]
else:
self._random_state_this_epoch = self.random_shuffler.random_state
self.create_batches()
if self._restored_from_state:
self._restored_from_state = False # depends on [control=['if'], data=[]]
else:
self._iterations_this_epoch = 0
if not self.repeat:
self.iterations = 0 # depends on [control=['if'], data=[]] |
def prepare_database(delete_existing: bool) -> bool:
    """
    (Re)create a fresh database and run migrations.
    :param delete_existing: whether or not to drop an existing database if it exists
    :return: whether or not a database has been (re)created
    """
    settings = Settings()
    # Connect without a dbname: we cannot connect to the target database
    # before it exists, so use the server's default maintenance connection.
    conn = psycopg2.connect(
        password=settings.DB_PASSWORD,
        host=settings.DB_HOST,
        port=settings.DB_PORT,
        user=settings.DB_USER,
    )
    # CREATE/DROP DATABASE cannot run inside a transaction block.
    conn.autocommit = True
    cur = conn.cursor()
    db_name = settings.DB_NAME
    cur.execute('SELECT EXISTS (SELECT datname FROM pg_catalog.pg_database WHERE datname=%s)', (db_name,))
    already_exists = bool(cur.fetchone()[0])
    if already_exists:
        if not delete_existing:
            print('database "{}" already exists, skipping'.format(db_name))
            return False
        else:
            print('dropping database "{}" as it already exists...'.format(db_name))
            # NOTE(review): db_name is interpolated directly into the SQL
            # statement; assumes it comes from trusted settings and is never
            # user supplied — confirm.
            cur.execute('DROP DATABASE {}'.format(db_name))
    else:
        print('database "{}" does not yet exist'.format(db_name))
    print('creating database "{}"...'.format(db_name))
    cur.execute('CREATE DATABASE {}'.format(db_name))
    cur.close()
    conn.close()
    # {% if database.is_pg_sqlalchemy %}
    engine = create_engine(pg_dsn(settings))
    print('creating tables from model definition...')
    Base.metadata.create_all(engine)
    # Dispose the engine: this helper runs once at setup time, so do not keep
    # a connection pool alive afterwards.
    engine.dispose()
    # {% else %}
    # TODO
    # {% endif %}
    return True | def function[prepare_database, parameter[delete_existing]]:
constant[
(Re)create a fresh database and run migrations.
:param delete_existing: whether or not to drop an existing database if it exists
:return: whether or not a database has been (re)created
]
variable[settings] assign[=] call[name[Settings], parameter[]]
variable[conn] assign[=] call[name[psycopg2].connect, parameter[]]
name[conn].autocommit assign[=] constant[True]
variable[cur] assign[=] call[name[conn].cursor, parameter[]]
variable[db_name] assign[=] name[settings].DB_NAME
call[name[cur].execute, parameter[constant[SELECT EXISTS (SELECT datname FROM pg_catalog.pg_database WHERE datname=%s)], tuple[[<ast.Name object at 0x7da2054a49a0>]]]]
variable[already_exists] assign[=] call[name[bool], parameter[call[call[name[cur].fetchone, parameter[]]][constant[0]]]]
if name[already_exists] begin[:]
if <ast.UnaryOp object at 0x7da2054a5510> begin[:]
call[name[print], parameter[call[constant[database "{}" already exists, skipping].format, parameter[name[db_name]]]]]
return[constant[False]]
call[name[print], parameter[call[constant[creating database "{}"...].format, parameter[name[db_name]]]]]
call[name[cur].execute, parameter[call[constant[CREATE DATABASE {}].format, parameter[name[db_name]]]]]
call[name[cur].close, parameter[]]
call[name[conn].close, parameter[]]
variable[engine] assign[=] call[name[create_engine], parameter[call[name[pg_dsn], parameter[name[settings]]]]]
call[name[print], parameter[constant[creating tables from model definition...]]]
call[name[Base].metadata.create_all, parameter[name[engine]]]
call[name[engine].dispose, parameter[]]
return[constant[True]] | keyword[def] identifier[prepare_database] ( identifier[delete_existing] : identifier[bool] )-> identifier[bool] :
literal[string]
identifier[settings] = identifier[Settings] ()
identifier[conn] = identifier[psycopg2] . identifier[connect] (
identifier[password] = identifier[settings] . identifier[DB_PASSWORD] ,
identifier[host] = identifier[settings] . identifier[DB_HOST] ,
identifier[port] = identifier[settings] . identifier[DB_PORT] ,
identifier[user] = identifier[settings] . identifier[DB_USER] ,
)
identifier[conn] . identifier[autocommit] = keyword[True]
identifier[cur] = identifier[conn] . identifier[cursor] ()
identifier[db_name] = identifier[settings] . identifier[DB_NAME]
identifier[cur] . identifier[execute] ( literal[string] ,( identifier[db_name] ,))
identifier[already_exists] = identifier[bool] ( identifier[cur] . identifier[fetchone] ()[ literal[int] ])
keyword[if] identifier[already_exists] :
keyword[if] keyword[not] identifier[delete_existing] :
identifier[print] ( literal[string] . identifier[format] ( identifier[db_name] ))
keyword[return] keyword[False]
keyword[else] :
identifier[print] ( literal[string] . identifier[format] ( identifier[db_name] ))
identifier[cur] . identifier[execute] ( literal[string] . identifier[format] ( identifier[db_name] ))
keyword[else] :
identifier[print] ( literal[string] . identifier[format] ( identifier[db_name] ))
identifier[print] ( literal[string] . identifier[format] ( identifier[db_name] ))
identifier[cur] . identifier[execute] ( literal[string] . identifier[format] ( identifier[db_name] ))
identifier[cur] . identifier[close] ()
identifier[conn] . identifier[close] ()
identifier[engine] = identifier[create_engine] ( identifier[pg_dsn] ( identifier[settings] ))
identifier[print] ( literal[string] )
identifier[Base] . identifier[metadata] . identifier[create_all] ( identifier[engine] )
identifier[engine] . identifier[dispose] ()
keyword[return] keyword[True] | def prepare_database(delete_existing: bool) -> bool:
"""
(Re)create a fresh database and run migrations.
:param delete_existing: whether or not to drop an existing database if it exists
:return: whether or not a database has been (re)created
"""
settings = Settings()
conn = psycopg2.connect(password=settings.DB_PASSWORD, host=settings.DB_HOST, port=settings.DB_PORT, user=settings.DB_USER)
conn.autocommit = True
cur = conn.cursor()
db_name = settings.DB_NAME
cur.execute('SELECT EXISTS (SELECT datname FROM pg_catalog.pg_database WHERE datname=%s)', (db_name,))
already_exists = bool(cur.fetchone()[0])
if already_exists:
if not delete_existing:
print('database "{}" already exists, skipping'.format(db_name))
return False # depends on [control=['if'], data=[]]
else:
print('dropping database "{}" as it already exists...'.format(db_name))
cur.execute('DROP DATABASE {}'.format(db_name)) # depends on [control=['if'], data=[]]
else:
print('database "{}" does not yet exist'.format(db_name))
print('creating database "{}"...'.format(db_name))
cur.execute('CREATE DATABASE {}'.format(db_name))
cur.close()
conn.close()
# {% if database.is_pg_sqlalchemy %}
engine = create_engine(pg_dsn(settings))
print('creating tables from model definition...')
Base.metadata.create_all(engine)
engine.dispose()
# {% else %}
# TODO
# {% endif %}
return True |
def list_labels(self, bucket):
    """Yield every label stored under ``bucket``.

    Zip archives keep members in arbitrary order, so this walks the whole
    archive looking for entries belonging to the requested bucket — an
    inherently expensive operation.

    :param bucket: bucket to list labels for.
    :return: iterator for the labels in the specified bucket.
    """
    for member in self.z.namelist():
        container, label = self._nf(member.encode("utf-8"))
        if container != bucket:
            continue
        if label == MD_FILE:
            # The MD_FILE entry is excluded — it is not a label.
            continue
        yield label
constant[List labels for the given bucket. Due to zipfiles inherent arbitrary ordering,
this is an expensive operation, as it walks the entire archive searching for individual
'buckets'
:param bucket: bucket to list labels for.
:return: iterator for the labels in the specified bucket.
]
for taget[name[name]] in starred[call[name[self].z.namelist, parameter[]]] begin[:]
<ast.Tuple object at 0x7da1b0ef4d60> assign[=] call[name[self]._nf, parameter[call[name[name].encode, parameter[constant[utf-8]]]]]
if <ast.BoolOp object at 0x7da1b0ef6f20> begin[:]
<ast.Yield object at 0x7da1b0ef41f0> | keyword[def] identifier[list_labels] ( identifier[self] , identifier[bucket] ):
literal[string]
keyword[for] identifier[name] keyword[in] identifier[self] . identifier[z] . identifier[namelist] ():
identifier[container] , identifier[label] = identifier[self] . identifier[_nf] ( identifier[name] . identifier[encode] ( literal[string] ))
keyword[if] identifier[container] == identifier[bucket] keyword[and] identifier[label] != identifier[MD_FILE] :
keyword[yield] identifier[label] | def list_labels(self, bucket):
"""List labels for the given bucket. Due to zipfiles inherent arbitrary ordering,
this is an expensive operation, as it walks the entire archive searching for individual
'buckets'
:param bucket: bucket to list labels for.
:return: iterator for the labels in the specified bucket.
"""
for name in self.z.namelist():
(container, label) = self._nf(name.encode('utf-8'))
if container == bucket and label != MD_FILE:
yield label # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['name']] |
def empty_beat_rate(pianoroll, beat_resolution):
"""Return the ratio of empty beats to the total number of beats in a
pianoroll."""
_validate_pianoroll(pianoroll)
reshaped = pianoroll.reshape(-1, beat_resolution * pianoroll.shape[1])
n_empty_beats = np.count_nonzero(reshaped.any(1))
return n_empty_beats / len(reshaped) | def function[empty_beat_rate, parameter[pianoroll, beat_resolution]]:
constant[Return the ratio of empty beats to the total number of beats in a
pianoroll.]
call[name[_validate_pianoroll], parameter[name[pianoroll]]]
variable[reshaped] assign[=] call[name[pianoroll].reshape, parameter[<ast.UnaryOp object at 0x7da18ede6140>, binary_operation[name[beat_resolution] * call[name[pianoroll].shape][constant[1]]]]]
variable[n_empty_beats] assign[=] call[name[np].count_nonzero, parameter[call[name[reshaped].any, parameter[constant[1]]]]]
return[binary_operation[name[n_empty_beats] / call[name[len], parameter[name[reshaped]]]]] | keyword[def] identifier[empty_beat_rate] ( identifier[pianoroll] , identifier[beat_resolution] ):
literal[string]
identifier[_validate_pianoroll] ( identifier[pianoroll] )
identifier[reshaped] = identifier[pianoroll] . identifier[reshape] (- literal[int] , identifier[beat_resolution] * identifier[pianoroll] . identifier[shape] [ literal[int] ])
identifier[n_empty_beats] = identifier[np] . identifier[count_nonzero] ( identifier[reshaped] . identifier[any] ( literal[int] ))
keyword[return] identifier[n_empty_beats] / identifier[len] ( identifier[reshaped] ) | def empty_beat_rate(pianoroll, beat_resolution):
"""Return the ratio of empty beats to the total number of beats in a
pianoroll."""
_validate_pianoroll(pianoroll)
reshaped = pianoroll.reshape(-1, beat_resolution * pianoroll.shape[1])
n_empty_beats = np.count_nonzero(reshaped.any(1))
return n_empty_beats / len(reshaped) |
def evaluate_all_configs(
hparams, agent_model_dir, eval_fn=_eval_fn_with_learner
):
"""Evaluate the agent with multiple eval configurations."""
metrics = {}
# Iterate over all combinations of sampling temperatures and whether to do
# initial no-ops.
for sampling_temp in hparams.eval_sampling_temps:
# Iterate over a set so if eval_max_num_noops == 0 then it's 1 iteration.
for max_num_noops in set([hparams.eval_max_num_noops, 0]):
scores = evaluate_single_config(
hparams, sampling_temp, max_num_noops, agent_model_dir, eval_fn
)
for (score, clipped) in zip(scores, (True, False)):
metric_name = get_metric_name(sampling_temp, max_num_noops, clipped)
metrics[metric_name] = score
return metrics | def function[evaluate_all_configs, parameter[hparams, agent_model_dir, eval_fn]]:
constant[Evaluate the agent with multiple eval configurations.]
variable[metrics] assign[=] dictionary[[], []]
for taget[name[sampling_temp]] in starred[name[hparams].eval_sampling_temps] begin[:]
for taget[name[max_num_noops]] in starred[call[name[set], parameter[list[[<ast.Attribute object at 0x7da18ede63b0>, <ast.Constant object at 0x7da18ede46d0>]]]]] begin[:]
variable[scores] assign[=] call[name[evaluate_single_config], parameter[name[hparams], name[sampling_temp], name[max_num_noops], name[agent_model_dir], name[eval_fn]]]
for taget[tuple[[<ast.Name object at 0x7da18ede7be0>, <ast.Name object at 0x7da18ede4970>]]] in starred[call[name[zip], parameter[name[scores], tuple[[<ast.Constant object at 0x7da18ede72e0>, <ast.Constant object at 0x7da18ede46a0>]]]]] begin[:]
variable[metric_name] assign[=] call[name[get_metric_name], parameter[name[sampling_temp], name[max_num_noops], name[clipped]]]
call[name[metrics]][name[metric_name]] assign[=] name[score]
return[name[metrics]] | keyword[def] identifier[evaluate_all_configs] (
identifier[hparams] , identifier[agent_model_dir] , identifier[eval_fn] = identifier[_eval_fn_with_learner]
):
literal[string]
identifier[metrics] ={}
keyword[for] identifier[sampling_temp] keyword[in] identifier[hparams] . identifier[eval_sampling_temps] :
keyword[for] identifier[max_num_noops] keyword[in] identifier[set] ([ identifier[hparams] . identifier[eval_max_num_noops] , literal[int] ]):
identifier[scores] = identifier[evaluate_single_config] (
identifier[hparams] , identifier[sampling_temp] , identifier[max_num_noops] , identifier[agent_model_dir] , identifier[eval_fn]
)
keyword[for] ( identifier[score] , identifier[clipped] ) keyword[in] identifier[zip] ( identifier[scores] ,( keyword[True] , keyword[False] )):
identifier[metric_name] = identifier[get_metric_name] ( identifier[sampling_temp] , identifier[max_num_noops] , identifier[clipped] )
identifier[metrics] [ identifier[metric_name] ]= identifier[score]
keyword[return] identifier[metrics] | def evaluate_all_configs(hparams, agent_model_dir, eval_fn=_eval_fn_with_learner):
"""Evaluate the agent with multiple eval configurations."""
metrics = {}
# Iterate over all combinations of sampling temperatures and whether to do
# initial no-ops.
for sampling_temp in hparams.eval_sampling_temps:
# Iterate over a set so if eval_max_num_noops == 0 then it's 1 iteration.
for max_num_noops in set([hparams.eval_max_num_noops, 0]):
scores = evaluate_single_config(hparams, sampling_temp, max_num_noops, agent_model_dir, eval_fn)
for (score, clipped) in zip(scores, (True, False)):
metric_name = get_metric_name(sampling_temp, max_num_noops, clipped)
metrics[metric_name] = score # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['max_num_noops']] # depends on [control=['for'], data=['sampling_temp']]
return metrics |
def get_profile(self, id, details=None, with_attributes=None, partition=None, core_attributes=None, force_refresh=None):
"""GetProfile.
Get my profile.
:param str id:
:param bool details:
:param bool with_attributes:
:param str partition:
:param str core_attributes:
:param bool force_refresh:
:rtype: :class:`<Profile> <azure.devops.v5_0.profile.models.Profile>`
"""
route_values = {}
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'str')
query_parameters = {}
if details is not None:
query_parameters['details'] = self._serialize.query('details', details, 'bool')
if with_attributes is not None:
query_parameters['withAttributes'] = self._serialize.query('with_attributes', with_attributes, 'bool')
if partition is not None:
query_parameters['partition'] = self._serialize.query('partition', partition, 'str')
if core_attributes is not None:
query_parameters['coreAttributes'] = self._serialize.query('core_attributes', core_attributes, 'str')
if force_refresh is not None:
query_parameters['forceRefresh'] = self._serialize.query('force_refresh', force_refresh, 'bool')
response = self._send(http_method='GET',
location_id='f83735dc-483f-4238-a291-d45f6080a9af',
version='5.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('Profile', response) | def function[get_profile, parameter[self, id, details, with_attributes, partition, core_attributes, force_refresh]]:
constant[GetProfile.
Get my profile.
:param str id:
:param bool details:
:param bool with_attributes:
:param str partition:
:param str core_attributes:
:param bool force_refresh:
:rtype: :class:`<Profile> <azure.devops.v5_0.profile.models.Profile>`
]
variable[route_values] assign[=] dictionary[[], []]
if compare[name[id] is_not constant[None]] begin[:]
call[name[route_values]][constant[id]] assign[=] call[name[self]._serialize.url, parameter[constant[id], name[id], constant[str]]]
variable[query_parameters] assign[=] dictionary[[], []]
if compare[name[details] is_not constant[None]] begin[:]
call[name[query_parameters]][constant[details]] assign[=] call[name[self]._serialize.query, parameter[constant[details], name[details], constant[bool]]]
if compare[name[with_attributes] is_not constant[None]] begin[:]
call[name[query_parameters]][constant[withAttributes]] assign[=] call[name[self]._serialize.query, parameter[constant[with_attributes], name[with_attributes], constant[bool]]]
if compare[name[partition] is_not constant[None]] begin[:]
call[name[query_parameters]][constant[partition]] assign[=] call[name[self]._serialize.query, parameter[constant[partition], name[partition], constant[str]]]
if compare[name[core_attributes] is_not constant[None]] begin[:]
call[name[query_parameters]][constant[coreAttributes]] assign[=] call[name[self]._serialize.query, parameter[constant[core_attributes], name[core_attributes], constant[str]]]
if compare[name[force_refresh] is_not constant[None]] begin[:]
call[name[query_parameters]][constant[forceRefresh]] assign[=] call[name[self]._serialize.query, parameter[constant[force_refresh], name[force_refresh], constant[bool]]]
variable[response] assign[=] call[name[self]._send, parameter[]]
return[call[name[self]._deserialize, parameter[constant[Profile], name[response]]]] | keyword[def] identifier[get_profile] ( identifier[self] , identifier[id] , identifier[details] = keyword[None] , identifier[with_attributes] = keyword[None] , identifier[partition] = keyword[None] , identifier[core_attributes] = keyword[None] , identifier[force_refresh] = keyword[None] ):
literal[string]
identifier[route_values] ={}
keyword[if] identifier[id] keyword[is] keyword[not] keyword[None] :
identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[id] , literal[string] )
identifier[query_parameters] ={}
keyword[if] identifier[details] keyword[is] keyword[not] keyword[None] :
identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[details] , literal[string] )
keyword[if] identifier[with_attributes] keyword[is] keyword[not] keyword[None] :
identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[with_attributes] , literal[string] )
keyword[if] identifier[partition] keyword[is] keyword[not] keyword[None] :
identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[partition] , literal[string] )
keyword[if] identifier[core_attributes] keyword[is] keyword[not] keyword[None] :
identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[core_attributes] , literal[string] )
keyword[if] identifier[force_refresh] keyword[is] keyword[not] keyword[None] :
identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[force_refresh] , literal[string] )
identifier[response] = identifier[self] . identifier[_send] ( identifier[http_method] = literal[string] ,
identifier[location_id] = literal[string] ,
identifier[version] = literal[string] ,
identifier[route_values] = identifier[route_values] ,
identifier[query_parameters] = identifier[query_parameters] )
keyword[return] identifier[self] . identifier[_deserialize] ( literal[string] , identifier[response] ) | def get_profile(self, id, details=None, with_attributes=None, partition=None, core_attributes=None, force_refresh=None):
"""GetProfile.
Get my profile.
:param str id:
:param bool details:
:param bool with_attributes:
:param str partition:
:param str core_attributes:
:param bool force_refresh:
:rtype: :class:`<Profile> <azure.devops.v5_0.profile.models.Profile>`
"""
route_values = {}
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'str') # depends on [control=['if'], data=['id']]
query_parameters = {}
if details is not None:
query_parameters['details'] = self._serialize.query('details', details, 'bool') # depends on [control=['if'], data=['details']]
if with_attributes is not None:
query_parameters['withAttributes'] = self._serialize.query('with_attributes', with_attributes, 'bool') # depends on [control=['if'], data=['with_attributes']]
if partition is not None:
query_parameters['partition'] = self._serialize.query('partition', partition, 'str') # depends on [control=['if'], data=['partition']]
if core_attributes is not None:
query_parameters['coreAttributes'] = self._serialize.query('core_attributes', core_attributes, 'str') # depends on [control=['if'], data=['core_attributes']]
if force_refresh is not None:
query_parameters['forceRefresh'] = self._serialize.query('force_refresh', force_refresh, 'bool') # depends on [control=['if'], data=['force_refresh']]
response = self._send(http_method='GET', location_id='f83735dc-483f-4238-a291-d45f6080a9af', version='5.0', route_values=route_values, query_parameters=query_parameters)
return self._deserialize('Profile', response) |
def has_namespace_name(self, line: str, position: int, namespace: str, name: str) -> bool:
"""Check that the namespace is defined and has the given name."""
self.raise_for_missing_namespace(line, position, namespace, name)
return self.has_enumerated_namespace_name(namespace, name) or self.has_regex_namespace_name(namespace, name) | def function[has_namespace_name, parameter[self, line, position, namespace, name]]:
constant[Check that the namespace is defined and has the given name.]
call[name[self].raise_for_missing_namespace, parameter[name[line], name[position], name[namespace], name[name]]]
return[<ast.BoolOp object at 0x7da1b0e46350>] | keyword[def] identifier[has_namespace_name] ( identifier[self] , identifier[line] : identifier[str] , identifier[position] : identifier[int] , identifier[namespace] : identifier[str] , identifier[name] : identifier[str] )-> identifier[bool] :
literal[string]
identifier[self] . identifier[raise_for_missing_namespace] ( identifier[line] , identifier[position] , identifier[namespace] , identifier[name] )
keyword[return] identifier[self] . identifier[has_enumerated_namespace_name] ( identifier[namespace] , identifier[name] ) keyword[or] identifier[self] . identifier[has_regex_namespace_name] ( identifier[namespace] , identifier[name] ) | def has_namespace_name(self, line: str, position: int, namespace: str, name: str) -> bool:
"""Check that the namespace is defined and has the given name."""
self.raise_for_missing_namespace(line, position, namespace, name)
return self.has_enumerated_namespace_name(namespace, name) or self.has_regex_namespace_name(namespace, name) |
def _process_response(response):
"""Make the request and handle exception processing"""
# Read the response as JSON
try:
data = response.json()
except ValueError:
_log_and_raise_exception('Invalid response', response.text)
# Default case, Got proper response
if response.status_code == 200:
return { 'headers': response.headers, 'data': data }
return _raise_error_from_response(data) | def function[_process_response, parameter[response]]:
constant[Make the request and handle exception processing]
<ast.Try object at 0x7da20c993070>
if compare[name[response].status_code equal[==] constant[200]] begin[:]
return[dictionary[[<ast.Constant object at 0x7da20c9922c0>, <ast.Constant object at 0x7da20c990c10>], [<ast.Attribute object at 0x7da20c9922f0>, <ast.Name object at 0x7da1b26ae560>]]]
return[call[name[_raise_error_from_response], parameter[name[data]]]] | keyword[def] identifier[_process_response] ( identifier[response] ):
literal[string]
keyword[try] :
identifier[data] = identifier[response] . identifier[json] ()
keyword[except] identifier[ValueError] :
identifier[_log_and_raise_exception] ( literal[string] , identifier[response] . identifier[text] )
keyword[if] identifier[response] . identifier[status_code] == literal[int] :
keyword[return] { literal[string] : identifier[response] . identifier[headers] , literal[string] : identifier[data] }
keyword[return] identifier[_raise_error_from_response] ( identifier[data] ) | def _process_response(response):
"""Make the request and handle exception processing"""
# Read the response as JSON
try:
data = response.json() # depends on [control=['try'], data=[]]
except ValueError:
_log_and_raise_exception('Invalid response', response.text) # depends on [control=['except'], data=[]]
# Default case, Got proper response
if response.status_code == 200:
return {'headers': response.headers, 'data': data} # depends on [control=['if'], data=[]]
return _raise_error_from_response(data) |
def plot_dmag(data="", title="", fignum=1, norm=1,dmag_key='treat_ac_field',intensity='',
quality=False):
"""
plots demagenetization data versus step for all specimens in pandas dataframe datablock
Parameters
______________
data : Pandas dataframe with MagIC data model 3 columns:
fignum : figure number
specimen : specimen name
dmag_key : one of these: ['treat_temp','treat_ac_field','treat_mw_energy']
selected using method_codes : ['LT_T-Z','LT-AF-Z','LT-M-Z'] respectively
intensity : if blank will choose one of these: ['magn_moment', 'magn_volume', 'magn_mass']
quality : if True use the quality column of the DataFrame
title : title for plot
norm : if True, normalize data to first step
Output :
matptlotlib plot
"""
plt.figure(num=fignum, figsize=(5, 5))
if intensity:
int_key=intensity
else:
intlist = ['magn_moment', 'magn_volume', 'magn_mass']
# get which key we have
IntMeths = [col_name for col_name in data.columns if col_name in intlist]
int_key = IntMeths[0]
data = data[data[int_key].notnull()] # fish out all data with this key
units = "U" # this sets the units for plotting to undefined
if not dmag_key:
if 'treat_temp' in data.columns: units = "K" # kelvin
elif 'treat_ac_field' in data.columns: units = "T" # tesla
elif 'treat_mw_energy' in data.columns: units = "J" # joules
if dmag_key=='treat_temp': units='K'
if dmag_key=='treat_ac_field': units='T'
if dmag_key=='treat_mw_energy': units='J'
spcs = data.specimen.unique() # get a list of all specimens in DataFrame data
if len(spcs)==0:
print('no data for plotting')
return
# step through specimens to put on plot
for spc in spcs:
spec_data = data[data.specimen.str.contains(spc)]
INTblock = []
for ind, rec in spec_data.iterrows():
INTblock.append([float(rec[dmag_key]), 0, 0,
float(rec[int_key]), 1, rec['quality']])
if len(INTblock) > 2:
pmagplotlib.plot_mag(fignum, INTblock, title, 0, units, norm) | def function[plot_dmag, parameter[data, title, fignum, norm, dmag_key, intensity, quality]]:
constant[
plots demagenetization data versus step for all specimens in pandas dataframe datablock
Parameters
______________
data : Pandas dataframe with MagIC data model 3 columns:
fignum : figure number
specimen : specimen name
dmag_key : one of these: ['treat_temp','treat_ac_field','treat_mw_energy']
selected using method_codes : ['LT_T-Z','LT-AF-Z','LT-M-Z'] respectively
intensity : if blank will choose one of these: ['magn_moment', 'magn_volume', 'magn_mass']
quality : if True use the quality column of the DataFrame
title : title for plot
norm : if True, normalize data to first step
Output :
matptlotlib plot
]
call[name[plt].figure, parameter[]]
if name[intensity] begin[:]
variable[int_key] assign[=] name[intensity]
variable[data] assign[=] call[name[data]][call[call[name[data]][name[int_key]].notnull, parameter[]]]
variable[units] assign[=] constant[U]
if <ast.UnaryOp object at 0x7da18f721360> begin[:]
if compare[constant[treat_temp] in name[data].columns] begin[:]
variable[units] assign[=] constant[K]
if compare[name[dmag_key] equal[==] constant[treat_temp]] begin[:]
variable[units] assign[=] constant[K]
if compare[name[dmag_key] equal[==] constant[treat_ac_field]] begin[:]
variable[units] assign[=] constant[T]
if compare[name[dmag_key] equal[==] constant[treat_mw_energy]] begin[:]
variable[units] assign[=] constant[J]
variable[spcs] assign[=] call[name[data].specimen.unique, parameter[]]
if compare[call[name[len], parameter[name[spcs]]] equal[==] constant[0]] begin[:]
call[name[print], parameter[constant[no data for plotting]]]
return[None]
for taget[name[spc]] in starred[name[spcs]] begin[:]
variable[spec_data] assign[=] call[name[data]][call[name[data].specimen.str.contains, parameter[name[spc]]]]
variable[INTblock] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da18f720160>, <ast.Name object at 0x7da18f7237c0>]]] in starred[call[name[spec_data].iterrows, parameter[]]] begin[:]
call[name[INTblock].append, parameter[list[[<ast.Call object at 0x7da18f7204c0>, <ast.Constant object at 0x7da1b05cbfd0>, <ast.Constant object at 0x7da1b05cbe80>, <ast.Call object at 0x7da1b05cbf10>, <ast.Constant object at 0x7da1b05c8040>, <ast.Subscript object at 0x7da1b05c8340>]]]]
if compare[call[name[len], parameter[name[INTblock]]] greater[>] constant[2]] begin[:]
call[name[pmagplotlib].plot_mag, parameter[name[fignum], name[INTblock], name[title], constant[0], name[units], name[norm]]] | keyword[def] identifier[plot_dmag] ( identifier[data] = literal[string] , identifier[title] = literal[string] , identifier[fignum] = literal[int] , identifier[norm] = literal[int] , identifier[dmag_key] = literal[string] , identifier[intensity] = literal[string] ,
identifier[quality] = keyword[False] ):
literal[string]
identifier[plt] . identifier[figure] ( identifier[num] = identifier[fignum] , identifier[figsize] =( literal[int] , literal[int] ))
keyword[if] identifier[intensity] :
identifier[int_key] = identifier[intensity]
keyword[else] :
identifier[intlist] =[ literal[string] , literal[string] , literal[string] ]
identifier[IntMeths] =[ identifier[col_name] keyword[for] identifier[col_name] keyword[in] identifier[data] . identifier[columns] keyword[if] identifier[col_name] keyword[in] identifier[intlist] ]
identifier[int_key] = identifier[IntMeths] [ literal[int] ]
identifier[data] = identifier[data] [ identifier[data] [ identifier[int_key] ]. identifier[notnull] ()]
identifier[units] = literal[string]
keyword[if] keyword[not] identifier[dmag_key] :
keyword[if] literal[string] keyword[in] identifier[data] . identifier[columns] : identifier[units] = literal[string]
keyword[elif] literal[string] keyword[in] identifier[data] . identifier[columns] : identifier[units] = literal[string]
keyword[elif] literal[string] keyword[in] identifier[data] . identifier[columns] : identifier[units] = literal[string]
keyword[if] identifier[dmag_key] == literal[string] : identifier[units] = literal[string]
keyword[if] identifier[dmag_key] == literal[string] : identifier[units] = literal[string]
keyword[if] identifier[dmag_key] == literal[string] : identifier[units] = literal[string]
identifier[spcs] = identifier[data] . identifier[specimen] . identifier[unique] ()
keyword[if] identifier[len] ( identifier[spcs] )== literal[int] :
identifier[print] ( literal[string] )
keyword[return]
keyword[for] identifier[spc] keyword[in] identifier[spcs] :
identifier[spec_data] = identifier[data] [ identifier[data] . identifier[specimen] . identifier[str] . identifier[contains] ( identifier[spc] )]
identifier[INTblock] =[]
keyword[for] identifier[ind] , identifier[rec] keyword[in] identifier[spec_data] . identifier[iterrows] ():
identifier[INTblock] . identifier[append] ([ identifier[float] ( identifier[rec] [ identifier[dmag_key] ]), literal[int] , literal[int] ,
identifier[float] ( identifier[rec] [ identifier[int_key] ]), literal[int] , identifier[rec] [ literal[string] ]])
keyword[if] identifier[len] ( identifier[INTblock] )> literal[int] :
identifier[pmagplotlib] . identifier[plot_mag] ( identifier[fignum] , identifier[INTblock] , identifier[title] , literal[int] , identifier[units] , identifier[norm] ) | def plot_dmag(data='', title='', fignum=1, norm=1, dmag_key='treat_ac_field', intensity='', quality=False):
"""
plots demagenetization data versus step for all specimens in pandas dataframe datablock
Parameters
______________
data : Pandas dataframe with MagIC data model 3 columns:
fignum : figure number
specimen : specimen name
dmag_key : one of these: ['treat_temp','treat_ac_field','treat_mw_energy']
selected using method_codes : ['LT_T-Z','LT-AF-Z','LT-M-Z'] respectively
intensity : if blank will choose one of these: ['magn_moment', 'magn_volume', 'magn_mass']
quality : if True use the quality column of the DataFrame
title : title for plot
norm : if True, normalize data to first step
Output :
matptlotlib plot
"""
plt.figure(num=fignum, figsize=(5, 5))
if intensity:
int_key = intensity # depends on [control=['if'], data=[]]
else:
intlist = ['magn_moment', 'magn_volume', 'magn_mass']
# get which key we have
IntMeths = [col_name for col_name in data.columns if col_name in intlist]
int_key = IntMeths[0]
data = data[data[int_key].notnull()] # fish out all data with this key
units = 'U' # this sets the units for plotting to undefined
if not dmag_key:
if 'treat_temp' in data.columns:
units = 'K' # kelvin # depends on [control=['if'], data=[]]
elif 'treat_ac_field' in data.columns:
units = 'T' # tesla # depends on [control=['if'], data=[]]
elif 'treat_mw_energy' in data.columns:
units = 'J' # joules # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if dmag_key == 'treat_temp':
units = 'K' # depends on [control=['if'], data=[]]
if dmag_key == 'treat_ac_field':
units = 'T' # depends on [control=['if'], data=[]]
if dmag_key == 'treat_mw_energy':
units = 'J' # depends on [control=['if'], data=[]]
spcs = data.specimen.unique() # get a list of all specimens in DataFrame data
if len(spcs) == 0:
print('no data for plotting')
return # depends on [control=['if'], data=[]]
# step through specimens to put on plot
for spc in spcs:
spec_data = data[data.specimen.str.contains(spc)]
INTblock = []
for (ind, rec) in spec_data.iterrows():
INTblock.append([float(rec[dmag_key]), 0, 0, float(rec[int_key]), 1, rec['quality']]) # depends on [control=['for'], data=[]]
if len(INTblock) > 2:
pmagplotlib.plot_mag(fignum, INTblock, title, 0, units, norm) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['spc']] |
def progress(self, *restrictions, display=True):
"""
report progress of populating the table
:return: remaining, total -- tuples to be populated
"""
todo = self._jobs_to_do(restrictions)
total = len(todo)
remaining = len(todo - self.target)
if display:
print('%-20s' % self.__class__.__name__,
'Completed %d of %d (%2.1f%%) %s' % (
total - remaining, total, 100 - 100 * remaining / (total+1e-12),
datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S')), flush=True)
return remaining, total | def function[progress, parameter[self]]:
constant[
report progress of populating the table
:return: remaining, total -- tuples to be populated
]
variable[todo] assign[=] call[name[self]._jobs_to_do, parameter[name[restrictions]]]
variable[total] assign[=] call[name[len], parameter[name[todo]]]
variable[remaining] assign[=] call[name[len], parameter[binary_operation[name[todo] - name[self].target]]]
if name[display] begin[:]
call[name[print], parameter[binary_operation[constant[%-20s] <ast.Mod object at 0x7da2590d6920> name[self].__class__.__name__], binary_operation[constant[Completed %d of %d (%2.1f%%) %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.BinOp object at 0x7da1b138cb20>, <ast.Name object at 0x7da1b138cbe0>, <ast.BinOp object at 0x7da1b138cc10>, <ast.Call object at 0x7da1b12d8640>]]]]]
return[tuple[[<ast.Name object at 0x7da1b12d9c60>, <ast.Name object at 0x7da1b12d9d50>]]] | keyword[def] identifier[progress] ( identifier[self] ,* identifier[restrictions] , identifier[display] = keyword[True] ):
literal[string]
identifier[todo] = identifier[self] . identifier[_jobs_to_do] ( identifier[restrictions] )
identifier[total] = identifier[len] ( identifier[todo] )
identifier[remaining] = identifier[len] ( identifier[todo] - identifier[self] . identifier[target] )
keyword[if] identifier[display] :
identifier[print] ( literal[string] % identifier[self] . identifier[__class__] . identifier[__name__] ,
literal[string] %(
identifier[total] - identifier[remaining] , identifier[total] , literal[int] - literal[int] * identifier[remaining] /( identifier[total] + literal[int] ),
identifier[datetime] . identifier[datetime] . identifier[strftime] ( identifier[datetime] . identifier[datetime] . identifier[now] (), literal[string] )), identifier[flush] = keyword[True] )
keyword[return] identifier[remaining] , identifier[total] | def progress(self, *restrictions, display=True):
"""
report progress of populating the table
:return: remaining, total -- tuples to be populated
"""
todo = self._jobs_to_do(restrictions)
total = len(todo)
remaining = len(todo - self.target)
if display:
print('%-20s' % self.__class__.__name__, 'Completed %d of %d (%2.1f%%) %s' % (total - remaining, total, 100 - 100 * remaining / (total + 1e-12), datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S')), flush=True) # depends on [control=['if'], data=[]]
return (remaining, total) |
def _fetchAllChildren(self):
""" Gets all sub directories and files within the current directory.
Does not fetch hidden files.
"""
childItems = []
fileNames = os.listdir(self._fileName)
absFileNames = [os.path.join(self._fileName, fn) for fn in fileNames]
# Add subdirectories
for fileName, absFileName in zip(fileNames, absFileNames):
if os.path.isdir(absFileName) and not fileName.startswith('.'):
childItems.append(DirectoryRti(fileName=absFileName, nodeName=fileName))
# Add regular files
for fileName, absFileName in zip(fileNames, absFileNames):
if os.path.isfile(absFileName) and not fileName.startswith('.'):
childItem = createRtiFromFileName(absFileName)
childItems.append(childItem)
return childItems | def function[_fetchAllChildren, parameter[self]]:
constant[ Gets all sub directories and files within the current directory.
Does not fetch hidden files.
]
variable[childItems] assign[=] list[[]]
variable[fileNames] assign[=] call[name[os].listdir, parameter[name[self]._fileName]]
variable[absFileNames] assign[=] <ast.ListComp object at 0x7da1b04f94b0>
for taget[tuple[[<ast.Name object at 0x7da1b04f96f0>, <ast.Name object at 0x7da1b04f8700>]]] in starred[call[name[zip], parameter[name[fileNames], name[absFileNames]]]] begin[:]
if <ast.BoolOp object at 0x7da1b05e3fa0> begin[:]
call[name[childItems].append, parameter[call[name[DirectoryRti], parameter[]]]]
for taget[tuple[[<ast.Name object at 0x7da1b05e12d0>, <ast.Name object at 0x7da1b05e2ec0>]]] in starred[call[name[zip], parameter[name[fileNames], name[absFileNames]]]] begin[:]
if <ast.BoolOp object at 0x7da1b05e3af0> begin[:]
variable[childItem] assign[=] call[name[createRtiFromFileName], parameter[name[absFileName]]]
call[name[childItems].append, parameter[name[childItem]]]
return[name[childItems]] | keyword[def] identifier[_fetchAllChildren] ( identifier[self] ):
literal[string]
identifier[childItems] =[]
identifier[fileNames] = identifier[os] . identifier[listdir] ( identifier[self] . identifier[_fileName] )
identifier[absFileNames] =[ identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[_fileName] , identifier[fn] ) keyword[for] identifier[fn] keyword[in] identifier[fileNames] ]
keyword[for] identifier[fileName] , identifier[absFileName] keyword[in] identifier[zip] ( identifier[fileNames] , identifier[absFileNames] ):
keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[absFileName] ) keyword[and] keyword[not] identifier[fileName] . identifier[startswith] ( literal[string] ):
identifier[childItems] . identifier[append] ( identifier[DirectoryRti] ( identifier[fileName] = identifier[absFileName] , identifier[nodeName] = identifier[fileName] ))
keyword[for] identifier[fileName] , identifier[absFileName] keyword[in] identifier[zip] ( identifier[fileNames] , identifier[absFileNames] ):
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[absFileName] ) keyword[and] keyword[not] identifier[fileName] . identifier[startswith] ( literal[string] ):
identifier[childItem] = identifier[createRtiFromFileName] ( identifier[absFileName] )
identifier[childItems] . identifier[append] ( identifier[childItem] )
keyword[return] identifier[childItems] | def _fetchAllChildren(self):
""" Gets all sub directories and files within the current directory.
Does not fetch hidden files.
"""
childItems = []
fileNames = os.listdir(self._fileName)
absFileNames = [os.path.join(self._fileName, fn) for fn in fileNames]
# Add subdirectories
for (fileName, absFileName) in zip(fileNames, absFileNames):
if os.path.isdir(absFileName) and (not fileName.startswith('.')):
childItems.append(DirectoryRti(fileName=absFileName, nodeName=fileName)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
# Add regular files
for (fileName, absFileName) in zip(fileNames, absFileNames):
if os.path.isfile(absFileName) and (not fileName.startswith('.')):
childItem = createRtiFromFileName(absFileName)
childItems.append(childItem) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return childItems |
def _build_wsgi_env(event, app_name):
    """Turn the Lambda/API Gateway request event into a WSGI environment dict.

    :param dict event:
        The event parameters passed to the Lambda function entrypoint.
    :param str app_name:
        Name of the API application.
    :return: a WSGI ``environ`` dictionary built from the event.
    """
    params = event['parameters']
    gateway = params['gateway']
    request = params['request']
    ctx = event['rawContext']
    headers = request['header']
    # Body is re-serialized to JSON text; assumes request['body'] is
    # JSON-serializable — TODO confirm against the gateway mapping template.
    body = six.text_type(json.dumps(request['body']))
    # Substitute the concrete path parameters into the resource template so
    # connexion/flask can route and extract them, e.g.
    # "/foo/{param1}/bar/{param2}" becomes "/foo/123/bar/456".
    path = gateway['resource-path'].format(**request['path'])
    environ = {
        'PATH_INFO': path,
        'QUERY_STRING': urlencode(request['querystring']),
        'REMOTE_ADDR': ctx['identity']['sourceIp'],
        'REQUEST_METHOD': ctx['httpMethod'],
        'SCRIPT_NAME': app_name,
        'SERVER_NAME': app_name,
        'SERVER_PORT': headers.get('X-Forwarded-Port', '80'),
        'SERVER_PROTOCOL': 'HTTP/1.1',
        'wsgi.version': (1, 0),
        'wsgi.url_scheme': headers.get('X-Forwarded-Proto', 'http'),
        'wsgi.input': StringIO(body),
        'wsgi.errors': StringIO(),
        'wsgi.multiprocess': False,
        'wsgi.multithread': False,
        'wsgi.run_once': False,
        'CONTENT_TYPE': headers.get('Content-Type', 'application/json'),
    }
    # Only payload-carrying methods advertise a content length.
    if ctx['httpMethod'] in ('POST', 'PUT', 'PATCH'):
        environ['CONTENT_LENGTH'] = str(len(body))
    # Expose every incoming header under the conventional HTTP_* key.
    for name, value in headers.items():
        environ['HTTP_{}'.format(name.upper().replace('-', '_'))] = str(value)
    return environ
constant[Turn the Lambda/API Gateway request event into a WSGI environment dict.
:param dict event:
The event parameters passed to the Lambda function entrypoint.
:param str app_name:
Name of the API application.
]
variable[gateway] assign[=] call[call[name[event]][constant[parameters]]][constant[gateway]]
variable[request] assign[=] call[call[name[event]][constant[parameters]]][constant[request]]
variable[ctx] assign[=] call[name[event]][constant[rawContext]]
variable[headers] assign[=] call[name[request]][constant[header]]
variable[body] assign[=] call[name[six].text_type, parameter[call[name[json].dumps, parameter[call[name[request]][constant[body]]]]]]
variable[path] assign[=] call[call[name[gateway]][constant[resource-path]].format, parameter[]]
variable[environ] assign[=] dictionary[[<ast.Constant object at 0x7da1b0ebdd20>, <ast.Constant object at 0x7da1b0ebcfa0>, <ast.Constant object at 0x7da1b0ebe470>, <ast.Constant object at 0x7da1b0ebe3e0>, <ast.Constant object at 0x7da1b0ebfe80>, <ast.Constant object at 0x7da1b0ebef80>, <ast.Constant object at 0x7da1b0ebfa90>, <ast.Constant object at 0x7da1b0ebddb0>, <ast.Constant object at 0x7da1b0ebd7b0>, <ast.Constant object at 0x7da1b0ebc790>, <ast.Constant object at 0x7da1b0ebf160>, <ast.Constant object at 0x7da1b0ebd9f0>, <ast.Constant object at 0x7da1b0ebcca0>, <ast.Constant object at 0x7da1b0ebdb10>, <ast.Constant object at 0x7da1b0ebf1c0>, <ast.Constant object at 0x7da1b0ebfa60>], [<ast.Name object at 0x7da1b0ebfc10>, <ast.Call object at 0x7da1b0ebcdf0>, <ast.Subscript object at 0x7da1b0ebe950>, <ast.Subscript object at 0x7da1b0ebd6f0>, <ast.Name object at 0x7da1b0ebf970>, <ast.Name object at 0x7da1b0ebc550>, <ast.Call object at 0x7da1b0ebd270>, <ast.Constant object at 0x7da1b0ebdbd0>, <ast.Tuple object at 0x7da1b0ebd480>, <ast.Call object at 0x7da1b0ebea10>, <ast.Call object at 0x7da1b0ebc8b0>, <ast.Call object at 0x7da1b0ebd510>, <ast.Constant object at 0x7da1b0ebc7f0>, <ast.Constant object at 0x7da1b0ebdc90>, <ast.Constant object at 0x7da1b0ebe680>, <ast.Call object at 0x7da1b0ebf2b0>]]
if compare[call[name[ctx]][constant[httpMethod]] in list[[<ast.Constant object at 0x7da1b0ebf760>, <ast.Constant object at 0x7da1b0ebf0a0>, <ast.Constant object at 0x7da1b0ebd330>]]] begin[:]
call[name[environ]][constant[CONTENT_LENGTH]] assign[=] call[name[str], parameter[call[name[len], parameter[name[body]]]]]
for taget[tuple[[<ast.Name object at 0x7da1b0ebefb0>, <ast.Name object at 0x7da1b0ebc340>]]] in starred[call[name[headers].items, parameter[]]] begin[:]
variable[wsgi_name] assign[=] call[constant[HTTP_{}].format, parameter[call[call[name[header_name].upper, parameter[]].replace, parameter[constant[-], constant[_]]]]]
call[name[environ]][name[wsgi_name]] assign[=] call[name[str], parameter[name[header_value]]]
return[name[environ]] | keyword[def] identifier[_build_wsgi_env] ( identifier[event] , identifier[app_name] ):
literal[string]
identifier[gateway] = identifier[event] [ literal[string] ][ literal[string] ]
identifier[request] = identifier[event] [ literal[string] ][ literal[string] ]
identifier[ctx] = identifier[event] [ literal[string] ]
identifier[headers] = identifier[request] [ literal[string] ]
identifier[body] = identifier[six] . identifier[text_type] ( identifier[json] . identifier[dumps] ( identifier[request] [ literal[string] ]))
identifier[path] = identifier[gateway] [ literal[string] ]. identifier[format] (
** identifier[event] [ literal[string] ][ literal[string] ][ literal[string] ]
)
identifier[environ] ={
literal[string] : identifier[path] ,
literal[string] : identifier[urlencode] ( identifier[request] [ literal[string] ]),
literal[string] : identifier[ctx] [ literal[string] ][ literal[string] ],
literal[string] : identifier[ctx] [ literal[string] ],
literal[string] : identifier[app_name] ,
literal[string] : identifier[app_name] ,
literal[string] : identifier[headers] . identifier[get] ( literal[string] , literal[string] ),
literal[string] : literal[string] ,
literal[string] :( literal[int] , literal[int] ),
literal[string] : identifier[headers] . identifier[get] ( literal[string] , literal[string] ),
literal[string] : identifier[StringIO] ( identifier[body] ),
literal[string] : identifier[StringIO] (),
literal[string] : keyword[False] ,
literal[string] : keyword[False] ,
literal[string] : keyword[False] ,
literal[string] : identifier[headers] . identifier[get] ( literal[string] , literal[string] ),
}
keyword[if] identifier[ctx] [ literal[string] ] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
identifier[environ] [ literal[string] ]= identifier[str] ( identifier[len] ( identifier[body] ))
keyword[for] identifier[header_name] , identifier[header_value] keyword[in] identifier[headers] . identifier[items] ():
identifier[wsgi_name] = literal[string] . identifier[format] ( identifier[header_name] . identifier[upper] (). identifier[replace] ( literal[string] , literal[string] ))
identifier[environ] [ identifier[wsgi_name] ]= identifier[str] ( identifier[header_value] )
keyword[return] identifier[environ] | def _build_wsgi_env(event, app_name):
"""Turn the Lambda/API Gateway request event into a WSGI environment dict.
:param dict event:
The event parameters passed to the Lambda function entrypoint.
:param str app_name:
Name of the API application.
"""
gateway = event['parameters']['gateway']
request = event['parameters']['request']
ctx = event['rawContext']
headers = request['header']
body = six.text_type(json.dumps(request['body']))
# Render the path correctly so connexion/flask will pass the path params to
# the handler function correctly.
# Basically, this replaces "/foo/{param1}/bar/{param2}" with
# "/foo/123/bar/456".
path = gateway['resource-path'].format(**event['parameters']['request']['path'])
environ = {'PATH_INFO': path, 'QUERY_STRING': urlencode(request['querystring']), 'REMOTE_ADDR': ctx['identity']['sourceIp'], 'REQUEST_METHOD': ctx['httpMethod'], 'SCRIPT_NAME': app_name, 'SERVER_NAME': app_name, 'SERVER_PORT': headers.get('X-Forwarded-Port', '80'), 'SERVER_PROTOCOL': 'HTTP/1.1', 'wsgi.version': (1, 0), 'wsgi.url_scheme': headers.get('X-Forwarded-Proto', 'http'), 'wsgi.input': StringIO(body), 'wsgi.errors': StringIO(), 'wsgi.multiprocess': False, 'wsgi.multithread': False, 'wsgi.run_once': False, 'CONTENT_TYPE': headers.get('Content-Type', 'application/json')}
if ctx['httpMethod'] in ['POST', 'PUT', 'PATCH']:
environ['CONTENT_LENGTH'] = str(len(body)) # depends on [control=['if'], data=[]]
for (header_name, header_value) in headers.items():
wsgi_name = 'HTTP_{}'.format(header_name.upper().replace('-', '_'))
environ[wsgi_name] = str(header_value) # depends on [control=['for'], data=[]]
return environ |
def dbg_print(self, indent=0):
    """
    Print out debugging information
    """
    pad = " " * indent
    inner = " " * (indent + 2)
    print("%sA-locs:" % pad)
    for key, aloc in self._alocs.items():
        print("%s<0x%x> %s" % (inner, key, aloc))
    print("%sMemory:" % pad)
    self.memory.dbg_print(indent=indent + 2)
constant[
Print out debugging information
]
call[name[print], parameter[binary_operation[constant[%sA-locs:] <ast.Mod object at 0x7da2590d6920> binary_operation[constant[ ] * name[indent]]]]]
for taget[tuple[[<ast.Name object at 0x7da2047e8af0>, <ast.Name object at 0x7da2047e8400>]]] in starred[call[name[self]._alocs.items, parameter[]]] begin[:]
call[name[print], parameter[binary_operation[constant[%s<0x%x> %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.BinOp object at 0x7da2047ea170>, <ast.Name object at 0x7da2047e8c70>, <ast.Name object at 0x7da2047eb820>]]]]]
call[name[print], parameter[binary_operation[constant[%sMemory:] <ast.Mod object at 0x7da2590d6920> binary_operation[constant[ ] * name[indent]]]]]
call[name[self].memory.dbg_print, parameter[]] | keyword[def] identifier[dbg_print] ( identifier[self] , identifier[indent] = literal[int] ):
literal[string]
identifier[print] ( literal[string] %( literal[string] * identifier[indent] ))
keyword[for] identifier[aloc_id] , identifier[aloc] keyword[in] identifier[self] . identifier[_alocs] . identifier[items] ():
identifier[print] ( literal[string] %( literal[string] *( identifier[indent] + literal[int] ), identifier[aloc_id] , identifier[aloc] ))
identifier[print] ( literal[string] %( literal[string] * identifier[indent] ))
identifier[self] . identifier[memory] . identifier[dbg_print] ( identifier[indent] = identifier[indent] + literal[int] ) | def dbg_print(self, indent=0):
"""
Print out debugging information
"""
print('%sA-locs:' % (' ' * indent))
for (aloc_id, aloc) in self._alocs.items():
print('%s<0x%x> %s' % (' ' * (indent + 2), aloc_id, aloc)) # depends on [control=['for'], data=[]]
print('%sMemory:' % (' ' * indent))
self.memory.dbg_print(indent=indent + 2) |
def eth_getBalance(self, address):
    """Get account balance.

    :param address: account address to look up
    :return: the balance of the account at ``address``
    """
    return self.reader._get_account(address).balance
constant[Get account balance.
:param address:
:return:
]
variable[account] assign[=] call[name[self].reader._get_account, parameter[name[address]]]
return[name[account].balance] | keyword[def] identifier[eth_getBalance] ( identifier[self] , identifier[address] ):
literal[string]
identifier[account] = identifier[self] . identifier[reader] . identifier[_get_account] ( identifier[address] )
keyword[return] identifier[account] . identifier[balance] | def eth_getBalance(self, address):
"""Get account balance.
:param address:
:return:
"""
account = self.reader._get_account(address)
return account.balance |
def _activate_organization(organization):
    """
    Activates an inactivated (soft-deleted) organization as well as any inactive relationships
    """
    # Plain for-loops instead of throwaway list comprehensions: these calls
    # are executed purely for their side effects, so building and discarding
    # result lists is wasted work and obscures intent.
    for record in internal.OrganizationCourse.objects.filter(
            organization_id=organization.id, active=False):
        _activate_organization_course_relationship(record)
    for record in internal.Organization.objects.filter(
            id=organization.id, active=False):
        _activate_record(record)
constant[
Activates an inactivated (soft-deleted) organization as well as any inactive relationships
]
<ast.ListComp object at 0x7da2044c30a0>
<ast.ListComp object at 0x7da2044c10f0> | keyword[def] identifier[_activate_organization] ( identifier[organization] ):
literal[string]
[ identifier[_activate_organization_course_relationship] ( identifier[record] ) keyword[for] identifier[record]
keyword[in] identifier[internal] . identifier[OrganizationCourse] . identifier[objects] . identifier[filter] ( identifier[organization_id] = identifier[organization] . identifier[id] , identifier[active] = keyword[False] )]
[ identifier[_activate_record] ( identifier[record] ) keyword[for] identifier[record]
keyword[in] identifier[internal] . identifier[Organization] . identifier[objects] . identifier[filter] ( identifier[id] = identifier[organization] . identifier[id] , identifier[active] = keyword[False] )] | def _activate_organization(organization):
"""
Activates an inactivated (soft-deleted) organization as well as any inactive relationships
"""
[_activate_organization_course_relationship(record) for record in internal.OrganizationCourse.objects.filter(organization_id=organization.id, active=False)]
[_activate_record(record) for record in internal.Organization.objects.filter(id=organization.id, active=False)] |
def list_cubes(self):
    """ List all available JSON files. """
    for entry in os.listdir(self.directory):
        # Entries without an extension cannot be JSON cube files.
        if '.' not in entry:
            continue
        base, extension = entry.rsplit('.', 1)
        if extension.lower() == 'json':
            yield base
constant[ List all available JSON files. ]
for taget[name[file_name]] in starred[call[name[os].listdir, parameter[name[self].directory]]] begin[:]
if compare[constant[.] in name[file_name]] begin[:]
<ast.Tuple object at 0x7da1b1b0c7c0> assign[=] call[name[file_name].rsplit, parameter[constant[.], constant[1]]]
if compare[call[name[ext].lower, parameter[]] equal[==] constant[json]] begin[:]
<ast.Yield object at 0x7da1b1b0c400> | keyword[def] identifier[list_cubes] ( identifier[self] ):
literal[string]
keyword[for] identifier[file_name] keyword[in] identifier[os] . identifier[listdir] ( identifier[self] . identifier[directory] ):
keyword[if] literal[string] keyword[in] identifier[file_name] :
identifier[name] , identifier[ext] = identifier[file_name] . identifier[rsplit] ( literal[string] , literal[int] )
keyword[if] identifier[ext] . identifier[lower] ()== literal[string] :
keyword[yield] identifier[name] | def list_cubes(self):
""" List all available JSON files. """
for file_name in os.listdir(self.directory):
if '.' in file_name:
(name, ext) = file_name.rsplit('.', 1)
if ext.lower() == 'json':
yield name # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['file_name']] # depends on [control=['for'], data=['file_name']] |
def _find_and_cache_best_function(self, dispatch_type):
    """Finds the best implementation of this function given a type.
    This function caches the result, and uses locking for thread safety.
    Returns:
        Implementing function, in below order of preference:
        1. Explicitly registered implementations (through
        multimethod.implement) for types that 'dispatch_type' either is
        or inherits from directly.
        2. Explicitly registered implementations accepting an abstract type
        (interface) in which dispatch_type participates (through
        abstract_type.register() or the convenience methods).
        3. Default behavior of the multimethod function. This will usually
        raise a NotImplementedError, by convention.
    Raises:
        TypeError: If two implementing functions are registered for
        different abstract types, and 'dispatch_type' participates in
        both, and no order of preference was specified using
        prefer_type.
    """
    # Fast path: return a previously computed dispatch result.
    # NOTE(review): a falsy cached value would be treated as a miss and
    # recomputed here — confirm implementations are always truthy.
    result = self._dispatch_table.get(dispatch_type)
    if result:
        return result
    # The outer try ensures the lock is always released.
    with self._write_lock:
        try:
            dispatch_mro = dispatch_type.mro()
        except TypeError:
            # Not every type has an MRO.
            dispatch_mro = ()
        # best_match is the lowest MRO index seen so far (lower = more
        # specific); None means "only an abstract-type match so far".
        best_match = None
        result_type = None
        for candidate_type, candidate_func in self.implementations:
            if not issubclass(dispatch_type, candidate_type):
                # Skip implementations that are obviously unrelated.
                continue
            try:
                # The candidate implementation may be for a type that's
                # actually in the MRO, or it may be for an abstract type.
                match = dispatch_mro.index(candidate_type)
            except ValueError:
                # This means we have an implementation for an abstract
                # type, which ranks below all concrete types.
                match = None
            if best_match is None:
                if result and match is None:
                    # Already have a result, and no order of preference.
                    # This is probably because the type is a member of two
                    # abstract types and we have separate implementations
                    # for those two abstract types.
                    if self._preferred(candidate_type, over=result_type):
                        result = candidate_func
                        result_type = candidate_type
                    elif self._preferred(result_type, over=candidate_type):
                        # No need to update anything.
                        pass
                    else:
                        raise TypeError(
                            "Two candidate implementations found for "
                            "multimethod function %s (dispatch type %s) "
                            "and neither is preferred." %
                            (self.func_name, dispatch_type))
                else:
                    # First usable candidate (or a concrete match after
                    # only abstract ones): adopt it unconditionally.
                    result = candidate_func
                    result_type = candidate_type
                    best_match = match
            # NOTE(review): '(match or 0)' maps both None (abstract) and
            # MRO index 0 (exact type) to 0, so an exact-type candidate
            # cannot displace an earlier one via this branch — confirm
            # this tie behavior is intended.
            if (match or 0) < (best_match or 0):
                result = candidate_func
                result_type = candidate_type
                best_match = match
        # Cache under the lock so concurrent lookups see a consistent table.
        self._dispatch_table[dispatch_type] = result
    return result
constant[Finds the best implementation of this function given a type.
This function caches the result, and uses locking for thread safety.
Returns:
Implementing function, in below order of preference:
1. Explicitly registered implementations (through
multimethod.implement) for types that 'dispatch_type' either is
or inherits from directly.
2. Explicitly registered implementations accepting an abstract type
(interface) in which dispatch_type participates (through
abstract_type.register() or the convenience methods).
3. Default behavior of the multimethod function. This will usually
raise a NotImplementedError, by convention.
Raises:
TypeError: If two implementing functions are registered for
different abstract types, and 'dispatch_type' participates in
both, and no order of preference was specified using
prefer_type.
]
variable[result] assign[=] call[name[self]._dispatch_table.get, parameter[name[dispatch_type]]]
if name[result] begin[:]
return[name[result]]
with name[self]._write_lock begin[:]
<ast.Try object at 0x7da1b0fb2950>
variable[best_match] assign[=] constant[None]
variable[result_type] assign[=] constant[None]
for taget[tuple[[<ast.Name object at 0x7da1b0fb2e90>, <ast.Name object at 0x7da1b0fb10c0>]]] in starred[name[self].implementations] begin[:]
if <ast.UnaryOp object at 0x7da1b0fb3d90> begin[:]
continue
<ast.Try object at 0x7da1b0fb1b40>
if compare[name[best_match] is constant[None]] begin[:]
if <ast.BoolOp object at 0x7da1b0fb0fd0> begin[:]
if call[name[self]._preferred, parameter[name[candidate_type]]] begin[:]
variable[result] assign[=] name[candidate_func]
variable[result_type] assign[=] name[candidate_type]
if compare[<ast.BoolOp object at 0x7da1b0f91270> less[<] <ast.BoolOp object at 0x7da1b0f923b0>] begin[:]
variable[result] assign[=] name[candidate_func]
variable[result_type] assign[=] name[candidate_type]
variable[best_match] assign[=] name[match]
call[name[self]._dispatch_table][name[dispatch_type]] assign[=] name[result]
return[name[result]] | keyword[def] identifier[_find_and_cache_best_function] ( identifier[self] , identifier[dispatch_type] ):
literal[string]
identifier[result] = identifier[self] . identifier[_dispatch_table] . identifier[get] ( identifier[dispatch_type] )
keyword[if] identifier[result] :
keyword[return] identifier[result]
keyword[with] identifier[self] . identifier[_write_lock] :
keyword[try] :
identifier[dispatch_mro] = identifier[dispatch_type] . identifier[mro] ()
keyword[except] identifier[TypeError] :
identifier[dispatch_mro] =()
identifier[best_match] = keyword[None]
identifier[result_type] = keyword[None]
keyword[for] identifier[candidate_type] , identifier[candidate_func] keyword[in] identifier[self] . identifier[implementations] :
keyword[if] keyword[not] identifier[issubclass] ( identifier[dispatch_type] , identifier[candidate_type] ):
keyword[continue]
keyword[try] :
identifier[match] = identifier[dispatch_mro] . identifier[index] ( identifier[candidate_type] )
keyword[except] identifier[ValueError] :
identifier[match] = keyword[None]
keyword[if] identifier[best_match] keyword[is] keyword[None] :
keyword[if] identifier[result] keyword[and] identifier[match] keyword[is] keyword[None] :
keyword[if] identifier[self] . identifier[_preferred] ( identifier[candidate_type] , identifier[over] = identifier[result_type] ):
identifier[result] = identifier[candidate_func]
identifier[result_type] = identifier[candidate_type]
keyword[elif] identifier[self] . identifier[_preferred] ( identifier[result_type] , identifier[over] = identifier[candidate_type] ):
keyword[pass]
keyword[else] :
keyword[raise] identifier[TypeError] (
literal[string]
literal[string]
literal[string] %
( identifier[self] . identifier[func_name] , identifier[dispatch_type] ))
keyword[else] :
identifier[result] = identifier[candidate_func]
identifier[result_type] = identifier[candidate_type]
identifier[best_match] = identifier[match]
keyword[if] ( identifier[match] keyword[or] literal[int] )<( identifier[best_match] keyword[or] literal[int] ):
identifier[result] = identifier[candidate_func]
identifier[result_type] = identifier[candidate_type]
identifier[best_match] = identifier[match]
identifier[self] . identifier[_dispatch_table] [ identifier[dispatch_type] ]= identifier[result]
keyword[return] identifier[result] | def _find_and_cache_best_function(self, dispatch_type):
"""Finds the best implementation of this function given a type.
This function caches the result, and uses locking for thread safety.
Returns:
Implementing function, in below order of preference:
1. Explicitly registered implementations (through
multimethod.implement) for types that 'dispatch_type' either is
or inherits from directly.
2. Explicitly registered implementations accepting an abstract type
(interface) in which dispatch_type participates (through
abstract_type.register() or the convenience methods).
3. Default behavior of the multimethod function. This will usually
raise a NotImplementedError, by convention.
Raises:
TypeError: If two implementing functions are registered for
different abstract types, and 'dispatch_type' participates in
both, and no order of preference was specified using
prefer_type.
"""
result = self._dispatch_table.get(dispatch_type)
if result:
return result # depends on [control=['if'], data=[]]
# The outer try ensures the lock is always released.
with self._write_lock:
try:
dispatch_mro = dispatch_type.mro() # depends on [control=['try'], data=[]]
except TypeError:
# Not every type has an MRO.
dispatch_mro = () # depends on [control=['except'], data=[]]
best_match = None
result_type = None
for (candidate_type, candidate_func) in self.implementations:
if not issubclass(dispatch_type, candidate_type):
# Skip implementations that are obviously unrelated.
continue # depends on [control=['if'], data=[]]
try:
# The candidate implementation may be for a type that's
# actually in the MRO, or it may be for an abstract type.
match = dispatch_mro.index(candidate_type) # depends on [control=['try'], data=[]]
except ValueError:
# This means we have an implementation for an abstract
# type, which ranks below all concrete types.
match = None # depends on [control=['except'], data=[]]
if best_match is None:
if result and match is None:
# Already have a result, and no order of preference.
# This is probably because the type is a member of two
# abstract types and we have separate implementations
# for those two abstract types.
if self._preferred(candidate_type, over=result_type):
result = candidate_func
result_type = candidate_type # depends on [control=['if'], data=[]]
elif self._preferred(result_type, over=candidate_type):
# No need to update anything.
pass # depends on [control=['if'], data=[]]
else:
raise TypeError('Two candidate implementations found for multimethod function %s (dispatch type %s) and neither is preferred.' % (self.func_name, dispatch_type)) # depends on [control=['if'], data=[]]
else:
result = candidate_func
result_type = candidate_type
best_match = match # depends on [control=['if'], data=['best_match']]
if (match or 0) < (best_match or 0):
result = candidate_func
result_type = candidate_type
best_match = match # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
self._dispatch_table[dispatch_type] = result
return result # depends on [control=['with'], data=[]] |
def check_mailfy(self, query, kwargs=None):
    """
    Verifying a mailfy query in this platform.
    This might be redefined in any class inheriting from Platform. The only
    condition is that any of this should return a dictionary as defined.
    Args:
    -----
        query: The element to be searched.
        kwargs: Dictionary with extra parameters. Just in case.
    Return:
    -------
        Returns the collected data if exists or None if not.
    """
    # A mutable default ({}) is shared across calls; use None instead.
    # kwargs is currently unused, kept for interface compatibility.
    import requests
    s = requests.Session()
    # Initial GET primes the session (cookies) before posting the check.
    r1 = s.get('https://www.infojobs.net')
    # Launching the query to InfoJobs' email-registration check endpoint.
    r2 = s.post(
        'https://www.infojobs.net/candidate/profile/check-email-registered.xhtml',
        data={"email": query},
    )
    if '{"email_is_secure":true,"email":true}' in r2.text:
        return r2.text
    return None
constant[
Verifying a mailfy query in this platform.
This might be redefined in any class inheriting from Platform. The only
condition is that any of this should return a dictionary as defined.
Args:
-----
query: The element to be searched.
kwargs: Dictionary with extra parameters. Just in case.
Return:
-------
Returns the collected data if exists or None if not.
]
import module[requests]
variable[s] assign[=] call[name[requests].Session, parameter[]]
variable[r1] assign[=] call[name[s].get, parameter[constant[https://www.infojobs.net]]]
variable[r2] assign[=] call[name[s].post, parameter[constant[https://www.infojobs.net/candidate/profile/check-email-registered.xhtml]]]
if compare[constant[{"email_is_secure":true,"email":true}] in name[r2].text] begin[:]
return[name[r2].text]
return[constant[None]] | keyword[def] identifier[check_mailfy] ( identifier[self] , identifier[query] , identifier[kwargs] ={}):
literal[string]
keyword[import] identifier[requests]
identifier[s] = identifier[requests] . identifier[Session] ()
identifier[r1] = identifier[s] . identifier[get] ( literal[string] )
identifier[r2] = identifier[s] . identifier[post] (
literal[string] ,
identifier[data] ={ literal[string] : identifier[query] },
)
keyword[if] literal[string] keyword[in] identifier[r2] . identifier[text] :
keyword[return] identifier[r2] . identifier[text]
keyword[return] keyword[None] | def check_mailfy(self, query, kwargs={}):
"""
Verifying a mailfy query in this platform.
This might be redefined in any class inheriting from Platform. The only
condition is that any of this should return a dictionary as defined.
Args:
-----
query: The element to be searched.
kwargs: Dictionary with extra parameters. Just in case.
Return:
-------
Returns the collected data if exists or None if not.
"""
import requests
s = requests.Session()
# Getting the first response to grab the csrf_token
r1 = s.get('https://www.infojobs.net')
# Launching the query to Instagram
r2 = s.post('https://www.infojobs.net/candidate/profile/check-email-registered.xhtml', data={'email': query})
if '{"email_is_secure":true,"email":true}' in r2.text:
return r2.text # depends on [control=['if'], data=[]]
return None |
def prompt_for_numbered_choice(self, choices, title=None, prompt=">"):
    """
    Displays a numbered vertical list of choices from the provided list of strings.

    :param choices: list of choices to display
    :param title: optional title to display above the numbered list
    :param prompt: prompt string. Default is ">"
    :return: an int representing the selected index.
    :raises Exception: if ``choices`` is ``None`` or empty.
    """
    if choices is None or len(choices) < 1:
        raise Exception('choices list must contain at least one element.')
    # Loop until the user enters a valid number within range.
    while True:
        self.clear()
        if title:
            self.screen.println(title + "\n")
        for i in range(len(choices)):
            # NOTE(review): entries use print() while the title goes through
            # self.screen.println — confirm this mix is intended.
            print(' {:<4}{choice}'.format(str(i + 1) + ') ', choice=choices[i]))
        answer = self.screen.input('\n{} '.format(prompt))
        try:
            index = int(answer) - 1
        except ValueError:
            # Non-numeric input: redraw the menu and ask again. (Narrowed
            # from a broad `except Exception` that hid real errors.)
            continue
        if 0 <= index < len(choices):
            return index
constant[
Displays a numbered vertical list of choices from the provided list of strings.
:param choices: list of choices to display
:param title: optional title to display above the numbered list
:param prompt: prompt string. Default is ">"
:return: an int representing the selected index.
]
if <ast.BoolOp object at 0x7da1b133ea70> begin[:]
<ast.Raise object at 0x7da1b133d8a0>
while constant[True] begin[:]
call[name[self].clear, parameter[]]
if name[title] begin[:]
call[name[self].screen.println, parameter[binary_operation[name[title] + constant[
]]]]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], call[name[len], parameter[name[choices]]]]]] begin[:]
call[name[print], parameter[call[constant[ {:<4}{choice}].format, parameter[binary_operation[call[name[str], parameter[binary_operation[name[i] + constant[1]]]] + constant[) ]]]]]]
variable[answer] assign[=] call[name[self].screen.input, parameter[call[constant[
{} ].format, parameter[name[prompt]]]]]
<ast.Try object at 0x7da1b133c9d0> | keyword[def] identifier[prompt_for_numbered_choice] ( identifier[self] , identifier[choices] , identifier[title] = keyword[None] , identifier[prompt] = literal[string] ):
literal[string]
keyword[if] identifier[choices] keyword[is] keyword[None] keyword[or] identifier[len] ( identifier[choices] )< literal[int] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[while] keyword[True] :
identifier[self] . identifier[clear] ()
keyword[if] identifier[title] :
identifier[self] . identifier[screen] . identifier[println] ( identifier[title] + literal[string] )
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[choices] )):
identifier[print] ( literal[string] . identifier[format] ( identifier[str] ( identifier[i] + literal[int] )+ literal[string] , identifier[choice] = identifier[choices] [ identifier[i] ]))
identifier[answer] = identifier[self] . identifier[screen] . identifier[input] ( literal[string] . identifier[format] ( identifier[prompt] ))
keyword[try] :
identifier[index] = identifier[int] ( identifier[answer] )- literal[int]
keyword[if] literal[int] <= identifier[index] < identifier[len] ( identifier[choices] ):
keyword[return] identifier[index]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[continue] | def prompt_for_numbered_choice(self, choices, title=None, prompt='>'):
"""
Displays a numbered vertical list of choices from the provided list of strings.
:param choices: list of choices to display
:param title: optional title to display above the numbered list
:param prompt: prompt string. Default is ">"
:return: an int representing the selected index.
"""
if choices is None or len(choices) < 1:
raise Exception('choices list must contain at least one element.') # depends on [control=['if'], data=[]]
while True:
self.clear()
if title:
self.screen.println(title + '\n') # depends on [control=['if'], data=[]]
for i in range(0, len(choices)):
print(' {:<4}{choice}'.format(str(i + 1) + ') ', choice=choices[i])) # depends on [control=['for'], data=['i']]
answer = self.screen.input('\n{} '.format(prompt))
try:
index = int(answer) - 1
if 0 <= index < len(choices):
return index # depends on [control=['if'], data=['index']] # depends on [control=['try'], data=[]]
except Exception as e:
continue # depends on [control=['except'], data=[]] # depends on [control=['while'], data=[]] |
def _check_lookup_prop(self, result_data):
"""Checks that selected lookup property can be used for this testcase."""
if not self._lookup_prop:
return False
if not result_data.get("id") and self._lookup_prop != "name":
return False
if not result_data.get("title") and self._lookup_prop == "name":
return False
return True | def function[_check_lookup_prop, parameter[self, result_data]]:
constant[Checks that selected lookup property can be used for this testcase.]
if <ast.UnaryOp object at 0x7da1b23d4400> begin[:]
return[constant[False]]
if <ast.BoolOp object at 0x7da1b23d6cb0> begin[:]
return[constant[False]]
if <ast.BoolOp object at 0x7da1b23d7430> begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[_check_lookup_prop] ( identifier[self] , identifier[result_data] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_lookup_prop] :
keyword[return] keyword[False]
keyword[if] keyword[not] identifier[result_data] . identifier[get] ( literal[string] ) keyword[and] identifier[self] . identifier[_lookup_prop] != literal[string] :
keyword[return] keyword[False]
keyword[if] keyword[not] identifier[result_data] . identifier[get] ( literal[string] ) keyword[and] identifier[self] . identifier[_lookup_prop] == literal[string] :
keyword[return] keyword[False]
keyword[return] keyword[True] | def _check_lookup_prop(self, result_data):
"""Checks that selected lookup property can be used for this testcase."""
if not self._lookup_prop:
return False # depends on [control=['if'], data=[]]
if not result_data.get('id') and self._lookup_prop != 'name':
return False # depends on [control=['if'], data=[]]
if not result_data.get('title') and self._lookup_prop == 'name':
return False # depends on [control=['if'], data=[]]
return True |
def delete_asset(self, asset_id):
"""Deletes an ``Asset``.
arg: asset_id (osid.id.Id): the ``Id`` of the ``Asset`` to
remove
raise: NotFound - ``asset_id`` not found
raise: NullArgument - ``asset_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceAdminSession.delete_resource_template
collection = JSONClientValidated('repository',
collection='Asset',
runtime=self._runtime)
if not isinstance(asset_id, ABCId):
raise errors.InvalidArgument('the argument is not a valid OSID Id')
asset_map = collection.find_one(
dict({'_id': ObjectId(asset_id.get_identifier())},
**self._view_filter()))
objects.Asset(osid_object_map=asset_map, runtime=self._runtime, proxy=self._proxy)._delete()
collection.delete_one({'_id': ObjectId(asset_id.get_identifier())}) | def function[delete_asset, parameter[self, asset_id]]:
constant[Deletes an ``Asset``.
arg: asset_id (osid.id.Id): the ``Id`` of the ``Asset`` to
remove
raise: NotFound - ``asset_id`` not found
raise: NullArgument - ``asset_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
]
variable[collection] assign[=] call[name[JSONClientValidated], parameter[constant[repository]]]
if <ast.UnaryOp object at 0x7da1b0a65a20> begin[:]
<ast.Raise object at 0x7da1b0a66140>
variable[asset_map] assign[=] call[name[collection].find_one, parameter[call[name[dict], parameter[dictionary[[<ast.Constant object at 0x7da1b0a64a90>], [<ast.Call object at 0x7da1b0a64d90>]]]]]]
call[call[name[objects].Asset, parameter[]]._delete, parameter[]]
call[name[collection].delete_one, parameter[dictionary[[<ast.Constant object at 0x7da1b0a66aa0>], [<ast.Call object at 0x7da1b0a663b0>]]]] | keyword[def] identifier[delete_asset] ( identifier[self] , identifier[asset_id] ):
literal[string]
identifier[collection] = identifier[JSONClientValidated] ( literal[string] ,
identifier[collection] = literal[string] ,
identifier[runtime] = identifier[self] . identifier[_runtime] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[asset_id] , identifier[ABCId] ):
keyword[raise] identifier[errors] . identifier[InvalidArgument] ( literal[string] )
identifier[asset_map] = identifier[collection] . identifier[find_one] (
identifier[dict] ({ literal[string] : identifier[ObjectId] ( identifier[asset_id] . identifier[get_identifier] ())},
** identifier[self] . identifier[_view_filter] ()))
identifier[objects] . identifier[Asset] ( identifier[osid_object_map] = identifier[asset_map] , identifier[runtime] = identifier[self] . identifier[_runtime] , identifier[proxy] = identifier[self] . identifier[_proxy] ). identifier[_delete] ()
identifier[collection] . identifier[delete_one] ({ literal[string] : identifier[ObjectId] ( identifier[asset_id] . identifier[get_identifier] ())}) | def delete_asset(self, asset_id):
"""Deletes an ``Asset``.
arg: asset_id (osid.id.Id): the ``Id`` of the ``Asset`` to
remove
raise: NotFound - ``asset_id`` not found
raise: NullArgument - ``asset_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceAdminSession.delete_resource_template
collection = JSONClientValidated('repository', collection='Asset', runtime=self._runtime)
if not isinstance(asset_id, ABCId):
raise errors.InvalidArgument('the argument is not a valid OSID Id') # depends on [control=['if'], data=[]]
asset_map = collection.find_one(dict({'_id': ObjectId(asset_id.get_identifier())}, **self._view_filter()))
objects.Asset(osid_object_map=asset_map, runtime=self._runtime, proxy=self._proxy)._delete()
collection.delete_one({'_id': ObjectId(asset_id.get_identifier())}) |
def gen_lines_from_binary_files(
files: Iterable[BinaryIO],
encoding: str = UTF8) -> Generator[str, None, None]:
"""
Generates lines from binary files.
Strips out newlines.
Args:
files: iterable of :class:`BinaryIO` file-like objects
encoding: encoding to use
Yields:
each line of all the files
"""
for file in files:
for byteline in file:
line = byteline.decode(encoding).strip()
yield line | def function[gen_lines_from_binary_files, parameter[files, encoding]]:
constant[
Generates lines from binary files.
Strips out newlines.
Args:
files: iterable of :class:`BinaryIO` file-like objects
encoding: encoding to use
Yields:
each line of all the files
]
for taget[name[file]] in starred[name[files]] begin[:]
for taget[name[byteline]] in starred[name[file]] begin[:]
variable[line] assign[=] call[call[name[byteline].decode, parameter[name[encoding]]].strip, parameter[]]
<ast.Yield object at 0x7da1b185fe50> | keyword[def] identifier[gen_lines_from_binary_files] (
identifier[files] : identifier[Iterable] [ identifier[BinaryIO] ],
identifier[encoding] : identifier[str] = identifier[UTF8] )-> identifier[Generator] [ identifier[str] , keyword[None] , keyword[None] ]:
literal[string]
keyword[for] identifier[file] keyword[in] identifier[files] :
keyword[for] identifier[byteline] keyword[in] identifier[file] :
identifier[line] = identifier[byteline] . identifier[decode] ( identifier[encoding] ). identifier[strip] ()
keyword[yield] identifier[line] | def gen_lines_from_binary_files(files: Iterable[BinaryIO], encoding: str=UTF8) -> Generator[str, None, None]:
"""
Generates lines from binary files.
Strips out newlines.
Args:
files: iterable of :class:`BinaryIO` file-like objects
encoding: encoding to use
Yields:
each line of all the files
"""
for file in files:
for byteline in file:
line = byteline.decode(encoding).strip()
yield line # depends on [control=['for'], data=['byteline']] # depends on [control=['for'], data=['file']] |
def main(
gpu:Param("GPU to run on", str)=None,
lr: Param("Learning rate", float)=1e-3,
size: Param("Size (px: 128,192,224)", int)=128,
debias_mom: Param("Debias statistics", bool)=False,
debias_sqr: Param("Debias statistics", bool)=False,
opt: Param("Optimizer: 'adam','genopt','rms','sgd'", str)='genopt',
alpha: Param("Alpha", float)=0.99,
mom: Param("Momentum", float)=0.9,
eps: Param("epsilon", float)=1e-7,
decay: Param("Decay AvgStatistic (momentum)", bool)=False,
epochs: Param("Number of epochs", int)=5,
bs: Param("Batch size", int)=128,
):
"""Distributed training of Imagenette.
Fastest multi-gpu speed is if you run with: python -m fastai.launch"""
# Pick one of these
gpu = setup_distrib(gpu)
if gpu is None: bs *= torch.cuda.device_count()
moms = (mom,mom)
stats = [
AvgStatistic('momentum', mom, scope=StatScope.Weight, decay=decay, debias=debias_mom),
AvgSquare ('alpha', alpha, scope=StatScope.Weight, debias=debias_sqr),
ConstStatistic('eps', eps), ConstStatistic('clip', 0.001),
]
if opt=='adam' : opt_func = partial(optim.Adam, betas=(mom,alpha), eps=eps)
elif opt=='rms' : opt_func = partial(optim.RMSprop, alpha=alpha)
elif opt=='genopt': opt_func = partial(GeneralOptimizer, on_step=on_step, stats=stats)
else: raise Exception(f'unknown opt: {opt}')
#opt_func = optim.SGD
#learn = (cnn_learner(data, models.xresnet50, pretrained=False, concat_pool=False, lin_ftrs=[], split_on=bn_and_final,
print(f'lr: {lr}; size: {size}; debias_mom: {debias_mom}; debias_sqr: {debias_sqr}; opt: {opt}; alpha: {alpha}; mom: {mom}; eps: {eps}; decay: {decay}')
print('imagenette')
get_learner(lr, size, False, bs, opt_func, gpu, epochs)
gc.collect()
print('imagewoof')
get_learner(lr, size, True, bs, opt_func, gpu, epochs) | def function[main, parameter[gpu, lr, size, debias_mom, debias_sqr, opt, alpha, mom, eps, decay, epochs, bs]]:
constant[Distributed training of Imagenette.
Fastest multi-gpu speed is if you run with: python -m fastai.launch]
variable[gpu] assign[=] call[name[setup_distrib], parameter[name[gpu]]]
if compare[name[gpu] is constant[None]] begin[:]
<ast.AugAssign object at 0x7da1b202a980>
variable[moms] assign[=] tuple[[<ast.Name object at 0x7da1b2028130>, <ast.Name object at 0x7da1b202be50>]]
variable[stats] assign[=] list[[<ast.Call object at 0x7da1b2028f40>, <ast.Call object at 0x7da1b2029d80>, <ast.Call object at 0x7da1b202a530>, <ast.Call object at 0x7da1b202a9b0>]]
if compare[name[opt] equal[==] constant[adam]] begin[:]
variable[opt_func] assign[=] call[name[partial], parameter[name[optim].Adam]]
call[name[print], parameter[<ast.JoinedStr object at 0x7da1b202b460>]]
call[name[print], parameter[constant[imagenette]]]
call[name[get_learner], parameter[name[lr], name[size], constant[False], name[bs], name[opt_func], name[gpu], name[epochs]]]
call[name[gc].collect, parameter[]]
call[name[print], parameter[constant[imagewoof]]]
call[name[get_learner], parameter[name[lr], name[size], constant[True], name[bs], name[opt_func], name[gpu], name[epochs]]] | keyword[def] identifier[main] (
identifier[gpu] : identifier[Param] ( literal[string] , identifier[str] )= keyword[None] ,
identifier[lr] : identifier[Param] ( literal[string] , identifier[float] )= literal[int] ,
identifier[size] : identifier[Param] ( literal[string] , identifier[int] )= literal[int] ,
identifier[debias_mom] : identifier[Param] ( literal[string] , identifier[bool] )= keyword[False] ,
identifier[debias_sqr] : identifier[Param] ( literal[string] , identifier[bool] )= keyword[False] ,
identifier[opt] : identifier[Param] ( literal[string] , identifier[str] )= literal[string] ,
identifier[alpha] : identifier[Param] ( literal[string] , identifier[float] )= literal[int] ,
identifier[mom] : identifier[Param] ( literal[string] , identifier[float] )= literal[int] ,
identifier[eps] : identifier[Param] ( literal[string] , identifier[float] )= literal[int] ,
identifier[decay] : identifier[Param] ( literal[string] , identifier[bool] )= keyword[False] ,
identifier[epochs] : identifier[Param] ( literal[string] , identifier[int] )= literal[int] ,
identifier[bs] : identifier[Param] ( literal[string] , identifier[int] )= literal[int] ,
):
literal[string]
identifier[gpu] = identifier[setup_distrib] ( identifier[gpu] )
keyword[if] identifier[gpu] keyword[is] keyword[None] : identifier[bs] *= identifier[torch] . identifier[cuda] . identifier[device_count] ()
identifier[moms] =( identifier[mom] , identifier[mom] )
identifier[stats] =[
identifier[AvgStatistic] ( literal[string] , identifier[mom] , identifier[scope] = identifier[StatScope] . identifier[Weight] , identifier[decay] = identifier[decay] , identifier[debias] = identifier[debias_mom] ),
identifier[AvgSquare] ( literal[string] , identifier[alpha] , identifier[scope] = identifier[StatScope] . identifier[Weight] , identifier[debias] = identifier[debias_sqr] ),
identifier[ConstStatistic] ( literal[string] , identifier[eps] ), identifier[ConstStatistic] ( literal[string] , literal[int] ),
]
keyword[if] identifier[opt] == literal[string] : identifier[opt_func] = identifier[partial] ( identifier[optim] . identifier[Adam] , identifier[betas] =( identifier[mom] , identifier[alpha] ), identifier[eps] = identifier[eps] )
keyword[elif] identifier[opt] == literal[string] : identifier[opt_func] = identifier[partial] ( identifier[optim] . identifier[RMSprop] , identifier[alpha] = identifier[alpha] )
keyword[elif] identifier[opt] == literal[string] : identifier[opt_func] = identifier[partial] ( identifier[GeneralOptimizer] , identifier[on_step] = identifier[on_step] , identifier[stats] = identifier[stats] )
keyword[else] : keyword[raise] identifier[Exception] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[get_learner] ( identifier[lr] , identifier[size] , keyword[False] , identifier[bs] , identifier[opt_func] , identifier[gpu] , identifier[epochs] )
identifier[gc] . identifier[collect] ()
identifier[print] ( literal[string] )
identifier[get_learner] ( identifier[lr] , identifier[size] , keyword[True] , identifier[bs] , identifier[opt_func] , identifier[gpu] , identifier[epochs] ) | def main(gpu: Param('GPU to run on', str)=None, lr: Param('Learning rate', float)=0.001, size: Param('Size (px: 128,192,224)', int)=128, debias_mom: Param('Debias statistics', bool)=False, debias_sqr: Param('Debias statistics', bool)=False, opt: Param("Optimizer: 'adam','genopt','rms','sgd'", str)='genopt', alpha: Param('Alpha', float)=0.99, mom: Param('Momentum', float)=0.9, eps: Param('epsilon', float)=1e-07, decay: Param('Decay AvgStatistic (momentum)', bool)=False, epochs: Param('Number of epochs', int)=5, bs: Param('Batch size', int)=128):
"""Distributed training of Imagenette.
Fastest multi-gpu speed is if you run with: python -m fastai.launch"""
# Pick one of these
gpu = setup_distrib(gpu)
if gpu is None:
bs *= torch.cuda.device_count() # depends on [control=['if'], data=[]]
moms = (mom, mom)
stats = [AvgStatistic('momentum', mom, scope=StatScope.Weight, decay=decay, debias=debias_mom), AvgSquare('alpha', alpha, scope=StatScope.Weight, debias=debias_sqr), ConstStatistic('eps', eps), ConstStatistic('clip', 0.001)]
if opt == 'adam':
opt_func = partial(optim.Adam, betas=(mom, alpha), eps=eps) # depends on [control=['if'], data=[]]
elif opt == 'rms':
opt_func = partial(optim.RMSprop, alpha=alpha) # depends on [control=['if'], data=[]]
elif opt == 'genopt':
opt_func = partial(GeneralOptimizer, on_step=on_step, stats=stats) # depends on [control=['if'], data=[]]
else:
raise Exception(f'unknown opt: {opt}')
#opt_func = optim.SGD
#learn = (cnn_learner(data, models.xresnet50, pretrained=False, concat_pool=False, lin_ftrs=[], split_on=bn_and_final,
print(f'lr: {lr}; size: {size}; debias_mom: {debias_mom}; debias_sqr: {debias_sqr}; opt: {opt}; alpha: {alpha}; mom: {mom}; eps: {eps}; decay: {decay}')
print('imagenette')
get_learner(lr, size, False, bs, opt_func, gpu, epochs)
gc.collect()
print('imagewoof')
get_learner(lr, size, True, bs, opt_func, gpu, epochs) |
def populate_iteration(self, iteration):
"""Parse genotypes from the file and iteration with relevant marker \
details.
:param iteration: ParseLocus object which is returned per iteration
:return: True indicates current locus is valid.
StopIteration is thrown if the marker reaches the end of the file or
the valid genomic region for analysis.
"""
cur_idx = iteration.cur_idx
if cur_idx < self.locus_count:
iteration.chr = self.markers[cur_idx][0]
iteration.pos = self.markers[cur_idx][1]
iteration.rsid = self.rsids[cur_idx]
iteration.major_allele = self.alleles[cur_idx][0]
iteration.minor_allele = self.alleles[cur_idx][1]
iteration.allele_count2 = self.allele_count2s[cur_idx]
iteration.genotype_data = self.genotypes[cur_idx, :]
hetero = numpy.sum(iteration.genotype_data==1)
iteration.min_allele_count = numpy.sum(
iteration.genotype_data==2)*2 + hetero
iteration.maj_allele_count = numpy.sum(
iteration.genotype_data==0)*2 + hetero
return True
else:
raise StopIteration | def function[populate_iteration, parameter[self, iteration]]:
constant[Parse genotypes from the file and iteration with relevant marker details.
:param iteration: ParseLocus object which is returned per iteration
:return: True indicates current locus is valid.
StopIteration is thrown if the marker reaches the end of the file or
the valid genomic region for analysis.
]
variable[cur_idx] assign[=] name[iteration].cur_idx
if compare[name[cur_idx] less[<] name[self].locus_count] begin[:]
name[iteration].chr assign[=] call[call[name[self].markers][name[cur_idx]]][constant[0]]
name[iteration].pos assign[=] call[call[name[self].markers][name[cur_idx]]][constant[1]]
name[iteration].rsid assign[=] call[name[self].rsids][name[cur_idx]]
name[iteration].major_allele assign[=] call[call[name[self].alleles][name[cur_idx]]][constant[0]]
name[iteration].minor_allele assign[=] call[call[name[self].alleles][name[cur_idx]]][constant[1]]
name[iteration].allele_count2 assign[=] call[name[self].allele_count2s][name[cur_idx]]
name[iteration].genotype_data assign[=] call[name[self].genotypes][tuple[[<ast.Name object at 0x7da2041d9300>, <ast.Slice object at 0x7da2041dbd90>]]]
variable[hetero] assign[=] call[name[numpy].sum, parameter[compare[name[iteration].genotype_data equal[==] constant[1]]]]
name[iteration].min_allele_count assign[=] binary_operation[binary_operation[call[name[numpy].sum, parameter[compare[name[iteration].genotype_data equal[==] constant[2]]]] * constant[2]] + name[hetero]]
name[iteration].maj_allele_count assign[=] binary_operation[binary_operation[call[name[numpy].sum, parameter[compare[name[iteration].genotype_data equal[==] constant[0]]]] * constant[2]] + name[hetero]]
return[constant[True]] | keyword[def] identifier[populate_iteration] ( identifier[self] , identifier[iteration] ):
literal[string]
identifier[cur_idx] = identifier[iteration] . identifier[cur_idx]
keyword[if] identifier[cur_idx] < identifier[self] . identifier[locus_count] :
identifier[iteration] . identifier[chr] = identifier[self] . identifier[markers] [ identifier[cur_idx] ][ literal[int] ]
identifier[iteration] . identifier[pos] = identifier[self] . identifier[markers] [ identifier[cur_idx] ][ literal[int] ]
identifier[iteration] . identifier[rsid] = identifier[self] . identifier[rsids] [ identifier[cur_idx] ]
identifier[iteration] . identifier[major_allele] = identifier[self] . identifier[alleles] [ identifier[cur_idx] ][ literal[int] ]
identifier[iteration] . identifier[minor_allele] = identifier[self] . identifier[alleles] [ identifier[cur_idx] ][ literal[int] ]
identifier[iteration] . identifier[allele_count2] = identifier[self] . identifier[allele_count2s] [ identifier[cur_idx] ]
identifier[iteration] . identifier[genotype_data] = identifier[self] . identifier[genotypes] [ identifier[cur_idx] ,:]
identifier[hetero] = identifier[numpy] . identifier[sum] ( identifier[iteration] . identifier[genotype_data] == literal[int] )
identifier[iteration] . identifier[min_allele_count] = identifier[numpy] . identifier[sum] (
identifier[iteration] . identifier[genotype_data] == literal[int] )* literal[int] + identifier[hetero]
identifier[iteration] . identifier[maj_allele_count] = identifier[numpy] . identifier[sum] (
identifier[iteration] . identifier[genotype_data] == literal[int] )* literal[int] + identifier[hetero]
keyword[return] keyword[True]
keyword[else] :
keyword[raise] identifier[StopIteration] | def populate_iteration(self, iteration):
"""Parse genotypes from the file and iteration with relevant marker details.
:param iteration: ParseLocus object which is returned per iteration
:return: True indicates current locus is valid.
StopIteration is thrown if the marker reaches the end of the file or
the valid genomic region for analysis.
"""
cur_idx = iteration.cur_idx
if cur_idx < self.locus_count:
iteration.chr = self.markers[cur_idx][0]
iteration.pos = self.markers[cur_idx][1]
iteration.rsid = self.rsids[cur_idx]
iteration.major_allele = self.alleles[cur_idx][0]
iteration.minor_allele = self.alleles[cur_idx][1]
iteration.allele_count2 = self.allele_count2s[cur_idx]
iteration.genotype_data = self.genotypes[cur_idx, :]
hetero = numpy.sum(iteration.genotype_data == 1)
iteration.min_allele_count = numpy.sum(iteration.genotype_data == 2) * 2 + hetero
iteration.maj_allele_count = numpy.sum(iteration.genotype_data == 0) * 2 + hetero
return True # depends on [control=['if'], data=['cur_idx']]
else:
raise StopIteration |
def conv_lstm_2d(inputs, state, output_channels,
kernel_size=5, name=None, spatial_dims=None):
"""2D Convolutional LSTM."""
input_shape = common_layers.shape_list(inputs)
batch_size, input_channels = input_shape[0], input_shape[-1]
if spatial_dims is None:
input_shape = input_shape[1:]
else:
input_shape = spatial_dims + [input_channels]
cell = tf.contrib.rnn.ConvLSTMCell(
2, input_shape, output_channels,
[kernel_size, kernel_size], name=name)
if state is None:
state = cell.zero_state(batch_size, tf.float32)
outputs, new_state = cell(inputs, state)
return outputs, new_state | def function[conv_lstm_2d, parameter[inputs, state, output_channels, kernel_size, name, spatial_dims]]:
constant[2D Convolutional LSTM.]
variable[input_shape] assign[=] call[name[common_layers].shape_list, parameter[name[inputs]]]
<ast.Tuple object at 0x7da1b1ffbdc0> assign[=] tuple[[<ast.Subscript object at 0x7da1b1ff9510>, <ast.Subscript object at 0x7da1b1ff9f30>]]
if compare[name[spatial_dims] is constant[None]] begin[:]
variable[input_shape] assign[=] call[name[input_shape]][<ast.Slice object at 0x7da1b1ffa980>]
variable[cell] assign[=] call[name[tf].contrib.rnn.ConvLSTMCell, parameter[constant[2], name[input_shape], name[output_channels], list[[<ast.Name object at 0x7da1b1ffb100>, <ast.Name object at 0x7da1b1ff9cc0>]]]]
if compare[name[state] is constant[None]] begin[:]
variable[state] assign[=] call[name[cell].zero_state, parameter[name[batch_size], name[tf].float32]]
<ast.Tuple object at 0x7da1b1ff9c60> assign[=] call[name[cell], parameter[name[inputs], name[state]]]
return[tuple[[<ast.Name object at 0x7da1b1ff86a0>, <ast.Name object at 0x7da1b1ff9ab0>]]] | keyword[def] identifier[conv_lstm_2d] ( identifier[inputs] , identifier[state] , identifier[output_channels] ,
identifier[kernel_size] = literal[int] , identifier[name] = keyword[None] , identifier[spatial_dims] = keyword[None] ):
literal[string]
identifier[input_shape] = identifier[common_layers] . identifier[shape_list] ( identifier[inputs] )
identifier[batch_size] , identifier[input_channels] = identifier[input_shape] [ literal[int] ], identifier[input_shape] [- literal[int] ]
keyword[if] identifier[spatial_dims] keyword[is] keyword[None] :
identifier[input_shape] = identifier[input_shape] [ literal[int] :]
keyword[else] :
identifier[input_shape] = identifier[spatial_dims] +[ identifier[input_channels] ]
identifier[cell] = identifier[tf] . identifier[contrib] . identifier[rnn] . identifier[ConvLSTMCell] (
literal[int] , identifier[input_shape] , identifier[output_channels] ,
[ identifier[kernel_size] , identifier[kernel_size] ], identifier[name] = identifier[name] )
keyword[if] identifier[state] keyword[is] keyword[None] :
identifier[state] = identifier[cell] . identifier[zero_state] ( identifier[batch_size] , identifier[tf] . identifier[float32] )
identifier[outputs] , identifier[new_state] = identifier[cell] ( identifier[inputs] , identifier[state] )
keyword[return] identifier[outputs] , identifier[new_state] | def conv_lstm_2d(inputs, state, output_channels, kernel_size=5, name=None, spatial_dims=None):
"""2D Convolutional LSTM."""
input_shape = common_layers.shape_list(inputs)
(batch_size, input_channels) = (input_shape[0], input_shape[-1])
if spatial_dims is None:
input_shape = input_shape[1:] # depends on [control=['if'], data=[]]
else:
input_shape = spatial_dims + [input_channels]
cell = tf.contrib.rnn.ConvLSTMCell(2, input_shape, output_channels, [kernel_size, kernel_size], name=name)
if state is None:
state = cell.zero_state(batch_size, tf.float32) # depends on [control=['if'], data=['state']]
(outputs, new_state) = cell(inputs, state)
return (outputs, new_state) |
def use(cls, name, method: [str, Set, List], url=None):
""" interface helper function"""
if not isinstance(method, (str, list, set, tuple)):
raise BaseException('Invalid type of method: %s' % type(method).__name__)
if isinstance(method, str):
method = {method}
# TODO: check methods available
cls._interface[name] = [{'method': method, 'url': url}] | def function[use, parameter[cls, name, method, url]]:
constant[ interface helper function]
if <ast.UnaryOp object at 0x7da1aff37b20> begin[:]
<ast.Raise object at 0x7da1aff37790>
if call[name[isinstance], parameter[name[method], name[str]]] begin[:]
variable[method] assign[=] <ast.Set object at 0x7da1aff37220>
call[name[cls]._interface][name[name]] assign[=] list[[<ast.Dict object at 0x7da1aff37a60>]] | keyword[def] identifier[use] ( identifier[cls] , identifier[name] , identifier[method] :[ identifier[str] , identifier[Set] , identifier[List] ], identifier[url] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[method] ,( identifier[str] , identifier[list] , identifier[set] , identifier[tuple] )):
keyword[raise] identifier[BaseException] ( literal[string] % identifier[type] ( identifier[method] ). identifier[__name__] )
keyword[if] identifier[isinstance] ( identifier[method] , identifier[str] ):
identifier[method] ={ identifier[method] }
identifier[cls] . identifier[_interface] [ identifier[name] ]=[{ literal[string] : identifier[method] , literal[string] : identifier[url] }] | def use(cls, name, method: [str, Set, List], url=None):
""" interface helper function"""
if not isinstance(method, (str, list, set, tuple)):
raise BaseException('Invalid type of method: %s' % type(method).__name__) # depends on [control=['if'], data=[]]
if isinstance(method, str):
method = {method} # depends on [control=['if'], data=[]]
# TODO: check methods available
cls._interface[name] = [{'method': method, 'url': url}] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.