code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def mentions(text, gcube_token=None, lang=DEFAULT_LANG, api=DEFAULT_SPOT_API):
    """
    Find possible mentions in a text, do not link them to any entity.

    :param text: the text where to find mentions.
    :param gcube_token: the authentication token provided by the D4Science infrastructure.
    :param lang: the Wikipedia language.
    :param api: the API endpoint.
    """
    # Both form fields are sent UTF-8 encoded, as the service expects.
    payload = [
        ("text", text.encode("utf-8")),
        ("lang", lang.encode("utf-8")),
    ]
    response = _issue_request(api, payload, gcube_token)
    if not response:
        return None
    return MentionsResponse(response)
constant[
Find possible mentions in a text, do not link them to any entity.
:param text: the text where to find mentions.
:param gcube_token: the authentication token provided by the D4Science infrastructure.
:param lang: the Wikipedia language.
:param api: the API endpoint.
]
variable[payload] assign[=] list[[<ast.Tuple object at 0x7da1b04790c0>, <ast.Tuple object at 0x7da1b0478490>]]
variable[json_response] assign[=] call[name[_issue_request], parameter[name[api], name[payload], name[gcube_token]]]
return[<ast.IfExp object at 0x7da1b047a770>] | keyword[def] identifier[mentions] ( identifier[text] , identifier[gcube_token] = keyword[None] , identifier[lang] = identifier[DEFAULT_LANG] , identifier[api] = identifier[DEFAULT_SPOT_API] ):
literal[string]
identifier[payload] =[( literal[string] , identifier[text] . identifier[encode] ( literal[string] )),
( literal[string] , identifier[lang] . identifier[encode] ( literal[string] ))]
identifier[json_response] = identifier[_issue_request] ( identifier[api] , identifier[payload] , identifier[gcube_token] )
keyword[return] identifier[MentionsResponse] ( identifier[json_response] ) keyword[if] identifier[json_response] keyword[else] keyword[None] | def mentions(text, gcube_token=None, lang=DEFAULT_LANG, api=DEFAULT_SPOT_API):
"""
Find possible mentions in a text, do not link them to any entity.
:param text: the text where to find mentions.
:param gcube_token: the authentication token provided by the D4Science infrastructure.
:param lang: the Wikipedia language.
:param api: the API endpoint.
"""
payload = [('text', text.encode('utf-8')), ('lang', lang.encode('utf-8'))]
json_response = _issue_request(api, payload, gcube_token)
return MentionsResponse(json_response) if json_response else None |
def run(self, command, block=True, cwd=None, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE):
    """
    Create an instance of :class:`~ShellCommand` and run it

    Args:
        command (str): :class:`~ShellCommand`
        block (bool): See :class:`~ShellCommand`
        cwd (str): Override the runner cwd. Used by the :class:`~ShellCommand` instance
    """
    # Fall back to the runner's own working directory when none is given.
    effective_cwd = self.cwd if cwd is None else cwd
    shell_command = ShellCommand(
        command=command,
        logger=self.logger,
        block=block,
        cwd=effective_cwd,
        stdin=stdin,
        stdout=stdout,
        stderr=stderr,
    )
    return shell_command.run()
constant[
Create an instance of :class:`~ShellCommand` and run it
Args:
command (str): :class:`~ShellCommand`
block (bool): See :class:`~ShellCommand`
cwd (str): Override the runner cwd. Useb by the :class:`~ShellCommand` instance
]
if compare[name[cwd] is constant[None]] begin[:]
variable[cwd] assign[=] name[self].cwd
return[call[call[name[ShellCommand], parameter[]].run, parameter[]]] | keyword[def] identifier[run] ( identifier[self] , identifier[command] , identifier[block] = keyword[True] , identifier[cwd] = keyword[None] , identifier[stdin] = identifier[subprocess] . identifier[PIPE] , identifier[stdout] = identifier[subprocess] . identifier[PIPE] , identifier[stderr] = identifier[subprocess] . identifier[PIPE] ):
literal[string]
keyword[if] identifier[cwd] keyword[is] keyword[None] :
identifier[cwd] = identifier[self] . identifier[cwd]
keyword[return] identifier[ShellCommand] ( identifier[command] = identifier[command] , identifier[logger] = identifier[self] . identifier[logger] , identifier[block] = identifier[block] , identifier[cwd] = identifier[cwd] , identifier[stdin] = identifier[stdin] , identifier[stdout] = identifier[stdout] , identifier[stderr] = identifier[stderr] ). identifier[run] () | def run(self, command, block=True, cwd=None, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE):
"""
Create an instance of :class:`~ShellCommand` and run it
Args:
command (str): :class:`~ShellCommand`
block (bool): See :class:`~ShellCommand`
cwd (str): Override the runner cwd. Useb by the :class:`~ShellCommand` instance
"""
if cwd is None:
cwd = self.cwd # depends on [control=['if'], data=['cwd']]
return ShellCommand(command=command, logger=self.logger, block=block, cwd=cwd, stdin=stdin, stdout=stdout, stderr=stderr).run() |
def normalizeHSP(hsp, readLen, blastApplication):
    """
    Examine an HSP and return information about where the query and subject
    match begins and ends. Return a dict with keys that allow the query to
    be displayed against the subject. The returned readStartInSubject and
    readEndInSubject indices are offsets into the subject. I.e., they
    indicate where in the subject the query falls.

    In the returned object, all indices are suitable for Python string
    slicing etc. We must be careful to convert from the 1-based offsets
    found in BLAST output properly.

    hsp['frame'] is a (query, subject) 2-tuple, with both values coming from
    {-3, -2, -1, 1, 2, 3}. The sign indicates negative or positive sense
    (i.e., the direction of reading through the query or subject to get the
    alignment). The value is the nucleotide match offset modulo 3, plus one
    (i.e., it tells us which of the 3 possible reading frames is used in
    the match). The value is redundant because that information could also
    be obtained from the mod 3 value of the match offset.

    NOTE: the returned readStartInSubject value may be negative. We consider
    the hit sequence to start at offset 0. So if the read string has
    sufficient additional nucleotides before the start of the alignment
    match, it may protrude to the left of the hit. Similarly, the returned
    readEndInSubject can be greater than the subjectEnd.

    @param hsp: an HSP in the form of a C{dict}, built from a BLAST record.
        All passed hsp offsets are 1-based.
    @param readLen: the length of the read sequence.
    @param blastApplication: The C{str} command line program that was
        run (e.g., 'blastn', 'blastx').
    @return: a C{dict} with 0-based 'readStart', 'readEnd',
        'readStartInSubject', 'readEndInSubject', 'subjectStart' and
        'subjectEnd' keys.
    @raise AssertionError: if the HSP contains inconsistent offsets or
        match lengths (raised via debugPrint after dumping diagnostics).
    """
    def debugPrint(locals, msg=None):
        """
        Print debugging information showing the local variables from
        a call to normalizeHSP and then raise an C{AssertionError}.

        @param locals: A C{dict} of local variables.
        @param msg: A C{str} message to raise C{AssertionError} with.
        """
        print('normalizeHSP error:')
        print('  readLen: %d' % readLen)
        for var in sorted(locals.keys()):
            if var in ('debugPrint', 'hsp'):
                continue
            print('  %s: %s' % (var, locals[var]))
        print('  Original HSP:')
        printHSP(hsp, '    ')
        if msg:
            raise AssertionError(msg)
        else:
            raise AssertionError()

    readPositive = hsp['frame'][0] > 0
    hitPositive = hsp['frame'][1] > 0

    # The following variable names with underscores match the names of
    # attributes BioPython uses and the values (1-based) match those
    # reported by BLAST.
    read_start = hsp['query_start']
    read_end = hsp['query_end']
    sbjct_start = hsp['sbjct_start']
    sbjct_end = hsp['sbjct_end']

    # When the read is positive, BLASTN and TBLASTX give read offsets
    # ascending.
    #
    # TBLASTX reports negative read sense with indices ascending.
    # BLASTN does not report negative read sense.
    #
    # In all cases the read offsets should be ascending.
    if read_start > read_end:
        debugPrint(locals(),
                   'Assertion "read_start <= read_end" failed. Read '
                   'positive is %s. read_start = %d, read_end = %d' %
                   (readPositive, read_start, read_end))

    if hitPositive:
        # Make sure indices are ascending.
        if sbjct_start > sbjct_end:
            debugPrint(locals())
    else:
        # Hit is negative. Its indices will be ascending for TBLASTX
        # output but descending for BLASTN :-( Make sure we have them
        # ascending.
        if sbjct_start > sbjct_end:
            sbjct_start, sbjct_end = sbjct_end, sbjct_start

    # Now that we have asserted what we can about the original HSP values
    # and gotten them into ascending order, make some sane 0-based offsets.
    readStartInSubject = read_start - 1
    readEndInSubject = read_end
    subjectStart = sbjct_start - 1
    subjectEnd = sbjct_end

    if blastApplication == 'blastx':
        # In Blastx output, hit offsets are based on protein sequence
        # length but queries (and the reported offsets) are nucleotide.
        # Convert the read offsets to protein because we will plot against
        # the hit (protein).
        #
        # Note that readStartInSubject and readEndInSubject may not be 0 mod
        # 3. They are offsets into the read string giving the position of
        # the AA, which depends on the translation frame.
        readStartInSubject = int(readStartInSubject / 3)
        readEndInSubject = int(readEndInSubject / 3)

    # No operations on original 1-based HSP variables (with underscores)
    # should appear beyond this point.

    # Lengths of the *matched* regions in the subject (hit) and in the
    # read -- not the full sequence lengths. (These locals were formerly
    # named 'subjectLength' and 'readLength'; the latter was confusingly
    # close to the 'readLen' parameter, which really is the full read
    # length.)
    subjectMatchLength = subjectEnd - subjectStart
    readMatchLength = readEndInSubject - readStartInSubject

    hitGaps = hsp['sbjct'].count('-')
    readGaps = hsp['query'].count('-')

    # Sanity check that the length of the matches in the hit and read
    # are identical, taking into account gaps in either (indicated by '-'
    # characters in the match sequences, as returned by BLAST).
    subjectMatchLengthWithGaps = subjectMatchLength + hitGaps
    readMatchLengthWithGaps = readMatchLength + readGaps
    if subjectMatchLengthWithGaps != readMatchLengthWithGaps:
        debugPrint(locals(),
                   'Including gaps, hit match length (%d) != Read match '
                   'length (%d)' % (subjectMatchLengthWithGaps,
                                    readMatchLengthWithGaps))

    # TODO: check the mod 3 value of the start offsets.

    # Calculate read indices. These are indices relative to the hit!

    # unmatchedReadLeft is the number of read bases that will be sticking
    # out to the left of the start of the hit in our plots.
    if readPositive:
        unmatchedReadLeft = readStartInSubject
    else:
        unmatchedReadLeft = readLen - readEndInSubject

    # Set the read offsets based on the direction the match with the
    # hit takes.
    if hitPositive:
        readStartInSubject = subjectStart - unmatchedReadLeft
        readEndInSubject = readStartInSubject + readLen + readGaps
    else:
        readEndInSubject = subjectEnd + unmatchedReadLeft
        readStartInSubject = readEndInSubject - readLen - readGaps

    # Final sanity checks.
    if readStartInSubject > subjectStart:
        debugPrint(locals(), 'readStartInSubject > subjectStart')
    if readEndInSubject < subjectEnd:
        debugPrint(locals(), 'readEndInSubject < subjectEnd')

    return {
        'readStart': read_start - 1,
        'readEnd': read_end,
        'readStartInSubject': readStartInSubject,
        'readEndInSubject': readEndInSubject,
        'subjectStart': subjectStart,
        'subjectEnd': subjectEnd,
    }
constant[
Examine an HSP and return information about where the query and subject
match begins and ends. Return a dict with keys that allow the query to
be displayed against the subject. The returned readStartInSubject and
readEndInSubject indices are offsets into the subject. I.e., they
indicate where in the subject the query falls.
In the returned object, all indices are suitable for Python string
slicing etc. We must be careful to convert from the 1-based offsets
found in BLAST output properly.
hsp['frame'] is a (query, subject) 2-tuple, with both values coming from
{-3, -2, -1, 1, 2, 3}. The sign indicates negative or positive sense
(i.e., the direction of reading through the query or subject to get the
alignment). The value is the nucleotide match offset modulo 3, plus one
(i.e., it tells us which of the 3 possible reading frames is used in
the match). The value is redundant because that information could also
be obtained from the mod 3 value of the match offset.
NOTE: the returned readStartInSubject value may be negative. We consider
the hit sequence to start at offset 0. So if the read string has
sufficient additional nucleotides before the start of the alignment
match, it may protrude to the left of the hit. Similarly, the returned
readEndInSubject can be greater than the subjectEnd.
@param hsp: an HSP in the form of a C{dict}, built from a BLAST record.
All passed hsp offsets are 1-based.
@param readLen: the length of the read sequence.
@param blastApplication: The C{str} command line program that was
run (e.g., 'blastn', 'blastx').
]
def function[debugPrint, parameter[locals, msg]]:
constant[
Print debugging information showing the local variables from
a call to normalizeHSP and then raise an C{AssertionError}.
@param locals: A C{dict} of local variables.
@param msg: A C{str} message to raise C{AssertionError} with.
]
call[name[print], parameter[constant[normalizeHSP error:]]]
call[name[print], parameter[binary_operation[constant[ readLen: %d] <ast.Mod object at 0x7da2590d6920> name[readLen]]]]
for taget[name[var]] in starred[call[name[sorted], parameter[call[name[locals].keys, parameter[]]]]] begin[:]
if compare[name[var] in tuple[[<ast.Constant object at 0x7da1b0cff880>, <ast.Constant object at 0x7da1b0cfcc70>]]] begin[:]
continue
call[name[print], parameter[binary_operation[constant[ %s: %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0cfdea0>, <ast.Subscript object at 0x7da1b0cfdf30>]]]]]
call[name[print], parameter[constant[ Original HSP:]]]
call[name[printHSP], parameter[name[hsp], constant[ ]]]
if name[msg] begin[:]
<ast.Raise object at 0x7da1b0cfe500>
variable[readPositive] assign[=] compare[call[call[name[hsp]][constant[frame]]][constant[0]] greater[>] constant[0]]
variable[hitPositive] assign[=] compare[call[call[name[hsp]][constant[frame]]][constant[1]] greater[>] constant[0]]
variable[read_start] assign[=] call[name[hsp]][constant[query_start]]
variable[read_end] assign[=] call[name[hsp]][constant[query_end]]
variable[sbjct_start] assign[=] call[name[hsp]][constant[sbjct_start]]
variable[sbjct_end] assign[=] call[name[hsp]][constant[sbjct_end]]
if compare[name[read_start] greater[>] name[read_end]] begin[:]
call[name[debugPrint], parameter[call[name[locals], parameter[]], binary_operation[constant[Assertion "read_start <= read_end" failed. Read positive is %s. read_start = %d, read_end = %d] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0cffee0>, <ast.Name object at 0x7da1b0cfff40>, <ast.Name object at 0x7da1b0cffe80>]]]]]
if name[hitPositive] begin[:]
if compare[name[sbjct_start] greater[>] name[sbjct_end]] begin[:]
call[name[debugPrint], parameter[call[name[locals], parameter[]]]]
variable[readStartInSubject] assign[=] binary_operation[name[read_start] - constant[1]]
variable[readEndInSubject] assign[=] name[read_end]
variable[subjectStart] assign[=] binary_operation[name[sbjct_start] - constant[1]]
variable[subjectEnd] assign[=] name[sbjct_end]
if compare[name[blastApplication] equal[==] constant[blastx]] begin[:]
variable[readStartInSubject] assign[=] call[name[int], parameter[binary_operation[name[readStartInSubject] / constant[3]]]]
variable[readEndInSubject] assign[=] call[name[int], parameter[binary_operation[name[readEndInSubject] / constant[3]]]]
variable[subjectLength] assign[=] binary_operation[name[subjectEnd] - name[subjectStart]]
variable[readLength] assign[=] binary_operation[name[readEndInSubject] - name[readStartInSubject]]
variable[hitGaps] assign[=] call[call[name[hsp]][constant[sbjct]].count, parameter[constant[-]]]
variable[readGaps] assign[=] call[call[name[hsp]][constant[query]].count, parameter[constant[-]]]
variable[subjectLengthWithGaps] assign[=] binary_operation[name[subjectLength] + name[hitGaps]]
variable[readLengthWithGaps] assign[=] binary_operation[name[readLength] + name[readGaps]]
if compare[name[subjectLengthWithGaps] not_equal[!=] name[readLengthWithGaps]] begin[:]
call[name[debugPrint], parameter[call[name[locals], parameter[]], binary_operation[constant[Including gaps, hit match length (%d) != Read match length (%d)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0cfcb20>, <ast.Name object at 0x7da1b0cfcbe0>]]]]]
if name[readPositive] begin[:]
variable[unmatchedReadLeft] assign[=] name[readStartInSubject]
if name[hitPositive] begin[:]
variable[readStartInSubject] assign[=] binary_operation[name[subjectStart] - name[unmatchedReadLeft]]
variable[readEndInSubject] assign[=] binary_operation[binary_operation[name[readStartInSubject] + name[readLen]] + name[readGaps]]
if compare[name[readStartInSubject] greater[>] name[subjectStart]] begin[:]
call[name[debugPrint], parameter[call[name[locals], parameter[]], constant[readStartInSubject > subjectStart]]]
if compare[name[readEndInSubject] less[<] name[subjectEnd]] begin[:]
call[name[debugPrint], parameter[call[name[locals], parameter[]], constant[readEndInSubject < subjectEnd]]]
return[dictionary[[<ast.Constant object at 0x7da18f723df0>, <ast.Constant object at 0x7da18f7217e0>, <ast.Constant object at 0x7da18f723040>, <ast.Constant object at 0x7da18f721480>, <ast.Constant object at 0x7da18f722d70>, <ast.Constant object at 0x7da18f720a90>], [<ast.BinOp object at 0x7da18f7219f0>, <ast.Name object at 0x7da18f722aa0>, <ast.Name object at 0x7da18f7228c0>, <ast.Name object at 0x7da18f723a90>, <ast.Name object at 0x7da18f7208e0>, <ast.Name object at 0x7da18f721390>]]] | keyword[def] identifier[normalizeHSP] ( identifier[hsp] , identifier[readLen] , identifier[blastApplication] ):
literal[string]
keyword[def] identifier[debugPrint] ( identifier[locals] , identifier[msg] = keyword[None] ):
literal[string]
identifier[print] ( literal[string] )
identifier[print] ( literal[string] % identifier[readLen] )
keyword[for] identifier[var] keyword[in] identifier[sorted] ( identifier[locals] . identifier[keys] ()):
keyword[if] identifier[var] keyword[in] ( literal[string] , literal[string] ):
keyword[continue]
identifier[print] ( literal[string] %( identifier[var] , identifier[locals] [ identifier[var] ]))
identifier[print] ( literal[string] )
identifier[printHSP] ( identifier[hsp] , literal[string] )
keyword[if] identifier[msg] :
keyword[raise] identifier[AssertionError] ( identifier[msg] )
keyword[else] :
keyword[raise] identifier[AssertionError] ()
identifier[readPositive] = identifier[hsp] [ literal[string] ][ literal[int] ]> literal[int]
identifier[hitPositive] = identifier[hsp] [ literal[string] ][ literal[int] ]> literal[int]
identifier[read_start] = identifier[hsp] [ literal[string] ]
identifier[read_end] = identifier[hsp] [ literal[string] ]
identifier[sbjct_start] = identifier[hsp] [ literal[string] ]
identifier[sbjct_end] = identifier[hsp] [ literal[string] ]
keyword[if] identifier[read_start] > identifier[read_end] :
identifier[debugPrint] ( identifier[locals] (),
literal[string]
literal[string] %
( identifier[readPositive] , identifier[read_start] , identifier[read_end] ))
keyword[if] identifier[hitPositive] :
keyword[if] identifier[sbjct_start] > identifier[sbjct_end] :
identifier[debugPrint] ( identifier[locals] ())
keyword[else] :
keyword[if] identifier[sbjct_start] > identifier[sbjct_end] :
identifier[sbjct_start] , identifier[sbjct_end] = identifier[sbjct_end] , identifier[sbjct_start]
identifier[readStartInSubject] = identifier[read_start] - literal[int]
identifier[readEndInSubject] = identifier[read_end]
identifier[subjectStart] = identifier[sbjct_start] - literal[int]
identifier[subjectEnd] = identifier[sbjct_end]
keyword[if] identifier[blastApplication] == literal[string] :
identifier[readStartInSubject] = identifier[int] ( identifier[readStartInSubject] / literal[int] )
identifier[readEndInSubject] = identifier[int] ( identifier[readEndInSubject] / literal[int] )
identifier[subjectLength] = identifier[subjectEnd] - identifier[subjectStart]
identifier[readLength] = identifier[readEndInSubject] - identifier[readStartInSubject]
identifier[hitGaps] = identifier[hsp] [ literal[string] ]. identifier[count] ( literal[string] )
identifier[readGaps] = identifier[hsp] [ literal[string] ]. identifier[count] ( literal[string] )
identifier[subjectLengthWithGaps] = identifier[subjectLength] + identifier[hitGaps]
identifier[readLengthWithGaps] = identifier[readLength] + identifier[readGaps]
keyword[if] identifier[subjectLengthWithGaps] != identifier[readLengthWithGaps] :
identifier[debugPrint] ( identifier[locals] (),
literal[string]
literal[string] %( identifier[subjectLengthWithGaps] ,
identifier[readLengthWithGaps] ))
keyword[if] identifier[readPositive] :
identifier[unmatchedReadLeft] = identifier[readStartInSubject]
keyword[else] :
identifier[unmatchedReadLeft] = identifier[readLen] - identifier[readEndInSubject]
keyword[if] identifier[hitPositive] :
identifier[readStartInSubject] = identifier[subjectStart] - identifier[unmatchedReadLeft]
identifier[readEndInSubject] = identifier[readStartInSubject] + identifier[readLen] + identifier[readGaps]
keyword[else] :
identifier[readEndInSubject] = identifier[subjectEnd] + identifier[unmatchedReadLeft]
identifier[readStartInSubject] = identifier[readEndInSubject] - identifier[readLen] - identifier[readGaps]
keyword[if] identifier[readStartInSubject] > identifier[subjectStart] :
identifier[debugPrint] ( identifier[locals] (), literal[string] )
keyword[if] identifier[readEndInSubject] < identifier[subjectEnd] :
identifier[debugPrint] ( identifier[locals] (), literal[string] )
keyword[return] {
literal[string] : identifier[read_start] - literal[int] ,
literal[string] : identifier[read_end] ,
literal[string] : identifier[readStartInSubject] ,
literal[string] : identifier[readEndInSubject] ,
literal[string] : identifier[subjectStart] ,
literal[string] : identifier[subjectEnd] ,
} | def normalizeHSP(hsp, readLen, blastApplication):
"""
Examine an HSP and return information about where the query and subject
match begins and ends. Return a dict with keys that allow the query to
be displayed against the subject. The returned readStartInSubject and
readEndInSubject indices are offsets into the subject. I.e., they
indicate where in the subject the query falls.
In the returned object, all indices are suitable for Python string
slicing etc. We must be careful to convert from the 1-based offsets
found in BLAST output properly.
hsp['frame'] is a (query, subject) 2-tuple, with both values coming from
{-3, -2, -1, 1, 2, 3}. The sign indicates negative or positive sense
(i.e., the direction of reading through the query or subject to get the
alignment). The value is the nucleotide match offset modulo 3, plus one
(i.e., it tells us which of the 3 possible reading frames is used in
the match). The value is redundant because that information could also
be obtained from the mod 3 value of the match offset.
NOTE: the returned readStartInSubject value may be negative. We consider
the hit sequence to start at offset 0. So if the read string has
sufficient additional nucleotides before the start of the alignment
match, it may protrude to the left of the hit. Similarly, the returned
readEndInSubject can be greater than the subjectEnd.
@param hsp: an HSP in the form of a C{dict}, built from a BLAST record.
All passed hsp offsets are 1-based.
@param readLen: the length of the read sequence.
@param blastApplication: The C{str} command line program that was
run (e.g., 'blastn', 'blastx').
"""
def debugPrint(locals, msg=None):
"""
Print debugging information showing the local variables from
a call to normalizeHSP and then raise an C{AssertionError}.
@param locals: A C{dict} of local variables.
@param msg: A C{str} message to raise C{AssertionError} with.
"""
print('normalizeHSP error:')
print(' readLen: %d' % readLen)
for var in sorted(locals.keys()):
if var in ('debugPrint', 'hsp'):
continue # depends on [control=['if'], data=[]]
print(' %s: %s' % (var, locals[var])) # depends on [control=['for'], data=['var']]
print(' Original HSP:')
printHSP(hsp, ' ')
if msg:
raise AssertionError(msg) # depends on [control=['if'], data=[]]
else:
raise AssertionError()
readPositive = hsp['frame'][0] > 0
hitPositive = hsp['frame'][1] > 0
# The following variable names with underscores match the names of
# attributes BioPython uses and the values (1-based) match those
# reported by BLAST.
read_start = hsp['query_start']
read_end = hsp['query_end']
sbjct_start = hsp['sbjct_start']
sbjct_end = hsp['sbjct_end']
# When the read is positive, BLASTN and TBLASTX give read offsets
# ascending.
#
# TBLASTX reports negative read sense with indices ascending.
# BLASTN does not report negative read sense.
#
# In all cases the read offsets should be ascending.
if read_start > read_end:
debugPrint(locals(), 'Assertion "read_start <= read_end" failed. Read positive is %s. read_start = %d, read_end = %d' % (readPositive, read_start, read_end)) # depends on [control=['if'], data=['read_start', 'read_end']]
if hitPositive:
# Make sure indices are ascending.
if sbjct_start > sbjct_end:
debugPrint(locals()) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Hit is negative. Its indices will be ascending for TBLASTX
# output but descending for BLASTN :-( Make sure we have them
# ascending.
elif sbjct_start > sbjct_end:
(sbjct_start, sbjct_end) = (sbjct_end, sbjct_start) # depends on [control=['if'], data=['sbjct_start', 'sbjct_end']]
# Now that we have asserted what we can about the original HSP values
# and gotten them into ascending order, make some sane 0-based offsets.
readStartInSubject = read_start - 1
readEndInSubject = read_end
subjectStart = sbjct_start - 1
subjectEnd = sbjct_end
if blastApplication == 'blastx':
# In Blastx output, hit offsets are based on protein sequence
# length but queries (and the reported offsets) are nucleotide.
# Convert the read offsets to protein because we will plot against
# the hit (protein).
#
# Note that readStartInSubject and readEndInSubject may not be 0 mod
# 3. They are offsets into the read string giving the position of
# the AA, which depends on the translation frame.
readStartInSubject = int(readStartInSubject / 3)
readEndInSubject = int(readEndInSubject / 3) # depends on [control=['if'], data=[]]
# No operations on original 1-based HSP variables (with underscores)
# should appear beyond this point.
subjectLength = subjectEnd - subjectStart
readLength = readEndInSubject - readStartInSubject
# NOTE: readLength (above) is a really bad name. It's actually going to
# hold the length of the match in the query. I don't know why
# readEndInSubject - readStartInSubject is used (I mean why those two
# variables are not named readEnd and readStart). Maybe someone made a
# find and replace editing error which changed their names. Anyway, the
# readLength variable is confusingly named because this function is
# passed a 'readLen' argument, which does happen to be the full length
# of the read. This should be cleaned up. See ../diamond/hsp.py for
# something cleaner.
hitGaps = hsp['sbjct'].count('-')
readGaps = hsp['query'].count('-')
# Sanity check that the length of the matches in the hit and read
# are identical, taking into account gaps in either (indicated by '-'
# characters in the match sequences, as returned by BLAST).
subjectLengthWithGaps = subjectLength + hitGaps
readLengthWithGaps = readLength + readGaps
if subjectLengthWithGaps != readLengthWithGaps:
debugPrint(locals(), 'Including gaps, hit match length (%d) != Read match length (%d)' % (subjectLengthWithGaps, readLengthWithGaps)) # depends on [control=['if'], data=['subjectLengthWithGaps', 'readLengthWithGaps']]
# TODO: check the mod 3 value of the start offsets.
# Calculate read indices. These are indices relative to the hit!
# unmatchedReadLeft is the number of read bases that will be sticking
# out to the left of the start of the hit in our plots.
if readPositive:
unmatchedReadLeft = readStartInSubject # depends on [control=['if'], data=[]]
else:
unmatchedReadLeft = readLen - readEndInSubject
# Set the read offsets based on the direction the match with the
# hit takes.
if hitPositive:
readStartInSubject = subjectStart - unmatchedReadLeft
readEndInSubject = readStartInSubject + readLen + readGaps # depends on [control=['if'], data=[]]
else:
readEndInSubject = subjectEnd + unmatchedReadLeft
readStartInSubject = readEndInSubject - readLen - readGaps
# Final sanity checks.
if readStartInSubject > subjectStart:
debugPrint(locals(), 'readStartInSubject > subjectStart') # depends on [control=['if'], data=[]]
if readEndInSubject < subjectEnd:
debugPrint(locals(), 'readEndInSubject < subjectEnd') # depends on [control=['if'], data=[]]
return {'readStart': read_start - 1, 'readEnd': read_end, 'readStartInSubject': readStartInSubject, 'readEndInSubject': readEndInSubject, 'subjectStart': subjectStart, 'subjectEnd': subjectEnd} |
def namespaced_function(function, global_dict, defaults=None, preserve_context=False):
    """
    Redefine (clone) a function under a different globals() namespace scope.

    preserve_context:
        Allow keeping the context taken from the original namespace,
        and extend it with globals() taken from
        the newly targeted namespace.
    """
    defaults = function.__defaults__ if defaults is None else defaults

    if preserve_context:
        # Start from the function's own globals and layer the new
        # namespace on top of them.
        merged = function.__globals__.copy()
        merged.update(global_dict)
        global_dict = merged

    clone = types.FunctionType(
        function.__code__,
        global_dict,
        name=function.__name__,
        argdefs=defaults,
        closure=function.__closure__,
    )
    # Carry over any attributes that were set on the original function.
    clone.__dict__.update(function.__dict__)
    return clone
constant[
Redefine (clone) a function under a different globals() namespace scope
preserve_context:
Allow keeping the context taken from orignal namespace,
and extend it with globals() taken from
new targetted namespace.
]
if compare[name[defaults] is constant[None]] begin[:]
variable[defaults] assign[=] name[function].__defaults__
if name[preserve_context] begin[:]
variable[_global_dict] assign[=] call[name[function].__globals__.copy, parameter[]]
call[name[_global_dict].update, parameter[name[global_dict]]]
variable[global_dict] assign[=] name[_global_dict]
variable[new_namespaced_function] assign[=] call[name[types].FunctionType, parameter[name[function].__code__, name[global_dict]]]
call[name[new_namespaced_function].__dict__.update, parameter[name[function].__dict__]]
return[name[new_namespaced_function]] | keyword[def] identifier[namespaced_function] ( identifier[function] , identifier[global_dict] , identifier[defaults] = keyword[None] , identifier[preserve_context] = keyword[False] ):
literal[string]
keyword[if] identifier[defaults] keyword[is] keyword[None] :
identifier[defaults] = identifier[function] . identifier[__defaults__]
keyword[if] identifier[preserve_context] :
identifier[_global_dict] = identifier[function] . identifier[__globals__] . identifier[copy] ()
identifier[_global_dict] . identifier[update] ( identifier[global_dict] )
identifier[global_dict] = identifier[_global_dict]
identifier[new_namespaced_function] = identifier[types] . identifier[FunctionType] (
identifier[function] . identifier[__code__] ,
identifier[global_dict] ,
identifier[name] = identifier[function] . identifier[__name__] ,
identifier[argdefs] = identifier[defaults] ,
identifier[closure] = identifier[function] . identifier[__closure__]
)
identifier[new_namespaced_function] . identifier[__dict__] . identifier[update] ( identifier[function] . identifier[__dict__] )
keyword[return] identifier[new_namespaced_function] | def namespaced_function(function, global_dict, defaults=None, preserve_context=False):
"""
Redefine (clone) a function under a different globals() namespace scope
preserve_context:
Allow keeping the context taken from orignal namespace,
and extend it with globals() taken from
new targetted namespace.
"""
if defaults is None:
defaults = function.__defaults__ # depends on [control=['if'], data=['defaults']]
if preserve_context:
_global_dict = function.__globals__.copy()
_global_dict.update(global_dict)
global_dict = _global_dict # depends on [control=['if'], data=[]]
new_namespaced_function = types.FunctionType(function.__code__, global_dict, name=function.__name__, argdefs=defaults, closure=function.__closure__)
new_namespaced_function.__dict__.update(function.__dict__)
return new_namespaced_function |
def yield_pair_energies(self, index1, index2):
"""Yields pairs ((s(r_ij), v(bar{r}_ij))"""
strength = self.strengths[index1, index2]
distance = self.distances[index1, index2]
yield strength*distance**(-6), 1 | def function[yield_pair_energies, parameter[self, index1, index2]]:
constant[Yields pairs ((s(r_ij), v(bar{r}_ij))]
variable[strength] assign[=] call[name[self].strengths][tuple[[<ast.Name object at 0x7da1b26adc30>, <ast.Name object at 0x7da1b26af790>]]]
variable[distance] assign[=] call[name[self].distances][tuple[[<ast.Name object at 0x7da1b26ae470>, <ast.Name object at 0x7da1b26ae2f0>]]]
<ast.Yield object at 0x7da1b26aef20> | keyword[def] identifier[yield_pair_energies] ( identifier[self] , identifier[index1] , identifier[index2] ):
literal[string]
identifier[strength] = identifier[self] . identifier[strengths] [ identifier[index1] , identifier[index2] ]
identifier[distance] = identifier[self] . identifier[distances] [ identifier[index1] , identifier[index2] ]
keyword[yield] identifier[strength] * identifier[distance] **(- literal[int] ), literal[int] | def yield_pair_energies(self, index1, index2):
"""Yields pairs ((s(r_ij), v(bar{r}_ij))"""
strength = self.strengths[index1, index2]
distance = self.distances[index1, index2]
yield (strength * distance ** (-6), 1) |
def update_get_params(self):
"""Update HTTP GET params with the given fields that user wants to fetch."""
if isinstance(self._fields, (tuple, list)): # tuples & lists > x,y,z
self.get_params["fields"] = ",".join([str(_) for _ in self._fields])
elif isinstance(self._fields, str):
self.get_params["fields"] = self._fields | def function[update_get_params, parameter[self]]:
constant[Update HTTP GET params with the given fields that user wants to fetch.]
if call[name[isinstance], parameter[name[self]._fields, tuple[[<ast.Name object at 0x7da1b0402e00>, <ast.Name object at 0x7da1b0401840>]]]] begin[:]
call[name[self].get_params][constant[fields]] assign[=] call[constant[,].join, parameter[<ast.ListComp object at 0x7da1b0401cf0>]] | keyword[def] identifier[update_get_params] ( identifier[self] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[self] . identifier[_fields] ,( identifier[tuple] , identifier[list] )):
identifier[self] . identifier[get_params] [ literal[string] ]= literal[string] . identifier[join] ([ identifier[str] ( identifier[_] ) keyword[for] identifier[_] keyword[in] identifier[self] . identifier[_fields] ])
keyword[elif] identifier[isinstance] ( identifier[self] . identifier[_fields] , identifier[str] ):
identifier[self] . identifier[get_params] [ literal[string] ]= identifier[self] . identifier[_fields] | def update_get_params(self):
"""Update HTTP GET params with the given fields that user wants to fetch."""
if isinstance(self._fields, (tuple, list)): # tuples & lists > x,y,z
self.get_params['fields'] = ','.join([str(_) for _ in self._fields]) # depends on [control=['if'], data=[]]
elif isinstance(self._fields, str):
self.get_params['fields'] = self._fields # depends on [control=['if'], data=[]] |
def _resetaA(self,pot=None,type=None):
"""
NAME:
_resetaA
PURPOSE:
re-set up an actionAngle module for this Orbit
ONLY TO BE CALLED FROM WITHIN SETUPAA
INPUT:
pot - potential
OUTPUT:
True if reset happened, False otherwise
HISTORY:
2012-06-01 - Written - Bovy (IAS)
"""
if (not pot is None and pot != self._aAPot) \
or (not type is None and type != self._aAType):
delattr(self,'_aA')
return True
else:
pass | def function[_resetaA, parameter[self, pot, type]]:
constant[
NAME:
_resetaA
PURPOSE:
re-set up an actionAngle module for this Orbit
ONLY TO BE CALLED FROM WITHIN SETUPAA
INPUT:
pot - potential
OUTPUT:
True if reset happened, False otherwise
HISTORY:
2012-06-01 - Written - Bovy (IAS)
]
if <ast.BoolOp object at 0x7da1b0c67c40> begin[:]
call[name[delattr], parameter[name[self], constant[_aA]]]
return[constant[True]] | keyword[def] identifier[_resetaA] ( identifier[self] , identifier[pot] = keyword[None] , identifier[type] = keyword[None] ):
literal[string]
keyword[if] ( keyword[not] identifier[pot] keyword[is] keyword[None] keyword[and] identifier[pot] != identifier[self] . identifier[_aAPot] ) keyword[or] ( keyword[not] identifier[type] keyword[is] keyword[None] keyword[and] identifier[type] != identifier[self] . identifier[_aAType] ):
identifier[delattr] ( identifier[self] , literal[string] )
keyword[return] keyword[True]
keyword[else] :
keyword[pass] | def _resetaA(self, pot=None, type=None):
"""
NAME:
_resetaA
PURPOSE:
re-set up an actionAngle module for this Orbit
ONLY TO BE CALLED FROM WITHIN SETUPAA
INPUT:
pot - potential
OUTPUT:
True if reset happened, False otherwise
HISTORY:
2012-06-01 - Written - Bovy (IAS)
"""
if not pot is None and pot != self._aAPot or (not type is None and type != self._aAType):
delattr(self, '_aA')
return True # depends on [control=['if'], data=[]]
else:
pass |
def less_than_obs_constraints(self):
"""get the names of the observations that
are listed as less than inequality constraints. Zero-
weighted obs are skipped
Returns
-------
pandas.Series : obsnme of obseravtions that are non-zero weighted
less than constraints
"""
obs = self.observation_data
lt_obs = obs.loc[obs.apply(lambda x: self._is_less_const(x.obgnme) \
and x.weight != 0.0,axis=1),"obsnme"]
return lt_obs | def function[less_than_obs_constraints, parameter[self]]:
constant[get the names of the observations that
are listed as less than inequality constraints. Zero-
weighted obs are skipped
Returns
-------
pandas.Series : obsnme of obseravtions that are non-zero weighted
less than constraints
]
variable[obs] assign[=] name[self].observation_data
variable[lt_obs] assign[=] call[name[obs].loc][tuple[[<ast.Call object at 0x7da1b1d39c30>, <ast.Constant object at 0x7da1b1d6d360>]]]
return[name[lt_obs]] | keyword[def] identifier[less_than_obs_constraints] ( identifier[self] ):
literal[string]
identifier[obs] = identifier[self] . identifier[observation_data]
identifier[lt_obs] = identifier[obs] . identifier[loc] [ identifier[obs] . identifier[apply] ( keyword[lambda] identifier[x] : identifier[self] . identifier[_is_less_const] ( identifier[x] . identifier[obgnme] ) keyword[and] identifier[x] . identifier[weight] != literal[int] , identifier[axis] = literal[int] ), literal[string] ]
keyword[return] identifier[lt_obs] | def less_than_obs_constraints(self):
"""get the names of the observations that
are listed as less than inequality constraints. Zero-
weighted obs are skipped
Returns
-------
pandas.Series : obsnme of obseravtions that are non-zero weighted
less than constraints
"""
obs = self.observation_data
lt_obs = obs.loc[obs.apply(lambda x: self._is_less_const(x.obgnme) and x.weight != 0.0, axis=1), 'obsnme']
return lt_obs |
def get_files(directory, recursive=False):
"""Return a list of all files in the directory."""
files_out = []
if recursive:
for root, dirs, files in os.walk(os.path.abspath(directory)):
files = [os.path.join(root, f) for f in files]
files_out.append(files)
files_out = list(itertools.chain(*files_out))
else:
files_out = [os.path.join(directory, f) for f in os.listdir(directory)]
files_out = list(filter(lambda f: os.path.isfile(f), files_out))
# order alphabetically on file name
return sorted(files_out) | def function[get_files, parameter[directory, recursive]]:
constant[Return a list of all files in the directory.]
variable[files_out] assign[=] list[[]]
if name[recursive] begin[:]
for taget[tuple[[<ast.Name object at 0x7da2047ea350>, <ast.Name object at 0x7da2047ea230>, <ast.Name object at 0x7da2047eae60>]]] in starred[call[name[os].walk, parameter[call[name[os].path.abspath, parameter[name[directory]]]]]] begin[:]
variable[files] assign[=] <ast.ListComp object at 0x7da2047eb460>
call[name[files_out].append, parameter[name[files]]]
variable[files_out] assign[=] call[name[list], parameter[call[name[itertools].chain, parameter[<ast.Starred object at 0x7da18f810400>]]]]
return[call[name[sorted], parameter[name[files_out]]]] | keyword[def] identifier[get_files] ( identifier[directory] , identifier[recursive] = keyword[False] ):
literal[string]
identifier[files_out] =[]
keyword[if] identifier[recursive] :
keyword[for] identifier[root] , identifier[dirs] , identifier[files] keyword[in] identifier[os] . identifier[walk] ( identifier[os] . identifier[path] . identifier[abspath] ( identifier[directory] )):
identifier[files] =[ identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[f] ) keyword[for] identifier[f] keyword[in] identifier[files] ]
identifier[files_out] . identifier[append] ( identifier[files] )
identifier[files_out] = identifier[list] ( identifier[itertools] . identifier[chain] (* identifier[files_out] ))
keyword[else] :
identifier[files_out] =[ identifier[os] . identifier[path] . identifier[join] ( identifier[directory] , identifier[f] ) keyword[for] identifier[f] keyword[in] identifier[os] . identifier[listdir] ( identifier[directory] )]
identifier[files_out] = identifier[list] ( identifier[filter] ( keyword[lambda] identifier[f] : identifier[os] . identifier[path] . identifier[isfile] ( identifier[f] ), identifier[files_out] ))
keyword[return] identifier[sorted] ( identifier[files_out] ) | def get_files(directory, recursive=False):
"""Return a list of all files in the directory."""
files_out = []
if recursive:
for (root, dirs, files) in os.walk(os.path.abspath(directory)):
files = [os.path.join(root, f) for f in files]
files_out.append(files) # depends on [control=['for'], data=[]]
files_out = list(itertools.chain(*files_out)) # depends on [control=['if'], data=[]]
else:
files_out = [os.path.join(directory, f) for f in os.listdir(directory)]
files_out = list(filter(lambda f: os.path.isfile(f), files_out))
# order alphabetically on file name
return sorted(files_out) |
def cancel_expired_invitations(invitations=None):
"""
Invitation lifetime must be specified in Waldur Core settings with parameter
"INVITATION_LIFETIME". If invitation creation time is less than expiration time, the invitation will set as expired.
"""
expiration_date = timezone.now() - settings.WALDUR_CORE['INVITATION_LIFETIME']
if not invitations:
invitations = models.Invitation.objects.filter(state=models.Invitation.State.PENDING)
invitations = invitations.filter(created__lte=expiration_date)
invitations.update(state=models.Invitation.State.EXPIRED) | def function[cancel_expired_invitations, parameter[invitations]]:
constant[
Invitation lifetime must be specified in Waldur Core settings with parameter
"INVITATION_LIFETIME". If invitation creation time is less than expiration time, the invitation will set as expired.
]
variable[expiration_date] assign[=] binary_operation[call[name[timezone].now, parameter[]] - call[name[settings].WALDUR_CORE][constant[INVITATION_LIFETIME]]]
if <ast.UnaryOp object at 0x7da1b0e30100> begin[:]
variable[invitations] assign[=] call[name[models].Invitation.objects.filter, parameter[]]
variable[invitations] assign[=] call[name[invitations].filter, parameter[]]
call[name[invitations].update, parameter[]] | keyword[def] identifier[cancel_expired_invitations] ( identifier[invitations] = keyword[None] ):
literal[string]
identifier[expiration_date] = identifier[timezone] . identifier[now] ()- identifier[settings] . identifier[WALDUR_CORE] [ literal[string] ]
keyword[if] keyword[not] identifier[invitations] :
identifier[invitations] = identifier[models] . identifier[Invitation] . identifier[objects] . identifier[filter] ( identifier[state] = identifier[models] . identifier[Invitation] . identifier[State] . identifier[PENDING] )
identifier[invitations] = identifier[invitations] . identifier[filter] ( identifier[created__lte] = identifier[expiration_date] )
identifier[invitations] . identifier[update] ( identifier[state] = identifier[models] . identifier[Invitation] . identifier[State] . identifier[EXPIRED] ) | def cancel_expired_invitations(invitations=None):
"""
Invitation lifetime must be specified in Waldur Core settings with parameter
"INVITATION_LIFETIME". If invitation creation time is less than expiration time, the invitation will set as expired.
"""
expiration_date = timezone.now() - settings.WALDUR_CORE['INVITATION_LIFETIME']
if not invitations:
invitations = models.Invitation.objects.filter(state=models.Invitation.State.PENDING) # depends on [control=['if'], data=[]]
invitations = invitations.filter(created__lte=expiration_date)
invitations.update(state=models.Invitation.State.EXPIRED) |
def undefine(self):
"""Undefine the Global.
Python equivalent of the CLIPS undefglobal command.
The object becomes unusable after this method has been called.
"""
if lib.EnvUndefglobal(self._env, self._glb) != 1:
raise CLIPSError(self._env)
self._env = None | def function[undefine, parameter[self]]:
constant[Undefine the Global.
Python equivalent of the CLIPS undefglobal command.
The object becomes unusable after this method has been called.
]
if compare[call[name[lib].EnvUndefglobal, parameter[name[self]._env, name[self]._glb]] not_equal[!=] constant[1]] begin[:]
<ast.Raise object at 0x7da18dc07700>
name[self]._env assign[=] constant[None] | keyword[def] identifier[undefine] ( identifier[self] ):
literal[string]
keyword[if] identifier[lib] . identifier[EnvUndefglobal] ( identifier[self] . identifier[_env] , identifier[self] . identifier[_glb] )!= literal[int] :
keyword[raise] identifier[CLIPSError] ( identifier[self] . identifier[_env] )
identifier[self] . identifier[_env] = keyword[None] | def undefine(self):
"""Undefine the Global.
Python equivalent of the CLIPS undefglobal command.
The object becomes unusable after this method has been called.
"""
if lib.EnvUndefglobal(self._env, self._glb) != 1:
raise CLIPSError(self._env) # depends on [control=['if'], data=[]]
self._env = None |
def _domino_hand(d, hands):
'''
:param Domino d: domino to find within the hands
:param list hands: hands to find domino in
:return: index of the hand that contains the specified domino
:raises NoSuchDominoException: if no hand contains the specified domino
'''
for i, hand in enumerate(hands):
if d in hand:
return i
raise dominoes.NoSuchDominoException('{} is not in any hand!'.format(d)) | def function[_domino_hand, parameter[d, hands]]:
constant[
:param Domino d: domino to find within the hands
:param list hands: hands to find domino in
:return: index of the hand that contains the specified domino
:raises NoSuchDominoException: if no hand contains the specified domino
]
for taget[tuple[[<ast.Name object at 0x7da1b04b6b90>, <ast.Name object at 0x7da1b04b6b00>]]] in starred[call[name[enumerate], parameter[name[hands]]]] begin[:]
if compare[name[d] in name[hand]] begin[:]
return[name[i]]
<ast.Raise object at 0x7da1b04b6ad0> | keyword[def] identifier[_domino_hand] ( identifier[d] , identifier[hands] ):
literal[string]
keyword[for] identifier[i] , identifier[hand] keyword[in] identifier[enumerate] ( identifier[hands] ):
keyword[if] identifier[d] keyword[in] identifier[hand] :
keyword[return] identifier[i]
keyword[raise] identifier[dominoes] . identifier[NoSuchDominoException] ( literal[string] . identifier[format] ( identifier[d] )) | def _domino_hand(d, hands):
"""
:param Domino d: domino to find within the hands
:param list hands: hands to find domino in
:return: index of the hand that contains the specified domino
:raises NoSuchDominoException: if no hand contains the specified domino
"""
for (i, hand) in enumerate(hands):
if d in hand:
return i # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
raise dominoes.NoSuchDominoException('{} is not in any hand!'.format(d)) |
def get_subpages_sorted(self, pages, page):
"""Get @page subpages sorted appropriately."""
sorted_pages = []
to_sort = []
for subpage in page.subpages:
# Do not resort subprojects even if they are
# 'generated'.
if pages[subpage].pre_sorted:
sorted_pages.append(subpage)
else:
to_sort.append(subpage)
return sorted_pages + sorted(
to_sort, key=lambda p: pages[p].get_title().lower()) | def function[get_subpages_sorted, parameter[self, pages, page]]:
constant[Get @page subpages sorted appropriately.]
variable[sorted_pages] assign[=] list[[]]
variable[to_sort] assign[=] list[[]]
for taget[name[subpage]] in starred[name[page].subpages] begin[:]
if call[name[pages]][name[subpage]].pre_sorted begin[:]
call[name[sorted_pages].append, parameter[name[subpage]]]
return[binary_operation[name[sorted_pages] + call[name[sorted], parameter[name[to_sort]]]]] | keyword[def] identifier[get_subpages_sorted] ( identifier[self] , identifier[pages] , identifier[page] ):
literal[string]
identifier[sorted_pages] =[]
identifier[to_sort] =[]
keyword[for] identifier[subpage] keyword[in] identifier[page] . identifier[subpages] :
keyword[if] identifier[pages] [ identifier[subpage] ]. identifier[pre_sorted] :
identifier[sorted_pages] . identifier[append] ( identifier[subpage] )
keyword[else] :
identifier[to_sort] . identifier[append] ( identifier[subpage] )
keyword[return] identifier[sorted_pages] + identifier[sorted] (
identifier[to_sort] , identifier[key] = keyword[lambda] identifier[p] : identifier[pages] [ identifier[p] ]. identifier[get_title] (). identifier[lower] ()) | def get_subpages_sorted(self, pages, page):
"""Get @page subpages sorted appropriately."""
sorted_pages = []
to_sort = []
for subpage in page.subpages:
# Do not resort subprojects even if they are
# 'generated'.
if pages[subpage].pre_sorted:
sorted_pages.append(subpage) # depends on [control=['if'], data=[]]
else:
to_sort.append(subpage) # depends on [control=['for'], data=['subpage']]
return sorted_pages + sorted(to_sort, key=lambda p: pages[p].get_title().lower()) |
def get(self, size, create=True):
"""
Returns a Thumbnail instance.
First check whether thumbnail is already cached. If it doesn't:
1. Try to fetch the thumbnail
2. Create thumbnail if it's not present
3. Cache the thumbnail for future use
"""
if self._thumbnails is None:
self._refresh_cache()
thumbnail = self._thumbnails.get(size)
if thumbnail is None:
thumbnail = images.get(self.source_image.name, size,
self.metadata_backend, self.storage)
if thumbnail is None:
thumbnail = self.create(size)
self._thumbnails[size] = thumbnail
return thumbnail | def function[get, parameter[self, size, create]]:
constant[
Returns a Thumbnail instance.
First check whether thumbnail is already cached. If it doesn't:
1. Try to fetch the thumbnail
2. Create thumbnail if it's not present
3. Cache the thumbnail for future use
]
if compare[name[self]._thumbnails is constant[None]] begin[:]
call[name[self]._refresh_cache, parameter[]]
variable[thumbnail] assign[=] call[name[self]._thumbnails.get, parameter[name[size]]]
if compare[name[thumbnail] is constant[None]] begin[:]
variable[thumbnail] assign[=] call[name[images].get, parameter[name[self].source_image.name, name[size], name[self].metadata_backend, name[self].storage]]
if compare[name[thumbnail] is constant[None]] begin[:]
variable[thumbnail] assign[=] call[name[self].create, parameter[name[size]]]
call[name[self]._thumbnails][name[size]] assign[=] name[thumbnail]
return[name[thumbnail]] | keyword[def] identifier[get] ( identifier[self] , identifier[size] , identifier[create] = keyword[True] ):
literal[string]
keyword[if] identifier[self] . identifier[_thumbnails] keyword[is] keyword[None] :
identifier[self] . identifier[_refresh_cache] ()
identifier[thumbnail] = identifier[self] . identifier[_thumbnails] . identifier[get] ( identifier[size] )
keyword[if] identifier[thumbnail] keyword[is] keyword[None] :
identifier[thumbnail] = identifier[images] . identifier[get] ( identifier[self] . identifier[source_image] . identifier[name] , identifier[size] ,
identifier[self] . identifier[metadata_backend] , identifier[self] . identifier[storage] )
keyword[if] identifier[thumbnail] keyword[is] keyword[None] :
identifier[thumbnail] = identifier[self] . identifier[create] ( identifier[size] )
identifier[self] . identifier[_thumbnails] [ identifier[size] ]= identifier[thumbnail]
keyword[return] identifier[thumbnail] | def get(self, size, create=True):
"""
Returns a Thumbnail instance.
First check whether thumbnail is already cached. If it doesn't:
1. Try to fetch the thumbnail
2. Create thumbnail if it's not present
3. Cache the thumbnail for future use
"""
if self._thumbnails is None:
self._refresh_cache() # depends on [control=['if'], data=[]]
thumbnail = self._thumbnails.get(size)
if thumbnail is None:
thumbnail = images.get(self.source_image.name, size, self.metadata_backend, self.storage)
if thumbnail is None:
thumbnail = self.create(size) # depends on [control=['if'], data=['thumbnail']]
self._thumbnails[size] = thumbnail # depends on [control=['if'], data=['thumbnail']]
return thumbnail |
def export_modifications(self):
"""
Returns model modifications.
"""
result = {}
for key, value in self.__modified_data__.items():
try:
result[key] = value.export_data()
except AttributeError:
result[key] = value
for key, value in self.__original_data__.items():
if key in result.keys() or key in self.__deleted_fields__:
continue
try:
if not value.is_modified():
continue
modifications = value.export_modifications()
except AttributeError:
continue
try:
result.update({'{}.{}'.format(key, f): v for f, v in modifications.items()})
except AttributeError:
result[key] = modifications
return result | def function[export_modifications, parameter[self]]:
constant[
Returns model modifications.
]
variable[result] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b0acbca0>, <ast.Name object at 0x7da1b0aca470>]]] in starred[call[name[self].__modified_data__.items, parameter[]]] begin[:]
<ast.Try object at 0x7da1b0ac8880>
for taget[tuple[[<ast.Name object at 0x7da1b0ac9f00>, <ast.Name object at 0x7da1b0ac8580>]]] in starred[call[name[self].__original_data__.items, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da1b0acbbb0> begin[:]
continue
<ast.Try object at 0x7da1b0ac8220>
<ast.Try object at 0x7da1b0ac81c0>
return[name[result]] | keyword[def] identifier[export_modifications] ( identifier[self] ):
literal[string]
identifier[result] ={}
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[self] . identifier[__modified_data__] . identifier[items] ():
keyword[try] :
identifier[result] [ identifier[key] ]= identifier[value] . identifier[export_data] ()
keyword[except] identifier[AttributeError] :
identifier[result] [ identifier[key] ]= identifier[value]
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[self] . identifier[__original_data__] . identifier[items] ():
keyword[if] identifier[key] keyword[in] identifier[result] . identifier[keys] () keyword[or] identifier[key] keyword[in] identifier[self] . identifier[__deleted_fields__] :
keyword[continue]
keyword[try] :
keyword[if] keyword[not] identifier[value] . identifier[is_modified] ():
keyword[continue]
identifier[modifications] = identifier[value] . identifier[export_modifications] ()
keyword[except] identifier[AttributeError] :
keyword[continue]
keyword[try] :
identifier[result] . identifier[update] ({ literal[string] . identifier[format] ( identifier[key] , identifier[f] ): identifier[v] keyword[for] identifier[f] , identifier[v] keyword[in] identifier[modifications] . identifier[items] ()})
keyword[except] identifier[AttributeError] :
identifier[result] [ identifier[key] ]= identifier[modifications]
keyword[return] identifier[result] | def export_modifications(self):
"""
Returns model modifications.
"""
result = {}
for (key, value) in self.__modified_data__.items():
try:
result[key] = value.export_data() # depends on [control=['try'], data=[]]
except AttributeError:
result[key] = value # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]]
for (key, value) in self.__original_data__.items():
if key in result.keys() or key in self.__deleted_fields__:
continue # depends on [control=['if'], data=[]]
try:
if not value.is_modified():
continue # depends on [control=['if'], data=[]]
modifications = value.export_modifications() # depends on [control=['try'], data=[]]
except AttributeError:
continue # depends on [control=['except'], data=[]]
try:
result.update({'{}.{}'.format(key, f): v for (f, v) in modifications.items()}) # depends on [control=['try'], data=[]]
except AttributeError:
result[key] = modifications # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]]
return result |
def is_internal_attribute(obj, attr):
"""Test if the attribute given is an internal python attribute. For
example this function returns `True` for the `func_code` attribute of
python objects. This is useful if the environment method
:meth:`~SandboxedEnvironment.is_safe_attribute` is overridden.
>>> from jinja2.sandbox import is_internal_attribute
>>> is_internal_attribute(str, "mro")
True
>>> is_internal_attribute(str, "upper")
False
"""
if isinstance(obj, types.FunctionType):
if attr in UNSAFE_FUNCTION_ATTRIBUTES:
return True
elif isinstance(obj, types.MethodType):
if attr in UNSAFE_FUNCTION_ATTRIBUTES or \
attr in UNSAFE_METHOD_ATTRIBUTES:
return True
elif isinstance(obj, type):
if attr == 'mro':
return True
elif isinstance(obj, (types.CodeType, types.TracebackType, types.FrameType)):
return True
elif isinstance(obj, types.GeneratorType):
if attr in UNSAFE_GENERATOR_ATTRIBUTES:
return True
elif hasattr(types, 'CoroutineType') and isinstance(obj, types.CoroutineType):
if attr in UNSAFE_COROUTINE_ATTRIBUTES:
return True
elif hasattr(types, 'AsyncGeneratorType') and isinstance(obj, types.AsyncGeneratorType):
if attr in UNSAFE_ASYNC_GENERATOR_ATTRIBUTES:
return True
return attr.startswith('__') | def function[is_internal_attribute, parameter[obj, attr]]:
constant[Test if the attribute given is an internal python attribute. For
example this function returns `True` for the `func_code` attribute of
python objects. This is useful if the environment method
:meth:`~SandboxedEnvironment.is_safe_attribute` is overridden.
>>> from jinja2.sandbox import is_internal_attribute
>>> is_internal_attribute(str, "mro")
True
>>> is_internal_attribute(str, "upper")
False
]
if call[name[isinstance], parameter[name[obj], name[types].FunctionType]] begin[:]
if compare[name[attr] in name[UNSAFE_FUNCTION_ATTRIBUTES]] begin[:]
return[constant[True]]
return[call[name[attr].startswith, parameter[constant[__]]]] | keyword[def] identifier[is_internal_attribute] ( identifier[obj] , identifier[attr] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[types] . identifier[FunctionType] ):
keyword[if] identifier[attr] keyword[in] identifier[UNSAFE_FUNCTION_ATTRIBUTES] :
keyword[return] keyword[True]
keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[types] . identifier[MethodType] ):
keyword[if] identifier[attr] keyword[in] identifier[UNSAFE_FUNCTION_ATTRIBUTES] keyword[or] identifier[attr] keyword[in] identifier[UNSAFE_METHOD_ATTRIBUTES] :
keyword[return] keyword[True]
keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[type] ):
keyword[if] identifier[attr] == literal[string] :
keyword[return] keyword[True]
keyword[elif] identifier[isinstance] ( identifier[obj] ,( identifier[types] . identifier[CodeType] , identifier[types] . identifier[TracebackType] , identifier[types] . identifier[FrameType] )):
keyword[return] keyword[True]
keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[types] . identifier[GeneratorType] ):
keyword[if] identifier[attr] keyword[in] identifier[UNSAFE_GENERATOR_ATTRIBUTES] :
keyword[return] keyword[True]
keyword[elif] identifier[hasattr] ( identifier[types] , literal[string] ) keyword[and] identifier[isinstance] ( identifier[obj] , identifier[types] . identifier[CoroutineType] ):
keyword[if] identifier[attr] keyword[in] identifier[UNSAFE_COROUTINE_ATTRIBUTES] :
keyword[return] keyword[True]
keyword[elif] identifier[hasattr] ( identifier[types] , literal[string] ) keyword[and] identifier[isinstance] ( identifier[obj] , identifier[types] . identifier[AsyncGeneratorType] ):
keyword[if] identifier[attr] keyword[in] identifier[UNSAFE_ASYNC_GENERATOR_ATTRIBUTES] :
keyword[return] keyword[True]
keyword[return] identifier[attr] . identifier[startswith] ( literal[string] ) | def is_internal_attribute(obj, attr):
"""Test if the attribute given is an internal python attribute. For
example this function returns `True` for the `func_code` attribute of
python objects. This is useful if the environment method
:meth:`~SandboxedEnvironment.is_safe_attribute` is overridden.
>>> from jinja2.sandbox import is_internal_attribute
>>> is_internal_attribute(str, "mro")
True
>>> is_internal_attribute(str, "upper")
False
"""
if isinstance(obj, types.FunctionType):
if attr in UNSAFE_FUNCTION_ATTRIBUTES:
return True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif isinstance(obj, types.MethodType):
if attr in UNSAFE_FUNCTION_ATTRIBUTES or attr in UNSAFE_METHOD_ATTRIBUTES:
return True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif isinstance(obj, type):
if attr == 'mro':
return True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif isinstance(obj, (types.CodeType, types.TracebackType, types.FrameType)):
return True # depends on [control=['if'], data=[]]
elif isinstance(obj, types.GeneratorType):
if attr in UNSAFE_GENERATOR_ATTRIBUTES:
return True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif hasattr(types, 'CoroutineType') and isinstance(obj, types.CoroutineType):
if attr in UNSAFE_COROUTINE_ATTRIBUTES:
return True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif hasattr(types, 'AsyncGeneratorType') and isinstance(obj, types.AsyncGeneratorType):
if attr in UNSAFE_ASYNC_GENERATOR_ATTRIBUTES:
return True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return attr.startswith('__') |
def _process(self, obj, key=None):
"""
Generates a categorical 2D aggregate by inserting NaNs at all
cross-product locations that do not already have a value assigned.
Returns a 2D gridded Dataset object.
"""
if isinstance(obj, Dataset) and obj.interface.gridded:
return obj
elif obj.ndims > 2:
raise ValueError("Cannot aggregate more than two dimensions")
elif len(obj.dimensions()) < 3:
raise ValueError("Must have at two dimensions to aggregate over"
"and one value dimension to aggregate on.")
dtype = 'dataframe' if pd else 'dictionary'
obj = Dataset(obj, datatype=[dtype])
xcoords, ycoords = self._get_coords(obj)
return self._aggregate_dataset(obj, xcoords, ycoords) | def function[_process, parameter[self, obj, key]]:
constant[
Generates a categorical 2D aggregate by inserting NaNs at all
cross-product locations that do not already have a value assigned.
Returns a 2D gridded Dataset object.
]
if <ast.BoolOp object at 0x7da20c6aaa10> begin[:]
return[name[obj]]
variable[dtype] assign[=] <ast.IfExp object at 0x7da18dc058a0>
variable[obj] assign[=] call[name[Dataset], parameter[name[obj]]]
<ast.Tuple object at 0x7da18dc04910> assign[=] call[name[self]._get_coords, parameter[name[obj]]]
return[call[name[self]._aggregate_dataset, parameter[name[obj], name[xcoords], name[ycoords]]]] | keyword[def] identifier[_process] ( identifier[self] , identifier[obj] , identifier[key] = keyword[None] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[Dataset] ) keyword[and] identifier[obj] . identifier[interface] . identifier[gridded] :
keyword[return] identifier[obj]
keyword[elif] identifier[obj] . identifier[ndims] > literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[elif] identifier[len] ( identifier[obj] . identifier[dimensions] ())< literal[int] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
identifier[dtype] = literal[string] keyword[if] identifier[pd] keyword[else] literal[string]
identifier[obj] = identifier[Dataset] ( identifier[obj] , identifier[datatype] =[ identifier[dtype] ])
identifier[xcoords] , identifier[ycoords] = identifier[self] . identifier[_get_coords] ( identifier[obj] )
keyword[return] identifier[self] . identifier[_aggregate_dataset] ( identifier[obj] , identifier[xcoords] , identifier[ycoords] ) | def _process(self, obj, key=None):
"""
Generates a categorical 2D aggregate by inserting NaNs at all
cross-product locations that do not already have a value assigned.
Returns a 2D gridded Dataset object.
"""
if isinstance(obj, Dataset) and obj.interface.gridded:
return obj # depends on [control=['if'], data=[]]
elif obj.ndims > 2:
raise ValueError('Cannot aggregate more than two dimensions') # depends on [control=['if'], data=[]]
elif len(obj.dimensions()) < 3:
raise ValueError('Must have at two dimensions to aggregate overand one value dimension to aggregate on.') # depends on [control=['if'], data=[]]
dtype = 'dataframe' if pd else 'dictionary'
obj = Dataset(obj, datatype=[dtype])
(xcoords, ycoords) = self._get_coords(obj)
return self._aggregate_dataset(obj, xcoords, ycoords) |
def report_view(title, form_type=None):
    ''' Decorator that converts a report view function into something that
    displays a Report.
    Arguments:
        title (str):
            The title of the report.
        form_type (Optional[forms.Form]):
            A form class that can make this report display things. If not
            supplied, no form will be displayed.
    '''
    def _report(view):
        # Wrap the raw view in a ReportView, restrict it to staff users,
        # and keep the original view's metadata via functools.wraps.
        wrapped = ReportView(view, title, form_type)
        wrapped = user_passes_test(views._staff_only)(wrapped)
        wrapped = wraps(view)(wrapped)
        # Register the wrapped view in the module-level report index.
        _all_report_views.append(wrapped)
        return wrapped
    return _report
constant[ Decorator that converts a report view function into something that
displays a Report.
Arguments:
title (str):
The title of the report.
form_type (Optional[forms.Form]):
A form class that can make this report display things. If not
supplied, no form will be displayed.
]
def function[_report, parameter[view]]:
variable[report_view] assign[=] call[name[ReportView], parameter[name[view], name[title], name[form_type]]]
variable[report_view] assign[=] call[call[name[user_passes_test], parameter[name[views]._staff_only]], parameter[name[report_view]]]
variable[report_view] assign[=] call[call[name[wraps], parameter[name[view]]], parameter[name[report_view]]]
call[name[_all_report_views].append, parameter[name[report_view]]]
return[name[report_view]]
return[name[_report]] | keyword[def] identifier[report_view] ( identifier[title] , identifier[form_type] = keyword[None] ):
literal[string]
keyword[def] identifier[_report] ( identifier[view] ):
identifier[report_view] = identifier[ReportView] ( identifier[view] , identifier[title] , identifier[form_type] )
identifier[report_view] = identifier[user_passes_test] ( identifier[views] . identifier[_staff_only] )( identifier[report_view] )
identifier[report_view] = identifier[wraps] ( identifier[view] )( identifier[report_view] )
identifier[_all_report_views] . identifier[append] ( identifier[report_view] )
keyword[return] identifier[report_view]
keyword[return] identifier[_report] | def report_view(title, form_type=None):
""" Decorator that converts a report view function into something that
displays a Report.
Arguments:
title (str):
The title of the report.
form_type (Optional[forms.Form]):
A form class that can make this report display things. If not
supplied, no form will be displayed.
"""
# Create & return view
def _report(view):
report_view = ReportView(view, title, form_type)
report_view = user_passes_test(views._staff_only)(report_view)
report_view = wraps(view)(report_view)
# Add this report to the list of reports.
_all_report_views.append(report_view)
return report_view
return _report |
def is_stdlib_name(modname):
    """Return :data:`True` if `modname` appears to come from the standard
    library.
    """
    # ``imp.is_builtin(modname) != 0`` is equivalent to membership in
    # ``sys.builtin_module_names``; the ``imp`` module is deprecated and was
    # removed in Python 3.12, so use the ``sys`` attribute directly.
    if modname in sys.builtin_module_names:
        return True
    module = sys.modules.get(modname)
    if module is None:
        # Not imported yet, so we cannot inspect its file location.
        return False
    # six installs crap with no __file__
    modpath = os.path.abspath(getattr(module, '__file__', ''))
    return is_stdlib_path(modpath)
constant[Return :data:`True` if `modname` appears to come from the standard
library.
]
if compare[call[name[imp].is_builtin, parameter[name[modname]]] not_equal[!=] constant[0]] begin[:]
return[constant[True]]
variable[module] assign[=] call[name[sys].modules.get, parameter[name[modname]]]
if compare[name[module] is constant[None]] begin[:]
return[constant[False]]
variable[modpath] assign[=] call[name[os].path.abspath, parameter[call[name[getattr], parameter[name[module], constant[__file__], constant[]]]]]
return[call[name[is_stdlib_path], parameter[name[modpath]]]] | keyword[def] identifier[is_stdlib_name] ( identifier[modname] ):
literal[string]
keyword[if] identifier[imp] . identifier[is_builtin] ( identifier[modname] )!= literal[int] :
keyword[return] keyword[True]
identifier[module] = identifier[sys] . identifier[modules] . identifier[get] ( identifier[modname] )
keyword[if] identifier[module] keyword[is] keyword[None] :
keyword[return] keyword[False]
identifier[modpath] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[getattr] ( identifier[module] , literal[string] , literal[string] ))
keyword[return] identifier[is_stdlib_path] ( identifier[modpath] ) | def is_stdlib_name(modname):
"""Return :data:`True` if `modname` appears to come from the standard
library.
"""
if imp.is_builtin(modname) != 0:
return True # depends on [control=['if'], data=[]]
module = sys.modules.get(modname)
if module is None:
return False # depends on [control=['if'], data=[]]
# six installs crap with no __file__
modpath = os.path.abspath(getattr(module, '__file__', ''))
return is_stdlib_path(modpath) |
def get_context_data(self, **kwargs):
    """Extend the template context with ``breeding_type`` set to "Active"."""
    context = super(BreedingList, self).get_context_data(**kwargs)
    context.update(breeding_type="Active")
    return context
constant[This adds into the context of breeding_type and sets it to Active.]
variable[context] assign[=] call[call[name[super], parameter[name[BreedingList], name[self]]].get_context_data, parameter[]]
call[name[context]][constant[breeding_type]] assign[=] constant[Active]
return[name[context]] | keyword[def] identifier[get_context_data] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[context] = identifier[super] ( identifier[BreedingList] , identifier[self] ). identifier[get_context_data] (** identifier[kwargs] )
identifier[context] [ literal[string] ]= literal[string]
keyword[return] identifier[context] | def get_context_data(self, **kwargs):
"""This adds into the context of breeding_type and sets it to Active."""
context = super(BreedingList, self).get_context_data(**kwargs)
context['breeding_type'] = 'Active'
return context |
def _tf_load_model(sess, model_dir):
    """Load a tf model from model_dir, and return input/output alias maps."""
    meta_graph = tf.saved_model.loader.load(
        sess=sess,
        tags=[tf.saved_model.tag_constants.SERVING],
        export_dir=model_dir)
    serving_signature = meta_graph.signature_def[
        signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]

    def _alias_map(tensor_infos):
        # Map each friendly alias to the underlying graph tensor name.
        return {alias: info.name for alias, info in tensor_infos.items()}

    return _alias_map(serving_signature.inputs), _alias_map(serving_signature.outputs)
constant[Load a tf model from model_dir, and return input/output alias maps.]
variable[meta_graph_pb] assign[=] call[name[tf].saved_model.loader.load, parameter[]]
variable[signature] assign[=] call[name[meta_graph_pb].signature_def][name[signature_constants].DEFAULT_SERVING_SIGNATURE_DEF_KEY]
variable[input_alias_map] assign[=] <ast.DictComp object at 0x7da204962ec0>
variable[output_alias_map] assign[=] <ast.DictComp object at 0x7da204960190>
return[tuple[[<ast.Name object at 0x7da2049621d0>, <ast.Name object at 0x7da204962680>]]] | keyword[def] identifier[_tf_load_model] ( identifier[sess] , identifier[model_dir] ):
literal[string]
identifier[meta_graph_pb] = identifier[tf] . identifier[saved_model] . identifier[loader] . identifier[load] (
identifier[sess] = identifier[sess] ,
identifier[tags] =[ identifier[tf] . identifier[saved_model] . identifier[tag_constants] . identifier[SERVING] ],
identifier[export_dir] = identifier[model_dir] )
identifier[signature] = identifier[meta_graph_pb] . identifier[signature_def] [ identifier[signature_constants] . identifier[DEFAULT_SERVING_SIGNATURE_DEF_KEY] ]
identifier[input_alias_map] ={ identifier[friendly_name] : identifier[tensor_info_proto] . identifier[name]
keyword[for] ( identifier[friendly_name] , identifier[tensor_info_proto] ) keyword[in] identifier[signature] . identifier[inputs] . identifier[items] ()}
identifier[output_alias_map] ={ identifier[friendly_name] : identifier[tensor_info_proto] . identifier[name]
keyword[for] ( identifier[friendly_name] , identifier[tensor_info_proto] ) keyword[in] identifier[signature] . identifier[outputs] . identifier[items] ()}
keyword[return] identifier[input_alias_map] , identifier[output_alias_map] | def _tf_load_model(sess, model_dir):
"""Load a tf model from model_dir, and return input/output alias maps."""
meta_graph_pb = tf.saved_model.loader.load(sess=sess, tags=[tf.saved_model.tag_constants.SERVING], export_dir=model_dir)
signature = meta_graph_pb.signature_def[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
input_alias_map = {friendly_name: tensor_info_proto.name for (friendly_name, tensor_info_proto) in signature.inputs.items()}
output_alias_map = {friendly_name: tensor_info_proto.name for (friendly_name, tensor_info_proto) in signature.outputs.items()}
return (input_alias_map, output_alias_map) |
async def on_raw_375(self, message):
    """ Start message of the day. """
    await self._registration_completed(message)
    first_line = message.params[1]
    self.motd = first_line + '\n'
literal[string]
keyword[await] identifier[self] . identifier[_registration_completed] ( identifier[message] )
identifier[self] . identifier[motd] = identifier[message] . identifier[params] [ literal[int] ]+ literal[string] | async def on_raw_375(self, message):
""" Start message of the day. """
await self._registration_completed(message)
self.motd = message.params[1] + '\n' |
def connect(self, port=None):
    ''' Open a serial connection to the arm.
    :param port: serial port to use; when None, the port is discovered
        automatically via ``search_for_port``.
    :returns: the port that was opened.
    :raises ArmException: if no port was found or the port failed to open.
    '''
    if port is None:
        self.port = search_for_port('/dev/ttyUSB*', 'ROBOFORTH\r\n',
                                    'ROBOFORTH')
    else:
        self.port = port
    if self.port is None:
        raise ArmException('ST Robotics connection not found.')
    # Bug fix: open ``self.port`` rather than the ``port`` argument --
    # on the auto-search path the argument is still None.
    self.ser = r12_serial_port(self.port)
    if not self.ser.isOpen():
        self.ser.open()
    if not self.ser.isOpen():
        raise ArmException('Failed to open serial port. Exiting.')
    return self.port
constant[ Open a serial connection to the arm. ]
if compare[name[port] is constant[None]] begin[:]
name[self].port assign[=] call[name[search_for_port], parameter[constant[/dev/ttyUSB*], constant[ROBOFORTH
], constant[ROBOFORTH]]]
if compare[name[self].port is constant[None]] begin[:]
<ast.Raise object at 0x7da1b20f98d0>
name[self].ser assign[=] call[name[r12_serial_port], parameter[name[port]]]
if <ast.UnaryOp object at 0x7da1b20f9a50> begin[:]
call[name[self].ser.open, parameter[]]
if <ast.UnaryOp object at 0x7da1b1f0c1f0> begin[:]
<ast.Raise object at 0x7da1b1f0cee0>
return[name[self].port] | keyword[def] identifier[connect] ( identifier[self] , identifier[port] = keyword[None] ):
literal[string]
keyword[if] identifier[port] keyword[is] keyword[None] :
identifier[self] . identifier[port] = identifier[search_for_port] ( literal[string] , literal[string] ,
literal[string] )
keyword[else] :
identifier[self] . identifier[port] = identifier[port]
keyword[if] identifier[self] . identifier[port] keyword[is] keyword[None] :
keyword[raise] identifier[ArmException] ( literal[string] )
identifier[self] . identifier[ser] = identifier[r12_serial_port] ( identifier[port] )
keyword[if] keyword[not] identifier[self] . identifier[ser] . identifier[isOpen] ():
identifier[self] . identifier[ser] . identifier[open] ()
keyword[if] keyword[not] identifier[self] . identifier[ser] . identifier[isOpen] ():
keyword[raise] identifier[ArmException] ( literal[string] )
keyword[return] identifier[self] . identifier[port] | def connect(self, port=None):
""" Open a serial connection to the arm. """
if port is None:
self.port = search_for_port('/dev/ttyUSB*', 'ROBOFORTH\r\n', 'ROBOFORTH') # depends on [control=['if'], data=[]]
else:
self.port = port
if self.port is None:
raise ArmException('ST Robotics connection not found.') # depends on [control=['if'], data=[]]
self.ser = r12_serial_port(port)
if not self.ser.isOpen():
self.ser.open() # depends on [control=['if'], data=[]]
if not self.ser.isOpen():
raise ArmException('Failed to open serial port. Exiting.') # depends on [control=['if'], data=[]]
return self.port |
def from_json(cls, json_doc):
    """
    Create and return a new Session Token based on the contents
    of a JSON document.
    :type json_doc: str
    :param json_doc: A string containing a JSON document with a
        previously saved Credentials object.
    """
    attrs = json.loads(json_doc)
    token = cls()
    # ``vars(token)`` is the instance __dict__, so this restores every
    # saved attribute directly.
    vars(token).update(attrs)
    return token
constant[
Create and return a new Session Token based on the contents
of a JSON document.
:type json_doc: str
:param json_doc: A string containing a JSON document with a
previously saved Credentials object.
]
variable[d] assign[=] call[name[json].loads, parameter[name[json_doc]]]
variable[token] assign[=] call[name[cls], parameter[]]
call[name[token].__dict__.update, parameter[name[d]]]
return[name[token]] | keyword[def] identifier[from_json] ( identifier[cls] , identifier[json_doc] ):
literal[string]
identifier[d] = identifier[json] . identifier[loads] ( identifier[json_doc] )
identifier[token] = identifier[cls] ()
identifier[token] . identifier[__dict__] . identifier[update] ( identifier[d] )
keyword[return] identifier[token] | def from_json(cls, json_doc):
"""
Create and return a new Session Token based on the contents
of a JSON document.
:type json_doc: str
:param json_doc: A string containing a JSON document with a
previously saved Credentials object.
"""
d = json.loads(json_doc)
token = cls()
token.__dict__.update(d)
return token |
def _get_bmdl_ratio(self, models):
"""Return BMDL ratio in list of models."""
bmdls = [model.output["BMDL"] for model in models if model.output["BMDL"] > 0]
return max(bmdls) / min(bmdls) if len(bmdls) > 0 else 0 | def function[_get_bmdl_ratio, parameter[self, models]]:
constant[Return BMDL ratio in list of models.]
variable[bmdls] assign[=] <ast.ListComp object at 0x7da1b1f7c880>
return[<ast.IfExp object at 0x7da1b1f7c0a0>] | keyword[def] identifier[_get_bmdl_ratio] ( identifier[self] , identifier[models] ):
literal[string]
identifier[bmdls] =[ identifier[model] . identifier[output] [ literal[string] ] keyword[for] identifier[model] keyword[in] identifier[models] keyword[if] identifier[model] . identifier[output] [ literal[string] ]> literal[int] ]
keyword[return] identifier[max] ( identifier[bmdls] )/ identifier[min] ( identifier[bmdls] ) keyword[if] identifier[len] ( identifier[bmdls] )> literal[int] keyword[else] literal[int] | def _get_bmdl_ratio(self, models):
"""Return BMDL ratio in list of models."""
bmdls = [model.output['BMDL'] for model in models if model.output['BMDL'] > 0]
return max(bmdls) / min(bmdls) if len(bmdls) > 0 else 0 |
def check_socket(host, port):
    """Checks if port is open on host. This is used to check if the
    Xenon-GRPC server is running."""
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    with closing(probe):
        # connect_ex returns 0 on success instead of raising.
        return probe.connect_ex((host, port)) == 0
constant[Checks if port is open on host. This is used to check if the
Xenon-GRPC server is running.]
with call[name[closing], parameter[call[name[socket].socket, parameter[name[socket].AF_INET, name[socket].SOCK_STREAM]]]] begin[:]
return[compare[call[name[sock].connect_ex, parameter[tuple[[<ast.Name object at 0x7da1b1867700>, <ast.Name object at 0x7da1b1865210>]]]] equal[==] constant[0]]] | keyword[def] identifier[check_socket] ( identifier[host] , identifier[port] ):
literal[string]
keyword[with] identifier[closing] ( identifier[socket] . identifier[socket] ( identifier[socket] . identifier[AF_INET] , identifier[socket] . identifier[SOCK_STREAM] )) keyword[as] identifier[sock] :
keyword[return] identifier[sock] . identifier[connect_ex] (( identifier[host] , identifier[port] ))== literal[int] | def check_socket(host, port):
"""Checks if port is open on host. This is used to check if the
Xenon-GRPC server is running."""
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
return sock.connect_ex((host, port)) == 0 # depends on [control=['with'], data=['sock']] |
def fill(self, value=b'\xff'):
    """Fill all empty space between segments with given value `value`.
    """
    prev_end = None
    padding = []
    # First pass: compute the gap between consecutive segments and build
    # a fill segment for each one.
    for start, data in self._segments:
        if prev_end is not None:
            gap = start - prev_end
            gap_words = gap // self.word_size_bytes
            padding.append(_Segment(
                prev_end,
                prev_end + gap,
                value * gap_words,
                self.word_size_bytes))
        prev_end = start + len(data)
    # Second pass: insert the fill segments (done after iteration so the
    # container is not mutated while being walked).
    for segment in padding:
        self._segments.add(segment)
constant[Fill all empty space between segments with given value `value`.
]
variable[previous_segment_maximum_address] assign[=] constant[None]
variable[fill_segments] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da18c4cc670>, <ast.Name object at 0x7da18c4cfb80>]]] in starred[name[self]._segments] begin[:]
variable[maximum_address] assign[=] binary_operation[name[address] + call[name[len], parameter[name[data]]]]
if compare[name[previous_segment_maximum_address] is_not constant[None]] begin[:]
variable[fill_size] assign[=] binary_operation[name[address] - name[previous_segment_maximum_address]]
variable[fill_size_words] assign[=] binary_operation[name[fill_size] <ast.FloorDiv object at 0x7da2590d6bc0> name[self].word_size_bytes]
call[name[fill_segments].append, parameter[call[name[_Segment], parameter[name[previous_segment_maximum_address], binary_operation[name[previous_segment_maximum_address] + name[fill_size]], binary_operation[name[value] * name[fill_size_words]], name[self].word_size_bytes]]]]
variable[previous_segment_maximum_address] assign[=] name[maximum_address]
for taget[name[segment]] in starred[name[fill_segments]] begin[:]
call[name[self]._segments.add, parameter[name[segment]]] | keyword[def] identifier[fill] ( identifier[self] , identifier[value] = literal[string] ):
literal[string]
identifier[previous_segment_maximum_address] = keyword[None]
identifier[fill_segments] =[]
keyword[for] identifier[address] , identifier[data] keyword[in] identifier[self] . identifier[_segments] :
identifier[maximum_address] = identifier[address] + identifier[len] ( identifier[data] )
keyword[if] identifier[previous_segment_maximum_address] keyword[is] keyword[not] keyword[None] :
identifier[fill_size] = identifier[address] - identifier[previous_segment_maximum_address]
identifier[fill_size_words] = identifier[fill_size] // identifier[self] . identifier[word_size_bytes]
identifier[fill_segments] . identifier[append] ( identifier[_Segment] (
identifier[previous_segment_maximum_address] ,
identifier[previous_segment_maximum_address] + identifier[fill_size] ,
identifier[value] * identifier[fill_size_words] ,
identifier[self] . identifier[word_size_bytes] ))
identifier[previous_segment_maximum_address] = identifier[maximum_address]
keyword[for] identifier[segment] keyword[in] identifier[fill_segments] :
identifier[self] . identifier[_segments] . identifier[add] ( identifier[segment] ) | def fill(self, value=b'\xff'):
"""Fill all empty space between segments with given value `value`.
"""
previous_segment_maximum_address = None
fill_segments = []
for (address, data) in self._segments:
maximum_address = address + len(data)
if previous_segment_maximum_address is not None:
fill_size = address - previous_segment_maximum_address
fill_size_words = fill_size // self.word_size_bytes
fill_segments.append(_Segment(previous_segment_maximum_address, previous_segment_maximum_address + fill_size, value * fill_size_words, self.word_size_bytes)) # depends on [control=['if'], data=['previous_segment_maximum_address']]
previous_segment_maximum_address = maximum_address # depends on [control=['for'], data=[]]
for segment in fill_segments:
self._segments.add(segment) # depends on [control=['for'], data=['segment']] |
def checkStatus(self):
    """Check account/service status.
    Returns:
        True: Success
        False: Failed
    """
    checkAccount()
    payload = {'userid': self.user_id,
               'useridx': self.useridx
               }
    resp = self.session.post(nurls['checkStatus'], data=payload)
    # The server answers with XML; pull the <message> element out of it.
    matcher = re.compile(r'\<message\>(?P<message>.+)\</message\>')
    status = matcher.search(resp.text).group('message')
    return status == 'success'
constant[Check status
Args:
Returns:
True: Sucess
False: Failed
]
call[name[checkAccount], parameter[]]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da204566bc0>, <ast.Constant object at 0x7da2045677c0>], [<ast.Attribute object at 0x7da204564640>, <ast.Attribute object at 0x7da204564220>]]
variable[r] assign[=] call[name[self].session.post, parameter[call[name[nurls]][constant[checkStatus]]]]
variable[p] assign[=] call[name[re].compile, parameter[constant[\<message\>(?P<message>.+)\</message\>]]]
variable[message] assign[=] call[call[name[p].search, parameter[name[r].text]].group, parameter[constant[message]]]
if compare[name[message] equal[==] constant[success]] begin[:]
return[constant[True]] | keyword[def] identifier[checkStatus] ( identifier[self] ):
literal[string]
identifier[checkAccount] ()
identifier[data] ={ literal[string] : identifier[self] . identifier[user_id] ,
literal[string] : identifier[self] . identifier[useridx]
}
identifier[r] = identifier[self] . identifier[session] . identifier[post] ( identifier[nurls] [ literal[string] ], identifier[data] = identifier[data] )
identifier[p] = identifier[re] . identifier[compile] ( literal[string] )
identifier[message] = identifier[p] . identifier[search] ( identifier[r] . identifier[text] ). identifier[group] ( literal[string] )
keyword[if] identifier[message] == literal[string] :
keyword[return] keyword[True]
keyword[else] :
keyword[return] keyword[False] | def checkStatus(self):
"""Check status
Args:
Returns:
True: Sucess
False: Failed
"""
checkAccount()
data = {'userid': self.user_id, 'useridx': self.useridx}
r = self.session.post(nurls['checkStatus'], data=data)
p = re.compile('\\<message\\>(?P<message>.+)\\</message\\>')
message = p.search(r.text).group('message')
if message == 'success':
return True # depends on [control=['if'], data=[]]
else:
return False |
def remover(self, id_interface):
    """Remove an interface by its identifier.
    :param id_interface: Interface identifier.
    :return: None
    :raise InterfaceNaoExisteError: Interface doesn't exist.
    :raise InterfaceError: Interface is linked to another interface.
    :raise InvalidParameterError: The interface identifier is invalid or none.
    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response.
    """
    # Validate before issuing the request so a bad id never reaches the API.
    if not is_valid_int_param(id_interface):
        raise InvalidParameterError(
            u'Interface id is invalid or was not informed.')
    response_code, response_xml = self.submit(
        None, 'DELETE', 'interface/' + str(id_interface) + '/')
    return self.response(response_code, response_xml)
constant[Remove an interface by its identifier.
:param id_interface: Interface identifier.
:return: None
:raise InterfaceNaoExisteError: Interface doesn't exist.
:raise InterfaceError: Interface is linked to another interface.
:raise InvalidParameterError: The interface identifier is invalid or none.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
]
if <ast.UnaryOp object at 0x7da1b2347190> begin[:]
<ast.Raise object at 0x7da1b23447c0>
variable[url] assign[=] binary_operation[binary_operation[constant[interface/] + call[name[str], parameter[name[id_interface]]]] + constant[/]]
<ast.Tuple object at 0x7da1b23444f0> assign[=] call[name[self].submit, parameter[constant[None], constant[DELETE], name[url]]]
return[call[name[self].response, parameter[name[code], name[xml]]]] | keyword[def] identifier[remover] ( identifier[self] , identifier[id_interface] ):
literal[string]
keyword[if] keyword[not] identifier[is_valid_int_param] ( identifier[id_interface] ):
keyword[raise] identifier[InvalidParameterError] (
literal[string] )
identifier[url] = literal[string] + identifier[str] ( identifier[id_interface] )+ literal[string]
identifier[code] , identifier[xml] = identifier[self] . identifier[submit] ( keyword[None] , literal[string] , identifier[url] )
keyword[return] identifier[self] . identifier[response] ( identifier[code] , identifier[xml] ) | def remover(self, id_interface):
"""Remove an interface by its identifier.
:param id_interface: Interface identifier.
:return: None
:raise InterfaceNaoExisteError: Interface doesn't exist.
:raise InterfaceError: Interface is linked to another interface.
:raise InvalidParameterError: The interface identifier is invalid or none.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
if not is_valid_int_param(id_interface):
raise InvalidParameterError(u'Interface id is invalid or was not informed.') # depends on [control=['if'], data=[]]
url = 'interface/' + str(id_interface) + '/'
(code, xml) = self.submit(None, 'DELETE', url)
return self.response(code, xml) |
def rename(self, file_id, new_filename, session=None):
    """Renames the stored file with the specified file_id.
    For example::
      my_db = MongoClient().test
      fs = GridFSBucket(my_db)
      # Get _id of file to rename
      file_id = fs.upload_from_stream("test_file", "data I want to store!")
      fs.rename(file_id, "new_test_name")
    Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists.
    :Parameters:
      - `file_id`: The _id of the file to be renamed.
      - `new_filename`: The new name of the file.
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession`
    .. versionchanged:: 3.6
       Added ``session`` parameter.
    """
    result = self._files.update_one({"_id": file_id},
                                    {"$set": {"filename": new_filename}},
                                    session=session)
    if not result.matched_count:
        # Bug fix: use %s for file_id -- it is usually an ObjectId, and the
        # previous %i conversion raised TypeError here, masking NoFile.
        raise NoFile("no files could be renamed %r because none "
                     "matched file_id %s" % (new_filename, file_id))
constant[Renames the stored file with the specified file_id.
For example::
my_db = MongoClient().test
fs = GridFSBucket(my_db)
# Get _id of file to rename
file_id = fs.upload_from_stream("test_file", "data I want to store!")
fs.rename(file_id, "new_test_name")
Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists.
:Parameters:
- `file_id`: The _id of the file to be renamed.
- `new_filename`: The new name of the file.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`
.. versionchanged:: 3.6
Added ``session`` parameter.
]
variable[result] assign[=] call[name[self]._files.update_one, parameter[dictionary[[<ast.Constant object at 0x7da20e9b3910>], [<ast.Name object at 0x7da20e9b0c40>]], dictionary[[<ast.Constant object at 0x7da20e9b2a10>], [<ast.Dict object at 0x7da20e9b30d0>]]]]
if <ast.UnaryOp object at 0x7da20e9b1150> begin[:]
<ast.Raise object at 0x7da20e9b37c0> | keyword[def] identifier[rename] ( identifier[self] , identifier[file_id] , identifier[new_filename] , identifier[session] = keyword[None] ):
literal[string]
identifier[result] = identifier[self] . identifier[_files] . identifier[update_one] ({ literal[string] : identifier[file_id] },
{ literal[string] :{ literal[string] : identifier[new_filename] }},
identifier[session] = identifier[session] )
keyword[if] keyword[not] identifier[result] . identifier[matched_count] :
keyword[raise] identifier[NoFile] ( literal[string]
literal[string] %( identifier[new_filename] , identifier[file_id] )) | def rename(self, file_id, new_filename, session=None):
"""Renames the stored file with the specified file_id.
For example::
my_db = MongoClient().test
fs = GridFSBucket(my_db)
# Get _id of file to rename
file_id = fs.upload_from_stream("test_file", "data I want to store!")
fs.rename(file_id, "new_test_name")
Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists.
:Parameters:
- `file_id`: The _id of the file to be renamed.
- `new_filename`: The new name of the file.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`
.. versionchanged:: 3.6
Added ``session`` parameter.
"""
result = self._files.update_one({'_id': file_id}, {'$set': {'filename': new_filename}}, session=session)
if not result.matched_count:
raise NoFile('no files could be renamed %r because none matched file_id %i' % (new_filename, file_id)) # depends on [control=['if'], data=[]] |
def get_factors(self, node=None):
    """
    Returns all the factors containing the node. If node is not specified
    returns all the factors that have been added till now to the graph.
    Parameter
    ---------
    node: any hashable python object (optional)
        The node whose factor we want. If node is not specified
    Examples
    --------
    >>> from pgmpy.models import MarkovModel
    >>> from pgmpy.factors.discrete import DiscreteFactor
    >>> student = MarkovModel([('Alice', 'Bob'), ('Bob', 'Charles')])
    >>> factor1 = DiscreteFactor(['Alice', 'Bob'], cardinality=[2, 2],
    ...                          values=np.random.rand(4))
    >>> factor2 = DiscreteFactor(['Bob', 'Charles'], cardinality=[2, 3],
    ...                          values=np.ones(6))
    >>> student.add_factors(factor1,factor2)
    >>> student.get_factors()
    [<DiscreteFactor representing phi(Alice:2, Bob:2) at 0x7f8a0e9bf630>,
    <DiscreteFactor representing phi(Bob:2, Charles:3) at 0x7f8a0e9bf5f8>]
    >>> student.get_factors('Alice')
    [<DiscreteFactor representing phi(Alice:2, Bob:2) at 0x7f8a0e9bf630>]
    """
    # No node given: return every factor added so far.
    if not node:
        return self.factors
    if node not in self.nodes():
        raise ValueError('Node not present in the Undirected Graph')
    return [factor for factor in self.factors if node in factor.scope()]
constant[
Returns all the factors containing the node. If node is not specified
returns all the factors that have been added till now to the graph.
Parameter
---------
node: any hashable python object (optional)
The node whose factor we want. If node is not specified
Examples
--------
>>> from pgmpy.models import MarkovModel
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> student = MarkovModel([('Alice', 'Bob'), ('Bob', 'Charles')])
>>> factor1 = DiscreteFactor(['Alice', 'Bob'], cardinality=[2, 2],
... values=np.random.rand(4))
>>> factor2 = DiscreteFactor(['Bob', 'Charles'], cardinality=[2, 3],
... values=np.ones(6))
>>> student.add_factors(factor1,factor2)
>>> student.get_factors()
[<DiscreteFactor representing phi(Alice:2, Bob:2) at 0x7f8a0e9bf630>,
<DiscreteFactor representing phi(Bob:2, Charles:3) at 0x7f8a0e9bf5f8>]
>>> student.get_factors('Alice')
[<DiscreteFactor representing phi(Alice:2, Bob:2) at 0x7f8a0e9bf630>]
]
if name[node] begin[:]
if compare[name[node] <ast.NotIn object at 0x7da2590d7190> call[name[self].nodes, parameter[]]] begin[:]
<ast.Raise object at 0x7da18f00c280>
variable[node_factors] assign[=] list[[]]
for taget[name[factor]] in starred[name[self].factors] begin[:]
if compare[name[node] in call[name[factor].scope, parameter[]]] begin[:]
call[name[node_factors].append, parameter[name[factor]]]
return[name[node_factors]] | keyword[def] identifier[get_factors] ( identifier[self] , identifier[node] = keyword[None] ):
literal[string]
keyword[if] identifier[node] :
keyword[if] identifier[node] keyword[not] keyword[in] identifier[self] . identifier[nodes] ():
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[node_factors] =[]
keyword[for] identifier[factor] keyword[in] identifier[self] . identifier[factors] :
keyword[if] identifier[node] keyword[in] identifier[factor] . identifier[scope] ():
identifier[node_factors] . identifier[append] ( identifier[factor] )
keyword[return] identifier[node_factors]
keyword[else] :
keyword[return] identifier[self] . identifier[factors] | def get_factors(self, node=None):
"""
Returns all the factors containing the node. If node is not specified
returns all the factors that have been added till now to the graph.
Parameter
---------
node: any hashable python object (optional)
The node whose factor we want. If node is not specified
Examples
--------
>>> from pgmpy.models import MarkovModel
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> student = MarkovModel([('Alice', 'Bob'), ('Bob', 'Charles')])
>>> factor1 = DiscreteFactor(['Alice', 'Bob'], cardinality=[2, 2],
... values=np.random.rand(4))
>>> factor2 = DiscreteFactor(['Bob', 'Charles'], cardinality=[2, 3],
... values=np.ones(6))
>>> student.add_factors(factor1,factor2)
>>> student.get_factors()
[<DiscreteFactor representing phi(Alice:2, Bob:2) at 0x7f8a0e9bf630>,
<DiscreteFactor representing phi(Bob:2, Charles:3) at 0x7f8a0e9bf5f8>]
>>> student.get_factors('Alice')
[<DiscreteFactor representing phi(Alice:2, Bob:2) at 0x7f8a0e9bf630>]
"""
if node:
if node not in self.nodes():
raise ValueError('Node not present in the Undirected Graph') # depends on [control=['if'], data=[]]
node_factors = []
for factor in self.factors:
if node in factor.scope():
node_factors.append(factor) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['factor']]
return node_factors # depends on [control=['if'], data=[]]
else:
return self.factors |
def get_item_metadata(self, handle):
    """Return dictionary containing all metadata associated with handle.

    In other words all the metadata added using the ``add_item_metadata``
    method.

    :param handle: handle for accessing an item before the dataset is
                   frozen
    :returns: dictionary containing item metadata
    """
    # No metadata directory means no per-item metadata has ever been added.
    if not self._metadata_dir_exists():
        return {}

    prefix = self._handle_to_fragment_absprefixpath(handle)
    fragment_paths = self._ls_abspaths_with_cache(
        self._metadata_fragments_abspath)

    # Fragment files are named "identifier.key.json", so the metadata key
    # is the second-to-last dot-separated component of the filename.
    return {
        path.split('.')[-2]: _get_obj(path)
        for path in fragment_paths
        if path.startswith(prefix)
    }
constant[Return dictionary containing all metadata associated with handle.
In other words all the metadata added using the ``add_item_metadata``
method.
:param handle: handle for accessing an item before the dataset is
frozen
:returns: dictionary containing item metadata
]
if <ast.UnaryOp object at 0x7da20e9575b0> begin[:]
return[dictionary[[], []]]
variable[prefix] assign[=] call[name[self]._handle_to_fragment_absprefixpath, parameter[name[handle]]]
variable[files] assign[=] <ast.ListComp object at 0x7da20e9543d0>
variable[metadata] assign[=] dictionary[[], []]
for taget[name[f]] in starred[name[files]] begin[:]
variable[key] assign[=] call[call[name[f].split, parameter[constant[.]]]][<ast.UnaryOp object at 0x7da18f8107f0>]
variable[value] assign[=] call[name[_get_obj], parameter[name[f]]]
call[name[metadata]][name[key]] assign[=] name[value]
return[name[metadata]] | keyword[def] identifier[get_item_metadata] ( identifier[self] , identifier[handle] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_metadata_dir_exists] ():
keyword[return] {}
identifier[prefix] = identifier[self] . identifier[_handle_to_fragment_absprefixpath] ( identifier[handle] )
identifier[files] =[ identifier[f] keyword[for] identifier[f] keyword[in] identifier[self] . identifier[_ls_abspaths_with_cache] (
identifier[self] . identifier[_metadata_fragments_abspath] )
keyword[if] identifier[f] . identifier[startswith] ( identifier[prefix] )]
identifier[metadata] ={}
keyword[for] identifier[f] keyword[in] identifier[files] :
identifier[key] = identifier[f] . identifier[split] ( literal[string] )[- literal[int] ]
identifier[value] = identifier[_get_obj] ( identifier[f] )
identifier[metadata] [ identifier[key] ]= identifier[value]
keyword[return] identifier[metadata] | def get_item_metadata(self, handle):
"""Return dictionary containing all metadata associated with handle.
In other words all the metadata added using the ``add_item_metadata``
method.
:param handle: handle for accessing an item before the dataset is
frozen
:returns: dictionary containing item metadata
"""
if not self._metadata_dir_exists():
return {} # depends on [control=['if'], data=[]]
prefix = self._handle_to_fragment_absprefixpath(handle)
files = [f for f in self._ls_abspaths_with_cache(self._metadata_fragments_abspath) if f.startswith(prefix)]
metadata = {}
for f in files:
key = f.split('.')[-2] # filename: identifier.key.json
value = _get_obj(f)
metadata[key] = value # depends on [control=['for'], data=['f']]
return metadata |
def replace_order_line_item_by_id(cls, order_line_item_id, order_line_item, **kwargs):
    """Replace OrderLineItem

    Replace all attributes of OrderLineItem

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.replace_order_line_item_by_id(order_line_item_id, order_line_item, async=True)
    >>> result = thread.get()

    :param async bool
    :param str order_line_item_id: ID of orderLineItem to replace (required)
    :param OrderLineItem order_line_item: Attributes of orderLineItem to replace (required)
    :return: OrderLineItem
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always request just the response payload, not the full HTTP metadata.
    kwargs['_return_http_data_only'] = True
    # With async=True the helper returns the request thread, otherwise the
    # response data; either way the result is handed straight back.
    result = cls._replace_order_line_item_by_id_with_http_info(
        order_line_item_id, order_line_item, **kwargs)
    return result
constant[Replace OrderLineItem
Replace all attributes of OrderLineItem
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_order_line_item_by_id(order_line_item_id, order_line_item, async=True)
>>> result = thread.get()
:param async bool
:param str order_line_item_id: ID of orderLineItem to replace (required)
:param OrderLineItem order_line_item: Attributes of orderLineItem to replace (required)
:return: OrderLineItem
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async]]] begin[:]
return[call[name[cls]._replace_order_line_item_by_id_with_http_info, parameter[name[order_line_item_id], name[order_line_item]]]] | keyword[def] identifier[replace_order_line_item_by_id] ( identifier[cls] , identifier[order_line_item_id] , identifier[order_line_item] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[cls] . identifier[_replace_order_line_item_by_id_with_http_info] ( identifier[order_line_item_id] , identifier[order_line_item] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[cls] . identifier[_replace_order_line_item_by_id_with_http_info] ( identifier[order_line_item_id] , identifier[order_line_item] ,** identifier[kwargs] )
keyword[return] identifier[data] | def replace_order_line_item_by_id(cls, order_line_item_id, order_line_item, **kwargs):
"""Replace OrderLineItem
Replace all attributes of OrderLineItem
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_order_line_item_by_id(order_line_item_id, order_line_item, async=True)
>>> result = thread.get()
:param async bool
:param str order_line_item_id: ID of orderLineItem to replace (required)
:param OrderLineItem order_line_item: Attributes of orderLineItem to replace (required)
:return: OrderLineItem
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._replace_order_line_item_by_id_with_http_info(order_line_item_id, order_line_item, **kwargs) # depends on [control=['if'], data=[]]
else:
data = cls._replace_order_line_item_by_id_with_http_info(order_line_item_id, order_line_item, **kwargs)
return data |
def run_at_subprocess(self, use_subprocess, foo, *args, **kwargs):
    """Run ``foo(*args, **kwargs)``, optionally in a forked child process.

    Running in a child process is useful when ``foo`` leaks memory: the OS
    reclaims everything when the child exits.

    :param use_subprocess: when False, call ``foo`` in-process and return
        its result; otherwise fork and run ``foo`` in the child.
    :param foo: callable to execute.
    :returns: ``foo``'s return value when run in-process; otherwise True if
        the child exited cleanly, False if it raised or exited non-zero.
    """
    if use_subprocess is False:
        return foo(*args, **kwargs)
    child_pid = os.fork()
    if child_pid == 0:
        # Child: control flow must never escape back into the caller's
        # code, otherwise both processes would keep executing it.
        try:
            foo(*args, **kwargs)
        except BaseException:
            import traceback
            traceback.print_exc()
            # os._exit skips atexit handlers and stdio buffer flushing
            # inherited from the parent, which must not run twice.
            os._exit(1)
        os._exit(0)
    # Parent: report whether the child exited with status 0.
    return os.waitpid(child_pid, 0)[1] == 0
constant[
This method for run some function at subprocess.
Very useful when you have a problem with memory leaks.
]
if compare[name[use_subprocess] is constant[False]] begin[:]
return[call[name[foo], parameter[<ast.Starred object at 0x7da1b269b760>]]]
variable[child_pid] assign[=] call[name[os].fork, parameter[]]
if compare[name[child_pid] equal[==] constant[0]] begin[:]
call[name[foo], parameter[<ast.Starred object at 0x7da1b269be20>]]
call[name[sys].exit, parameter[constant[0]]]
return[compare[call[call[name[os].waitpid, parameter[name[child_pid], constant[0]]]][constant[1]] equal[==] constant[0]]] | keyword[def] identifier[run_at_subprocess] ( identifier[self] , identifier[use_subprocess] , identifier[foo] ,* identifier[args] ,** identifier[kwrags] ):
literal[string]
keyword[if] identifier[use_subprocess] keyword[is] keyword[False] :
keyword[return] identifier[foo] (* identifier[args] ,** identifier[kwrags] )
identifier[child_pid] = identifier[os] . identifier[fork] ()
keyword[if] identifier[child_pid] == literal[int] :
identifier[foo] (* identifier[args] ,** identifier[kwrags] )
identifier[sys] . identifier[exit] ( literal[int] )
keyword[return] identifier[os] . identifier[waitpid] ( identifier[child_pid] , literal[int] )[ literal[int] ]== literal[int] | def run_at_subprocess(self, use_subprocess, foo, *args, **kwrags):
"""
This method for run some function at subprocess.
Very useful when you have a problem with memory leaks.
"""
if use_subprocess is False:
return foo(*args, **kwrags) # depends on [control=['if'], data=[]]
child_pid = os.fork()
if child_pid == 0:
foo(*args, **kwrags)
sys.exit(0) # depends on [control=['if'], data=[]]
return os.waitpid(child_pid, 0)[1] == 0 |
def drop_columns(
    self, max_na_values: float = None, max_unique_values: float = None
):
    """Queue drop-column operations for this data set.

    When max_na_values was informed, remove columns when the proportion of
    total NA values more than max_na_values threshold.
    When max_unique_values was informed, remove columns when the proportion
    of the total of unique values is more than the max_unique_values
    threshold, just for columns with type as object or category.
    Both thresholds may be supplied in one call; each produces its own step.

    :param max_na_values: proportion threshold of max na values
    :param max_unique_values: proportion threshold of max unique values
    :return:
    """
    steps = []
    if max_na_values is not None:
        steps.append({
            'data-set': self.iid,
            'operation': 'drop-na',
            'expression': '{"max_na_values":%s, "axis": 1}' % max_na_values,
        })
    if max_unique_values is not None:
        # Fixed: this used to overwrite the drop-na step, so passing both
        # thresholds silently discarded the NA filter.
        steps.append({
            'data-set': self.iid,
            'operation': 'drop-unique',
            'expression': '{"max_unique_values":%s}' % max_unique_values,
        })
    if steps:
        # Only issue an update when at least one threshold was supplied;
        # previously an empty {} step was sent in that case.
        self.attr_update(attr='steps', value=steps)
constant[
When max_na_values was informed, remove columns when the proportion of
total NA values more than max_na_values threshold.
When max_unique_values was informed, remove columns when the proportion
of the total of unique values is more than the max_unique_values
threshold, just for columns with type as object or category.
:param max_na_values: proportion threshold of max na values
:param max_unique_values:
:return:
]
variable[step] assign[=] dictionary[[], []]
if compare[name[max_na_values] is_not constant[None]] begin[:]
variable[step] assign[=] dictionary[[<ast.Constant object at 0x7da1b0ff03d0>, <ast.Constant object at 0x7da1b0ff2860>, <ast.Constant object at 0x7da1b0ff3070>], [<ast.Attribute object at 0x7da1b0ff21a0>, <ast.Constant object at 0x7da1b0ff0ca0>, <ast.BinOp object at 0x7da1b0ff3310>]]
if compare[name[max_unique_values] is_not constant[None]] begin[:]
variable[step] assign[=] dictionary[[<ast.Constant object at 0x7da1b0ff0d00>, <ast.Constant object at 0x7da1b0ff2a10>, <ast.Constant object at 0x7da1b0ff0250>], [<ast.Attribute object at 0x7da1b0ff15a0>, <ast.Constant object at 0x7da1b0ff1a50>, <ast.BinOp object at 0x7da1b0ff13f0>]]
call[name[self].attr_update, parameter[]] | keyword[def] identifier[drop_columns] (
identifier[self] , identifier[max_na_values] : identifier[int] = keyword[None] , identifier[max_unique_values] : identifier[int] = keyword[None]
):
literal[string]
identifier[step] ={}
keyword[if] identifier[max_na_values] keyword[is] keyword[not] keyword[None] :
identifier[step] ={
literal[string] : identifier[self] . identifier[iid] ,
literal[string] : literal[string] ,
literal[string] : literal[string] % identifier[max_na_values]
}
keyword[if] identifier[max_unique_values] keyword[is] keyword[not] keyword[None] :
identifier[step] ={
literal[string] : identifier[self] . identifier[iid] ,
literal[string] : literal[string] ,
literal[string] : literal[string] % identifier[max_unique_values]
}
identifier[self] . identifier[attr_update] ( identifier[attr] = literal[string] , identifier[value] =[ identifier[step] ]) | def drop_columns(self, max_na_values: int=None, max_unique_values: int=None):
"""
When max_na_values was informed, remove columns when the proportion of
total NA values more than max_na_values threshold.
When max_unique_values was informed, remove columns when the proportion
of the total of unique values is more than the max_unique_values
threshold, just for columns with type as object or category.
:param max_na_values: proportion threshold of max na values
:param max_unique_values:
:return:
"""
step = {}
if max_na_values is not None:
step = {'data-set': self.iid, 'operation': 'drop-na', 'expression': '{"max_na_values":%s, "axis": 1}' % max_na_values} # depends on [control=['if'], data=['max_na_values']]
if max_unique_values is not None:
step = {'data-set': self.iid, 'operation': 'drop-unique', 'expression': '{"max_unique_values":%s}' % max_unique_values} # depends on [control=['if'], data=['max_unique_values']]
self.attr_update(attr='steps', value=[step]) |
def save(self):
    '''Save current property list representation to the original file.'''
    serialized = str(self.soup)
    # Overwrite the file the plist was originally loaded from.
    with open(self.filename, 'w') as output:
        output.write(serialized)
constant[Save current property list representation to the original file.]
with call[name[open], parameter[name[self].filename, constant[w]]] begin[:]
call[name[plist_file].write, parameter[call[name[str], parameter[name[self].soup]]]] | keyword[def] identifier[save] ( identifier[self] ):
literal[string]
keyword[with] identifier[open] ( identifier[self] . identifier[filename] , literal[string] ) keyword[as] identifier[plist_file] :
identifier[plist_file] . identifier[write] ( identifier[str] ( identifier[self] . identifier[soup] )) | def save(self):
"""Save current property list representation to the original file."""
with open(self.filename, 'w') as plist_file:
plist_file.write(str(self.soup)) # depends on [control=['with'], data=['plist_file']] |
def save_coef(scoef, filename):
    """Saves ScalarCoeffs object 'scoef' to file. The first line of the
    file has the max number N and the max number M of the scoef structure
    separated by a comma. The remaining lines have the form

        3.14, 2.718

    The first number is the real part of the mode and the second is the
    imaginary.
    """
    nmax = scoef.nmax
    mmax = scoef.mmax
    frmstr = "{0:.16e},{1:.16e}\n"
    # Total number of modes stored in the flat coefficient vector.
    L = (nmax + 1) + mmax * (2 * nmax - mmax + 1)
    with open(filename, 'w') as f:
        f.write("{0},{1}\n".format(nmax, mmax))
        # Fixed: use range instead of the Python-2-only xrange.
        for n in range(L):
            f.write(frmstr.format(scoef._vec[n].real,
                                  scoef._vec[n].imag))
constant[Saves ScalarCoeffs object 'scoef' to file. The first line of the
file has the max number N and the max number M of the scoef structure
separated by a comma. The remaining lines have the form
3.14, 2.718
The first number is the real part of the mode and the second is the
imaginary.
]
variable[nmax] assign[=] name[scoef].nmax
variable[mmax] assign[=] name[scoef].mmax
variable[frmstr] assign[=] constant[{0:.16e},{1:.16e}
]
variable[L] assign[=] binary_operation[binary_operation[name[nmax] + constant[1]] + binary_operation[name[mmax] * binary_operation[binary_operation[binary_operation[constant[2] * name[nmax]] - name[mmax]] + constant[1]]]]
with call[name[open], parameter[name[filename], constant[w]]] begin[:]
call[name[f].write, parameter[call[constant[{0},{1}
].format, parameter[name[nmax], name[mmax]]]]]
for taget[name[n]] in starred[call[name[xrange], parameter[constant[0], name[L]]]] begin[:]
call[name[f].write, parameter[call[name[frmstr].format, parameter[call[name[scoef]._vec][name[n]].real, call[name[scoef]._vec][name[n]].imag]]]] | keyword[def] identifier[save_coef] ( identifier[scoef] , identifier[filename] ):
literal[string]
identifier[nmax] = identifier[scoef] . identifier[nmax]
identifier[mmax] = identifier[scoef] . identifier[mmax]
identifier[frmstr] = literal[string]
identifier[L] =( identifier[nmax] + literal[int] )+ identifier[mmax] *( literal[int] * identifier[nmax] - identifier[mmax] + literal[int] );
keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( literal[string] . identifier[format] ( identifier[nmax] , identifier[mmax] ))
keyword[for] identifier[n] keyword[in] identifier[xrange] ( literal[int] , identifier[L] ):
identifier[f] . identifier[write] ( identifier[frmstr] . identifier[format] ( identifier[scoef] . identifier[_vec] [ identifier[n] ]. identifier[real] ,
identifier[scoef] . identifier[_vec] [ identifier[n] ]. identifier[imag] )) | def save_coef(scoef, filename):
"""Saves ScalarCoeffs object 'scoef' to file. The first line of the
file has the max number N and the max number M of the scoef structure
separated by a comma. The remaining lines have the form
3.14, 2.718
The first number is the real part of the mode and the second is the
imaginary.
"""
nmax = scoef.nmax
mmax = scoef.mmax
frmstr = '{0:.16e},{1:.16e}\n'
L = nmax + 1 + mmax * (2 * nmax - mmax + 1)
with open(filename, 'w') as f:
f.write('{0},{1}\n'.format(nmax, mmax))
for n in xrange(0, L):
f.write(frmstr.format(scoef._vec[n].real, scoef._vec[n].imag)) # depends on [control=['for'], data=['n']] # depends on [control=['with'], data=['f']] |
def _get_input_buffer_cursor_prompt(self):
""" Returns the (plain text) prompt for line of the input buffer that
contains the cursor, or None if there is no such line.
"""
if self._executing:
return None
cursor = self._control.textCursor()
if cursor.position() >= self._prompt_pos:
if cursor.blockNumber() == self._get_prompt_cursor().blockNumber():
return self._prompt
else:
return self._continuation_prompt
else:
return None | def function[_get_input_buffer_cursor_prompt, parameter[self]]:
constant[ Returns the (plain text) prompt for line of the input buffer that
contains the cursor, or None if there is no such line.
]
if name[self]._executing begin[:]
return[constant[None]]
variable[cursor] assign[=] call[name[self]._control.textCursor, parameter[]]
if compare[call[name[cursor].position, parameter[]] greater_or_equal[>=] name[self]._prompt_pos] begin[:]
if compare[call[name[cursor].blockNumber, parameter[]] equal[==] call[call[name[self]._get_prompt_cursor, parameter[]].blockNumber, parameter[]]] begin[:]
return[name[self]._prompt] | keyword[def] identifier[_get_input_buffer_cursor_prompt] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_executing] :
keyword[return] keyword[None]
identifier[cursor] = identifier[self] . identifier[_control] . identifier[textCursor] ()
keyword[if] identifier[cursor] . identifier[position] ()>= identifier[self] . identifier[_prompt_pos] :
keyword[if] identifier[cursor] . identifier[blockNumber] ()== identifier[self] . identifier[_get_prompt_cursor] (). identifier[blockNumber] ():
keyword[return] identifier[self] . identifier[_prompt]
keyword[else] :
keyword[return] identifier[self] . identifier[_continuation_prompt]
keyword[else] :
keyword[return] keyword[None] | def _get_input_buffer_cursor_prompt(self):
""" Returns the (plain text) prompt for line of the input buffer that
contains the cursor, or None if there is no such line.
"""
if self._executing:
return None # depends on [control=['if'], data=[]]
cursor = self._control.textCursor()
if cursor.position() >= self._prompt_pos:
if cursor.blockNumber() == self._get_prompt_cursor().blockNumber():
return self._prompt # depends on [control=['if'], data=[]]
else:
return self._continuation_prompt # depends on [control=['if'], data=[]]
else:
return None |
def getIssuedBatchJobIDs(self):
    """
    Gets the list of jobs issued to parasol in all results files, but not including jobs
    created by other users.
    """
    # Union the job IDs from every results file; the set removes any job
    # reported by more than one file.
    issued = set()
    for results_file in itervalues(self.resultsFiles):
        issued |= set(self.getJobIDsForResultsFile(results_file))
    return list(issued)
constant[
Gets the list of jobs issued to parasol in all results files, but not including jobs
created by other users.
]
variable[issuedJobs] assign[=] call[name[set], parameter[]]
for taget[name[resultsFile]] in starred[call[name[itervalues], parameter[name[self].resultsFiles]]] begin[:]
call[name[issuedJobs].update, parameter[call[name[self].getJobIDsForResultsFile, parameter[name[resultsFile]]]]]
return[call[name[list], parameter[name[issuedJobs]]]] | keyword[def] identifier[getIssuedBatchJobIDs] ( identifier[self] ):
literal[string]
identifier[issuedJobs] = identifier[set] ()
keyword[for] identifier[resultsFile] keyword[in] identifier[itervalues] ( identifier[self] . identifier[resultsFiles] ):
identifier[issuedJobs] . identifier[update] ( identifier[self] . identifier[getJobIDsForResultsFile] ( identifier[resultsFile] ))
keyword[return] identifier[list] ( identifier[issuedJobs] ) | def getIssuedBatchJobIDs(self):
"""
Gets the list of jobs issued to parasol in all results files, but not including jobs
created by other users.
"""
issuedJobs = set()
for resultsFile in itervalues(self.resultsFiles):
issuedJobs.update(self.getJobIDsForResultsFile(resultsFile)) # depends on [control=['for'], data=['resultsFile']]
return list(issuedJobs) |
def tasks(self, **kwargs):
    """ Fetch tasks specified criteria """
    # Keyword arguments are passed straight through to the service's
    # list() call as query criteria.
    response = self.service.tasks().list(**kwargs).execute()
    items = response.get("items", [])
    return [Task(item) for item in items]
constant[ Fetch tasks specified criteria ]
variable[tasks_result] assign[=] call[call[call[name[self].service.tasks, parameter[]].list, parameter[]].execute, parameter[]]
return[<ast.ListComp object at 0x7da18bc71240>] | keyword[def] identifier[tasks] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[tasks_result] = identifier[self] . identifier[service] . identifier[tasks] (). identifier[list] (** identifier[kwargs] ). identifier[execute] ()
keyword[return] [ identifier[Task] ( identifier[task] ) keyword[for] identifier[task] keyword[in] identifier[tasks_result] . identifier[get] ( literal[string] ,[])] | def tasks(self, **kwargs):
""" Fetch tasks specified criteria """
tasks_result = self.service.tasks().list(**kwargs).execute()
return [Task(task) for task in tasks_result.get('items', [])] |
def normprob(d, snrs, inds=None, version=2):
    """ Uses observed SNR distribution to calculate normal probability SNR

    Uses state dict to calculate number of trials.
    snrs is list of all snrs in distribution.
    version used to toggle for tests. version 2 is fastest and returns zeros for filtered snr values.

    Returns list of expected snr given each input value's frequency of occurrence via the normal probability assumption
    """

    # Treat a missing/empty inds as "use every snr". A concrete list is
    # required because inds is later compared against and indexed with
    # .index(); on Python 3 a bare range object would never compare equal.
    if not inds: inds = list(range(len(snrs)))

    # define norm quantile functions
    Z = lambda quan: np.sqrt(2)*erfinv( 2*quan - 1)
    quan = lambda ntrials, i: (ntrials + 1/2. - i)/ntrials

    # calc number of trials
    npix = d['npixx']*d['npixy']
    # Fixed: dict.has_key() was removed in Python 3; `in` works in both.
    if 'goodintcount' in d:
        nints = d['goodintcount']
    else:
        nints = d['nints']
    ndms = len(d['dmarr'])
    dtfactor = np.sum([1./i for i in d['dtarr']])  # assumes dedisperse-all algorithm
    ntrials = npix*nints*ndms*dtfactor
    logger.info('Calculating normal probability distribution for npix*nints*ndms*dtfactor = %d' % (ntrials))

    # calc normal quantile
    if version == 2:
        # purely sort and numpy-based
        sortinds = np.argsort(snrs[inds])
        lenpos = len(np.where(snrs[inds] >= 0)[0])
        lenneg = len(np.where(snrs[inds] < 0)[0])
        # unsortinds maps each original position to its rank in the sort.
        unsortinds = np.zeros(len(sortinds), dtype=int)
        unsortinds[sortinds] = np.arange(len(sortinds))
        # Ranks count inward from each tail: most-negative and most-positive
        # snrs get rank 1 so extreme values map to extreme quantiles.
        rank = np.concatenate( (np.arange(1, lenneg+1), np.arange(1, lenpos+1)[::-1]) )
        logger.debug('{} {}'.format(rank, sortinds))
        zval = Z(quan(ntrials, rank[unsortinds]))
        if inds != list(range(len(snrs))):  # add zeros for filtered data to match length to original snr array
            zval = np.array([zval[inds.index(i)] if i in inds else 0 for i in range(len(snrs))])
    elif version == 1:
        # numpy array based
        snrpos = snrs[inds][np.where(snrs[inds] > 0)]
        snrneg = snrs[inds][np.where(snrs[inds] < 0)]
        snrsortpos = np.sort(snrpos)[::-1]
        snrsortneg = np.sort(snrneg)
        logger.debug('Sorted pos/neg SNRs')

        # NOTE(review): unlike version 2, versions 1 and 0 do not pad zval
        # for filtered indices, so its length can be shorter than snrs.
        zval = []
        for i,snr in enumerate(snrs):
            if i in inds:
                if snr in snrsortpos:
                    zval.append(Z(quan(ntrials, np.where(snr == snrsortpos)[0][0]+1)))
                elif snr in snrsortneg:
                    zval.append(Z(quan(ntrials, np.where(snr == snrsortneg)[0][0]+1)))
    elif version == 0:
        # list based
        snrsortpos = []
        snrsortneg = []
        for i in inds:
            if snrs[i] > 0:
                snrsortpos.append(snrs[i])
            elif snrs[i] < 0:
                snrsortneg.append(abs(snrs[i]))
        snrsortpos = sorted(snrsortpos, reverse=True)
        snrsortneg = sorted(snrsortneg, reverse=True)
        logger.debug('Sorted pos/neg SNRs')

        zval = []
        for (i, snr) in enumerate(snrs):
            if snr >= 0 and i in inds:
                zval.append(Z(quan(ntrials, snrsortpos.index(snr)+1)))
            elif snr < 0 and i in inds:
                zval.append(Z(quan(ntrials, snrsortneg.index(abs(snr))+1)))
            else:
                zval.append(0)

    return zval
constant[ Uses observed SNR distribution to calculate normal probability SNR
Uses state dict to calculate number of trials.
snrs is list of all snrs in distribution.
version used to toggle for tests. version 2 is fastest and returns zeros for filtered snr values.
Returns list of expected snr given each input value's frequency of occurrence via the normal probability assumption
]
if <ast.UnaryOp object at 0x7da1b26d7be0> begin[:]
variable[inds] assign[=] call[name[range], parameter[call[name[len], parameter[name[snrs]]]]]
variable[Z] assign[=] <ast.Lambda object at 0x7da1b26d79d0>
variable[quan] assign[=] <ast.Lambda object at 0x7da1b26d76a0>
variable[npix] assign[=] binary_operation[call[name[d]][constant[npixx]] * call[name[d]][constant[npixy]]]
if call[name[d].has_key, parameter[constant[goodintcount]]] begin[:]
variable[nints] assign[=] call[name[d]][constant[goodintcount]]
variable[ndms] assign[=] call[name[len], parameter[call[name[d]][constant[dmarr]]]]
variable[dtfactor] assign[=] call[name[np].sum, parameter[<ast.ListComp object at 0x7da1b26d6d70>]]
variable[ntrials] assign[=] binary_operation[binary_operation[binary_operation[name[npix] * name[nints]] * name[ndms]] * name[dtfactor]]
call[name[logger].info, parameter[binary_operation[constant[Calculating normal probability distribution for npix*nints*ndms*dtfactor = %d] <ast.Mod object at 0x7da2590d6920> name[ntrials]]]]
if compare[name[version] equal[==] constant[2]] begin[:]
variable[sortinds] assign[=] call[name[np].argsort, parameter[call[name[snrs]][name[inds]]]]
variable[lenpos] assign[=] call[name[len], parameter[call[call[name[np].where, parameter[compare[call[name[snrs]][name[inds]] greater_or_equal[>=] constant[0]]]]][constant[0]]]]
variable[lenneg] assign[=] call[name[len], parameter[call[call[name[np].where, parameter[compare[call[name[snrs]][name[inds]] less[<] constant[0]]]]][constant[0]]]]
variable[unsortinds] assign[=] call[name[np].zeros, parameter[call[name[len], parameter[name[sortinds]]]]]
call[name[unsortinds]][name[sortinds]] assign[=] call[name[np].arange, parameter[call[name[len], parameter[name[sortinds]]]]]
variable[rank] assign[=] call[name[np].concatenate, parameter[tuple[[<ast.Call object at 0x7da1b26d5c00>, <ast.Subscript object at 0x7da1b26d5ab0>]]]]
call[name[logger].debug, parameter[call[constant[{} {}].format, parameter[name[rank], name[sortinds]]]]]
variable[zval] assign[=] call[name[Z], parameter[call[name[quan], parameter[name[ntrials], call[name[rank]][name[unsortinds]]]]]]
if compare[name[inds] not_equal[!=] call[name[range], parameter[call[name[len], parameter[name[snrs]]]]]] begin[:]
variable[zval] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da1b26d5270>]]
return[name[zval]] | keyword[def] identifier[normprob] ( identifier[d] , identifier[snrs] , identifier[inds] = keyword[None] , identifier[version] = literal[int] ):
literal[string]
keyword[if] keyword[not] identifier[inds] : identifier[inds] = identifier[range] ( identifier[len] ( identifier[snrs] ))
identifier[Z] = keyword[lambda] identifier[quan] : identifier[np] . identifier[sqrt] ( literal[int] )* identifier[erfinv] ( literal[int] * identifier[quan] - literal[int] )
identifier[quan] = keyword[lambda] identifier[ntrials] , identifier[i] :( identifier[ntrials] + literal[int] / literal[int] - identifier[i] )/ identifier[ntrials]
identifier[npix] = identifier[d] [ literal[string] ]* identifier[d] [ literal[string] ]
keyword[if] identifier[d] . identifier[has_key] ( literal[string] ):
identifier[nints] = identifier[d] [ literal[string] ]
keyword[else] :
identifier[nints] = identifier[d] [ literal[string] ]
identifier[ndms] = identifier[len] ( identifier[d] [ literal[string] ])
identifier[dtfactor] = identifier[np] . identifier[sum] ([ literal[int] / identifier[i] keyword[for] identifier[i] keyword[in] identifier[d] [ literal[string] ]])
identifier[ntrials] = identifier[npix] * identifier[nints] * identifier[ndms] * identifier[dtfactor]
identifier[logger] . identifier[info] ( literal[string] %( identifier[ntrials] ))
keyword[if] identifier[version] == literal[int] :
identifier[sortinds] = identifier[np] . identifier[argsort] ( identifier[snrs] [ identifier[inds] ])
identifier[lenpos] = identifier[len] ( identifier[np] . identifier[where] ( identifier[snrs] [ identifier[inds] ]>= literal[int] )[ literal[int] ])
identifier[lenneg] = identifier[len] ( identifier[np] . identifier[where] ( identifier[snrs] [ identifier[inds] ]< literal[int] )[ literal[int] ])
identifier[unsortinds] = identifier[np] . identifier[zeros] ( identifier[len] ( identifier[sortinds] ), identifier[dtype] = identifier[int] )
identifier[unsortinds] [ identifier[sortinds] ]= identifier[np] . identifier[arange] ( identifier[len] ( identifier[sortinds] ))
identifier[rank] = identifier[np] . identifier[concatenate] (( identifier[np] . identifier[arange] ( literal[int] , identifier[lenneg] + literal[int] ), identifier[np] . identifier[arange] ( literal[int] , identifier[lenpos] + literal[int] )[::- literal[int] ]))
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[rank] , identifier[sortinds] ))
identifier[zval] = identifier[Z] ( identifier[quan] ( identifier[ntrials] , identifier[rank] [ identifier[unsortinds] ]))
keyword[if] identifier[inds] != identifier[range] ( identifier[len] ( identifier[snrs] )):
identifier[zval] = identifier[np] . identifier[array] ([ identifier[zval] [ identifier[inds] . identifier[index] ( identifier[i] )] keyword[if] identifier[i] keyword[in] identifier[inds] keyword[else] literal[int] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[snrs] ))])
keyword[elif] identifier[version] == literal[int] :
identifier[snrpos] = identifier[snrs] [ identifier[inds] ][ identifier[np] . identifier[where] ( identifier[snrs] [ identifier[inds] ]> literal[int] )]
identifier[snrneg] = identifier[snrs] [ identifier[inds] ][ identifier[np] . identifier[where] ( identifier[snrs] [ identifier[inds] ]< literal[int] )]
identifier[snrsortpos] = identifier[np] . identifier[sort] ( identifier[snrpos] )[::- literal[int] ]
identifier[snrsortneg] = identifier[np] . identifier[sort] ( identifier[snrneg] )
identifier[logger] . identifier[debug] ( literal[string] )
identifier[zval] =[]
keyword[for] identifier[i] , identifier[snr] keyword[in] identifier[enumerate] ( identifier[snrs] ):
keyword[if] identifier[i] keyword[in] identifier[inds] :
keyword[if] identifier[snr] keyword[in] identifier[snrsortpos] :
identifier[zval] . identifier[append] ( identifier[Z] ( identifier[quan] ( identifier[ntrials] , identifier[np] . identifier[where] ( identifier[snr] == identifier[snrsortpos] )[ literal[int] ][ literal[int] ]+ literal[int] )))
keyword[elif] identifier[snr] keyword[in] identifier[snrsortneg] :
identifier[zval] . identifier[append] ( identifier[Z] ( identifier[quan] ( identifier[ntrials] , identifier[np] . identifier[where] ( identifier[snr] == identifier[snrsortneg] )[ literal[int] ][ literal[int] ]+ literal[int] )))
keyword[elif] identifier[version] == literal[int] :
identifier[snrsortpos] =[]
identifier[snrsortneg] =[]
keyword[for] identifier[i] keyword[in] identifier[inds] :
keyword[if] identifier[snrs] [ identifier[i] ]> literal[int] :
identifier[snrsortpos] . identifier[append] ( identifier[snrs] [ identifier[i] ])
keyword[elif] identifier[snrs] [ identifier[i] ]< literal[int] :
identifier[snrsortneg] . identifier[append] ( identifier[abs] ( identifier[snrs] [ identifier[i] ]))
identifier[snrsortpos] = identifier[sorted] ( identifier[snrsortpos] , identifier[reverse] = keyword[True] )
identifier[snrsortneg] = identifier[sorted] ( identifier[snrsortneg] , identifier[reverse] = keyword[True] )
identifier[logger] . identifier[debug] ( literal[string] )
identifier[zval] =[]
keyword[for] ( identifier[i] , identifier[snr] ) keyword[in] identifier[enumerate] ( identifier[snrs] ):
keyword[if] identifier[snr] >= literal[int] keyword[and] identifier[i] keyword[in] identifier[inds] :
identifier[zval] . identifier[append] ( identifier[Z] ( identifier[quan] ( identifier[ntrials] , identifier[snrsortpos] . identifier[index] ( identifier[snr] )+ literal[int] )))
keyword[elif] identifier[snr] < literal[int] keyword[and] identifier[i] keyword[in] identifier[inds] :
identifier[zval] . identifier[append] ( identifier[Z] ( identifier[quan] ( identifier[ntrials] , identifier[snrsortneg] . identifier[index] ( identifier[abs] ( identifier[snr] ))+ literal[int] )))
keyword[else] :
identifier[zval] . identifier[append] ( literal[int] )
keyword[return] identifier[zval] | def normprob(d, snrs, inds=None, version=2):
""" Uses observed SNR distribution to calculate normal probability SNR
Uses state dict to calculate number of trials.
snrs is list of all snrs in distribution.
version used to toggle for tests. version 2 is fastest and returns zeros for filtered snr values.
Returns list of expected snr given each input value's frequency of occurrence via the normal probability assumption
"""
if not inds:
inds = range(len(snrs)) # depends on [control=['if'], data=[]]
# define norm quantile functions
Z = lambda quan: np.sqrt(2) * erfinv(2 * quan - 1)
quan = lambda ntrials, i: (ntrials + 1 / 2.0 - i) / ntrials
# calc number of trials
npix = d['npixx'] * d['npixy']
if d.has_key('goodintcount'):
nints = d['goodintcount'] # depends on [control=['if'], data=[]]
else:
nints = d['nints']
ndms = len(d['dmarr'])
dtfactor = np.sum([1.0 / i for i in d['dtarr']]) # assumes dedisperse-all algorithm
ntrials = npix * nints * ndms * dtfactor
logger.info('Calculating normal probability distribution for npix*nints*ndms*dtfactor = %d' % ntrials)
# calc normal quantile
if version == 2:
# purely sort and numpy-based
sortinds = np.argsort(snrs[inds])
lenpos = len(np.where(snrs[inds] >= 0)[0])
lenneg = len(np.where(snrs[inds] < 0)[0])
unsortinds = np.zeros(len(sortinds), dtype=int)
unsortinds[sortinds] = np.arange(len(sortinds))
rank = np.concatenate((np.arange(1, lenneg + 1), np.arange(1, lenpos + 1)[::-1]))
logger.debug('{} {}'.format(rank, sortinds))
zval = Z(quan(ntrials, rank[unsortinds]))
if inds != range(len(snrs)): # add zeros for filtered data to match length to original snr array
zval = np.array([zval[inds.index(i)] if i in inds else 0 for i in range(len(snrs))]) # depends on [control=['if'], data=['inds']] # depends on [control=['if'], data=[]]
elif version == 1:
# numpy array based
snrpos = snrs[inds][np.where(snrs[inds] > 0)]
snrneg = snrs[inds][np.where(snrs[inds] < 0)]
snrsortpos = np.sort(snrpos)[::-1]
snrsortneg = np.sort(snrneg)
logger.debug('Sorted pos/neg SNRs')
zval = []
for (i, snr) in enumerate(snrs):
if i in inds:
if snr in snrsortpos:
zval.append(Z(quan(ntrials, np.where(snr == snrsortpos)[0][0] + 1))) # depends on [control=['if'], data=['snr', 'snrsortpos']]
elif snr in snrsortneg:
zval.append(Z(quan(ntrials, np.where(snr == snrsortneg)[0][0] + 1))) # depends on [control=['if'], data=['snr', 'snrsortneg']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
elif version == 0:
# list based
snrsortpos = []
snrsortneg = []
for i in inds:
if snrs[i] > 0:
snrsortpos.append(snrs[i]) # depends on [control=['if'], data=[]]
elif snrs[i] < 0:
snrsortneg.append(abs(snrs[i])) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
snrsortpos = sorted(snrsortpos, reverse=True)
snrsortneg = sorted(snrsortneg, reverse=True)
logger.debug('Sorted pos/neg SNRs')
zval = []
for (i, snr) in enumerate(snrs):
if snr >= 0 and i in inds:
zval.append(Z(quan(ntrials, snrsortpos.index(snr) + 1))) # depends on [control=['if'], data=[]]
elif snr < 0 and i in inds:
zval.append(Z(quan(ntrials, snrsortneg.index(abs(snr)) + 1))) # depends on [control=['if'], data=[]]
else:
zval.append(0) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
return zval |
def get_android_hierarchy(d):
"""
Returns:
JSON object
"""
page_xml = d.dump_hierarchy(compressed=False, pretty=False).encode('utf-8')
dom = xml.dom.minidom.parseString(page_xml)
root = dom.documentElement
def travel(node):
# print(node)
if node.attributes is None:
return
json_node = parse_uiautomator_node(node)
json_node['id'] = str(uuid.uuid4())
if node.childNodes:
children = []
for n in node.childNodes:
sub_hierarchy = travel(n)
if sub_hierarchy:
children.append(sub_hierarchy)
json_node['children'] = children
return json_node
return travel(root) | def function[get_android_hierarchy, parameter[d]]:
constant[
Returns:
JSON object
]
variable[page_xml] assign[=] call[call[name[d].dump_hierarchy, parameter[]].encode, parameter[constant[utf-8]]]
variable[dom] assign[=] call[name[xml].dom.minidom.parseString, parameter[name[page_xml]]]
variable[root] assign[=] name[dom].documentElement
def function[travel, parameter[node]]:
if compare[name[node].attributes is constant[None]] begin[:]
return[None]
variable[json_node] assign[=] call[name[parse_uiautomator_node], parameter[name[node]]]
call[name[json_node]][constant[id]] assign[=] call[name[str], parameter[call[name[uuid].uuid4, parameter[]]]]
if name[node].childNodes begin[:]
variable[children] assign[=] list[[]]
for taget[name[n]] in starred[name[node].childNodes] begin[:]
variable[sub_hierarchy] assign[=] call[name[travel], parameter[name[n]]]
if name[sub_hierarchy] begin[:]
call[name[children].append, parameter[name[sub_hierarchy]]]
call[name[json_node]][constant[children]] assign[=] name[children]
return[name[json_node]]
return[call[name[travel], parameter[name[root]]]] | keyword[def] identifier[get_android_hierarchy] ( identifier[d] ):
literal[string]
identifier[page_xml] = identifier[d] . identifier[dump_hierarchy] ( identifier[compressed] = keyword[False] , identifier[pretty] = keyword[False] ). identifier[encode] ( literal[string] )
identifier[dom] = identifier[xml] . identifier[dom] . identifier[minidom] . identifier[parseString] ( identifier[page_xml] )
identifier[root] = identifier[dom] . identifier[documentElement]
keyword[def] identifier[travel] ( identifier[node] ):
keyword[if] identifier[node] . identifier[attributes] keyword[is] keyword[None] :
keyword[return]
identifier[json_node] = identifier[parse_uiautomator_node] ( identifier[node] )
identifier[json_node] [ literal[string] ]= identifier[str] ( identifier[uuid] . identifier[uuid4] ())
keyword[if] identifier[node] . identifier[childNodes] :
identifier[children] =[]
keyword[for] identifier[n] keyword[in] identifier[node] . identifier[childNodes] :
identifier[sub_hierarchy] = identifier[travel] ( identifier[n] )
keyword[if] identifier[sub_hierarchy] :
identifier[children] . identifier[append] ( identifier[sub_hierarchy] )
identifier[json_node] [ literal[string] ]= identifier[children]
keyword[return] identifier[json_node]
keyword[return] identifier[travel] ( identifier[root] ) | def get_android_hierarchy(d):
"""
Returns:
JSON object
"""
page_xml = d.dump_hierarchy(compressed=False, pretty=False).encode('utf-8')
dom = xml.dom.minidom.parseString(page_xml)
root = dom.documentElement
def travel(node):
# print(node)
if node.attributes is None:
return # depends on [control=['if'], data=[]]
json_node = parse_uiautomator_node(node)
json_node['id'] = str(uuid.uuid4())
if node.childNodes:
children = []
for n in node.childNodes:
sub_hierarchy = travel(n)
if sub_hierarchy:
children.append(sub_hierarchy) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['n']]
json_node['children'] = children # depends on [control=['if'], data=[]]
return json_node
return travel(root) |
def analysis_question_extractor(impact_report, component_metadata):
"""Extracting analysis question from the impact layer.
:param impact_report: the impact report that acts as a proxy to fetch
all the data that extractor needed
:type impact_report: safe.report.impact_report.ImpactReport
:param component_metadata: the component metadata. Used to obtain
information about the component we want to render
:type component_metadata: safe.report.report_metadata.
ReportComponentsMetadata
:return: context for rendering phase
:rtype: dict
.. versionadded:: 4.0
"""
multi_exposure = impact_report.multi_exposure_impact_function
if multi_exposure:
return multi_exposure_analysis_question_extractor(
impact_report, component_metadata)
context = {}
extra_args = component_metadata.extra_args
provenance = impact_report.impact_function.provenance
header = resolve_from_dictionary(extra_args, 'header')
analysis_question = provenance['analysis_question']
context['component_key'] = component_metadata.key
context['header'] = header
context['analysis_questions'] = [analysis_question]
return context | def function[analysis_question_extractor, parameter[impact_report, component_metadata]]:
constant[Extracting analysis question from the impact layer.
:param impact_report: the impact report that acts as a proxy to fetch
all the data that extractor needed
:type impact_report: safe.report.impact_report.ImpactReport
:param component_metadata: the component metadata. Used to obtain
information about the component we want to render
:type component_metadata: safe.report.report_metadata.
ReportComponentsMetadata
:return: context for rendering phase
:rtype: dict
.. versionadded:: 4.0
]
variable[multi_exposure] assign[=] name[impact_report].multi_exposure_impact_function
if name[multi_exposure] begin[:]
return[call[name[multi_exposure_analysis_question_extractor], parameter[name[impact_report], name[component_metadata]]]]
variable[context] assign[=] dictionary[[], []]
variable[extra_args] assign[=] name[component_metadata].extra_args
variable[provenance] assign[=] name[impact_report].impact_function.provenance
variable[header] assign[=] call[name[resolve_from_dictionary], parameter[name[extra_args], constant[header]]]
variable[analysis_question] assign[=] call[name[provenance]][constant[analysis_question]]
call[name[context]][constant[component_key]] assign[=] name[component_metadata].key
call[name[context]][constant[header]] assign[=] name[header]
call[name[context]][constant[analysis_questions]] assign[=] list[[<ast.Name object at 0x7da20e9b2a40>]]
return[name[context]] | keyword[def] identifier[analysis_question_extractor] ( identifier[impact_report] , identifier[component_metadata] ):
literal[string]
identifier[multi_exposure] = identifier[impact_report] . identifier[multi_exposure_impact_function]
keyword[if] identifier[multi_exposure] :
keyword[return] identifier[multi_exposure_analysis_question_extractor] (
identifier[impact_report] , identifier[component_metadata] )
identifier[context] ={}
identifier[extra_args] = identifier[component_metadata] . identifier[extra_args]
identifier[provenance] = identifier[impact_report] . identifier[impact_function] . identifier[provenance]
identifier[header] = identifier[resolve_from_dictionary] ( identifier[extra_args] , literal[string] )
identifier[analysis_question] = identifier[provenance] [ literal[string] ]
identifier[context] [ literal[string] ]= identifier[component_metadata] . identifier[key]
identifier[context] [ literal[string] ]= identifier[header]
identifier[context] [ literal[string] ]=[ identifier[analysis_question] ]
keyword[return] identifier[context] | def analysis_question_extractor(impact_report, component_metadata):
"""Extracting analysis question from the impact layer.
:param impact_report: the impact report that acts as a proxy to fetch
all the data that extractor needed
:type impact_report: safe.report.impact_report.ImpactReport
:param component_metadata: the component metadata. Used to obtain
information about the component we want to render
:type component_metadata: safe.report.report_metadata.
ReportComponentsMetadata
:return: context for rendering phase
:rtype: dict
.. versionadded:: 4.0
"""
multi_exposure = impact_report.multi_exposure_impact_function
if multi_exposure:
return multi_exposure_analysis_question_extractor(impact_report, component_metadata) # depends on [control=['if'], data=[]]
context = {}
extra_args = component_metadata.extra_args
provenance = impact_report.impact_function.provenance
header = resolve_from_dictionary(extra_args, 'header')
analysis_question = provenance['analysis_question']
context['component_key'] = component_metadata.key
context['header'] = header
context['analysis_questions'] = [analysis_question]
return context |
def dropout(attrs, inputs, proto_obj):
"""Dropout Regularization."""
mode = 'training'
if 'is_test' in attrs and attrs['is_test'] == 0:
mode = 'always'
new_attrs = translation_utils._fix_attribute_names(attrs,
{'ratio': 'p'})
new_attrs = translation_utils._remove_attributes(new_attrs, ['is_test'])
new_attrs = translation_utils._add_extra_attributes(new_attrs, {'mode': mode})
return 'Dropout', new_attrs, inputs | def function[dropout, parameter[attrs, inputs, proto_obj]]:
constant[Dropout Regularization.]
variable[mode] assign[=] constant[training]
if <ast.BoolOp object at 0x7da1b204ca00> begin[:]
variable[mode] assign[=] constant[always]
variable[new_attrs] assign[=] call[name[translation_utils]._fix_attribute_names, parameter[name[attrs], dictionary[[<ast.Constant object at 0x7da1b204df60>], [<ast.Constant object at 0x7da1b204fd00>]]]]
variable[new_attrs] assign[=] call[name[translation_utils]._remove_attributes, parameter[name[new_attrs], list[[<ast.Constant object at 0x7da1b204ceb0>]]]]
variable[new_attrs] assign[=] call[name[translation_utils]._add_extra_attributes, parameter[name[new_attrs], dictionary[[<ast.Constant object at 0x7da1b204f8b0>], [<ast.Name object at 0x7da1b204f550>]]]]
return[tuple[[<ast.Constant object at 0x7da1b204d7b0>, <ast.Name object at 0x7da1b204c9d0>, <ast.Name object at 0x7da1b204c310>]]] | keyword[def] identifier[dropout] ( identifier[attrs] , identifier[inputs] , identifier[proto_obj] ):
literal[string]
identifier[mode] = literal[string]
keyword[if] literal[string] keyword[in] identifier[attrs] keyword[and] identifier[attrs] [ literal[string] ]== literal[int] :
identifier[mode] = literal[string]
identifier[new_attrs] = identifier[translation_utils] . identifier[_fix_attribute_names] ( identifier[attrs] ,
{ literal[string] : literal[string] })
identifier[new_attrs] = identifier[translation_utils] . identifier[_remove_attributes] ( identifier[new_attrs] ,[ literal[string] ])
identifier[new_attrs] = identifier[translation_utils] . identifier[_add_extra_attributes] ( identifier[new_attrs] ,{ literal[string] : identifier[mode] })
keyword[return] literal[string] , identifier[new_attrs] , identifier[inputs] | def dropout(attrs, inputs, proto_obj):
"""Dropout Regularization."""
mode = 'training'
if 'is_test' in attrs and attrs['is_test'] == 0:
mode = 'always' # depends on [control=['if'], data=[]]
new_attrs = translation_utils._fix_attribute_names(attrs, {'ratio': 'p'})
new_attrs = translation_utils._remove_attributes(new_attrs, ['is_test'])
new_attrs = translation_utils._add_extra_attributes(new_attrs, {'mode': mode})
return ('Dropout', new_attrs, inputs) |
def set_password(self, service, username, password):
"""Set password for the username of the service
"""
segments = range(0, len(password), self._max_password_size)
password_parts = [
password[i:i + self._max_password_size] for i in segments]
for i, password_part in enumerate(password_parts):
curr_username = username
if i > 0:
curr_username += '{{part_%d}}' % i
self._keyring.set_password(service, curr_username, password_part) | def function[set_password, parameter[self, service, username, password]]:
constant[Set password for the username of the service
]
variable[segments] assign[=] call[name[range], parameter[constant[0], call[name[len], parameter[name[password]]], name[self]._max_password_size]]
variable[password_parts] assign[=] <ast.ListComp object at 0x7da1b10e7430>
for taget[tuple[[<ast.Name object at 0x7da1b10e60b0>, <ast.Name object at 0x7da1b10e58d0>]]] in starred[call[name[enumerate], parameter[name[password_parts]]]] begin[:]
variable[curr_username] assign[=] name[username]
if compare[name[i] greater[>] constant[0]] begin[:]
<ast.AugAssign object at 0x7da1b10e4c70>
call[name[self]._keyring.set_password, parameter[name[service], name[curr_username], name[password_part]]] | keyword[def] identifier[set_password] ( identifier[self] , identifier[service] , identifier[username] , identifier[password] ):
literal[string]
identifier[segments] = identifier[range] ( literal[int] , identifier[len] ( identifier[password] ), identifier[self] . identifier[_max_password_size] )
identifier[password_parts] =[
identifier[password] [ identifier[i] : identifier[i] + identifier[self] . identifier[_max_password_size] ] keyword[for] identifier[i] keyword[in] identifier[segments] ]
keyword[for] identifier[i] , identifier[password_part] keyword[in] identifier[enumerate] ( identifier[password_parts] ):
identifier[curr_username] = identifier[username]
keyword[if] identifier[i] > literal[int] :
identifier[curr_username] += literal[string] % identifier[i]
identifier[self] . identifier[_keyring] . identifier[set_password] ( identifier[service] , identifier[curr_username] , identifier[password_part] ) | def set_password(self, service, username, password):
"""Set password for the username of the service
"""
segments = range(0, len(password), self._max_password_size)
password_parts = [password[i:i + self._max_password_size] for i in segments]
for (i, password_part) in enumerate(password_parts):
curr_username = username
if i > 0:
curr_username += '{{part_%d}}' % i # depends on [control=['if'], data=['i']]
self._keyring.set_password(service, curr_username, password_part) # depends on [control=['for'], data=[]] |
def _dequeue_into_store(transfersession):
"""
Takes data from the buffers and merges into the store and record max counters.
"""
with connection.cursor() as cursor:
DBBackend._dequeuing_delete_rmcb_records(cursor, transfersession.id)
DBBackend._dequeuing_delete_buffered_records(cursor, transfersession.id)
current_id = InstanceIDModel.get_current_instance_and_increment_counter()
DBBackend._dequeuing_merge_conflict_buffer(cursor, current_id, transfersession.id)
DBBackend._dequeuing_merge_conflict_rmcb(cursor, transfersession.id)
DBBackend._dequeuing_update_rmcs_last_saved_by(cursor, current_id, transfersession.id)
DBBackend._dequeuing_delete_mc_rmcb(cursor, transfersession.id)
DBBackend._dequeuing_delete_mc_buffer(cursor, transfersession.id)
DBBackend._dequeuing_insert_remaining_buffer(cursor, transfersession.id)
DBBackend._dequeuing_insert_remaining_rmcb(cursor, transfersession.id)
DBBackend._dequeuing_delete_remaining_rmcb(cursor, transfersession.id)
DBBackend._dequeuing_delete_remaining_buffer(cursor, transfersession.id)
if getattr(settings, 'MORANGO_DESERIALIZE_AFTER_DEQUEUING', True):
_deserialize_from_store(transfersession.sync_session.profile) | def function[_dequeue_into_store, parameter[transfersession]]:
constant[
Takes data from the buffers and merges into the store and record max counters.
]
with call[name[connection].cursor, parameter[]] begin[:]
call[name[DBBackend]._dequeuing_delete_rmcb_records, parameter[name[cursor], name[transfersession].id]]
call[name[DBBackend]._dequeuing_delete_buffered_records, parameter[name[cursor], name[transfersession].id]]
variable[current_id] assign[=] call[name[InstanceIDModel].get_current_instance_and_increment_counter, parameter[]]
call[name[DBBackend]._dequeuing_merge_conflict_buffer, parameter[name[cursor], name[current_id], name[transfersession].id]]
call[name[DBBackend]._dequeuing_merge_conflict_rmcb, parameter[name[cursor], name[transfersession].id]]
call[name[DBBackend]._dequeuing_update_rmcs_last_saved_by, parameter[name[cursor], name[current_id], name[transfersession].id]]
call[name[DBBackend]._dequeuing_delete_mc_rmcb, parameter[name[cursor], name[transfersession].id]]
call[name[DBBackend]._dequeuing_delete_mc_buffer, parameter[name[cursor], name[transfersession].id]]
call[name[DBBackend]._dequeuing_insert_remaining_buffer, parameter[name[cursor], name[transfersession].id]]
call[name[DBBackend]._dequeuing_insert_remaining_rmcb, parameter[name[cursor], name[transfersession].id]]
call[name[DBBackend]._dequeuing_delete_remaining_rmcb, parameter[name[cursor], name[transfersession].id]]
call[name[DBBackend]._dequeuing_delete_remaining_buffer, parameter[name[cursor], name[transfersession].id]]
if call[name[getattr], parameter[name[settings], constant[MORANGO_DESERIALIZE_AFTER_DEQUEUING], constant[True]]] begin[:]
call[name[_deserialize_from_store], parameter[name[transfersession].sync_session.profile]] | keyword[def] identifier[_dequeue_into_store] ( identifier[transfersession] ):
literal[string]
keyword[with] identifier[connection] . identifier[cursor] () keyword[as] identifier[cursor] :
identifier[DBBackend] . identifier[_dequeuing_delete_rmcb_records] ( identifier[cursor] , identifier[transfersession] . identifier[id] )
identifier[DBBackend] . identifier[_dequeuing_delete_buffered_records] ( identifier[cursor] , identifier[transfersession] . identifier[id] )
identifier[current_id] = identifier[InstanceIDModel] . identifier[get_current_instance_and_increment_counter] ()
identifier[DBBackend] . identifier[_dequeuing_merge_conflict_buffer] ( identifier[cursor] , identifier[current_id] , identifier[transfersession] . identifier[id] )
identifier[DBBackend] . identifier[_dequeuing_merge_conflict_rmcb] ( identifier[cursor] , identifier[transfersession] . identifier[id] )
identifier[DBBackend] . identifier[_dequeuing_update_rmcs_last_saved_by] ( identifier[cursor] , identifier[current_id] , identifier[transfersession] . identifier[id] )
identifier[DBBackend] . identifier[_dequeuing_delete_mc_rmcb] ( identifier[cursor] , identifier[transfersession] . identifier[id] )
identifier[DBBackend] . identifier[_dequeuing_delete_mc_buffer] ( identifier[cursor] , identifier[transfersession] . identifier[id] )
identifier[DBBackend] . identifier[_dequeuing_insert_remaining_buffer] ( identifier[cursor] , identifier[transfersession] . identifier[id] )
identifier[DBBackend] . identifier[_dequeuing_insert_remaining_rmcb] ( identifier[cursor] , identifier[transfersession] . identifier[id] )
identifier[DBBackend] . identifier[_dequeuing_delete_remaining_rmcb] ( identifier[cursor] , identifier[transfersession] . identifier[id] )
identifier[DBBackend] . identifier[_dequeuing_delete_remaining_buffer] ( identifier[cursor] , identifier[transfersession] . identifier[id] )
keyword[if] identifier[getattr] ( identifier[settings] , literal[string] , keyword[True] ):
identifier[_deserialize_from_store] ( identifier[transfersession] . identifier[sync_session] . identifier[profile] ) | def _dequeue_into_store(transfersession):
"""
Takes data from the buffers and merges into the store and record max counters.
"""
with connection.cursor() as cursor:
DBBackend._dequeuing_delete_rmcb_records(cursor, transfersession.id)
DBBackend._dequeuing_delete_buffered_records(cursor, transfersession.id)
current_id = InstanceIDModel.get_current_instance_and_increment_counter()
DBBackend._dequeuing_merge_conflict_buffer(cursor, current_id, transfersession.id)
DBBackend._dequeuing_merge_conflict_rmcb(cursor, transfersession.id)
DBBackend._dequeuing_update_rmcs_last_saved_by(cursor, current_id, transfersession.id)
DBBackend._dequeuing_delete_mc_rmcb(cursor, transfersession.id)
DBBackend._dequeuing_delete_mc_buffer(cursor, transfersession.id)
DBBackend._dequeuing_insert_remaining_buffer(cursor, transfersession.id)
DBBackend._dequeuing_insert_remaining_rmcb(cursor, transfersession.id)
DBBackend._dequeuing_delete_remaining_rmcb(cursor, transfersession.id)
DBBackend._dequeuing_delete_remaining_buffer(cursor, transfersession.id) # depends on [control=['with'], data=['cursor']]
if getattr(settings, 'MORANGO_DESERIALIZE_AFTER_DEQUEUING', True):
_deserialize_from_store(transfersession.sync_session.profile) # depends on [control=['if'], data=[]] |
def update_mapping(self, mapping: Dict[ops.Qid, LogicalIndex],
keys: Sequence[ops.Qid]
) -> None:
"""Updates a mapping (in place) from qubits to logical indices.
Args:
mapping: The mapping to update.
keys: The qubits acted on by the gate.
"""
permutation = self.permutation()
indices = tuple(permutation.keys())
new_keys = [keys[permutation[i]] for i in indices]
old_elements = [mapping[keys[i]] for i in indices]
mapping.update(zip(new_keys, old_elements)) | def function[update_mapping, parameter[self, mapping, keys]]:
constant[Updates a mapping (in place) from qubits to logical indices.
Args:
mapping: The mapping to update.
keys: The qubits acted on by the gate.
]
variable[permutation] assign[=] call[name[self].permutation, parameter[]]
variable[indices] assign[=] call[name[tuple], parameter[call[name[permutation].keys, parameter[]]]]
variable[new_keys] assign[=] <ast.ListComp object at 0x7da1b1cec2e0>
variable[old_elements] assign[=] <ast.ListComp object at 0x7da1b217efe0>
call[name[mapping].update, parameter[call[name[zip], parameter[name[new_keys], name[old_elements]]]]] | keyword[def] identifier[update_mapping] ( identifier[self] , identifier[mapping] : identifier[Dict] [ identifier[ops] . identifier[Qid] , identifier[LogicalIndex] ],
identifier[keys] : identifier[Sequence] [ identifier[ops] . identifier[Qid] ]
)-> keyword[None] :
literal[string]
identifier[permutation] = identifier[self] . identifier[permutation] ()
identifier[indices] = identifier[tuple] ( identifier[permutation] . identifier[keys] ())
identifier[new_keys] =[ identifier[keys] [ identifier[permutation] [ identifier[i] ]] keyword[for] identifier[i] keyword[in] identifier[indices] ]
identifier[old_elements] =[ identifier[mapping] [ identifier[keys] [ identifier[i] ]] keyword[for] identifier[i] keyword[in] identifier[indices] ]
identifier[mapping] . identifier[update] ( identifier[zip] ( identifier[new_keys] , identifier[old_elements] )) | def update_mapping(self, mapping: Dict[ops.Qid, LogicalIndex], keys: Sequence[ops.Qid]) -> None:
"""Updates a mapping (in place) from qubits to logical indices.
Args:
mapping: The mapping to update.
keys: The qubits acted on by the gate.
"""
permutation = self.permutation()
indices = tuple(permutation.keys())
new_keys = [keys[permutation[i]] for i in indices]
old_elements = [mapping[keys[i]] for i in indices]
mapping.update(zip(new_keys, old_elements)) |
def cacheContent(self, request, response, buffer):
"""
Checks if the response should be cached.
Caches the content in a gzipped format given that a `cache_it` flag is
True
To be used CacheClient
"""
content = buffer.getvalue()
code = int(response.code)
cache_it = False
uri, bust = self.processURI(request.uri, PREFIX)
# Conditions for adding uri response to cache:
# * if it was successful i.e. status of in the 200s
# * requested using GET
# * not busted
if request.method == "GET" and code / 100 == 2 and not bust:
cache_control = response.headers.get('cache-control')
if cache_control:
params = dict(urlparse.parse_qsl(cache_control))
if int(params.get('max-age', '0')) > 0:
cache_it = True
if cache_it:
content = compressBuffer(content)
self.addResource(content, uri, response.headers)
buffer.close() | def function[cacheContent, parameter[self, request, response, buffer]]:
constant[
Checks if the response should be cached.
Caches the content in a gzipped format given that a `cache_it` flag is
True
To be used CacheClient
]
variable[content] assign[=] call[name[buffer].getvalue, parameter[]]
variable[code] assign[=] call[name[int], parameter[name[response].code]]
variable[cache_it] assign[=] constant[False]
<ast.Tuple object at 0x7da20e74b670> assign[=] call[name[self].processURI, parameter[name[request].uri, name[PREFIX]]]
if <ast.BoolOp object at 0x7da20e74b040> begin[:]
variable[cache_control] assign[=] call[name[response].headers.get, parameter[constant[cache-control]]]
if name[cache_control] begin[:]
variable[params] assign[=] call[name[dict], parameter[call[name[urlparse].parse_qsl, parameter[name[cache_control]]]]]
if compare[call[name[int], parameter[call[name[params].get, parameter[constant[max-age], constant[0]]]]] greater[>] constant[0]] begin[:]
variable[cache_it] assign[=] constant[True]
if name[cache_it] begin[:]
variable[content] assign[=] call[name[compressBuffer], parameter[name[content]]]
call[name[self].addResource, parameter[name[content], name[uri], name[response].headers]]
call[name[buffer].close, parameter[]] | keyword[def] identifier[cacheContent] ( identifier[self] , identifier[request] , identifier[response] , identifier[buffer] ):
literal[string]
identifier[content] = identifier[buffer] . identifier[getvalue] ()
identifier[code] = identifier[int] ( identifier[response] . identifier[code] )
identifier[cache_it] = keyword[False]
identifier[uri] , identifier[bust] = identifier[self] . identifier[processURI] ( identifier[request] . identifier[uri] , identifier[PREFIX] )
keyword[if] identifier[request] . identifier[method] == literal[string] keyword[and] identifier[code] / literal[int] == literal[int] keyword[and] keyword[not] identifier[bust] :
identifier[cache_control] = identifier[response] . identifier[headers] . identifier[get] ( literal[string] )
keyword[if] identifier[cache_control] :
identifier[params] = identifier[dict] ( identifier[urlparse] . identifier[parse_qsl] ( identifier[cache_control] ))
keyword[if] identifier[int] ( identifier[params] . identifier[get] ( literal[string] , literal[string] ))> literal[int] :
identifier[cache_it] = keyword[True]
keyword[if] identifier[cache_it] :
identifier[content] = identifier[compressBuffer] ( identifier[content] )
identifier[self] . identifier[addResource] ( identifier[content] , identifier[uri] , identifier[response] . identifier[headers] )
identifier[buffer] . identifier[close] () | def cacheContent(self, request, response, buffer):
"""
Checks if the response should be cached.
Caches the content in a gzipped format given that a `cache_it` flag is
True
To be used CacheClient
"""
content = buffer.getvalue()
code = int(response.code)
cache_it = False
(uri, bust) = self.processURI(request.uri, PREFIX)
# Conditions for adding uri response to cache:
# * if it was successful i.e. status of in the 200s
# * requested using GET
# * not busted
if request.method == 'GET' and code / 100 == 2 and (not bust):
cache_control = response.headers.get('cache-control')
if cache_control:
params = dict(urlparse.parse_qsl(cache_control))
if int(params.get('max-age', '0')) > 0:
cache_it = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if cache_it:
content = compressBuffer(content)
self.addResource(content, uri, response.headers) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
buffer.close() |
def bot_item_drop_event(self, dropped_listwidget, event):
    """
    Move the dragged agent to the team owned by the list widget it was
    dropped on (blue list -> team 0, otherwise team 1), then refresh both
    team list widgets.

    :param dropped_listwidget: The listwidget belonging to the new team for the agent
    :param event: The QDropEvent containing the source
    :return:
    """
    origin_widget = event.source()
    # Dropping back onto the widget the drag started from is a no-op.
    if origin_widget is dropped_listwidget:
        return
    new_team = 0 if dropped_listwidget == self.blue_listwidget else 1
    self.current_bot.set_team(new_team)
    self.update_teams_listwidgets()
constant[
Switches the team for the dropped agent to the other team
:param dropped_listwidget: The listwidget belonging to the new team for the agent
:param event: The QDropEvent containing the source
:return:
]
variable[dragged_listwidget] assign[=] call[name[event].source, parameter[]]
if compare[name[dragged_listwidget] is name[dropped_listwidget]] begin[:]
return[None]
call[name[self].current_bot.set_team, parameter[<ast.IfExp object at 0x7da204622710>]]
call[name[self].update_teams_listwidgets, parameter[]] | keyword[def] identifier[bot_item_drop_event] ( identifier[self] , identifier[dropped_listwidget] , identifier[event] ):
literal[string]
identifier[dragged_listwidget] = identifier[event] . identifier[source] ()
keyword[if] identifier[dragged_listwidget] keyword[is] identifier[dropped_listwidget] :
keyword[return]
identifier[self] . identifier[current_bot] . identifier[set_team] ( literal[int] keyword[if] identifier[dropped_listwidget] == identifier[self] . identifier[blue_listwidget] keyword[else] literal[int] )
identifier[self] . identifier[update_teams_listwidgets] () | def bot_item_drop_event(self, dropped_listwidget, event):
"""
Switches the team for the dropped agent to the other team
:param dropped_listwidget: The listwidget belonging to the new team for the agent
:param event: The QDropEvent containing the source
:return:
"""
dragged_listwidget = event.source()
if dragged_listwidget is dropped_listwidget: # drops in the same widget
return # depends on [control=['if'], data=[]]
self.current_bot.set_team(0 if dropped_listwidget == self.blue_listwidget else 1)
self.update_teams_listwidgets() |
def stop_tracing(self, tid):
    """
    Stop tracing mode in the given thread.

    @type tid: int
    @param tid: Global ID of the thread that should no longer be traced.
    """
    # Threads that are not currently being traced require no work.
    if not self.is_tracing(tid):
        return
    target_thread = self.system.get_thread(tid)
    self.__stop_tracing(target_thread)
constant[
Stop tracing mode in the given thread.
@type tid: int
@param tid: Global ID of thread to stop tracing.
]
if call[name[self].is_tracing, parameter[name[tid]]] begin[:]
variable[thread] assign[=] call[name[self].system.get_thread, parameter[name[tid]]]
call[name[self].__stop_tracing, parameter[name[thread]]] | keyword[def] identifier[stop_tracing] ( identifier[self] , identifier[tid] ):
literal[string]
keyword[if] identifier[self] . identifier[is_tracing] ( identifier[tid] ):
identifier[thread] = identifier[self] . identifier[system] . identifier[get_thread] ( identifier[tid] )
identifier[self] . identifier[__stop_tracing] ( identifier[thread] ) | def stop_tracing(self, tid):
"""
Stop tracing mode in the given thread.
@type tid: int
@param tid: Global ID of thread to stop tracing.
"""
if self.is_tracing(tid):
thread = self.system.get_thread(tid)
self.__stop_tracing(thread) # depends on [control=['if'], data=[]] |
def declare_backward_dependency(self, out_grad, in_data, out_data):
    """Declare dependencies of this operator for backward pass.

    The output gradients are only required when the operator asks for
    them via ``need_top_grad()``; inputs and outputs are always needed.

    Parameters
    ----------
    out_grad : list of int
        ids of out_grad blobs.
    in_data : list of int
        ids of in_data blobs.
    out_data: list of int
        ids of out_data blobs.

    Returns
    -------
    deps : list of int
        ids of the needed blobs.
    """
    deps = list(out_grad) if self.need_top_grad() else []
    deps += in_data
    deps += out_data
    return deps
constant[Declare dependencies of this operator for backward pass.
Parameters
----------
out_grad : list of int
ids of out_grad blobs.
in_data : list of int
ids of in_data blobs.
out_data: list of int
ids of out_data blobs.
Returns
-------
deps : list of int
ids of the needed blobs.
]
variable[deps] assign[=] list[[]]
if call[name[self].need_top_grad, parameter[]] begin[:]
call[name[deps].extend, parameter[name[out_grad]]]
call[name[deps].extend, parameter[name[in_data]]]
call[name[deps].extend, parameter[name[out_data]]]
return[name[deps]] | keyword[def] identifier[declare_backward_dependency] ( identifier[self] , identifier[out_grad] , identifier[in_data] , identifier[out_data] ):
literal[string]
identifier[deps] =[]
keyword[if] identifier[self] . identifier[need_top_grad] ():
identifier[deps] . identifier[extend] ( identifier[out_grad] )
identifier[deps] . identifier[extend] ( identifier[in_data] )
identifier[deps] . identifier[extend] ( identifier[out_data] )
keyword[return] identifier[deps] | def declare_backward_dependency(self, out_grad, in_data, out_data):
"""Declare dependencies of this operator for backward pass.
Parameters
----------
out_grad : list of int
ids of out_grad blobs.
in_data : list of int
ids of in_data blobs.
out_data: list of int
ids of out_data blobs.
Returns
-------
deps : list of int
ids of the needed blobs.
"""
deps = []
if self.need_top_grad():
deps.extend(out_grad) # depends on [control=['if'], data=[]]
deps.extend(in_data)
deps.extend(out_data)
return deps |
def get_matching_blocks(self):
    """Return list of triples describing matching subsequences.
    Each triple is of the form (i, j, n), and means that
    a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
    i and in j. New in Python 2.5, it's also guaranteed that if
    (i, j, n) and (i', j', n') are adjacent triples in the list, and
    the second is not the last triple in the list, then i+n != i' or
    j+n != j'. IOW, adjacent triples never describe adjacent equal
    blocks.
    The last triple is a dummy, (len(a), len(b), 0), and is the only
    triple with n==0.
    >>> s = SequenceMatcher(None, "abxcd", "abcd")
    >>> s.get_matching_blocks()
    [Match(a=0, b=0, size=2), Match(a=3, b=2, size=2), Match(a=5, b=4, size=0)]
    """
    if self.matching_blocks is not None:
        return self.matching_blocks
    la, lb = len(self.a), len(self.b)
    # This is most naturally expressed as a recursive algorithm, but
    # at least one user bumped into extreme use cases that exceeded
    # the recursion limit on their box. So, now we maintain a list
    # ('queue`) of blocks we still need to look at, and append partial
    # results to `matching_blocks` in a loop; the matches are sorted
    # at the end.
    queue = [(0, la, 0, lb)]
    matching_blocks = []
    while queue:
        alo, ahi, blo, bhi = queue.pop()
        i, j, k = x = self.find_longest_match(alo, ahi, blo, bhi)
        # a[alo:i] vs b[blo:j] unknown
        # a[i:i+k] same as b[j:j+k]
        # a[i+k:ahi] vs b[j+k:bhi] unknown
        if k:   # if k is 0, there was no matching block
            matching_blocks.append(x)
            if alo < i and blo < j:
                queue.append((alo, i, blo, j))
            if i+k < ahi and j+k < bhi:
                queue.append((i+k, ahi, j+k, bhi))
    matching_blocks.sort()
    # It's possible that we have adjacent equal blocks in the
    # matching_blocks list now. Starting with 2.5, this code was added
    # to collapse them.
    i1 = j1 = k1 = 0
    non_adjacent = []
    for i2, j2, k2 in matching_blocks:
        # Is this block adjacent to i1, j1, k1?
        if i1 + k1 == i2 and j1 + k1 == j2:
            # Yes, so collapse them -- this just increases the length of
            # the first block by the length of the second, and the first
            # block so lengthened remains the block to compare against.
            k1 += k2
        else:
            # Not adjacent. Remember the first block (k1==0 means it's
            # the dummy we started with), and make the second block the
            # new block to compare against.
            if k1:
                non_adjacent.append((i1, j1, k1))
            i1, j1, k1 = i2, j2, k2
    if k1:
        non_adjacent.append((i1, j1, k1))
    non_adjacent.append( (la, lb, 0) )
    # BUG FIX: under Python 3, map() returns a one-shot iterator; caching
    # the iterator meant the first full iteration exhausted it and every
    # subsequent call (or second iteration of the cached value) saw an
    # empty sequence.  Materialize it so the cache really is the list the
    # docstring promises.  list(map(...)) is also valid on Python 2.
    self.matching_blocks = list(map(Match._make, non_adjacent))
    return self.matching_blocks
constant[Return list of triples describing matching subsequences.
Each triple is of the form (i, j, n), and means that
a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
i and in j. New in Python 2.5, it's also guaranteed that if
(i, j, n) and (i', j', n') are adjacent triples in the list, and
the second is not the last triple in the list, then i+n != i' or
j+n != j'. IOW, adjacent triples never describe adjacent equal
blocks.
The last triple is a dummy, (len(a), len(b), 0), and is the only
triple with n==0.
>>> s = SequenceMatcher(None, "abxcd", "abcd")
>>> s.get_matching_blocks()
[Match(a=0, b=0, size=2), Match(a=3, b=2, size=2), Match(a=5, b=4, size=0)]
]
if compare[name[self].matching_blocks is_not constant[None]] begin[:]
return[name[self].matching_blocks]
<ast.Tuple object at 0x7da18c4cdc90> assign[=] tuple[[<ast.Call object at 0x7da18c4cf730>, <ast.Call object at 0x7da18c4cefe0>]]
variable[queue] assign[=] list[[<ast.Tuple object at 0x7da18c4cf160>]]
variable[matching_blocks] assign[=] list[[]]
while name[queue] begin[:]
<ast.Tuple object at 0x7da18c4ce140> assign[=] call[name[queue].pop, parameter[]]
<ast.Tuple object at 0x7da18c4cda20> assign[=] call[name[self].find_longest_match, parameter[name[alo], name[ahi], name[blo], name[bhi]]]
if name[k] begin[:]
call[name[matching_blocks].append, parameter[name[x]]]
if <ast.BoolOp object at 0x7da18c4ced70> begin[:]
call[name[queue].append, parameter[tuple[[<ast.Name object at 0x7da18c4ccdc0>, <ast.Name object at 0x7da18c4ccb50>, <ast.Name object at 0x7da18c4cd660>, <ast.Name object at 0x7da18c4ce500>]]]]
if <ast.BoolOp object at 0x7da18c4cc3a0> begin[:]
call[name[queue].append, parameter[tuple[[<ast.BinOp object at 0x7da18dc05450>, <ast.Name object at 0x7da18dc048e0>, <ast.BinOp object at 0x7da18dc040a0>, <ast.Name object at 0x7da18dc05f60>]]]]
call[name[matching_blocks].sort, parameter[]]
variable[i1] assign[=] constant[0]
variable[non_adjacent] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da18dc044f0>, <ast.Name object at 0x7da18dc076d0>, <ast.Name object at 0x7da18dc04940>]]] in starred[name[matching_blocks]] begin[:]
if <ast.BoolOp object at 0x7da18dc05990> begin[:]
<ast.AugAssign object at 0x7da18dc04400>
if name[k1] begin[:]
call[name[non_adjacent].append, parameter[tuple[[<ast.Name object at 0x7da18dc04100>, <ast.Name object at 0x7da18dc05600>, <ast.Name object at 0x7da18dc05810>]]]]
call[name[non_adjacent].append, parameter[tuple[[<ast.Name object at 0x7da18dc06b30>, <ast.Name object at 0x7da18dc06a10>, <ast.Constant object at 0x7da18dc06050>]]]]
name[self].matching_blocks assign[=] call[name[map], parameter[name[Match]._make, name[non_adjacent]]]
return[name[self].matching_blocks] | keyword[def] identifier[get_matching_blocks] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[matching_blocks] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[self] . identifier[matching_blocks]
identifier[la] , identifier[lb] = identifier[len] ( identifier[self] . identifier[a] ), identifier[len] ( identifier[self] . identifier[b] )
identifier[queue] =[( literal[int] , identifier[la] , literal[int] , identifier[lb] )]
identifier[matching_blocks] =[]
keyword[while] identifier[queue] :
identifier[alo] , identifier[ahi] , identifier[blo] , identifier[bhi] = identifier[queue] . identifier[pop] ()
identifier[i] , identifier[j] , identifier[k] = identifier[x] = identifier[self] . identifier[find_longest_match] ( identifier[alo] , identifier[ahi] , identifier[blo] , identifier[bhi] )
keyword[if] identifier[k] :
identifier[matching_blocks] . identifier[append] ( identifier[x] )
keyword[if] identifier[alo] < identifier[i] keyword[and] identifier[blo] < identifier[j] :
identifier[queue] . identifier[append] (( identifier[alo] , identifier[i] , identifier[blo] , identifier[j] ))
keyword[if] identifier[i] + identifier[k] < identifier[ahi] keyword[and] identifier[j] + identifier[k] < identifier[bhi] :
identifier[queue] . identifier[append] (( identifier[i] + identifier[k] , identifier[ahi] , identifier[j] + identifier[k] , identifier[bhi] ))
identifier[matching_blocks] . identifier[sort] ()
identifier[i1] = identifier[j1] = identifier[k1] = literal[int]
identifier[non_adjacent] =[]
keyword[for] identifier[i2] , identifier[j2] , identifier[k2] keyword[in] identifier[matching_blocks] :
keyword[if] identifier[i1] + identifier[k1] == identifier[i2] keyword[and] identifier[j1] + identifier[k1] == identifier[j2] :
identifier[k1] += identifier[k2]
keyword[else] :
keyword[if] identifier[k1] :
identifier[non_adjacent] . identifier[append] (( identifier[i1] , identifier[j1] , identifier[k1] ))
identifier[i1] , identifier[j1] , identifier[k1] = identifier[i2] , identifier[j2] , identifier[k2]
keyword[if] identifier[k1] :
identifier[non_adjacent] . identifier[append] (( identifier[i1] , identifier[j1] , identifier[k1] ))
identifier[non_adjacent] . identifier[append] (( identifier[la] , identifier[lb] , literal[int] ))
identifier[self] . identifier[matching_blocks] = identifier[map] ( identifier[Match] . identifier[_make] , identifier[non_adjacent] )
keyword[return] identifier[self] . identifier[matching_blocks] | def get_matching_blocks(self):
"""Return list of triples describing matching subsequences.
Each triple is of the form (i, j, n), and means that
a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
i and in j. New in Python 2.5, it's also guaranteed that if
(i, j, n) and (i', j', n') are adjacent triples in the list, and
the second is not the last triple in the list, then i+n != i' or
j+n != j'. IOW, adjacent triples never describe adjacent equal
blocks.
The last triple is a dummy, (len(a), len(b), 0), and is the only
triple with n==0.
>>> s = SequenceMatcher(None, "abxcd", "abcd")
>>> s.get_matching_blocks()
[Match(a=0, b=0, size=2), Match(a=3, b=2, size=2), Match(a=5, b=4, size=0)]
"""
if self.matching_blocks is not None:
return self.matching_blocks # depends on [control=['if'], data=[]]
(la, lb) = (len(self.a), len(self.b))
# This is most naturally expressed as a recursive algorithm, but
# at least one user bumped into extreme use cases that exceeded
# the recursion limit on their box. So, now we maintain a list
# ('queue`) of blocks we still need to look at, and append partial
# results to `matching_blocks` in a loop; the matches are sorted
# at the end.
queue = [(0, la, 0, lb)]
matching_blocks = []
while queue:
(alo, ahi, blo, bhi) = queue.pop()
(i, j, k) = x = self.find_longest_match(alo, ahi, blo, bhi)
# a[alo:i] vs b[blo:j] unknown
# a[i:i+k] same as b[j:j+k]
# a[i+k:ahi] vs b[j+k:bhi] unknown
if k: # if k is 0, there was no matching block
matching_blocks.append(x)
if alo < i and blo < j:
queue.append((alo, i, blo, j)) # depends on [control=['if'], data=[]]
if i + k < ahi and j + k < bhi:
queue.append((i + k, ahi, j + k, bhi)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
matching_blocks.sort()
# It's possible that we have adjacent equal blocks in the
# matching_blocks list now. Starting with 2.5, this code was added
# to collapse them.
i1 = j1 = k1 = 0
non_adjacent = []
for (i2, j2, k2) in matching_blocks:
# Is this block adjacent to i1, j1, k1?
if i1 + k1 == i2 and j1 + k1 == j2:
# Yes, so collapse them -- this just increases the length of
# the first block by the length of the second, and the first
# block so lengthened remains the block to compare against.
k1 += k2 # depends on [control=['if'], data=[]]
else:
# Not adjacent. Remember the first block (k1==0 means it's
# the dummy we started with), and make the second block the
# new block to compare against.
if k1:
non_adjacent.append((i1, j1, k1)) # depends on [control=['if'], data=[]]
(i1, j1, k1) = (i2, j2, k2) # depends on [control=['for'], data=[]]
if k1:
non_adjacent.append((i1, j1, k1)) # depends on [control=['if'], data=[]]
non_adjacent.append((la, lb, 0))
self.matching_blocks = map(Match._make, non_adjacent)
return self.matching_blocks |
def recursively_load_dict_contents_from_group(h5file, path):
    """
    Rebuild a (possibly nested) dict from the HDF5 group at ``path``.

    Inverse of the corresponding save routine: plain leaves come back from
    datasets, sub-dicts from groups, and the special
    ``_reconstruction_flags`` group records how keys/values were encoded
    ("json_key", "list", "tuple", "json_value", "float", "int") so they can
    be restored to their original Python types.
    """
    import json  # hoisted: was re-imported inside the loop on every flagged entry

    rf = h5file["_reconstruction_flags"]
    ans = {}
    for key, item in h5file[path].items():
        dest_key = key
        # Skip the bookkeeping group itself.
        # BUG FIX: this used to be ``key in "_reconstruction_flags"`` -- a
        # *substring* test (the commented-out tuple membership it replaced
        # shows the intent) that silently dropped any key happening to be a
        # substring of the flag-group name, e.g. "flags", "on" or "_".
        if key == "_reconstruction_flags":
            continue
        kkey = key + "_key_"   # flag entry describing how `key` was encoded
        tkey = key + "_typ_"   # flag entry describing the value's type
        if kkey in rf:
            flag = rf[kkey]
            if flag.value == "json_key":
                # The key was not a plain string and was JSON-encoded on save.
                dest_key = json.loads(key)
        if tkey in rf:
            flag = rf[tkey]
            if flag.value == "list":
                dict_to_output = recursively_load_dict_contents_from_group(h5file, path + key + '/')
                ans[dest_key] = list(dict_to_output.values())
                continue
            if flag.value == "tuple":
                dict_to_output = recursively_load_dict_contents_from_group(h5file, path + key + '/')
                ans[dest_key] = tuple(dict_to_output.values())
                continue
            elif flag.value == "json_value":
                ans[dest_key] = json.loads(item.value)
                continue
            elif flag.value == "float":
                ans[dest_key] = float(item.value)
                continue
            elif flag.value == "int":
                ans[dest_key] = int(item.value)
                continue
        # NOTE(review): ``.value`` was removed in h5py >= 3 (modern spelling is
        # ``item[()]``) -- confirm the targeted h5py version before changing.
        if isinstance(item, h5py._hl.dataset.Dataset):
            ans[dest_key] = item.value
        elif isinstance(item, h5py._hl.group.Group):
            ans[dest_key] = recursively_load_dict_contents_from_group(h5file, path + key + '/')
    return ans
constant[
....
]
variable[rf] assign[=] call[name[h5file]][constant[_reconstruction_flags]]
variable[ans] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b1874760>, <ast.Name object at 0x7da1b1876830>]]] in starred[call[call[name[h5file]][name[path]].items, parameter[]]] begin[:]
variable[dest_key] assign[=] name[key]
if compare[name[key] in constant[_reconstruction_flags]] begin[:]
continue
variable[kkey] assign[=] binary_operation[name[key] + constant[_key_]]
variable[tkey] assign[=] binary_operation[name[key] + constant[_typ_]]
if compare[name[kkey] in name[rf]] begin[:]
variable[flag] assign[=] call[name[rf]][name[kkey]]
if compare[name[flag].value equal[==] constant[json_key]] begin[:]
import module[json]
variable[dest_key] assign[=] call[name[json].loads, parameter[name[key]]]
if compare[name[tkey] in name[rf]] begin[:]
variable[flag] assign[=] call[name[rf]][name[tkey]]
if compare[name[flag].value equal[==] constant[list]] begin[:]
variable[dict_to_output] assign[=] call[name[recursively_load_dict_contents_from_group], parameter[name[h5file], binary_operation[binary_operation[name[path] + name[key]] + constant[/]]]]
call[name[ans]][name[dest_key]] assign[=] call[name[list], parameter[call[name[dict_to_output].values, parameter[]]]]
continue
if compare[name[flag].value equal[==] constant[tuple]] begin[:]
variable[dict_to_output] assign[=] call[name[recursively_load_dict_contents_from_group], parameter[name[h5file], binary_operation[binary_operation[name[path] + name[key]] + constant[/]]]]
call[name[ans]][name[dest_key]] assign[=] call[name[tuple], parameter[call[name[dict_to_output].values, parameter[]]]]
continue
if call[name[isinstance], parameter[name[item], name[h5py]._hl.dataset.Dataset]] begin[:]
call[name[ans]][name[dest_key]] assign[=] name[item].value
return[name[ans]] | keyword[def] identifier[recursively_load_dict_contents_from_group] ( identifier[h5file] , identifier[path] ):
literal[string]
identifier[rf] = identifier[h5file] [ literal[string] ]
identifier[ans] ={}
keyword[for] identifier[key] , identifier[item] keyword[in] identifier[h5file] [ identifier[path] ]. identifier[items] ():
identifier[dest_key] = identifier[key]
keyword[if] identifier[key] keyword[in] literal[string] :
keyword[continue]
identifier[kkey] = identifier[key] + literal[string]
identifier[tkey] = identifier[key] + literal[string]
keyword[if] identifier[kkey] keyword[in] identifier[rf] :
identifier[flag] = identifier[rf] [ identifier[kkey] ]
keyword[if] identifier[flag] . identifier[value] == literal[string] :
keyword[import] identifier[json]
identifier[dest_key] = identifier[json] . identifier[loads] ( identifier[key] )
keyword[if] identifier[tkey] keyword[in] identifier[rf] :
identifier[flag] = identifier[rf] [ identifier[tkey] ]
keyword[if] identifier[flag] . identifier[value] == literal[string] :
identifier[dict_to_output] = identifier[recursively_load_dict_contents_from_group] ( identifier[h5file] , identifier[path] + identifier[key] + literal[string] )
identifier[ans] [ identifier[dest_key] ]= identifier[list] ( identifier[dict_to_output] . identifier[values] ())
keyword[continue]
keyword[if] identifier[flag] . identifier[value] == literal[string] :
identifier[dict_to_output] = identifier[recursively_load_dict_contents_from_group] ( identifier[h5file] , identifier[path] + identifier[key] + literal[string] )
identifier[ans] [ identifier[dest_key] ]= identifier[tuple] ( identifier[dict_to_output] . identifier[values] ())
keyword[continue]
keyword[elif] identifier[flag] . identifier[value] == literal[string] :
keyword[import] identifier[json]
identifier[ans] [ identifier[dest_key] ]= identifier[json] . identifier[loads] ( identifier[item] . identifier[value] )
keyword[continue]
keyword[elif] identifier[flag] . identifier[value] == literal[string] :
identifier[ans] [ identifier[dest_key] ]= identifier[float] ( identifier[item] . identifier[value] )
keyword[continue]
keyword[elif] identifier[flag] . identifier[value] == literal[string] :
identifier[ans] [ identifier[dest_key] ]= identifier[int] ( identifier[item] . identifier[value] )
keyword[continue]
keyword[if] identifier[isinstance] ( identifier[item] , identifier[h5py] . identifier[_hl] . identifier[dataset] . identifier[Dataset] ):
identifier[ans] [ identifier[dest_key] ]= identifier[item] . identifier[value]
keyword[elif] identifier[isinstance] ( identifier[item] , identifier[h5py] . identifier[_hl] . identifier[group] . identifier[Group] ):
identifier[ans] [ identifier[dest_key] ]= identifier[recursively_load_dict_contents_from_group] ( identifier[h5file] , identifier[path] + identifier[key] + literal[string] )
keyword[return] identifier[ans] | def recursively_load_dict_contents_from_group(h5file, path):
"""
....
"""
rf = h5file['_reconstruction_flags']
# rkf = h5file["_reconstruction_key_flags"]
ans = {}
for (key, item) in h5file[path].items():
dest_key = key
# if key in ("_reconstruction_flags", "_reconstruction_key_flags"):
if key in '_reconstruction_flags':
continue # depends on [control=['if'], data=[]]
kkey = key + '_key_'
tkey = key + '_typ_'
if kkey in rf:
flag = rf[kkey]
if flag.value == 'json_key':
import json
dest_key = json.loads(key) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['kkey', 'rf']]
# import pickle
# dest_key = pickle.loads(key.encode("ascii"))
# logger.debug("unpickling key")
if tkey in rf:
flag = rf[tkey]
if flag.value == 'list':
dict_to_output = recursively_load_dict_contents_from_group(h5file, path + key + '/')
ans[dest_key] = list(dict_to_output.values())
continue # depends on [control=['if'], data=[]]
if flag.value == 'tuple':
dict_to_output = recursively_load_dict_contents_from_group(h5file, path + key + '/')
ans[dest_key] = tuple(dict_to_output.values())
continue # depends on [control=['if'], data=[]]
elif flag.value == 'json_value':
import json
ans[dest_key] = json.loads(item.value)
continue # depends on [control=['if'], data=[]]
elif flag.value == 'float':
ans[dest_key] = float(item.value)
continue # depends on [control=['if'], data=[]]
elif flag.value == 'int':
ans[dest_key] = int(item.value)
continue # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['tkey', 'rf']]
if isinstance(item, h5py._hl.dataset.Dataset):
ans[dest_key] = item.value # depends on [control=['if'], data=[]]
elif isinstance(item, h5py._hl.group.Group):
ans[dest_key] = recursively_load_dict_contents_from_group(h5file, path + key + '/') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return ans |
def git_clone_job_list(job_list):
    """Run every git clone job in *job_list* on a bounded pool of worker threads."""
    pending = Queue()
    for entry in job_list:
        pending.put(entry)
    # Never spin up more workers than there are jobs, capped at 20.
    worker_count = min(len(job_list), 20)
    workers = []
    for _ in range(worker_count):
        worker = Thread(target=git_clone_worker, args=(pending, ))
        worker.start()
        workers.append(worker)
    # Block until every queued job has been marked done by a worker.
    pending.join()
    # One None sentinel per worker tells it to shut down.
    for _ in range(worker_count):
        pending.put(None)
    for worker in workers:
        worker.join()
constant[Deal with all git clone jobs in $job_list.]
variable[queue] assign[=] call[name[Queue], parameter[]]
for taget[name[job]] in starred[name[job_list]] begin[:]
call[name[queue].put, parameter[name[job]]]
if compare[call[name[len], parameter[name[job_list]]] less[<] constant[20]] begin[:]
variable[thread_num] assign[=] call[name[len], parameter[name[job_list]]]
variable[threads] assign[=] list[[]]
for taget[name[_]] in starred[call[name[range], parameter[name[thread_num]]]] begin[:]
variable[thread] assign[=] call[name[Thread], parameter[]]
call[name[thread].start, parameter[]]
call[name[threads].append, parameter[name[thread]]]
call[name[queue].join, parameter[]]
for taget[name[_]] in starred[call[name[range], parameter[name[thread_num]]]] begin[:]
call[name[queue].put, parameter[constant[None]]]
for taget[name[thread]] in starred[name[threads]] begin[:]
call[name[thread].join, parameter[]] | keyword[def] identifier[git_clone_job_list] ( identifier[job_list] ):
literal[string]
identifier[queue] = identifier[Queue] ()
keyword[for] identifier[job] keyword[in] identifier[job_list] :
identifier[queue] . identifier[put] ( identifier[job] )
keyword[if] identifier[len] ( identifier[job_list] )< literal[int] :
identifier[thread_num] = identifier[len] ( identifier[job_list] )
keyword[else] :
identifier[thread_num] = literal[int]
identifier[threads] =[]
keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[thread_num] ):
identifier[thread] = identifier[Thread] ( identifier[target] = identifier[git_clone_worker] , identifier[args] =( identifier[queue] ,))
identifier[thread] . identifier[start] ()
identifier[threads] . identifier[append] ( identifier[thread] )
identifier[queue] . identifier[join] ()
keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[thread_num] ):
identifier[queue] . identifier[put] ( keyword[None] )
keyword[for] identifier[thread] keyword[in] identifier[threads] :
identifier[thread] . identifier[join] () | def git_clone_job_list(job_list):
"""Deal with all git clone jobs in $job_list."""
queue = Queue()
for job in job_list:
queue.put(job) # depends on [control=['for'], data=['job']]
if len(job_list) < 20:
thread_num = len(job_list) # depends on [control=['if'], data=[]]
else:
thread_num = 20
threads = []
for _ in range(thread_num):
thread = Thread(target=git_clone_worker, args=(queue,))
thread.start()
threads.append(thread) # depends on [control=['for'], data=[]]
queue.join()
for _ in range(thread_num):
queue.put(None) # depends on [control=['for'], data=[]]
for thread in threads:
thread.join() # depends on [control=['for'], data=['thread']] |
def trace(enter=False, exit=True):
    """
    This decorator prints entry and exit message when
    the decorated method is called, as well as call
    arguments, result and thrown exception (if any).
    :param enter: indicates whether entry message should be printed.
    :param exit: indicates whether exit message should be printed.
    :return: decorated function.
    """
    def _apply(func):
        @inspection.wraps(func)
        def wrapped(*args, **kwargs):
            label = func.__module__ + "." + func.__name__
            # Shared "name(args = ..., kwargs = ...)" prefix of every message.
            call_desc = "%s(args = %s, kwargs = %s)" % (label, repr(args), repr(kwargs))
            if enter:
                print(call_desc + " <-")
            try:
                result = func(*args, **kwargs)
                if exit:
                    print(call_desc + " -> " + repr(result))
                return result
            except Exception as err:
                if exit:
                    print(call_desc + " -> thrown " + str(err))
                raise
        return wrapped
    return _apply
constant[
This decorator prints entry and exit message when
the decorated method is called, as well as call
arguments, result and thrown exception (if any).
:param enter: indicates whether entry message should be printed.
:param exit: indicates whether exit message should be printed.
:return: decorated function.
]
def function[decorate, parameter[fn]]:
def function[new_fn, parameter[]]:
variable[name] assign[=] binary_operation[binary_operation[name[fn].__module__ + constant[.]] + name[fn].__name__]
if name[enter] begin[:]
call[name[print], parameter[binary_operation[constant[%s(args = %s, kwargs = %s) <-] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0ff00d0>, <ast.Call object at 0x7da1b0ff30a0>, <ast.Call object at 0x7da1b0ff21d0>]]]]]
<ast.Try object at 0x7da1b0ff2860>
return[name[new_fn]]
return[name[decorate]] | keyword[def] identifier[trace] ( identifier[enter] = keyword[False] , identifier[exit] = keyword[True] ):
literal[string]
keyword[def] identifier[decorate] ( identifier[fn] ):
@ identifier[inspection] . identifier[wraps] ( identifier[fn] )
keyword[def] identifier[new_fn] (* identifier[args] ,** identifier[kwargs] ):
identifier[name] = identifier[fn] . identifier[__module__] + literal[string] + identifier[fn] . identifier[__name__]
keyword[if] identifier[enter] :
identifier[print] (
literal[string] %( identifier[name] , identifier[repr] ( identifier[args] ), identifier[repr] ( identifier[kwargs] ))
)
keyword[try] :
identifier[result] = identifier[fn] (* identifier[args] ,** identifier[kwargs] )
keyword[if] identifier[exit] :
identifier[print] (
literal[string]
%( identifier[name] , identifier[repr] ( identifier[args] ), identifier[repr] ( identifier[kwargs] ), identifier[repr] ( identifier[result] ))
)
keyword[return] identifier[result]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[if] identifier[exit] :
identifier[print] (
literal[string]
%( identifier[name] , identifier[repr] ( identifier[args] ), identifier[repr] ( identifier[kwargs] ), identifier[str] ( identifier[e] ))
)
keyword[raise]
keyword[return] identifier[new_fn]
keyword[return] identifier[decorate] | def trace(enter=False, exit=True):
"""
This decorator prints entry and exit message when
the decorated method is called, as well as call
arguments, result and thrown exception (if any).
:param enter: indicates whether entry message should be printed.
:param exit: indicates whether exit message should be printed.
:return: decorated function.
"""
def decorate(fn):
@inspection.wraps(fn)
def new_fn(*args, **kwargs):
name = fn.__module__ + '.' + fn.__name__
if enter:
print('%s(args = %s, kwargs = %s) <-' % (name, repr(args), repr(kwargs))) # depends on [control=['if'], data=[]]
try:
result = fn(*args, **kwargs)
if exit:
print('%s(args = %s, kwargs = %s) -> %s' % (name, repr(args), repr(kwargs), repr(result))) # depends on [control=['if'], data=[]]
return result # depends on [control=['try'], data=[]]
except Exception as e:
if exit:
print('%s(args = %s, kwargs = %s) -> thrown %s' % (name, repr(args), repr(kwargs), str(e))) # depends on [control=['if'], data=[]]
raise # depends on [control=['except'], data=['e']]
return new_fn
return decorate |
def _get_time_at_progress(self, x_target):
"""
Return the projected time when progress level `x_target` will be reached.
Since the underlying progress model is nonlinear, we need to do use Newton method to find a numerical solution
to the equation x(t) = x_target.
"""
t, x, v = self._t0, self._x0, self._v0
# The convergence should be achieved in just few iterations, however in unlikely situation that it doesn't
# we don't want to loop forever...
for _ in range(20):
if v == 0: return 1e20
# make time prediction assuming the progress will continue at a linear speed ``v``
t += (x_target - x) / v
# calculate the actual progress at that time
x, v = self._compute_progress_at_time(t)
# iterate until convergence
if abs(x - x_target) < 1e-3: return t
return time.time() + 100 | def function[_get_time_at_progress, parameter[self, x_target]]:
constant[
Return the projected time when progress level `x_target` will be reached.
Since the underlying progress model is nonlinear, we need to do use Newton method to find a numerical solution
to the equation x(t) = x_target.
]
<ast.Tuple object at 0x7da18dc9be50> assign[=] tuple[[<ast.Attribute object at 0x7da18dc98c10>, <ast.Attribute object at 0x7da18dc9b400>, <ast.Attribute object at 0x7da18dc99840>]]
for taget[name[_]] in starred[call[name[range], parameter[constant[20]]]] begin[:]
if compare[name[v] equal[==] constant[0]] begin[:]
return[constant[1e+20]]
<ast.AugAssign object at 0x7da18dc9b010>
<ast.Tuple object at 0x7da18dc9a5c0> assign[=] call[name[self]._compute_progress_at_time, parameter[name[t]]]
if compare[call[name[abs], parameter[binary_operation[name[x] - name[x_target]]]] less[<] constant[0.001]] begin[:]
return[name[t]]
return[binary_operation[call[name[time].time, parameter[]] + constant[100]]] | keyword[def] identifier[_get_time_at_progress] ( identifier[self] , identifier[x_target] ):
literal[string]
identifier[t] , identifier[x] , identifier[v] = identifier[self] . identifier[_t0] , identifier[self] . identifier[_x0] , identifier[self] . identifier[_v0]
keyword[for] identifier[_] keyword[in] identifier[range] ( literal[int] ):
keyword[if] identifier[v] == literal[int] : keyword[return] literal[int]
identifier[t] +=( identifier[x_target] - identifier[x] )/ identifier[v]
identifier[x] , identifier[v] = identifier[self] . identifier[_compute_progress_at_time] ( identifier[t] )
keyword[if] identifier[abs] ( identifier[x] - identifier[x_target] )< literal[int] : keyword[return] identifier[t]
keyword[return] identifier[time] . identifier[time] ()+ literal[int] | def _get_time_at_progress(self, x_target):
"""
Return the projected time when progress level `x_target` will be reached.
Since the underlying progress model is nonlinear, we need to do use Newton method to find a numerical solution
to the equation x(t) = x_target.
"""
(t, x, v) = (self._t0, self._x0, self._v0)
# The convergence should be achieved in just few iterations, however in unlikely situation that it doesn't
# we don't want to loop forever...
for _ in range(20):
if v == 0:
return 1e+20 # depends on [control=['if'], data=[]]
# make time prediction assuming the progress will continue at a linear speed ``v``
t += (x_target - x) / v
# calculate the actual progress at that time
(x, v) = self._compute_progress_at_time(t)
# iterate until convergence
if abs(x - x_target) < 0.001:
return t # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return time.time() + 100 |
def stub_batch(cls, size, **kwargs):
"""Stub a batch of instances of the given class, with overriden attrs.
Args:
size (int): the number of instances to stub
Returns:
object list: the stubbed instances
"""
return [cls.stub(**kwargs) for _ in range(size)] | def function[stub_batch, parameter[cls, size]]:
constant[Stub a batch of instances of the given class, with overriden attrs.
Args:
size (int): the number of instances to stub
Returns:
object list: the stubbed instances
]
return[<ast.ListComp object at 0x7da1b1d5de10>] | keyword[def] identifier[stub_batch] ( identifier[cls] , identifier[size] ,** identifier[kwargs] ):
literal[string]
keyword[return] [ identifier[cls] . identifier[stub] (** identifier[kwargs] ) keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[size] )] | def stub_batch(cls, size, **kwargs):
"""Stub a batch of instances of the given class, with overriden attrs.
Args:
size (int): the number of instances to stub
Returns:
object list: the stubbed instances
"""
return [cls.stub(**kwargs) for _ in range(size)] |
def visit_Scope(self, node: parsing.Capture) -> [ast.stmt] or ast.expr:
"""Generates python code for a scope.
if not self.begin():
return False
res = self.pt()
if not self.end():
return False
return res
"""
return ast.Name('scope_not_implemented', ast.Load())
raise NotImplementedError() | def function[visit_Scope, parameter[self, node]]:
constant[Generates python code for a scope.
if not self.begin():
return False
res = self.pt()
if not self.end():
return False
return res
]
return[call[name[ast].Name, parameter[constant[scope_not_implemented], call[name[ast].Load, parameter[]]]]]
<ast.Raise object at 0x7da1b01374c0> | keyword[def] identifier[visit_Scope] ( identifier[self] , identifier[node] : identifier[parsing] . identifier[Capture] )->[ identifier[ast] . identifier[stmt] ] keyword[or] identifier[ast] . identifier[expr] :
literal[string]
keyword[return] identifier[ast] . identifier[Name] ( literal[string] , identifier[ast] . identifier[Load] ())
keyword[raise] identifier[NotImplementedError] () | def visit_Scope(self, node: parsing.Capture) -> [ast.stmt] or ast.expr:
"""Generates python code for a scope.
if not self.begin():
return False
res = self.pt()
if not self.end():
return False
return res
"""
return ast.Name('scope_not_implemented', ast.Load())
raise NotImplementedError() |
def readinto(self, buf, *, start=0, end=None):
"""
Read into ``buf`` from the device. The number of bytes read will be the
length of ``buf``.
If ``start`` or ``end`` is provided, then the buffer will be sliced
as if ``buf[start:end]``. This will not cause an allocation like
``buf[start:end]`` will so it saves memory.
:param bytearray buf: buffer to write into
:param int start: Index to start writing at
:param int end: Index to write up to but not include
"""
if end is None:
end = len(buf)
for i in range(start, end):
buf[i] = self._readbyte() | def function[readinto, parameter[self, buf]]:
constant[
Read into ``buf`` from the device. The number of bytes read will be the
length of ``buf``.
If ``start`` or ``end`` is provided, then the buffer will be sliced
as if ``buf[start:end]``. This will not cause an allocation like
``buf[start:end]`` will so it saves memory.
:param bytearray buf: buffer to write into
:param int start: Index to start writing at
:param int end: Index to write up to but not include
]
if compare[name[end] is constant[None]] begin[:]
variable[end] assign[=] call[name[len], parameter[name[buf]]]
for taget[name[i]] in starred[call[name[range], parameter[name[start], name[end]]]] begin[:]
call[name[buf]][name[i]] assign[=] call[name[self]._readbyte, parameter[]] | keyword[def] identifier[readinto] ( identifier[self] , identifier[buf] ,*, identifier[start] = literal[int] , identifier[end] = keyword[None] ):
literal[string]
keyword[if] identifier[end] keyword[is] keyword[None] :
identifier[end] = identifier[len] ( identifier[buf] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[start] , identifier[end] ):
identifier[buf] [ identifier[i] ]= identifier[self] . identifier[_readbyte] () | def readinto(self, buf, *, start=0, end=None):
"""
Read into ``buf`` from the device. The number of bytes read will be the
length of ``buf``.
If ``start`` or ``end`` is provided, then the buffer will be sliced
as if ``buf[start:end]``. This will not cause an allocation like
``buf[start:end]`` will so it saves memory.
:param bytearray buf: buffer to write into
:param int start: Index to start writing at
:param int end: Index to write up to but not include
"""
if end is None:
end = len(buf) # depends on [control=['if'], data=['end']]
for i in range(start, end):
buf[i] = self._readbyte() # depends on [control=['for'], data=['i']] |
def print_context_names(ctx, param, value):
"""Print all possible types."""
if not value or ctx.resilient_parsing:
return
click.echo('\n'.join(_context_names()))
ctx.exit() | def function[print_context_names, parameter[ctx, param, value]]:
constant[Print all possible types.]
if <ast.BoolOp object at 0x7da18bc73970> begin[:]
return[None]
call[name[click].echo, parameter[call[constant[
].join, parameter[call[name[_context_names], parameter[]]]]]]
call[name[ctx].exit, parameter[]] | keyword[def] identifier[print_context_names] ( identifier[ctx] , identifier[param] , identifier[value] ):
literal[string]
keyword[if] keyword[not] identifier[value] keyword[or] identifier[ctx] . identifier[resilient_parsing] :
keyword[return]
identifier[click] . identifier[echo] ( literal[string] . identifier[join] ( identifier[_context_names] ()))
identifier[ctx] . identifier[exit] () | def print_context_names(ctx, param, value):
"""Print all possible types."""
if not value or ctx.resilient_parsing:
return # depends on [control=['if'], data=[]]
click.echo('\n'.join(_context_names()))
ctx.exit() |
def _query_by_distro(self, table_name):
"""
Query for download data broken down by OS distribution, for one day.
:param table_name: table name to query against
:type table_name: str
:return: dict of download information by distro; keys are project name,
values are a dict of distro names to dicts of distro version to
download count.
:rtype: dict
"""
logger.info('Querying for downloads by distro in table %s', table_name)
q = "SELECT file.project, details.distro.name, " \
"details.distro.version, COUNT(*) as dl_count " \
"%s " \
"%s " \
"GROUP BY file.project, details.distro.name, " \
"details.distro.version;" % (
self._from_for_table(table_name),
self._where_for_projects
)
res = self._run_query(q)
result = self._dict_for_projects()
# iterate through results
for row in res:
# pointer to the per-project result dict
proj = result[row['file_project']]
# grab the name and version; change None to 'unknown'
dname = row['details_distro_name']
dver = row['details_distro_version']
if dname not in proj:
proj[dname] = {}
if dver not in proj[dname]:
proj[dname][dver] = 0
proj[dname][dver] += int(row['dl_count'])
return result | def function[_query_by_distro, parameter[self, table_name]]:
constant[
Query for download data broken down by OS distribution, for one day.
:param table_name: table name to query against
:type table_name: str
:return: dict of download information by distro; keys are project name,
values are a dict of distro names to dicts of distro version to
download count.
:rtype: dict
]
call[name[logger].info, parameter[constant[Querying for downloads by distro in table %s], name[table_name]]]
variable[q] assign[=] binary_operation[constant[SELECT file.project, details.distro.name, details.distro.version, COUNT(*) as dl_count %s %s GROUP BY file.project, details.distro.name, details.distro.version;] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da20cabc3a0>, <ast.Attribute object at 0x7da20cabfa30>]]]
variable[res] assign[=] call[name[self]._run_query, parameter[name[q]]]
variable[result] assign[=] call[name[self]._dict_for_projects, parameter[]]
for taget[name[row]] in starred[name[res]] begin[:]
variable[proj] assign[=] call[name[result]][call[name[row]][constant[file_project]]]
variable[dname] assign[=] call[name[row]][constant[details_distro_name]]
variable[dver] assign[=] call[name[row]][constant[details_distro_version]]
if compare[name[dname] <ast.NotIn object at 0x7da2590d7190> name[proj]] begin[:]
call[name[proj]][name[dname]] assign[=] dictionary[[], []]
if compare[name[dver] <ast.NotIn object at 0x7da2590d7190> call[name[proj]][name[dname]]] begin[:]
call[call[name[proj]][name[dname]]][name[dver]] assign[=] constant[0]
<ast.AugAssign object at 0x7da2041d99f0>
return[name[result]] | keyword[def] identifier[_query_by_distro] ( identifier[self] , identifier[table_name] ):
literal[string]
identifier[logger] . identifier[info] ( literal[string] , identifier[table_name] )
identifier[q] = literal[string] literal[string] literal[string] literal[string] literal[string] literal[string] %(
identifier[self] . identifier[_from_for_table] ( identifier[table_name] ),
identifier[self] . identifier[_where_for_projects]
)
identifier[res] = identifier[self] . identifier[_run_query] ( identifier[q] )
identifier[result] = identifier[self] . identifier[_dict_for_projects] ()
keyword[for] identifier[row] keyword[in] identifier[res] :
identifier[proj] = identifier[result] [ identifier[row] [ literal[string] ]]
identifier[dname] = identifier[row] [ literal[string] ]
identifier[dver] = identifier[row] [ literal[string] ]
keyword[if] identifier[dname] keyword[not] keyword[in] identifier[proj] :
identifier[proj] [ identifier[dname] ]={}
keyword[if] identifier[dver] keyword[not] keyword[in] identifier[proj] [ identifier[dname] ]:
identifier[proj] [ identifier[dname] ][ identifier[dver] ]= literal[int]
identifier[proj] [ identifier[dname] ][ identifier[dver] ]+= identifier[int] ( identifier[row] [ literal[string] ])
keyword[return] identifier[result] | def _query_by_distro(self, table_name):
"""
Query for download data broken down by OS distribution, for one day.
:param table_name: table name to query against
:type table_name: str
:return: dict of download information by distro; keys are project name,
values are a dict of distro names to dicts of distro version to
download count.
:rtype: dict
"""
logger.info('Querying for downloads by distro in table %s', table_name)
q = 'SELECT file.project, details.distro.name, details.distro.version, COUNT(*) as dl_count %s %s GROUP BY file.project, details.distro.name, details.distro.version;' % (self._from_for_table(table_name), self._where_for_projects)
res = self._run_query(q)
result = self._dict_for_projects()
# iterate through results
for row in res:
# pointer to the per-project result dict
proj = result[row['file_project']]
# grab the name and version; change None to 'unknown'
dname = row['details_distro_name']
dver = row['details_distro_version']
if dname not in proj:
proj[dname] = {} # depends on [control=['if'], data=['dname', 'proj']]
if dver not in proj[dname]:
proj[dname][dver] = 0 # depends on [control=['if'], data=['dver']]
proj[dname][dver] += int(row['dl_count']) # depends on [control=['for'], data=['row']]
return result |
def _hdparm(args, failhard=True):
'''
Execute hdparm
Fail hard when required
return output when possible
'''
cmd = 'hdparm {0}'.format(args)
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] != 0:
msg = '{0}: {1}'.format(cmd, result['stderr'])
if failhard:
raise CommandExecutionError(msg)
else:
log.warning(msg)
return result['stdout'] | def function[_hdparm, parameter[args, failhard]]:
constant[
Execute hdparm
Fail hard when required
return output when possible
]
variable[cmd] assign[=] call[constant[hdparm {0}].format, parameter[name[args]]]
variable[result] assign[=] call[call[name[__salt__]][constant[cmd.run_all]], parameter[name[cmd]]]
if compare[call[name[result]][constant[retcode]] not_equal[!=] constant[0]] begin[:]
variable[msg] assign[=] call[constant[{0}: {1}].format, parameter[name[cmd], call[name[result]][constant[stderr]]]]
if name[failhard] begin[:]
<ast.Raise object at 0x7da1b1c3cac0>
return[call[name[result]][constant[stdout]]] | keyword[def] identifier[_hdparm] ( identifier[args] , identifier[failhard] = keyword[True] ):
literal[string]
identifier[cmd] = literal[string] . identifier[format] ( identifier[args] )
identifier[result] = identifier[__salt__] [ literal[string] ]( identifier[cmd] )
keyword[if] identifier[result] [ literal[string] ]!= literal[int] :
identifier[msg] = literal[string] . identifier[format] ( identifier[cmd] , identifier[result] [ literal[string] ])
keyword[if] identifier[failhard] :
keyword[raise] identifier[CommandExecutionError] ( identifier[msg] )
keyword[else] :
identifier[log] . identifier[warning] ( identifier[msg] )
keyword[return] identifier[result] [ literal[string] ] | def _hdparm(args, failhard=True):
"""
Execute hdparm
Fail hard when required
return output when possible
"""
cmd = 'hdparm {0}'.format(args)
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] != 0:
msg = '{0}: {1}'.format(cmd, result['stderr'])
if failhard:
raise CommandExecutionError(msg) # depends on [control=['if'], data=[]]
else:
log.warning(msg) # depends on [control=['if'], data=[]]
return result['stdout'] |
def save_to_db(model_text_id, parsed_values):
"""save to db and return saved object"""
Model = apps.get_model(model_text_id)
# normalise values and separate to m2m, simple
simple_fields = {}
many2many_fields = {}
for field, value in parsed_values.items():
if (Model._meta.get_field(
field).get_internal_type() == 'ManyToManyField'):
many2many_fields[field] = value
elif (Model._meta.get_field(
field).get_internal_type() == 'DateTimeField'):
simple_fields[field] = time_parser.parse(value)
else:
simple_fields[field] = value
# ToDo: add unique identify parameter to field
# ToDo: allow unique identify m2m field
model, created = Model.objects.get_or_create(**simple_fields)
for field, value in many2many_fields.items():
setattr(model, field, value)
model.save()
return model | def function[save_to_db, parameter[model_text_id, parsed_values]]:
constant[save to db and return saved object]
variable[Model] assign[=] call[name[apps].get_model, parameter[name[model_text_id]]]
variable[simple_fields] assign[=] dictionary[[], []]
variable[many2many_fields] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b1621930>, <ast.Name object at 0x7da1b1621bd0>]]] in starred[call[name[parsed_values].items, parameter[]]] begin[:]
if compare[call[call[name[Model]._meta.get_field, parameter[name[field]]].get_internal_type, parameter[]] equal[==] constant[ManyToManyField]] begin[:]
call[name[many2many_fields]][name[field]] assign[=] name[value]
<ast.Tuple object at 0x7da1b1621180> assign[=] call[name[Model].objects.get_or_create, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b1621270>, <ast.Name object at 0x7da1b1621240>]]] in starred[call[name[many2many_fields].items, parameter[]]] begin[:]
call[name[setattr], parameter[name[model], name[field], name[value]]]
call[name[model].save, parameter[]]
return[name[model]] | keyword[def] identifier[save_to_db] ( identifier[model_text_id] , identifier[parsed_values] ):
literal[string]
identifier[Model] = identifier[apps] . identifier[get_model] ( identifier[model_text_id] )
identifier[simple_fields] ={}
identifier[many2many_fields] ={}
keyword[for] identifier[field] , identifier[value] keyword[in] identifier[parsed_values] . identifier[items] ():
keyword[if] ( identifier[Model] . identifier[_meta] . identifier[get_field] (
identifier[field] ). identifier[get_internal_type] ()== literal[string] ):
identifier[many2many_fields] [ identifier[field] ]= identifier[value]
keyword[elif] ( identifier[Model] . identifier[_meta] . identifier[get_field] (
identifier[field] ). identifier[get_internal_type] ()== literal[string] ):
identifier[simple_fields] [ identifier[field] ]= identifier[time_parser] . identifier[parse] ( identifier[value] )
keyword[else] :
identifier[simple_fields] [ identifier[field] ]= identifier[value]
identifier[model] , identifier[created] = identifier[Model] . identifier[objects] . identifier[get_or_create] (** identifier[simple_fields] )
keyword[for] identifier[field] , identifier[value] keyword[in] identifier[many2many_fields] . identifier[items] ():
identifier[setattr] ( identifier[model] , identifier[field] , identifier[value] )
identifier[model] . identifier[save] ()
keyword[return] identifier[model] | def save_to_db(model_text_id, parsed_values):
"""save to db and return saved object"""
Model = apps.get_model(model_text_id)
# normalise values and separate to m2m, simple
simple_fields = {}
many2many_fields = {}
for (field, value) in parsed_values.items():
if Model._meta.get_field(field).get_internal_type() == 'ManyToManyField':
many2many_fields[field] = value # depends on [control=['if'], data=[]]
elif Model._meta.get_field(field).get_internal_type() == 'DateTimeField':
simple_fields[field] = time_parser.parse(value) # depends on [control=['if'], data=[]]
else:
simple_fields[field] = value # depends on [control=['for'], data=[]]
# ToDo: add unique identify parameter to field
# ToDo: allow unique identify m2m field
(model, created) = Model.objects.get_or_create(**simple_fields)
for (field, value) in many2many_fields.items():
setattr(model, field, value) # depends on [control=['for'], data=[]]
model.save()
return model |
def read_config(cls, configparser):
"""Read configuration file options."""
config = dict()
section = cls.__name__
option = "prefixes"
if configparser.has_option(section, option):
value = configparser.get(section, option)
names = [x.strip().lower() for x in value.split(",")]
else:
names = []
config[option] = names
return config | def function[read_config, parameter[cls, configparser]]:
constant[Read configuration file options.]
variable[config] assign[=] call[name[dict], parameter[]]
variable[section] assign[=] name[cls].__name__
variable[option] assign[=] constant[prefixes]
if call[name[configparser].has_option, parameter[name[section], name[option]]] begin[:]
variable[value] assign[=] call[name[configparser].get, parameter[name[section], name[option]]]
variable[names] assign[=] <ast.ListComp object at 0x7da1b2344850>
call[name[config]][name[option]] assign[=] name[names]
return[name[config]] | keyword[def] identifier[read_config] ( identifier[cls] , identifier[configparser] ):
literal[string]
identifier[config] = identifier[dict] ()
identifier[section] = identifier[cls] . identifier[__name__]
identifier[option] = literal[string]
keyword[if] identifier[configparser] . identifier[has_option] ( identifier[section] , identifier[option] ):
identifier[value] = identifier[configparser] . identifier[get] ( identifier[section] , identifier[option] )
identifier[names] =[ identifier[x] . identifier[strip] (). identifier[lower] () keyword[for] identifier[x] keyword[in] identifier[value] . identifier[split] ( literal[string] )]
keyword[else] :
identifier[names] =[]
identifier[config] [ identifier[option] ]= identifier[names]
keyword[return] identifier[config] | def read_config(cls, configparser):
"""Read configuration file options."""
config = dict()
section = cls.__name__
option = 'prefixes'
if configparser.has_option(section, option):
value = configparser.get(section, option)
names = [x.strip().lower() for x in value.split(',')] # depends on [control=['if'], data=[]]
else:
names = []
config[option] = names
return config |
def _apply_rel_filters(self, queryset):
"""
Filter the queryset for the instance this manager is bound to.
"""
queryset._add_hints(instance=self.instance)
if self._db:
queryset = queryset.using(self._db)
queryset = queryset.filter(**self.core_filters)
return queryset | def function[_apply_rel_filters, parameter[self, queryset]]:
constant[
Filter the queryset for the instance this manager is bound to.
]
call[name[queryset]._add_hints, parameter[]]
if name[self]._db begin[:]
variable[queryset] assign[=] call[name[queryset].using, parameter[name[self]._db]]
variable[queryset] assign[=] call[name[queryset].filter, parameter[]]
return[name[queryset]] | keyword[def] identifier[_apply_rel_filters] ( identifier[self] , identifier[queryset] ):
literal[string]
identifier[queryset] . identifier[_add_hints] ( identifier[instance] = identifier[self] . identifier[instance] )
keyword[if] identifier[self] . identifier[_db] :
identifier[queryset] = identifier[queryset] . identifier[using] ( identifier[self] . identifier[_db] )
identifier[queryset] = identifier[queryset] . identifier[filter] (** identifier[self] . identifier[core_filters] )
keyword[return] identifier[queryset] | def _apply_rel_filters(self, queryset):
"""
Filter the queryset for the instance this manager is bound to.
"""
queryset._add_hints(instance=self.instance)
if self._db:
queryset = queryset.using(self._db) # depends on [control=['if'], data=[]]
queryset = queryset.filter(**self.core_filters)
return queryset |
def load_isd_daily_temp_data(
self, start, end, read_from_cache=True, write_to_cache=True
):
""" Load resampled daily ISD temperature data from start date to end date (inclusive).
This is the primary convenience method for loading resampled daily ISD temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache.
"""
return load_isd_daily_temp_data(
self.usaf_id,
start,
end,
read_from_cache=read_from_cache,
write_to_cache=write_to_cache,
) | def function[load_isd_daily_temp_data, parameter[self, start, end, read_from_cache, write_to_cache]]:
constant[ Load resampled daily ISD temperature data from start date to end date (inclusive).
This is the primary convenience method for loading resampled daily ISD temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache.
]
return[call[name[load_isd_daily_temp_data], parameter[name[self].usaf_id, name[start], name[end]]]] | keyword[def] identifier[load_isd_daily_temp_data] (
identifier[self] , identifier[start] , identifier[end] , identifier[read_from_cache] = keyword[True] , identifier[write_to_cache] = keyword[True]
):
literal[string]
keyword[return] identifier[load_isd_daily_temp_data] (
identifier[self] . identifier[usaf_id] ,
identifier[start] ,
identifier[end] ,
identifier[read_from_cache] = identifier[read_from_cache] ,
identifier[write_to_cache] = identifier[write_to_cache] ,
) | def load_isd_daily_temp_data(self, start, end, read_from_cache=True, write_to_cache=True):
""" Load resampled daily ISD temperature data from start date to end date (inclusive).
This is the primary convenience method for loading resampled daily ISD temperature data.
Parameters
----------
start : datetime.datetime
The earliest date from which to load data.
end : datetime.datetime
The latest date until which to load data.
read_from_cache : bool
Whether or not to load data from cache.
write_to_cache : bool
Whether or not to write newly loaded data to cache.
"""
return load_isd_daily_temp_data(self.usaf_id, start, end, read_from_cache=read_from_cache, write_to_cache=write_to_cache) |
def url_for(endpoint, **values):
"""Generates a URL to the given endpoint with the method provided.
Variable arguments that are unknown to the target endpoint are appended
to the generated URL as query arguments. If the value of a query argument
is `None`, the whole pair is skipped. In case blueprints are active
you can shortcut references to the same blueprint by prefixing the
local endpoint with a dot (``.``).
This will reference the index function local to the current blueprint::
url_for('.index')
For more information, head over to the :ref:`Quickstart <url-building>`.
To integrate applications, :class:`Flask` has a hook to intercept URL build
errors through :attr:`Flask.build_error_handler`. The `url_for` function
results in a :exc:`~werkzeug.routing.BuildError` when the current app does
not have a URL for the given endpoint and values. When it does, the
:data:`~flask.current_app` calls its :attr:`~Flask.build_error_handler` if
it is not `None`, which can return a string to use as the result of
`url_for` (instead of `url_for`'s default to raise the
:exc:`~werkzeug.routing.BuildError` exception) or re-raise the exception.
An example::
def external_url_handler(error, endpoint, **values):
"Looks up an external URL when `url_for` cannot build a URL."
# This is an example of hooking the build_error_handler.
# Here, lookup_url is some utility function you've built
# which looks up the endpoint in some external URL registry.
url = lookup_url(endpoint, **values)
if url is None:
# External lookup did not have a URL.
# Re-raise the BuildError, in context of original traceback.
exc_type, exc_value, tb = sys.exc_info()
if exc_value is error:
raise exc_type, exc_value, tb
else:
raise error
# url_for will use this result, instead of raising BuildError.
return url
app.build_error_handler = external_url_handler
Here, `error` is the instance of :exc:`~werkzeug.routing.BuildError`, and
`endpoint` and `**values` are the arguments passed into `url_for`. Note
that this is for building URLs outside the current application, and not for
handling 404 NotFound errors.
.. versionadded:: 0.10
The `_scheme` parameter was added.
.. versionadded:: 0.9
The `_anchor` and `_method` parameters were added.
.. versionadded:: 0.9
Calls :meth:`Flask.handle_build_error` on
:exc:`~werkzeug.routing.BuildError`.
:param endpoint: the endpoint of the URL (name of the function)
:param values: the variable arguments of the URL rule
:param _external: if set to `True`, an absolute URL is generated. Server
address can be changed via `SERVER_NAME` configuration variable which
defaults to `localhost`.
:param _scheme: a string specifying the desired URL scheme. The `_external`
parameter must be set to `True` or a `ValueError` is raised.
:param _anchor: if provided this is added as anchor to the URL.
:param _method: if provided this explicitly specifies an HTTP method.
"""
appctx = _app_ctx_stack.top
reqctx = _request_ctx_stack.top
if appctx is None:
raise RuntimeError('Attempted to generate a URL without the '
'application context being pushed. This has to be '
'executed when application context is available.')
# If request specific information is available we have some extra
# features that support "relative" urls.
if reqctx is not None:
url_adapter = reqctx.url_adapter
blueprint_name = request.blueprint
if not reqctx.request._is_old_module:
if endpoint[:1] == '.':
if blueprint_name is not None:
endpoint = blueprint_name + endpoint
else:
endpoint = endpoint[1:]
else:
# TODO: get rid of this deprecated functionality in 1.0
if '.' not in endpoint:
if blueprint_name is not None:
endpoint = blueprint_name + '.' + endpoint
elif endpoint.startswith('.'):
endpoint = endpoint[1:]
external = values.pop('_external', False)
# Otherwise go with the url adapter from the appctx and make
# the urls external by default.
else:
url_adapter = appctx.url_adapter
if url_adapter is None:
raise RuntimeError('Application was not able to create a URL '
'adapter for request independent URL generation. '
'You might be able to fix this by setting '
'the SERVER_NAME config variable.')
external = values.pop('_external', True)
anchor = values.pop('_anchor', None)
method = values.pop('_method', None)
scheme = values.pop('_scheme', None)
appctx.app.inject_url_defaults(endpoint, values)
if scheme is not None:
if not external:
raise ValueError('When specifying _scheme, _external must be True')
url_adapter.url_scheme = scheme
try:
rv = url_adapter.build(endpoint, values, method=method,
force_external=external)
except BuildError as error:
# We need to inject the values again so that the app callback can
# deal with that sort of stuff.
values['_external'] = external
values['_anchor'] = anchor
values['_method'] = method
return appctx.app.handle_url_build_error(error, endpoint, values)
if anchor is not None:
rv += '#' + url_quote(anchor)
return rv | def function[url_for, parameter[endpoint]]:
constant[Generates a URL to the given endpoint with the method provided.
Variable arguments that are unknown to the target endpoint are appended
to the generated URL as query arguments. If the value of a query argument
is `None`, the whole pair is skipped. In case blueprints are active
you can shortcut references to the same blueprint by prefixing the
local endpoint with a dot (``.``).
This will reference the index function local to the current blueprint::
url_for('.index')
For more information, head over to the :ref:`Quickstart <url-building>`.
To integrate applications, :class:`Flask` has a hook to intercept URL build
errors through :attr:`Flask.build_error_handler`. The `url_for` function
results in a :exc:`~werkzeug.routing.BuildError` when the current app does
not have a URL for the given endpoint and values. When it does, the
:data:`~flask.current_app` calls its :attr:`~Flask.build_error_handler` if
it is not `None`, which can return a string to use as the result of
`url_for` (instead of `url_for`'s default to raise the
:exc:`~werkzeug.routing.BuildError` exception) or re-raise the exception.
An example::
def external_url_handler(error, endpoint, **values):
"Looks up an external URL when `url_for` cannot build a URL."
# This is an example of hooking the build_error_handler.
# Here, lookup_url is some utility function you've built
# which looks up the endpoint in some external URL registry.
url = lookup_url(endpoint, **values)
if url is None:
# External lookup did not have a URL.
# Re-raise the BuildError, in context of original traceback.
exc_type, exc_value, tb = sys.exc_info()
if exc_value is error:
raise exc_type, exc_value, tb
else:
raise error
# url_for will use this result, instead of raising BuildError.
return url
app.build_error_handler = external_url_handler
Here, `error` is the instance of :exc:`~werkzeug.routing.BuildError`, and
`endpoint` and `**values` are the arguments passed into `url_for`. Note
that this is for building URLs outside the current application, and not for
handling 404 NotFound errors.
.. versionadded:: 0.10
The `_scheme` parameter was added.
.. versionadded:: 0.9
The `_anchor` and `_method` parameters were added.
.. versionadded:: 0.9
Calls :meth:`Flask.handle_build_error` on
:exc:`~werkzeug.routing.BuildError`.
:param endpoint: the endpoint of the URL (name of the function)
:param values: the variable arguments of the URL rule
:param _external: if set to `True`, an absolute URL is generated. Server
address can be changed via `SERVER_NAME` configuration variable which
defaults to `localhost`.
:param _scheme: a string specifying the desired URL scheme. The `_external`
parameter must be set to `True` or a `ValueError` is raised.
:param _anchor: if provided this is added as anchor to the URL.
:param _method: if provided this explicitly specifies an HTTP method.
]
variable[appctx] assign[=] name[_app_ctx_stack].top
variable[reqctx] assign[=] name[_request_ctx_stack].top
if compare[name[appctx] is constant[None]] begin[:]
<ast.Raise object at 0x7da204344b50>
if compare[name[reqctx] is_not constant[None]] begin[:]
variable[url_adapter] assign[=] name[reqctx].url_adapter
variable[blueprint_name] assign[=] name[request].blueprint
if <ast.UnaryOp object at 0x7da204344a30> begin[:]
if compare[call[name[endpoint]][<ast.Slice object at 0x7da204344eb0>] equal[==] constant[.]] begin[:]
if compare[name[blueprint_name] is_not constant[None]] begin[:]
variable[endpoint] assign[=] binary_operation[name[blueprint_name] + name[endpoint]]
variable[external] assign[=] call[name[values].pop, parameter[constant[_external], constant[False]]]
variable[anchor] assign[=] call[name[values].pop, parameter[constant[_anchor], constant[None]]]
variable[method] assign[=] call[name[values].pop, parameter[constant[_method], constant[None]]]
variable[scheme] assign[=] call[name[values].pop, parameter[constant[_scheme], constant[None]]]
call[name[appctx].app.inject_url_defaults, parameter[name[endpoint], name[values]]]
if compare[name[scheme] is_not constant[None]] begin[:]
if <ast.UnaryOp object at 0x7da1b03bab60> begin[:]
<ast.Raise object at 0x7da1b03b8ca0>
name[url_adapter].url_scheme assign[=] name[scheme]
<ast.Try object at 0x7da1b03b8850>
if compare[name[anchor] is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da1b03b9720>
return[name[rv]] | keyword[def] identifier[url_for] ( identifier[endpoint] ,** identifier[values] ):
literal[string]
identifier[appctx] = identifier[_app_ctx_stack] . identifier[top]
identifier[reqctx] = identifier[_request_ctx_stack] . identifier[top]
keyword[if] identifier[appctx] keyword[is] keyword[None] :
keyword[raise] identifier[RuntimeError] ( literal[string]
literal[string]
literal[string] )
keyword[if] identifier[reqctx] keyword[is] keyword[not] keyword[None] :
identifier[url_adapter] = identifier[reqctx] . identifier[url_adapter]
identifier[blueprint_name] = identifier[request] . identifier[blueprint]
keyword[if] keyword[not] identifier[reqctx] . identifier[request] . identifier[_is_old_module] :
keyword[if] identifier[endpoint] [: literal[int] ]== literal[string] :
keyword[if] identifier[blueprint_name] keyword[is] keyword[not] keyword[None] :
identifier[endpoint] = identifier[blueprint_name] + identifier[endpoint]
keyword[else] :
identifier[endpoint] = identifier[endpoint] [ literal[int] :]
keyword[else] :
keyword[if] literal[string] keyword[not] keyword[in] identifier[endpoint] :
keyword[if] identifier[blueprint_name] keyword[is] keyword[not] keyword[None] :
identifier[endpoint] = identifier[blueprint_name] + literal[string] + identifier[endpoint]
keyword[elif] identifier[endpoint] . identifier[startswith] ( literal[string] ):
identifier[endpoint] = identifier[endpoint] [ literal[int] :]
identifier[external] = identifier[values] . identifier[pop] ( literal[string] , keyword[False] )
keyword[else] :
identifier[url_adapter] = identifier[appctx] . identifier[url_adapter]
keyword[if] identifier[url_adapter] keyword[is] keyword[None] :
keyword[raise] identifier[RuntimeError] ( literal[string]
literal[string]
literal[string]
literal[string] )
identifier[external] = identifier[values] . identifier[pop] ( literal[string] , keyword[True] )
identifier[anchor] = identifier[values] . identifier[pop] ( literal[string] , keyword[None] )
identifier[method] = identifier[values] . identifier[pop] ( literal[string] , keyword[None] )
identifier[scheme] = identifier[values] . identifier[pop] ( literal[string] , keyword[None] )
identifier[appctx] . identifier[app] . identifier[inject_url_defaults] ( identifier[endpoint] , identifier[values] )
keyword[if] identifier[scheme] keyword[is] keyword[not] keyword[None] :
keyword[if] keyword[not] identifier[external] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[url_adapter] . identifier[url_scheme] = identifier[scheme]
keyword[try] :
identifier[rv] = identifier[url_adapter] . identifier[build] ( identifier[endpoint] , identifier[values] , identifier[method] = identifier[method] ,
identifier[force_external] = identifier[external] )
keyword[except] identifier[BuildError] keyword[as] identifier[error] :
identifier[values] [ literal[string] ]= identifier[external]
identifier[values] [ literal[string] ]= identifier[anchor]
identifier[values] [ literal[string] ]= identifier[method]
keyword[return] identifier[appctx] . identifier[app] . identifier[handle_url_build_error] ( identifier[error] , identifier[endpoint] , identifier[values] )
keyword[if] identifier[anchor] keyword[is] keyword[not] keyword[None] :
identifier[rv] += literal[string] + identifier[url_quote] ( identifier[anchor] )
keyword[return] identifier[rv] | def url_for(endpoint, **values):
"""Generates a URL to the given endpoint with the method provided.
Variable arguments that are unknown to the target endpoint are appended
to the generated URL as query arguments. If the value of a query argument
is `None`, the whole pair is skipped. In case blueprints are active
you can shortcut references to the same blueprint by prefixing the
local endpoint with a dot (``.``).
This will reference the index function local to the current blueprint::
url_for('.index')
For more information, head over to the :ref:`Quickstart <url-building>`.
To integrate applications, :class:`Flask` has a hook to intercept URL build
errors through :attr:`Flask.build_error_handler`. The `url_for` function
results in a :exc:`~werkzeug.routing.BuildError` when the current app does
not have a URL for the given endpoint and values. When it does, the
:data:`~flask.current_app` calls its :attr:`~Flask.build_error_handler` if
it is not `None`, which can return a string to use as the result of
`url_for` (instead of `url_for`'s default to raise the
:exc:`~werkzeug.routing.BuildError` exception) or re-raise the exception.
An example::
def external_url_handler(error, endpoint, **values):
"Looks up an external URL when `url_for` cannot build a URL."
# This is an example of hooking the build_error_handler.
# Here, lookup_url is some utility function you've built
# which looks up the endpoint in some external URL registry.
url = lookup_url(endpoint, **values)
if url is None:
# External lookup did not have a URL.
# Re-raise the BuildError, in context of original traceback.
exc_type, exc_value, tb = sys.exc_info()
if exc_value is error:
raise exc_type, exc_value, tb
else:
raise error
# url_for will use this result, instead of raising BuildError.
return url
app.build_error_handler = external_url_handler
Here, `error` is the instance of :exc:`~werkzeug.routing.BuildError`, and
`endpoint` and `**values` are the arguments passed into `url_for`. Note
that this is for building URLs outside the current application, and not for
handling 404 NotFound errors.
.. versionadded:: 0.10
The `_scheme` parameter was added.
.. versionadded:: 0.9
The `_anchor` and `_method` parameters were added.
.. versionadded:: 0.9
Calls :meth:`Flask.handle_build_error` on
:exc:`~werkzeug.routing.BuildError`.
:param endpoint: the endpoint of the URL (name of the function)
:param values: the variable arguments of the URL rule
:param _external: if set to `True`, an absolute URL is generated. Server
address can be changed via `SERVER_NAME` configuration variable which
defaults to `localhost`.
:param _scheme: a string specifying the desired URL scheme. The `_external`
parameter must be set to `True` or a `ValueError` is raised.
:param _anchor: if provided this is added as anchor to the URL.
:param _method: if provided this explicitly specifies an HTTP method.
"""
appctx = _app_ctx_stack.top
reqctx = _request_ctx_stack.top
if appctx is None:
raise RuntimeError('Attempted to generate a URL without the application context being pushed. This has to be executed when application context is available.') # depends on [control=['if'], data=[]]
# If request specific information is available we have some extra
# features that support "relative" urls.
if reqctx is not None:
url_adapter = reqctx.url_adapter
blueprint_name = request.blueprint
if not reqctx.request._is_old_module:
if endpoint[:1] == '.':
if blueprint_name is not None:
endpoint = blueprint_name + endpoint # depends on [control=['if'], data=['blueprint_name']]
else:
endpoint = endpoint[1:] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# TODO: get rid of this deprecated functionality in 1.0
elif '.' not in endpoint:
if blueprint_name is not None:
endpoint = blueprint_name + '.' + endpoint # depends on [control=['if'], data=['blueprint_name']] # depends on [control=['if'], data=['endpoint']]
elif endpoint.startswith('.'):
endpoint = endpoint[1:] # depends on [control=['if'], data=[]]
external = values.pop('_external', False) # depends on [control=['if'], data=['reqctx']]
else:
# Otherwise go with the url adapter from the appctx and make
# the urls external by default.
url_adapter = appctx.url_adapter
if url_adapter is None:
raise RuntimeError('Application was not able to create a URL adapter for request independent URL generation. You might be able to fix this by setting the SERVER_NAME config variable.') # depends on [control=['if'], data=[]]
external = values.pop('_external', True)
anchor = values.pop('_anchor', None)
method = values.pop('_method', None)
scheme = values.pop('_scheme', None)
appctx.app.inject_url_defaults(endpoint, values)
if scheme is not None:
if not external:
raise ValueError('When specifying _scheme, _external must be True') # depends on [control=['if'], data=[]]
url_adapter.url_scheme = scheme # depends on [control=['if'], data=['scheme']]
try:
rv = url_adapter.build(endpoint, values, method=method, force_external=external) # depends on [control=['try'], data=[]]
except BuildError as error:
# We need to inject the values again so that the app callback can
# deal with that sort of stuff.
values['_external'] = external
values['_anchor'] = anchor
values['_method'] = method
return appctx.app.handle_url_build_error(error, endpoint, values) # depends on [control=['except'], data=['error']]
if anchor is not None:
rv += '#' + url_quote(anchor) # depends on [control=['if'], data=['anchor']]
return rv |
def audio_load_time(self):
    """Return the aggregate audio load time across all pages.

    The per-page load times come from ``self.get_load_times('audio')``;
    the mean is rounded to ``self.decimal_precision`` decimal places.
    """
    return round(mean(self.get_load_times('audio')), self.decimal_precision)
constant[
Returns aggregate audio load time for all pages.
]
variable[load_times] assign[=] call[name[self].get_load_times, parameter[constant[audio]]]
return[call[name[round], parameter[call[name[mean], parameter[name[load_times]]], name[self].decimal_precision]]] | keyword[def] identifier[audio_load_time] ( identifier[self] ):
literal[string]
identifier[load_times] = identifier[self] . identifier[get_load_times] ( literal[string] )
keyword[return] identifier[round] ( identifier[mean] ( identifier[load_times] ), identifier[self] . identifier[decimal_precision] ) | def audio_load_time(self):
"""
Returns aggregate audio load time for all pages.
"""
load_times = self.get_load_times('audio')
return round(mean(load_times), self.decimal_precision) |
def practice(request):
    """
    Return the given number of questions to practice adaptively. In case of
    POST request, try to save the answer(s).

    GET parameters:
      filter:
        list of lists of identifiers (may be prefixed by minus sign to
        mark complement)
      language:
        language (str) of items
      avoid:
        list of item ids to avoid
      limit:
        number of returned questions (default 10, maximum 100)
      time:
        time in format '%Y-%m-%d_%H:%M:%S' used for practicing
      user:
        identifier for the practicing user (only for stuff users)
      stats:
        turn on the enrichment of the objects by some statistics
      html:
        turn on the HTML version of the API
    BODY:
      see answer resource
    """
    # Anonymous requests (e.g. crawlers) have no user to practice with.
    if request.user.id is None:  # Google Bot
        return render_json(request, {
            'error': _('There is no user available for the practice.'),
            'error_type': 'user_undefined'
        }, status=400, template='models_json.html')
    # Cap the number of returned questions at 100, default 10.
    limit = min(int(request.GET.get('limit', 10)), 100)
    # prepare: resolve user, practice time, items to avoid, and the
    # practice context derived from the requested filter
    user = get_user_id(request)
    time = get_time(request)
    avoid = load_query_json(request.GET, "avoid", "[]")
    practice_filter = get_filter(request)
    practice_context = PracticeContext.objects.from_content(practice_filter)
    environment = get_environment()
    item_selector = get_item_selector()
    # If the caller supplied an explicit time, move the environment clock
    # there so the selection reflects that moment.
    if is_time_overridden(request):
        environment.shift_time(time)
    # save answers: a POST carries answer(s) to persist before selecting;
    # a plain GET closes any practice set still open for this user
    if request.method == 'POST':
        _save_answers(request, practice_context, False)
    elif request.method == 'GET':
        PracticeSet.objects.filter(answer__user_id=request.user.id).update(finished=True)
    if limit > 0:
        # Candidate items: all reachable leaves for the filter, minus the
        # explicitly avoided ones and any globally forbidden identifiers.
        item_ids = Item.objects.filter_all_reachable_leaves(practice_filter, get_language(request), forbidden_identifiers=get_forbidden_items())
        item_ids = list(set(item_ids) - set(avoid))
        # Optionally down-sample the candidate set to bound selection cost.
        limit_size = get_config('proso_models', 'practice.limit_item_set_size_to_select_from', default=None)
        if limit_size is not None and limit_size < len(item_ids):
            item_ids = sample(item_ids, limit_size)
        if len(item_ids) == 0:
            return render_json(request, {
                'error': _('There is no item for the given filter to practice.'),
                'error_type': 'empty_practice'
            }, status=404, template='models_json.html')
        # Adaptive selection; items already queued on the client count
        # toward the queue length.
        selected_items, meta = item_selector.select(environment, user, item_ids, time, practice_context.id, limit, items_in_queue=len(avoid))
        result = []
        for item, item_meta in zip(selected_items, meta):
            question = {
                'object_type': 'question',
                'payload': Item.objects.item_id_to_json(item),
            }
            # Attach per-item selection metadata only when present.
            if item_meta is not None:
                question['meta'] = item_meta
            result.append(question)
    else:
        # limit == 0: the request only saved answers / closed sets.
        result = []
    return render_json(request, result, template='models_json.html', help_text=practice.__doc__)
constant[
Return the given number of questions to practice adaptively. In case of
POST request, try to save the answer(s).
GET parameters:
filter:
list of lists of identifiers (may be prefixed by minus sign to
mark complement)
language:
language (str) of items
avoid:
list of item ids to avoid
limit:
number of returned questions (default 10, maximum 100)
time:
time in format '%Y-%m-%d_%H:%M:%S' used for practicing
user:
identifier for the practicing user (only for stuff users)
stats:
turn on the enrichment of the objects by some statistics
html:
turn on the HTML version of the API
BODY:
see answer resource
]
if compare[name[request].user.id is constant[None]] begin[:]
return[call[name[render_json], parameter[name[request], dictionary[[<ast.Constant object at 0x7da1b2298eb0>, <ast.Constant object at 0x7da1b22991e0>], [<ast.Call object at 0x7da1b229ab00>, <ast.Constant object at 0x7da1b2298fa0>]]]]]
variable[limit] assign[=] call[name[min], parameter[call[name[int], parameter[call[name[request].GET.get, parameter[constant[limit], constant[10]]]]], constant[100]]]
variable[user] assign[=] call[name[get_user_id], parameter[name[request]]]
variable[time] assign[=] call[name[get_time], parameter[name[request]]]
variable[avoid] assign[=] call[name[load_query_json], parameter[name[request].GET, constant[avoid], constant[[]]]]
variable[practice_filter] assign[=] call[name[get_filter], parameter[name[request]]]
variable[practice_context] assign[=] call[name[PracticeContext].objects.from_content, parameter[name[practice_filter]]]
variable[environment] assign[=] call[name[get_environment], parameter[]]
variable[item_selector] assign[=] call[name[get_item_selector], parameter[]]
if call[name[is_time_overridden], parameter[name[request]]] begin[:]
call[name[environment].shift_time, parameter[name[time]]]
if compare[name[request].method equal[==] constant[POST]] begin[:]
call[name[_save_answers], parameter[name[request], name[practice_context], constant[False]]]
if compare[name[limit] greater[>] constant[0]] begin[:]
variable[item_ids] assign[=] call[name[Item].objects.filter_all_reachable_leaves, parameter[name[practice_filter], call[name[get_language], parameter[name[request]]]]]
variable[item_ids] assign[=] call[name[list], parameter[binary_operation[call[name[set], parameter[name[item_ids]]] - call[name[set], parameter[name[avoid]]]]]]
variable[limit_size] assign[=] call[name[get_config], parameter[constant[proso_models], constant[practice.limit_item_set_size_to_select_from]]]
if <ast.BoolOp object at 0x7da1b2298d00> begin[:]
variable[item_ids] assign[=] call[name[sample], parameter[name[item_ids], name[limit_size]]]
if compare[call[name[len], parameter[name[item_ids]]] equal[==] constant[0]] begin[:]
return[call[name[render_json], parameter[name[request], dictionary[[<ast.Constant object at 0x7da1b229a320>, <ast.Constant object at 0x7da1b2298e80>], [<ast.Call object at 0x7da1b2299210>, <ast.Constant object at 0x7da1b2298a30>]]]]]
<ast.Tuple object at 0x7da1b229b7c0> assign[=] call[name[item_selector].select, parameter[name[environment], name[user], name[item_ids], name[time], name[practice_context].id, name[limit]]]
variable[result] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da18f58c4c0>, <ast.Name object at 0x7da18f58f760>]]] in starred[call[name[zip], parameter[name[selected_items], name[meta]]]] begin[:]
variable[question] assign[=] dictionary[[<ast.Constant object at 0x7da18f58ecb0>, <ast.Constant object at 0x7da18f58d300>], [<ast.Constant object at 0x7da18f58df90>, <ast.Call object at 0x7da18f58f0d0>]]
if compare[name[item_meta] is_not constant[None]] begin[:]
call[name[question]][constant[meta]] assign[=] name[item_meta]
call[name[result].append, parameter[name[question]]]
return[call[name[render_json], parameter[name[request], name[result]]]] | keyword[def] identifier[practice] ( identifier[request] ):
literal[string]
keyword[if] identifier[request] . identifier[user] . identifier[id] keyword[is] keyword[None] :
keyword[return] identifier[render_json] ( identifier[request] ,{
literal[string] : identifier[_] ( literal[string] ),
literal[string] : literal[string]
}, identifier[status] = literal[int] , identifier[template] = literal[string] )
identifier[limit] = identifier[min] ( identifier[int] ( identifier[request] . identifier[GET] . identifier[get] ( literal[string] , literal[int] )), literal[int] )
identifier[user] = identifier[get_user_id] ( identifier[request] )
identifier[time] = identifier[get_time] ( identifier[request] )
identifier[avoid] = identifier[load_query_json] ( identifier[request] . identifier[GET] , literal[string] , literal[string] )
identifier[practice_filter] = identifier[get_filter] ( identifier[request] )
identifier[practice_context] = identifier[PracticeContext] . identifier[objects] . identifier[from_content] ( identifier[practice_filter] )
identifier[environment] = identifier[get_environment] ()
identifier[item_selector] = identifier[get_item_selector] ()
keyword[if] identifier[is_time_overridden] ( identifier[request] ):
identifier[environment] . identifier[shift_time] ( identifier[time] )
keyword[if] identifier[request] . identifier[method] == literal[string] :
identifier[_save_answers] ( identifier[request] , identifier[practice_context] , keyword[False] )
keyword[elif] identifier[request] . identifier[method] == literal[string] :
identifier[PracticeSet] . identifier[objects] . identifier[filter] ( identifier[answer__user_id] = identifier[request] . identifier[user] . identifier[id] ). identifier[update] ( identifier[finished] = keyword[True] )
keyword[if] identifier[limit] > literal[int] :
identifier[item_ids] = identifier[Item] . identifier[objects] . identifier[filter_all_reachable_leaves] ( identifier[practice_filter] , identifier[get_language] ( identifier[request] ), identifier[forbidden_identifiers] = identifier[get_forbidden_items] ())
identifier[item_ids] = identifier[list] ( identifier[set] ( identifier[item_ids] )- identifier[set] ( identifier[avoid] ))
identifier[limit_size] = identifier[get_config] ( literal[string] , literal[string] , identifier[default] = keyword[None] )
keyword[if] identifier[limit_size] keyword[is] keyword[not] keyword[None] keyword[and] identifier[limit_size] < identifier[len] ( identifier[item_ids] ):
identifier[item_ids] = identifier[sample] ( identifier[item_ids] , identifier[limit_size] )
keyword[if] identifier[len] ( identifier[item_ids] )== literal[int] :
keyword[return] identifier[render_json] ( identifier[request] ,{
literal[string] : identifier[_] ( literal[string] ),
literal[string] : literal[string]
}, identifier[status] = literal[int] , identifier[template] = literal[string] )
identifier[selected_items] , identifier[meta] = identifier[item_selector] . identifier[select] ( identifier[environment] , identifier[user] , identifier[item_ids] , identifier[time] , identifier[practice_context] . identifier[id] , identifier[limit] , identifier[items_in_queue] = identifier[len] ( identifier[avoid] ))
identifier[result] =[]
keyword[for] identifier[item] , identifier[item_meta] keyword[in] identifier[zip] ( identifier[selected_items] , identifier[meta] ):
identifier[question] ={
literal[string] : literal[string] ,
literal[string] : identifier[Item] . identifier[objects] . identifier[item_id_to_json] ( identifier[item] ),
}
keyword[if] identifier[item_meta] keyword[is] keyword[not] keyword[None] :
identifier[question] [ literal[string] ]= identifier[item_meta]
identifier[result] . identifier[append] ( identifier[question] )
keyword[else] :
identifier[result] =[]
keyword[return] identifier[render_json] ( identifier[request] , identifier[result] , identifier[template] = literal[string] , identifier[help_text] = identifier[practice] . identifier[__doc__] ) | def practice(request):
"""
Return the given number of questions to practice adaptively. In case of
POST request, try to save the answer(s).
GET parameters:
filter:
list of lists of identifiers (may be prefixed by minus sign to
mark complement)
language:
language (str) of items
avoid:
list of item ids to avoid
limit:
number of returned questions (default 10, maximum 100)
time:
time in format '%Y-%m-%d_%H:%M:%S' used for practicing
user:
identifier for the practicing user (only for stuff users)
stats:
turn on the enrichment of the objects by some statistics
html:
turn on the HTML version of the API
BODY:
see answer resource
"""
if request.user.id is None: # Google Bot
return render_json(request, {'error': _('There is no user available for the practice.'), 'error_type': 'user_undefined'}, status=400, template='models_json.html') # depends on [control=['if'], data=[]]
limit = min(int(request.GET.get('limit', 10)), 100)
# prepare
user = get_user_id(request)
time = get_time(request)
avoid = load_query_json(request.GET, 'avoid', '[]')
practice_filter = get_filter(request)
practice_context = PracticeContext.objects.from_content(practice_filter)
environment = get_environment()
item_selector = get_item_selector()
if is_time_overridden(request):
environment.shift_time(time) # depends on [control=['if'], data=[]]
# save answers
if request.method == 'POST':
_save_answers(request, practice_context, False) # depends on [control=['if'], data=[]]
elif request.method == 'GET':
PracticeSet.objects.filter(answer__user_id=request.user.id).update(finished=True) # depends on [control=['if'], data=[]]
if limit > 0:
item_ids = Item.objects.filter_all_reachable_leaves(practice_filter, get_language(request), forbidden_identifiers=get_forbidden_items())
item_ids = list(set(item_ids) - set(avoid))
limit_size = get_config('proso_models', 'practice.limit_item_set_size_to_select_from', default=None)
if limit_size is not None and limit_size < len(item_ids):
item_ids = sample(item_ids, limit_size) # depends on [control=['if'], data=[]]
if len(item_ids) == 0:
return render_json(request, {'error': _('There is no item for the given filter to practice.'), 'error_type': 'empty_practice'}, status=404, template='models_json.html') # depends on [control=['if'], data=[]]
(selected_items, meta) = item_selector.select(environment, user, item_ids, time, practice_context.id, limit, items_in_queue=len(avoid))
result = []
for (item, item_meta) in zip(selected_items, meta):
question = {'object_type': 'question', 'payload': Item.objects.item_id_to_json(item)}
if item_meta is not None:
question['meta'] = item_meta # depends on [control=['if'], data=['item_meta']]
result.append(question) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=['limit']]
else:
result = []
return render_json(request, result, template='models_json.html', help_text=practice.__doc__) |
def enroll_user_in_course(self, username, course_id, mode, cohort=None):
    """
    Enroll the user in the given course via the enrollment API.

    Args:
        username (str): The username by which the user goes on the OpenEdX platform
        course_id (str): The string value of the course's unique identifier
        mode (str): The enrollment mode which should be used for the enrollment
        cohort (str): Add the user to this named cohort

    Returns:
        dict: A dictionary containing details of the enrollment, including course details, mode, username, etc.
    """
    payload = {
        'user': username,
        'course_details': {'course_id': course_id},
        'mode': mode,
        'cohort': cohort,
    }
    return self.client.enrollment.post(payload)
constant[
Call the enrollment API to enroll the user in the course specified by course_id.
Args:
username (str): The username by which the user goes on the OpenEdX platform
course_id (str): The string value of the course's unique identifier
mode (str): The enrollment mode which should be used for the enrollment
cohort (str): Add the user to this named cohort
Returns:
dict: A dictionary containing details of the enrollment, including course details, mode, username, etc.
]
return[call[name[self].client.enrollment.post, parameter[dictionary[[<ast.Constant object at 0x7da20c991a80>, <ast.Constant object at 0x7da20c991c60>, <ast.Constant object at 0x7da20c991e70>, <ast.Constant object at 0x7da20c991690>], [<ast.Name object at 0x7da20c990310>, <ast.Dict object at 0x7da20c9917e0>, <ast.Name object at 0x7da20c992e00>, <ast.Name object at 0x7da20c990880>]]]]] | keyword[def] identifier[enroll_user_in_course] ( identifier[self] , identifier[username] , identifier[course_id] , identifier[mode] , identifier[cohort] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[client] . identifier[enrollment] . identifier[post] (
{
literal[string] : identifier[username] ,
literal[string] :{ literal[string] : identifier[course_id] },
literal[string] : identifier[mode] ,
literal[string] : identifier[cohort] ,
}
) | def enroll_user_in_course(self, username, course_id, mode, cohort=None):
"""
Call the enrollment API to enroll the user in the course specified by course_id.
Args:
username (str): The username by which the user goes on the OpenEdX platform
course_id (str): The string value of the course's unique identifier
mode (str): The enrollment mode which should be used for the enrollment
cohort (str): Add the user to this named cohort
Returns:
dict: A dictionary containing details of the enrollment, including course details, mode, username, etc.
"""
return self.client.enrollment.post({'user': username, 'course_details': {'course_id': course_id}, 'mode': mode, 'cohort': cohort}) |
def sendVX(self, vx):
    '''
    Sends VX velocity.

    @param vx: VX velocity
    @type vx: float
    '''
    # Use the lock as a context manager: unlike a manual acquire()/release()
    # pair, this guarantees the lock is released even if the assignment raises.
    with self.lock:
        self.data.vx = vx
constant[
Sends VX velocity.
@param vx: VX velocity
@type vx: float
]
call[name[self].lock.acquire, parameter[]]
name[self].data.vx assign[=] name[vx]
call[name[self].lock.release, parameter[]] | keyword[def] identifier[sendVX] ( identifier[self] , identifier[vx] ):
literal[string]
identifier[self] . identifier[lock] . identifier[acquire] ()
identifier[self] . identifier[data] . identifier[vx] = identifier[vx]
identifier[self] . identifier[lock] . identifier[release] () | def sendVX(self, vx):
"""
Sends VX velocity.
@param vx: VX velocity
@type vx: float
"""
self.lock.acquire()
self.data.vx = vx
self.lock.release() |
def is_present(name, **kwargs):
    '''
    Check if Zabbix Template already exists.

    :param name: Zabbix Template name
    :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
    :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
    :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)

    .. code-block:: yaml

        does_zabbix-template-exist:
            zabbix_template.is_present:
                - name: Template OS Linux
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}
    # A lookup failure (e.g. no matching object) is treated the same as
    # the template simply not existing.
    try:
        template_id = __salt__['zabbix.get_object_id_by_params']('template', {'filter': {'name': name}}, **kwargs)
    except SaltException:
        template_id = False
    if template_id:
        ret['result'] = True
        ret['comment'] = 'Zabbix Template "{0}" exists.'.format(name)
    else:
        ret['result'] = False
        ret['comment'] = 'Zabbix Template "{0}" does not exist.'.format(name)
    return ret
constant[
Check if Zabbix Template already exists.
:param name: Zabbix Template name
:param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
:param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
:param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)
.. code-block:: yaml
does_zabbix-template-exist:
zabbix_template.is_present:
- name: Template OS Linux
]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da1b1cb23b0>, <ast.Constant object at 0x7da1b1cb20e0>, <ast.Constant object at 0x7da1b1cb1db0>, <ast.Constant object at 0x7da1b1cb1000>], [<ast.Name object at 0x7da1b1cb3790>, <ast.Constant object at 0x7da1b1cb2770>, <ast.Constant object at 0x7da1b1cb2a40>, <ast.Dict object at 0x7da1b1cb3580>]]
<ast.Try object at 0x7da1b1cb2350>
if <ast.UnaryOp object at 0x7da1b1cb1510> begin[:]
call[name[ret]][constant[result]] assign[=] constant[False]
call[name[ret]][constant[comment]] assign[=] call[constant[Zabbix Template "{0}" does not exist.].format, parameter[name[name]]]
return[name[ret]] | keyword[def] identifier[is_present] ( identifier[name] ,** identifier[kwargs] ):
literal[string]
identifier[ret] ={ literal[string] : identifier[name] , literal[string] : keyword[False] , literal[string] : literal[string] , literal[string] :{}}
keyword[try] :
identifier[object_id] = identifier[__salt__] [ literal[string] ]( literal[string] ,{ literal[string] :{ literal[string] : identifier[name] }},** identifier[kwargs] )
keyword[except] identifier[SaltException] :
identifier[object_id] = keyword[False]
keyword[if] keyword[not] identifier[object_id] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
keyword[else] :
identifier[ret] [ literal[string] ]= keyword[True]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
keyword[return] identifier[ret] | def is_present(name, **kwargs):
"""
Check if Zabbix Template already exists.
:param name: Zabbix Template name
:param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
:param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
:param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)
.. code-block:: yaml
does_zabbix-template-exist:
zabbix_template.is_present:
- name: Template OS Linux
"""
ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}
try:
object_id = __salt__['zabbix.get_object_id_by_params']('template', {'filter': {'name': name}}, **kwargs) # depends on [control=['try'], data=[]]
except SaltException:
object_id = False # depends on [control=['except'], data=[]]
if not object_id:
ret['result'] = False
ret['comment'] = 'Zabbix Template "{0}" does not exist.'.format(name) # depends on [control=['if'], data=[]]
else:
ret['result'] = True
ret['comment'] = 'Zabbix Template "{0}" exists.'.format(name)
return ret |
def _caveat_v1_to_dict(c):
''' Return a caveat as a dictionary for export as the JSON
macaroon v1 format.
'''
serialized = {}
if len(c.caveat_id) > 0:
serialized['cid'] = c.caveat_id
if c.verification_key_id:
serialized['vid'] = utils.raw_urlsafe_b64encode(
c.verification_key_id).decode('utf-8')
if c.location:
serialized['cl'] = c.location
return serialized | def function[_caveat_v1_to_dict, parameter[c]]:
constant[ Return a caveat as a dictionary for export as the JSON
macaroon v1 format.
]
variable[serialized] assign[=] dictionary[[], []]
if compare[call[name[len], parameter[name[c].caveat_id]] greater[>] constant[0]] begin[:]
call[name[serialized]][constant[cid]] assign[=] name[c].caveat_id
if name[c].verification_key_id begin[:]
call[name[serialized]][constant[vid]] assign[=] call[call[name[utils].raw_urlsafe_b64encode, parameter[name[c].verification_key_id]].decode, parameter[constant[utf-8]]]
if name[c].location begin[:]
call[name[serialized]][constant[cl]] assign[=] name[c].location
return[name[serialized]] | keyword[def] identifier[_caveat_v1_to_dict] ( identifier[c] ):
literal[string]
identifier[serialized] ={}
keyword[if] identifier[len] ( identifier[c] . identifier[caveat_id] )> literal[int] :
identifier[serialized] [ literal[string] ]= identifier[c] . identifier[caveat_id]
keyword[if] identifier[c] . identifier[verification_key_id] :
identifier[serialized] [ literal[string] ]= identifier[utils] . identifier[raw_urlsafe_b64encode] (
identifier[c] . identifier[verification_key_id] ). identifier[decode] ( literal[string] )
keyword[if] identifier[c] . identifier[location] :
identifier[serialized] [ literal[string] ]= identifier[c] . identifier[location]
keyword[return] identifier[serialized] | def _caveat_v1_to_dict(c):
""" Return a caveat as a dictionary for export as the JSON
macaroon v1 format.
"""
serialized = {}
if len(c.caveat_id) > 0:
serialized['cid'] = c.caveat_id # depends on [control=['if'], data=[]]
if c.verification_key_id:
serialized['vid'] = utils.raw_urlsafe_b64encode(c.verification_key_id).decode('utf-8') # depends on [control=['if'], data=[]]
if c.location:
serialized['cl'] = c.location # depends on [control=['if'], data=[]]
return serialized |
def filter_by(lookup_dict,
grain='os_family',
merge=None,
default='default',
base=None):
'''
.. versionadded:: 0.17.0
Look up the given grain in a given dictionary for the current OS and return
the result
Although this may occasionally be useful at the CLI, the primary intent of
this function is for use in Jinja to make short work of creating lookup
tables for OS-specific data. For example:
.. code-block:: jinja
{% set apache = salt['grains.filter_by']({
'Debian': {'pkg': 'apache2', 'srv': 'apache2'},
'RedHat': {'pkg': 'httpd', 'srv': 'httpd'},
}), default='Debian' %}
myapache:
pkg.installed:
- name: {{ apache.pkg }}
service.running:
- name: {{ apache.srv }}
Values in the lookup table may be overridden by values in Pillar. An
example Pillar to override values in the example above could be as follows:
.. code-block:: yaml
apache:
lookup:
pkg: apache_13
srv: apache
The call to ``filter_by()`` would be modified as follows to reference those
Pillar values:
.. code-block:: jinja
{% set apache = salt['grains.filter_by']({
...
}, merge=salt['pillar.get']('apache:lookup')) %}
:param lookup_dict: A dictionary, keyed by a grain, containing a value or
values relevant to systems matching that grain. For example, a key
could be the grain for an OS and the value could the name of a package
on that particular OS.
:param grain: The name of a grain to match with the current system's
grains. For example, the value of the "os_family" grain for the current
system could be used to pull values from the ``lookup_dict``
dictionary.
:param merge: A dictionary to merge with the ``lookup_dict`` before doing
the lookup. This allows Pillar to override the values in the
``lookup_dict``. This could be useful, for example, to override the
values for non-standard package names such as when using a different
Python version from the default Python version provided by the OS
(e.g., ``python26-mysql`` instead of ``python-mysql``).
:param default: default lookup_dict's key used if the grain does not exists
or if the grain value has no match on lookup_dict.
.. versionadded:: 2014.1.0
:param base: A lookup_dict key to use for a base dictionary. The
grain-selected ``lookup_dict`` is merged over this and then finally
the ``merge`` dictionary is merged. This allows common values for
each case to be collected in the base and overridden by the grain
selection dictionary and the merge dictionary. Default is None.
.. versionadded:: 2015.8.11,2016.3.2
CLI Example:
.. code-block:: bash
salt '*' grains.filter_by '{Debian: Debheads rule, RedHat: I love my hat}'
# this one will render {D: {E: I, G: H}, J: K}
salt '*' grains.filter_by '{A: B, C: {D: {E: F,G: H}}}' 'xxx' '{D: {E: I},J: K}' 'C'
'''
ret = lookup_dict.get(
__grains__.get(
grain, default),
lookup_dict.get(
default, None)
)
if base and base in lookup_dict:
base_values = lookup_dict[base]
if ret is None:
ret = base_values
elif isinstance(base_values, collections.Mapping):
if not isinstance(ret, collections.Mapping):
raise SaltException('filter_by default and look-up values must both be dictionaries.')
ret = salt.utils.dictupdate.update(copy.deepcopy(base_values), ret)
if merge:
if not isinstance(merge, collections.Mapping):
raise SaltException('filter_by merge argument must be a dictionary.')
else:
if ret is None:
ret = merge
else:
salt.utils.dictupdate.update(ret, merge)
return ret | def function[filter_by, parameter[lookup_dict, grain, merge, default, base]]:
constant[
.. versionadded:: 0.17.0
Look up the given grain in a given dictionary for the current OS and return
the result
Although this may occasionally be useful at the CLI, the primary intent of
this function is for use in Jinja to make short work of creating lookup
tables for OS-specific data. For example:
.. code-block:: jinja
{% set apache = salt['grains.filter_by']({
'Debian': {'pkg': 'apache2', 'srv': 'apache2'},
'RedHat': {'pkg': 'httpd', 'srv': 'httpd'},
}), default='Debian' %}
myapache:
pkg.installed:
- name: {{ apache.pkg }}
service.running:
- name: {{ apache.srv }}
Values in the lookup table may be overridden by values in Pillar. An
example Pillar to override values in the example above could be as follows:
.. code-block:: yaml
apache:
lookup:
pkg: apache_13
srv: apache
The call to ``filter_by()`` would be modified as follows to reference those
Pillar values:
.. code-block:: jinja
{% set apache = salt['grains.filter_by']({
...
}, merge=salt['pillar.get']('apache:lookup')) %}
:param lookup_dict: A dictionary, keyed by a grain, containing a value or
values relevant to systems matching that grain. For example, a key
could be the grain for an OS and the value could the name of a package
on that particular OS.
:param grain: The name of a grain to match with the current system's
grains. For example, the value of the "os_family" grain for the current
system could be used to pull values from the ``lookup_dict``
dictionary.
:param merge: A dictionary to merge with the ``lookup_dict`` before doing
the lookup. This allows Pillar to override the values in the
``lookup_dict``. This could be useful, for example, to override the
values for non-standard package names such as when using a different
Python version from the default Python version provided by the OS
(e.g., ``python26-mysql`` instead of ``python-mysql``).
:param default: default lookup_dict's key used if the grain does not exists
or if the grain value has no match on lookup_dict.
.. versionadded:: 2014.1.0
:param base: A lookup_dict key to use for a base dictionary. The
grain-selected ``lookup_dict`` is merged over this and then finally
the ``merge`` dictionary is merged. This allows common values for
each case to be collected in the base and overridden by the grain
selection dictionary and the merge dictionary. Default is None.
.. versionadded:: 2015.8.11,2016.3.2
CLI Example:
.. code-block:: bash
salt '*' grains.filter_by '{Debian: Debheads rule, RedHat: I love my hat}'
# this one will render {D: {E: I, G: H}, J: K}
salt '*' grains.filter_by '{A: B, C: {D: {E: F,G: H}}}' 'xxx' '{D: {E: I},J: K}' 'C'
]
variable[ret] assign[=] call[name[lookup_dict].get, parameter[call[name[__grains__].get, parameter[name[grain], name[default]]], call[name[lookup_dict].get, parameter[name[default], constant[None]]]]]
if <ast.BoolOp object at 0x7da1b1f81cc0> begin[:]
variable[base_values] assign[=] call[name[lookup_dict]][name[base]]
if compare[name[ret] is constant[None]] begin[:]
variable[ret] assign[=] name[base_values]
if name[merge] begin[:]
if <ast.UnaryOp object at 0x7da1b1f81db0> begin[:]
<ast.Raise object at 0x7da1b1f82320>
return[name[ret]] | keyword[def] identifier[filter_by] ( identifier[lookup_dict] ,
identifier[grain] = literal[string] ,
identifier[merge] = keyword[None] ,
identifier[default] = literal[string] ,
identifier[base] = keyword[None] ):
literal[string]
identifier[ret] = identifier[lookup_dict] . identifier[get] (
identifier[__grains__] . identifier[get] (
identifier[grain] , identifier[default] ),
identifier[lookup_dict] . identifier[get] (
identifier[default] , keyword[None] )
)
keyword[if] identifier[base] keyword[and] identifier[base] keyword[in] identifier[lookup_dict] :
identifier[base_values] = identifier[lookup_dict] [ identifier[base] ]
keyword[if] identifier[ret] keyword[is] keyword[None] :
identifier[ret] = identifier[base_values]
keyword[elif] identifier[isinstance] ( identifier[base_values] , identifier[collections] . identifier[Mapping] ):
keyword[if] keyword[not] identifier[isinstance] ( identifier[ret] , identifier[collections] . identifier[Mapping] ):
keyword[raise] identifier[SaltException] ( literal[string] )
identifier[ret] = identifier[salt] . identifier[utils] . identifier[dictupdate] . identifier[update] ( identifier[copy] . identifier[deepcopy] ( identifier[base_values] ), identifier[ret] )
keyword[if] identifier[merge] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[merge] , identifier[collections] . identifier[Mapping] ):
keyword[raise] identifier[SaltException] ( literal[string] )
keyword[else] :
keyword[if] identifier[ret] keyword[is] keyword[None] :
identifier[ret] = identifier[merge]
keyword[else] :
identifier[salt] . identifier[utils] . identifier[dictupdate] . identifier[update] ( identifier[ret] , identifier[merge] )
keyword[return] identifier[ret] | def filter_by(lookup_dict, grain='os_family', merge=None, default='default', base=None):
"""
.. versionadded:: 0.17.0
Look up the given grain in a given dictionary for the current OS and return
the result
Although this may occasionally be useful at the CLI, the primary intent of
this function is for use in Jinja to make short work of creating lookup
tables for OS-specific data. For example:
.. code-block:: jinja
{% set apache = salt['grains.filter_by']({
'Debian': {'pkg': 'apache2', 'srv': 'apache2'},
'RedHat': {'pkg': 'httpd', 'srv': 'httpd'},
}), default='Debian' %}
myapache:
pkg.installed:
- name: {{ apache.pkg }}
service.running:
- name: {{ apache.srv }}
Values in the lookup table may be overridden by values in Pillar. An
example Pillar to override values in the example above could be as follows:
.. code-block:: yaml
apache:
lookup:
pkg: apache_13
srv: apache
The call to ``filter_by()`` would be modified as follows to reference those
Pillar values:
.. code-block:: jinja
{% set apache = salt['grains.filter_by']({
...
}, merge=salt['pillar.get']('apache:lookup')) %}
:param lookup_dict: A dictionary, keyed by a grain, containing a value or
values relevant to systems matching that grain. For example, a key
could be the grain for an OS and the value could the name of a package
on that particular OS.
:param grain: The name of a grain to match with the current system's
grains. For example, the value of the "os_family" grain for the current
system could be used to pull values from the ``lookup_dict``
dictionary.
:param merge: A dictionary to merge with the ``lookup_dict`` before doing
the lookup. This allows Pillar to override the values in the
``lookup_dict``. This could be useful, for example, to override the
values for non-standard package names such as when using a different
Python version from the default Python version provided by the OS
(e.g., ``python26-mysql`` instead of ``python-mysql``).
:param default: default lookup_dict's key used if the grain does not exists
or if the grain value has no match on lookup_dict.
.. versionadded:: 2014.1.0
:param base: A lookup_dict key to use for a base dictionary. The
grain-selected ``lookup_dict`` is merged over this and then finally
the ``merge`` dictionary is merged. This allows common values for
each case to be collected in the base and overridden by the grain
selection dictionary and the merge dictionary. Default is None.
.. versionadded:: 2015.8.11,2016.3.2
CLI Example:
.. code-block:: bash
salt '*' grains.filter_by '{Debian: Debheads rule, RedHat: I love my hat}'
# this one will render {D: {E: I, G: H}, J: K}
salt '*' grains.filter_by '{A: B, C: {D: {E: F,G: H}}}' 'xxx' '{D: {E: I},J: K}' 'C'
"""
ret = lookup_dict.get(__grains__.get(grain, default), lookup_dict.get(default, None))
if base and base in lookup_dict:
base_values = lookup_dict[base]
if ret is None:
ret = base_values # depends on [control=['if'], data=['ret']]
elif isinstance(base_values, collections.Mapping):
if not isinstance(ret, collections.Mapping):
raise SaltException('filter_by default and look-up values must both be dictionaries.') # depends on [control=['if'], data=[]]
ret = salt.utils.dictupdate.update(copy.deepcopy(base_values), ret) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if merge:
if not isinstance(merge, collections.Mapping):
raise SaltException('filter_by merge argument must be a dictionary.') # depends on [control=['if'], data=[]]
elif ret is None:
ret = merge # depends on [control=['if'], data=['ret']]
else:
salt.utils.dictupdate.update(ret, merge) # depends on [control=['if'], data=[]]
return ret |
def remove_duplicate_sg(security_groups):
"""Removes duplicate Security Groups that share a same name alias
Args:
security_groups (list): A list of security group id to compare against SECURITYGROUP_REPLACEMENTS
Returns:
security_groups (list): A list of security groups with duplicate aliases removed
"""
for each_sg, duplicate_sg_name in SECURITYGROUP_REPLACEMENTS.items():
if each_sg in security_groups and duplicate_sg_name in security_groups:
LOG.info('Duplicate SG found. Removing %s in favor of %s.', duplicate_sg_name, each_sg)
security_groups.remove(duplicate_sg_name)
return security_groups | def function[remove_duplicate_sg, parameter[security_groups]]:
constant[Removes duplicate Security Groups that share a same name alias
Args:
security_groups (list): A list of security group id to compare against SECURITYGROUP_REPLACEMENTS
Returns:
security_groups (list): A list of security groups with duplicate aliases removed
]
for taget[tuple[[<ast.Name object at 0x7da18fe900d0>, <ast.Name object at 0x7da18fe91900>]]] in starred[call[name[SECURITYGROUP_REPLACEMENTS].items, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da18fe90c10> begin[:]
call[name[LOG].info, parameter[constant[Duplicate SG found. Removing %s in favor of %s.], name[duplicate_sg_name], name[each_sg]]]
call[name[security_groups].remove, parameter[name[duplicate_sg_name]]]
return[name[security_groups]] | keyword[def] identifier[remove_duplicate_sg] ( identifier[security_groups] ):
literal[string]
keyword[for] identifier[each_sg] , identifier[duplicate_sg_name] keyword[in] identifier[SECURITYGROUP_REPLACEMENTS] . identifier[items] ():
keyword[if] identifier[each_sg] keyword[in] identifier[security_groups] keyword[and] identifier[duplicate_sg_name] keyword[in] identifier[security_groups] :
identifier[LOG] . identifier[info] ( literal[string] , identifier[duplicate_sg_name] , identifier[each_sg] )
identifier[security_groups] . identifier[remove] ( identifier[duplicate_sg_name] )
keyword[return] identifier[security_groups] | def remove_duplicate_sg(security_groups):
"""Removes duplicate Security Groups that share a same name alias
Args:
security_groups (list): A list of security group id to compare against SECURITYGROUP_REPLACEMENTS
Returns:
security_groups (list): A list of security groups with duplicate aliases removed
"""
for (each_sg, duplicate_sg_name) in SECURITYGROUP_REPLACEMENTS.items():
if each_sg in security_groups and duplicate_sg_name in security_groups:
LOG.info('Duplicate SG found. Removing %s in favor of %s.', duplicate_sg_name, each_sg)
security_groups.remove(duplicate_sg_name) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return security_groups |
def read32(self, offset):
"""Read 32-bits from the specified `offset` in bytes, relative to the
base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
Returns:
int: 32-bit value read.
Raises:
TypeError: if `offset` type is invalid.
ValueError: if `offset` is out of bounds.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, 4)
return struct.unpack("=L", self.mapping[offset:offset + 4])[0] | def function[read32, parameter[self, offset]]:
constant[Read 32-bits from the specified `offset` in bytes, relative to the
base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
Returns:
int: 32-bit value read.
Raises:
TypeError: if `offset` type is invalid.
ValueError: if `offset` is out of bounds.
]
if <ast.UnaryOp object at 0x7da18f721d20> begin[:]
<ast.Raise object at 0x7da18f723dc0>
variable[offset] assign[=] call[name[self]._adjust_offset, parameter[name[offset]]]
call[name[self]._validate_offset, parameter[name[offset], constant[4]]]
return[call[call[name[struct].unpack, parameter[constant[=L], call[name[self].mapping][<ast.Slice object at 0x7da18f721b40>]]]][constant[0]]] | keyword[def] identifier[read32] ( identifier[self] , identifier[offset] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[offset] ,( identifier[int] , identifier[long] )):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[offset] = identifier[self] . identifier[_adjust_offset] ( identifier[offset] )
identifier[self] . identifier[_validate_offset] ( identifier[offset] , literal[int] )
keyword[return] identifier[struct] . identifier[unpack] ( literal[string] , identifier[self] . identifier[mapping] [ identifier[offset] : identifier[offset] + literal[int] ])[ literal[int] ] | def read32(self, offset):
"""Read 32-bits from the specified `offset` in bytes, relative to the
base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
Returns:
int: 32-bit value read.
Raises:
TypeError: if `offset` type is invalid.
ValueError: if `offset` is out of bounds.
"""
if not isinstance(offset, (int, long)):
raise TypeError('Invalid offset type, should be integer.') # depends on [control=['if'], data=[]]
offset = self._adjust_offset(offset)
self._validate_offset(offset, 4)
return struct.unpack('=L', self.mapping[offset:offset + 4])[0] |
def migrate(db, name, package, conf={}):
""" Run all migrations that have not been run
Migrations will be run inside a transaction.
:param db: database connection object
:param name: name associated with the migrations
:param package: package that contains the migrations
:param conf: application configuration object
"""
(current_major_version, current_minor_version) = get_version(db, name)
package = importlib.import_module(package)
logging.debug('Migration version for %s is %s.%s',
package.__name__,
current_major_version,
current_minor_version)
mods = get_mods(package)
migrations = get_new(mods,
current_major_version,
current_minor_version + 1)
for (modname, major_version, minor_version) in migrations:
mod = load_mod(modname, package)
run_migration(name, major_version, minor_version, db, mod, conf)
logging.debug("Finished migrating to %s", modname) | def function[migrate, parameter[db, name, package, conf]]:
constant[ Run all migrations that have not been run
Migrations will be run inside a transaction.
:param db: database connection object
:param name: name associated with the migrations
:param package: package that contains the migrations
:param conf: application configuration object
]
<ast.Tuple object at 0x7da1b2344cd0> assign[=] call[name[get_version], parameter[name[db], name[name]]]
variable[package] assign[=] call[name[importlib].import_module, parameter[name[package]]]
call[name[logging].debug, parameter[constant[Migration version for %s is %s.%s], name[package].__name__, name[current_major_version], name[current_minor_version]]]
variable[mods] assign[=] call[name[get_mods], parameter[name[package]]]
variable[migrations] assign[=] call[name[get_new], parameter[name[mods], name[current_major_version], binary_operation[name[current_minor_version] + constant[1]]]]
for taget[tuple[[<ast.Name object at 0x7da1b2344c40>, <ast.Name object at 0x7da1b2345660>, <ast.Name object at 0x7da1b23453f0>]]] in starred[name[migrations]] begin[:]
variable[mod] assign[=] call[name[load_mod], parameter[name[modname], name[package]]]
call[name[run_migration], parameter[name[name], name[major_version], name[minor_version], name[db], name[mod], name[conf]]]
call[name[logging].debug, parameter[constant[Finished migrating to %s], name[modname]]] | keyword[def] identifier[migrate] ( identifier[db] , identifier[name] , identifier[package] , identifier[conf] ={}):
literal[string]
( identifier[current_major_version] , identifier[current_minor_version] )= identifier[get_version] ( identifier[db] , identifier[name] )
identifier[package] = identifier[importlib] . identifier[import_module] ( identifier[package] )
identifier[logging] . identifier[debug] ( literal[string] ,
identifier[package] . identifier[__name__] ,
identifier[current_major_version] ,
identifier[current_minor_version] )
identifier[mods] = identifier[get_mods] ( identifier[package] )
identifier[migrations] = identifier[get_new] ( identifier[mods] ,
identifier[current_major_version] ,
identifier[current_minor_version] + literal[int] )
keyword[for] ( identifier[modname] , identifier[major_version] , identifier[minor_version] ) keyword[in] identifier[migrations] :
identifier[mod] = identifier[load_mod] ( identifier[modname] , identifier[package] )
identifier[run_migration] ( identifier[name] , identifier[major_version] , identifier[minor_version] , identifier[db] , identifier[mod] , identifier[conf] )
identifier[logging] . identifier[debug] ( literal[string] , identifier[modname] ) | def migrate(db, name, package, conf={}):
""" Run all migrations that have not been run
Migrations will be run inside a transaction.
:param db: database connection object
:param name: name associated with the migrations
:param package: package that contains the migrations
:param conf: application configuration object
"""
(current_major_version, current_minor_version) = get_version(db, name)
package = importlib.import_module(package)
logging.debug('Migration version for %s is %s.%s', package.__name__, current_major_version, current_minor_version)
mods = get_mods(package)
migrations = get_new(mods, current_major_version, current_minor_version + 1)
for (modname, major_version, minor_version) in migrations:
mod = load_mod(modname, package)
run_migration(name, major_version, minor_version, db, mod, conf)
logging.debug('Finished migrating to %s', modname) # depends on [control=['for'], data=[]] |
def gene_list(self, list_id):
"""Get a gene list from the database."""
return self.query(GeneList).filter_by(list_id=list_id).first() | def function[gene_list, parameter[self, list_id]]:
constant[Get a gene list from the database.]
return[call[call[call[name[self].query, parameter[name[GeneList]]].filter_by, parameter[]].first, parameter[]]] | keyword[def] identifier[gene_list] ( identifier[self] , identifier[list_id] ):
literal[string]
keyword[return] identifier[self] . identifier[query] ( identifier[GeneList] ). identifier[filter_by] ( identifier[list_id] = identifier[list_id] ). identifier[first] () | def gene_list(self, list_id):
"""Get a gene list from the database."""
return self.query(GeneList).filter_by(list_id=list_id).first() |
def make_sentences(self, stream_item):
'assemble Sentence and Token objects'
self.make_label_index(stream_item)
sentences = []
token_num = 0
new_mention_id = 0
for sent_start, sent_end, sent_str in self._sentences(
stream_item.body.clean_visible):
assert isinstance(sent_str, unicode)
sent = Sentence()
sentence_pos = 0
for start, end in self.word_tokenizer.span_tokenize(sent_str):
token_str = sent_str[start:end].encode('utf8')
tok = Token(
token_num=token_num,
token=token_str,
sentence_pos=sentence_pos,
)
tok.offsets[OffsetType.CHARS] = Offset(
type=OffsetType.CHARS,
first=sent_start + start,
length=end - start,
)
# whitespace tokenizer will never get a token
# boundary in the middle of an 'author' label
try:
label = self.label_index.find_le(sent_start + start)
except ValueError:
label = None
if label:
off = label.offsets[OffsetType.CHARS]
if off.first + off.length > sent_start + start:
streamcorpus.add_annotation(tok, label)
logger.debug('adding label to tok: %r has %r',
tok.token, label.target.target_id)
if label in self.label_to_mention_id:
mention_id = self.label_to_mention_id[label]
else:
mention_id = new_mention_id
new_mention_id += 1
self.label_to_mention_id[label] = mention_id
tok.mention_id = mention_id
token_num += 1
sentence_pos += 1
sent.tokens.append(tok)
sentences.append(sent)
return sentences | def function[make_sentences, parameter[self, stream_item]]:
constant[assemble Sentence and Token objects]
call[name[self].make_label_index, parameter[name[stream_item]]]
variable[sentences] assign[=] list[[]]
variable[token_num] assign[=] constant[0]
variable[new_mention_id] assign[=] constant[0]
for taget[tuple[[<ast.Name object at 0x7da20c76d450>, <ast.Name object at 0x7da20c76d0f0>, <ast.Name object at 0x7da20c76f790>]]] in starred[call[name[self]._sentences, parameter[name[stream_item].body.clean_visible]]] begin[:]
assert[call[name[isinstance], parameter[name[sent_str], name[unicode]]]]
variable[sent] assign[=] call[name[Sentence], parameter[]]
variable[sentence_pos] assign[=] constant[0]
for taget[tuple[[<ast.Name object at 0x7da20c76d8d0>, <ast.Name object at 0x7da20c76ce50>]]] in starred[call[name[self].word_tokenizer.span_tokenize, parameter[name[sent_str]]]] begin[:]
variable[token_str] assign[=] call[call[name[sent_str]][<ast.Slice object at 0x7da1b0290310>].encode, parameter[constant[utf8]]]
variable[tok] assign[=] call[name[Token], parameter[]]
call[name[tok].offsets][name[OffsetType].CHARS] assign[=] call[name[Offset], parameter[]]
<ast.Try object at 0x7da20c76c1c0>
if name[label] begin[:]
variable[off] assign[=] call[name[label].offsets][name[OffsetType].CHARS]
if compare[binary_operation[name[off].first + name[off].length] greater[>] binary_operation[name[sent_start] + name[start]]] begin[:]
call[name[streamcorpus].add_annotation, parameter[name[tok], name[label]]]
call[name[logger].debug, parameter[constant[adding label to tok: %r has %r], name[tok].token, name[label].target.target_id]]
if compare[name[label] in name[self].label_to_mention_id] begin[:]
variable[mention_id] assign[=] call[name[self].label_to_mention_id][name[label]]
name[tok].mention_id assign[=] name[mention_id]
<ast.AugAssign object at 0x7da20c76e7a0>
<ast.AugAssign object at 0x7da20c76cc70>
call[name[sent].tokens.append, parameter[name[tok]]]
call[name[sentences].append, parameter[name[sent]]]
return[name[sentences]] | keyword[def] identifier[make_sentences] ( identifier[self] , identifier[stream_item] ):
literal[string]
identifier[self] . identifier[make_label_index] ( identifier[stream_item] )
identifier[sentences] =[]
identifier[token_num] = literal[int]
identifier[new_mention_id] = literal[int]
keyword[for] identifier[sent_start] , identifier[sent_end] , identifier[sent_str] keyword[in] identifier[self] . identifier[_sentences] (
identifier[stream_item] . identifier[body] . identifier[clean_visible] ):
keyword[assert] identifier[isinstance] ( identifier[sent_str] , identifier[unicode] )
identifier[sent] = identifier[Sentence] ()
identifier[sentence_pos] = literal[int]
keyword[for] identifier[start] , identifier[end] keyword[in] identifier[self] . identifier[word_tokenizer] . identifier[span_tokenize] ( identifier[sent_str] ):
identifier[token_str] = identifier[sent_str] [ identifier[start] : identifier[end] ]. identifier[encode] ( literal[string] )
identifier[tok] = identifier[Token] (
identifier[token_num] = identifier[token_num] ,
identifier[token] = identifier[token_str] ,
identifier[sentence_pos] = identifier[sentence_pos] ,
)
identifier[tok] . identifier[offsets] [ identifier[OffsetType] . identifier[CHARS] ]= identifier[Offset] (
identifier[type] = identifier[OffsetType] . identifier[CHARS] ,
identifier[first] = identifier[sent_start] + identifier[start] ,
identifier[length] = identifier[end] - identifier[start] ,
)
keyword[try] :
identifier[label] = identifier[self] . identifier[label_index] . identifier[find_le] ( identifier[sent_start] + identifier[start] )
keyword[except] identifier[ValueError] :
identifier[label] = keyword[None]
keyword[if] identifier[label] :
identifier[off] = identifier[label] . identifier[offsets] [ identifier[OffsetType] . identifier[CHARS] ]
keyword[if] identifier[off] . identifier[first] + identifier[off] . identifier[length] > identifier[sent_start] + identifier[start] :
identifier[streamcorpus] . identifier[add_annotation] ( identifier[tok] , identifier[label] )
identifier[logger] . identifier[debug] ( literal[string] ,
identifier[tok] . identifier[token] , identifier[label] . identifier[target] . identifier[target_id] )
keyword[if] identifier[label] keyword[in] identifier[self] . identifier[label_to_mention_id] :
identifier[mention_id] = identifier[self] . identifier[label_to_mention_id] [ identifier[label] ]
keyword[else] :
identifier[mention_id] = identifier[new_mention_id]
identifier[new_mention_id] += literal[int]
identifier[self] . identifier[label_to_mention_id] [ identifier[label] ]= identifier[mention_id]
identifier[tok] . identifier[mention_id] = identifier[mention_id]
identifier[token_num] += literal[int]
identifier[sentence_pos] += literal[int]
identifier[sent] . identifier[tokens] . identifier[append] ( identifier[tok] )
identifier[sentences] . identifier[append] ( identifier[sent] )
keyword[return] identifier[sentences] | def make_sentences(self, stream_item):
"""assemble Sentence and Token objects"""
self.make_label_index(stream_item)
sentences = []
token_num = 0
new_mention_id = 0
for (sent_start, sent_end, sent_str) in self._sentences(stream_item.body.clean_visible):
assert isinstance(sent_str, unicode)
sent = Sentence()
sentence_pos = 0
for (start, end) in self.word_tokenizer.span_tokenize(sent_str):
token_str = sent_str[start:end].encode('utf8')
tok = Token(token_num=token_num, token=token_str, sentence_pos=sentence_pos)
tok.offsets[OffsetType.CHARS] = Offset(type=OffsetType.CHARS, first=sent_start + start, length=end - start)
# whitespace tokenizer will never get a token
# boundary in the middle of an 'author' label
try:
label = self.label_index.find_le(sent_start + start) # depends on [control=['try'], data=[]]
except ValueError:
label = None # depends on [control=['except'], data=[]]
if label:
off = label.offsets[OffsetType.CHARS]
if off.first + off.length > sent_start + start:
streamcorpus.add_annotation(tok, label)
logger.debug('adding label to tok: %r has %r', tok.token, label.target.target_id)
if label in self.label_to_mention_id:
mention_id = self.label_to_mention_id[label] # depends on [control=['if'], data=['label']]
else:
mention_id = new_mention_id
new_mention_id += 1
self.label_to_mention_id[label] = mention_id
tok.mention_id = mention_id # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
token_num += 1
sentence_pos += 1
sent.tokens.append(tok) # depends on [control=['for'], data=[]]
sentences.append(sent) # depends on [control=['for'], data=[]]
return sentences |
def get_primary_contributors(self):
    """
    Returns a list of primary contributors, with primary being defined
    as those contributors that have the highest role assigned (in terms
    of priority). Only permitted contributors are returned.
    """
    primary_credits = []
    # Credits without a role option cannot be ranked, so drop them and
    # sort the rest ascending by role priority.
    # NOTE(review): ``credits`` shadows the builtin of the same name;
    # harmless here, but worth renaming on the next real change.
    credits = self.credits.exclude(credit_option=None).order_by(
        'credit_option__role_priority'
    )
    if credits:
        # Ascending order means index 0 holds the top-ranked (primary)
        # role priority; keep every credit that ties with it.
        primary_role_priority = credits[0].credit_option.role_priority
        for credit in credits:
            if credit.credit_option.role_priority == primary_role_priority:
                primary_credits.append(credit)
    contributors = []
    for credit in primary_credits:
        contributor = credit.contributor
        # Only expose contributors flagged as permitted.
        if contributor.is_permitted:
            contributors.append(contributor)
    return contributors | def function[get_primary_contributors, parameter[self]]:
constant[
Returns a list of primary contributors, with primary being defined
as those contributors that have the highest role assigned (in terms
of priority). Only permitted contributors are returned.
]
variable[primary_credits] assign[=] list[[]]
variable[credits] assign[=] call[call[name[self].credits.exclude, parameter[]].order_by, parameter[constant[credit_option__role_priority]]]
if name[credits] begin[:]
variable[primary_role_priority] assign[=] call[name[credits]][constant[0]].credit_option.role_priority
for taget[name[credit]] in starred[name[credits]] begin[:]
if compare[name[credit].credit_option.role_priority equal[==] name[primary_role_priority]] begin[:]
call[name[primary_credits].append, parameter[name[credit]]]
variable[contributors] assign[=] list[[]]
for taget[name[credit]] in starred[name[primary_credits]] begin[:]
variable[contributor] assign[=] name[credit].contributor
if name[contributor].is_permitted begin[:]
call[name[contributors].append, parameter[name[contributor]]]
return[name[contributors]] | keyword[def] identifier[get_primary_contributors] ( identifier[self] ):
literal[string]
identifier[primary_credits] =[]
identifier[credits] = identifier[self] . identifier[credits] . identifier[exclude] ( identifier[credit_option] = keyword[None] ). identifier[order_by] (
literal[string]
)
keyword[if] identifier[credits] :
identifier[primary_role_priority] = identifier[credits] [ literal[int] ]. identifier[credit_option] . identifier[role_priority]
keyword[for] identifier[credit] keyword[in] identifier[credits] :
keyword[if] identifier[credit] . identifier[credit_option] . identifier[role_priority] == identifier[primary_role_priority] :
identifier[primary_credits] . identifier[append] ( identifier[credit] )
identifier[contributors] =[]
keyword[for] identifier[credit] keyword[in] identifier[primary_credits] :
identifier[contributor] = identifier[credit] . identifier[contributor]
keyword[if] identifier[contributor] . identifier[is_permitted] :
identifier[contributors] . identifier[append] ( identifier[contributor] )
keyword[return] identifier[contributors] | def get_primary_contributors(self):
"""
Returns a list of primary contributors, with primary being defined
as those contributors that have the highest role assigned (in terms
of priority). Only permitted contributors are returned.
"""
primary_credits = []
credits = self.credits.exclude(credit_option=None).order_by('credit_option__role_priority')
if credits:
primary_role_priority = credits[0].credit_option.role_priority
for credit in credits:
if credit.credit_option.role_priority == primary_role_priority:
primary_credits.append(credit) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['credit']] # depends on [control=['if'], data=[]]
contributors = []
for credit in primary_credits:
contributor = credit.contributor
if contributor.is_permitted:
contributors.append(contributor) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['credit']]
return contributors |
def suffix(self):
    """The final component's last suffix, if any (including the dot)."""
    name = self.name
    i = name.rfind('.')
    # A suffix needs a dot that is neither the leading character
    # (dotfiles like ".profile" have no suffix) nor the final one
    # (a trailing dot yields no suffix).
    if 0 < i < len(name) - 1:
        return name[i:]
    else:
        return '' | def function[suffix, parameter[self]]:
constant[The final component's last suffix, if any.]
variable[name] assign[=] name[self].name
variable[i] assign[=] call[name[name].rfind, parameter[constant[.]]]
if compare[constant[0] less[<] name[i]] begin[:]
return[call[name[name]][<ast.Slice object at 0x7da2041daf80>]] | keyword[def] identifier[suffix] ( identifier[self] ):
literal[string]
identifier[name] = identifier[self] . identifier[name]
identifier[i] = identifier[name] . identifier[rfind] ( literal[string] )
keyword[if] literal[int] < identifier[i] < identifier[len] ( identifier[name] )- literal[int] :
keyword[return] identifier[name] [ identifier[i] :]
keyword[else] :
keyword[return] literal[string] | def suffix(self):
"""The final component's last suffix, if any."""
name = self.name
i = name.rfind('.')
if 0 < i < len(name) - 1:
return name[i:] # depends on [control=['if'], data=['i']]
else:
return '' |
def look_at(self, vec=None, pos=None):
    """
    Look at a specific point.

    One of ``vec`` or ``pos`` must be supplied; ``vec`` takes
    precedence when both are given.

    :param vec: Vector3 position
    :param pos: python list [x, y, z]
    :return: Camera matrix
    :raises ValueError: if neither ``vec`` nor ``pos`` is provided
    """
    # Bug fix: the original tested ``pos is None`` and then built
    # Vector3(pos) from the *missing* value (and ignored a supplied
    # ``pos``). Convert only when a list position was actually given
    # and no vector was.
    if vec is None and pos is not None:
        vec = Vector3(pos)
    if vec is None:
        raise ValueError("vector or pos must be set")
return self._gl_look_at(self.position, vec, self._up) | def function[look_at, parameter[self, vec, pos]]:
constant[
Look at a specific point
:param vec: Vector3 position
:param pos: python list [x, y, x]
:return: Camera matrix
]
if compare[name[pos] is constant[None]] begin[:]
variable[vec] assign[=] call[name[Vector3], parameter[name[pos]]]
if compare[name[vec] is constant[None]] begin[:]
<ast.Raise object at 0x7da18ede7a30>
return[call[name[self]._gl_look_at, parameter[name[self].position, name[vec], name[self]._up]]] | keyword[def] identifier[look_at] ( identifier[self] , identifier[vec] = keyword[None] , identifier[pos] = keyword[None] ):
literal[string]
keyword[if] identifier[pos] keyword[is] keyword[None] :
identifier[vec] = identifier[Vector3] ( identifier[pos] )
keyword[if] identifier[vec] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] identifier[self] . identifier[_gl_look_at] ( identifier[self] . identifier[position] , identifier[vec] , identifier[self] . identifier[_up] ) | def look_at(self, vec=None, pos=None):
"""
Look at a specific point
:param vec: Vector3 position
:param pos: python list [x, y, x]
:return: Camera matrix
"""
if pos is None:
vec = Vector3(pos) # depends on [control=['if'], data=['pos']]
if vec is None:
raise ValueError('vector or pos must be set') # depends on [control=['if'], data=[]]
return self._gl_look_at(self.position, vec, self._up) |
def get_projects_list(self):
    """ Get projects list """
    # Returns [] on network failure instead of propagating, so callers
    # can iterate the result unconditionally.
    try:
        result = self._request('/getprojectslist/')
        # Each element of the API response is a dict of project fields
        # consumed as keyword arguments by TildaProject.
        return [TildaProject(**p) for p in result]
    except NetworkError:
        return [] | def function[get_projects_list, parameter[self]]:
constant[ Get projects list ]
<ast.Try object at 0x7da20c795600> | keyword[def] identifier[get_projects_list] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[result] = identifier[self] . identifier[_request] ( literal[string] )
keyword[return] [ identifier[TildaProject] (** identifier[p] ) keyword[for] identifier[p] keyword[in] identifier[result] ]
keyword[except] identifier[NetworkError] :
keyword[return] [] | def get_projects_list(self):
""" Get projects list """
try:
result = self._request('/getprojectslist/')
return [TildaProject(**p) for p in result] # depends on [control=['try'], data=[]]
except NetworkError:
return [] # depends on [control=['except'], data=[]] |
def _set_subject_alt(self, name, values):
    """
    Replaces all existing asn1crypto.x509.GeneralName objects of the choice
    represented by the name parameter with the values
    :param name:
        A unicode string of the choice name of the x509.GeneralName object
    :param values:
        A list of unicode strings to use as the values for the new
        x509.GeneralName objects
    """
    if self._subject_alt_name is not None:
        # Keep every general name of a *different* choice; the ones
        # matching ``name`` are replaced wholesale below.
        filtered_general_names = []
        for general_name in self._subject_alt_name:
            if general_name.name != name:
                filtered_general_names.append(general_name)
        self._subject_alt_name = x509.GeneralNames(filtered_general_names)
    else:
        self._subject_alt_name = x509.GeneralNames()
    if values is not None:
        for value in values:
            new_general_name = x509.GeneralName(name=name, value=value)
            self._subject_alt_name.append(new_general_name)
    # Normalize "no names at all" back to None so downstream code can
    # test the attribute for presence instead of emptiness.
    if len(self._subject_alt_name) == 0:
        self._subject_alt_name = None | def function[_set_subject_alt, parameter[self, name, values]]:
constant[
Replaces all existing asn1crypto.x509.GeneralName objects of the choice
represented by the name parameter with the values
:param name:
A unicode string of the choice name of the x509.GeneralName object
:param values:
A list of unicode strings to use as the values for the new
x509.GeneralName objects
]
if compare[name[self]._subject_alt_name is_not constant[None]] begin[:]
variable[filtered_general_names] assign[=] list[[]]
for taget[name[general_name]] in starred[name[self]._subject_alt_name] begin[:]
if compare[name[general_name].name not_equal[!=] name[name]] begin[:]
call[name[filtered_general_names].append, parameter[name[general_name]]]
name[self]._subject_alt_name assign[=] call[name[x509].GeneralNames, parameter[name[filtered_general_names]]]
if compare[name[values] is_not constant[None]] begin[:]
for taget[name[value]] in starred[name[values]] begin[:]
variable[new_general_name] assign[=] call[name[x509].GeneralName, parameter[]]
call[name[self]._subject_alt_name.append, parameter[name[new_general_name]]]
if compare[call[name[len], parameter[name[self]._subject_alt_name]] equal[==] constant[0]] begin[:]
name[self]._subject_alt_name assign[=] constant[None] | keyword[def] identifier[_set_subject_alt] ( identifier[self] , identifier[name] , identifier[values] ):
literal[string]
keyword[if] identifier[self] . identifier[_subject_alt_name] keyword[is] keyword[not] keyword[None] :
identifier[filtered_general_names] =[]
keyword[for] identifier[general_name] keyword[in] identifier[self] . identifier[_subject_alt_name] :
keyword[if] identifier[general_name] . identifier[name] != identifier[name] :
identifier[filtered_general_names] . identifier[append] ( identifier[general_name] )
identifier[self] . identifier[_subject_alt_name] = identifier[x509] . identifier[GeneralNames] ( identifier[filtered_general_names] )
keyword[else] :
identifier[self] . identifier[_subject_alt_name] = identifier[x509] . identifier[GeneralNames] ()
keyword[if] identifier[values] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[value] keyword[in] identifier[values] :
identifier[new_general_name] = identifier[x509] . identifier[GeneralName] ( identifier[name] = identifier[name] , identifier[value] = identifier[value] )
identifier[self] . identifier[_subject_alt_name] . identifier[append] ( identifier[new_general_name] )
keyword[if] identifier[len] ( identifier[self] . identifier[_subject_alt_name] )== literal[int] :
identifier[self] . identifier[_subject_alt_name] = keyword[None] | def _set_subject_alt(self, name, values):
"""
Replaces all existing asn1crypto.x509.GeneralName objects of the choice
represented by the name parameter with the values
:param name:
A unicode string of the choice name of the x509.GeneralName object
:param values:
A list of unicode strings to use as the values for the new
x509.GeneralName objects
"""
if self._subject_alt_name is not None:
filtered_general_names = []
for general_name in self._subject_alt_name:
if general_name.name != name:
filtered_general_names.append(general_name) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['general_name']]
self._subject_alt_name = x509.GeneralNames(filtered_general_names) # depends on [control=['if'], data=[]]
else:
self._subject_alt_name = x509.GeneralNames()
if values is not None:
for value in values:
new_general_name = x509.GeneralName(name=name, value=value)
self._subject_alt_name.append(new_general_name) # depends on [control=['for'], data=['value']] # depends on [control=['if'], data=['values']]
if len(self._subject_alt_name) == 0:
self._subject_alt_name = None # depends on [control=['if'], data=[]] |
def add_validation_method(cls, method):
    """
    Extend validation of Node by adding a function to the _additional_validation list.
    The additional validation function will be called by the clean method
    :method function: function to be added to _additional_validation
    """
    # NOTE(review): ``func_name`` is the Python 2 spelling; on Python 3
    # this would need ``method.__name__`` — confirm the supported runtime.
    method_name = method.func_name
    # add method name to additional validation method list
    cls._additional_validation.append(method_name)
    # add method to this class
    setattr(cls, method_name, method) | def function[add_validation_method, parameter[cls, method]]:
constant[
Extend validation of Node by adding a function to the _additional_validation list.
The additional validation function will be called by the clean method
:method function: function to be added to _additional_validation
]
variable[method_name] assign[=] name[method].func_name
call[name[cls]._additional_validation.append, parameter[name[method_name]]]
call[name[setattr], parameter[name[cls], name[method_name], name[method]]] | keyword[def] identifier[add_validation_method] ( identifier[cls] , identifier[method] ):
literal[string]
identifier[method_name] = identifier[method] . identifier[func_name]
identifier[cls] . identifier[_additional_validation] . identifier[append] ( identifier[method_name] )
identifier[setattr] ( identifier[cls] , identifier[method_name] , identifier[method] ) | def add_validation_method(cls, method):
"""
Extend validation of Node by adding a function to the _additional_validation list.
The additional validation function will be called by the clean method
:method function: function to be added to _additional_validation
"""
method_name = method.func_name
# add method name to additional validation method list
cls._additional_validation.append(method_name)
# add method to this class
setattr(cls, method_name, method) |
def servo_config(self, pin, min_pulse=544, max_pulse=2400, angle=0):
    """
    Configure a pin as servo with min_pulse, max_pulse and first angle.
    ``min_pulse`` and ``max_pulse`` default to the arduino defaults.

    :param pin: digital pin number driving the servo
    :param min_pulse: pulse width for the minimum servo position
    :param max_pulse: pulse width for the maximum servo position
    :param angle: initial angle written after configuration
    :raises IOError: if ``pin`` is out of range or marked UNAVAILABLE
    """
    # Bug fix: the range check used ``>``, which let pin == len(self.digital)
    # through and crash with IndexError on the lookup below; ``>=`` rejects
    # it with the intended IOError instead.
    if pin >= len(self.digital) or self.digital[pin].mode == UNAVAILABLE:
        raise IOError("Pin {0} is not a valid servo pin".format(pin))
    # SERVO_CONFIG sysex payload: pin number, then min/max pulse widths
    # each encoded as two 7-bit bytes.
    data = bytearray([pin])
    data += to_two_bytes(min_pulse)
    data += to_two_bytes(max_pulse)
    self.send_sysex(SERVO_CONFIG, data)
    # set pin._mode to SERVO so that it sends analog messages
    # don't set pin.mode as that calls this method
    self.digital[pin]._mode = SERVO
self.digital[pin].write(angle) | def function[servo_config, parameter[self, pin, min_pulse, max_pulse, angle]]:
constant[
Configure a pin as servo with min_pulse, max_pulse and first angle.
``min_pulse`` and ``max_pulse`` default to the arduino defaults.
]
if <ast.BoolOp object at 0x7da1b1e64430> begin[:]
<ast.Raise object at 0x7da1b1e67a30>
variable[data] assign[=] call[name[bytearray], parameter[list[[<ast.Name object at 0x7da1b1eb49a0>]]]]
<ast.AugAssign object at 0x7da1b1eb4040>
<ast.AugAssign object at 0x7da1b1eb40a0>
call[name[self].send_sysex, parameter[name[SERVO_CONFIG], name[data]]]
call[name[self].digital][name[pin]]._mode assign[=] name[SERVO]
call[call[name[self].digital][name[pin]].write, parameter[name[angle]]] | keyword[def] identifier[servo_config] ( identifier[self] , identifier[pin] , identifier[min_pulse] = literal[int] , identifier[max_pulse] = literal[int] , identifier[angle] = literal[int] ):
literal[string]
keyword[if] identifier[pin] > identifier[len] ( identifier[self] . identifier[digital] ) keyword[or] identifier[self] . identifier[digital] [ identifier[pin] ]. identifier[mode] == identifier[UNAVAILABLE] :
keyword[raise] identifier[IOError] ( literal[string] . identifier[format] ( identifier[pin] ))
identifier[data] = identifier[bytearray] ([ identifier[pin] ])
identifier[data] += identifier[to_two_bytes] ( identifier[min_pulse] )
identifier[data] += identifier[to_two_bytes] ( identifier[max_pulse] )
identifier[self] . identifier[send_sysex] ( identifier[SERVO_CONFIG] , identifier[data] )
identifier[self] . identifier[digital] [ identifier[pin] ]. identifier[_mode] = identifier[SERVO]
identifier[self] . identifier[digital] [ identifier[pin] ]. identifier[write] ( identifier[angle] ) | def servo_config(self, pin, min_pulse=544, max_pulse=2400, angle=0):
"""
Configure a pin as servo with min_pulse, max_pulse and first angle.
``min_pulse`` and ``max_pulse`` default to the arduino defaults.
"""
if pin > len(self.digital) or self.digital[pin].mode == UNAVAILABLE:
raise IOError('Pin {0} is not a valid servo pin'.format(pin)) # depends on [control=['if'], data=[]]
data = bytearray([pin])
data += to_two_bytes(min_pulse)
data += to_two_bytes(max_pulse)
self.send_sysex(SERVO_CONFIG, data)
# set pin._mode to SERVO so that it sends analog messages
# don't set pin.mode as that calls this method
self.digital[pin]._mode = SERVO
self.digital[pin].write(angle) |
def start_pipeline(conf, args):
    """Start a pipeline"""
    # Resolve the target host instance and build its pipeline API URL.
    host = conf.config['instances'][args.host_instance]
    url = api.build_pipeline_url(build_instance_url(host))
    # Basic-auth credentials stored per instance in the creds config.
    auth = tuple([conf.creds['instances'][args.host_instance]['user'], conf.creds['instances'][args.host_instance]['pass']])
    runtime_parameters = {}
    # SSL verification can be disabled per host; defaults to enabled.
    verify_ssl = host.get('verify_ssl', True)
    if args.runtime_parameters:
        # Runtime parameters arrive from the CLI as a JSON string.
        runtime_parameters = json.loads(args.runtime_parameters)
    start_result = api.start_pipeline(url, args.pipeline_id, auth, verify_ssl, runtime_parameters)
    return start_result | def function[start_pipeline, parameter[conf, args]]:
constant[Start a pipeline]
variable[host] assign[=] call[call[name[conf].config][constant[instances]]][name[args].host_instance]
variable[url] assign[=] call[name[api].build_pipeline_url, parameter[call[name[build_instance_url], parameter[name[host]]]]]
variable[auth] assign[=] call[name[tuple], parameter[list[[<ast.Subscript object at 0x7da1b2592da0>, <ast.Subscript object at 0x7da1b25925c0>]]]]
variable[runtime_parameters] assign[=] dictionary[[], []]
variable[verify_ssl] assign[=] call[name[host].get, parameter[constant[verify_ssl], constant[True]]]
if name[args].runtime_parameters begin[:]
variable[runtime_parameters] assign[=] call[name[json].loads, parameter[name[args].runtime_parameters]]
variable[start_result] assign[=] call[name[api].start_pipeline, parameter[name[url], name[args].pipeline_id, name[auth], name[verify_ssl], name[runtime_parameters]]]
return[name[start_result]] | keyword[def] identifier[start_pipeline] ( identifier[conf] , identifier[args] ):
literal[string]
identifier[host] = identifier[conf] . identifier[config] [ literal[string] ][ identifier[args] . identifier[host_instance] ]
identifier[url] = identifier[api] . identifier[build_pipeline_url] ( identifier[build_instance_url] ( identifier[host] ))
identifier[auth] = identifier[tuple] ([ identifier[conf] . identifier[creds] [ literal[string] ][ identifier[args] . identifier[host_instance] ][ literal[string] ], identifier[conf] . identifier[creds] [ literal[string] ][ identifier[args] . identifier[host_instance] ][ literal[string] ]])
identifier[runtime_parameters] ={}
identifier[verify_ssl] = identifier[host] . identifier[get] ( literal[string] , keyword[True] )
keyword[if] identifier[args] . identifier[runtime_parameters] :
identifier[runtime_parameters] = identifier[json] . identifier[loads] ( identifier[args] . identifier[runtime_parameters] )
identifier[start_result] = identifier[api] . identifier[start_pipeline] ( identifier[url] , identifier[args] . identifier[pipeline_id] , identifier[auth] , identifier[verify_ssl] , identifier[runtime_parameters] )
keyword[return] identifier[start_result] | def start_pipeline(conf, args):
"""Start a pipeline"""
host = conf.config['instances'][args.host_instance]
url = api.build_pipeline_url(build_instance_url(host))
auth = tuple([conf.creds['instances'][args.host_instance]['user'], conf.creds['instances'][args.host_instance]['pass']])
runtime_parameters = {}
verify_ssl = host.get('verify_ssl', True)
if args.runtime_parameters:
runtime_parameters = json.loads(args.runtime_parameters) # depends on [control=['if'], data=[]]
start_result = api.start_pipeline(url, args.pipeline_id, auth, verify_ssl, runtime_parameters)
return start_result |
def set_stream_color(stream, disabled):
    """
    Remember what our original streams were so that we
    can colorize them separately, which colorama doesn't
    seem to natively support.
    """
    original_stdout = sys.stdout
    original_stderr = sys.stderr
    # colorama's init() replaces both sys.stdout and sys.stderr;
    # strip=True removes color codes entirely.
    init(strip=disabled)
    if stream != original_stdout:
        # Target is not stdout: restore stdout, wrap only stderr.
        sys.stdout = original_stdout
        sys.stderr = BinaryStreamWrapper(stream, sys.stderr)
    if stream != original_stderr:
        # Target is not stderr: restore stderr, wrap only stdout.
        # NOTE(review): when ``stream`` is neither original stream, both
        # branches run and this restoration overwrites the stderr wrapper
        # installed above — confirm that is intended.
        sys.stderr = original_stderr
        sys.stdout = BinaryStreamWrapper(stream, sys.stdout) | def function[set_stream_color, parameter[stream, disabled]]:
constant[
Remember what our original streams were so that we
can colorize them separately, which colorama doesn't
seem to natively support.
]
variable[original_stdout] assign[=] name[sys].stdout
variable[original_stderr] assign[=] name[sys].stderr
call[name[init], parameter[]]
if compare[name[stream] not_equal[!=] name[original_stdout]] begin[:]
name[sys].stdout assign[=] name[original_stdout]
name[sys].stderr assign[=] call[name[BinaryStreamWrapper], parameter[name[stream], name[sys].stderr]]
if compare[name[stream] not_equal[!=] name[original_stderr]] begin[:]
name[sys].stderr assign[=] name[original_stderr]
name[sys].stdout assign[=] call[name[BinaryStreamWrapper], parameter[name[stream], name[sys].stdout]] | keyword[def] identifier[set_stream_color] ( identifier[stream] , identifier[disabled] ):
literal[string]
identifier[original_stdout] = identifier[sys] . identifier[stdout]
identifier[original_stderr] = identifier[sys] . identifier[stderr]
identifier[init] ( identifier[strip] = identifier[disabled] )
keyword[if] identifier[stream] != identifier[original_stdout] :
identifier[sys] . identifier[stdout] = identifier[original_stdout]
identifier[sys] . identifier[stderr] = identifier[BinaryStreamWrapper] ( identifier[stream] , identifier[sys] . identifier[stderr] )
keyword[if] identifier[stream] != identifier[original_stderr] :
identifier[sys] . identifier[stderr] = identifier[original_stderr]
identifier[sys] . identifier[stdout] = identifier[BinaryStreamWrapper] ( identifier[stream] , identifier[sys] . identifier[stdout] ) | def set_stream_color(stream, disabled):
"""
Remember what our original streams were so that we
can colorize them separately, which colorama doesn't
seem to natively support.
"""
original_stdout = sys.stdout
original_stderr = sys.stderr
init(strip=disabled)
if stream != original_stdout:
sys.stdout = original_stdout
sys.stderr = BinaryStreamWrapper(stream, sys.stderr) # depends on [control=['if'], data=['stream', 'original_stdout']]
if stream != original_stderr:
sys.stderr = original_stderr
sys.stdout = BinaryStreamWrapper(stream, sys.stdout) # depends on [control=['if'], data=['stream', 'original_stderr']] |
def reprompt(text=None, ssml=None, attributes=None):
    """Convenience method to save a little bit of typing for the common case of
    reprompting the user. Simply calls :py:func:`alexandra.util.respond` with
    the given arguments and holds the session open.
    One of either the `text` or `ssml` should be provided if any
    speech output is desired.
    :param text: Plain text speech output
    :param ssml: Speech output in SSML format
    :param attributes: Dictionary of attributes to store in the current session
    """
    # end_session=False is what keeps the session open so the user can
    # answer the reprompt.
    return respond(
        reprompt_text=text,
        reprompt_ssml=ssml,
        attributes=attributes,
        end_session=False
    ) | def function[reprompt, parameter[text, ssml, attributes]]:
constant[Convenience method to save a little bit of typing for the common case of
reprompting the user. Simply calls :py:func:`alexandra.util.respond` with
the given arguments and holds the session open.
One of either the `text` or `ssml` should be provided if any
speech output is desired.
:param text: Plain text speech output
:param ssml: Speech output in SSML format
:param attributes: Dictionary of attributes to store in the current session
]
return[call[name[respond], parameter[]]] | keyword[def] identifier[reprompt] ( identifier[text] = keyword[None] , identifier[ssml] = keyword[None] , identifier[attributes] = keyword[None] ):
literal[string]
keyword[return] identifier[respond] (
identifier[reprompt_text] = identifier[text] ,
identifier[reprompt_ssml] = identifier[ssml] ,
identifier[attributes] = identifier[attributes] ,
identifier[end_session] = keyword[False]
) | def reprompt(text=None, ssml=None, attributes=None):
"""Convenience method to save a little bit of typing for the common case of
reprompting the user. Simply calls :py:func:`alexandra.util.respond` with
the given arguments and holds the session open.
One of either the `text` or `ssml` should be provided if any
speech output is desired.
:param text: Plain text speech output
:param ssml: Speech output in SSML format
:param attributes: Dictionary of attributes to store in the current session
"""
return respond(reprompt_text=text, reprompt_ssml=ssml, attributes=attributes, end_session=False) |
def parse(self, string, root=None):
    """
    Parses a string to handle escaped tags and retrieve phrases.
    This method works recursively to parse nested tags. When escaped
    tags are found, those are removed from the string. Also argument
    sequences are removed from the string. The string returned can
    thus be quite different from the string passed.
    Arguments:
        string (str): The string to parse.
        root (Phrase): If in a recursive call, the root/parent phrase.
    Returns:
        For one, the escaped string (without escape characters and
        phrase arguments). For the other, it depends on the stack-depth.
        If this is the lowest recursion depth/level (i.e. the stack
        call resulting from the first function call in self.beautify()),
        it will return a list of phrases. For higher stack levels (
        i.e. resulting from recursive function calls from with
        self.parse(), for nested phrases), it returns exactly one
        Phrase instance.
    Raises:
        errors.ParseError: If no closing tag could be
            found for an opening tag.
    """
    phrases = []
    meta = self.meta.search(string)
    while meta:
        # Save some function calls
        pos = meta.start()
        if meta.group() == "<":
            # Opening tag: open_phrase either recurses to build the
            # child phrase or unescapes an escaped tag in place.
            string, child, meta = self.open_phrase(string, pos)
            if child and root:
                root.nested.append(child)
            elif child:
                phrases.append(child)
            # else it was escaped (+ new meta)
            continue
        elif root:
            if meta.group() == "(":
                # Possible "(...)" argument sequence right after a tag;
                # only treated as arguments if the very next meta is ")".
                meta = self.meta.search(string, pos + 1)
                if meta.group() == ")":
                    string, root, meta = self.handle_arguments(string,
                                                               root,
                                                               pos,
                                                               meta.start())
                    continue
            elif meta.group() == ">":
                # Closing tag for the phrase currently being parsed.
                string, phrase, meta = self.close_phrase(string,
                                                         root,
                                                         pos)
                if phrase:
                    return string, phrase
                # else was escaped (+ new meta)
                continue
        # Any other meta character (or an unmatched "(") gets escaped.
        string, meta = self.escape_meta(string, pos)
    if not root:
        return string, phrases
    # If this is not the first stack-depth the function should
    # have returned upon finding a closing tag,
    # i.e. we should never have gotten here.
    word = re.search(r"([\w\s]+)(?![\d]*>[\w\s]+>)", string)
    what = "No closing tag found for opening tag"
    if word:
        what += " after expression '{0}'".format(word.group())
    raise errors.ParseError(what + "!") | def function[parse, parameter[self, string, root]]:
constant[
Parses a string to handle escaped tags and retrieve phrases.
This method works recursively to parse nested tags. When escaped
tags are found, those are removed from the string. Also argument
sequences are removed from the string. The string returned can
thus be quite different from the string passed.
Arguments:
string (str): The string to parse.
root (Phrase): If in a recursive call, the root/parent phrase.
Returns:
For one, the escaped string (without escape characters and
phrase arguments). For the other, it depends on the stack-depth.
If this is the lowest recursion depth/level (i.e. the stack
call resulting from the first function call in self.beautify()),
it will return a list of phrases. For higher stack levels (
i.e. resulting from recursive function calls from with
self.parse(), for nested phrases), it returns exactly one
Phrase instance.
Raises:
errors.ParseError: If no closing tag could be
found for an opening tag.
]
variable[phrases] assign[=] list[[]]
variable[meta] assign[=] call[name[self].meta.search, parameter[name[string]]]
while name[meta] begin[:]
variable[pos] assign[=] call[name[meta].start, parameter[]]
if compare[call[name[meta].group, parameter[]] equal[==] constant[<]] begin[:]
<ast.Tuple object at 0x7da1b0a81cc0> assign[=] call[name[self].open_phrase, parameter[name[string], name[pos]]]
if <ast.BoolOp object at 0x7da1b0a806a0> begin[:]
call[name[root].nested.append, parameter[name[child]]]
continue
<ast.Tuple object at 0x7da1b0a82cb0> assign[=] call[name[self].escape_meta, parameter[name[string], name[pos]]]
if <ast.UnaryOp object at 0x7da18dc9a740> begin[:]
return[tuple[[<ast.Name object at 0x7da18dc9a950>, <ast.Name object at 0x7da18dc98c10>]]]
variable[word] assign[=] call[name[re].search, parameter[constant[([\w\s]+)(?![\d]*>[\w\s]+>)], name[string]]]
variable[what] assign[=] constant[No closing tag found for opening tag]
if name[word] begin[:]
<ast.AugAssign object at 0x7da18dc9acb0>
<ast.Raise object at 0x7da18dc9a6e0> | keyword[def] identifier[parse] ( identifier[self] , identifier[string] , identifier[root] = keyword[None] ):
literal[string]
identifier[phrases] =[]
identifier[meta] = identifier[self] . identifier[meta] . identifier[search] ( identifier[string] )
keyword[while] identifier[meta] :
identifier[pos] = identifier[meta] . identifier[start] ()
keyword[if] identifier[meta] . identifier[group] ()== literal[string] :
identifier[string] , identifier[child] , identifier[meta] = identifier[self] . identifier[open_phrase] ( identifier[string] , identifier[pos] )
keyword[if] identifier[child] keyword[and] identifier[root] :
identifier[root] . identifier[nested] . identifier[append] ( identifier[child] )
keyword[elif] identifier[child] :
identifier[phrases] . identifier[append] ( identifier[child] )
keyword[continue]
keyword[elif] identifier[root] :
keyword[if] identifier[meta] . identifier[group] ()== literal[string] :
identifier[meta] = identifier[self] . identifier[meta] . identifier[search] ( identifier[string] , identifier[pos] + literal[int] )
keyword[if] identifier[meta] . identifier[group] ()== literal[string] :
identifier[string] , identifier[root] , identifier[meta] = identifier[self] . identifier[handle_arguments] ( identifier[string] ,
identifier[root] ,
identifier[pos] ,
identifier[meta] . identifier[start] ())
keyword[continue]
keyword[elif] identifier[meta] . identifier[group] ()== literal[string] :
identifier[string] , identifier[phrase] , identifier[meta] = identifier[self] . identifier[close_phrase] ( identifier[string] ,
identifier[root] ,
identifier[pos] )
keyword[if] identifier[phrase] :
keyword[return] identifier[string] , identifier[phrase]
keyword[continue]
identifier[string] , identifier[meta] = identifier[self] . identifier[escape_meta] ( identifier[string] , identifier[pos] )
keyword[if] keyword[not] identifier[root] :
keyword[return] identifier[string] , identifier[phrases]
identifier[word] = identifier[re] . identifier[search] ( literal[string] , identifier[string] )
identifier[what] = literal[string]
keyword[if] identifier[word] :
identifier[what] += literal[string] . identifier[format] ( identifier[word] . identifier[group] ())
keyword[raise] identifier[errors] . identifier[ParseError] ( identifier[what] + literal[string] ) | def parse(self, string, root=None):
"""
Parses a string to handle escaped tags and retrieve phrases.
This method works recursively to parse nested tags. When escaped
tags are found, those are removed from the string. Also argument
sequences are removed from the string. The string returned can
thus be quite different from the string passed.
Arguments:
string (str): The string to parse.
root (Phrase): If in a recursive call, the root/parent phrase.
Returns:
For one, the escaped string (without escape characters and
phrase arguments). For the other, it depends on the stack-depth.
If this is the lowest recursion depth/level (i.e. the stack
call resulting from the first function call in self.beautify()),
it will return a list of phrases. For higher stack levels (
i.e. resulting from recursive function calls from with
self.parse(), for nested phrases), it returns exactly one
Phrase instance.
Raises:
errors.ParseError: If no closing tag could be
found for an opening tag.
"""
phrases = []
meta = self.meta.search(string)
while meta: # Save some function calls
pos = meta.start()
if meta.group() == '<':
(string, child, meta) = self.open_phrase(string, pos)
if child and root:
root.nested.append(child) # depends on [control=['if'], data=[]]
elif child:
phrases.append(child) # depends on [control=['if'], data=[]] # else it was escaped (+ new meta)
continue # depends on [control=['if'], data=[]]
elif root:
if meta.group() == '(':
meta = self.meta.search(string, pos + 1)
if meta.group() == ')':
(string, root, meta) = self.handle_arguments(string, root, pos, meta.start())
continue # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif meta.group() == '>':
(string, phrase, meta) = self.close_phrase(string, root, pos)
if phrase:
return (string, phrase) # depends on [control=['if'], data=[]] # else was escaped (+ new meta)
continue # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
(string, meta) = self.escape_meta(string, pos) # depends on [control=['while'], data=[]]
if not root:
return (string, phrases) # depends on [control=['if'], data=[]] # If this is not the first stack-depth the function should
# have returned upon finding a closing tag,
# i.e. we should never have gotten here.
word = re.search('([\\w\\s]+)(?![\\d]*>[\\w\\s]+>)', string)
what = 'No closing tag found for opening tag'
if word:
what += " after expression '{0}'".format(word.group()) # depends on [control=['if'], data=[]]
raise errors.ParseError(what + '!') |
def apply_entities_as_html(text, entities):
"""
Format text as HTML. Also take care of escaping special characters.
Returned value can be passed to :meth:`.Bot.sendMessage` with appropriate
``parse_mode``.
:param text:
plain text
:param entities:
a list of `MessageEntity <https://core.telegram.org/bots/api#messageentity>`_ objects
"""
escapes = {'<': '<',
'>': '>',
'&': '&',}
formatters = {'bold': lambda s,e: '<b>'+s+'</b>',
'italic': lambda s,e: '<i>'+s+'</i>',
'text_link': lambda s,e: '<a href="'+e['url']+'">'+s+'</a>',
'text_mention': lambda s,e: '<a href="tg://user?id='+str(e['user']['id'])+'">'+s+'</a>',
'code': lambda s,e: '<code>'+s+'</code>',
'pre': lambda s,e: '<pre>'+s+'</pre>'}
return _apply_entities(text, entities, escapes, formatters) | def function[apply_entities_as_html, parameter[text, entities]]:
constant[
Format text as HTML. Also take care of escaping special characters.
Returned value can be passed to :meth:`.Bot.sendMessage` with appropriate
``parse_mode``.
:param text:
plain text
:param entities:
a list of `MessageEntity <https://core.telegram.org/bots/api#messageentity>`_ objects
]
variable[escapes] assign[=] dictionary[[<ast.Constant object at 0x7da1b1a57340>, <ast.Constant object at 0x7da1b1a56080>, <ast.Constant object at 0x7da1b1a54be0>], [<ast.Constant object at 0x7da1b1a56d10>, <ast.Constant object at 0x7da1b1a56c20>, <ast.Constant object at 0x7da1b1a54520>]]
variable[formatters] assign[=] dictionary[[<ast.Constant object at 0x7da1b1a57ca0>, <ast.Constant object at 0x7da1b1a57ac0>, <ast.Constant object at 0x7da1b1a54e20>, <ast.Constant object at 0x7da1b1a561a0>, <ast.Constant object at 0x7da1b1a565c0>, <ast.Constant object at 0x7da1b1a56b90>], [<ast.Lambda object at 0x7da1b1a57850>, <ast.Lambda object at 0x7da1b1a574f0>, <ast.Lambda object at 0x7da1b1a54cd0>, <ast.Lambda object at 0x7da1b1a549d0>, <ast.Lambda object at 0x7da1b1a54d90>, <ast.Lambda object at 0x7da1b1a54eb0>]]
return[call[name[_apply_entities], parameter[name[text], name[entities], name[escapes], name[formatters]]]] | keyword[def] identifier[apply_entities_as_html] ( identifier[text] , identifier[entities] ):
literal[string]
identifier[escapes] ={ literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,}
identifier[formatters] ={ literal[string] : keyword[lambda] identifier[s] , identifier[e] : literal[string] + identifier[s] + literal[string] ,
literal[string] : keyword[lambda] identifier[s] , identifier[e] : literal[string] + identifier[s] + literal[string] ,
literal[string] : keyword[lambda] identifier[s] , identifier[e] : literal[string] + identifier[e] [ literal[string] ]+ literal[string] + identifier[s] + literal[string] ,
literal[string] : keyword[lambda] identifier[s] , identifier[e] : literal[string] + identifier[str] ( identifier[e] [ literal[string] ][ literal[string] ])+ literal[string] + identifier[s] + literal[string] ,
literal[string] : keyword[lambda] identifier[s] , identifier[e] : literal[string] + identifier[s] + literal[string] ,
literal[string] : keyword[lambda] identifier[s] , identifier[e] : literal[string] + identifier[s] + literal[string] }
keyword[return] identifier[_apply_entities] ( identifier[text] , identifier[entities] , identifier[escapes] , identifier[formatters] ) | def apply_entities_as_html(text, entities):
"""
Format text as HTML. Also take care of escaping special characters.
Returned value can be passed to :meth:`.Bot.sendMessage` with appropriate
``parse_mode``.
:param text:
plain text
:param entities:
a list of `MessageEntity <https://core.telegram.org/bots/api#messageentity>`_ objects
"""
escapes = {'<': '<', '>': '>', '&': '&'}
formatters = {'bold': lambda s, e: '<b>' + s + '</b>', 'italic': lambda s, e: '<i>' + s + '</i>', 'text_link': lambda s, e: '<a href="' + e['url'] + '">' + s + '</a>', 'text_mention': lambda s, e: '<a href="tg://user?id=' + str(e['user']['id']) + '">' + s + '</a>', 'code': lambda s, e: '<code>' + s + '</code>', 'pre': lambda s, e: '<pre>' + s + '</pre>'}
return _apply_entities(text, entities, escapes, formatters) |
def show_prediction(self, **kwargs):
"""
Call :func:`eli5.show_prediction` for the locally-fit
classification pipeline. Keyword arguments are passed
to :func:`eli5.show_prediction`.
:func:`fit` must be called before using this method.
"""
self._fix_target_names(kwargs)
return eli5.show_prediction(self.clf_, self.doc_, vec=self.vec_,
**kwargs) | def function[show_prediction, parameter[self]]:
constant[
Call :func:`eli5.show_prediction` for the locally-fit
classification pipeline. Keyword arguments are passed
to :func:`eli5.show_prediction`.
:func:`fit` must be called before using this method.
]
call[name[self]._fix_target_names, parameter[name[kwargs]]]
return[call[name[eli5].show_prediction, parameter[name[self].clf_, name[self].doc_]]] | keyword[def] identifier[show_prediction] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[self] . identifier[_fix_target_names] ( identifier[kwargs] )
keyword[return] identifier[eli5] . identifier[show_prediction] ( identifier[self] . identifier[clf_] , identifier[self] . identifier[doc_] , identifier[vec] = identifier[self] . identifier[vec_] ,
** identifier[kwargs] ) | def show_prediction(self, **kwargs):
"""
Call :func:`eli5.show_prediction` for the locally-fit
classification pipeline. Keyword arguments are passed
to :func:`eli5.show_prediction`.
:func:`fit` must be called before using this method.
"""
self._fix_target_names(kwargs)
return eli5.show_prediction(self.clf_, self.doc_, vec=self.vec_, **kwargs) |
def get_remote(self, key, default=None, scope=None):
"""
Get data from the remote end(s) of the :class:`Conversation` with the given scope.
In Python, this is equivalent to::
relation.conversation(scope).get_remote(key, default)
See :meth:`conversation` and :meth:`Conversation.get_remote`.
"""
return self.conversation(scope).get_remote(key, default) | def function[get_remote, parameter[self, key, default, scope]]:
constant[
Get data from the remote end(s) of the :class:`Conversation` with the given scope.
In Python, this is equivalent to::
relation.conversation(scope).get_remote(key, default)
See :meth:`conversation` and :meth:`Conversation.get_remote`.
]
return[call[call[name[self].conversation, parameter[name[scope]]].get_remote, parameter[name[key], name[default]]]] | keyword[def] identifier[get_remote] ( identifier[self] , identifier[key] , identifier[default] = keyword[None] , identifier[scope] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[conversation] ( identifier[scope] ). identifier[get_remote] ( identifier[key] , identifier[default] ) | def get_remote(self, key, default=None, scope=None):
"""
Get data from the remote end(s) of the :class:`Conversation` with the given scope.
In Python, this is equivalent to::
relation.conversation(scope).get_remote(key, default)
See :meth:`conversation` and :meth:`Conversation.get_remote`.
"""
return self.conversation(scope).get_remote(key, default) |
def lipid_box(self):
"""The box containing all of the lipids. """
if self._lipid_box:
return self._lipid_box
else:
self._lipid_box = self.lipid_components.boundingbox
# Add buffer around lipid box.
self._lipid_box.mins -= np.array([0.5*np.sqrt(self.apl),
0.5*np.sqrt(self.apl),
0.5*np.sqrt(self.apl)])
self._lipid_box.maxs += np.array([0.5*np.sqrt(self.apl),
0.5*np.sqrt(self.apl),
0.5*np.sqrt(self.apl)])
return self._lipid_box | def function[lipid_box, parameter[self]]:
constant[The box containing all of the lipids. ]
if name[self]._lipid_box begin[:]
return[name[self]._lipid_box] | keyword[def] identifier[lipid_box] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_lipid_box] :
keyword[return] identifier[self] . identifier[_lipid_box]
keyword[else] :
identifier[self] . identifier[_lipid_box] = identifier[self] . identifier[lipid_components] . identifier[boundingbox]
identifier[self] . identifier[_lipid_box] . identifier[mins] -= identifier[np] . identifier[array] ([ literal[int] * identifier[np] . identifier[sqrt] ( identifier[self] . identifier[apl] ),
literal[int] * identifier[np] . identifier[sqrt] ( identifier[self] . identifier[apl] ),
literal[int] * identifier[np] . identifier[sqrt] ( identifier[self] . identifier[apl] )])
identifier[self] . identifier[_lipid_box] . identifier[maxs] += identifier[np] . identifier[array] ([ literal[int] * identifier[np] . identifier[sqrt] ( identifier[self] . identifier[apl] ),
literal[int] * identifier[np] . identifier[sqrt] ( identifier[self] . identifier[apl] ),
literal[int] * identifier[np] . identifier[sqrt] ( identifier[self] . identifier[apl] )])
keyword[return] identifier[self] . identifier[_lipid_box] | def lipid_box(self):
"""The box containing all of the lipids. """
if self._lipid_box:
return self._lipid_box # depends on [control=['if'], data=[]]
else:
self._lipid_box = self.lipid_components.boundingbox
# Add buffer around lipid box.
self._lipid_box.mins -= np.array([0.5 * np.sqrt(self.apl), 0.5 * np.sqrt(self.apl), 0.5 * np.sqrt(self.apl)])
self._lipid_box.maxs += np.array([0.5 * np.sqrt(self.apl), 0.5 * np.sqrt(self.apl), 0.5 * np.sqrt(self.apl)])
return self._lipid_box |
def land(self, absolute_height_m, duration_s, group_mask=ALL_GROUPS):
"""
vertical land from current x-y position to given height
:param absolute_height_m: absolut (m)
:param duration_s: time it should take until target height is
reached (s)
:param group_mask: mask for which CFs this should apply to
"""
self._send_packet(struct.pack('<BBff',
self.COMMAND_LAND,
group_mask,
absolute_height_m,
duration_s)) | def function[land, parameter[self, absolute_height_m, duration_s, group_mask]]:
constant[
vertical land from current x-y position to given height
:param absolute_height_m: absolut (m)
:param duration_s: time it should take until target height is
reached (s)
:param group_mask: mask for which CFs this should apply to
]
call[name[self]._send_packet, parameter[call[name[struct].pack, parameter[constant[<BBff], name[self].COMMAND_LAND, name[group_mask], name[absolute_height_m], name[duration_s]]]]] | keyword[def] identifier[land] ( identifier[self] , identifier[absolute_height_m] , identifier[duration_s] , identifier[group_mask] = identifier[ALL_GROUPS] ):
literal[string]
identifier[self] . identifier[_send_packet] ( identifier[struct] . identifier[pack] ( literal[string] ,
identifier[self] . identifier[COMMAND_LAND] ,
identifier[group_mask] ,
identifier[absolute_height_m] ,
identifier[duration_s] )) | def land(self, absolute_height_m, duration_s, group_mask=ALL_GROUPS):
"""
vertical land from current x-y position to given height
:param absolute_height_m: absolut (m)
:param duration_s: time it should take until target height is
reached (s)
:param group_mask: mask for which CFs this should apply to
"""
self._send_packet(struct.pack('<BBff', self.COMMAND_LAND, group_mask, absolute_height_m, duration_s)) |
def getDescendants(self, all_descendants=False):
"""Returns the descendant Analysis Requests
:param all_descendants: recursively include all descendants
"""
# N.B. full objects returned here from
# `Products.Archetypes.Referenceable.getBRefs`
# -> don't add this method into Metadata
children = self.getBackReferences(
"AnalysisRequestParentAnalysisRequest")
descendants = []
# recursively include all children
if all_descendants:
for child in children:
descendants.append(child)
descendants += child.getDescendants(all_descendants=True)
else:
descendants = children
return descendants | def function[getDescendants, parameter[self, all_descendants]]:
constant[Returns the descendant Analysis Requests
:param all_descendants: recursively include all descendants
]
variable[children] assign[=] call[name[self].getBackReferences, parameter[constant[AnalysisRequestParentAnalysisRequest]]]
variable[descendants] assign[=] list[[]]
if name[all_descendants] begin[:]
for taget[name[child]] in starred[name[children]] begin[:]
call[name[descendants].append, parameter[name[child]]]
<ast.AugAssign object at 0x7da204961390>
return[name[descendants]] | keyword[def] identifier[getDescendants] ( identifier[self] , identifier[all_descendants] = keyword[False] ):
literal[string]
identifier[children] = identifier[self] . identifier[getBackReferences] (
literal[string] )
identifier[descendants] =[]
keyword[if] identifier[all_descendants] :
keyword[for] identifier[child] keyword[in] identifier[children] :
identifier[descendants] . identifier[append] ( identifier[child] )
identifier[descendants] += identifier[child] . identifier[getDescendants] ( identifier[all_descendants] = keyword[True] )
keyword[else] :
identifier[descendants] = identifier[children]
keyword[return] identifier[descendants] | def getDescendants(self, all_descendants=False):
"""Returns the descendant Analysis Requests
:param all_descendants: recursively include all descendants
"""
# N.B. full objects returned here from
# `Products.Archetypes.Referenceable.getBRefs`
# -> don't add this method into Metadata
children = self.getBackReferences('AnalysisRequestParentAnalysisRequest')
descendants = []
# recursively include all children
if all_descendants:
for child in children:
descendants.append(child)
descendants += child.getDescendants(all_descendants=True) # depends on [control=['for'], data=['child']] # depends on [control=['if'], data=[]]
else:
descendants = children
return descendants |
def flatten(lst):
"""flatten([["a","btr"],"b", [],["c",["d",["e"], []]]]) will return ['a', 'btr', 'b', 'c', 'd', 'e']"""
def flatten_aux(item, accumulated):
if type(item) != list:
accumulated.append(item)
else:
for l in item:
flatten_aux(l, accumulated)
accumulated = []
flatten_aux(lst,accumulated)
return accumulated | def function[flatten, parameter[lst]]:
constant[flatten([["a","btr"],"b", [],["c",["d",["e"], []]]]) will return ['a', 'btr', 'b', 'c', 'd', 'e']]
def function[flatten_aux, parameter[item, accumulated]]:
if compare[call[name[type], parameter[name[item]]] not_equal[!=] name[list]] begin[:]
call[name[accumulated].append, parameter[name[item]]]
variable[accumulated] assign[=] list[[]]
call[name[flatten_aux], parameter[name[lst], name[accumulated]]]
return[name[accumulated]] | keyword[def] identifier[flatten] ( identifier[lst] ):
literal[string]
keyword[def] identifier[flatten_aux] ( identifier[item] , identifier[accumulated] ):
keyword[if] identifier[type] ( identifier[item] )!= identifier[list] :
identifier[accumulated] . identifier[append] ( identifier[item] )
keyword[else] :
keyword[for] identifier[l] keyword[in] identifier[item] :
identifier[flatten_aux] ( identifier[l] , identifier[accumulated] )
identifier[accumulated] =[]
identifier[flatten_aux] ( identifier[lst] , identifier[accumulated] )
keyword[return] identifier[accumulated] | def flatten(lst):
"""flatten([["a","btr"],"b", [],["c",["d",["e"], []]]]) will return ['a', 'btr', 'b', 'c', 'd', 'e']"""
def flatten_aux(item, accumulated):
if type(item) != list:
accumulated.append(item) # depends on [control=['if'], data=[]]
else:
for l in item:
flatten_aux(l, accumulated) # depends on [control=['for'], data=['l']]
accumulated = []
flatten_aux(lst, accumulated)
return accumulated |
def calculate_change_hash(item: StoreItem) -> str:
"""
Utility function to calculate a change hash for a `StoreItem`.
:param item:
:return:
"""
cpy = copy(item)
if cpy.e_tag is not None:
del cpy.e_tag
return str(cpy) | def function[calculate_change_hash, parameter[item]]:
constant[
Utility function to calculate a change hash for a `StoreItem`.
:param item:
:return:
]
variable[cpy] assign[=] call[name[copy], parameter[name[item]]]
if compare[name[cpy].e_tag is_not constant[None]] begin[:]
<ast.Delete object at 0x7da1b05f9300>
return[call[name[str], parameter[name[cpy]]]] | keyword[def] identifier[calculate_change_hash] ( identifier[item] : identifier[StoreItem] )-> identifier[str] :
literal[string]
identifier[cpy] = identifier[copy] ( identifier[item] )
keyword[if] identifier[cpy] . identifier[e_tag] keyword[is] keyword[not] keyword[None] :
keyword[del] identifier[cpy] . identifier[e_tag]
keyword[return] identifier[str] ( identifier[cpy] ) | def calculate_change_hash(item: StoreItem) -> str:
"""
Utility function to calculate a change hash for a `StoreItem`.
:param item:
:return:
"""
cpy = copy(item)
if cpy.e_tag is not None:
del cpy.e_tag # depends on [control=['if'], data=[]]
return str(cpy) |
def create_index(self, index=None, settings=None, es=None):
"""Create new index and ignore if it exists already."""
if index is None:
index = self.index
if es is None:
es = self.es
try:
alias = index
index = generate_index_name(alias)
args = {'index': index}
if settings:
args['body'] = settings
es.indices.create(**args)
es.indices.put_alias(index, alias)
logger.info('created index alias=%s index=%s' % (alias, index))
except elasticsearch.TransportError: # index exists
pass | def function[create_index, parameter[self, index, settings, es]]:
constant[Create new index and ignore if it exists already.]
if compare[name[index] is constant[None]] begin[:]
variable[index] assign[=] name[self].index
if compare[name[es] is constant[None]] begin[:]
variable[es] assign[=] name[self].es
<ast.Try object at 0x7da1b28d6050> | keyword[def] identifier[create_index] ( identifier[self] , identifier[index] = keyword[None] , identifier[settings] = keyword[None] , identifier[es] = keyword[None] ):
literal[string]
keyword[if] identifier[index] keyword[is] keyword[None] :
identifier[index] = identifier[self] . identifier[index]
keyword[if] identifier[es] keyword[is] keyword[None] :
identifier[es] = identifier[self] . identifier[es]
keyword[try] :
identifier[alias] = identifier[index]
identifier[index] = identifier[generate_index_name] ( identifier[alias] )
identifier[args] ={ literal[string] : identifier[index] }
keyword[if] identifier[settings] :
identifier[args] [ literal[string] ]= identifier[settings]
identifier[es] . identifier[indices] . identifier[create] (** identifier[args] )
identifier[es] . identifier[indices] . identifier[put_alias] ( identifier[index] , identifier[alias] )
identifier[logger] . identifier[info] ( literal[string] %( identifier[alias] , identifier[index] ))
keyword[except] identifier[elasticsearch] . identifier[TransportError] :
keyword[pass] | def create_index(self, index=None, settings=None, es=None):
"""Create new index and ignore if it exists already."""
if index is None:
index = self.index # depends on [control=['if'], data=['index']]
if es is None:
es = self.es # depends on [control=['if'], data=['es']]
try:
alias = index
index = generate_index_name(alias)
args = {'index': index}
if settings:
args['body'] = settings # depends on [control=['if'], data=[]]
es.indices.create(**args)
es.indices.put_alias(index, alias)
logger.info('created index alias=%s index=%s' % (alias, index)) # depends on [control=['try'], data=[]]
except elasticsearch.TransportError: # index exists
pass # depends on [control=['except'], data=[]] |
def start(self):
""" Start the Manager process.
The worker loops on this:
1. If the last message sent was older than heartbeat period we send a heartbeat
2.
TODO: Move task receiving to a thread
"""
self.comm.Barrier()
logger.debug("Manager synced with workers")
self._kill_event = threading.Event()
self._task_puller_thread = threading.Thread(target=self.pull_tasks,
args=(self._kill_event,))
self._result_pusher_thread = threading.Thread(target=self.push_results,
args=(self._kill_event,))
self._task_puller_thread.start()
self._result_pusher_thread.start()
start = None
result_counter = 0
task_recv_counter = 0
task_sent_counter = 0
logger.info("Loop start")
while not self._kill_event.is_set():
time.sleep(LOOP_SLOWDOWN)
# In this block we attempt to probe MPI for a set amount of time,
# and if we have exhausted all available MPI events, we move on
# to the next block. The timer and counter trigger balance
# fairness and responsiveness.
timer = time.time() + 0.05
counter = min(10, comm.size)
while time.time() < timer:
info = MPI.Status()
if counter > 10:
logger.debug("Hit max mpi events per round")
break
if not self.comm.Iprobe(status=info):
logger.debug("Timer expired, processed {} mpi events".format(counter))
break
else:
tag = info.Get_tag()
logger.info("Message with tag {} received".format(tag))
counter += 1
if tag == RESULT_TAG:
result = self.recv_result_from_workers()
self.pending_result_queue.put(result)
result_counter += 1
elif tag == TASK_REQUEST_TAG:
worker_rank = self.recv_task_request_from_workers()
self.ready_worker_queue.put(worker_rank)
else:
logger.error("Unknown tag {} - ignoring this message and continuing".format(tag))
available_worker_cnt = self.ready_worker_queue.qsize()
available_task_cnt = self.pending_task_queue.qsize()
logger.debug("[MAIN] Ready workers: {} Ready tasks: {}".format(available_worker_cnt,
available_task_cnt))
this_round = min(available_worker_cnt, available_task_cnt)
for i in range(this_round):
worker_rank = self.ready_worker_queue.get()
task = self.pending_task_queue.get()
comm.send(task, dest=worker_rank, tag=worker_rank)
task_sent_counter += 1
logger.debug("Assigning worker:{} task:{}".format(worker_rank, task['task_id']))
if not start:
start = time.time()
logger.debug("Tasks recvd:{} Tasks dispatched:{} Results recvd:{}".format(
task_recv_counter, task_sent_counter, result_counter))
# print("[{}] Received: {}".format(self.identity, msg))
# time.sleep(random.randint(4,10)/10)
self._task_puller_thread.join()
self._result_pusher_thread.join()
self.task_incoming.close()
self.result_outgoing.close()
self.context.term()
delta = time.time() - start
logger.info("mpi_worker_pool ran for {} seconds".format(delta)) | def function[start, parameter[self]]:
constant[ Start the Manager process.
The worker loops on this:
1. If the last message sent was older than heartbeat period we send a heartbeat
2.
TODO: Move task receiving to a thread
]
call[name[self].comm.Barrier, parameter[]]
call[name[logger].debug, parameter[constant[Manager synced with workers]]]
name[self]._kill_event assign[=] call[name[threading].Event, parameter[]]
name[self]._task_puller_thread assign[=] call[name[threading].Thread, parameter[]]
name[self]._result_pusher_thread assign[=] call[name[threading].Thread, parameter[]]
call[name[self]._task_puller_thread.start, parameter[]]
call[name[self]._result_pusher_thread.start, parameter[]]
variable[start] assign[=] constant[None]
variable[result_counter] assign[=] constant[0]
variable[task_recv_counter] assign[=] constant[0]
variable[task_sent_counter] assign[=] constant[0]
call[name[logger].info, parameter[constant[Loop start]]]
while <ast.UnaryOp object at 0x7da1b01ba2c0> begin[:]
call[name[time].sleep, parameter[name[LOOP_SLOWDOWN]]]
variable[timer] assign[=] binary_operation[call[name[time].time, parameter[]] + constant[0.05]]
variable[counter] assign[=] call[name[min], parameter[constant[10], name[comm].size]]
while compare[call[name[time].time, parameter[]] less[<] name[timer]] begin[:]
variable[info] assign[=] call[name[MPI].Status, parameter[]]
if compare[name[counter] greater[>] constant[10]] begin[:]
call[name[logger].debug, parameter[constant[Hit max mpi events per round]]]
break
if <ast.UnaryOp object at 0x7da1b01bb280> begin[:]
call[name[logger].debug, parameter[call[constant[Timer expired, processed {} mpi events].format, parameter[name[counter]]]]]
break
variable[available_worker_cnt] assign[=] call[name[self].ready_worker_queue.qsize, parameter[]]
variable[available_task_cnt] assign[=] call[name[self].pending_task_queue.qsize, parameter[]]
call[name[logger].debug, parameter[call[constant[[MAIN] Ready workers: {} Ready tasks: {}].format, parameter[name[available_worker_cnt], name[available_task_cnt]]]]]
variable[this_round] assign[=] call[name[min], parameter[name[available_worker_cnt], name[available_task_cnt]]]
for taget[name[i]] in starred[call[name[range], parameter[name[this_round]]]] begin[:]
variable[worker_rank] assign[=] call[name[self].ready_worker_queue.get, parameter[]]
variable[task] assign[=] call[name[self].pending_task_queue.get, parameter[]]
call[name[comm].send, parameter[name[task]]]
<ast.AugAssign object at 0x7da18dc99de0>
call[name[logger].debug, parameter[call[constant[Assigning worker:{} task:{}].format, parameter[name[worker_rank], call[name[task]][constant[task_id]]]]]]
if <ast.UnaryOp object at 0x7da18dc9b340> begin[:]
variable[start] assign[=] call[name[time].time, parameter[]]
call[name[logger].debug, parameter[call[constant[Tasks recvd:{} Tasks dispatched:{} Results recvd:{}].format, parameter[name[task_recv_counter], name[task_sent_counter], name[result_counter]]]]]
call[name[self]._task_puller_thread.join, parameter[]]
call[name[self]._result_pusher_thread.join, parameter[]]
call[name[self].task_incoming.close, parameter[]]
call[name[self].result_outgoing.close, parameter[]]
call[name[self].context.term, parameter[]]
variable[delta] assign[=] binary_operation[call[name[time].time, parameter[]] - name[start]]
call[name[logger].info, parameter[call[constant[mpi_worker_pool ran for {} seconds].format, parameter[name[delta]]]]] | keyword[def] identifier[start] ( identifier[self] ):
literal[string]
identifier[self] . identifier[comm] . identifier[Barrier] ()
identifier[logger] . identifier[debug] ( literal[string] )
identifier[self] . identifier[_kill_event] = identifier[threading] . identifier[Event] ()
identifier[self] . identifier[_task_puller_thread] = identifier[threading] . identifier[Thread] ( identifier[target] = identifier[self] . identifier[pull_tasks] ,
identifier[args] =( identifier[self] . identifier[_kill_event] ,))
identifier[self] . identifier[_result_pusher_thread] = identifier[threading] . identifier[Thread] ( identifier[target] = identifier[self] . identifier[push_results] ,
identifier[args] =( identifier[self] . identifier[_kill_event] ,))
identifier[self] . identifier[_task_puller_thread] . identifier[start] ()
identifier[self] . identifier[_result_pusher_thread] . identifier[start] ()
identifier[start] = keyword[None]
identifier[result_counter] = literal[int]
identifier[task_recv_counter] = literal[int]
identifier[task_sent_counter] = literal[int]
identifier[logger] . identifier[info] ( literal[string] )
keyword[while] keyword[not] identifier[self] . identifier[_kill_event] . identifier[is_set] ():
identifier[time] . identifier[sleep] ( identifier[LOOP_SLOWDOWN] )
identifier[timer] = identifier[time] . identifier[time] ()+ literal[int]
identifier[counter] = identifier[min] ( literal[int] , identifier[comm] . identifier[size] )
keyword[while] identifier[time] . identifier[time] ()< identifier[timer] :
identifier[info] = identifier[MPI] . identifier[Status] ()
keyword[if] identifier[counter] > literal[int] :
identifier[logger] . identifier[debug] ( literal[string] )
keyword[break]
keyword[if] keyword[not] identifier[self] . identifier[comm] . identifier[Iprobe] ( identifier[status] = identifier[info] ):
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[counter] ))
keyword[break]
keyword[else] :
identifier[tag] = identifier[info] . identifier[Get_tag] ()
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[tag] ))
identifier[counter] += literal[int]
keyword[if] identifier[tag] == identifier[RESULT_TAG] :
identifier[result] = identifier[self] . identifier[recv_result_from_workers] ()
identifier[self] . identifier[pending_result_queue] . identifier[put] ( identifier[result] )
identifier[result_counter] += literal[int]
keyword[elif] identifier[tag] == identifier[TASK_REQUEST_TAG] :
identifier[worker_rank] = identifier[self] . identifier[recv_task_request_from_workers] ()
identifier[self] . identifier[ready_worker_queue] . identifier[put] ( identifier[worker_rank] )
keyword[else] :
identifier[logger] . identifier[error] ( literal[string] . identifier[format] ( identifier[tag] ))
identifier[available_worker_cnt] = identifier[self] . identifier[ready_worker_queue] . identifier[qsize] ()
identifier[available_task_cnt] = identifier[self] . identifier[pending_task_queue] . identifier[qsize] ()
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[available_worker_cnt] ,
identifier[available_task_cnt] ))
identifier[this_round] = identifier[min] ( identifier[available_worker_cnt] , identifier[available_task_cnt] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[this_round] ):
identifier[worker_rank] = identifier[self] . identifier[ready_worker_queue] . identifier[get] ()
identifier[task] = identifier[self] . identifier[pending_task_queue] . identifier[get] ()
identifier[comm] . identifier[send] ( identifier[task] , identifier[dest] = identifier[worker_rank] , identifier[tag] = identifier[worker_rank] )
identifier[task_sent_counter] += literal[int]
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[worker_rank] , identifier[task] [ literal[string] ]))
keyword[if] keyword[not] identifier[start] :
identifier[start] = identifier[time] . identifier[time] ()
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] (
identifier[task_recv_counter] , identifier[task_sent_counter] , identifier[result_counter] ))
identifier[self] . identifier[_task_puller_thread] . identifier[join] ()
identifier[self] . identifier[_result_pusher_thread] . identifier[join] ()
identifier[self] . identifier[task_incoming] . identifier[close] ()
identifier[self] . identifier[result_outgoing] . identifier[close] ()
identifier[self] . identifier[context] . identifier[term] ()
identifier[delta] = identifier[time] . identifier[time] ()- identifier[start]
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[delta] )) | def start(self):
""" Start the Manager process.
The worker loops on this:
1. If the last message sent was older than heartbeat period we send a heartbeat
2.
TODO: Move task receiving to a thread
"""
self.comm.Barrier()
logger.debug('Manager synced with workers')
self._kill_event = threading.Event()
self._task_puller_thread = threading.Thread(target=self.pull_tasks, args=(self._kill_event,))
self._result_pusher_thread = threading.Thread(target=self.push_results, args=(self._kill_event,))
self._task_puller_thread.start()
self._result_pusher_thread.start()
start = None
result_counter = 0
task_recv_counter = 0
task_sent_counter = 0
logger.info('Loop start')
while not self._kill_event.is_set():
time.sleep(LOOP_SLOWDOWN)
# In this block we attempt to probe MPI for a set amount of time,
# and if we have exhausted all available MPI events, we move on
# to the next block. The timer and counter trigger balance
# fairness and responsiveness.
timer = time.time() + 0.05
counter = min(10, comm.size)
while time.time() < timer:
info = MPI.Status()
if counter > 10:
logger.debug('Hit max mpi events per round')
break # depends on [control=['if'], data=[]]
if not self.comm.Iprobe(status=info):
logger.debug('Timer expired, processed {} mpi events'.format(counter))
break # depends on [control=['if'], data=[]]
else:
tag = info.Get_tag()
logger.info('Message with tag {} received'.format(tag))
counter += 1
if tag == RESULT_TAG:
result = self.recv_result_from_workers()
self.pending_result_queue.put(result)
result_counter += 1 # depends on [control=['if'], data=[]]
elif tag == TASK_REQUEST_TAG:
worker_rank = self.recv_task_request_from_workers()
self.ready_worker_queue.put(worker_rank) # depends on [control=['if'], data=[]]
else:
logger.error('Unknown tag {} - ignoring this message and continuing'.format(tag)) # depends on [control=['while'], data=[]]
available_worker_cnt = self.ready_worker_queue.qsize()
available_task_cnt = self.pending_task_queue.qsize()
logger.debug('[MAIN] Ready workers: {} Ready tasks: {}'.format(available_worker_cnt, available_task_cnt))
this_round = min(available_worker_cnt, available_task_cnt)
for i in range(this_round):
worker_rank = self.ready_worker_queue.get()
task = self.pending_task_queue.get()
comm.send(task, dest=worker_rank, tag=worker_rank)
task_sent_counter += 1
logger.debug('Assigning worker:{} task:{}'.format(worker_rank, task['task_id'])) # depends on [control=['for'], data=[]]
if not start:
start = time.time() # depends on [control=['if'], data=[]]
logger.debug('Tasks recvd:{} Tasks dispatched:{} Results recvd:{}'.format(task_recv_counter, task_sent_counter, result_counter)) # depends on [control=['while'], data=[]]
# print("[{}] Received: {}".format(self.identity, msg))
# time.sleep(random.randint(4,10)/10)
self._task_puller_thread.join()
self._result_pusher_thread.join()
self.task_incoming.close()
self.result_outgoing.close()
self.context.term()
delta = time.time() - start
logger.info('mpi_worker_pool ran for {} seconds'.format(delta)) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.