code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def _skip_whitespace(self):
"""Increment over whitespace, counting characters."""
i = 0
while self._cur_token['type'] is TT.ws and not self._finished:
self._increment()
i += 1
return i | def function[_skip_whitespace, parameter[self]]:
constant[Increment over whitespace, counting characters.]
variable[i] assign[=] constant[0]
while <ast.BoolOp object at 0x7da204565b10> begin[:]
call[name[self]._increment, parameter[]]
<ast.AugAssign object at 0x7da2045656c0>
return[name[i]] | keyword[def] identifier[_skip_whitespace] ( identifier[self] ):
literal[string]
identifier[i] = literal[int]
keyword[while] identifier[self] . identifier[_cur_token] [ literal[string] ] keyword[is] identifier[TT] . identifier[ws] keyword[and] keyword[not] identifier[self] . identifier[_finished] :
identifier[self] . identifier[_increment] ()
identifier[i] += literal[int]
keyword[return] identifier[i] | def _skip_whitespace(self):
"""Increment over whitespace, counting characters."""
i = 0
while self._cur_token['type'] is TT.ws and (not self._finished):
self._increment()
i += 1 # depends on [control=['while'], data=[]]
return i |
def uniq(args):
"""
%prog uniq fasta uniq.fasta
remove fasta records that are the same
"""
p = OptionParser(uniq.__doc__)
p.add_option("--seq", default=False, action="store_true",
help="Uniqify the sequences [default: %default]")
p.add_option("-t", "--trimname", dest="trimname",
action="store_true", default=False,
help="turn on the defline trim to first space [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(p.print_help())
fastafile, uniqfastafile = args
fw = must_open(uniqfastafile, "w")
seq = opts.seq
for rec in _uniq_rec(fastafile, seq=seq):
if opts.trimname:
rec.description = ""
SeqIO.write([rec], fw, "fasta") | def function[uniq, parameter[args]]:
constant[
%prog uniq fasta uniq.fasta
remove fasta records that are the same
]
variable[p] assign[=] call[name[OptionParser], parameter[name[uniq].__doc__]]
call[name[p].add_option, parameter[constant[--seq]]]
call[name[p].add_option, parameter[constant[-t], constant[--trimname]]]
<ast.Tuple object at 0x7da1b0962860> assign[=] call[name[p].parse_args, parameter[name[args]]]
if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[2]] begin[:]
call[name[sys].exit, parameter[call[name[p].print_help, parameter[]]]]
<ast.Tuple object at 0x7da1b0962b30> assign[=] name[args]
variable[fw] assign[=] call[name[must_open], parameter[name[uniqfastafile], constant[w]]]
variable[seq] assign[=] name[opts].seq
for taget[name[rec]] in starred[call[name[_uniq_rec], parameter[name[fastafile]]]] begin[:]
if name[opts].trimname begin[:]
name[rec].description assign[=] constant[]
call[name[SeqIO].write, parameter[list[[<ast.Name object at 0x7da1b09620b0>]], name[fw], constant[fasta]]] | keyword[def] identifier[uniq] ( identifier[args] ):
literal[string]
identifier[p] = identifier[OptionParser] ( identifier[uniq] . identifier[__doc__] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = keyword[False] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[p] . identifier[add_option] ( literal[string] , literal[string] , identifier[dest] = literal[string] ,
identifier[action] = literal[string] , identifier[default] = keyword[False] ,
identifier[help] = literal[string] )
identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] )
keyword[if] identifier[len] ( identifier[args] )!= literal[int] :
identifier[sys] . identifier[exit] ( identifier[p] . identifier[print_help] ())
identifier[fastafile] , identifier[uniqfastafile] = identifier[args]
identifier[fw] = identifier[must_open] ( identifier[uniqfastafile] , literal[string] )
identifier[seq] = identifier[opts] . identifier[seq]
keyword[for] identifier[rec] keyword[in] identifier[_uniq_rec] ( identifier[fastafile] , identifier[seq] = identifier[seq] ):
keyword[if] identifier[opts] . identifier[trimname] :
identifier[rec] . identifier[description] = literal[string]
identifier[SeqIO] . identifier[write] ([ identifier[rec] ], identifier[fw] , literal[string] ) | def uniq(args):
"""
%prog uniq fasta uniq.fasta
remove fasta records that are the same
"""
p = OptionParser(uniq.__doc__)
p.add_option('--seq', default=False, action='store_true', help='Uniqify the sequences [default: %default]')
p.add_option('-t', '--trimname', dest='trimname', action='store_true', default=False, help='turn on the defline trim to first space [default: %default]')
(opts, args) = p.parse_args(args)
if len(args) != 2:
sys.exit(p.print_help()) # depends on [control=['if'], data=[]]
(fastafile, uniqfastafile) = args
fw = must_open(uniqfastafile, 'w')
seq = opts.seq
for rec in _uniq_rec(fastafile, seq=seq):
if opts.trimname:
rec.description = '' # depends on [control=['if'], data=[]]
SeqIO.write([rec], fw, 'fasta') # depends on [control=['for'], data=['rec']] |
def _resolve_qualifiers(self, new_quals, inherited_quals, new_class,
super_class, obj_name, obj_type, qualifier_repo,
propagate=False, verbose=False):
"""
Process the override of qualifiers from the inherited_quals dictionary
to the new_quals dict following the override rules in DSP0004.
"""
superclassname = super_class.classname if super_class else None
# TODO Diagnostic we will keep until really sure of this code
if verbose:
print("\nRESOLVE sc_name=%s nc_name=%s, obj_name=%s obj_type=%s "
" propagate=%s" %
(superclassname, new_class.classname, obj_name, obj_type,
propagate))
print('\nNEW QUAL')
for q, qv in new_quals.items():
print(' %s: %r' % (q, qv))
print('INHERITEDQ:')
if inherited_quals:
for q, qv in inherited_quals.items():
print(' %s: %r' % (q, qv))
# If propagate flag not set, initialize the qualfiers
# by setting flavor defaults and propagated False
if not propagate:
for qname, qvalue in new_quals.items():
self._init_qualifier(qvalue, qualifier_repo)
return
# resolve qualifiers not in inherited object
for qname, qvalue in new_quals.items():
if not inherited_quals or qname not in inherited_quals:
self._init_qualifier(qvalue, qualifier_repo)
# resolve qualifiers from inherited object
for inh_qname, inh_qvalue in inherited_quals.items():
if inh_qvalue.tosubclass:
if inh_qvalue.overridable:
# if not in new quals, copy it to the new quals, else ignore
if inh_qname not in new_quals:
new_quals[inh_qname] = inherited_quals[inh_qname].copy()
new_quals[inh_qname].propagated = True
else:
new_quals[inh_qname].propagated = False
self._init_qualifier(new_quals[inh_qname],
qualifier_repo)
else: # not overridable
if inh_qname in new_quals:
# allow for same qualifier def in subclass
# TODO should more than value match here.??
if new_quals[inh_qname].value != \
inherited_quals[inh_qname].value:
raise CIMError(
CIM_ERR_INVALID_PARAMETER,
_format("Invalid new_class {0!A}:{1!A} "
"qualifier {2!A}. "
"in class {3!A}. Not overridable ",
obj_type, obj_name, inh_qname,
new_class.classname))
else:
new_quals[inh_qname].propagated = True
else: # not in new class, add it
new_quals[inh_qname] = inherited_quals[inh_qname].copy()
new_quals[inh_qname].propagated = True
else: # not tosubclass, i.e. restricted.
if inh_qname in new_quals:
if inh_qvalue.overridable or inh_qvalue.overridable is None:
new_quals[inh_qname].propagated = True
else:
raise CIMError(
CIM_ERR_INVALID_PARAMETER,
_format("Invalid qualifier object {0!A} qualifier "
"{1!A} . Restricted in super class {2!A}",
obj_name, inh_qname, superclassname)) | def function[_resolve_qualifiers, parameter[self, new_quals, inherited_quals, new_class, super_class, obj_name, obj_type, qualifier_repo, propagate, verbose]]:
constant[
Process the override of qualifiers from the inherited_quals dictionary
to the new_quals dict following the override rules in DSP0004.
]
variable[superclassname] assign[=] <ast.IfExp object at 0x7da1b26ada20>
if name[verbose] begin[:]
call[name[print], parameter[binary_operation[constant[
RESOLVE sc_name=%s nc_name=%s, obj_name=%s obj_type=%s propagate=%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b26ad600>, <ast.Attribute object at 0x7da1b26af1c0>, <ast.Name object at 0x7da1b26aec80>, <ast.Name object at 0x7da1b26af760>, <ast.Name object at 0x7da1b26ad7b0>]]]]]
call[name[print], parameter[constant[
NEW QUAL]]]
for taget[tuple[[<ast.Name object at 0x7da1b26aedd0>, <ast.Name object at 0x7da1b26ac250>]]] in starred[call[name[new_quals].items, parameter[]]] begin[:]
call[name[print], parameter[binary_operation[constant[ %s: %r] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b26aca30>, <ast.Name object at 0x7da1b26ae530>]]]]]
call[name[print], parameter[constant[INHERITEDQ:]]]
if name[inherited_quals] begin[:]
for taget[tuple[[<ast.Name object at 0x7da20c76d810>, <ast.Name object at 0x7da20c76e6b0>]]] in starred[call[name[inherited_quals].items, parameter[]]] begin[:]
call[name[print], parameter[binary_operation[constant[ %s: %r] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c76df00>, <ast.Name object at 0x7da20c76d420>]]]]]
if <ast.UnaryOp object at 0x7da20c76f0d0> begin[:]
for taget[tuple[[<ast.Name object at 0x7da20c76f0a0>, <ast.Name object at 0x7da20c76d5a0>]]] in starred[call[name[new_quals].items, parameter[]]] begin[:]
call[name[self]._init_qualifier, parameter[name[qvalue], name[qualifier_repo]]]
return[None]
for taget[tuple[[<ast.Name object at 0x7da20c76e5c0>, <ast.Name object at 0x7da20c76e860>]]] in starred[call[name[new_quals].items, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da20c76ffa0> begin[:]
call[name[self]._init_qualifier, parameter[name[qvalue], name[qualifier_repo]]]
for taget[tuple[[<ast.Name object at 0x7da20c76e050>, <ast.Name object at 0x7da20c76f7c0>]]] in starred[call[name[inherited_quals].items, parameter[]]] begin[:]
if name[inh_qvalue].tosubclass begin[:]
if name[inh_qvalue].overridable begin[:]
if compare[name[inh_qname] <ast.NotIn object at 0x7da2590d7190> name[new_quals]] begin[:]
call[name[new_quals]][name[inh_qname]] assign[=] call[call[name[inherited_quals]][name[inh_qname]].copy, parameter[]]
call[name[new_quals]][name[inh_qname]].propagated assign[=] constant[True] | keyword[def] identifier[_resolve_qualifiers] ( identifier[self] , identifier[new_quals] , identifier[inherited_quals] , identifier[new_class] ,
identifier[super_class] , identifier[obj_name] , identifier[obj_type] , identifier[qualifier_repo] ,
identifier[propagate] = keyword[False] , identifier[verbose] = keyword[False] ):
literal[string]
identifier[superclassname] = identifier[super_class] . identifier[classname] keyword[if] identifier[super_class] keyword[else] keyword[None]
keyword[if] identifier[verbose] :
identifier[print] ( literal[string]
literal[string] %
( identifier[superclassname] , identifier[new_class] . identifier[classname] , identifier[obj_name] , identifier[obj_type] ,
identifier[propagate] ))
identifier[print] ( literal[string] )
keyword[for] identifier[q] , identifier[qv] keyword[in] identifier[new_quals] . identifier[items] ():
identifier[print] ( literal[string] %( identifier[q] , identifier[qv] ))
identifier[print] ( literal[string] )
keyword[if] identifier[inherited_quals] :
keyword[for] identifier[q] , identifier[qv] keyword[in] identifier[inherited_quals] . identifier[items] ():
identifier[print] ( literal[string] %( identifier[q] , identifier[qv] ))
keyword[if] keyword[not] identifier[propagate] :
keyword[for] identifier[qname] , identifier[qvalue] keyword[in] identifier[new_quals] . identifier[items] ():
identifier[self] . identifier[_init_qualifier] ( identifier[qvalue] , identifier[qualifier_repo] )
keyword[return]
keyword[for] identifier[qname] , identifier[qvalue] keyword[in] identifier[new_quals] . identifier[items] ():
keyword[if] keyword[not] identifier[inherited_quals] keyword[or] identifier[qname] keyword[not] keyword[in] identifier[inherited_quals] :
identifier[self] . identifier[_init_qualifier] ( identifier[qvalue] , identifier[qualifier_repo] )
keyword[for] identifier[inh_qname] , identifier[inh_qvalue] keyword[in] identifier[inherited_quals] . identifier[items] ():
keyword[if] identifier[inh_qvalue] . identifier[tosubclass] :
keyword[if] identifier[inh_qvalue] . identifier[overridable] :
keyword[if] identifier[inh_qname] keyword[not] keyword[in] identifier[new_quals] :
identifier[new_quals] [ identifier[inh_qname] ]= identifier[inherited_quals] [ identifier[inh_qname] ]. identifier[copy] ()
identifier[new_quals] [ identifier[inh_qname] ]. identifier[propagated] = keyword[True]
keyword[else] :
identifier[new_quals] [ identifier[inh_qname] ]. identifier[propagated] = keyword[False]
identifier[self] . identifier[_init_qualifier] ( identifier[new_quals] [ identifier[inh_qname] ],
identifier[qualifier_repo] )
keyword[else] :
keyword[if] identifier[inh_qname] keyword[in] identifier[new_quals] :
keyword[if] identifier[new_quals] [ identifier[inh_qname] ]. identifier[value] != identifier[inherited_quals] [ identifier[inh_qname] ]. identifier[value] :
keyword[raise] identifier[CIMError] (
identifier[CIM_ERR_INVALID_PARAMETER] ,
identifier[_format] ( literal[string]
literal[string]
literal[string] ,
identifier[obj_type] , identifier[obj_name] , identifier[inh_qname] ,
identifier[new_class] . identifier[classname] ))
keyword[else] :
identifier[new_quals] [ identifier[inh_qname] ]. identifier[propagated] = keyword[True]
keyword[else] :
identifier[new_quals] [ identifier[inh_qname] ]= identifier[inherited_quals] [ identifier[inh_qname] ]. identifier[copy] ()
identifier[new_quals] [ identifier[inh_qname] ]. identifier[propagated] = keyword[True]
keyword[else] :
keyword[if] identifier[inh_qname] keyword[in] identifier[new_quals] :
keyword[if] identifier[inh_qvalue] . identifier[overridable] keyword[or] identifier[inh_qvalue] . identifier[overridable] keyword[is] keyword[None] :
identifier[new_quals] [ identifier[inh_qname] ]. identifier[propagated] = keyword[True]
keyword[else] :
keyword[raise] identifier[CIMError] (
identifier[CIM_ERR_INVALID_PARAMETER] ,
identifier[_format] ( literal[string]
literal[string] ,
identifier[obj_name] , identifier[inh_qname] , identifier[superclassname] )) | def _resolve_qualifiers(self, new_quals, inherited_quals, new_class, super_class, obj_name, obj_type, qualifier_repo, propagate=False, verbose=False):
"""
Process the override of qualifiers from the inherited_quals dictionary
to the new_quals dict following the override rules in DSP0004.
"""
superclassname = super_class.classname if super_class else None
# TODO Diagnostic we will keep until really sure of this code
if verbose:
print('\nRESOLVE sc_name=%s nc_name=%s, obj_name=%s obj_type=%s propagate=%s' % (superclassname, new_class.classname, obj_name, obj_type, propagate))
print('\nNEW QUAL')
for (q, qv) in new_quals.items():
print(' %s: %r' % (q, qv)) # depends on [control=['for'], data=[]]
print('INHERITEDQ:')
if inherited_quals:
for (q, qv) in inherited_quals.items():
print(' %s: %r' % (q, qv)) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# If propagate flag not set, initialize the qualfiers
# by setting flavor defaults and propagated False
if not propagate:
for (qname, qvalue) in new_quals.items():
self._init_qualifier(qvalue, qualifier_repo) # depends on [control=['for'], data=[]]
return # depends on [control=['if'], data=[]]
# resolve qualifiers not in inherited object
for (qname, qvalue) in new_quals.items():
if not inherited_quals or qname not in inherited_quals:
self._init_qualifier(qvalue, qualifier_repo) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
# resolve qualifiers from inherited object
for (inh_qname, inh_qvalue) in inherited_quals.items():
if inh_qvalue.tosubclass:
if inh_qvalue.overridable:
# if not in new quals, copy it to the new quals, else ignore
if inh_qname not in new_quals:
new_quals[inh_qname] = inherited_quals[inh_qname].copy()
new_quals[inh_qname].propagated = True # depends on [control=['if'], data=['inh_qname', 'new_quals']]
else:
new_quals[inh_qname].propagated = False
self._init_qualifier(new_quals[inh_qname], qualifier_repo) # depends on [control=['if'], data=[]] # not overridable
elif inh_qname in new_quals:
# allow for same qualifier def in subclass
# TODO should more than value match here.??
if new_quals[inh_qname].value != inherited_quals[inh_qname].value:
raise CIMError(CIM_ERR_INVALID_PARAMETER, _format('Invalid new_class {0!A}:{1!A} qualifier {2!A}. in class {3!A}. Not overridable ', obj_type, obj_name, inh_qname, new_class.classname)) # depends on [control=['if'], data=[]]
else:
new_quals[inh_qname].propagated = True # depends on [control=['if'], data=['inh_qname', 'new_quals']]
else: # not in new class, add it
new_quals[inh_qname] = inherited_quals[inh_qname].copy()
new_quals[inh_qname].propagated = True # depends on [control=['if'], data=[]] # not tosubclass, i.e. restricted.
elif inh_qname in new_quals:
if inh_qvalue.overridable or inh_qvalue.overridable is None:
new_quals[inh_qname].propagated = True # depends on [control=['if'], data=[]]
else:
raise CIMError(CIM_ERR_INVALID_PARAMETER, _format('Invalid qualifier object {0!A} qualifier {1!A} . Restricted in super class {2!A}', obj_name, inh_qname, superclassname)) # depends on [control=['if'], data=['inh_qname', 'new_quals']] # depends on [control=['for'], data=[]] |
def split(X, Y, question):
"""Partitions a dataset.
For each row in the dataset, check if it matches the question. If
so, add it to 'true rows', otherwise, add it to 'false rows'.
"""
true_X, false_X = [], []
true_Y, false_Y = [], []
for x, y in zip(X, Y):
if question.match(x):
true_X.append(x)
true_Y.append(y)
else:
false_X.append(x)
false_Y.append(y)
return (np.array(true_X), np.array(false_X),
np.array(true_Y), np.array(false_Y)) | def function[split, parameter[X, Y, question]]:
constant[Partitions a dataset.
For each row in the dataset, check if it matches the question. If
so, add it to 'true rows', otherwise, add it to 'false rows'.
]
<ast.Tuple object at 0x7da1b209ea70> assign[=] tuple[[<ast.List object at 0x7da1b209e890>, <ast.List object at 0x7da1b209e020>]]
<ast.Tuple object at 0x7da1b209d570> assign[=] tuple[[<ast.List object at 0x7da1b209c370>, <ast.List object at 0x7da1b1f0e8c0>]]
for taget[tuple[[<ast.Name object at 0x7da1b1f0d1e0>, <ast.Name object at 0x7da1b1f0d090>]]] in starred[call[name[zip], parameter[name[X], name[Y]]]] begin[:]
if call[name[question].match, parameter[name[x]]] begin[:]
call[name[true_X].append, parameter[name[x]]]
call[name[true_Y].append, parameter[name[y]]]
return[tuple[[<ast.Call object at 0x7da1b1f0dd50>, <ast.Call object at 0x7da1b1f0de70>, <ast.Call object at 0x7da1b1f0d870>, <ast.Call object at 0x7da1b1f0c8e0>]]] | keyword[def] identifier[split] ( identifier[X] , identifier[Y] , identifier[question] ):
literal[string]
identifier[true_X] , identifier[false_X] =[],[]
identifier[true_Y] , identifier[false_Y] =[],[]
keyword[for] identifier[x] , identifier[y] keyword[in] identifier[zip] ( identifier[X] , identifier[Y] ):
keyword[if] identifier[question] . identifier[match] ( identifier[x] ):
identifier[true_X] . identifier[append] ( identifier[x] )
identifier[true_Y] . identifier[append] ( identifier[y] )
keyword[else] :
identifier[false_X] . identifier[append] ( identifier[x] )
identifier[false_Y] . identifier[append] ( identifier[y] )
keyword[return] ( identifier[np] . identifier[array] ( identifier[true_X] ), identifier[np] . identifier[array] ( identifier[false_X] ),
identifier[np] . identifier[array] ( identifier[true_Y] ), identifier[np] . identifier[array] ( identifier[false_Y] )) | def split(X, Y, question):
"""Partitions a dataset.
For each row in the dataset, check if it matches the question. If
so, add it to 'true rows', otherwise, add it to 'false rows'.
"""
(true_X, false_X) = ([], [])
(true_Y, false_Y) = ([], [])
for (x, y) in zip(X, Y):
if question.match(x):
true_X.append(x)
true_Y.append(y) # depends on [control=['if'], data=[]]
else:
false_X.append(x)
false_Y.append(y) # depends on [control=['for'], data=[]]
return (np.array(true_X), np.array(false_X), np.array(true_Y), np.array(false_Y)) |
def _f_cash_root(x, counts, bkg, model):
"""
Function to find root of. Described in Appendix A, Stewart (2009).
Parameters
----------
x : float
Model amplitude.
counts : `~numpy.ndarray`
Count map slice, where model is defined.
bkg : `~numpy.ndarray`
Background map slice, where model is defined.
model : `~numpy.ndarray`
Source template (multiplied with exposure).
"""
return np.sum(model * (counts / (x * model + bkg) - 1.0)) | def function[_f_cash_root, parameter[x, counts, bkg, model]]:
constant[
Function to find root of. Described in Appendix A, Stewart (2009).
Parameters
----------
x : float
Model amplitude.
counts : `~numpy.ndarray`
Count map slice, where model is defined.
bkg : `~numpy.ndarray`
Background map slice, where model is defined.
model : `~numpy.ndarray`
Source template (multiplied with exposure).
]
return[call[name[np].sum, parameter[binary_operation[name[model] * binary_operation[binary_operation[name[counts] / binary_operation[binary_operation[name[x] * name[model]] + name[bkg]]] - constant[1.0]]]]]] | keyword[def] identifier[_f_cash_root] ( identifier[x] , identifier[counts] , identifier[bkg] , identifier[model] ):
literal[string]
keyword[return] identifier[np] . identifier[sum] ( identifier[model] *( identifier[counts] /( identifier[x] * identifier[model] + identifier[bkg] )- literal[int] )) | def _f_cash_root(x, counts, bkg, model):
"""
Function to find root of. Described in Appendix A, Stewart (2009).
Parameters
----------
x : float
Model amplitude.
counts : `~numpy.ndarray`
Count map slice, where model is defined.
bkg : `~numpy.ndarray`
Background map slice, where model is defined.
model : `~numpy.ndarray`
Source template (multiplied with exposure).
"""
return np.sum(model * (counts / (x * model + bkg) - 1.0)) |
def process(self, tup):
"""Process steps:
1. Stream third positional value from input into Kafka topic.
"""
status_seq = self.iter_using_shelf(tup.values[2], self.tweet_shelf)
# This could be more efficient by passing the result from twitter
# straight through to the producer, instead of deserializing and
# reserializing json.
self.producer.produce(json.dumps(status) for status in status_seq) | def function[process, parameter[self, tup]]:
constant[Process steps:
1. Stream third positional value from input into Kafka topic.
]
variable[status_seq] assign[=] call[name[self].iter_using_shelf, parameter[call[name[tup].values][constant[2]], name[self].tweet_shelf]]
call[name[self].producer.produce, parameter[<ast.GeneratorExp object at 0x7da18fe91120>]] | keyword[def] identifier[process] ( identifier[self] , identifier[tup] ):
literal[string]
identifier[status_seq] = identifier[self] . identifier[iter_using_shelf] ( identifier[tup] . identifier[values] [ literal[int] ], identifier[self] . identifier[tweet_shelf] )
identifier[self] . identifier[producer] . identifier[produce] ( identifier[json] . identifier[dumps] ( identifier[status] ) keyword[for] identifier[status] keyword[in] identifier[status_seq] ) | def process(self, tup):
"""Process steps:
1. Stream third positional value from input into Kafka topic.
"""
status_seq = self.iter_using_shelf(tup.values[2], self.tweet_shelf)
# This could be more efficient by passing the result from twitter
# straight through to the producer, instead of deserializing and
# reserializing json.
self.producer.produce((json.dumps(status) for status in status_seq)) |
def _release_line(c):
"""
Examine current repo state to determine what type of release to prep.
:returns:
A two-tuple of ``(branch-name, line-type)`` where:
- ``branch-name`` is the current branch name, e.g. ``1.1``, ``master``,
``gobbledygook`` (or, usually, ``HEAD`` if not on a branch).
- ``line-type`` is a symbolic member of `.Release` representing what
"type" of release the line appears to be for:
- ``Release.BUGFIX`` if on a bugfix/stable release line, e.g.
``1.1``.
- ``Release.FEATURE`` if on a feature-release branch (typically
``master``).
- ``Release.UNDEFINED`` if neither of those appears to apply
(usually means on some unmerged feature/dev branch).
"""
# TODO: I don't _think_ this technically overlaps with Releases (because
# that only ever deals with changelog contents, and therefore full release
# version numbers) but in case it does, move it there sometime.
# TODO: this and similar calls in this module may want to be given an
# explicit pointer-to-git-repo option (i.e. if run from outside project
# context).
# TODO: major releases? or are they big enough events we don't need to
# bother with the script? Also just hard to gauge - when is master the next
# 1.x feature vs 2.0?
branch = c.run("git rev-parse --abbrev-ref HEAD", hide=True).stdout.strip()
type_ = Release.UNDEFINED
if BUGFIX_RE.match(branch):
type_ = Release.BUGFIX
if FEATURE_RE.match(branch):
type_ = Release.FEATURE
return branch, type_ | def function[_release_line, parameter[c]]:
constant[
Examine current repo state to determine what type of release to prep.
:returns:
A two-tuple of ``(branch-name, line-type)`` where:
- ``branch-name`` is the current branch name, e.g. ``1.1``, ``master``,
``gobbledygook`` (or, usually, ``HEAD`` if not on a branch).
- ``line-type`` is a symbolic member of `.Release` representing what
"type" of release the line appears to be for:
- ``Release.BUGFIX`` if on a bugfix/stable release line, e.g.
``1.1``.
- ``Release.FEATURE`` if on a feature-release branch (typically
``master``).
- ``Release.UNDEFINED`` if neither of those appears to apply
(usually means on some unmerged feature/dev branch).
]
variable[branch] assign[=] call[call[name[c].run, parameter[constant[git rev-parse --abbrev-ref HEAD]]].stdout.strip, parameter[]]
variable[type_] assign[=] name[Release].UNDEFINED
if call[name[BUGFIX_RE].match, parameter[name[branch]]] begin[:]
variable[type_] assign[=] name[Release].BUGFIX
if call[name[FEATURE_RE].match, parameter[name[branch]]] begin[:]
variable[type_] assign[=] name[Release].FEATURE
return[tuple[[<ast.Name object at 0x7da2041d80a0>, <ast.Name object at 0x7da2041db430>]]] | keyword[def] identifier[_release_line] ( identifier[c] ):
literal[string]
identifier[branch] = identifier[c] . identifier[run] ( literal[string] , identifier[hide] = keyword[True] ). identifier[stdout] . identifier[strip] ()
identifier[type_] = identifier[Release] . identifier[UNDEFINED]
keyword[if] identifier[BUGFIX_RE] . identifier[match] ( identifier[branch] ):
identifier[type_] = identifier[Release] . identifier[BUGFIX]
keyword[if] identifier[FEATURE_RE] . identifier[match] ( identifier[branch] ):
identifier[type_] = identifier[Release] . identifier[FEATURE]
keyword[return] identifier[branch] , identifier[type_] | def _release_line(c):
"""
Examine current repo state to determine what type of release to prep.
:returns:
A two-tuple of ``(branch-name, line-type)`` where:
- ``branch-name`` is the current branch name, e.g. ``1.1``, ``master``,
``gobbledygook`` (or, usually, ``HEAD`` if not on a branch).
- ``line-type`` is a symbolic member of `.Release` representing what
"type" of release the line appears to be for:
- ``Release.BUGFIX`` if on a bugfix/stable release line, e.g.
``1.1``.
- ``Release.FEATURE`` if on a feature-release branch (typically
``master``).
- ``Release.UNDEFINED`` if neither of those appears to apply
(usually means on some unmerged feature/dev branch).
"""
# TODO: I don't _think_ this technically overlaps with Releases (because
# that only ever deals with changelog contents, and therefore full release
# version numbers) but in case it does, move it there sometime.
# TODO: this and similar calls in this module may want to be given an
# explicit pointer-to-git-repo option (i.e. if run from outside project
# context).
# TODO: major releases? or are they big enough events we don't need to
# bother with the script? Also just hard to gauge - when is master the next
# 1.x feature vs 2.0?
branch = c.run('git rev-parse --abbrev-ref HEAD', hide=True).stdout.strip()
type_ = Release.UNDEFINED
if BUGFIX_RE.match(branch):
type_ = Release.BUGFIX # depends on [control=['if'], data=[]]
if FEATURE_RE.match(branch):
type_ = Release.FEATURE # depends on [control=['if'], data=[]]
return (branch, type_) |
def GetRaw(self, name, context=None, default=utils.NotAValue):
"""Get the raw value without interpolations."""
if context is None:
context = self.context
# Getting a raw value is pretty cheap so we wont bother with the cache here.
_, value = self._GetValue(name, context, default=default)
return value | def function[GetRaw, parameter[self, name, context, default]]:
constant[Get the raw value without interpolations.]
if compare[name[context] is constant[None]] begin[:]
variable[context] assign[=] name[self].context
<ast.Tuple object at 0x7da1b1b68340> assign[=] call[name[self]._GetValue, parameter[name[name], name[context]]]
return[name[value]] | keyword[def] identifier[GetRaw] ( identifier[self] , identifier[name] , identifier[context] = keyword[None] , identifier[default] = identifier[utils] . identifier[NotAValue] ):
literal[string]
keyword[if] identifier[context] keyword[is] keyword[None] :
identifier[context] = identifier[self] . identifier[context]
identifier[_] , identifier[value] = identifier[self] . identifier[_GetValue] ( identifier[name] , identifier[context] , identifier[default] = identifier[default] )
keyword[return] identifier[value] | def GetRaw(self, name, context=None, default=utils.NotAValue):
"""Get the raw value without interpolations."""
if context is None:
context = self.context # depends on [control=['if'], data=['context']]
# Getting a raw value is pretty cheap so we wont bother with the cache here.
(_, value) = self._GetValue(name, context, default=default)
return value |
def parse_geo_tiff(
key_dir_vlr: GeoKeyDirectoryVlr,
double_vlr: GeoDoubleParamsVlr,
ascii_vlr: GeoAsciiParamsVlr,
) -> List[GeoTiffKey]:
""" Parses the GeoTiff VLRs information into nicer structs
"""
geotiff_keys = []
for k in key_dir_vlr.geo_keys:
if k.tiff_tag_location == 0:
value = k.value_offset
elif k.tiff_tag_location == 34736:
value = double_vlr.doubles[k.value_offset]
elif k.tiff_tag_location == 34737:
try:
value = ascii_vlr.strings[k.value_offset][k.count :]
except IndexError:
# Maybe I'm just misunderstanding the specification :thinking:
value = ascii_vlr.strings[0][k.value_offset : k.value_offset + k.count]
else:
logger.warning(
"GeoTiffKey with unknown tiff tag location ({})".format(
k.tiff_tag_location
)
)
continue
geotiff_keys.append(GeoTiffKey(k.id, value))
return geotiff_keys | def function[parse_geo_tiff, parameter[key_dir_vlr, double_vlr, ascii_vlr]]:
constant[ Parses the GeoTiff VLRs information into nicer structs
]
variable[geotiff_keys] assign[=] list[[]]
for taget[name[k]] in starred[name[key_dir_vlr].geo_keys] begin[:]
if compare[name[k].tiff_tag_location equal[==] constant[0]] begin[:]
variable[value] assign[=] name[k].value_offset
call[name[geotiff_keys].append, parameter[call[name[GeoTiffKey], parameter[name[k].id, name[value]]]]]
return[name[geotiff_keys]] | keyword[def] identifier[parse_geo_tiff] (
identifier[key_dir_vlr] : identifier[GeoKeyDirectoryVlr] ,
identifier[double_vlr] : identifier[GeoDoubleParamsVlr] ,
identifier[ascii_vlr] : identifier[GeoAsciiParamsVlr] ,
)-> identifier[List] [ identifier[GeoTiffKey] ]:
literal[string]
identifier[geotiff_keys] =[]
keyword[for] identifier[k] keyword[in] identifier[key_dir_vlr] . identifier[geo_keys] :
keyword[if] identifier[k] . identifier[tiff_tag_location] == literal[int] :
identifier[value] = identifier[k] . identifier[value_offset]
keyword[elif] identifier[k] . identifier[tiff_tag_location] == literal[int] :
identifier[value] = identifier[double_vlr] . identifier[doubles] [ identifier[k] . identifier[value_offset] ]
keyword[elif] identifier[k] . identifier[tiff_tag_location] == literal[int] :
keyword[try] :
identifier[value] = identifier[ascii_vlr] . identifier[strings] [ identifier[k] . identifier[value_offset] ][ identifier[k] . identifier[count] :]
keyword[except] identifier[IndexError] :
identifier[value] = identifier[ascii_vlr] . identifier[strings] [ literal[int] ][ identifier[k] . identifier[value_offset] : identifier[k] . identifier[value_offset] + identifier[k] . identifier[count] ]
keyword[else] :
identifier[logger] . identifier[warning] (
literal[string] . identifier[format] (
identifier[k] . identifier[tiff_tag_location]
)
)
keyword[continue]
identifier[geotiff_keys] . identifier[append] ( identifier[GeoTiffKey] ( identifier[k] . identifier[id] , identifier[value] ))
keyword[return] identifier[geotiff_keys] | def parse_geo_tiff(key_dir_vlr: GeoKeyDirectoryVlr, double_vlr: GeoDoubleParamsVlr, ascii_vlr: GeoAsciiParamsVlr) -> List[GeoTiffKey]:
""" Parses the GeoTiff VLRs information into nicer structs
"""
geotiff_keys = []
for k in key_dir_vlr.geo_keys:
if k.tiff_tag_location == 0:
value = k.value_offset # depends on [control=['if'], data=[]]
elif k.tiff_tag_location == 34736:
value = double_vlr.doubles[k.value_offset] # depends on [control=['if'], data=[]]
elif k.tiff_tag_location == 34737:
try:
value = ascii_vlr.strings[k.value_offset][k.count:] # depends on [control=['try'], data=[]]
except IndexError:
# Maybe I'm just misunderstanding the specification :thinking:
value = ascii_vlr.strings[0][k.value_offset:k.value_offset + k.count] # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
logger.warning('GeoTiffKey with unknown tiff tag location ({})'.format(k.tiff_tag_location))
continue
geotiff_keys.append(GeoTiffKey(k.id, value)) # depends on [control=['for'], data=['k']]
return geotiff_keys |
def _exit(self):
""" Shuts down the terminal (and app) """
self._removeInputPrompt()
print(self._color(self.COLOR_YELLOW, 'CLOSING TERMINAL...'))
self.stop() | def function[_exit, parameter[self]]:
constant[ Shuts down the terminal (and app) ]
call[name[self]._removeInputPrompt, parameter[]]
call[name[print], parameter[call[name[self]._color, parameter[name[self].COLOR_YELLOW, constant[CLOSING TERMINAL...]]]]]
call[name[self].stop, parameter[]] | keyword[def] identifier[_exit] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_removeInputPrompt] ()
identifier[print] ( identifier[self] . identifier[_color] ( identifier[self] . identifier[COLOR_YELLOW] , literal[string] ))
identifier[self] . identifier[stop] () | def _exit(self):
""" Shuts down the terminal (and app) """
self._removeInputPrompt()
print(self._color(self.COLOR_YELLOW, 'CLOSING TERMINAL...'))
self.stop() |
def GetSubkeyByIndex(self, index):
    """Retrieves a subkey by index.

    Args:
      index (int): index of the subkey.

    Returns:
      WinRegistryKey: Windows Registry subkey or None if not found.

    Raises:
      IndexError: if the index is out of bounds.
    """
    # Reject negative indexes too: pyregf would otherwise interpret them.
    if not 0 <= index < self._pyregf_key.number_of_sub_keys:
        raise IndexError('Index out of bounds.')
    pyregf_subkey = self._pyregf_key.get_sub_key(index)
    if not pyregf_subkey:
        return None
    subkey_path = key_paths.JoinKeyPath([self._key_path, pyregf_subkey.name])
    return REGFWinRegistryKey(pyregf_subkey, key_path=subkey_path)
constant[Retrieves a subkey by index.
Args:
index (int): index of the subkey.
Returns:
WinRegistryKey: Windows Registry subkey or None if not found.
Raises:
IndexError: if the index is out of bounds.
]
if <ast.BoolOp object at 0x7da2044c1600> begin[:]
<ast.Raise object at 0x7da2044c2c80>
variable[pyregf_key] assign[=] call[name[self]._pyregf_key.get_sub_key, parameter[name[index]]]
if <ast.UnaryOp object at 0x7da2044c0ee0> begin[:]
return[constant[None]]
variable[key_path] assign[=] call[name[key_paths].JoinKeyPath, parameter[list[[<ast.Attribute object at 0x7da18dc04730>, <ast.Attribute object at 0x7da18dc07fd0>]]]]
return[call[name[REGFWinRegistryKey], parameter[name[pyregf_key]]]] | keyword[def] identifier[GetSubkeyByIndex] ( identifier[self] , identifier[index] ):
literal[string]
keyword[if] identifier[index] < literal[int] keyword[or] identifier[index] >= identifier[self] . identifier[_pyregf_key] . identifier[number_of_sub_keys] :
keyword[raise] identifier[IndexError] ( literal[string] )
identifier[pyregf_key] = identifier[self] . identifier[_pyregf_key] . identifier[get_sub_key] ( identifier[index] )
keyword[if] keyword[not] identifier[pyregf_key] :
keyword[return] keyword[None]
identifier[key_path] = identifier[key_paths] . identifier[JoinKeyPath] ([ identifier[self] . identifier[_key_path] , identifier[pyregf_key] . identifier[name] ])
keyword[return] identifier[REGFWinRegistryKey] ( identifier[pyregf_key] , identifier[key_path] = identifier[key_path] ) | def GetSubkeyByIndex(self, index):
"""Retrieves a subkey by index.
Args:
index (int): index of the subkey.
Returns:
WinRegistryKey: Windows Registry subkey or None if not found.
Raises:
IndexError: if the index is out of bounds.
"""
if index < 0 or index >= self._pyregf_key.number_of_sub_keys:
raise IndexError('Index out of bounds.') # depends on [control=['if'], data=[]]
pyregf_key = self._pyregf_key.get_sub_key(index)
if not pyregf_key:
return None # depends on [control=['if'], data=[]]
key_path = key_paths.JoinKeyPath([self._key_path, pyregf_key.name])
return REGFWinRegistryKey(pyregf_key, key_path=key_path) |
def time_stops(self):
    """Valid time steps for this service as a list of datetime objects."""
    if not self.supports_time:
        return []
    if self.service.calendar != 'standard':
        # TODO
        raise NotImplementedError

    units = self.service.time_interval_units
    interval = self.service.time_interval

    if units in ('years', 'decades', 'centuries'):
        step_years = {'years': 1, 'decades': 10, 'centuries': 100}[units] * interval

        def advance(value):
            return value.replace(year=value.year + step_years)
    elif units == 'months':
        def advance(value):
            # Month arithmetic with day clamping (e.g. Jan 31 + 1 month -> Feb 28/29).
            year = value.year + (value.month + interval - 1) // 12
            month = (value.month + interval) % 12 or 12
            day = min(value.day, calendar.monthrange(year, month)[1])
            return value.replace(year=year, month=month, day=day)
    elif units in ('milliseconds', 'seconds', 'minutes', 'hours', 'days', 'weeks'):
        # These unit names are exactly timedelta's keyword arguments.
        delta = timedelta(**{units: interval})

        def advance(value):
            return value + delta
    else:
        raise ValidationError(
            "Service has an invalid time_interval_units: {}".format(self.service.time_interval_units)
        )

    steps = [self.time_start]
    while steps[-1] < self.time_end:
        candidate = advance(steps[-1])
        if candidate > self.time_end:
            break
        steps.append(candidate)
    return steps
constant[ Valid time steps for this service as a list of datetime objects. ]
if <ast.UnaryOp object at 0x7da2054a5420> begin[:]
return[list[[]]]
if compare[name[self].service.calendar equal[==] constant[standard]] begin[:]
variable[units] assign[=] name[self].service.time_interval_units
variable[interval] assign[=] name[self].service.time_interval
variable[steps] assign[=] list[[<ast.Attribute object at 0x7da2054a7ca0>]]
if compare[name[units] in tuple[[<ast.Constant object at 0x7da2054a7f10>, <ast.Constant object at 0x7da2054a4580>, <ast.Constant object at 0x7da2054a7fd0>]]] begin[:]
if compare[name[units] equal[==] constant[years]] begin[:]
variable[years] assign[=] name[interval]
variable[next_value] assign[=] <ast.Lambda object at 0x7da2054a4b80>
while compare[call[name[steps]][<ast.UnaryOp object at 0x7da2043440a0>] less[<] name[self].time_end] begin[:]
variable[value] assign[=] call[name[next_value], parameter[call[name[steps]][<ast.UnaryOp object at 0x7da204344970>]]]
if compare[name[value] greater[>] name[self].time_end] begin[:]
break
call[name[steps].append, parameter[name[value]]]
return[name[steps]] | keyword[def] identifier[time_stops] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[supports_time] :
keyword[return] []
keyword[if] identifier[self] . identifier[service] . identifier[calendar] == literal[string] :
identifier[units] = identifier[self] . identifier[service] . identifier[time_interval_units]
identifier[interval] = identifier[self] . identifier[service] . identifier[time_interval]
identifier[steps] =[ identifier[self] . identifier[time_start] ]
keyword[if] identifier[units] keyword[in] ( literal[string] , literal[string] , literal[string] ):
keyword[if] identifier[units] == literal[string] :
identifier[years] = identifier[interval]
keyword[elif] identifier[units] == literal[string] :
identifier[years] = literal[int] * identifier[interval]
keyword[else] :
identifier[years] = literal[int] * identifier[interval]
identifier[next_value] = keyword[lambda] identifier[x] : identifier[x] . identifier[replace] ( identifier[year] = identifier[x] . identifier[year] + identifier[years] )
keyword[elif] identifier[units] == literal[string] :
keyword[def] identifier[_fn] ( identifier[x] ):
identifier[year] = identifier[x] . identifier[year] +( identifier[x] . identifier[month] + identifier[interval] - literal[int] )// literal[int]
identifier[month] =( identifier[x] . identifier[month] + identifier[interval] )% literal[int] keyword[or] literal[int]
identifier[day] = identifier[min] ( identifier[x] . identifier[day] , identifier[calendar] . identifier[monthrange] ( identifier[year] , identifier[month] )[ literal[int] ])
keyword[return] identifier[x] . identifier[replace] ( identifier[year] = identifier[year] , identifier[month] = identifier[month] , identifier[day] = identifier[day] )
identifier[next_value] = identifier[_fn]
keyword[else] :
keyword[if] identifier[units] == literal[string] :
identifier[delta] = identifier[timedelta] ( identifier[milliseconds] = identifier[interval] )
keyword[elif] identifier[units] == literal[string] :
identifier[delta] = identifier[timedelta] ( identifier[seconds] = identifier[interval] )
keyword[elif] identifier[units] == literal[string] :
identifier[delta] = identifier[timedelta] ( identifier[minutes] = identifier[interval] )
keyword[elif] identifier[units] == literal[string] :
identifier[delta] = identifier[timedelta] ( identifier[hours] = identifier[interval] )
keyword[elif] identifier[units] == literal[string] :
identifier[delta] = identifier[timedelta] ( identifier[days] = identifier[interval] )
keyword[elif] identifier[units] == literal[string] :
identifier[delta] = identifier[timedelta] ( identifier[weeks] = identifier[interval] )
keyword[else] :
keyword[raise] identifier[ValidationError] (
literal[string] . identifier[format] ( identifier[self] . identifier[service] . identifier[time_interval_units] )
)
identifier[next_value] = keyword[lambda] identifier[x] : identifier[x] + identifier[delta]
keyword[while] identifier[steps] [- literal[int] ]< identifier[self] . identifier[time_end] :
identifier[value] = identifier[next_value] ( identifier[steps] [- literal[int] ])
keyword[if] identifier[value] > identifier[self] . identifier[time_end] :
keyword[break]
identifier[steps] . identifier[append] ( identifier[value] )
keyword[return] identifier[steps]
keyword[else] :
keyword[raise] identifier[NotImplementedError] | def time_stops(self):
""" Valid time steps for this service as a list of datetime objects. """
if not self.supports_time:
return [] # depends on [control=['if'], data=[]]
if self.service.calendar == 'standard':
units = self.service.time_interval_units
interval = self.service.time_interval
steps = [self.time_start]
if units in ('years', 'decades', 'centuries'):
if units == 'years':
years = interval # depends on [control=['if'], data=[]]
elif units == 'decades':
years = 10 * interval # depends on [control=['if'], data=[]]
else:
years = 100 * interval
next_value = lambda x: x.replace(year=x.year + years) # depends on [control=['if'], data=['units']]
elif units == 'months':
def _fn(x):
year = x.year + (x.month + interval - 1) // 12
month = (x.month + interval) % 12 or 12
day = min(x.day, calendar.monthrange(year, month)[1])
return x.replace(year=year, month=month, day=day)
next_value = _fn # depends on [control=['if'], data=[]]
else:
if units == 'milliseconds':
delta = timedelta(milliseconds=interval) # depends on [control=['if'], data=[]]
elif units == 'seconds':
delta = timedelta(seconds=interval) # depends on [control=['if'], data=[]]
elif units == 'minutes':
delta = timedelta(minutes=interval) # depends on [control=['if'], data=[]]
elif units == 'hours':
delta = timedelta(hours=interval) # depends on [control=['if'], data=[]]
elif units == 'days':
delta = timedelta(days=interval) # depends on [control=['if'], data=[]]
elif units == 'weeks':
delta = timedelta(weeks=interval) # depends on [control=['if'], data=[]]
else:
raise ValidationError('Service has an invalid time_interval_units: {}'.format(self.service.time_interval_units))
next_value = lambda x: x + delta
while steps[-1] < self.time_end:
value = next_value(steps[-1])
if value > self.time_end:
break # depends on [control=['if'], data=[]]
steps.append(value) # depends on [control=['while'], data=[]]
return steps # depends on [control=['if'], data=[]]
else:
# TODO
raise NotImplementedError |
def fit_predict(self, features, target, sample_weight=None, groups=None):
    """Fit the model to the data, then return its predictions for it.

    Parameters
    ----------
    features: array-like {n_samples, n_features}
        Feature matrix
    target: array-like {n_samples}
        List of class labels for prediction
    sample_weight: array-like {n_samples}, optional
        Per-sample weights; higher weights put more emphasis on those points
    groups: array-like, with shape {n_samples, }, optional
        Group labels, for use only with sklearn's Group cross-validation
        functions such as sklearn.model_selection.GroupKFold

    Returns
    -------
    array-like: {n_samples}
        Predicted target for the provided features
    """
    self.fit(features, target, sample_weight=sample_weight, groups=groups)
    return self.predict(features)
constant[Call fit and predict in sequence.
Parameters
----------
features: array-like {n_samples, n_features}
Feature matrix
target: array-like {n_samples}
List of class labels for prediction
sample_weight: array-like {n_samples}, optional
Per-sample weights. Higher weights force TPOT to put more emphasis on those points
groups: array-like, with shape {n_samples, }, optional
Group labels for the samples used when performing cross-validation.
This parameter should only be used in conjunction with sklearn's Group cross-validation
functions, such as sklearn.model_selection.GroupKFold
Returns
----------
array-like: {n_samples}
Predicted target for the provided features
]
call[name[self].fit, parameter[name[features], name[target]]]
return[call[name[self].predict, parameter[name[features]]]] | keyword[def] identifier[fit_predict] ( identifier[self] , identifier[features] , identifier[target] , identifier[sample_weight] = keyword[None] , identifier[groups] = keyword[None] ):
literal[string]
identifier[self] . identifier[fit] ( identifier[features] , identifier[target] , identifier[sample_weight] = identifier[sample_weight] , identifier[groups] = identifier[groups] )
keyword[return] identifier[self] . identifier[predict] ( identifier[features] ) | def fit_predict(self, features, target, sample_weight=None, groups=None):
"""Call fit and predict in sequence.
Parameters
----------
features: array-like {n_samples, n_features}
Feature matrix
target: array-like {n_samples}
List of class labels for prediction
sample_weight: array-like {n_samples}, optional
Per-sample weights. Higher weights force TPOT to put more emphasis on those points
groups: array-like, with shape {n_samples, }, optional
Group labels for the samples used when performing cross-validation.
This parameter should only be used in conjunction with sklearn's Group cross-validation
functions, such as sklearn.model_selection.GroupKFold
Returns
----------
array-like: {n_samples}
Predicted target for the provided features
"""
self.fit(features, target, sample_weight=sample_weight, groups=groups)
return self.predict(features) |
def import_file(package: str, fname: str) -> ModuleType:
    """Import file directly.

    This is a hack to import files from packages without importing
    <package>/__init__.py, its purpose is to allow import without requiring
    all the dependencies at this point.

    Args:
        package: Package (directory) to import from
        fname: File to import, e.g. ``"module.py"``

    Returns:
        Imported module
    """
    # BUG FIX: rstrip('.py') strips any run of trailing '.', 'p', 'y'
    # characters, so e.g. 'yappy.py' became module name 'ya'. Remove only
    # the actual '.py' suffix instead.
    mod_name = fname[:-3] if fname.endswith('.py') else fname
    spec = spec_from_file_location(mod_name, '{}/{}'.format(package, fname))
    module = module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
constant[Import file directly.
This is a hack to import files from packages without importing
<package>/__init__.py, its purpose is to allow import without requiring
all the dependencies at this point.
Args:
package: Package to import from
fname: File to import
Returns:
Imported module
]
variable[mod_name] assign[=] call[name[fname].rstrip, parameter[constant[.py]]]
variable[spec] assign[=] call[name[spec_from_file_location], parameter[name[mod_name], call[constant[{}/{}].format, parameter[name[package], name[fname]]]]]
variable[module] assign[=] call[name[module_from_spec], parameter[name[spec]]]
call[name[spec].loader.exec_module, parameter[name[module]]]
return[name[module]] | keyword[def] identifier[import_file] ( identifier[package] : identifier[str] , identifier[fname] : identifier[str] )-> identifier[ModuleType] :
literal[string]
identifier[mod_name] = identifier[fname] . identifier[rstrip] ( literal[string] )
identifier[spec] = identifier[spec_from_file_location] ( identifier[mod_name] , literal[string] . identifier[format] ( identifier[package] , identifier[fname] ))
identifier[module] = identifier[module_from_spec] ( identifier[spec] )
identifier[spec] . identifier[loader] . identifier[exec_module] ( identifier[module] )
keyword[return] identifier[module] | def import_file(package: str, fname: str) -> ModuleType:
"""Import file directly.
This is a hack to import files from packages without importing
<package>/__init__.py, its purpose is to allow import without requiring
all the dependencies at this point.
Args:
package: Package to import from
fname: File to import
Returns:
Imported module
"""
mod_name = fname.rstrip('.py')
spec = spec_from_file_location(mod_name, '{}/{}'.format(package, fname))
module = module_from_spec(spec)
spec.loader.exec_module(module)
return module |
def star_expr__30(self, star_opt, expr):
    """(3.0, 3.1) star_expr: ['*'] expr"""
    # No leading '*' token: the expression passes through unchanged.
    if not star_opt:
        return expr
    return ast.Starred(value=expr, ctx=None,
                       star_loc=star_opt, loc=expr.loc.join(star_opt))
constant[(3.0, 3.1) star_expr: ['*'] expr]
if name[star_opt] begin[:]
return[call[name[ast].Starred, parameter[]]]
return[name[expr]] | keyword[def] identifier[star_expr__30] ( identifier[self] , identifier[star_opt] , identifier[expr] ):
literal[string]
keyword[if] identifier[star_opt] :
keyword[return] identifier[ast] . identifier[Starred] ( identifier[value] = identifier[expr] , identifier[ctx] = keyword[None] ,
identifier[star_loc] = identifier[star_opt] , identifier[loc] = identifier[expr] . identifier[loc] . identifier[join] ( identifier[star_opt] ))
keyword[return] identifier[expr] | def star_expr__30(self, star_opt, expr):
"""(3.0, 3.1) star_expr: ['*'] expr"""
if star_opt:
return ast.Starred(value=expr, ctx=None, star_loc=star_opt, loc=expr.loc.join(star_opt)) # depends on [control=['if'], data=[]]
return expr |
def capture(cmd, **kw):
    """Run a command and return its stripped captured output."""
    options = dict(kw)
    options['hide'] = 'out'
    # Normalize any falsy 'echo' value (or its absence) to an explicit False.
    if not options.get('echo', False):
        options['echo'] = False
    ignore_failures = options.pop('ignore_failures', False)
    try:
        result = invoke_run(cmd, **options)
    except exceptions.Failure as exc:
        if not ignore_failures:
            notify.error("Command `{}` failed with RC={}!".format(cmd, exc.result.return_code))
        raise
    return result.stdout.strip()
constant[Run a command and return its stripped captured output.]
variable[kw] assign[=] call[name[kw].copy, parameter[]]
call[name[kw]][constant[hide]] assign[=] constant[out]
if <ast.UnaryOp object at 0x7da1b003d240> begin[:]
call[name[kw]][constant[echo]] assign[=] constant[False]
variable[ignore_failures] assign[=] call[name[kw].pop, parameter[constant[ignore_failures], constant[False]]]
<ast.Try object at 0x7da1b003f940> | keyword[def] identifier[capture] ( identifier[cmd] ,** identifier[kw] ):
literal[string]
identifier[kw] = identifier[kw] . identifier[copy] ()
identifier[kw] [ literal[string] ]= literal[string]
keyword[if] keyword[not] identifier[kw] . identifier[get] ( literal[string] , keyword[False] ):
identifier[kw] [ literal[string] ]= keyword[False]
identifier[ignore_failures] = identifier[kw] . identifier[pop] ( literal[string] , keyword[False] )
keyword[try] :
keyword[return] identifier[invoke_run] ( identifier[cmd] ,** identifier[kw] ). identifier[stdout] . identifier[strip] ()
keyword[except] identifier[exceptions] . identifier[Failure] keyword[as] identifier[exc] :
keyword[if] keyword[not] identifier[ignore_failures] :
identifier[notify] . identifier[error] ( literal[string] . identifier[format] ( identifier[cmd] , identifier[exc] . identifier[result] . identifier[return_code] ,))
keyword[raise] | def capture(cmd, **kw):
"""Run a command and return its stripped captured output."""
kw = kw.copy()
kw['hide'] = 'out'
if not kw.get('echo', False):
kw['echo'] = False # depends on [control=['if'], data=[]]
ignore_failures = kw.pop('ignore_failures', False)
try:
return invoke_run(cmd, **kw).stdout.strip() # depends on [control=['try'], data=[]]
except exceptions.Failure as exc:
if not ignore_failures:
notify.error('Command `{}` failed with RC={}!'.format(cmd, exc.result.return_code))
raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['exc']] |
def plot_traindata(self, name: str='dataplot') -> None:
    """Scatter-plot the stored training data and save it as a PNG.

    The x/y axes are the log-noise and log-curvature columns; point color
    encodes column 5 of the training data.
    """
    points = self.get_traindata()
    plt.figure(figsize=(16, 16))
    plt.scatter(points[:, 1], points[:, 2],
                c=points[:, 5], marker='o', label='Datastore Points')
    plt.xlabel(r'$\log_{10}$ Noise')
    plt.ylabel(r'$\log_{10}$ Curvature')
    plt.legend(loc=2, fontsize='xx-large')
    target = './img/{}.png'.format(name)
    plt.savefig(target)
constant[
Plots traindata.... choo choo...
]
variable[traindata] assign[=] call[name[self].get_traindata, parameter[]]
call[name[plt].figure, parameter[]]
call[name[plt].scatter, parameter[call[name[traindata]][tuple[[<ast.Slice object at 0x7da1b1366200>, <ast.Constant object at 0x7da1b1366ec0>]]], call[name[traindata]][tuple[[<ast.Slice object at 0x7da1b1367610>, <ast.Constant object at 0x7da1b13663b0>]]]]]
call[name[plt].xlabel, parameter[constant[$\log_{10}$ Noise]]]
call[name[plt].ylabel, parameter[constant[$\log_{10}$ Curvature]]]
call[name[plt].legend, parameter[]]
call[name[plt].savefig, parameter[call[constant[./img/{}.png].format, parameter[name[name]]]]] | keyword[def] identifier[plot_traindata] ( identifier[self] , identifier[name] : identifier[str] = literal[string] )-> keyword[None] :
literal[string]
identifier[traindata] = identifier[self] . identifier[get_traindata] ()
identifier[plt] . identifier[figure] ( identifier[figsize] =( literal[int] , literal[int] ))
identifier[plt] . identifier[scatter] ( identifier[traindata] [:, literal[int] ], identifier[traindata] [:, literal[int] ],
identifier[c] = identifier[traindata] [:, literal[int] ], identifier[marker] = literal[string] , identifier[label] = literal[string] )
identifier[plt] . identifier[xlabel] ( literal[string] )
identifier[plt] . identifier[ylabel] ( literal[string] )
identifier[plt] . identifier[legend] ( identifier[loc] = literal[int] , identifier[fontsize] = literal[string] )
identifier[plt] . identifier[savefig] ( literal[string] . identifier[format] ( identifier[name] )) | def plot_traindata(self, name: str='dataplot') -> None:
"""
Plots traindata.... choo choo...
"""
traindata = self.get_traindata()
plt.figure(figsize=(16, 16))
plt.scatter(traindata[:, 1], traindata[:, 2], c=traindata[:, 5], marker='o', label='Datastore Points')
plt.xlabel('$\\log_{10}$ Noise')
plt.ylabel('$\\log_{10}$ Curvature')
plt.legend(loc=2, fontsize='xx-large')
plt.savefig('./img/{}.png'.format(name)) |
def SetPreferredLanguageIdentifier(self, language_identifier):
    """Sets the preferred language identifier.

    Args:
      language_identifier (str): language identifier string such as "en-US"
          for US English or "is-IS" for Icelandic.

    Raises:
      KeyError: if the language identifier is not defined.
      ValueError: if the language identifier is not a string type.
    """
    if not isinstance(language_identifier, py2to3.STRING_TYPES):
        raise ValueError('Language identifier is not a string.')

    # Lookups are case-insensitive, but the original casing is preserved.
    lookup_key = language_identifier.lower()
    values = language_ids.LANGUAGE_IDENTIFIERS.get(lookup_key)
    if not values:
        raise KeyError('Language identifier: {0:s} is not defined.'.format(
            language_identifier))

    self._language_identifier = language_identifier
    self._lcid = values[0]
constant[Sets the preferred language identifier.
Args:
language_identifier (str): language identifier string such as "en-US"
for US English or "is-IS" for Icelandic.
Raises:
KeyError: if the language identifier is not defined.
ValueError: if the language identifier is not a string type.
]
if <ast.UnaryOp object at 0x7da18bccbb20> begin[:]
<ast.Raise object at 0x7da18bcca7d0>
variable[values] assign[=] call[name[language_ids].LANGUAGE_IDENTIFIERS.get, parameter[call[name[language_identifier].lower, parameter[]], constant[None]]]
if <ast.UnaryOp object at 0x7da18bcca980> begin[:]
<ast.Raise object at 0x7da18bcc8fa0>
name[self]._language_identifier assign[=] name[language_identifier]
name[self]._lcid assign[=] call[name[values]][constant[0]] | keyword[def] identifier[SetPreferredLanguageIdentifier] ( identifier[self] , identifier[language_identifier] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[language_identifier] , identifier[py2to3] . identifier[STRING_TYPES] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[values] = identifier[language_ids] . identifier[LANGUAGE_IDENTIFIERS] . identifier[get] (
identifier[language_identifier] . identifier[lower] (), keyword[None] )
keyword[if] keyword[not] identifier[values] :
keyword[raise] identifier[KeyError] ( literal[string] . identifier[format] (
identifier[language_identifier] ))
identifier[self] . identifier[_language_identifier] = identifier[language_identifier]
identifier[self] . identifier[_lcid] = identifier[values] [ literal[int] ] | def SetPreferredLanguageIdentifier(self, language_identifier):
"""Sets the preferred language identifier.
Args:
language_identifier (str): language identifier string such as "en-US"
for US English or "is-IS" for Icelandic.
Raises:
KeyError: if the language identifier is not defined.
ValueError: if the language identifier is not a string type.
"""
if not isinstance(language_identifier, py2to3.STRING_TYPES):
raise ValueError('Language identifier is not a string.') # depends on [control=['if'], data=[]]
values = language_ids.LANGUAGE_IDENTIFIERS.get(language_identifier.lower(), None)
if not values:
raise KeyError('Language identifier: {0:s} is not defined.'.format(language_identifier)) # depends on [control=['if'], data=[]]
self._language_identifier = language_identifier
self._lcid = values[0] |
def ms_pan(self, viewer, event, data_x, data_y):
    """A 'drag' or proportional pan: the image is panned by 'dragging the
    canvas' up or down, by an amount proportionate to the drag length.
    """
    if not self.canpan:
        return True
    win_x, win_y = viewer.get_last_win_xy()
    state = event.state
    if state == 'down':
        # Anchor the pan origin at the press position.
        self.pan_set_origin(viewer, win_x, win_y, data_x, data_y)
        self.pan_start(viewer, ptype=2)
    elif state == 'move':
        new_x, new_y = self.get_new_pan(viewer, win_x, win_y,
                                        ptype=self._pantype)
        viewer.panset_xy(new_x, new_y)
    else:
        self.pan_stop(viewer)
    return True
constant[A 'drag' or proportional pan, where the image is panned by
'dragging the canvas' up or down. The amount of the pan is
proportionate to the length of the drag.
]
if <ast.UnaryOp object at 0x7da18dc07070> begin[:]
return[constant[True]]
<ast.Tuple object at 0x7da18dc06cb0> assign[=] call[name[viewer].get_last_win_xy, parameter[]]
if compare[name[event].state equal[==] constant[move]] begin[:]
<ast.Tuple object at 0x7da18dc07b50> assign[=] call[name[self].get_new_pan, parameter[name[viewer], name[x], name[y]]]
call[name[viewer].panset_xy, parameter[name[data_x], name[data_y]]]
return[constant[True]] | keyword[def] identifier[ms_pan] ( identifier[self] , identifier[viewer] , identifier[event] , identifier[data_x] , identifier[data_y] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[canpan] :
keyword[return] keyword[True]
identifier[x] , identifier[y] = identifier[viewer] . identifier[get_last_win_xy] ()
keyword[if] identifier[event] . identifier[state] == literal[string] :
identifier[data_x] , identifier[data_y] = identifier[self] . identifier[get_new_pan] ( identifier[viewer] , identifier[x] , identifier[y] ,
identifier[ptype] = identifier[self] . identifier[_pantype] )
identifier[viewer] . identifier[panset_xy] ( identifier[data_x] , identifier[data_y] )
keyword[elif] identifier[event] . identifier[state] == literal[string] :
identifier[self] . identifier[pan_set_origin] ( identifier[viewer] , identifier[x] , identifier[y] , identifier[data_x] , identifier[data_y] )
identifier[self] . identifier[pan_start] ( identifier[viewer] , identifier[ptype] = literal[int] )
keyword[else] :
identifier[self] . identifier[pan_stop] ( identifier[viewer] )
keyword[return] keyword[True] | def ms_pan(self, viewer, event, data_x, data_y):
"""A 'drag' or proportional pan, where the image is panned by
'dragging the canvas' up or down. The amount of the pan is
proportionate to the length of the drag.
"""
if not self.canpan:
return True # depends on [control=['if'], data=[]]
(x, y) = viewer.get_last_win_xy()
if event.state == 'move':
(data_x, data_y) = self.get_new_pan(viewer, x, y, ptype=self._pantype)
viewer.panset_xy(data_x, data_y) # depends on [control=['if'], data=[]]
elif event.state == 'down':
self.pan_set_origin(viewer, x, y, data_x, data_y)
self.pan_start(viewer, ptype=2) # depends on [control=['if'], data=[]]
else:
self.pan_stop(viewer)
return True |
def random_graph(out_degree):
    '''Random graph generator. Does not generate self-edges.
    out_degree : array-like of ints, controlling the out degree of each vertex.
    '''
    num_vertices = len(out_degree)
    degrees = np.asarray(out_degree, dtype=int)
    if np.any(degrees >= num_vertices):
        raise ValueError('Cannot have degree >= num_vertices')
    row = np.repeat(np.arange(num_vertices), degrees)
    weights = np.ones_like(row, dtype=float)
    # Draw targets from [0, n-2] without replacement, then shift any target
    # at or past the source up by one so self-edges never occur.
    col = np.concatenate([np.random.choice(num_vertices - 1, d, replace=False)
                          for d in degrees])
    col[col >= row] += 1
    adj = coo_matrix((weights, (row, col)), shape=(num_vertices, num_vertices))
    return Graph.from_adj_matrix(adj)
constant[Random graph generator. Does not generate self-edges.
out_degree : array-like of ints, controlling the out degree of each vertex.
]
variable[n] assign[=] call[name[len], parameter[name[out_degree]]]
variable[out_degree] assign[=] call[name[np].asarray, parameter[name[out_degree]]]
if call[compare[name[out_degree] greater_or_equal[>=] name[n]].any, parameter[]] begin[:]
<ast.Raise object at 0x7da18f7217b0>
variable[row] assign[=] call[name[np].repeat, parameter[call[name[np].arange, parameter[name[n]]], name[out_degree]]]
variable[weights] assign[=] call[name[np].ones_like, parameter[name[row]]]
variable[col] assign[=] call[name[np].concatenate, parameter[<ast.ListComp object at 0x7da18dc068f0>]]
<ast.AugAssign object at 0x7da18dc05810>
variable[adj] assign[=] call[name[coo_matrix], parameter[tuple[[<ast.Name object at 0x7da18dc05e70>, <ast.Tuple object at 0x7da18dc044f0>]]]]
return[call[name[Graph].from_adj_matrix, parameter[name[adj]]]] | keyword[def] identifier[random_graph] ( identifier[out_degree] ):
literal[string]
identifier[n] = identifier[len] ( identifier[out_degree] )
identifier[out_degree] = identifier[np] . identifier[asarray] ( identifier[out_degree] , identifier[dtype] = identifier[int] )
keyword[if] ( identifier[out_degree] >= identifier[n] ). identifier[any] ():
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[row] = identifier[np] . identifier[repeat] ( identifier[np] . identifier[arange] ( identifier[n] ), identifier[out_degree] )
identifier[weights] = identifier[np] . identifier[ones_like] ( identifier[row] , identifier[dtype] = identifier[float] )
identifier[col] = identifier[np] . identifier[concatenate] ([ identifier[np] . identifier[random] . identifier[choice] ( identifier[n] - literal[int] , identifier[d] , identifier[replace] = keyword[False] )
keyword[for] identifier[d] keyword[in] identifier[out_degree] ])
identifier[col] [ identifier[col] >= identifier[row] ]+= literal[int]
identifier[adj] = identifier[coo_matrix] (( identifier[weights] ,( identifier[row] , identifier[col] )), identifier[shape] =( identifier[n] , identifier[n] ))
keyword[return] identifier[Graph] . identifier[from_adj_matrix] ( identifier[adj] ) | def random_graph(out_degree):
"""Random graph generator. Does not generate self-edges.
out_degree : array-like of ints, controlling the out degree of each vertex.
"""
n = len(out_degree)
out_degree = np.asarray(out_degree, dtype=int)
if (out_degree >= n).any():
raise ValueError('Cannot have degree >= num_vertices') # depends on [control=['if'], data=[]]
row = np.repeat(np.arange(n), out_degree)
weights = np.ones_like(row, dtype=float)
# Generate random edges from 0 to n-2, then shift by one to avoid self-edges.
col = np.concatenate([np.random.choice(n - 1, d, replace=False) for d in out_degree])
col[col >= row] += 1
adj = coo_matrix((weights, (row, col)), shape=(n, n))
return Graph.from_adj_matrix(adj) |
def update_insight(self, project_owner, project_id, id, **kwargs):
    """
    Update an insight
    Update an insight. Note that only elements included in the request will be updated. All omitted elements will remain untouched.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.update_insight(project_owner, project_id, id, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str project_owner: User name and unique identifier of the creator of a project. For example, in the URL: [https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), `government` is the unique identifier of the owner. (required)
    :param str project_id: User name and unique identifier of the project. For example, in the URL: [https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), `how-to-add-depth-to-your-data-with-the-us-census-acs` is the unique identifier of the owner. (required)
    :param str id: Insight unique identifier. (required)
    :param InsightPatchRequest body:
    :return: SuccessMessage
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the synchronous and the callback-driven code paths delegate to
    # the same *_with_http_info call and return its result unchanged (the
    # response data, or the request thread when a 'callback' is supplied),
    # so a single return covers both cases.
    return self.update_insight_with_http_info(project_owner, project_id,
                                              id, **kwargs)
return data | def function[update_insight, parameter[self, project_owner, project_id, id]]:
constant[
Update an insight
Update an insight. Note that only elements included in the request will be updated. All omitted elements will remain untouched.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_insight(project_owner, project_id, id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str project_owner: User name and unique identifier of the creator of a project. For example, in the URL: [https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), `government` is the unique identifier of the owner. (required)
:param str project_id: User name and unique identifier of the project. For example, in the URL: [https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), `how-to-add-depth-to-your-data-with-the-us-census-acs` is the unique identifier of the owner. (required)
:param str id: Insight unique identifier. (required)
:param InsightPatchRequest body:
:return: SuccessMessage
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[callback]]] begin[:]
return[call[name[self].update_insight_with_http_info, parameter[name[project_owner], name[project_id], name[id]]]] | keyword[def] identifier[update_insight] ( identifier[self] , identifier[project_owner] , identifier[project_id] , identifier[id] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[update_insight_with_http_info] ( identifier[project_owner] , identifier[project_id] , identifier[id] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[update_insight_with_http_info] ( identifier[project_owner] , identifier[project_id] , identifier[id] ,** identifier[kwargs] )
keyword[return] identifier[data] | def update_insight(self, project_owner, project_id, id, **kwargs):
"""
Update an insight
Update an insight. Note that only elements included in the request will be updated. All omitted elements will remain untouched.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_insight(project_owner, project_id, id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str project_owner: User name and unique identifier of the creator of a project. For example, in the URL: [https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), `government` is the unique identifier of the owner. (required)
:param str project_id: User name and unique identifier of the project. For example, in the URL: [https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs](https://data.world/government/how-to-add-depth-to-your-data-with-the-us-census-acs), `how-to-add-depth-to-your-data-with-the-us-census-acs` is the unique identifier of the owner. (required)
:param str id: Insight unique identifier. (required)
:param InsightPatchRequest body:
:return: SuccessMessage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_insight_with_http_info(project_owner, project_id, id, **kwargs) # depends on [control=['if'], data=[]]
else:
data = self.update_insight_with_http_info(project_owner, project_id, id, **kwargs)
return data |
def _prepare_put_or_patch(self, kwargs):
"""Retrieve the appropriate request items for put or patch calls."""
requests_params = self._handle_requests_params(kwargs)
update_uri = self._meta_data['uri']
session = self._meta_data['bigip']._meta_data['icr_session']
read_only = self._meta_data.get('read_only_attributes', [])
return requests_params, update_uri, session, read_only | def function[_prepare_put_or_patch, parameter[self, kwargs]]:
constant[Retrieve the appropriate request items for put or patch calls.]
variable[requests_params] assign[=] call[name[self]._handle_requests_params, parameter[name[kwargs]]]
variable[update_uri] assign[=] call[name[self]._meta_data][constant[uri]]
variable[session] assign[=] call[call[name[self]._meta_data][constant[bigip]]._meta_data][constant[icr_session]]
variable[read_only] assign[=] call[name[self]._meta_data.get, parameter[constant[read_only_attributes], list[[]]]]
return[tuple[[<ast.Name object at 0x7da1b1632dd0>, <ast.Name object at 0x7da1b1632ec0>, <ast.Name object at 0x7da1b1632a10>, <ast.Name object at 0x7da1b16315d0>]]] | keyword[def] identifier[_prepare_put_or_patch] ( identifier[self] , identifier[kwargs] ):
literal[string]
identifier[requests_params] = identifier[self] . identifier[_handle_requests_params] ( identifier[kwargs] )
identifier[update_uri] = identifier[self] . identifier[_meta_data] [ literal[string] ]
identifier[session] = identifier[self] . identifier[_meta_data] [ literal[string] ]. identifier[_meta_data] [ literal[string] ]
identifier[read_only] = identifier[self] . identifier[_meta_data] . identifier[get] ( literal[string] ,[])
keyword[return] identifier[requests_params] , identifier[update_uri] , identifier[session] , identifier[read_only] | def _prepare_put_or_patch(self, kwargs):
"""Retrieve the appropriate request items for put or patch calls."""
requests_params = self._handle_requests_params(kwargs)
update_uri = self._meta_data['uri']
session = self._meta_data['bigip']._meta_data['icr_session']
read_only = self._meta_data.get('read_only_attributes', [])
return (requests_params, update_uri, session, read_only) |
def returner(ret):
    '''
    Return information to a Kafka server
    '''
    topic = __salt__['config.option']('returner.kafka.topic')
    if not topic:
        log.error('Unable to find kafka returner config option: topic')
        return
    # Publish the return payload to the configured topic; the message key
    # is the str() form of the payload, UTF-8 encoded.
    producer = Producer({'bootstrap.servers': _get_conn()})
    producer.poll(0)
    producer.produce(topic, salt.utils.json.dumps(ret),
                     str(ret).encode('utf-8'), callback=_delivery_report)
    producer.flush()
constant[
Return information to a Kafka server
]
if call[call[name[__salt__]][constant[config.option]], parameter[constant[returner.kafka.topic]]] begin[:]
variable[topic] assign[=] call[call[name[__salt__]][constant[config.option]], parameter[constant[returner.kafka.topic]]]
variable[conn] assign[=] call[name[_get_conn], parameter[]]
variable[producer] assign[=] call[name[Producer], parameter[dictionary[[<ast.Constant object at 0x7da1b1fa1150>], [<ast.Name object at 0x7da1b1fa17e0>]]]]
call[name[producer].poll, parameter[constant[0]]]
call[name[producer].produce, parameter[name[topic], call[name[salt].utils.json.dumps, parameter[name[ret]]], call[call[name[str], parameter[name[ret]]].encode, parameter[constant[utf-8]]]]]
call[name[producer].flush, parameter[]] | keyword[def] identifier[returner] ( identifier[ret] ):
literal[string]
keyword[if] identifier[__salt__] [ literal[string] ]( literal[string] ):
identifier[topic] = identifier[__salt__] [ literal[string] ]( literal[string] )
identifier[conn] = identifier[_get_conn] ()
identifier[producer] = identifier[Producer] ({ literal[string] : identifier[conn] })
identifier[producer] . identifier[poll] ( literal[int] )
identifier[producer] . identifier[produce] ( identifier[topic] , identifier[salt] . identifier[utils] . identifier[json] . identifier[dumps] ( identifier[ret] ), identifier[str] ( identifier[ret] ). identifier[encode] ( literal[string] ), identifier[callback] = identifier[_delivery_report] )
identifier[producer] . identifier[flush] ()
keyword[else] :
identifier[log] . identifier[error] ( literal[string] ) | def returner(ret):
"""
Return information to a Kafka server
"""
if __salt__['config.option']('returner.kafka.topic'):
topic = __salt__['config.option']('returner.kafka.topic')
conn = _get_conn()
producer = Producer({'bootstrap.servers': conn})
producer.poll(0)
producer.produce(topic, salt.utils.json.dumps(ret), str(ret).encode('utf-8'), callback=_delivery_report)
producer.flush() # depends on [control=['if'], data=[]]
else:
log.error('Unable to find kafka returner config option: topic') |
def do_txn(self, params):
    """
    \x1b[1mNAME\x1b[0m
    txn - Create and execute a transaction
    \x1b[1mSYNOPSIS\x1b[0m
    txn <cmd> [cmd] [cmd] ... [cmd]
    \x1b[1mDESCRIPTION\x1b[0m
    Allowed cmds are check, create, rm and set. Check parameters are:
    check <path> <version>
    For create, rm and set see their help menu for their respective parameters.
    \x1b[1mEXAMPLES\x1b[0m
    > txn 'create /foo "start"' 'check /foo 0' 'set /foo "end"' 'rm /foo 1'
    """
    # Run every sub-command inside one transaction context so the whole
    # batch is applied (or fails) as a unit.
    # NOTE(review): the exception names suggest a ZooKeeper/kazoo
    # transaction backing self.transaction() -- confirm against the class.
    try:
        with self.transaction():
            for cmd in params.cmds:
                try:
                    # Dispatch through the interactive command machinery;
                    # an unknown command raises AttributeError.
                    self.onecmd(cmd)
                except AttributeError:
                    # silently swallow unrecognized commands
                    pass
    except BadVersionError:
        # A 'check'/'set'/'rm' version argument didn't match.
        self.show_output("Bad version.")
    except NoNodeError:
        self.show_output("Missing path.")
    except NodeExistsError:
        self.show_output("One of the paths exists.")
constant[
[1mNAME[0m
txn - Create and execute a transaction
[1mSYNOPSIS[0m
txn <cmd> [cmd] [cmd] ... [cmd]
[1mDESCRIPTION[0m
Allowed cmds are check, create, rm and set. Check parameters are:
check <path> <version>
For create, rm and set see their help menu for their respective parameters.
[1mEXAMPLES[0m
> txn 'create /foo "start"' 'check /foo 0' 'set /foo "end"' 'rm /foo 1'
]
<ast.Try object at 0x7da1b26af340> | keyword[def] identifier[do_txn] ( identifier[self] , identifier[params] ):
literal[string]
keyword[try] :
keyword[with] identifier[self] . identifier[transaction] ():
keyword[for] identifier[cmd] keyword[in] identifier[params] . identifier[cmds] :
keyword[try] :
identifier[self] . identifier[onecmd] ( identifier[cmd] )
keyword[except] identifier[AttributeError] :
keyword[pass]
keyword[except] identifier[BadVersionError] :
identifier[self] . identifier[show_output] ( literal[string] )
keyword[except] identifier[NoNodeError] :
identifier[self] . identifier[show_output] ( literal[string] )
keyword[except] identifier[NodeExistsError] :
identifier[self] . identifier[show_output] ( literal[string] ) | def do_txn(self, params):
"""
\x1b[1mNAME\x1b[0m
txn - Create and execute a transaction
\x1b[1mSYNOPSIS\x1b[0m
txn <cmd> [cmd] [cmd] ... [cmd]
\x1b[1mDESCRIPTION\x1b[0m
Allowed cmds are check, create, rm and set. Check parameters are:
check <path> <version>
For create, rm and set see their help menu for their respective parameters.
\x1b[1mEXAMPLES\x1b[0m
> txn 'create /foo "start"' 'check /foo 0' 'set /foo "end"' 'rm /foo 1'
"""
try:
with self.transaction():
for cmd in params.cmds:
try:
self.onecmd(cmd) # depends on [control=['try'], data=[]]
except AttributeError:
# silently swallow unrecognized commands
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['cmd']] # depends on [control=['with'], data=[]] # depends on [control=['try'], data=[]]
except BadVersionError:
self.show_output('Bad version.') # depends on [control=['except'], data=[]]
except NoNodeError:
self.show_output('Missing path.') # depends on [control=['except'], data=[]]
except NodeExistsError:
self.show_output('One of the paths exists.') # depends on [control=['except'], data=[]] |
def best_kmers(dt, response, sequence, k=6, consider_shift=True, n_cores=1,
               seq_align="start", trim_seq_len=None):
    """
    Find best k-mers for CONCISE initialization.
    Args:
        dt (pd.DataFrame): Table containing response variable and sequence.
        response (str): Name of the column used as the reponse variable.
        sequence (str): Name of the column storing the DNA/RNA sequences.
        k (int): Desired k-mer length.
        n_cores (int): Number of cores to use for computation. It can use up to 3 cores.
        consider_shift (boolean): When performing stepwise k-mer selection. Is TATTTA similar to ATTTAG?
        seq_align (str): one of ``{"start", "end"}``. To which end should we align sequences?
        trim_seq_len (int): Consider only first `trim_seq_len` bases of each sequence when generating the sequence design matrix. If :python:`None`, set :py:attr:`trim_seq_len` to the longest sequence length, hence whole sequences are considered.
    Returns:
        string list: Best set of motifs for this dataset sorted with respect to
        confidence (best candidate occuring first).
    Details:
        First a lasso model gets fitted to get a set of initial motifs. Next, the best
        subset of unrelated motifs is selected by stepwise selection.
    """
    # NOTE(review): n_cores is currently unused in this implementation --
    # kept in the signature for backward compatibility.
    y = dt[response]
    seq = dt[sequence]
    if trim_seq_len is not None:
        seq = pad_sequences(seq, align=seq_align, maxlen=trim_seq_len)
        # pad_sequences fills with "N"; strip the padding back out.
        seq = [s.replace("N", "") for s in seq]
    dt_kmer = kmer_count(seq, k)
    Xsp = csc_matrix(dt_kmer)
    en = ElasticNet(alpha=1, standardize=False, n_splits=3)
    en.fit(Xsp, y)
    # Initial candidate set: k-mers with a nonzero lasso coefficient.
    candidates = dt_kmer.columns.values[en.coef_ != 0].tolist()

    def dissimilar(s1, s2):
        """True if s1 is unrelated to s2 and may stay in the candidate pool."""
        if hamming_distance(s1, s2) <= 1:
            return False
        if consider_shift and hamming_distance(s1[1:], s2[:-1]) == 0:
            return False
        if consider_shift and hamming_distance(s1[:-1], s2[1:]) == 0:
            return False
        return True

    # Stepwise selection, implemented iteratively.  The previous recursive
    # formulation recursed once per selected k-mer and could hit Python's
    # recursion limit for large candidate sets; the loop below performs the
    # identical computation.  Each round: pick the candidate most correlated
    # with the current residual (smallest f_regression p-value), drop
    # candidates similar to it, then regress the selected set out of the
    # residual response.
    selected_kmers = []
    y_resid = y
    while candidates:
        _, pval = f_regression(dt_kmer[candidates], y_resid)
        kmer = candidates.pop(pval.argmin())
        selected_kmers.append(kmer)
        candidates = [ckmer for ckmer in candidates
                      if dissimilar(ckmer, kmer)]
        if not candidates:
            break
        lm = LinearRegression()
        lm.fit(dt_kmer[selected_kmers], y_resid)
        y_resid = y_resid - lm.predict(dt_kmer[selected_kmers])
    return selected_kmers
constant[
Find best k-mers for CONCISE initialization.
Args:
dt (pd.DataFrame): Table containing response variable and sequence.
response (str): Name of the column used as the reponse variable.
sequence (str): Name of the column storing the DNA/RNA sequences.
k (int): Desired k-mer length.
n_cores (int): Number of cores to use for computation. It can use up to 3 cores.
consider_shift (boolean): When performing stepwise k-mer selection. Is TATTTA similar to ATTTAG?
seq_align (str): one of ``{"start", "end"}``. To which end should we align sequences?
trim_seq_len (int): Consider only first `trim_seq_len` bases of each sequence when generating the sequence design matrix. If :python:`None`, set :py:attr:`trim_seq_len` to the longest sequence length, hence whole sequences are considered.
Returns:
string list: Best set of motifs for this dataset sorted with respect to
confidence (best candidate occuring first).
Details:
First a lasso model gets fitted to get a set of initial motifs. Next, the best
subset of unrelated motifs is selected by stepwise selection.
]
variable[y] assign[=] call[name[dt]][name[response]]
variable[seq] assign[=] call[name[dt]][name[sequence]]
if compare[name[trim_seq_len] is_not constant[None]] begin[:]
variable[seq] assign[=] call[name[pad_sequences], parameter[name[seq]]]
variable[seq] assign[=] <ast.ListComp object at 0x7da1b0464be0>
variable[dt_kmer] assign[=] call[name[kmer_count], parameter[name[seq], name[k]]]
variable[Xsp] assign[=] call[name[csc_matrix], parameter[name[dt_kmer]]]
variable[en] assign[=] call[name[ElasticNet], parameter[]]
call[name[en].fit, parameter[name[Xsp], name[y]]]
variable[nonzero_kmers] assign[=] call[call[name[dt_kmer].columns.values][compare[name[en].coef_ not_equal[!=] constant[0]]].tolist, parameter[]]
def function[find_next_best, parameter[dt_kmer, y, selected_kmers, to_be_selected_kmers, consider_shift]]:
constant[
perform stepwise model selection while preventing to add a motif similar to the
already selected motifs.
]
<ast.Tuple object at 0x7da1b04154e0> assign[=] call[name[f_regression], parameter[call[name[dt_kmer]][name[to_be_selected_kmers]], name[y]]]
variable[kmer] assign[=] call[name[to_be_selected_kmers].pop, parameter[call[name[pval].argmin, parameter[]]]]
call[name[selected_kmers].append, parameter[name[kmer]]]
def function[select_criterion, parameter[s1, s2, consider_shift]]:
if compare[call[name[hamming_distance], parameter[name[s1], name[s2]]] less_or_equal[<=] constant[1]] begin[:]
return[constant[False]]
if <ast.BoolOp object at 0x7da1b04da7a0> begin[:]
return[constant[False]]
if <ast.BoolOp object at 0x7da1b04db160> begin[:]
return[constant[False]]
return[constant[True]]
variable[to_be_selected_kmers] assign[=] <ast.ListComp object at 0x7da1b04dbb80>
if compare[call[name[len], parameter[name[to_be_selected_kmers]]] equal[==] constant[0]] begin[:]
return[name[selected_kmers]]
variable[selected_kmers] assign[=] call[name[find_next_best], parameter[name[dt_kmer], name[y], list[[]], name[nonzero_kmers], name[consider_shift]]]
return[name[selected_kmers]] | keyword[def] identifier[best_kmers] ( identifier[dt] , identifier[response] , identifier[sequence] , identifier[k] = literal[int] , identifier[consider_shift] = keyword[True] , identifier[n_cores] = literal[int] ,
identifier[seq_align] = literal[string] , identifier[trim_seq_len] = keyword[None] ):
literal[string]
identifier[y] = identifier[dt] [ identifier[response] ]
identifier[seq] = identifier[dt] [ identifier[sequence] ]
keyword[if] identifier[trim_seq_len] keyword[is] keyword[not] keyword[None] :
identifier[seq] = identifier[pad_sequences] ( identifier[seq] , identifier[align] = identifier[seq_align] , identifier[maxlen] = identifier[trim_seq_len] )
identifier[seq] =[ identifier[s] . identifier[replace] ( literal[string] , literal[string] ) keyword[for] identifier[s] keyword[in] identifier[seq] ]
identifier[dt_kmer] = identifier[kmer_count] ( identifier[seq] , identifier[k] )
identifier[Xsp] = identifier[csc_matrix] ( identifier[dt_kmer] )
identifier[en] = identifier[ElasticNet] ( identifier[alpha] = literal[int] , identifier[standardize] = keyword[False] , identifier[n_splits] = literal[int] )
identifier[en] . identifier[fit] ( identifier[Xsp] , identifier[y] )
identifier[nonzero_kmers] = identifier[dt_kmer] . identifier[columns] . identifier[values] [ identifier[en] . identifier[coef_] != literal[int] ]. identifier[tolist] ()
keyword[def] identifier[find_next_best] ( identifier[dt_kmer] , identifier[y] , identifier[selected_kmers] , identifier[to_be_selected_kmers] , identifier[consider_shift] = keyword[True] ):
literal[string]
identifier[F] , identifier[pval] = identifier[f_regression] ( identifier[dt_kmer] [ identifier[to_be_selected_kmers] ], identifier[y] )
identifier[kmer] = identifier[to_be_selected_kmers] . identifier[pop] ( identifier[pval] . identifier[argmin] ())
identifier[selected_kmers] . identifier[append] ( identifier[kmer] )
keyword[def] identifier[select_criterion] ( identifier[s1] , identifier[s2] , identifier[consider_shift] = keyword[True] ):
keyword[if] identifier[hamming_distance] ( identifier[s1] , identifier[s2] )<= literal[int] :
keyword[return] keyword[False]
keyword[if] identifier[consider_shift] keyword[and] identifier[hamming_distance] ( identifier[s1] [ literal[int] :], identifier[s2] [:- literal[int] ])== literal[int] :
keyword[return] keyword[False]
keyword[if] identifier[consider_shift] keyword[and] identifier[hamming_distance] ( identifier[s1] [:- literal[int] ], identifier[s2] [ literal[int] :])== literal[int] :
keyword[return] keyword[False]
keyword[return] keyword[True]
identifier[to_be_selected_kmers] =[ identifier[ckmer] keyword[for] identifier[ckmer] keyword[in] identifier[to_be_selected_kmers]
keyword[if] identifier[select_criterion] ( identifier[ckmer] , identifier[kmer] , identifier[consider_shift] )]
keyword[if] identifier[len] ( identifier[to_be_selected_kmers] )== literal[int] :
keyword[return] identifier[selected_kmers]
keyword[else] :
identifier[lm] = identifier[LinearRegression] ()
identifier[lm] . identifier[fit] ( identifier[dt_kmer] [ identifier[selected_kmers] ], identifier[y] )
identifier[y_new] = identifier[y] - identifier[lm] . identifier[predict] ( identifier[dt_kmer] [ identifier[selected_kmers] ])
keyword[return] identifier[find_next_best] ( identifier[dt_kmer] , identifier[y_new] , identifier[selected_kmers] , identifier[to_be_selected_kmers] , identifier[consider_shift] )
identifier[selected_kmers] = identifier[find_next_best] ( identifier[dt_kmer] , identifier[y] ,[], identifier[nonzero_kmers] , identifier[consider_shift] )
keyword[return] identifier[selected_kmers] | def best_kmers(dt, response, sequence, k=6, consider_shift=True, n_cores=1, seq_align='start', trim_seq_len=None):
"""
Find best k-mers for CONCISE initialization.
Args:
dt (pd.DataFrame): Table containing response variable and sequence.
response (str): Name of the column used as the reponse variable.
sequence (str): Name of the column storing the DNA/RNA sequences.
k (int): Desired k-mer length.
n_cores (int): Number of cores to use for computation. It can use up to 3 cores.
consider_shift (boolean): When performing stepwise k-mer selection. Is TATTTA similar to ATTTAG?
seq_align (str): one of ``{"start", "end"}``. To which end should we align sequences?
trim_seq_len (int): Consider only first `trim_seq_len` bases of each sequence when generating the sequence design matrix. If :python:`None`, set :py:attr:`trim_seq_len` to the longest sequence length, hence whole sequences are considered.
Returns:
string list: Best set of motifs for this dataset sorted with respect to
confidence (best candidate occuring first).
Details:
First a lasso model gets fitted to get a set of initial motifs. Next, the best
subset of unrelated motifs is selected by stepwise selection.
"""
y = dt[response]
seq = dt[sequence]
if trim_seq_len is not None:
seq = pad_sequences(seq, align=seq_align, maxlen=trim_seq_len)
seq = [s.replace('N', '') for s in seq] # depends on [control=['if'], data=['trim_seq_len']]
dt_kmer = kmer_count(seq, k)
Xsp = csc_matrix(dt_kmer)
en = ElasticNet(alpha=1, standardize=False, n_splits=3)
en.fit(Xsp, y)
# which coefficients are nonzero?=
nonzero_kmers = dt_kmer.columns.values[en.coef_ != 0].tolist()
# perform stepwise selection
#
# TODO - how do we deal with the intercept?
# largest number of motifs where they don't differ by more than 1 k-mer
def find_next_best(dt_kmer, y, selected_kmers, to_be_selected_kmers, consider_shift=True):
"""
perform stepwise model selection while preventing to add a motif similar to the
already selected motifs.
"""
(F, pval) = f_regression(dt_kmer[to_be_selected_kmers], y)
kmer = to_be_selected_kmers.pop(pval.argmin())
selected_kmers.append(kmer)
def select_criterion(s1, s2, consider_shift=True):
if hamming_distance(s1, s2) <= 1:
return False # depends on [control=['if'], data=[]]
if consider_shift and hamming_distance(s1[1:], s2[:-1]) == 0:
return False # depends on [control=['if'], data=[]]
if consider_shift and hamming_distance(s1[:-1], s2[1:]) == 0:
return False # depends on [control=['if'], data=[]]
return True
to_be_selected_kmers = [ckmer for ckmer in to_be_selected_kmers if select_criterion(ckmer, kmer, consider_shift)]
if len(to_be_selected_kmers) == 0:
return selected_kmers # depends on [control=['if'], data=[]]
else:
# regress out the new feature
lm = LinearRegression()
lm.fit(dt_kmer[selected_kmers], y)
y_new = y - lm.predict(dt_kmer[selected_kmers])
return find_next_best(dt_kmer, y_new, selected_kmers, to_be_selected_kmers, consider_shift)
selected_kmers = find_next_best(dt_kmer, y, [], nonzero_kmers, consider_shift)
return selected_kmers |
def merge(self, dict_=None):
    """Merge *dict_* into this object via ``update`` and return self.

    When *dict_* is None, the object's own ``__dict__`` is used instead
    (not in use so far, see check()).
    """
    if dict_ is None:
        # doesn't work anymore as we have _lock attribute
        dict_ = getattr(self, '__dict__', None)
    if dict_ is None:
        return self
    self.update(dict_)
    return self
return self | def function[merge, parameter[self, dict_]]:
constant[not is use so far, see check()]
if <ast.BoolOp object at 0x7da1b0b418a0> begin[:]
variable[dict_] assign[=] name[self].__dict__
if compare[name[dict_] is constant[None]] begin[:]
return[name[self]]
call[name[self].update, parameter[name[dict_]]]
return[name[self]] | keyword[def] identifier[merge] ( identifier[self] , identifier[dict_] = keyword[None] ):
literal[string]
keyword[if] identifier[dict_] keyword[is] keyword[None] keyword[and] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[dict_] = identifier[self] . identifier[__dict__]
keyword[if] identifier[dict_] keyword[is] keyword[None] :
keyword[return] identifier[self]
identifier[self] . identifier[update] ( identifier[dict_] )
keyword[return] identifier[self] | def merge(self, dict_=None):
"""not is use so far, see check()"""
if dict_ is None and hasattr(self, '__dict__'):
dict_ = self.__dict__ # depends on [control=['if'], data=[]]
# doesn't work anymore as we have _lock attribute
if dict_ is None:
return self # depends on [control=['if'], data=[]]
self.update(dict_)
return self |
def trim_key(self, name, min_version, mount_point=DEFAULT_MOUNT_POINT):
    """Trims older key versions setting a minimum version for the keyring.
    Once trimmed, previous versions of the key cannot be recovered.
    Supported methods:
        POST: /{mount_point}/keys/{name}/trim. Produces: 200 application/json
    :param name: Specifies the name of the key to be trimmed.
    :type name: str | unicode
    :param min_version: The minimum version for the key ring. All versions before this version will be permanently
        deleted. This value can at most be equal to the lesser of min_decryption_version and min_encryption_version.
        This is not allowed to be set when either min_encryption_version or min_decryption_version is set to zero.
    :type min_version: int
    :param mount_point: The "path" the method/backend was mounted on.
    :type mount_point: str | unicode
    :return: The response of the request.
    :rtype: requests.Response
    """
    api_path = '/v1/{mount_point}/keys/{name}/trim'.format(
        mount_point=mount_point,
        name=name,
    )
    # The API expects the cutoff under the 'min_available_version' key.
    return self._adapter.post(
        url=api_path,
        json={'min_available_version': min_version},
    )
constant[Trims older key versions setting a minimum version for the keyring.
Once trimmed, previous versions of the key cannot be recovered.
Supported methods:
POST: /{mount_point}/keys/{name}/trim. Produces: 200 application/json
:param name: Specifies the name of the key to be trimmed.
:type name: str | unicode
:param min_version: The minimum version for the key ring. All versions before this version will be permanently
deleted. This value can at most be equal to the lesser of min_decryption_version and min_encryption_version.
This is not allowed to be set when either min_encryption_version or min_decryption_version is set to zero.
:type min_version: int
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The response of the request.
:rtype: requests.Response
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da20e9611b0>], [<ast.Name object at 0x7da20e960f40>]]
variable[api_path] assign[=] call[constant[/v1/{mount_point}/keys/{name}/trim].format, parameter[]]
return[call[name[self]._adapter.post, parameter[]]] | keyword[def] identifier[trim_key] ( identifier[self] , identifier[name] , identifier[min_version] , identifier[mount_point] = identifier[DEFAULT_MOUNT_POINT] ):
literal[string]
identifier[params] ={
literal[string] : identifier[min_version] ,
}
identifier[api_path] = literal[string] . identifier[format] (
identifier[mount_point] = identifier[mount_point] ,
identifier[name] = identifier[name] ,
)
keyword[return] identifier[self] . identifier[_adapter] . identifier[post] (
identifier[url] = identifier[api_path] ,
identifier[json] = identifier[params] ,
) | def trim_key(self, name, min_version, mount_point=DEFAULT_MOUNT_POINT):
"""Trims older key versions setting a minimum version for the keyring.
Once trimmed, previous versions of the key cannot be recovered.
Supported methods:
POST: /{mount_point}/keys/{name}/trim. Produces: 200 application/json
:param name: Specifies the name of the key to be trimmed.
:type name: str | unicode
:param min_version: The minimum version for the key ring. All versions before this version will be permanently
deleted. This value can at most be equal to the lesser of min_decryption_version and min_encryption_version.
This is not allowed to be set when either min_encryption_version or min_decryption_version is set to zero.
:type min_version: int
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The response of the request.
:rtype: requests.Response
"""
params = {'min_available_version': min_version}
api_path = '/v1/{mount_point}/keys/{name}/trim'.format(mount_point=mount_point, name=name)
return self._adapter.post(url=api_path, json=params) |
def logical_raid_levels(self):
    """Collect the RAID level of every logical volume.
    :returns the set of list of raid levels configured.
    """
    return {mappings.RAID_LEVEL_MAP_REV.get(volume.raid)
            for volume in self.get_members()}
return lg_raid_lvls | def function[logical_raid_levels, parameter[self]]:
constant[Gets the raid level for each logical volume
:returns the set of list of raid levels configured.
]
variable[lg_raid_lvls] assign[=] call[name[set], parameter[]]
for taget[name[member]] in starred[call[name[self].get_members, parameter[]]] begin[:]
call[name[lg_raid_lvls].add, parameter[call[name[mappings].RAID_LEVEL_MAP_REV.get, parameter[name[member].raid]]]]
return[name[lg_raid_lvls]] | keyword[def] identifier[logical_raid_levels] ( identifier[self] ):
literal[string]
identifier[lg_raid_lvls] = identifier[set] ()
keyword[for] identifier[member] keyword[in] identifier[self] . identifier[get_members] ():
identifier[lg_raid_lvls] . identifier[add] ( identifier[mappings] . identifier[RAID_LEVEL_MAP_REV] . identifier[get] ( identifier[member] . identifier[raid] ))
keyword[return] identifier[lg_raid_lvls] | def logical_raid_levels(self):
"""Gets the raid level for each logical volume
:returns the set of list of raid levels configured.
"""
lg_raid_lvls = set()
for member in self.get_members():
lg_raid_lvls.add(mappings.RAID_LEVEL_MAP_REV.get(member.raid)) # depends on [control=['for'], data=['member']]
return lg_raid_lvls |
def process_nxml(nxml_filename, pmid=None, extra_annotations=None,
                 cleanup=True, add_grounding=True):
    """Process an NXML file using the ISI reader
    First converts NXML to plain text and preprocesses it, then runs the ISI
    reader, and processes the output to extract INDRA Statements.
    Parameters
    ----------
    nxml_filename : str
        nxml file to process
    pmid : Optional[str]
        pmid of this nxml file, to be added to the Evidence object of the
        extracted INDRA statements
    extra_annotations : Optional[dict]
        Additional annotations to add to the Evidence object of all extracted
        INDRA statements. Extra annotations called 'interaction' are ignored
        since this is used by the processor to store the corresponding
        raw ISI output.
    cleanup : Optional[bool]
        If True, the temporary folders created for preprocessed reading input
        and output are removed. Default: True
    add_grounding : Optional[bool]
        If True the extracted Statements' grounding is mapped
    Returns
    -------
    ip : indra.sources.isi.processor.IsiProcessor
        A processor containing extracted Statements
    """
    if extra_annotations is None:
        extra_annotations = {}
    # Create a temporary directory to store the preprocessed input
    pp_dir = tempfile.mkdtemp('indra_isi_pp_output')
    pp = IsiPreprocessor(pp_dir)
    # BUG FIX: extra_annotations was previously reset to {} here
    # unconditionally, which silently discarded any caller-supplied
    # annotations; the parameter is now passed through as documented.
    pp.preprocess_nxml_file(nxml_filename, pmid, extra_annotations)
    # Run the ISI reader and extract statements
    ip = process_preprocessed(pp)
    if add_grounding:
        ip.add_grounding()
    if cleanup:
        # Remove temporary directory with processed input
        shutil.rmtree(pp_dir)
    else:
        logger.info('Not cleaning up %s' % pp_dir)
    return ip
return ip | def function[process_nxml, parameter[nxml_filename, pmid, extra_annotations, cleanup, add_grounding]]:
constant[Process an NXML file using the ISI reader
First converts NXML to plain text and preprocesses it, then runs the ISI
reader, and processes the output to extract INDRA Statements.
Parameters
----------
nxml_filename : str
nxml file to process
pmid : Optional[str]
pmid of this nxml file, to be added to the Evidence object of the
extracted INDRA statements
extra_annotations : Optional[dict]
Additional annotations to add to the Evidence object of all extracted
INDRA statements. Extra annotations called 'interaction' are ignored
since this is used by the processor to store the corresponding
raw ISI output.
cleanup : Optional[bool]
If True, the temporary folders created for preprocessed reading input
and output are removed. Default: True
add_grounding : Optional[bool]
If True the extracted Statements' grounding is mapped
Returns
-------
ip : indra.sources.isi.processor.IsiProcessor
A processor containing extracted Statements
]
if compare[name[extra_annotations] is constant[None]] begin[:]
variable[extra_annotations] assign[=] dictionary[[], []]
variable[pp_dir] assign[=] call[name[tempfile].mkdtemp, parameter[constant[indra_isi_pp_output]]]
variable[pp] assign[=] call[name[IsiPreprocessor], parameter[name[pp_dir]]]
variable[extra_annotations] assign[=] dictionary[[], []]
call[name[pp].preprocess_nxml_file, parameter[name[nxml_filename], name[pmid], name[extra_annotations]]]
variable[ip] assign[=] call[name[process_preprocessed], parameter[name[pp]]]
if name[add_grounding] begin[:]
call[name[ip].add_grounding, parameter[]]
if name[cleanup] begin[:]
call[name[shutil].rmtree, parameter[name[pp_dir]]]
return[name[ip]] | keyword[def] identifier[process_nxml] ( identifier[nxml_filename] , identifier[pmid] = keyword[None] , identifier[extra_annotations] = keyword[None] ,
identifier[cleanup] = keyword[True] , identifier[add_grounding] = keyword[True] ):
literal[string]
keyword[if] identifier[extra_annotations] keyword[is] keyword[None] :
identifier[extra_annotations] ={}
identifier[pp_dir] = identifier[tempfile] . identifier[mkdtemp] ( literal[string] )
identifier[pp] = identifier[IsiPreprocessor] ( identifier[pp_dir] )
identifier[extra_annotations] ={}
identifier[pp] . identifier[preprocess_nxml_file] ( identifier[nxml_filename] , identifier[pmid] , identifier[extra_annotations] )
identifier[ip] = identifier[process_preprocessed] ( identifier[pp] )
keyword[if] identifier[add_grounding] :
identifier[ip] . identifier[add_grounding] ()
keyword[if] identifier[cleanup] :
identifier[shutil] . identifier[rmtree] ( identifier[pp_dir] )
keyword[else] :
identifier[logger] . identifier[info] ( literal[string] % identifier[pp_dir] )
keyword[return] identifier[ip] | def process_nxml(nxml_filename, pmid=None, extra_annotations=None, cleanup=True, add_grounding=True):
"""Process an NXML file using the ISI reader
First converts NXML to plain text and preprocesses it, then runs the ISI
reader, and processes the output to extract INDRA Statements.
Parameters
----------
nxml_filename : str
nxml file to process
pmid : Optional[str]
pmid of this nxml file, to be added to the Evidence object of the
extracted INDRA statements
extra_annotations : Optional[dict]
Additional annotations to add to the Evidence object of all extracted
INDRA statements. Extra annotations called 'interaction' are ignored
since this is used by the processor to store the corresponding
raw ISI output.
cleanup : Optional[bool]
If True, the temporary folders created for preprocessed reading input
and output are removed. Default: True
add_grounding : Optional[bool]
If True the extracted Statements' grounding is mapped
Returns
-------
ip : indra.sources.isi.processor.IsiProcessor
A processor containing extracted Statements
"""
if extra_annotations is None:
extra_annotations = {} # depends on [control=['if'], data=['extra_annotations']]
# Create a temporary directory to store the proprocessed input
pp_dir = tempfile.mkdtemp('indra_isi_pp_output')
pp = IsiPreprocessor(pp_dir)
extra_annotations = {}
pp.preprocess_nxml_file(nxml_filename, pmid, extra_annotations)
# Run the ISI reader and extract statements
ip = process_preprocessed(pp)
if add_grounding:
ip.add_grounding() # depends on [control=['if'], data=[]]
if cleanup:
# Remove temporary directory with processed input
shutil.rmtree(pp_dir) # depends on [control=['if'], data=[]]
else:
logger.info('Not cleaning up %s' % pp_dir)
return ip |
def scheme_host_port_prefix(self, scheme='http', host='host',
                            port=None, prefix=None):
    """Build a URI from scheme, host, optional port, and optional prefix.
    The port is omitted when it is the default for the scheme
    (80 for http, 443 for https).
    """
    parts = [scheme, '://', host]
    is_default_port = ((scheme == 'http' and port == 80) or
                       (scheme == 'https' and port == 443))
    if port and not is_default_port:
        parts.append(':' + str(port))
    if prefix:
        parts.append('/' + prefix)
    return ''.join(parts)
return uri | def function[scheme_host_port_prefix, parameter[self, scheme, host, port, prefix]]:
constant[Return URI composed of scheme, server, port, and prefix.]
variable[uri] assign[=] binary_operation[binary_operation[name[scheme] + constant[://]] + name[host]]
if <ast.BoolOp object at 0x7da18bc72470> begin[:]
<ast.AugAssign object at 0x7da18bc721d0>
if name[prefix] begin[:]
<ast.AugAssign object at 0x7da18bc72aa0>
return[name[uri]] | keyword[def] identifier[scheme_host_port_prefix] ( identifier[self] , identifier[scheme] = literal[string] , identifier[host] = literal[string] ,
identifier[port] = keyword[None] , identifier[prefix] = keyword[None] ):
literal[string]
identifier[uri] = identifier[scheme] + literal[string] + identifier[host]
keyword[if] ( identifier[port] keyword[and] keyword[not] (( identifier[scheme] == literal[string] keyword[and] identifier[port] == literal[int] ) keyword[or]
( identifier[scheme] == literal[string] keyword[and] identifier[port] == literal[int] ))):
identifier[uri] += literal[string] + identifier[str] ( identifier[port] )
keyword[if] ( identifier[prefix] ):
identifier[uri] += literal[string] + identifier[prefix]
keyword[return] identifier[uri] | def scheme_host_port_prefix(self, scheme='http', host='host', port=None, prefix=None):
"""Return URI composed of scheme, server, port, and prefix."""
uri = scheme + '://' + host
if port and (not (scheme == 'http' and port == 80 or (scheme == 'https' and port == 443))):
uri += ':' + str(port) # depends on [control=['if'], data=[]]
if prefix:
uri += '/' + prefix # depends on [control=['if'], data=[]]
return uri |
def sort_together(iterables, key_list=(0,), reverse=False):
    """Sort several parallel iterables together, like spreadsheet columns.
    *key_list* gives the indices of the iterables used as sort keys, in
    priority order; all iterables are trimmed to the shortest one's length.
    By default the ``0``-th iterable drives the ordering::
        >>> iterables = [(4, 3, 2, 1), ('a', 'b', 'c', 'd')]
        >>> sort_together(iterables)
        [(1, 2, 3, 4), ('d', 'c', 'b', 'a')]
    Multiple keys break ties in order::
        >>> iterables = [(3, 1, 2), (0, 1, 0), ('c', 'b', 'a')]
        >>> sort_together(iterables, key_list=(1, 2))
        [(2, 3, 1), (0, 0, 1), ('a', 'c', 'b')]
    *reverse* sorts in descending order::
        >>> sort_together([(1, 2, 3), ('c', 'b', 'a')], reverse=True)
        [(3, 2, 1), ('a', 'b', 'c')]
    """
    # Transpose to rows, sort the rows, transpose back to columns.
    rows = zip(*iterables)
    ordered = sorted(rows, key=itemgetter(*key_list), reverse=reverse)
    return list(zip(*ordered))
reverse=reverse))) | def function[sort_together, parameter[iterables, key_list, reverse]]:
constant[Return the input iterables sorted together, with *key_list* as the
priority for sorting. All iterables are trimmed to the length of the
shortest one.
This can be used like the sorting function in a spreadsheet. If each
iterable represents a column of data, the key list determines which
columns are used for sorting.
By default, all iterables are sorted using the ``0``-th iterable::
>>> iterables = [(4, 3, 2, 1), ('a', 'b', 'c', 'd')]
>>> sort_together(iterables)
[(1, 2, 3, 4), ('d', 'c', 'b', 'a')]
Set a different key list to sort according to another iterable.
Specifying multiple keys dictates how ties are broken::
>>> iterables = [(3, 1, 2), (0, 1, 0), ('c', 'b', 'a')]
>>> sort_together(iterables, key_list=(1, 2))
[(2, 3, 1), (0, 0, 1), ('a', 'c', 'b')]
Set *reverse* to ``True`` to sort in descending order.
>>> sort_together([(1, 2, 3), ('c', 'b', 'a')], reverse=True)
[(3, 2, 1), ('a', 'b', 'c')]
]
return[call[name[list], parameter[call[name[zip], parameter[<ast.Starred object at 0x7da1b1da2590>]]]]] | keyword[def] identifier[sort_together] ( identifier[iterables] , identifier[key_list] =( literal[int] ,), identifier[reverse] = keyword[False] ):
literal[string]
keyword[return] identifier[list] ( identifier[zip] (* identifier[sorted] ( identifier[zip] (* identifier[iterables] ),
identifier[key] = identifier[itemgetter] (* identifier[key_list] ),
identifier[reverse] = identifier[reverse] ))) | def sort_together(iterables, key_list=(0,), reverse=False):
"""Return the input iterables sorted together, with *key_list* as the
priority for sorting. All iterables are trimmed to the length of the
shortest one.
This can be used like the sorting function in a spreadsheet. If each
iterable represents a column of data, the key list determines which
columns are used for sorting.
By default, all iterables are sorted using the ``0``-th iterable::
>>> iterables = [(4, 3, 2, 1), ('a', 'b', 'c', 'd')]
>>> sort_together(iterables)
[(1, 2, 3, 4), ('d', 'c', 'b', 'a')]
Set a different key list to sort according to another iterable.
Specifying multiple keys dictates how ties are broken::
>>> iterables = [(3, 1, 2), (0, 1, 0), ('c', 'b', 'a')]
>>> sort_together(iterables, key_list=(1, 2))
[(2, 3, 1), (0, 0, 1), ('a', 'c', 'b')]
Set *reverse* to ``True`` to sort in descending order.
>>> sort_together([(1, 2, 3), ('c', 'b', 'a')], reverse=True)
[(3, 2, 1), ('a', 'b', 'c')]
"""
return list(zip(*sorted(zip(*iterables), key=itemgetter(*key_list), reverse=reverse))) |
def choose_raw_dataset(currently=""):
    """Prompt the user to pick one of the raw datasets on disk.
    Return the absolute path of the chosen ``.pickle`` file; the entry
    matching *currently* (by basename) is highlighted and used as the
    default choice.
    """
    folder = os.path.join(get_project_root(), "raw-datasets")
    files = [os.path.join(folder, name)
             for name in os.listdir(folder)
             if name.endswith(".pickle")]
    default = -1
    # Single pass: a matching entry becomes the default and is printed in
    # bold; every other entry is printed plainly.
    for idx, path in enumerate(files):
        base = os.path.basename(path)
        if base == os.path.basename(currently):
            default = idx
        if idx == default:
            print("\033[1m[%i]\033[0m\t%s" % (idx, base))
        else:
            print("[%i]\t%s" % (idx, base))
    choice = input_int_default("Choose a dataset by number: ", default)
    return files[choice]
return files[i] | def function[choose_raw_dataset, parameter[currently]]:
constant[Let the user choose a raw dataset. Return the absolute path.]
variable[folder] assign[=] call[name[os].path.join, parameter[call[name[get_project_root], parameter[]], constant[raw-datasets]]]
variable[files] assign[=] <ast.ListComp object at 0x7da1b2846ce0>
variable[default] assign[=] <ast.UnaryOp object at 0x7da1b2847160>
for taget[tuple[[<ast.Name object at 0x7da1b2846f20>, <ast.Name object at 0x7da1b2844ac0>]]] in starred[call[name[enumerate], parameter[name[files]]]] begin[:]
if compare[call[name[os].path.basename, parameter[name[currently]]] equal[==] call[name[os].path.basename, parameter[name[filename]]]] begin[:]
variable[default] assign[=] name[i]
if compare[name[i] not_equal[!=] name[default]] begin[:]
call[name[print], parameter[binary_operation[constant[[%i] %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b2844640>, <ast.Call object at 0x7da1b2846920>]]]]]
variable[i] assign[=] call[name[input_int_default], parameter[constant[Choose a dataset by number: ], name[default]]]
return[call[name[files]][name[i]]] | keyword[def] identifier[choose_raw_dataset] ( identifier[currently] = literal[string] ):
literal[string]
identifier[folder] = identifier[os] . identifier[path] . identifier[join] ( identifier[get_project_root] (), literal[string] )
identifier[files] =[ identifier[os] . identifier[path] . identifier[join] ( identifier[folder] , identifier[name] ) keyword[for] identifier[name] keyword[in] identifier[os] . identifier[listdir] ( identifier[folder] )
keyword[if] identifier[name] . identifier[endswith] ( literal[string] )]
identifier[default] =- literal[int]
keyword[for] identifier[i] , identifier[filename] keyword[in] identifier[enumerate] ( identifier[files] ):
keyword[if] identifier[os] . identifier[path] . identifier[basename] ( identifier[currently] )== identifier[os] . identifier[path] . identifier[basename] ( identifier[filename] ):
identifier[default] = identifier[i]
keyword[if] identifier[i] != identifier[default] :
identifier[print] ( literal[string] %( identifier[i] , identifier[os] . identifier[path] . identifier[basename] ( identifier[filename] )))
keyword[else] :
identifier[print] ( literal[string] %( identifier[i] , identifier[os] . identifier[path] . identifier[basename] ( identifier[filename] )))
identifier[i] = identifier[input_int_default] ( literal[string] , identifier[default] )
keyword[return] identifier[files] [ identifier[i] ] | def choose_raw_dataset(currently=''):
"""Let the user choose a raw dataset. Return the absolute path."""
folder = os.path.join(get_project_root(), 'raw-datasets')
files = [os.path.join(folder, name) for name in os.listdir(folder) if name.endswith('.pickle')]
default = -1
for (i, filename) in enumerate(files):
if os.path.basename(currently) == os.path.basename(filename):
default = i # depends on [control=['if'], data=[]]
if i != default:
print('[%i]\t%s' % (i, os.path.basename(filename))) # depends on [control=['if'], data=['i']]
else:
print('\x1b[1m[%i]\x1b[0m\t%s' % (i, os.path.basename(filename))) # depends on [control=['for'], data=[]]
i = input_int_default('Choose a dataset by number: ', default)
return files[i] |
def get_unit_property_names(self, unit_id=None):
    """Return the property names for one unit, or for all units.
    Parameters
    ----------
    unit_id: int
        The unit id for which the property names will be returned.
        If None (default), property names for all units are returned.
    Returns
    ----------
    property_names
        Sorted list of property names from the specified unit(s)
    """
    if unit_id is None:
        # Aggregate the (deduplicated) names over every unit.
        all_names = set()
        for uid in self.get_unit_ids():
            all_names.update(self.get_unit_property_names(uid))
        return sorted(all_names)
    if not isinstance(unit_id, (int, np.integer)):
        raise ValueError(str(unit_id) + " must be an int")
    if unit_id not in self.get_unit_ids():
        raise ValueError(str(unit_id) + " is not a valid unit_id")
    # Lazily create the per-unit property dict on first access.
    self._unit_properties.setdefault(unit_id, {})
    return sorted(self._unit_properties[unit_id].keys())
raise ValueError(str(unit_id) + " must be an int") | def function[get_unit_property_names, parameter[self, unit_id]]:
constant[Get a list of property names for a given unit, or for all units if unit_id is None
Parameters
----------
unit_id: int
The unit id for which the property names will be returned
If None (default), will return property names for all units
Returns
----------
property_names
The list of property names from the specified unit(s)
]
if compare[name[unit_id] is constant[None]] begin[:]
variable[property_names] assign[=] list[[]]
for taget[name[unit_id]] in starred[call[name[self].get_unit_ids, parameter[]]] begin[:]
variable[curr_property_names] assign[=] call[name[self].get_unit_property_names, parameter[name[unit_id]]]
for taget[name[curr_property_name]] in starred[name[curr_property_names]] begin[:]
call[name[property_names].append, parameter[name[curr_property_name]]]
variable[property_names] assign[=] call[name[sorted], parameter[call[name[list], parameter[call[name[set], parameter[name[property_names]]]]]]]
return[name[property_names]]
if call[name[isinstance], parameter[name[unit_id], tuple[[<ast.Name object at 0x7da1b0bae3e0>, <ast.Attribute object at 0x7da1b0bad8a0>]]]] begin[:]
if compare[name[unit_id] in call[name[self].get_unit_ids, parameter[]]] begin[:]
if compare[name[unit_id] <ast.NotIn object at 0x7da2590d7190> name[self]._unit_properties] begin[:]
call[name[self]._unit_properties][name[unit_id]] assign[=] dictionary[[], []]
variable[property_names] assign[=] call[name[sorted], parameter[call[call[name[self]._unit_properties][name[unit_id]].keys, parameter[]]]]
return[name[property_names]] | keyword[def] identifier[get_unit_property_names] ( identifier[self] , identifier[unit_id] = keyword[None] ):
literal[string]
keyword[if] identifier[unit_id] keyword[is] keyword[None] :
identifier[property_names] =[]
keyword[for] identifier[unit_id] keyword[in] identifier[self] . identifier[get_unit_ids] ():
identifier[curr_property_names] = identifier[self] . identifier[get_unit_property_names] ( identifier[unit_id] )
keyword[for] identifier[curr_property_name] keyword[in] identifier[curr_property_names] :
identifier[property_names] . identifier[append] ( identifier[curr_property_name] )
identifier[property_names] = identifier[sorted] ( identifier[list] ( identifier[set] ( identifier[property_names] )))
keyword[return] identifier[property_names]
keyword[if] identifier[isinstance] ( identifier[unit_id] ,( identifier[int] , identifier[np] . identifier[integer] )):
keyword[if] identifier[unit_id] keyword[in] identifier[self] . identifier[get_unit_ids] ():
keyword[if] identifier[unit_id] keyword[not] keyword[in] identifier[self] . identifier[_unit_properties] :
identifier[self] . identifier[_unit_properties] [ identifier[unit_id] ]={}
identifier[property_names] = identifier[sorted] ( identifier[self] . identifier[_unit_properties] [ identifier[unit_id] ]. identifier[keys] ())
keyword[return] identifier[property_names]
keyword[else] :
keyword[raise] identifier[ValueError] ( identifier[str] ( identifier[unit_id] )+ literal[string] )
keyword[else] :
keyword[raise] identifier[ValueError] ( identifier[str] ( identifier[unit_id] )+ literal[string] ) | def get_unit_property_names(self, unit_id=None):
"""Get a list of property names for a given unit, or for all units if unit_id is None
Parameters
----------
unit_id: int
The unit id for which the property names will be returned
If None (default), will return property names for all units
Returns
----------
property_names
The list of property names from the specified unit(s)
"""
if unit_id is None:
property_names = []
for unit_id in self.get_unit_ids():
curr_property_names = self.get_unit_property_names(unit_id)
for curr_property_name in curr_property_names:
property_names.append(curr_property_name) # depends on [control=['for'], data=['curr_property_name']] # depends on [control=['for'], data=['unit_id']]
property_names = sorted(list(set(property_names)))
return property_names # depends on [control=['if'], data=['unit_id']]
if isinstance(unit_id, (int, np.integer)):
if unit_id in self.get_unit_ids():
if unit_id not in self._unit_properties:
self._unit_properties[unit_id] = {} # depends on [control=['if'], data=['unit_id']]
property_names = sorted(self._unit_properties[unit_id].keys())
return property_names # depends on [control=['if'], data=['unit_id']]
else:
raise ValueError(str(unit_id) + ' is not a valid unit_id') # depends on [control=['if'], data=[]]
else:
raise ValueError(str(unit_id) + ' must be an int') |
def _preprocess(text, tab=4):
    """Normalize a text: unify line endings, expand tabs, replace special
    whitespace characters, blank out space-only lines, and strip trailing
    whitespace from each line."""
    # Convert CRLF and lone CR to LF.
    text = re.sub(r'\r\n|\r', '\n', text)
    for old, new in (('\t', ' ' * tab),
                     ('\u00a0', ' '),
                     ('\u2424', '\n')):
        text = text.replace(old, new)
    # Empty out lines that consist solely of spaces.
    text = re.sub(r'^ +$', '', text, flags=re.M)
    return _rstrip_lines(text)
return text | def function[_preprocess, parameter[text, tab]]:
constant[Normalize a text.]
variable[text] assign[=] call[name[re].sub, parameter[constant[\r\n|\r], constant[
], name[text]]]
variable[text] assign[=] call[name[text].replace, parameter[constant[ ], binary_operation[constant[ ] * name[tab]]]]
variable[text] assign[=] call[name[text].replace, parameter[constant[ ], constant[ ]]]
variable[text] assign[=] call[name[text].replace, parameter[constant[], constant[
]]]
variable[pattern] assign[=] call[name[re].compile, parameter[constant[^ +$], name[re].M]]
variable[text] assign[=] call[name[pattern].sub, parameter[constant[], name[text]]]
variable[text] assign[=] call[name[_rstrip_lines], parameter[name[text]]]
return[name[text]] | keyword[def] identifier[_preprocess] ( identifier[text] , identifier[tab] = literal[int] ):
literal[string]
identifier[text] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[text] )
identifier[text] = identifier[text] . identifier[replace] ( literal[string] , literal[string] * identifier[tab] )
identifier[text] = identifier[text] . identifier[replace] ( literal[string] , literal[string] )
identifier[text] = identifier[text] . identifier[replace] ( literal[string] , literal[string] )
identifier[pattern] = identifier[re] . identifier[compile] ( literal[string] , identifier[re] . identifier[M] )
identifier[text] = identifier[pattern] . identifier[sub] ( literal[string] , identifier[text] )
identifier[text] = identifier[_rstrip_lines] ( identifier[text] )
keyword[return] identifier[text] | def _preprocess(text, tab=4):
"""Normalize a text."""
text = re.sub('\\r\\n|\\r', '\n', text)
text = text.replace('\t', ' ' * tab)
text = text.replace('\xa0', ' ')
text = text.replace('', '\n')
pattern = re.compile('^ +$', re.M)
text = pattern.sub('', text)
text = _rstrip_lines(text)
return text |
def tilemap(self, query, styles={}, bbox=[-180,-90,180,90], zoom=16,
            api_key=os.environ.get('MAPBOX_API_KEY', None),
            image=None, image_bounds=None,
            index="vector-user-provided", name="GBDX_Task_Output", **kwargs):
    """
    Renders a mapbox gl map from a vector service query
    Queries the vector service for features inside *bbox*, centers the map
    on the centroid of their union, and injects a Mapbox GL map (as a
    vector-tile layer, optionally over an image layer) into the notebook.
    Requires IPython and a Mapbox API key (argument or MAPBOX_API_KEY env
    var).
    NOTE(review): mutable defaults ``styles={}`` and ``bbox=[...]`` are
    shared across calls; ``styles`` is not mutated here, but this is fragile.
    """
    try:
        from IPython.display import display
    except:
        # NOTE(review): bare except and unused `display` import — this is an
        # availability probe for IPython; `except ImportError` would be safer.
        print("IPython is required to produce maps.")
        return
    assert api_key is not None, "No Mapbox API Key found. You can either pass in a token or set the MAPBOX_API_KEY environment variable."
    # Query all features within the bounding box (WKT polygon).
    wkt = box(*bbox).wkt
    features = self.query(wkt, query, index=index)
    # Center the map on the centroid of the union of all feature geometries.
    union = cascaded_union([shape(f['geometry']) for f in features])
    lon, lat = union.centroid.coords[0]
    # Vector-tile endpoint template; {z}/{x}/{y} are filled in client-side.
    url = 'https://vector.geobigdata.io/insight-vector/api/mvt/{z}/{x}/{y}?';
    url += 'q={}&index={}'.format(query, index);
    # Normalize a single style dict into a one-element list.
    if styles is not None and not isinstance(styles, list):
        styles = [styles]
    # Unique DOM id per call so multiple maps can coexist in one notebook.
    map_id = "map_{}".format(str(int(time.time())))
    map_data = VectorTileLayer(url, source_name=name, styles=styles, **kwargs)
    image_layer = self._build_image_layer(image, image_bounds)
    # Render the HTML/JS template and inject it into the notebook output.
    # NOTE(review): the GBDX access token is embedded in the rendered page.
    template = BaseTemplate(map_id, **{
        "lat": lat,
        "lon": lon,
        "zoom": zoom,
        "datasource": json.dumps(map_data.datasource),
        "layers": json.dumps(map_data.layers),
        "image_layer": image_layer,
        "mbkey": api_key,
        "token": self.gbdx_connection.access_token
    })
    template.inject()
template.inject() | def function[tilemap, parameter[self, query, styles, bbox, zoom, api_key, image, image_bounds, index, name]]:
constant[
Renders a mapbox gl map from a vector service query
]
<ast.Try object at 0x7da1b0004760>
assert[compare[name[api_key] is_not constant[None]]]
variable[wkt] assign[=] call[name[box], parameter[<ast.Starred object at 0x7da1b0004df0>]].wkt
variable[features] assign[=] call[name[self].query, parameter[name[wkt], name[query]]]
variable[union] assign[=] call[name[cascaded_union], parameter[<ast.ListComp object at 0x7da1b0004f40>]]
<ast.Tuple object at 0x7da1b0005f60> assign[=] call[name[union].centroid.coords][constant[0]]
variable[url] assign[=] constant[https://vector.geobigdata.io/insight-vector/api/mvt/{z}/{x}/{y}?]
<ast.AugAssign object at 0x7da1b0005d80>
if <ast.BoolOp object at 0x7da1b0005bd0> begin[:]
variable[styles] assign[=] list[[<ast.Name object at 0x7da1b0010bb0>]]
variable[map_id] assign[=] call[constant[map_{}].format, parameter[call[name[str], parameter[call[name[int], parameter[call[name[time].time, parameter[]]]]]]]]
variable[map_data] assign[=] call[name[VectorTileLayer], parameter[name[url]]]
variable[image_layer] assign[=] call[name[self]._build_image_layer, parameter[name[image], name[image_bounds]]]
variable[template] assign[=] call[name[BaseTemplate], parameter[name[map_id]]]
call[name[template].inject, parameter[]] | keyword[def] identifier[tilemap] ( identifier[self] , identifier[query] , identifier[styles] ={}, identifier[bbox] =[- literal[int] ,- literal[int] , literal[int] , literal[int] ], identifier[zoom] = literal[int] ,
identifier[api_key] = identifier[os] . identifier[environ] . identifier[get] ( literal[string] , keyword[None] ),
identifier[image] = keyword[None] , identifier[image_bounds] = keyword[None] ,
identifier[index] = literal[string] , identifier[name] = literal[string] ,** identifier[kwargs] ):
literal[string]
keyword[try] :
keyword[from] identifier[IPython] . identifier[display] keyword[import] identifier[display]
keyword[except] :
identifier[print] ( literal[string] )
keyword[return]
keyword[assert] identifier[api_key] keyword[is] keyword[not] keyword[None] , literal[string]
identifier[wkt] = identifier[box] (* identifier[bbox] ). identifier[wkt]
identifier[features] = identifier[self] . identifier[query] ( identifier[wkt] , identifier[query] , identifier[index] = identifier[index] )
identifier[union] = identifier[cascaded_union] ([ identifier[shape] ( identifier[f] [ literal[string] ]) keyword[for] identifier[f] keyword[in] identifier[features] ])
identifier[lon] , identifier[lat] = identifier[union] . identifier[centroid] . identifier[coords] [ literal[int] ]
identifier[url] = literal[string] ;
identifier[url] += literal[string] . identifier[format] ( identifier[query] , identifier[index] );
keyword[if] identifier[styles] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[isinstance] ( identifier[styles] , identifier[list] ):
identifier[styles] =[ identifier[styles] ]
identifier[map_id] = literal[string] . identifier[format] ( identifier[str] ( identifier[int] ( identifier[time] . identifier[time] ())))
identifier[map_data] = identifier[VectorTileLayer] ( identifier[url] , identifier[source_name] = identifier[name] , identifier[styles] = identifier[styles] ,** identifier[kwargs] )
identifier[image_layer] = identifier[self] . identifier[_build_image_layer] ( identifier[image] , identifier[image_bounds] )
identifier[template] = identifier[BaseTemplate] ( identifier[map_id] ,**{
literal[string] : identifier[lat] ,
literal[string] : identifier[lon] ,
literal[string] : identifier[zoom] ,
literal[string] : identifier[json] . identifier[dumps] ( identifier[map_data] . identifier[datasource] ),
literal[string] : identifier[json] . identifier[dumps] ( identifier[map_data] . identifier[layers] ),
literal[string] : identifier[image_layer] ,
literal[string] : identifier[api_key] ,
literal[string] : identifier[self] . identifier[gbdx_connection] . identifier[access_token]
})
identifier[template] . identifier[inject] () | def tilemap(self, query, styles={}, bbox=[-180, -90, 180, 90], zoom=16, api_key=os.environ.get('MAPBOX_API_KEY', None), image=None, image_bounds=None, index='vector-user-provided', name='GBDX_Task_Output', **kwargs):
"""
Renders a mapbox gl map from a vector service query
"""
try:
from IPython.display import display # depends on [control=['try'], data=[]]
except:
print('IPython is required to produce maps.')
return # depends on [control=['except'], data=[]]
assert api_key is not None, 'No Mapbox API Key found. You can either pass in a token or set the MAPBOX_API_KEY environment variable.'
wkt = box(*bbox).wkt
features = self.query(wkt, query, index=index)
union = cascaded_union([shape(f['geometry']) for f in features])
(lon, lat) = union.centroid.coords[0]
url = 'https://vector.geobigdata.io/insight-vector/api/mvt/{z}/{x}/{y}?'
url += 'q={}&index={}'.format(query, index)
if styles is not None and (not isinstance(styles, list)):
styles = [styles] # depends on [control=['if'], data=[]]
map_id = 'map_{}'.format(str(int(time.time())))
map_data = VectorTileLayer(url, source_name=name, styles=styles, **kwargs)
image_layer = self._build_image_layer(image, image_bounds)
template = BaseTemplate(map_id, **{'lat': lat, 'lon': lon, 'zoom': zoom, 'datasource': json.dumps(map_data.datasource), 'layers': json.dumps(map_data.layers), 'image_layer': image_layer, 'mbkey': api_key, 'token': self.gbdx_connection.access_token})
template.inject() |
def add(self, stat_name, threshold_description):
"""Add a new threshold to the dict (key = stat_name)"""
if threshold_description not in self.threshold_list:
return False
else:
self._thresholds[stat_name] = getattr(self.current_module,
'GlancesThreshold' + threshold_description.capitalize())()
return True | def function[add, parameter[self, stat_name, threshold_description]]:
constant[Add a new threshold to the dict (key = stat_name)]
if compare[name[threshold_description] <ast.NotIn object at 0x7da2590d7190> name[self].threshold_list] begin[:]
return[constant[False]] | keyword[def] identifier[add] ( identifier[self] , identifier[stat_name] , identifier[threshold_description] ):
literal[string]
keyword[if] identifier[threshold_description] keyword[not] keyword[in] identifier[self] . identifier[threshold_list] :
keyword[return] keyword[False]
keyword[else] :
identifier[self] . identifier[_thresholds] [ identifier[stat_name] ]= identifier[getattr] ( identifier[self] . identifier[current_module] ,
literal[string] + identifier[threshold_description] . identifier[capitalize] ())()
keyword[return] keyword[True] | def add(self, stat_name, threshold_description):
"""Add a new threshold to the dict (key = stat_name)"""
if threshold_description not in self.threshold_list:
return False # depends on [control=['if'], data=[]]
else:
self._thresholds[stat_name] = getattr(self.current_module, 'GlancesThreshold' + threshold_description.capitalize())()
return True |
def calculate_integral(self, T1, T2, method):
r'''Method to calculate the integral of a property with respect to
temperature, using a specified method. Implements the analytical
integrals of all available methods except for tabular data.
Parameters
----------
T1 : float
Lower limit of integration, [K]
T2 : float
Upper limit of integration, [K]
method : str
Method for which to find the integral
Returns
-------
integral : float
Calculated integral of the property over the given range,
[`units*K`]
'''
if method == PERRY151:
H2 = (self.PERRY151_const*T2 + 0.5*self.PERRY151_lin*T2**2
- self.PERRY151_quadinv/T2 + self.PERRY151_quad*T2**3/3.)
H1 = (self.PERRY151_const*T1 + 0.5*self.PERRY151_lin*T1**2
- self.PERRY151_quadinv/T1 + self.PERRY151_quad*T1**3/3.)
return (H2-H1)*calorie
elif method == CRCSTD:
return (T2-T1)*self.CRCSTD_Cp
elif method == LASTOVKA_S:
dH = (Lastovka_solid_integral(T2, self.similarity_variable)
- Lastovka_solid_integral(T1, self.similarity_variable))
return property_mass_to_molar(dH, self.MW)
elif method in self.tabular_data:
return float(quad(self.calculate, T1, T2, args=(method))[0])
else:
raise Exception('Method not valid') | def function[calculate_integral, parameter[self, T1, T2, method]]:
constant[Method to calculate the integral of a property with respect to
temperature, using a specified method. Implements the analytical
integrals of all available methods except for tabular data.
Parameters
----------
T1 : float
Lower limit of integration, [K]
T2 : float
Upper limit of integration, [K]
method : str
Method for which to find the integral
Returns
-------
integral : float
Calculated integral of the property over the given range,
[`units*K`]
]
if compare[name[method] equal[==] name[PERRY151]] begin[:]
variable[H2] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[self].PERRY151_const * name[T2]] + binary_operation[binary_operation[constant[0.5] * name[self].PERRY151_lin] * binary_operation[name[T2] ** constant[2]]]] - binary_operation[name[self].PERRY151_quadinv / name[T2]]] + binary_operation[binary_operation[name[self].PERRY151_quad * binary_operation[name[T2] ** constant[3]]] / constant[3.0]]]
variable[H1] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[self].PERRY151_const * name[T1]] + binary_operation[binary_operation[constant[0.5] * name[self].PERRY151_lin] * binary_operation[name[T1] ** constant[2]]]] - binary_operation[name[self].PERRY151_quadinv / name[T1]]] + binary_operation[binary_operation[name[self].PERRY151_quad * binary_operation[name[T1] ** constant[3]]] / constant[3.0]]]
return[binary_operation[binary_operation[name[H2] - name[H1]] * name[calorie]]] | keyword[def] identifier[calculate_integral] ( identifier[self] , identifier[T1] , identifier[T2] , identifier[method] ):
literal[string]
keyword[if] identifier[method] == identifier[PERRY151] :
identifier[H2] =( identifier[self] . identifier[PERRY151_const] * identifier[T2] + literal[int] * identifier[self] . identifier[PERRY151_lin] * identifier[T2] ** literal[int]
- identifier[self] . identifier[PERRY151_quadinv] / identifier[T2] + identifier[self] . identifier[PERRY151_quad] * identifier[T2] ** literal[int] / literal[int] )
identifier[H1] =( identifier[self] . identifier[PERRY151_const] * identifier[T1] + literal[int] * identifier[self] . identifier[PERRY151_lin] * identifier[T1] ** literal[int]
- identifier[self] . identifier[PERRY151_quadinv] / identifier[T1] + identifier[self] . identifier[PERRY151_quad] * identifier[T1] ** literal[int] / literal[int] )
keyword[return] ( identifier[H2] - identifier[H1] )* identifier[calorie]
keyword[elif] identifier[method] == identifier[CRCSTD] :
keyword[return] ( identifier[T2] - identifier[T1] )* identifier[self] . identifier[CRCSTD_Cp]
keyword[elif] identifier[method] == identifier[LASTOVKA_S] :
identifier[dH] =( identifier[Lastovka_solid_integral] ( identifier[T2] , identifier[self] . identifier[similarity_variable] )
- identifier[Lastovka_solid_integral] ( identifier[T1] , identifier[self] . identifier[similarity_variable] ))
keyword[return] identifier[property_mass_to_molar] ( identifier[dH] , identifier[self] . identifier[MW] )
keyword[elif] identifier[method] keyword[in] identifier[self] . identifier[tabular_data] :
keyword[return] identifier[float] ( identifier[quad] ( identifier[self] . identifier[calculate] , identifier[T1] , identifier[T2] , identifier[args] =( identifier[method] ))[ literal[int] ])
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] ) | def calculate_integral(self, T1, T2, method):
"""Method to calculate the integral of a property with respect to
temperature, using a specified method. Implements the analytical
integrals of all available methods except for tabular data.
Parameters
----------
T1 : float
Lower limit of integration, [K]
T2 : float
Upper limit of integration, [K]
method : str
Method for which to find the integral
Returns
-------
integral : float
Calculated integral of the property over the given range,
[`units*K`]
"""
if method == PERRY151:
H2 = self.PERRY151_const * T2 + 0.5 * self.PERRY151_lin * T2 ** 2 - self.PERRY151_quadinv / T2 + self.PERRY151_quad * T2 ** 3 / 3.0
H1 = self.PERRY151_const * T1 + 0.5 * self.PERRY151_lin * T1 ** 2 - self.PERRY151_quadinv / T1 + self.PERRY151_quad * T1 ** 3 / 3.0
return (H2 - H1) * calorie # depends on [control=['if'], data=[]]
elif method == CRCSTD:
return (T2 - T1) * self.CRCSTD_Cp # depends on [control=['if'], data=[]]
elif method == LASTOVKA_S:
dH = Lastovka_solid_integral(T2, self.similarity_variable) - Lastovka_solid_integral(T1, self.similarity_variable)
return property_mass_to_molar(dH, self.MW) # depends on [control=['if'], data=[]]
elif method in self.tabular_data:
return float(quad(self.calculate, T1, T2, args=method)[0]) # depends on [control=['if'], data=['method']]
else:
raise Exception('Method not valid') |
async def dict(self, full):
'''
Open a HiveDict at the given full path.
'''
node = await self.open(full)
return await HiveDict.anit(self, node) | <ast.AsyncFunctionDef object at 0x7da204960e80> | keyword[async] keyword[def] identifier[dict] ( identifier[self] , identifier[full] ):
literal[string]
identifier[node] = keyword[await] identifier[self] . identifier[open] ( identifier[full] )
keyword[return] keyword[await] identifier[HiveDict] . identifier[anit] ( identifier[self] , identifier[node] ) | async def dict(self, full):
"""
Open a HiveDict at the given full path.
"""
node = await self.open(full)
return await HiveDict.anit(self, node) |
def is_using_format(cls, markers, used_formats=None):
"""
Args:
markers (str | unicode): Space separated list of markers to look for
used_formats (str | unicode): Formats to consider (default: cls.used_formats)
Returns:
(bool): True if any one of the 'markers' is seen in 'used_formats'
"""
if used_formats is None:
used_formats = cls.used_formats
if not markers or not used_formats:
return False
return any(marker in used_formats for marker in flattened(markers, split=(" ", UNIQUE))) | def function[is_using_format, parameter[cls, markers, used_formats]]:
constant[
Args:
markers (str | unicode): Space separated list of markers to look for
used_formats (str | unicode): Formats to consider (default: cls.used_formats)
Returns:
(bool): True if any one of the 'markers' is seen in 'used_formats'
]
if compare[name[used_formats] is constant[None]] begin[:]
variable[used_formats] assign[=] name[cls].used_formats
if <ast.BoolOp object at 0x7da1b24ef1f0> begin[:]
return[constant[False]]
return[call[name[any], parameter[<ast.GeneratorExp object at 0x7da1b24eeb30>]]] | keyword[def] identifier[is_using_format] ( identifier[cls] , identifier[markers] , identifier[used_formats] = keyword[None] ):
literal[string]
keyword[if] identifier[used_formats] keyword[is] keyword[None] :
identifier[used_formats] = identifier[cls] . identifier[used_formats]
keyword[if] keyword[not] identifier[markers] keyword[or] keyword[not] identifier[used_formats] :
keyword[return] keyword[False]
keyword[return] identifier[any] ( identifier[marker] keyword[in] identifier[used_formats] keyword[for] identifier[marker] keyword[in] identifier[flattened] ( identifier[markers] , identifier[split] =( literal[string] , identifier[UNIQUE] ))) | def is_using_format(cls, markers, used_formats=None):
"""
Args:
markers (str | unicode): Space separated list of markers to look for
used_formats (str | unicode): Formats to consider (default: cls.used_formats)
Returns:
(bool): True if any one of the 'markers' is seen in 'used_formats'
"""
if used_formats is None:
used_formats = cls.used_formats # depends on [control=['if'], data=['used_formats']]
if not markers or not used_formats:
return False # depends on [control=['if'], data=[]]
return any((marker in used_formats for marker in flattened(markers, split=(' ', UNIQUE)))) |
def handle_error(result, exception_class=None):
"""
Extracts the last Windows error message into a python unicode string
:param result:
A function result, 0 or None indicates failure
:param exception_class:
The exception class to use for the exception if an error occurred
:return:
A unicode string error message
"""
if result == 0:
return
if result == Secur32Const.SEC_E_OUT_OF_SEQUENCE:
raise TLSError('A packet was received out of order')
if result == Secur32Const.SEC_E_MESSAGE_ALTERED:
raise TLSError('A packet was received altered')
if result == Secur32Const.SEC_E_CONTEXT_EXPIRED:
raise TLSError('The TLS session expired')
_, error_string = get_error()
if not isinstance(error_string, str_cls):
error_string = _try_decode(error_string)
if exception_class is None:
exception_class = OSError
raise exception_class(('SECURITY_STATUS error 0x%0.2X: ' % result) + error_string) | def function[handle_error, parameter[result, exception_class]]:
constant[
Extracts the last Windows error message into a python unicode string
:param result:
A function result, 0 or None indicates failure
:param exception_class:
The exception class to use for the exception if an error occurred
:return:
A unicode string error message
]
if compare[name[result] equal[==] constant[0]] begin[:]
return[None]
if compare[name[result] equal[==] name[Secur32Const].SEC_E_OUT_OF_SEQUENCE] begin[:]
<ast.Raise object at 0x7da1b000dae0>
if compare[name[result] equal[==] name[Secur32Const].SEC_E_MESSAGE_ALTERED] begin[:]
<ast.Raise object at 0x7da1b000cbe0>
if compare[name[result] equal[==] name[Secur32Const].SEC_E_CONTEXT_EXPIRED] begin[:]
<ast.Raise object at 0x7da1b00d7c40>
<ast.Tuple object at 0x7da1b00d4520> assign[=] call[name[get_error], parameter[]]
if <ast.UnaryOp object at 0x7da1b00d73d0> begin[:]
variable[error_string] assign[=] call[name[_try_decode], parameter[name[error_string]]]
if compare[name[exception_class] is constant[None]] begin[:]
variable[exception_class] assign[=] name[OSError]
<ast.Raise object at 0x7da1b00d6b00> | keyword[def] identifier[handle_error] ( identifier[result] , identifier[exception_class] = keyword[None] ):
literal[string]
keyword[if] identifier[result] == literal[int] :
keyword[return]
keyword[if] identifier[result] == identifier[Secur32Const] . identifier[SEC_E_OUT_OF_SEQUENCE] :
keyword[raise] identifier[TLSError] ( literal[string] )
keyword[if] identifier[result] == identifier[Secur32Const] . identifier[SEC_E_MESSAGE_ALTERED] :
keyword[raise] identifier[TLSError] ( literal[string] )
keyword[if] identifier[result] == identifier[Secur32Const] . identifier[SEC_E_CONTEXT_EXPIRED] :
keyword[raise] identifier[TLSError] ( literal[string] )
identifier[_] , identifier[error_string] = identifier[get_error] ()
keyword[if] keyword[not] identifier[isinstance] ( identifier[error_string] , identifier[str_cls] ):
identifier[error_string] = identifier[_try_decode] ( identifier[error_string] )
keyword[if] identifier[exception_class] keyword[is] keyword[None] :
identifier[exception_class] = identifier[OSError]
keyword[raise] identifier[exception_class] (( literal[string] % identifier[result] )+ identifier[error_string] ) | def handle_error(result, exception_class=None):
"""
Extracts the last Windows error message into a python unicode string
:param result:
A function result, 0 or None indicates failure
:param exception_class:
The exception class to use for the exception if an error occurred
:return:
A unicode string error message
"""
if result == 0:
return # depends on [control=['if'], data=[]]
if result == Secur32Const.SEC_E_OUT_OF_SEQUENCE:
raise TLSError('A packet was received out of order') # depends on [control=['if'], data=[]]
if result == Secur32Const.SEC_E_MESSAGE_ALTERED:
raise TLSError('A packet was received altered') # depends on [control=['if'], data=[]]
if result == Secur32Const.SEC_E_CONTEXT_EXPIRED:
raise TLSError('The TLS session expired') # depends on [control=['if'], data=[]]
(_, error_string) = get_error()
if not isinstance(error_string, str_cls):
error_string = _try_decode(error_string) # depends on [control=['if'], data=[]]
if exception_class is None:
exception_class = OSError # depends on [control=['if'], data=['exception_class']]
raise exception_class('SECURITY_STATUS error 0x%0.2X: ' % result + error_string) |
def _nanstd(array, axis=None, ddof=0):
"""Bottleneck nanstd function that handle tuple axis."""
if isinstance(axis, tuple):
array = _move_tuple_axes_first(array, axis=axis)
axis = 0
return bottleneck.nanstd(array, axis=axis, ddof=ddof) | def function[_nanstd, parameter[array, axis, ddof]]:
constant[Bottleneck nanstd function that handle tuple axis.]
if call[name[isinstance], parameter[name[axis], name[tuple]]] begin[:]
variable[array] assign[=] call[name[_move_tuple_axes_first], parameter[name[array]]]
variable[axis] assign[=] constant[0]
return[call[name[bottleneck].nanstd, parameter[name[array]]]] | keyword[def] identifier[_nanstd] ( identifier[array] , identifier[axis] = keyword[None] , identifier[ddof] = literal[int] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[axis] , identifier[tuple] ):
identifier[array] = identifier[_move_tuple_axes_first] ( identifier[array] , identifier[axis] = identifier[axis] )
identifier[axis] = literal[int]
keyword[return] identifier[bottleneck] . identifier[nanstd] ( identifier[array] , identifier[axis] = identifier[axis] , identifier[ddof] = identifier[ddof] ) | def _nanstd(array, axis=None, ddof=0):
"""Bottleneck nanstd function that handle tuple axis."""
if isinstance(axis, tuple):
array = _move_tuple_axes_first(array, axis=axis)
axis = 0 # depends on [control=['if'], data=[]]
return bottleneck.nanstd(array, axis=axis, ddof=ddof) |
def _prepare_deprecation_data(self):
"""
Cycles through the list of AppSettingDeprecation instances set on
``self.deprecations`` and prepulates two new dictionary attributes:
``self._deprecated_settings``:
Uses the deprecated setting names themselves as the keys. Used to
check whether a request is for a deprecated setting.
``self._renamed_settings``:
Uses the 'replacement setting' names as keys (where supplied).
Used to allow the helper to temporarily support override settings
defined using the old name, when the values for the new setting are
requested.
"""
if not isinstance(self.deprecations, (list, tuple)):
raise IncorrectDeprecationsValueType(
"'deprecations' must be a list or tuple, not a {}."
.format(type(self.deprecations).__name__)
)
self._deprecated_settings = {}
self._replacement_settings = defaultdict(list)
for item in self.deprecations:
item.prefix = self.get_prefix()
if not self.in_defaults(item.setting_name):
raise InvalidDeprecationDefinition(
"There is an issue with one of your setting deprecation "
"definitions. '{setting_name}' could not be found in "
"{defaults_module_path}. Please ensure a default value "
"remains there until the end of the setting's deprecation "
"period.".format(
setting_name=item.setting_name,
defaults_module_path=self._defaults_module_path,
)
)
if item.setting_name in self._deprecated_settings:
raise DuplicateDeprecationError(
"The setting name for each deprecation definition must be "
"unique, but '{setting_name}' has been used more than once "
"for {helper_class}.".format(
setting_name=item.setting_name,
helper_class=self.__class__.__name__,
)
)
self._deprecated_settings[item.setting_name] = item
if item.replacement_name:
if not self.in_defaults(item.replacement_name):
raise InvalidDeprecationDefinition(
"There is an issue with one of your settings "
"deprecation definitions. '{replacement_name}' is not "
"a valid replacement for '{setting_name}', as no such "
"value can be found in {defaults_module_path}."
.format(
replacement_name=item.replacement_name,
setting_name=item.setting_name,
defaults_module_path=self._defaults_module_path,
)
)
self._replacement_settings[item.replacement_name].append(item) | def function[_prepare_deprecation_data, parameter[self]]:
constant[
Cycles through the list of AppSettingDeprecation instances set on
``self.deprecations`` and prepulates two new dictionary attributes:
``self._deprecated_settings``:
Uses the deprecated setting names themselves as the keys. Used to
check whether a request is for a deprecated setting.
``self._renamed_settings``:
Uses the 'replacement setting' names as keys (where supplied).
Used to allow the helper to temporarily support override settings
defined using the old name, when the values for the new setting are
requested.
]
if <ast.UnaryOp object at 0x7da20c6a8bb0> begin[:]
<ast.Raise object at 0x7da20c6aa3b0>
name[self]._deprecated_settings assign[=] dictionary[[], []]
name[self]._replacement_settings assign[=] call[name[defaultdict], parameter[name[list]]]
for taget[name[item]] in starred[name[self].deprecations] begin[:]
name[item].prefix assign[=] call[name[self].get_prefix, parameter[]]
if <ast.UnaryOp object at 0x7da20c6a84c0> begin[:]
<ast.Raise object at 0x7da20c6a8130>
if compare[name[item].setting_name in name[self]._deprecated_settings] begin[:]
<ast.Raise object at 0x7da20c6a99f0>
call[name[self]._deprecated_settings][name[item].setting_name] assign[=] name[item]
if name[item].replacement_name begin[:]
if <ast.UnaryOp object at 0x7da20c6a8910> begin[:]
<ast.Raise object at 0x7da20c6aa740>
call[call[name[self]._replacement_settings][name[item].replacement_name].append, parameter[name[item]]] | keyword[def] identifier[_prepare_deprecation_data] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[self] . identifier[deprecations] ,( identifier[list] , identifier[tuple] )):
keyword[raise] identifier[IncorrectDeprecationsValueType] (
literal[string]
. identifier[format] ( identifier[type] ( identifier[self] . identifier[deprecations] ). identifier[__name__] )
)
identifier[self] . identifier[_deprecated_settings] ={}
identifier[self] . identifier[_replacement_settings] = identifier[defaultdict] ( identifier[list] )
keyword[for] identifier[item] keyword[in] identifier[self] . identifier[deprecations] :
identifier[item] . identifier[prefix] = identifier[self] . identifier[get_prefix] ()
keyword[if] keyword[not] identifier[self] . identifier[in_defaults] ( identifier[item] . identifier[setting_name] ):
keyword[raise] identifier[InvalidDeprecationDefinition] (
literal[string]
literal[string]
literal[string]
literal[string]
literal[string] . identifier[format] (
identifier[setting_name] = identifier[item] . identifier[setting_name] ,
identifier[defaults_module_path] = identifier[self] . identifier[_defaults_module_path] ,
)
)
keyword[if] identifier[item] . identifier[setting_name] keyword[in] identifier[self] . identifier[_deprecated_settings] :
keyword[raise] identifier[DuplicateDeprecationError] (
literal[string]
literal[string]
literal[string] . identifier[format] (
identifier[setting_name] = identifier[item] . identifier[setting_name] ,
identifier[helper_class] = identifier[self] . identifier[__class__] . identifier[__name__] ,
)
)
identifier[self] . identifier[_deprecated_settings] [ identifier[item] . identifier[setting_name] ]= identifier[item]
keyword[if] identifier[item] . identifier[replacement_name] :
keyword[if] keyword[not] identifier[self] . identifier[in_defaults] ( identifier[item] . identifier[replacement_name] ):
keyword[raise] identifier[InvalidDeprecationDefinition] (
literal[string]
literal[string]
literal[string]
literal[string]
. identifier[format] (
identifier[replacement_name] = identifier[item] . identifier[replacement_name] ,
identifier[setting_name] = identifier[item] . identifier[setting_name] ,
identifier[defaults_module_path] = identifier[self] . identifier[_defaults_module_path] ,
)
)
identifier[self] . identifier[_replacement_settings] [ identifier[item] . identifier[replacement_name] ]. identifier[append] ( identifier[item] ) | def _prepare_deprecation_data(self):
"""
Cycles through the list of AppSettingDeprecation instances set on
``self.deprecations`` and prepulates two new dictionary attributes:
``self._deprecated_settings``:
Uses the deprecated setting names themselves as the keys. Used to
check whether a request is for a deprecated setting.
``self._renamed_settings``:
Uses the 'replacement setting' names as keys (where supplied).
Used to allow the helper to temporarily support override settings
defined using the old name, when the values for the new setting are
requested.
"""
if not isinstance(self.deprecations, (list, tuple)):
raise IncorrectDeprecationsValueType("'deprecations' must be a list or tuple, not a {}.".format(type(self.deprecations).__name__)) # depends on [control=['if'], data=[]]
self._deprecated_settings = {}
self._replacement_settings = defaultdict(list)
for item in self.deprecations:
item.prefix = self.get_prefix()
if not self.in_defaults(item.setting_name):
raise InvalidDeprecationDefinition("There is an issue with one of your setting deprecation definitions. '{setting_name}' could not be found in {defaults_module_path}. Please ensure a default value remains there until the end of the setting's deprecation period.".format(setting_name=item.setting_name, defaults_module_path=self._defaults_module_path)) # depends on [control=['if'], data=[]]
if item.setting_name in self._deprecated_settings:
raise DuplicateDeprecationError("The setting name for each deprecation definition must be unique, but '{setting_name}' has been used more than once for {helper_class}.".format(setting_name=item.setting_name, helper_class=self.__class__.__name__)) # depends on [control=['if'], data=[]]
self._deprecated_settings[item.setting_name] = item
if item.replacement_name:
if not self.in_defaults(item.replacement_name):
raise InvalidDeprecationDefinition("There is an issue with one of your settings deprecation definitions. '{replacement_name}' is not a valid replacement for '{setting_name}', as no such value can be found in {defaults_module_path}.".format(replacement_name=item.replacement_name, setting_name=item.setting_name, defaults_module_path=self._defaults_module_path)) # depends on [control=['if'], data=[]]
self._replacement_settings[item.replacement_name].append(item) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']] |
def addcols(X, cols, names=None):
"""
Add one or more columns to a numpy ndarray.
Technical dependency of :func:`tabular.spreadsheet.aggregate_in`.
Implemented by the tabarray method
:func:`tabular.tab.tabarray.addcols`.
**Parameters**
**X** : numpy ndarray with structured dtype or recarray
The recarray to add columns to.
**cols** : numpy ndarray, or list of arrays of columns
Column(s) to add.
**names**: list of strings, optional
Names of the new columns. Only applicable when `cols` is a
list of arrays.
**Returns**
**out** : numpy ndarray with structured dtype
New numpy array made up of `X` plus the new columns.
**See also:** :func:`tabular.spreadsheet.colstack`
"""
if isinstance(names,str):
names = [n.strip() for n in names.split(',')]
if isinstance(cols, list):
if any([isinstance(x,np.ndarray) or isinstance(x,list) or \
isinstance(x,tuple) for x in cols]):
assert all([len(x) == len(X) for x in cols]), \
'Trying to add columns of wrong length.'
assert names != None and len(cols) == len(names), \
'Number of columns to add must equal number of new names.'
cols = utils.fromarrays(cols,type=np.ndarray,names = names)
else:
assert len(cols) == len(X), 'Trying to add column of wrong length.'
cols = utils.fromarrays([cols], type=np.ndarray,names=names)
else:
assert isinstance(cols, np.ndarray)
if cols.dtype.names == None:
cols = utils.fromarrays([cols],type=np.ndarray, names=names)
Replacements = [a for a in cols.dtype.names if a in X.dtype.names]
if len(Replacements) > 0:
print('Replacing columns',
[a for a in cols.dtype.names if a in X.dtype.names])
return utils.fromarrays(
[X[a] if a not in cols.dtype.names else cols[a] for a in X.dtype.names] +
[cols[a] for a in cols.dtype.names if a not in X.dtype.names],
type=np.ndarray,
names=list(X.dtype.names) + [a for a in cols.dtype.names
if a not in X.dtype.names]) | def function[addcols, parameter[X, cols, names]]:
constant[
Add one or more columns to a numpy ndarray.
Technical dependency of :func:`tabular.spreadsheet.aggregate_in`.
Implemented by the tabarray method
:func:`tabular.tab.tabarray.addcols`.
**Parameters**
**X** : numpy ndarray with structured dtype or recarray
The recarray to add columns to.
**cols** : numpy ndarray, or list of arrays of columns
Column(s) to add.
**names**: list of strings, optional
Names of the new columns. Only applicable when `cols` is a
list of arrays.
**Returns**
**out** : numpy ndarray with structured dtype
New numpy array made up of `X` plus the new columns.
**See also:** :func:`tabular.spreadsheet.colstack`
]
if call[name[isinstance], parameter[name[names], name[str]]] begin[:]
variable[names] assign[=] <ast.ListComp object at 0x7da20c6ab490>
if call[name[isinstance], parameter[name[cols], name[list]]] begin[:]
if call[name[any], parameter[<ast.ListComp object at 0x7da20c6a8c70>]] begin[:]
assert[call[name[all], parameter[<ast.ListComp object at 0x7da20c6a8250>]]]
assert[<ast.BoolOp object at 0x7da20c6aae30>]
variable[cols] assign[=] call[name[utils].fromarrays, parameter[name[cols]]]
variable[Replacements] assign[=] <ast.ListComp object at 0x7da20c6ab640>
if compare[call[name[len], parameter[name[Replacements]]] greater[>] constant[0]] begin[:]
call[name[print], parameter[constant[Replacing columns], <ast.ListComp object at 0x7da20c6abbb0>]]
return[call[name[utils].fromarrays, parameter[binary_operation[<ast.ListComp object at 0x7da20c6a8400> + <ast.ListComp object at 0x7da20c6a8940>]]]] | keyword[def] identifier[addcols] ( identifier[X] , identifier[cols] , identifier[names] = keyword[None] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[names] , identifier[str] ):
identifier[names] =[ identifier[n] . identifier[strip] () keyword[for] identifier[n] keyword[in] identifier[names] . identifier[split] ( literal[string] )]
keyword[if] identifier[isinstance] ( identifier[cols] , identifier[list] ):
keyword[if] identifier[any] ([ identifier[isinstance] ( identifier[x] , identifier[np] . identifier[ndarray] ) keyword[or] identifier[isinstance] ( identifier[x] , identifier[list] ) keyword[or] identifier[isinstance] ( identifier[x] , identifier[tuple] ) keyword[for] identifier[x] keyword[in] identifier[cols] ]):
keyword[assert] identifier[all] ([ identifier[len] ( identifier[x] )== identifier[len] ( identifier[X] ) keyword[for] identifier[x] keyword[in] identifier[cols] ]), literal[string]
keyword[assert] identifier[names] != keyword[None] keyword[and] identifier[len] ( identifier[cols] )== identifier[len] ( identifier[names] ), literal[string]
identifier[cols] = identifier[utils] . identifier[fromarrays] ( identifier[cols] , identifier[type] = identifier[np] . identifier[ndarray] , identifier[names] = identifier[names] )
keyword[else] :
keyword[assert] identifier[len] ( identifier[cols] )== identifier[len] ( identifier[X] ), literal[string]
identifier[cols] = identifier[utils] . identifier[fromarrays] ([ identifier[cols] ], identifier[type] = identifier[np] . identifier[ndarray] , identifier[names] = identifier[names] )
keyword[else] :
keyword[assert] identifier[isinstance] ( identifier[cols] , identifier[np] . identifier[ndarray] )
keyword[if] identifier[cols] . identifier[dtype] . identifier[names] == keyword[None] :
identifier[cols] = identifier[utils] . identifier[fromarrays] ([ identifier[cols] ], identifier[type] = identifier[np] . identifier[ndarray] , identifier[names] = identifier[names] )
identifier[Replacements] =[ identifier[a] keyword[for] identifier[a] keyword[in] identifier[cols] . identifier[dtype] . identifier[names] keyword[if] identifier[a] keyword[in] identifier[X] . identifier[dtype] . identifier[names] ]
keyword[if] identifier[len] ( identifier[Replacements] )> literal[int] :
identifier[print] ( literal[string] ,
[ identifier[a] keyword[for] identifier[a] keyword[in] identifier[cols] . identifier[dtype] . identifier[names] keyword[if] identifier[a] keyword[in] identifier[X] . identifier[dtype] . identifier[names] ])
keyword[return] identifier[utils] . identifier[fromarrays] (
[ identifier[X] [ identifier[a] ] keyword[if] identifier[a] keyword[not] keyword[in] identifier[cols] . identifier[dtype] . identifier[names] keyword[else] identifier[cols] [ identifier[a] ] keyword[for] identifier[a] keyword[in] identifier[X] . identifier[dtype] . identifier[names] ]+
[ identifier[cols] [ identifier[a] ] keyword[for] identifier[a] keyword[in] identifier[cols] . identifier[dtype] . identifier[names] keyword[if] identifier[a] keyword[not] keyword[in] identifier[X] . identifier[dtype] . identifier[names] ],
identifier[type] = identifier[np] . identifier[ndarray] ,
identifier[names] = identifier[list] ( identifier[X] . identifier[dtype] . identifier[names] )+[ identifier[a] keyword[for] identifier[a] keyword[in] identifier[cols] . identifier[dtype] . identifier[names]
keyword[if] identifier[a] keyword[not] keyword[in] identifier[X] . identifier[dtype] . identifier[names] ]) | def addcols(X, cols, names=None):
"""
Add one or more columns to a numpy ndarray.
Technical dependency of :func:`tabular.spreadsheet.aggregate_in`.
Implemented by the tabarray method
:func:`tabular.tab.tabarray.addcols`.
**Parameters**
**X** : numpy ndarray with structured dtype or recarray
The recarray to add columns to.
**cols** : numpy ndarray, or list of arrays of columns
Column(s) to add.
**names**: list of strings, optional
Names of the new columns. Only applicable when `cols` is a
list of arrays.
**Returns**
**out** : numpy ndarray with structured dtype
New numpy array made up of `X` plus the new columns.
**See also:** :func:`tabular.spreadsheet.colstack`
"""
if isinstance(names, str):
names = [n.strip() for n in names.split(',')] # depends on [control=['if'], data=[]]
if isinstance(cols, list):
if any([isinstance(x, np.ndarray) or isinstance(x, list) or isinstance(x, tuple) for x in cols]):
assert all([len(x) == len(X) for x in cols]), 'Trying to add columns of wrong length.'
assert names != None and len(cols) == len(names), 'Number of columns to add must equal number of new names.'
cols = utils.fromarrays(cols, type=np.ndarray, names=names) # depends on [control=['if'], data=[]]
else:
assert len(cols) == len(X), 'Trying to add column of wrong length.'
cols = utils.fromarrays([cols], type=np.ndarray, names=names) # depends on [control=['if'], data=[]]
else:
assert isinstance(cols, np.ndarray)
if cols.dtype.names == None:
cols = utils.fromarrays([cols], type=np.ndarray, names=names) # depends on [control=['if'], data=[]]
Replacements = [a for a in cols.dtype.names if a in X.dtype.names]
if len(Replacements) > 0:
print('Replacing columns', [a for a in cols.dtype.names if a in X.dtype.names]) # depends on [control=['if'], data=[]]
return utils.fromarrays([X[a] if a not in cols.dtype.names else cols[a] for a in X.dtype.names] + [cols[a] for a in cols.dtype.names if a not in X.dtype.names], type=np.ndarray, names=list(X.dtype.names) + [a for a in cols.dtype.names if a not in X.dtype.names]) |
def set_data(self, data=None, **kwargs):
'''
Read data into memory, applying all actions in queue.
Additionally, update queue and history.
'''
if data is None:
data = self.get_data(**kwargs)
setattr(self, '_data', data)
self.history += self.queue
self.queue = [] | def function[set_data, parameter[self, data]]:
constant[
Read data into memory, applying all actions in queue.
Additionally, update queue and history.
]
if compare[name[data] is constant[None]] begin[:]
variable[data] assign[=] call[name[self].get_data, parameter[]]
call[name[setattr], parameter[name[self], constant[_data], name[data]]]
<ast.AugAssign object at 0x7da20e9b05e0>
name[self].queue assign[=] list[[]] | keyword[def] identifier[set_data] ( identifier[self] , identifier[data] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[data] keyword[is] keyword[None] :
identifier[data] = identifier[self] . identifier[get_data] (** identifier[kwargs] )
identifier[setattr] ( identifier[self] , literal[string] , identifier[data] )
identifier[self] . identifier[history] += identifier[self] . identifier[queue]
identifier[self] . identifier[queue] =[] | def set_data(self, data=None, **kwargs):
"""
Read data into memory, applying all actions in queue.
Additionally, update queue and history.
"""
if data is None:
data = self.get_data(**kwargs) # depends on [control=['if'], data=['data']]
setattr(self, '_data', data)
self.history += self.queue
self.queue = [] |
def coderef_to_ecoclass(self, code, reference=None):
"""
Map a GAF code to an ECO class
Arguments
---------
code : str
GAF evidence code, e.g. ISS, IDA
reference: str
CURIE for a reference for the evidence instance. E.g. GO_REF:0000001.
Optional - If provided can give a mapping to a more specific ECO class
Return
------
str
ECO class CURIE/ID
"""
mcls = None
for (this_code,this_ref,cls) in self.mappings():
if str(this_code) == str(code):
if this_ref == reference:
return cls
if this_ref is None:
mcls = cls
return mcls | def function[coderef_to_ecoclass, parameter[self, code, reference]]:
constant[
Map a GAF code to an ECO class
Arguments
---------
code : str
GAF evidence code, e.g. ISS, IDA
reference: str
CURIE for a reference for the evidence instance. E.g. GO_REF:0000001.
Optional - If provided can give a mapping to a more specific ECO class
Return
------
str
ECO class CURIE/ID
]
variable[mcls] assign[=] constant[None]
for taget[tuple[[<ast.Name object at 0x7da1b0880310>, <ast.Name object at 0x7da1b0882890>, <ast.Name object at 0x7da1b0882f20>]]] in starred[call[name[self].mappings, parameter[]]] begin[:]
if compare[call[name[str], parameter[name[this_code]]] equal[==] call[name[str], parameter[name[code]]]] begin[:]
if compare[name[this_ref] equal[==] name[reference]] begin[:]
return[name[cls]]
if compare[name[this_ref] is constant[None]] begin[:]
variable[mcls] assign[=] name[cls]
return[name[mcls]] | keyword[def] identifier[coderef_to_ecoclass] ( identifier[self] , identifier[code] , identifier[reference] = keyword[None] ):
literal[string]
identifier[mcls] = keyword[None]
keyword[for] ( identifier[this_code] , identifier[this_ref] , identifier[cls] ) keyword[in] identifier[self] . identifier[mappings] ():
keyword[if] identifier[str] ( identifier[this_code] )== identifier[str] ( identifier[code] ):
keyword[if] identifier[this_ref] == identifier[reference] :
keyword[return] identifier[cls]
keyword[if] identifier[this_ref] keyword[is] keyword[None] :
identifier[mcls] = identifier[cls]
keyword[return] identifier[mcls] | def coderef_to_ecoclass(self, code, reference=None):
"""
Map a GAF code to an ECO class
Arguments
---------
code : str
GAF evidence code, e.g. ISS, IDA
reference: str
CURIE for a reference for the evidence instance. E.g. GO_REF:0000001.
Optional - If provided can give a mapping to a more specific ECO class
Return
------
str
ECO class CURIE/ID
"""
mcls = None
for (this_code, this_ref, cls) in self.mappings():
if str(this_code) == str(code):
if this_ref == reference:
return cls # depends on [control=['if'], data=[]]
if this_ref is None:
mcls = cls # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return mcls |
def get_sys_info():
"Returns system information as a dict"
blob = []
# commit = cc._git_hash
# blob.append(('commit', commit))
try:
(sysname, nodename, release, version,
machine, processor) = platform.uname()
blob.extend([
("python", "%d.%d.%d.%s.%s" % sys.version_info[:]),
("python-bits", struct.calcsize("P") * 8),
("OS", "%s" % (sysname)),
("OS-release", "%s" % (release)),
# ("Version", "%s" % (version)),
("machine", "%s" % (machine)),
("processor", "%s" % (processor)),
# ("byteorder", "%s" % sys.byteorder),
("LC_ALL", "%s" % os.environ.get('LC_ALL', "None")),
("LANG", "%s" % os.environ.get('LANG', "None")),
("LOCALE", "%s.%s" % locale.getlocale()),
])
except Exception:
pass
return blob | def function[get_sys_info, parameter[]]:
constant[Returns system information as a dict]
variable[blob] assign[=] list[[]]
<ast.Try object at 0x7da1b28f1120>
return[name[blob]] | keyword[def] identifier[get_sys_info] ():
literal[string]
identifier[blob] =[]
keyword[try] :
( identifier[sysname] , identifier[nodename] , identifier[release] , identifier[version] ,
identifier[machine] , identifier[processor] )= identifier[platform] . identifier[uname] ()
identifier[blob] . identifier[extend] ([
( literal[string] , literal[string] % identifier[sys] . identifier[version_info] [:]),
( literal[string] , identifier[struct] . identifier[calcsize] ( literal[string] )* literal[int] ),
( literal[string] , literal[string] %( identifier[sysname] )),
( literal[string] , literal[string] %( identifier[release] )),
( literal[string] , literal[string] %( identifier[machine] )),
( literal[string] , literal[string] %( identifier[processor] )),
( literal[string] , literal[string] % identifier[os] . identifier[environ] . identifier[get] ( literal[string] , literal[string] )),
( literal[string] , literal[string] % identifier[os] . identifier[environ] . identifier[get] ( literal[string] , literal[string] )),
( literal[string] , literal[string] % identifier[locale] . identifier[getlocale] ()),
])
keyword[except] identifier[Exception] :
keyword[pass]
keyword[return] identifier[blob] | def get_sys_info():
"""Returns system information as a dict"""
blob = []
# commit = cc._git_hash
# blob.append(('commit', commit))
try:
(sysname, nodename, release, version, machine, processor) = platform.uname()
# ("Version", "%s" % (version)),
# ("byteorder", "%s" % sys.byteorder),
blob.extend([('python', '%d.%d.%d.%s.%s' % sys.version_info[:]), ('python-bits', struct.calcsize('P') * 8), ('OS', '%s' % sysname), ('OS-release', '%s' % release), ('machine', '%s' % machine), ('processor', '%s' % processor), ('LC_ALL', '%s' % os.environ.get('LC_ALL', 'None')), ('LANG', '%s' % os.environ.get('LANG', 'None')), ('LOCALE', '%s.%s' % locale.getlocale())]) # depends on [control=['try'], data=[]]
except Exception:
pass # depends on [control=['except'], data=[]]
return blob |
def iter(self, count=0, func=sum):
'''Iterator of infinite dice rolls.
:param count: [0] Return list of ``count`` sums
:param func: [sum] Apply func to list of individual die rolls func([])
'''
while True:
yield self.roll(count, func) | def function[iter, parameter[self, count, func]]:
constant[Iterator of infinite dice rolls.
:param count: [0] Return list of ``count`` sums
:param func: [sum] Apply func to list of individual die rolls func([])
]
while constant[True] begin[:]
<ast.Yield object at 0x7da20c7962f0> | keyword[def] identifier[iter] ( identifier[self] , identifier[count] = literal[int] , identifier[func] = identifier[sum] ):
literal[string]
keyword[while] keyword[True] :
keyword[yield] identifier[self] . identifier[roll] ( identifier[count] , identifier[func] ) | def iter(self, count=0, func=sum):
"""Iterator of infinite dice rolls.
:param count: [0] Return list of ``count`` sums
:param func: [sum] Apply func to list of individual die rolls func([])
"""
while True:
yield self.roll(count, func) # depends on [control=['while'], data=[]] |
def get_tex_location(new_tex_name, current_tex_name, recurred=False):
"""
Takes the name of a TeX file and attempts to match it to an actual file
in the tarball.
:param: new_tex_name (string): the name of the TeX file to find
:param: current_tex_name (string): the location of the TeX file where we
found the reference
:return: tex_location (string): the location of the other TeX file on
disk or None if it is not found
"""
tex_location = None
current_dir = os.path.split(current_tex_name)[0]
some_kind_of_tag = '\\\\\\w+ '
new_tex_name = new_tex_name.strip()
if new_tex_name.startswith('input'):
new_tex_name = new_tex_name[len('input'):]
if re.match(some_kind_of_tag, new_tex_name):
new_tex_name = new_tex_name[len(new_tex_name.split(' ')[0]) + 1:]
if new_tex_name.startswith('./'):
new_tex_name = new_tex_name[2:]
if len(new_tex_name) == 0:
return None
new_tex_name = new_tex_name.strip()
new_tex_file = os.path.split(new_tex_name)[-1]
new_tex_folder = os.path.split(new_tex_name)[0]
if new_tex_folder == new_tex_file:
new_tex_folder = ''
# could be in the current directory
for any_file in os.listdir(current_dir):
if any_file == new_tex_file:
return os.path.join(current_dir, new_tex_file)
# could be in a subfolder of the current directory
if os.path.isdir(os.path.join(current_dir, new_tex_folder)):
for any_file in os.listdir(os.path.join(current_dir, new_tex_folder)):
if any_file == new_tex_file:
return os.path.join(os.path.join(current_dir, new_tex_folder),
new_tex_file)
# could be in a subfolder of a higher directory
one_dir_up = os.path.join(os.path.split(current_dir)[0], new_tex_folder)
if os.path.isdir(one_dir_up):
for any_file in os.listdir(one_dir_up):
if any_file == new_tex_file:
return os.path.join(one_dir_up, new_tex_file)
two_dirs_up = os.path.join(os.path.split(os.path.split(current_dir)[0])[0],
new_tex_folder)
if os.path.isdir(two_dirs_up):
for any_file in os.listdir(two_dirs_up):
if any_file == new_tex_file:
return os.path.join(two_dirs_up, new_tex_file)
if tex_location is None and not recurred:
return get_tex_location(new_tex_name + '.tex', current_tex_name,
recurred=True)
return tex_location | def function[get_tex_location, parameter[new_tex_name, current_tex_name, recurred]]:
constant[
Takes the name of a TeX file and attempts to match it to an actual file
in the tarball.
:param: new_tex_name (string): the name of the TeX file to find
:param: current_tex_name (string): the location of the TeX file where we
found the reference
:return: tex_location (string): the location of the other TeX file on
disk or None if it is not found
]
variable[tex_location] assign[=] constant[None]
variable[current_dir] assign[=] call[call[name[os].path.split, parameter[name[current_tex_name]]]][constant[0]]
variable[some_kind_of_tag] assign[=] constant[\\\w+ ]
variable[new_tex_name] assign[=] call[name[new_tex_name].strip, parameter[]]
if call[name[new_tex_name].startswith, parameter[constant[input]]] begin[:]
variable[new_tex_name] assign[=] call[name[new_tex_name]][<ast.Slice object at 0x7da204347fa0>]
if call[name[re].match, parameter[name[some_kind_of_tag], name[new_tex_name]]] begin[:]
variable[new_tex_name] assign[=] call[name[new_tex_name]][<ast.Slice object at 0x7da2043465f0>]
if call[name[new_tex_name].startswith, parameter[constant[./]]] begin[:]
variable[new_tex_name] assign[=] call[name[new_tex_name]][<ast.Slice object at 0x7da1b1461d20>]
if compare[call[name[len], parameter[name[new_tex_name]]] equal[==] constant[0]] begin[:]
return[constant[None]]
variable[new_tex_name] assign[=] call[name[new_tex_name].strip, parameter[]]
variable[new_tex_file] assign[=] call[call[name[os].path.split, parameter[name[new_tex_name]]]][<ast.UnaryOp object at 0x7da1b1463b50>]
variable[new_tex_folder] assign[=] call[call[name[os].path.split, parameter[name[new_tex_name]]]][constant[0]]
if compare[name[new_tex_folder] equal[==] name[new_tex_file]] begin[:]
variable[new_tex_folder] assign[=] constant[]
for taget[name[any_file]] in starred[call[name[os].listdir, parameter[name[current_dir]]]] begin[:]
if compare[name[any_file] equal[==] name[new_tex_file]] begin[:]
return[call[name[os].path.join, parameter[name[current_dir], name[new_tex_file]]]]
if call[name[os].path.isdir, parameter[call[name[os].path.join, parameter[name[current_dir], name[new_tex_folder]]]]] begin[:]
for taget[name[any_file]] in starred[call[name[os].listdir, parameter[call[name[os].path.join, parameter[name[current_dir], name[new_tex_folder]]]]]] begin[:]
if compare[name[any_file] equal[==] name[new_tex_file]] begin[:]
return[call[name[os].path.join, parameter[call[name[os].path.join, parameter[name[current_dir], name[new_tex_folder]]], name[new_tex_file]]]]
variable[one_dir_up] assign[=] call[name[os].path.join, parameter[call[call[name[os].path.split, parameter[name[current_dir]]]][constant[0]], name[new_tex_folder]]]
if call[name[os].path.isdir, parameter[name[one_dir_up]]] begin[:]
for taget[name[any_file]] in starred[call[name[os].listdir, parameter[name[one_dir_up]]]] begin[:]
if compare[name[any_file] equal[==] name[new_tex_file]] begin[:]
return[call[name[os].path.join, parameter[name[one_dir_up], name[new_tex_file]]]]
variable[two_dirs_up] assign[=] call[name[os].path.join, parameter[call[call[name[os].path.split, parameter[call[call[name[os].path.split, parameter[name[current_dir]]]][constant[0]]]]][constant[0]], name[new_tex_folder]]]
if call[name[os].path.isdir, parameter[name[two_dirs_up]]] begin[:]
for taget[name[any_file]] in starred[call[name[os].listdir, parameter[name[two_dirs_up]]]] begin[:]
if compare[name[any_file] equal[==] name[new_tex_file]] begin[:]
return[call[name[os].path.join, parameter[name[two_dirs_up], name[new_tex_file]]]]
if <ast.BoolOp object at 0x7da1b14b38e0> begin[:]
return[call[name[get_tex_location], parameter[binary_operation[name[new_tex_name] + constant[.tex]], name[current_tex_name]]]]
return[name[tex_location]] | keyword[def] identifier[get_tex_location] ( identifier[new_tex_name] , identifier[current_tex_name] , identifier[recurred] = keyword[False] ):
literal[string]
identifier[tex_location] = keyword[None]
identifier[current_dir] = identifier[os] . identifier[path] . identifier[split] ( identifier[current_tex_name] )[ literal[int] ]
identifier[some_kind_of_tag] = literal[string]
identifier[new_tex_name] = identifier[new_tex_name] . identifier[strip] ()
keyword[if] identifier[new_tex_name] . identifier[startswith] ( literal[string] ):
identifier[new_tex_name] = identifier[new_tex_name] [ identifier[len] ( literal[string] ):]
keyword[if] identifier[re] . identifier[match] ( identifier[some_kind_of_tag] , identifier[new_tex_name] ):
identifier[new_tex_name] = identifier[new_tex_name] [ identifier[len] ( identifier[new_tex_name] . identifier[split] ( literal[string] )[ literal[int] ])+ literal[int] :]
keyword[if] identifier[new_tex_name] . identifier[startswith] ( literal[string] ):
identifier[new_tex_name] = identifier[new_tex_name] [ literal[int] :]
keyword[if] identifier[len] ( identifier[new_tex_name] )== literal[int] :
keyword[return] keyword[None]
identifier[new_tex_name] = identifier[new_tex_name] . identifier[strip] ()
identifier[new_tex_file] = identifier[os] . identifier[path] . identifier[split] ( identifier[new_tex_name] )[- literal[int] ]
identifier[new_tex_folder] = identifier[os] . identifier[path] . identifier[split] ( identifier[new_tex_name] )[ literal[int] ]
keyword[if] identifier[new_tex_folder] == identifier[new_tex_file] :
identifier[new_tex_folder] = literal[string]
keyword[for] identifier[any_file] keyword[in] identifier[os] . identifier[listdir] ( identifier[current_dir] ):
keyword[if] identifier[any_file] == identifier[new_tex_file] :
keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[current_dir] , identifier[new_tex_file] )
keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[os] . identifier[path] . identifier[join] ( identifier[current_dir] , identifier[new_tex_folder] )):
keyword[for] identifier[any_file] keyword[in] identifier[os] . identifier[listdir] ( identifier[os] . identifier[path] . identifier[join] ( identifier[current_dir] , identifier[new_tex_folder] )):
keyword[if] identifier[any_file] == identifier[new_tex_file] :
keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[join] ( identifier[current_dir] , identifier[new_tex_folder] ),
identifier[new_tex_file] )
identifier[one_dir_up] = identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[split] ( identifier[current_dir] )[ literal[int] ], identifier[new_tex_folder] )
keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[one_dir_up] ):
keyword[for] identifier[any_file] keyword[in] identifier[os] . identifier[listdir] ( identifier[one_dir_up] ):
keyword[if] identifier[any_file] == identifier[new_tex_file] :
keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[one_dir_up] , identifier[new_tex_file] )
identifier[two_dirs_up] = identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[split] ( identifier[os] . identifier[path] . identifier[split] ( identifier[current_dir] )[ literal[int] ])[ literal[int] ],
identifier[new_tex_folder] )
keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[two_dirs_up] ):
keyword[for] identifier[any_file] keyword[in] identifier[os] . identifier[listdir] ( identifier[two_dirs_up] ):
keyword[if] identifier[any_file] == identifier[new_tex_file] :
keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[two_dirs_up] , identifier[new_tex_file] )
keyword[if] identifier[tex_location] keyword[is] keyword[None] keyword[and] keyword[not] identifier[recurred] :
keyword[return] identifier[get_tex_location] ( identifier[new_tex_name] + literal[string] , identifier[current_tex_name] ,
identifier[recurred] = keyword[True] )
keyword[return] identifier[tex_location] | def get_tex_location(new_tex_name, current_tex_name, recurred=False):
"""
Takes the name of a TeX file and attempts to match it to an actual file
in the tarball.
:param: new_tex_name (string): the name of the TeX file to find
:param: current_tex_name (string): the location of the TeX file where we
found the reference
:return: tex_location (string): the location of the other TeX file on
disk or None if it is not found
"""
tex_location = None
current_dir = os.path.split(current_tex_name)[0]
some_kind_of_tag = '\\\\\\w+ '
new_tex_name = new_tex_name.strip()
if new_tex_name.startswith('input'):
new_tex_name = new_tex_name[len('input'):] # depends on [control=['if'], data=[]]
if re.match(some_kind_of_tag, new_tex_name):
new_tex_name = new_tex_name[len(new_tex_name.split(' ')[0]) + 1:] # depends on [control=['if'], data=[]]
if new_tex_name.startswith('./'):
new_tex_name = new_tex_name[2:] # depends on [control=['if'], data=[]]
if len(new_tex_name) == 0:
return None # depends on [control=['if'], data=[]]
new_tex_name = new_tex_name.strip()
new_tex_file = os.path.split(new_tex_name)[-1]
new_tex_folder = os.path.split(new_tex_name)[0]
if new_tex_folder == new_tex_file:
new_tex_folder = '' # depends on [control=['if'], data=['new_tex_folder']]
# could be in the current directory
for any_file in os.listdir(current_dir):
if any_file == new_tex_file:
return os.path.join(current_dir, new_tex_file) # depends on [control=['if'], data=['new_tex_file']] # depends on [control=['for'], data=['any_file']]
# could be in a subfolder of the current directory
if os.path.isdir(os.path.join(current_dir, new_tex_folder)):
for any_file in os.listdir(os.path.join(current_dir, new_tex_folder)):
if any_file == new_tex_file:
return os.path.join(os.path.join(current_dir, new_tex_folder), new_tex_file) # depends on [control=['if'], data=['new_tex_file']] # depends on [control=['for'], data=['any_file']] # depends on [control=['if'], data=[]]
# could be in a subfolder of a higher directory
one_dir_up = os.path.join(os.path.split(current_dir)[0], new_tex_folder)
if os.path.isdir(one_dir_up):
for any_file in os.listdir(one_dir_up):
if any_file == new_tex_file:
return os.path.join(one_dir_up, new_tex_file) # depends on [control=['if'], data=['new_tex_file']] # depends on [control=['for'], data=['any_file']] # depends on [control=['if'], data=[]]
two_dirs_up = os.path.join(os.path.split(os.path.split(current_dir)[0])[0], new_tex_folder)
if os.path.isdir(two_dirs_up):
for any_file in os.listdir(two_dirs_up):
if any_file == new_tex_file:
return os.path.join(two_dirs_up, new_tex_file) # depends on [control=['if'], data=['new_tex_file']] # depends on [control=['for'], data=['any_file']] # depends on [control=['if'], data=[]]
if tex_location is None and (not recurred):
return get_tex_location(new_tex_name + '.tex', current_tex_name, recurred=True) # depends on [control=['if'], data=[]]
return tex_location |
def remove_subvisual(self, visual):
"""Remove a subvisual
Parameters
----------
visual : instance of Visual
The visual to remove.
"""
visual.events.update.disconnect(self._subv_update)
self._subvisuals.remove(visual)
self.update() | def function[remove_subvisual, parameter[self, visual]]:
constant[Remove a subvisual
Parameters
----------
visual : instance of Visual
The visual to remove.
]
call[name[visual].events.update.disconnect, parameter[name[self]._subv_update]]
call[name[self]._subvisuals.remove, parameter[name[visual]]]
call[name[self].update, parameter[]] | keyword[def] identifier[remove_subvisual] ( identifier[self] , identifier[visual] ):
literal[string]
identifier[visual] . identifier[events] . identifier[update] . identifier[disconnect] ( identifier[self] . identifier[_subv_update] )
identifier[self] . identifier[_subvisuals] . identifier[remove] ( identifier[visual] )
identifier[self] . identifier[update] () | def remove_subvisual(self, visual):
"""Remove a subvisual
Parameters
----------
visual : instance of Visual
The visual to remove.
"""
visual.events.update.disconnect(self._subv_update)
self._subvisuals.remove(visual)
self.update() |
def comports(vid_pid=None, include_all=False, check_available=True,
only_available=False):
'''
.. versionchanged:: 0.9
Add :data:`check_available` keyword argument to optionally check if
each port is actually available by attempting to open a temporary
connection.
Add :data:`only_available` keyword argument to only include ports that
are actually available for connection.
Parameters
----------
vid_pid : str or list, optional
One or more USB vendor/product IDs to match.
Each USB vendor/product must be in the form ``'<vid>:<pid>'``.
For example, ``'2341:0010'``.
include_all : bool, optional
If ``True``, include all available serial ports, but sort rows such
that ports matching specified USB vendor/product IDs come first.
If ``False``, only include ports that match specified USB
vendor/product IDs.
check_available : bool, optional
If ``True``, check if each port is actually available by attempting to
open a temporary connection.
only_available : bool, optional
If ``True``, only include ports that are available.
Returns
-------
pandas.DataFrame
Table containing descriptor and hardware ID of each COM port, indexed
by port (e.g., "COM4").
.. versionchanged:: 0.9
If :data:`check_available` is ``True``, add an ``available`` column
to the table indicating whether each port accepted a connection.
'''
df_comports = _comports()
# Extract USB product and vendor IDs from `hwid` entries of the form:
#
# FTDIBUS\VID_0403+PID_6001+A60081GEA\0000
df_hwid = (df_comports.hardware_id.str.lower().str
.extract('vid_(?P<vid>[0-9a-f]+)\+pid_(?P<pid>[0-9a-f]+)',
expand=True))
# Extract USB product and vendor IDs from `hwid` entries of the form:
#
# USB VID:PID=16C0:0483 SNR=2145930
no_id_mask = df_hwid.vid.isnull()
df_hwid.loc[no_id_mask] = (df_comports.loc[no_id_mask, 'hardware_id']
.str.lower().str
.extract('vid:pid=(?P<vid>[0-9a-f]+):'
'(?P<pid>[0-9a-f]+)', expand=True))
df_comports = df_comports.join(df_hwid)
if vid_pid is not None:
if isinstance(vid_pid, six.string_types):
# Single USB vendor/product ID specified.
vid_pid = [vid_pid]
# Mark ports that match specified USB vendor/product IDs.
df_comports['include'] = (df_comports.vid + ':' +
df_comports.pid).isin(map(str.lower,
vid_pid))
if include_all:
# All ports should be included, but sort rows such that ports
# matching specified USB vendor/product IDs come first.
df_comports = (df_comports.sort_values('include', ascending=False)
.drop('include', axis=1))
else:
# Only include ports that match specified USB vendor/product IDs.
df_comports = (df_comports.loc[df_comports.include]
.drop('include', axis=1))
if check_available or only_available:
# Add `available` column indicating whether each port accepted a
# connection. A port may not, for example, accept a connection if the
# port is already open.
available = []
for name_i, port_info_i in df_comports.iterrows():
try:
connection = serial.Serial(port=name_i)
connection.close()
available.append(True)
except serial.SerialException:
available.append(False)
df_comports['available'] = available
if only_available:
df_comports = df_comports.loc[df_comports.available]
if not check_available:
del df_comports['available']
return df_comports | def function[comports, parameter[vid_pid, include_all, check_available, only_available]]:
constant[
.. versionchanged:: 0.9
Add :data:`check_available` keyword argument to optionally check if
each port is actually available by attempting to open a temporary
connection.
Add :data:`only_available` keyword argument to only include ports that
are actually available for connection.
Parameters
----------
vid_pid : str or list, optional
One or more USB vendor/product IDs to match.
Each USB vendor/product must be in the form ``'<vid>:<pid>'``.
For example, ``'2341:0010'``.
include_all : bool, optional
If ``True``, include all available serial ports, but sort rows such
that ports matching specified USB vendor/product IDs come first.
If ``False``, only include ports that match specified USB
vendor/product IDs.
check_available : bool, optional
If ``True``, check if each port is actually available by attempting to
open a temporary connection.
only_available : bool, optional
If ``True``, only include ports that are available.
Returns
-------
pandas.DataFrame
Table containing descriptor and hardware ID of each COM port, indexed
by port (e.g., "COM4").
.. versionchanged:: 0.9
If :data:`check_available` is ``True``, add an ``available`` column
to the table indicating whether each port accepted a connection.
]
variable[df_comports] assign[=] call[name[_comports], parameter[]]
variable[df_hwid] assign[=] call[call[name[df_comports].hardware_id.str.lower, parameter[]].str.extract, parameter[constant[vid_(?P<vid>[0-9a-f]+)\+pid_(?P<pid>[0-9a-f]+)]]]
variable[no_id_mask] assign[=] call[name[df_hwid].vid.isnull, parameter[]]
call[name[df_hwid].loc][name[no_id_mask]] assign[=] call[call[call[name[df_comports].loc][tuple[[<ast.Name object at 0x7da1b1a1c2b0>, <ast.Constant object at 0x7da1b1a1c760>]]].str.lower, parameter[]].str.extract, parameter[constant[vid:pid=(?P<vid>[0-9a-f]+):(?P<pid>[0-9a-f]+)]]]
variable[df_comports] assign[=] call[name[df_comports].join, parameter[name[df_hwid]]]
if compare[name[vid_pid] is_not constant[None]] begin[:]
if call[name[isinstance], parameter[name[vid_pid], name[six].string_types]] begin[:]
variable[vid_pid] assign[=] list[[<ast.Name object at 0x7da1b1a1cd90>]]
call[name[df_comports]][constant[include]] assign[=] call[binary_operation[binary_operation[name[df_comports].vid + constant[:]] + name[df_comports].pid].isin, parameter[call[name[map], parameter[name[str].lower, name[vid_pid]]]]]
if name[include_all] begin[:]
variable[df_comports] assign[=] call[call[name[df_comports].sort_values, parameter[constant[include]]].drop, parameter[constant[include]]]
if <ast.BoolOp object at 0x7da1b1a1ead0> begin[:]
variable[available] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b1a1c610>, <ast.Name object at 0x7da1b1a1d450>]]] in starred[call[name[df_comports].iterrows, parameter[]]] begin[:]
<ast.Try object at 0x7da1b1a1eb00>
call[name[df_comports]][constant[available]] assign[=] name[available]
if name[only_available] begin[:]
variable[df_comports] assign[=] call[name[df_comports].loc][name[df_comports].available]
if <ast.UnaryOp object at 0x7da1b1a1e740> begin[:]
<ast.Delete object at 0x7da1b1a1c220>
return[name[df_comports]] | keyword[def] identifier[comports] ( identifier[vid_pid] = keyword[None] , identifier[include_all] = keyword[False] , identifier[check_available] = keyword[True] ,
identifier[only_available] = keyword[False] ):
literal[string]
identifier[df_comports] = identifier[_comports] ()
identifier[df_hwid] =( identifier[df_comports] . identifier[hardware_id] . identifier[str] . identifier[lower] (). identifier[str]
. identifier[extract] ( literal[string] ,
identifier[expand] = keyword[True] ))
identifier[no_id_mask] = identifier[df_hwid] . identifier[vid] . identifier[isnull] ()
identifier[df_hwid] . identifier[loc] [ identifier[no_id_mask] ]=( identifier[df_comports] . identifier[loc] [ identifier[no_id_mask] , literal[string] ]
. identifier[str] . identifier[lower] (). identifier[str]
. identifier[extract] ( literal[string]
literal[string] , identifier[expand] = keyword[True] ))
identifier[df_comports] = identifier[df_comports] . identifier[join] ( identifier[df_hwid] )
keyword[if] identifier[vid_pid] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[isinstance] ( identifier[vid_pid] , identifier[six] . identifier[string_types] ):
identifier[vid_pid] =[ identifier[vid_pid] ]
identifier[df_comports] [ literal[string] ]=( identifier[df_comports] . identifier[vid] + literal[string] +
identifier[df_comports] . identifier[pid] ). identifier[isin] ( identifier[map] ( identifier[str] . identifier[lower] ,
identifier[vid_pid] ))
keyword[if] identifier[include_all] :
identifier[df_comports] =( identifier[df_comports] . identifier[sort_values] ( literal[string] , identifier[ascending] = keyword[False] )
. identifier[drop] ( literal[string] , identifier[axis] = literal[int] ))
keyword[else] :
identifier[df_comports] =( identifier[df_comports] . identifier[loc] [ identifier[df_comports] . identifier[include] ]
. identifier[drop] ( literal[string] , identifier[axis] = literal[int] ))
keyword[if] identifier[check_available] keyword[or] identifier[only_available] :
identifier[available] =[]
keyword[for] identifier[name_i] , identifier[port_info_i] keyword[in] identifier[df_comports] . identifier[iterrows] ():
keyword[try] :
identifier[connection] = identifier[serial] . identifier[Serial] ( identifier[port] = identifier[name_i] )
identifier[connection] . identifier[close] ()
identifier[available] . identifier[append] ( keyword[True] )
keyword[except] identifier[serial] . identifier[SerialException] :
identifier[available] . identifier[append] ( keyword[False] )
identifier[df_comports] [ literal[string] ]= identifier[available]
keyword[if] identifier[only_available] :
identifier[df_comports] = identifier[df_comports] . identifier[loc] [ identifier[df_comports] . identifier[available] ]
keyword[if] keyword[not] identifier[check_available] :
keyword[del] identifier[df_comports] [ literal[string] ]
keyword[return] identifier[df_comports] | def comports(vid_pid=None, include_all=False, check_available=True, only_available=False):
"""
.. versionchanged:: 0.9
Add :data:`check_available` keyword argument to optionally check if
each port is actually available by attempting to open a temporary
connection.
Add :data:`only_available` keyword argument to only include ports that
are actually available for connection.
Parameters
----------
vid_pid : str or list, optional
One or more USB vendor/product IDs to match.
Each USB vendor/product must be in the form ``'<vid>:<pid>'``.
For example, ``'2341:0010'``.
include_all : bool, optional
If ``True``, include all available serial ports, but sort rows such
that ports matching specified USB vendor/product IDs come first.
If ``False``, only include ports that match specified USB
vendor/product IDs.
check_available : bool, optional
If ``True``, check if each port is actually available by attempting to
open a temporary connection.
only_available : bool, optional
If ``True``, only include ports that are available.
Returns
-------
pandas.DataFrame
Table containing descriptor and hardware ID of each COM port, indexed
by port (e.g., "COM4").
.. versionchanged:: 0.9
If :data:`check_available` is ``True``, add an ``available`` column
to the table indicating whether each port accepted a connection.
"""
df_comports = _comports()
# Extract USB product and vendor IDs from `hwid` entries of the form:
#
# FTDIBUS\VID_0403+PID_6001+A60081GEA\0000
df_hwid = df_comports.hardware_id.str.lower().str.extract('vid_(?P<vid>[0-9a-f]+)\\+pid_(?P<pid>[0-9a-f]+)', expand=True)
# Extract USB product and vendor IDs from `hwid` entries of the form:
#
# USB VID:PID=16C0:0483 SNR=2145930
no_id_mask = df_hwid.vid.isnull()
df_hwid.loc[no_id_mask] = df_comports.loc[no_id_mask, 'hardware_id'].str.lower().str.extract('vid:pid=(?P<vid>[0-9a-f]+):(?P<pid>[0-9a-f]+)', expand=True)
df_comports = df_comports.join(df_hwid)
if vid_pid is not None:
if isinstance(vid_pid, six.string_types):
# Single USB vendor/product ID specified.
vid_pid = [vid_pid] # depends on [control=['if'], data=[]]
# Mark ports that match specified USB vendor/product IDs.
df_comports['include'] = (df_comports.vid + ':' + df_comports.pid).isin(map(str.lower, vid_pid))
if include_all:
# All ports should be included, but sort rows such that ports
# matching specified USB vendor/product IDs come first.
df_comports = df_comports.sort_values('include', ascending=False).drop('include', axis=1) # depends on [control=['if'], data=[]]
else:
# Only include ports that match specified USB vendor/product IDs.
df_comports = df_comports.loc[df_comports.include].drop('include', axis=1) # depends on [control=['if'], data=['vid_pid']]
if check_available or only_available:
# Add `available` column indicating whether each port accepted a
# connection. A port may not, for example, accept a connection if the
# port is already open.
available = []
for (name_i, port_info_i) in df_comports.iterrows():
try:
connection = serial.Serial(port=name_i)
connection.close()
available.append(True) # depends on [control=['try'], data=[]]
except serial.SerialException:
available.append(False) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]]
df_comports['available'] = available
if only_available:
df_comports = df_comports.loc[df_comports.available] # depends on [control=['if'], data=[]]
if not check_available:
del df_comports['available'] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return df_comports |
def fit(self, data, parent_node=None, estimator=None):
        """
        Learn the CPDs of the model from a pandas DataFrame.

        Every column of ``data`` becomes a node of the network; an edge is
        added from the parent node to every other column before delegating
        the actual CPD estimation to the parent class.

        Parameters
        ----------
        data : pandas DataFrame object
            A DataFrame whose column names are the variable names of the
            network.
        parent_node : any hashable python object (optional)
            Parent node of the model. Falls back to a previously specified
            parent node when omitted.
        estimator : Estimator class
            Any pgmpy estimator. If nothing is specified, the default
            ``MaximumLikelihoodEstimator`` would be used.

        Raises
        ------
        ValueError
            If no parent node is known, or if the parent node is not a
            column of ``data``.
        """
        if not parent_node:
            if not self.parent_node:
                raise ValueError("parent node must be specified for the model")
            # Reuse the parent node configured on a previous call.
            parent_node = self.parent_node
        if parent_node not in data.columns:
            raise ValueError(
                "parent node: {node} is not present in the given data".format(
                    node=parent_node))
        for column in data.columns:
            if column == parent_node:
                continue
            self.add_edge(parent_node, column)
        super(NaiveBayes, self).fit(data, estimator)
constant[
Computes the CPD for each node from a given data in the form of a pandas dataframe.
If a variable from the data is not present in the model, it adds that node into the model.
Parameters
----------
data : pandas DataFrame object
A DataFrame object with column names same as the variable names of network
parent_node: any hashable python object (optional)
Parent node of the model, if not specified it looks for a previously specified
parent node.
estimator: Estimator class
Any pgmpy estimator. If nothing is specified, the default ``MaximumLikelihoodEstimator``
would be used.
Examples
--------
>>> import numpy as np
>>> import pandas as pd
>>> from pgmpy.models import NaiveBayes
>>> model = NaiveBayes()
>>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
... columns=['A', 'B', 'C', 'D', 'E'])
>>> model.fit(values, 'A')
>>> model.get_cpds()
[<TabularCPD representing P(D:2 | A:2) at 0x4b72870>,
<TabularCPD representing P(E:2 | A:2) at 0x4bb2150>,
<TabularCPD representing P(A:2) at 0x4bb23d0>,
<TabularCPD representing P(B:2 | A:2) at 0x4bb24b0>,
<TabularCPD representing P(C:2 | A:2) at 0x4bb2750>]
>>> model.edges()
[('A', 'D'), ('A', 'E'), ('A', 'B'), ('A', 'C')]
]
if <ast.UnaryOp object at 0x7da20e954190> begin[:]
if <ast.UnaryOp object at 0x7da20e955db0> begin[:]
<ast.Raise object at 0x7da20e954280>
if compare[name[parent_node] <ast.NotIn object at 0x7da2590d7190> name[data].columns] begin[:]
<ast.Raise object at 0x7da2054a6980>
for taget[name[child_node]] in starred[name[data].columns] begin[:]
if compare[name[child_node] not_equal[!=] name[parent_node]] begin[:]
call[name[self].add_edge, parameter[name[parent_node], name[child_node]]]
call[call[name[super], parameter[name[NaiveBayes], name[self]]].fit, parameter[name[data], name[estimator]]] | keyword[def] identifier[fit] ( identifier[self] , identifier[data] , identifier[parent_node] = keyword[None] , identifier[estimator] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[parent_node] :
keyword[if] keyword[not] identifier[self] . identifier[parent_node] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[else] :
identifier[parent_node] = identifier[self] . identifier[parent_node]
keyword[if] identifier[parent_node] keyword[not] keyword[in] identifier[data] . identifier[columns] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[node] = identifier[parent_node] ))
keyword[for] identifier[child_node] keyword[in] identifier[data] . identifier[columns] :
keyword[if] identifier[child_node] != identifier[parent_node] :
identifier[self] . identifier[add_edge] ( identifier[parent_node] , identifier[child_node] )
identifier[super] ( identifier[NaiveBayes] , identifier[self] ). identifier[fit] ( identifier[data] , identifier[estimator] ) | def fit(self, data, parent_node=None, estimator=None):
"""
Computes the CPD for each node from a given data in the form of a pandas dataframe.
If a variable from the data is not present in the model, it adds that node into the model.
Parameters
----------
data : pandas DataFrame object
A DataFrame object with column names same as the variable names of network
parent_node: any hashable python object (optional)
Parent node of the model, if not specified it looks for a previously specified
parent node.
estimator: Estimator class
Any pgmpy estimator. If nothing is specified, the default ``MaximumLikelihoodEstimator``
would be used.
Examples
--------
>>> import numpy as np
>>> import pandas as pd
>>> from pgmpy.models import NaiveBayes
>>> model = NaiveBayes()
>>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
... columns=['A', 'B', 'C', 'D', 'E'])
>>> model.fit(values, 'A')
>>> model.get_cpds()
[<TabularCPD representing P(D:2 | A:2) at 0x4b72870>,
<TabularCPD representing P(E:2 | A:2) at 0x4bb2150>,
<TabularCPD representing P(A:2) at 0x4bb23d0>,
<TabularCPD representing P(B:2 | A:2) at 0x4bb24b0>,
<TabularCPD representing P(C:2 | A:2) at 0x4bb2750>]
>>> model.edges()
[('A', 'D'), ('A', 'E'), ('A', 'B'), ('A', 'C')]
"""
if not parent_node:
if not self.parent_node:
raise ValueError('parent node must be specified for the model') # depends on [control=['if'], data=[]]
else:
parent_node = self.parent_node # depends on [control=['if'], data=[]]
if parent_node not in data.columns:
raise ValueError('parent node: {node} is not present in the given data'.format(node=parent_node)) # depends on [control=['if'], data=['parent_node']]
for child_node in data.columns:
if child_node != parent_node:
self.add_edge(parent_node, child_node) # depends on [control=['if'], data=['child_node', 'parent_node']] # depends on [control=['for'], data=['child_node']]
super(NaiveBayes, self).fit(data, estimator) |
def _from_dict(cls, _dict):
"""Initialize a Word object from a json dictionary."""
args = {}
if 'word' in _dict:
args['word'] = _dict.get('word')
else:
raise ValueError(
'Required property \'word\' not present in Word JSON')
if 'sounds_like' in _dict:
args['sounds_like'] = _dict.get('sounds_like')
else:
raise ValueError(
'Required property \'sounds_like\' not present in Word JSON')
if 'display_as' in _dict:
args['display_as'] = _dict.get('display_as')
else:
raise ValueError(
'Required property \'display_as\' not present in Word JSON')
if 'count' in _dict:
args['count'] = _dict.get('count')
else:
raise ValueError(
'Required property \'count\' not present in Word JSON')
if 'source' in _dict:
args['source'] = _dict.get('source')
else:
raise ValueError(
'Required property \'source\' not present in Word JSON')
if 'error' in _dict:
args['error'] = [
WordError._from_dict(x) for x in (_dict.get('error'))
]
return cls(**args) | def function[_from_dict, parameter[cls, _dict]]:
constant[Initialize a Word object from a json dictionary.]
variable[args] assign[=] dictionary[[], []]
if compare[constant[word] in name[_dict]] begin[:]
call[name[args]][constant[word]] assign[=] call[name[_dict].get, parameter[constant[word]]]
if compare[constant[sounds_like] in name[_dict]] begin[:]
call[name[args]][constant[sounds_like]] assign[=] call[name[_dict].get, parameter[constant[sounds_like]]]
if compare[constant[display_as] in name[_dict]] begin[:]
call[name[args]][constant[display_as]] assign[=] call[name[_dict].get, parameter[constant[display_as]]]
if compare[constant[count] in name[_dict]] begin[:]
call[name[args]][constant[count]] assign[=] call[name[_dict].get, parameter[constant[count]]]
if compare[constant[source] in name[_dict]] begin[:]
call[name[args]][constant[source]] assign[=] call[name[_dict].get, parameter[constant[source]]]
if compare[constant[error] in name[_dict]] begin[:]
call[name[args]][constant[error]] assign[=] <ast.ListComp object at 0x7da1b2346c80>
return[call[name[cls], parameter[]]] | keyword[def] identifier[_from_dict] ( identifier[cls] , identifier[_dict] ):
literal[string]
identifier[args] ={}
keyword[if] literal[string] keyword[in] identifier[_dict] :
identifier[args] [ literal[string] ]= identifier[_dict] . identifier[get] ( literal[string] )
keyword[else] :
keyword[raise] identifier[ValueError] (
literal[string] )
keyword[if] literal[string] keyword[in] identifier[_dict] :
identifier[args] [ literal[string] ]= identifier[_dict] . identifier[get] ( literal[string] )
keyword[else] :
keyword[raise] identifier[ValueError] (
literal[string] )
keyword[if] literal[string] keyword[in] identifier[_dict] :
identifier[args] [ literal[string] ]= identifier[_dict] . identifier[get] ( literal[string] )
keyword[else] :
keyword[raise] identifier[ValueError] (
literal[string] )
keyword[if] literal[string] keyword[in] identifier[_dict] :
identifier[args] [ literal[string] ]= identifier[_dict] . identifier[get] ( literal[string] )
keyword[else] :
keyword[raise] identifier[ValueError] (
literal[string] )
keyword[if] literal[string] keyword[in] identifier[_dict] :
identifier[args] [ literal[string] ]= identifier[_dict] . identifier[get] ( literal[string] )
keyword[else] :
keyword[raise] identifier[ValueError] (
literal[string] )
keyword[if] literal[string] keyword[in] identifier[_dict] :
identifier[args] [ literal[string] ]=[
identifier[WordError] . identifier[_from_dict] ( identifier[x] ) keyword[for] identifier[x] keyword[in] ( identifier[_dict] . identifier[get] ( literal[string] ))
]
keyword[return] identifier[cls] (** identifier[args] ) | def _from_dict(cls, _dict):
"""Initialize a Word object from a json dictionary."""
args = {}
if 'word' in _dict:
args['word'] = _dict.get('word') # depends on [control=['if'], data=['_dict']]
else:
raise ValueError("Required property 'word' not present in Word JSON")
if 'sounds_like' in _dict:
args['sounds_like'] = _dict.get('sounds_like') # depends on [control=['if'], data=['_dict']]
else:
raise ValueError("Required property 'sounds_like' not present in Word JSON")
if 'display_as' in _dict:
args['display_as'] = _dict.get('display_as') # depends on [control=['if'], data=['_dict']]
else:
raise ValueError("Required property 'display_as' not present in Word JSON")
if 'count' in _dict:
args['count'] = _dict.get('count') # depends on [control=['if'], data=['_dict']]
else:
raise ValueError("Required property 'count' not present in Word JSON")
if 'source' in _dict:
args['source'] = _dict.get('source') # depends on [control=['if'], data=['_dict']]
else:
raise ValueError("Required property 'source' not present in Word JSON")
if 'error' in _dict:
args['error'] = [WordError._from_dict(x) for x in _dict.get('error')] # depends on [control=['if'], data=['_dict']]
return cls(**args) |
def Append(self, value, timestamp):
    """Adds value at timestamp.

    Values must be added in order of increasing timestamp.

    Args:
      value: An observed value.
      timestamp: The timestamp at which value was observed.

    Raises:
      RuntimeError: If timestamp is smaller than the previous timestamp.
    """
    timestamp = self._NormalizeTime(timestamp)
    if self.data:
        # Compare against the timestamp of the most recent data point;
        # equal timestamps are allowed, only going backwards is rejected.
        latest = self.data[-1][1]
        if timestamp < latest:
            raise RuntimeError("Next timestamp must be larger.")
    self.data.append([value, timestamp])
constant[Adds value at timestamp.
Values must be added in order of increasing timestamp.
Args:
value: An observed value.
timestamp: The timestamp at which value was observed.
Raises:
RuntimeError: If timestamp is smaller than the previous timstamp.
]
variable[timestamp] assign[=] call[name[self]._NormalizeTime, parameter[name[timestamp]]]
if <ast.BoolOp object at 0x7da1b1b6fd00> begin[:]
<ast.Raise object at 0x7da1b1b6e890>
call[name[self].data.append, parameter[list[[<ast.Name object at 0x7da1b1b6f4c0>, <ast.Name object at 0x7da1b1b6c160>]]]] | keyword[def] identifier[Append] ( identifier[self] , identifier[value] , identifier[timestamp] ):
literal[string]
identifier[timestamp] = identifier[self] . identifier[_NormalizeTime] ( identifier[timestamp] )
keyword[if] identifier[self] . identifier[data] keyword[and] identifier[timestamp] < identifier[self] . identifier[data] [- literal[int] ][ literal[int] ]:
keyword[raise] identifier[RuntimeError] ( literal[string] )
identifier[self] . identifier[data] . identifier[append] ([ identifier[value] , identifier[timestamp] ]) | def Append(self, value, timestamp):
"""Adds value at timestamp.
Values must be added in order of increasing timestamp.
Args:
value: An observed value.
timestamp: The timestamp at which value was observed.
Raises:
RuntimeError: If timestamp is smaller than the previous timstamp.
"""
timestamp = self._NormalizeTime(timestamp)
if self.data and timestamp < self.data[-1][1]:
raise RuntimeError('Next timestamp must be larger.') # depends on [control=['if'], data=[]]
self.data.append([value, timestamp]) |
def Module(EPIC, campaign=None):
    '''
    Returns the module number for a given EPIC target.

    The channel reported by :py:func:`Channel` is mapped back to the module
    whose first channel is at most three below it; returns `None` when no
    module matches.
    '''
    channel = Channel(EPIC, campaign=campaign)
    # First channel number associated with each module (channels come in
    # groups of four per module).
    first_channel = {2: 1, 3: 5, 4: 9, 6: 13, 7: 17, 8: 21, 9: 25,
                     10: 29, 11: 33, 12: 37, 13: 41, 14: 45, 15: 49,
                     16: 53, 17: 57, 18: 61, 19: 65, 20: 69, 22: 73,
                     23: 77, 24: 81}
    # Invert the mapping once so each candidate channel is a direct lookup
    # instead of a linear scan over the values.
    module_of = {chan: mod for mod, chan in first_channel.items()}
    for candidate in (channel, channel - 1, channel - 2, channel - 3):
        if candidate in module_of:
            return module_of[candidate]
    return None
constant[
Returns the module number for a given EPIC target.
]
variable[channel] assign[=] call[name[Channel], parameter[name[EPIC]]]
variable[nums] assign[=] dictionary[[<ast.Constant object at 0x7da1b0ea93c0>, <ast.Constant object at 0x7da1b0ea9360>, <ast.Constant object at 0x7da1b0ea9390>, <ast.Constant object at 0x7da1b0ea94b0>, <ast.Constant object at 0x7da1b0ea93f0>, <ast.Constant object at 0x7da1b0ea9480>, <ast.Constant object at 0x7da1b0ea9420>, <ast.Constant object at 0x7da1b0ea9450>, <ast.Constant object at 0x7da1b0eab730>, <ast.Constant object at 0x7da1b0eab700>, <ast.Constant object at 0x7da1b0eab6d0>, <ast.Constant object at 0x7da1b0eab6a0>, <ast.Constant object at 0x7da1b0eab670>, <ast.Constant object at 0x7da1b0eab640>, <ast.Constant object at 0x7da1b0eab610>, <ast.Constant object at 0x7da1b0eab5e0>, <ast.Constant object at 0x7da1b0eab5b0>, <ast.Constant object at 0x7da1b0eab580>, <ast.Constant object at 0x7da1b0eab550>, <ast.Constant object at 0x7da1b0eab520>, <ast.Constant object at 0x7da1b0eab4f0>], [<ast.Constant object at 0x7da1b0eab4c0>, <ast.Constant object at 0x7da1b0eab490>, <ast.Constant object at 0x7da1b0eab460>, <ast.Constant object at 0x7da1b0eab430>, <ast.Constant object at 0x7da1b0eab400>, <ast.Constant object at 0x7da1b0eab3d0>, <ast.Constant object at 0x7da1b0eab3a0>, <ast.Constant object at 0x7da1b0eab370>, <ast.Constant object at 0x7da1b0eab340>, <ast.Constant object at 0x7da1b0eab310>, <ast.Constant object at 0x7da1b0eab2e0>, <ast.Constant object at 0x7da1b0eab2b0>, <ast.Constant object at 0x7da1b0eab280>, <ast.Constant object at 0x7da1b0eab250>, <ast.Constant object at 0x7da1b0eab220>, <ast.Constant object at 0x7da1b0eab1f0>, <ast.Constant object at 0x7da1b0eab1c0>, <ast.Constant object at 0x7da1b0eab190>, <ast.Constant object at 0x7da1b0eab160>, <ast.Constant object at 0x7da1b0eab130>, <ast.Constant object at 0x7da1b0eab100>]]
for taget[name[c]] in starred[list[[<ast.Name object at 0x7da1b0eab040>, <ast.BinOp object at 0x7da1b0eab010>, <ast.BinOp object at 0x7da1b0eaaf80>, <ast.BinOp object at 0x7da1b0eaaef0>]]] begin[:]
if compare[name[c] in call[name[nums].values, parameter[]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b0eaa3b0>, <ast.Name object at 0x7da1b0eaa3e0>]]] in starred[call[name[nums].items, parameter[]]] begin[:]
if compare[name[chan] equal[==] name[c]] begin[:]
return[name[mod]]
return[constant[None]] | keyword[def] identifier[Module] ( identifier[EPIC] , identifier[campaign] = keyword[None] ):
literal[string]
identifier[channel] = identifier[Channel] ( identifier[EPIC] , identifier[campaign] = identifier[campaign] )
identifier[nums] ={ literal[int] : literal[int] , literal[int] : literal[int] , literal[int] : literal[int] , literal[int] : literal[int] , literal[int] : literal[int] , literal[int] : literal[int] , literal[int] : literal[int] ,
literal[int] : literal[int] , literal[int] : literal[int] , literal[int] : literal[int] , literal[int] : literal[int] , literal[int] : literal[int] , literal[int] : literal[int] ,
literal[int] : literal[int] , literal[int] : literal[int] , literal[int] : literal[int] , literal[int] : literal[int] , literal[int] : literal[int] , literal[int] : literal[int] ,
literal[int] : literal[int] , literal[int] : literal[int] }
keyword[for] identifier[c] keyword[in] [ identifier[channel] , identifier[channel] - literal[int] , identifier[channel] - literal[int] , identifier[channel] - literal[int] ]:
keyword[if] identifier[c] keyword[in] identifier[nums] . identifier[values] ():
keyword[for] identifier[mod] , identifier[chan] keyword[in] identifier[nums] . identifier[items] ():
keyword[if] identifier[chan] == identifier[c] :
keyword[return] identifier[mod]
keyword[return] keyword[None] | def Module(EPIC, campaign=None):
"""
Returns the module number for a given EPIC target.
"""
channel = Channel(EPIC, campaign=campaign)
nums = {2: 1, 3: 5, 4: 9, 6: 13, 7: 17, 8: 21, 9: 25, 10: 29, 11: 33, 12: 37, 13: 41, 14: 45, 15: 49, 16: 53, 17: 57, 18: 61, 19: 65, 20: 69, 22: 73, 23: 77, 24: 81}
for c in [channel, channel - 1, channel - 2, channel - 3]:
if c in nums.values():
for (mod, chan) in nums.items():
if chan == c:
return mod # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=['c']] # depends on [control=['for'], data=['c']]
return None |
def execDetails(self, reqId, contract, execution):
        """
        This wrapper handles both live fills and responses to reqExecutions.
        """
        if execution.orderId == UNSET_INTEGER:
            # bug in TWS: executions of manual orders have unset value
            execution.orderId = 0
        # Look up the trade this execution belongs to; it may be absent
        # (e.g. orders placed from another client/session).
        key = self.orderKey(
            execution.clientId, execution.orderId, execution.permId)
        trade = self.trades.get(key)
        if trade and contract.conId == trade.contract.conId:
            # Reuse the trade's own contract object to keep identity consistent.
            contract = trade.contract
        else:
            contract = Contract.create(**contract.dict())
        execId = execution.execId
        # Normalize the execution timestamp to an aware UTC datetime.
        execution.time = util.parseIBDatetime(execution.time). \
            astimezone(datetime.timezone.utc)
        # reqId appears in self._futures only for pending reqExecutions
        # requests, so an unknown reqId indicates a live fill.
        isLive = reqId not in self._futures
        time = self.lastTime if isLive else execution.time
        fill = Fill(contract, execution, CommissionReport(), time)
        if execId not in self.fills:
            # first time we see this execution so add it
            self.fills[execId] = fill
            if trade:
                trade.fills.append(fill)
                logEntry = TradeLogEntry(
                    self.lastTime,
                    trade.orderStatus.status,
                    f'Fill {execution.shares}@{execution.price}')
                trade.log.append(logEntry)
                if isLive:
                    # Only live fills fire events; historical responses are
                    # collected below instead.
                    self._logger.info(f'execDetails: {fill}')
                    self.ib.execDetailsEvent.emit(trade, fill)
                    trade.fillEvent(trade, fill)
        if not isLive:
            # Accumulate the fill as part of the reqExecutions response.
            self._results[reqId].append(fill)
self._results[reqId].append(fill) | def function[execDetails, parameter[self, reqId, contract, execution]]:
constant[
This wrapper handles both live fills and responses to reqExecutions.
]
if compare[name[execution].orderId equal[==] name[UNSET_INTEGER]] begin[:]
name[execution].orderId assign[=] constant[0]
variable[key] assign[=] call[name[self].orderKey, parameter[name[execution].clientId, name[execution].orderId, name[execution].permId]]
variable[trade] assign[=] call[name[self].trades.get, parameter[name[key]]]
if <ast.BoolOp object at 0x7da18f812200> begin[:]
variable[contract] assign[=] name[trade].contract
variable[execId] assign[=] name[execution].execId
name[execution].time assign[=] call[call[name[util].parseIBDatetime, parameter[name[execution].time]].astimezone, parameter[name[datetime].timezone.utc]]
variable[isLive] assign[=] compare[name[reqId] <ast.NotIn object at 0x7da2590d7190> name[self]._futures]
variable[time] assign[=] <ast.IfExp object at 0x7da18f812a10>
variable[fill] assign[=] call[name[Fill], parameter[name[contract], name[execution], call[name[CommissionReport], parameter[]], name[time]]]
if compare[name[execId] <ast.NotIn object at 0x7da2590d7190> name[self].fills] begin[:]
call[name[self].fills][name[execId]] assign[=] name[fill]
if name[trade] begin[:]
call[name[trade].fills.append, parameter[name[fill]]]
variable[logEntry] assign[=] call[name[TradeLogEntry], parameter[name[self].lastTime, name[trade].orderStatus.status, <ast.JoinedStr object at 0x7da18bccbb80>]]
call[name[trade].log.append, parameter[name[logEntry]]]
if name[isLive] begin[:]
call[name[self]._logger.info, parameter[<ast.JoinedStr object at 0x7da18bccaf20>]]
call[name[self].ib.execDetailsEvent.emit, parameter[name[trade], name[fill]]]
call[name[trade].fillEvent, parameter[name[trade], name[fill]]]
if <ast.UnaryOp object at 0x7da18f813ca0> begin[:]
call[call[name[self]._results][name[reqId]].append, parameter[name[fill]]] | keyword[def] identifier[execDetails] ( identifier[self] , identifier[reqId] , identifier[contract] , identifier[execution] ):
literal[string]
keyword[if] identifier[execution] . identifier[orderId] == identifier[UNSET_INTEGER] :
identifier[execution] . identifier[orderId] = literal[int]
identifier[key] = identifier[self] . identifier[orderKey] (
identifier[execution] . identifier[clientId] , identifier[execution] . identifier[orderId] , identifier[execution] . identifier[permId] )
identifier[trade] = identifier[self] . identifier[trades] . identifier[get] ( identifier[key] )
keyword[if] identifier[trade] keyword[and] identifier[contract] . identifier[conId] == identifier[trade] . identifier[contract] . identifier[conId] :
identifier[contract] = identifier[trade] . identifier[contract]
keyword[else] :
identifier[contract] = identifier[Contract] . identifier[create] (** identifier[contract] . identifier[dict] ())
identifier[execId] = identifier[execution] . identifier[execId]
identifier[execution] . identifier[time] = identifier[util] . identifier[parseIBDatetime] ( identifier[execution] . identifier[time] ). identifier[astimezone] ( identifier[datetime] . identifier[timezone] . identifier[utc] )
identifier[isLive] = identifier[reqId] keyword[not] keyword[in] identifier[self] . identifier[_futures]
identifier[time] = identifier[self] . identifier[lastTime] keyword[if] identifier[isLive] keyword[else] identifier[execution] . identifier[time]
identifier[fill] = identifier[Fill] ( identifier[contract] , identifier[execution] , identifier[CommissionReport] (), identifier[time] )
keyword[if] identifier[execId] keyword[not] keyword[in] identifier[self] . identifier[fills] :
identifier[self] . identifier[fills] [ identifier[execId] ]= identifier[fill]
keyword[if] identifier[trade] :
identifier[trade] . identifier[fills] . identifier[append] ( identifier[fill] )
identifier[logEntry] = identifier[TradeLogEntry] (
identifier[self] . identifier[lastTime] ,
identifier[trade] . identifier[orderStatus] . identifier[status] ,
literal[string] )
identifier[trade] . identifier[log] . identifier[append] ( identifier[logEntry] )
keyword[if] identifier[isLive] :
identifier[self] . identifier[_logger] . identifier[info] ( literal[string] )
identifier[self] . identifier[ib] . identifier[execDetailsEvent] . identifier[emit] ( identifier[trade] , identifier[fill] )
identifier[trade] . identifier[fillEvent] ( identifier[trade] , identifier[fill] )
keyword[if] keyword[not] identifier[isLive] :
identifier[self] . identifier[_results] [ identifier[reqId] ]. identifier[append] ( identifier[fill] ) | def execDetails(self, reqId, contract, execution):
"""
This wrapper handles both live fills and responses to reqExecutions.
"""
if execution.orderId == UNSET_INTEGER:
# bug in TWS: executions of manual orders have unset value
execution.orderId = 0 # depends on [control=['if'], data=[]]
key = self.orderKey(execution.clientId, execution.orderId, execution.permId)
trade = self.trades.get(key)
if trade and contract.conId == trade.contract.conId:
contract = trade.contract # depends on [control=['if'], data=[]]
else:
contract = Contract.create(**contract.dict())
execId = execution.execId
execution.time = util.parseIBDatetime(execution.time).astimezone(datetime.timezone.utc)
isLive = reqId not in self._futures
time = self.lastTime if isLive else execution.time
fill = Fill(contract, execution, CommissionReport(), time)
if execId not in self.fills:
# first time we see this execution so add it
self.fills[execId] = fill
if trade:
trade.fills.append(fill)
logEntry = TradeLogEntry(self.lastTime, trade.orderStatus.status, f'Fill {execution.shares}@{execution.price}')
trade.log.append(logEntry)
if isLive:
self._logger.info(f'execDetails: {fill}')
self.ib.execDetailsEvent.emit(trade, fill)
trade.fillEvent(trade, fill) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['execId']]
if not isLive:
self._results[reqId].append(fill) # depends on [control=['if'], data=[]] |
def truthy(self):
        ''' Recognize a Boolean-like string value as a Boolean.

        Note: the rules are a bit different than string "truthiness."

            '0'             --> False
            '1'             --> True
            ('no', 'false') --> False   # case-insensitive
            ('yes', 'true') --> True    # case-insensitive

        The empty string maps to False; any other unrecognized value
        returns None.
        '''
        lowered = self.lower()
        if lowered.isdigit():
            # Any numeric string: zero --> False, nonzero --> True.
            return int(lowered) != 0
        if lowered in ('yes', 'true'):
            return True
        if lowered in ('no', 'false'):
            return False
        if not self:
            return False
        return None
constant[ Recognize a Boolean-like string value as a Boolean.
Note: the rules are a bit different than string "truthiness."
'0' --> False
'1' --> True
('no', 'false') --> False # case-insensitive
('yes', 'true') --> True # case-insensitive
]
variable[lower] assign[=] call[name[self].lower, parameter[]]
if call[name[lower].isdigit, parameter[]] begin[:]
return[call[name[bool], parameter[call[name[int], parameter[name[lower]]]]]] | keyword[def] identifier[truthy] ( identifier[self] ):
literal[string]
identifier[lower] = identifier[self] . identifier[lower] ()
keyword[if] identifier[lower] . identifier[isdigit] ():
keyword[return] identifier[bool] ( identifier[int] ( identifier[lower] ))
keyword[elif] identifier[lower] keyword[in] ( literal[string] , literal[string] ):
keyword[return] keyword[True]
keyword[elif] identifier[lower] keyword[in] ( literal[string] , literal[string] ):
keyword[return] keyword[False]
keyword[elif] identifier[self] == literal[string] :
keyword[return] keyword[False]
keyword[else] :
keyword[return] keyword[None] | def truthy(self):
""" Recognize a Boolean-like string value as a Boolean.
Note: the rules are a bit different than string "truthiness."
'0' --> False
'1' --> True
('no', 'false') --> False #\xa0case-insensitive
('yes', 'true') --> True #\xa0case-insensitive
"""
lower = self.lower()
if lower.isdigit():
return bool(int(lower)) # depends on [control=['if'], data=[]]
elif lower in ('yes', 'true'):
return True # depends on [control=['if'], data=[]]
elif lower in ('no', 'false'):
return False # depends on [control=['if'], data=[]]
elif self == '':
return False # depends on [control=['if'], data=[]]
else:
return None |
def ip(ip_address, return_format=None):
    """Returns a summary of the information our database holds for a
    particular IP address (similar to /ipinfo.html).

    In the returned data:

    Count: (also reports or records) total number of packets blocked from
    this IP.

    Attacks: (also targets) number of unique destination IP addresses for
    these packets.

    :param ip_address: a valid IP address
    :raises Error: if the service reports the address as invalid
    """
    endpoint = 'ip/{address}'.format(address=ip_address)
    result = _get(endpoint, return_format)
    if 'bad IP address' in str(result):
        raise Error('Bad IP address, {address}'.format(address=ip_address))
    return result
constant[Returns a summary of the information our database holds for a
particular IP address (similar to /ipinfo.html).
In the returned data:
Count: (also reports or records) total number of packets blocked from
this IP.
Attacks: (also targets) number of unique destination IP addresses for
these packets.
:param ip_address: a valid IP address
]
variable[response] assign[=] call[name[_get], parameter[call[constant[ip/{address}].format, parameter[]], name[return_format]]]
if compare[constant[bad IP address] in call[name[str], parameter[name[response]]]] begin[:]
<ast.Raise object at 0x7da1b1956c20> | keyword[def] identifier[ip] ( identifier[ip_address] , identifier[return_format] = keyword[None] ):
literal[string]
identifier[response] = identifier[_get] ( literal[string] . identifier[format] ( identifier[address] = identifier[ip_address] ), identifier[return_format] )
keyword[if] literal[string] keyword[in] identifier[str] ( identifier[response] ):
keyword[raise] identifier[Error] ( literal[string] . identifier[format] ( identifier[address] = identifier[ip_address] ))
keyword[else] :
keyword[return] identifier[response] | def ip(ip_address, return_format=None):
"""Returns a summary of the information our database holds for a
particular IP address (similar to /ipinfo.html).
In the returned data:
Count: (also reports or records) total number of packets blocked from
this IP.
Attacks: (also targets) number of unique destination IP addresses for
these packets.
:param ip_address: a valid IP address
"""
response = _get('ip/{address}'.format(address=ip_address), return_format)
if 'bad IP address' in str(response):
raise Error('Bad IP address, {address}'.format(address=ip_address)) # depends on [control=['if'], data=[]]
else:
return response |
def find_lexeme(self, verb):
        """ For a regular verb (base form), returns the forms using a rule-based approach.

        Builds the full conjugation table (present, past, imperative,
        subjunctive I and II) from the infinitive by stripping -en/-ln/-rn,
        splitting off a separable prefix, and attaching the regular endings.
        If the (Eszett-encoded) verb is present in the lexicon (``self``),
        lexicon inflections override the rule-based ones where available.

        @param verb: infinitive form of a regular German verb (str).
        @return: list of inflected forms; slot order matches the lexicon rows
            (presumably TENSES order used elsewhere in this module -- confirm).
        """
        v = verb.lower()
        # Stem = infinitive minus -en, -ln, -rn.
        b = b0 = re.sub("en$", "", re.sub("ln$", "l", re.sub("rn$", "r", v)))
        # Split common prefixes.
        # x  = the separable prefix itself (e.g. "auf" in "aufstehen"),
        # x1 = " prefix" suffix appended to finite forms ("stehe auf"),
        # x2 = prefix + "ge" (computed but unused below; kept for parity).
        x, x1, x2 = "", "", ""
        for prefix in prefix_separable:
            if v.startswith(prefix):
                b, x = b[len(prefix):], prefix
                x1 = (" " + x).rstrip()
                x2 = x + "ge"
                break
        # Present tense 1sg and subjunctive -el: handeln => ich handle, du handlest.
        pl = b.endswith("el") and b[:-2]+"l" or b
        # Present tense 1pl -el: handeln => wir handeln
        pw = v.endswith(("ln", "rn")) and v or b+"en"
        # Present tense ending in -d or -t gets -e:
        pr = b.endswith(("d", "t")) and b+"e" or b
        # Present tense 2sg gets -st, unless stem ends with -s or -z.
        p2 = pr.endswith(("s","z")) and pr+"t" or pr+"st"
        # Present participle: spiel + -end, arbeiten + -d:
        pp = v.endswith(("en", "ln", "rn")) and v+"d" or v+"end"
        # Past tense regular:
        pt = encode_sz(pr) + "t"
        # Past participle: haushalten => hausgehalten
        # No "ge-" for inseparable prefixes or stems in -r/-t; separable
        # prefixes insert "ge" after the prefix (second assignment).
        ge = (v.startswith(prefix_inseparable) or b.endswith(("r","t"))) and pt or "ge"+pt
        ge = x and x+"ge"+pt or ge
        # Present subjunctive: stem + -e, -est, -en, -et:
        s1 = encode_sz(pl)
        # Past subjunctive: past (usually with Umlaut) + -e, -est, -en, -et:
        s2 = encode_sz(pt)
        # Construct the lexeme:
        lexeme = a = [
            v,
            pl+"e"+x1, p2+x1, pr+"t"+x1, pw+x1, pr+"t"+x1, pp,     # present
            pt+"e"+x1, pt+"est"+x1, pt+"e"+x1, pt+"en"+x1, pt+"et"+x1, ge, # past
            b+"e"+x1, pr+"t"+x1, x+pw,                             # imperative
            s1+"e"+x1, s1+"est"+x1, s1+"en"+x1, s1+"et"+x1,        # subjunctive I
            s2+"e"+x1, s2+"est"+x1, s2+"en"+x1, s2+"et"+x1         # subjunctive II
        ]
        # Encode Eszett (ß) and attempt to retrieve from the lexicon.
        # Decode Eszett for present and imperative.
        if encode_sz(v) in self:
            a = self[encode_sz(v)]
            a = [decode_sz(v) for v in a[:7]] + a[7:13] + [decode_sz(v) for v in a[13:20]] + a[20:]
        # Since the lexicon does not contain imperative for all verbs, don't simply return it.
        # Instead, update the rule-based lexeme with inflections from the lexicon.
        return [a[i] or lexeme[i] for i in range(len(a))]
constant[ For a regular verb (base form), returns the forms using a rule-based approach.
]
variable[v] assign[=] call[name[verb].lower, parameter[]]
variable[b] assign[=] call[name[re].sub, parameter[constant[en$], constant[], call[name[re].sub, parameter[constant[ln$], constant[l], call[name[re].sub, parameter[constant[rn$], constant[r], name[v]]]]]]]
<ast.Tuple object at 0x7da2047e9e70> assign[=] tuple[[<ast.Constant object at 0x7da2047ea770>, <ast.Constant object at 0x7da2047e9e10>, <ast.Constant object at 0x7da2047eb160>]]
for taget[name[prefix]] in starred[name[prefix_separable]] begin[:]
if call[name[v].startswith, parameter[name[prefix]]] begin[:]
<ast.Tuple object at 0x7da18dc07f10> assign[=] tuple[[<ast.Subscript object at 0x7da18dc057b0>, <ast.Name object at 0x7da18dc068c0>]]
variable[x1] assign[=] call[binary_operation[constant[ ] + name[x]].rstrip, parameter[]]
variable[x2] assign[=] binary_operation[name[x] + constant[ge]]
break
variable[pl] assign[=] <ast.BoolOp object at 0x7da18dc05870>
variable[pw] assign[=] <ast.BoolOp object at 0x7da18dc06dd0>
variable[pr] assign[=] <ast.BoolOp object at 0x7da18dc06b00>
variable[p2] assign[=] <ast.BoolOp object at 0x7da18dc05bd0>
variable[pp] assign[=] <ast.BoolOp object at 0x7da18dc07d00>
variable[pt] assign[=] binary_operation[call[name[encode_sz], parameter[name[pr]]] + constant[t]]
variable[ge] assign[=] <ast.BoolOp object at 0x7da18dc04640>
variable[ge] assign[=] <ast.BoolOp object at 0x7da18dc05840>
variable[s1] assign[=] call[name[encode_sz], parameter[name[pl]]]
variable[s2] assign[=] call[name[encode_sz], parameter[name[pt]]]
variable[lexeme] assign[=] list[[<ast.Name object at 0x7da18dc05ea0>, <ast.BinOp object at 0x7da18dc06800>, <ast.BinOp object at 0x7da18dc06bc0>, <ast.BinOp object at 0x7da18dc07100>, <ast.BinOp object at 0x7da18dc05fc0>, <ast.BinOp object at 0x7da18dc06fb0>, <ast.Name object at 0x7da18dc06bf0>, <ast.BinOp object at 0x7da18dc06740>, <ast.BinOp object at 0x7da18dc07370>, <ast.BinOp object at 0x7da18f58fac0>, <ast.BinOp object at 0x7da18f58c5b0>, <ast.BinOp object at 0x7da18f58cfd0>, <ast.Name object at 0x7da18f58df60>, <ast.BinOp object at 0x7da18f58dae0>, <ast.BinOp object at 0x7da18f58dd80>, <ast.BinOp object at 0x7da18f58d960>, <ast.BinOp object at 0x7da18f58d1e0>, <ast.BinOp object at 0x7da18f58eec0>, <ast.BinOp object at 0x7da18f58cd90>, <ast.BinOp object at 0x7da18f58c280>, <ast.BinOp object at 0x7da18f58dcf0>, <ast.BinOp object at 0x7da18f58e980>, <ast.BinOp object at 0x7da18f58dff0>, <ast.BinOp object at 0x7da18f58f430>]]
if compare[call[name[encode_sz], parameter[name[v]]] in name[self]] begin[:]
variable[a] assign[=] call[name[self]][call[name[encode_sz], parameter[name[v]]]]
variable[a] assign[=] binary_operation[binary_operation[binary_operation[<ast.ListComp object at 0x7da18f58e8c0> + call[name[a]][<ast.Slice object at 0x7da18f58ee60>]] + <ast.ListComp object at 0x7da18f58c220>] + call[name[a]][<ast.Slice object at 0x7da20cabf1c0>]]
return[<ast.ListComp object at 0x7da20cabd750>] | keyword[def] identifier[find_lexeme] ( identifier[self] , identifier[verb] ):
literal[string]
identifier[v] = identifier[verb] . identifier[lower] ()
identifier[b] = identifier[b0] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[v] )))
identifier[x] , identifier[x1] , identifier[x2] = literal[string] , literal[string] , literal[string]
keyword[for] identifier[prefix] keyword[in] identifier[prefix_separable] :
keyword[if] identifier[v] . identifier[startswith] ( identifier[prefix] ):
identifier[b] , identifier[x] = identifier[b] [ identifier[len] ( identifier[prefix] ):], identifier[prefix]
identifier[x1] =( literal[string] + identifier[x] ). identifier[rstrip] ()
identifier[x2] = identifier[x] + literal[string]
keyword[break]
identifier[pl] = identifier[b] . identifier[endswith] ( literal[string] ) keyword[and] identifier[b] [:- literal[int] ]+ literal[string] keyword[or] identifier[b]
identifier[pw] = identifier[v] . identifier[endswith] (( literal[string] , literal[string] )) keyword[and] identifier[v] keyword[or] identifier[b] + literal[string]
identifier[pr] = identifier[b] . identifier[endswith] (( literal[string] , literal[string] )) keyword[and] identifier[b] + literal[string] keyword[or] identifier[b]
identifier[p2] = identifier[pr] . identifier[endswith] (( literal[string] , literal[string] )) keyword[and] identifier[pr] + literal[string] keyword[or] identifier[pr] + literal[string]
identifier[pp] = identifier[v] . identifier[endswith] (( literal[string] , literal[string] , literal[string] )) keyword[and] identifier[v] + literal[string] keyword[or] identifier[v] + literal[string]
identifier[pt] = identifier[encode_sz] ( identifier[pr] )+ literal[string]
identifier[ge] =( identifier[v] . identifier[startswith] ( identifier[prefix_inseparable] ) keyword[or] identifier[b] . identifier[endswith] (( literal[string] , literal[string] ))) keyword[and] identifier[pt] keyword[or] literal[string] + identifier[pt]
identifier[ge] = identifier[x] keyword[and] identifier[x] + literal[string] + identifier[pt] keyword[or] identifier[ge]
identifier[s1] = identifier[encode_sz] ( identifier[pl] )
identifier[s2] = identifier[encode_sz] ( identifier[pt] )
identifier[lexeme] = identifier[a] =[
identifier[v] ,
identifier[pl] + literal[string] + identifier[x1] , identifier[p2] + identifier[x1] , identifier[pr] + literal[string] + identifier[x1] , identifier[pw] + identifier[x1] , identifier[pr] + literal[string] + identifier[x1] , identifier[pp] ,
identifier[pt] + literal[string] + identifier[x1] , identifier[pt] + literal[string] + identifier[x1] , identifier[pt] + literal[string] + identifier[x1] , identifier[pt] + literal[string] + identifier[x1] , identifier[pt] + literal[string] + identifier[x1] , identifier[ge] ,
identifier[b] + literal[string] + identifier[x1] , identifier[pr] + literal[string] + identifier[x1] , identifier[x] + identifier[pw] ,
identifier[s1] + literal[string] + identifier[x1] , identifier[s1] + literal[string] + identifier[x1] , identifier[s1] + literal[string] + identifier[x1] , identifier[s1] + literal[string] + identifier[x1] ,
identifier[s2] + literal[string] + identifier[x1] , identifier[s2] + literal[string] + identifier[x1] , identifier[s2] + literal[string] + identifier[x1] , identifier[s2] + literal[string] + identifier[x1]
]
keyword[if] identifier[encode_sz] ( identifier[v] ) keyword[in] identifier[self] :
identifier[a] = identifier[self] [ identifier[encode_sz] ( identifier[v] )]
identifier[a] =[ identifier[decode_sz] ( identifier[v] ) keyword[for] identifier[v] keyword[in] identifier[a] [: literal[int] ]]+ identifier[a] [ literal[int] : literal[int] ]+[ identifier[decode_sz] ( identifier[v] ) keyword[for] identifier[v] keyword[in] identifier[a] [ literal[int] : literal[int] ]]+ identifier[a] [ literal[int] :]
keyword[return] [ identifier[a] [ identifier[i] ] keyword[or] identifier[lexeme] [ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[a] ))] | def find_lexeme(self, verb):
""" For a regular verb (base form), returns the forms using a rule-based approach.
"""
v = verb.lower()
# Stem = infinitive minus -en, -ln, -rn.
b = b0 = re.sub('en$', '', re.sub('ln$', 'l', re.sub('rn$', 'r', v)))
# Split common prefixes.
(x, x1, x2) = ('', '', '')
for prefix in prefix_separable:
if v.startswith(prefix):
(b, x) = (b[len(prefix):], prefix)
x1 = (' ' + x).rstrip()
x2 = x + 'ge'
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['prefix']]
# Present tense 1sg and subjunctive -el: handeln => ich handle, du handlest.
pl = b.endswith('el') and b[:-2] + 'l' or b
# Present tense 1pl -el: handeln => wir handeln
pw = v.endswith(('ln', 'rn')) and v or b + 'en'
# Present tense ending in -d or -t gets -e:
pr = b.endswith(('d', 't')) and b + 'e' or b
# Present tense 2sg gets -st, unless stem ends with -s or -z.
p2 = pr.endswith(('s', 'z')) and pr + 't' or pr + 'st'
# Present participle: spiel + -end, arbeiten + -d:
pp = v.endswith(('en', 'ln', 'rn')) and v + 'd' or v + 'end'
# Past tense regular:
pt = encode_sz(pr) + 't'
# Past participle: haushalten => hausgehalten
ge = (v.startswith(prefix_inseparable) or b.endswith(('r', 't'))) and pt or 'ge' + pt
ge = x and x + 'ge' + pt or ge
# Present subjunctive: stem + -e, -est, -en, -et:
s1 = encode_sz(pl)
# Past subjunctive: past (usually with Umlaut) + -e, -est, -en, -et:
s2 = encode_sz(pt)
# Construct the lexeme:
# present
# past
# imperative
# subjunctive I
# subjunctive II
lexeme = a = [v, pl + 'e' + x1, p2 + x1, pr + 't' + x1, pw + x1, pr + 't' + x1, pp, pt + 'e' + x1, pt + 'est' + x1, pt + 'e' + x1, pt + 'en' + x1, pt + 'et' + x1, ge, b + 'e' + x1, pr + 't' + x1, x + pw, s1 + 'e' + x1, s1 + 'est' + x1, s1 + 'en' + x1, s1 + 'et' + x1, s2 + 'e' + x1, s2 + 'est' + x1, s2 + 'en' + x1, s2 + 'et' + x1]
# Encode Eszett (ß) and attempt to retrieve from the lexicon.
# Decode Eszett for present and imperative.
if encode_sz(v) in self:
a = self[encode_sz(v)]
a = [decode_sz(v) for v in a[:7]] + a[7:13] + [decode_sz(v) for v in a[13:20]] + a[20:] # depends on [control=['if'], data=['self']]
# Since the lexicon does not contain imperative for all verbs, don't simply return it.
# Instead, update the rule-based lexeme with inflections from the lexicon.
return [a[i] or lexeme[i] for i in range(len(a))] |
def obfuscate_ip(ip):
    """Obfuscate given host in IP form.
    @ip: IPv4 address string
    @return: hexadecimal IP string ('0x1ab...')
    @raise: ValueError on invalid IP addresses
    """
    if not is_valid_ipv4(ip):
        raise ValueError('Invalid IP value %r' % ip)
    # Each dotted octet is rendered as its hex digits without zero-padding,
    # e.g. "10.0.0.1" -> "0xa001".
    # NOTE(review): without padding, distinct IPs can collide on the same
    # string (e.g. 1.35.4.5 and 18.3.4.5) -- confirm reversibility is not
    # required by callers.
    digits = [format(int(octet), "x") for octet in ip.split(".")]
    res = "0x" + "".join(digits)
    assert is_obfuscated_ip(res), '%r obfuscation error' % res
    return res
return res | def function[obfuscate_ip, parameter[ip]]:
constant[Obfuscate given host in IP form.
@ip: IPv4 address string
@return: hexadecimal IP string ('0x1ab...')
@raise: ValueError on invalid IP addresses
]
if call[name[is_valid_ipv4], parameter[name[ip]]] begin[:]
variable[res] assign[=] binary_operation[constant[0x%s] <ast.Mod object at 0x7da2590d6920> call[constant[].join, parameter[<ast.GeneratorExp object at 0x7da20e960c70>]]]
assert[call[name[is_obfuscated_ip], parameter[name[res]]]]
return[name[res]] | keyword[def] identifier[obfuscate_ip] ( identifier[ip] ):
literal[string]
keyword[if] identifier[is_valid_ipv4] ( identifier[ip] ):
identifier[res] = literal[string] % literal[string] . identifier[join] ( identifier[hex] ( identifier[int] ( identifier[x] ))[ literal[int] :] keyword[for] identifier[x] keyword[in] identifier[ip] . identifier[split] ( literal[string] ))
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[ip] )
keyword[assert] identifier[is_obfuscated_ip] ( identifier[res] ), literal[string] % identifier[res]
keyword[return] identifier[res] | def obfuscate_ip(ip):
"""Obfuscate given host in IP form.
@ip: IPv4 address string
@return: hexadecimal IP string ('0x1ab...')
@raise: ValueError on invalid IP addresses
"""
if is_valid_ipv4(ip):
res = '0x%s' % ''.join((hex(int(x))[2:] for x in ip.split('.'))) # depends on [control=['if'], data=[]]
else:
raise ValueError('Invalid IP value %r' % ip)
assert is_obfuscated_ip(res), '%r obfuscation error' % res
return res |
def set_data(self, data, offset=None, copy=False):
        """Set texture data.

        The data is forwarded to the underlying ``Texture2D`` after its
        shape has been normalized to the emulated layout recorded by
        ``_set_emulated_shape``; GLSL-side variables are refreshed last.

        Parameters
        ----------
        data : ndarray
            Data to be uploaded.
        offset : int | tuple of ints
            Offset in texture where to start copying data.
        copy : bool
            Since the operation is deferred, data may change before
            data is actually uploaded to GPU memory. Asking explicitly
            for a copy will prevent this behavior.

        Notes
        -----
        This operation implicitly resizes the texture to the shape of
        the data if the given offset is None.
        """
        self._set_emulated_shape(data)
        Texture2D.set_data(self, self._normalize_emulated_shape(data),
                           offset, copy)
        # Keep shader-side size/shape variables in sync with the new data.
        self._update_variables()
self._update_variables() | def function[set_data, parameter[self, data, offset, copy]]:
constant[Set texture data
Parameters
----------
data : ndarray
Data to be uploaded
offset: int | tuple of ints
Offset in texture where to start copying data
copy: bool
Since the operation is deferred, data may change before
data is actually uploaded to GPU memory. Asking explicitly
for a copy will prevent this behavior.
Notes
-----
This operation implicitely resizes the texture to the shape of
the data if given offset is None.
]
call[name[self]._set_emulated_shape, parameter[name[data]]]
call[name[Texture2D].set_data, parameter[name[self], call[name[self]._normalize_emulated_shape, parameter[name[data]]], name[offset], name[copy]]]
call[name[self]._update_variables, parameter[]] | keyword[def] identifier[set_data] ( identifier[self] , identifier[data] , identifier[offset] = keyword[None] , identifier[copy] = keyword[False] ):
literal[string]
identifier[self] . identifier[_set_emulated_shape] ( identifier[data] )
identifier[Texture2D] . identifier[set_data] ( identifier[self] , identifier[self] . identifier[_normalize_emulated_shape] ( identifier[data] ),
identifier[offset] , identifier[copy] )
identifier[self] . identifier[_update_variables] () | def set_data(self, data, offset=None, copy=False):
"""Set texture data
Parameters
----------
data : ndarray
Data to be uploaded
offset: int | tuple of ints
Offset in texture where to start copying data
copy: bool
Since the operation is deferred, data may change before
data is actually uploaded to GPU memory. Asking explicitly
for a copy will prevent this behavior.
Notes
-----
This operation implicitely resizes the texture to the shape of
the data if given offset is None.
"""
self._set_emulated_shape(data)
Texture2D.set_data(self, self._normalize_emulated_shape(data), offset, copy)
self._update_variables() |
def load_module(self, module_name):
        """Attempts to load the specified module.

        If successful, .loaded_modules[module_name] will be populated, and
        module_name will be added to the end of .module_ordering as well if
        it is not already present. Note that this function does NOT call
        start()/stop() on the module - in general, you don't want to call
        this directly but instead use reload_module().

        Returns True if the module was successfully loaded, otherwise False.
        """
        if module_name in self.currently_loading:
            _log.warning("Ignoring request to load module '%s' because it "
                         "is already currently being loaded.", module_name)
            return False
        try:
            # Mark the module as in-progress so re-entrant loads are refused,
            # and clear the marker in the finally clause no matter what.
            self.currently_loading.add(module_name)
            if self.loaded_on_this_event is not None:
                self.loaded_on_this_event.add(module_name)
            # Import then reload, so an already-imported module picks up
            # on-disk changes instead of being served stale from sys.modules.
            try:
                fresh = reload(importlib.import_module(module_name))
            except ImportError:
                _log.error("Unable to load module '%s' - module not found.",
                           module_name)
                return False
            except SyntaxError:
                _log.exception("Unable to load module '%s' - syntax error(s).",
                               module_name)
                return False
            if not hasattr(fresh, "module"):
                _log.error("Unable to load module '%s' - no 'module' member.",
                           module_name)
                return False
            module_cls = fresh.module
            if not issubclass(module_cls, Module):
                _log.error("Unable to load module '%s' - it's 'module' member "
                           "is not a kitnirc.modular.Module.", module_name)
                return False
            # Instantiate and register; keep ordering stable on re-loads.
            self.loaded_modules[module_name] = module_cls(self)
            if module_name not in self.module_ordering:
                self.module_ordering.append(module_name)
            return True
        finally:
            self.currently_loading.discard(module_name)
constant[Attempts to load the specified module.
If successful, .loaded_modules[module_name] will be populated, and
module_name will be added to the end of .module_ordering as well if
it is not already present. Note that this function does NOT call
start()/stop() on the module - in general, you don't want to call
this directly but instead use reload_module().
Returns True if the module was successfully loaded, otherwise False.
]
if compare[name[module_name] in name[self].currently_loading] begin[:]
call[name[_log].warning, parameter[constant[Ignoring request to load module '%s' because it is already currently being loaded.], name[module_name]]]
return[constant[False]]
<ast.Try object at 0x7da18f58fd60> | keyword[def] identifier[load_module] ( identifier[self] , identifier[module_name] ):
literal[string]
keyword[if] identifier[module_name] keyword[in] identifier[self] . identifier[currently_loading] :
identifier[_log] . identifier[warning] ( literal[string]
literal[string] , identifier[module_name] )
keyword[return] keyword[False]
keyword[try] :
identifier[self] . identifier[currently_loading] . identifier[add] ( identifier[module_name] )
keyword[if] identifier[self] . identifier[loaded_on_this_event] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[loaded_on_this_event] . identifier[add] ( identifier[module_name] )
keyword[try] :
identifier[_temp] = identifier[reload] ( identifier[importlib] . identifier[import_module] ( identifier[module_name] ))
keyword[except] identifier[ImportError] :
identifier[_log] . identifier[error] ( literal[string] ,
identifier[module_name] )
keyword[return] keyword[False]
keyword[except] identifier[SyntaxError] :
identifier[_log] . identifier[exception] ( literal[string] ,
identifier[module_name] )
keyword[return] keyword[False]
keyword[if] keyword[not] identifier[hasattr] ( identifier[_temp] , literal[string] ):
identifier[_log] . identifier[error] ( literal[string] ,
identifier[module_name] )
keyword[return] keyword[False]
identifier[module] = identifier[_temp] . identifier[module]
keyword[if] keyword[not] identifier[issubclass] ( identifier[module] , identifier[Module] ):
identifier[_log] . identifier[error] ( literal[string]
literal[string] , identifier[module_name] )
keyword[return] keyword[False]
identifier[self] . identifier[loaded_modules] [ identifier[module_name] ]= identifier[module] ( identifier[self] )
keyword[if] identifier[module_name] keyword[not] keyword[in] identifier[self] . identifier[module_ordering] :
identifier[self] . identifier[module_ordering] . identifier[append] ( identifier[module_name] )
keyword[return] keyword[True]
keyword[finally] :
identifier[self] . identifier[currently_loading] . identifier[discard] ( identifier[module_name] ) | def load_module(self, module_name):
"""Attempts to load the specified module.
If successful, .loaded_modules[module_name] will be populated, and
module_name will be added to the end of .module_ordering as well if
it is not already present. Note that this function does NOT call
start()/stop() on the module - in general, you don't want to call
this directly but instead use reload_module().
Returns True if the module was successfully loaded, otherwise False.
"""
if module_name in self.currently_loading:
_log.warning("Ignoring request to load module '%s' because it is already currently being loaded.", module_name)
return False # depends on [control=['if'], data=['module_name']]
try: # ensure that currently_loading gets reset no matter what
self.currently_loading.add(module_name)
if self.loaded_on_this_event is not None:
self.loaded_on_this_event.add(module_name) # depends on [control=['if'], data=[]]
# Force the module to actually be reloaded
try:
_temp = reload(importlib.import_module(module_name)) # depends on [control=['try'], data=[]]
except ImportError:
_log.error("Unable to load module '%s' - module not found.", module_name)
return False # depends on [control=['except'], data=[]]
except SyntaxError:
_log.exception("Unable to load module '%s' - syntax error(s).", module_name)
return False # depends on [control=['except'], data=[]]
if not hasattr(_temp, 'module'):
_log.error("Unable to load module '%s' - no 'module' member.", module_name)
return False # depends on [control=['if'], data=[]]
module = _temp.module
if not issubclass(module, Module):
_log.error("Unable to load module '%s' - it's 'module' member is not a kitnirc.modular.Module.", module_name)
return False # depends on [control=['if'], data=[]]
self.loaded_modules[module_name] = module(self)
if module_name not in self.module_ordering:
self.module_ordering.append(module_name) # depends on [control=['if'], data=['module_name']]
return True # depends on [control=['try'], data=[]]
finally:
self.currently_loading.discard(module_name) |
def get_group_attributegroup_items(network_id, group_id, **kwargs):
    """Return every AttrGroupItem in the given attribute group of a network.

    The requesting user's id is taken from the 'user_id' keyword argument
    and must hold read permission on the network.
    """
    requesting_user = kwargs.get('user_id')
    network = _get_network(network_id)
    network.check_read_permission(requesting_user)
    query = db.DBSession.query(AttrGroupItem).filter(
        AttrGroupItem.network_id == network_id,
        AttrGroupItem.group_id == group_id)
    return query.all()
return group_items_i | def function[get_group_attributegroup_items, parameter[network_id, group_id]]:
constant[
Get all the items in a specified group, within a network
]
variable[user_id] assign[=] call[name[kwargs].get, parameter[constant[user_id]]]
variable[network_i] assign[=] call[name[_get_network], parameter[name[network_id]]]
call[name[network_i].check_read_permission, parameter[name[user_id]]]
variable[group_items_i] assign[=] call[call[call[name[db].DBSession.query, parameter[name[AttrGroupItem]]].filter, parameter[compare[name[AttrGroupItem].network_id equal[==] name[network_id]], compare[name[AttrGroupItem].group_id equal[==] name[group_id]]]].all, parameter[]]
return[name[group_items_i]] | keyword[def] identifier[get_group_attributegroup_items] ( identifier[network_id] , identifier[group_id] ,** identifier[kwargs] ):
literal[string]
identifier[user_id] = identifier[kwargs] . identifier[get] ( literal[string] )
identifier[network_i] = identifier[_get_network] ( identifier[network_id] )
identifier[network_i] . identifier[check_read_permission] ( identifier[user_id] )
identifier[group_items_i] = identifier[db] . identifier[DBSession] . identifier[query] ( identifier[AttrGroupItem] ). identifier[filter] (
identifier[AttrGroupItem] . identifier[network_id] == identifier[network_id] ,
identifier[AttrGroupItem] . identifier[group_id] == identifier[group_id] ). identifier[all] ()
keyword[return] identifier[group_items_i] | def get_group_attributegroup_items(network_id, group_id, **kwargs):
"""
Get all the items in a specified group, within a network
"""
user_id = kwargs.get('user_id')
network_i = _get_network(network_id)
network_i.check_read_permission(user_id)
group_items_i = db.DBSession.query(AttrGroupItem).filter(AttrGroupItem.network_id == network_id, AttrGroupItem.group_id == group_id).all()
return group_items_i |
def log_entry_encode(self, id, num_logs, last_log_num, time_utc, size):
                '''
                Reply to LOG_REQUEST_LIST

                id                        : Log id (uint16_t)
                num_logs                  : Total number of logs (uint16_t)
                last_log_num              : High log number (uint16_t)
                time_utc                  : UTC timestamp of log in seconds since 1970, or 0 if not available (uint32_t)
                size                      : Size of the log (may be approximate) in bytes (uint32_t)

                Returns the encoded MAVLink_log_entry_message; the message is
                only constructed here, not sent on the wire.
                '''
                return MAVLink_log_entry_message(id, num_logs, last_log_num, time_utc, size)
constant[
Reply to LOG_REQUEST_LIST
id : Log id (uint16_t)
num_logs : Total number of logs (uint16_t)
last_log_num : High log number (uint16_t)
time_utc : UTC timestamp of log in seconds since 1970, or 0 if not available (uint32_t)
size : Size of the log (may be approximate) in bytes (uint32_t)
]
return[call[name[MAVLink_log_entry_message], parameter[name[id], name[num_logs], name[last_log_num], name[time_utc], name[size]]]] | keyword[def] identifier[log_entry_encode] ( identifier[self] , identifier[id] , identifier[num_logs] , identifier[last_log_num] , identifier[time_utc] , identifier[size] ):
literal[string]
keyword[return] identifier[MAVLink_log_entry_message] ( identifier[id] , identifier[num_logs] , identifier[last_log_num] , identifier[time_utc] , identifier[size] ) | def log_entry_encode(self, id, num_logs, last_log_num, time_utc, size):
"""
Reply to LOG_REQUEST_LIST
id : Log id (uint16_t)
num_logs : Total number of logs (uint16_t)
last_log_num : High log number (uint16_t)
time_utc : UTC timestamp of log in seconds since 1970, or 0 if not available (uint32_t)
size : Size of the log (may be approximate) in bytes (uint32_t)
"""
return MAVLink_log_entry_message(id, num_logs, last_log_num, time_utc, size) |
def wantMethod(self, method):
        """Accept the method if its attributes match.

        Objects without an ``im_class`` (plain functions, non-bound
        callables) are rejected outright; otherwise the decision is
        delegated to ``validateAttrib`` with the method's defining class.
        """
        try:
            klass = method.im_class
        except AttributeError:
            # Not a bound method -- there is no class to validate against.
            return False
        return self.validateAttrib(method, klass)
constant[Accept the method if its attributes match.
]
<ast.Try object at 0x7da207f99900>
return[call[name[self].validateAttrib, parameter[name[method], name[cls]]]] | keyword[def] identifier[wantMethod] ( identifier[self] , identifier[method] ):
literal[string]
keyword[try] :
identifier[cls] = identifier[method] . identifier[im_class]
keyword[except] identifier[AttributeError] :
keyword[return] keyword[False]
keyword[return] identifier[self] . identifier[validateAttrib] ( identifier[method] , identifier[cls] ) | def wantMethod(self, method):
"""Accept the method if its attributes match.
"""
try:
cls = method.im_class # depends on [control=['try'], data=[]]
except AttributeError:
return False # depends on [control=['except'], data=[]]
return self.validateAttrib(method, cls) |
def _get_single_item(self, url_suffix, data, content_type=ContentType.json):
        """
        Send a GET request to the API at url_suffix with the given data.

        Pagination is disallowed: _check_err raises if the response carries
        an x-total-pages header.

        :param url_suffix: str URL path we are sending a GET to
        :param data: object data we are sending
        :param content_type: str from ContentType that determines how we format the data
        :return: requests.Response containing the result
        """
        url, encoded, headers = self._url_parts(url_suffix, data,
                                                content_type=content_type)
        response = self.http.get(url, headers=headers, params=encoded)
        return self._check_err(response, url_suffix, data, allow_pagination=False)
constant[
Send GET request to API at url_suffix with post_data.
Raises error if x-total-pages is contained in the response.
:param url_suffix: str URL path we are sending a GET to
:param url_data: object data we are sending
:param content_type: str from ContentType that determines how we format the data
:return: requests.Response containing the result
]
<ast.Tuple object at 0x7da18f09ec20> assign[=] call[name[self]._url_parts, parameter[name[url_suffix], name[data]]]
variable[resp] assign[=] call[name[self].http.get, parameter[name[url]]]
return[call[name[self]._check_err, parameter[name[resp], name[url_suffix], name[data]]]] | keyword[def] identifier[_get_single_item] ( identifier[self] , identifier[url_suffix] , identifier[data] , identifier[content_type] = identifier[ContentType] . identifier[json] ):
literal[string]
( identifier[url] , identifier[data_str] , identifier[headers] )= identifier[self] . identifier[_url_parts] ( identifier[url_suffix] , identifier[data] , identifier[content_type] = identifier[content_type] )
identifier[resp] = identifier[self] . identifier[http] . identifier[get] ( identifier[url] , identifier[headers] = identifier[headers] , identifier[params] = identifier[data_str] )
keyword[return] identifier[self] . identifier[_check_err] ( identifier[resp] , identifier[url_suffix] , identifier[data] , identifier[allow_pagination] = keyword[False] ) | def _get_single_item(self, url_suffix, data, content_type=ContentType.json):
"""
Send GET request to API at url_suffix with post_data.
Raises error if x-total-pages is contained in the response.
:param url_suffix: str URL path we are sending a GET to
:param url_data: object data we are sending
:param content_type: str from ContentType that determines how we format the data
:return: requests.Response containing the result
"""
(url, data_str, headers) = self._url_parts(url_suffix, data, content_type=content_type)
resp = self.http.get(url, headers=headers, params=data_str)
return self._check_err(resp, url_suffix, data, allow_pagination=False) |
def render_field(dictionary,
                 field,
                 prepend=None,
                 append=None,
                 quotes=False,
                 **opts):
    '''
    Render the value found under ``field`` in ``dictionary`` as one line of
    configuration, without raising when the key path does not exist.
    Useful from Jinja templates where a hierarchy level may be absent, e.g.
    ``{{ salt.napalm_formula.render_field(config, 'description', quotes=True) }}``
    renders ``description "Interface description"`` (with a trailing ``;`` on
    Junos). Missing fields render as an empty string instead of erroring.
    dictionary
        The dictionary to traverse.
    field
        The key name or part to traverse in the ``dictionary``.
    prepend: ``None``
        Text placed before the value; defaults to the field name with
        underscores replaced by dashes.
    append: ``None``
        Text placed after the value; defaults to ``;`` on Junos, empty
        elsewhere.
    quotes: ``False``
        Whether the value should be wrapped in double quotes.
    CLI Example:
    .. code-block:: bash
        salt '*' napalm_formula.render_field "{'enabled': True}" enabled
    '''
    value = traverse(dictionary, field)
    if value is None:
        # Missing hierarchy: render nothing rather than raising.
        return ''
    if prepend is None:
        # Default the field label to the dash-separated key name.
        prepend = field.replace('_', '-')
    if append is None:
        # Junos rejects configuration statements without a trailing semicolon.
        append = ';' if __grains__['os'] in ('junos',) else ''
    if quotes:
        value = '"{value}"'.format(value=value)
    return '{prepend} {value}{append}'.format(prepend=prepend,
                                              value=value,
                                              append=append)
constant[
Render a field found under the ``field`` level of the hierarchy in the
``dictionary`` object.
This is useful to render a field in a Jinja template without worrying that
the hierarchy might not exist. For example if we do the following in Jinja:
``{{ interfaces.interface.Ethernet5.config.description }}`` for the
following object:
``{'interfaces': {'interface': {'Ethernet1': {'config': {'enabled': True}}}}}``
it would error, as the ``Ethernet5`` key does not exist.
With this helper, we can skip this and avoid existence checks. This must be
however used with care.
dictionary
The dictionary to traverse.
field
The key name or part to traverse in the ``dictionary``.
prepend: ``None``
The text to prepend in front of the text. Usually, we need to have the
name of the field too when generating the configuration.
append: ``None``
Text to append at the end.
quotes: ``False``
Whether should wrap the text around quotes.
CLI Example:
.. code-block:: bash
salt '*' napalm_formula.render_field "{'enabled': True}" enabled
# This would return the value of the ``enabled`` leaf key
salt '*' napalm_formula.render_field "{'enabled': True}" description
# This would not error
Jinja usage example:
.. code-block:: jinja
{%- set config = {'enabled': True, 'description': 'Interface description'} %}
{{ salt.napalm_formula.render_field(config, 'description', quotes=True) }}
The example above would be rendered on Arista / Cisco as:
.. code-block:: text
description "Interface description"
While on Junos (the semicolon is important to be added, otherwise the
configuration won't be accepted by Junos):
.. code-block:: text
description "Interface description";
]
variable[value] assign[=] call[name[traverse], parameter[name[dictionary], name[field]]]
if compare[name[value] is constant[None]] begin[:]
return[constant[]]
if compare[name[prepend] is constant[None]] begin[:]
variable[prepend] assign[=] call[name[field].replace, parameter[constant[_], constant[-]]]
if compare[name[append] is constant[None]] begin[:]
if compare[call[name[__grains__]][constant[os]] in tuple[[<ast.Constant object at 0x7da2045640d0>]]] begin[:]
variable[append] assign[=] constant[;]
if name[quotes] begin[:]
variable[value] assign[=] call[constant["{value}"].format, parameter[]]
return[call[constant[{prepend} {value}{append}].format, parameter[]]] | keyword[def] identifier[render_field] ( identifier[dictionary] ,
identifier[field] ,
identifier[prepend] = keyword[None] ,
identifier[append] = keyword[None] ,
identifier[quotes] = keyword[False] ,
** identifier[opts] ):
literal[string]
identifier[value] = identifier[traverse] ( identifier[dictionary] , identifier[field] )
keyword[if] identifier[value] keyword[is] keyword[None] :
keyword[return] literal[string]
keyword[if] identifier[prepend] keyword[is] keyword[None] :
identifier[prepend] = identifier[field] . identifier[replace] ( literal[string] , literal[string] )
keyword[if] identifier[append] keyword[is] keyword[None] :
keyword[if] identifier[__grains__] [ literal[string] ] keyword[in] ( literal[string] ,):
identifier[append] = literal[string]
keyword[else] :
identifier[append] = literal[string]
keyword[if] identifier[quotes] :
identifier[value] = literal[string] . identifier[format] ( identifier[value] = identifier[value] )
keyword[return] literal[string] . identifier[format] ( identifier[prepend] = identifier[prepend] ,
identifier[value] = identifier[value] ,
identifier[append] = identifier[append] ) | def render_field(dictionary, field, prepend=None, append=None, quotes=False, **opts):
"""
Render a field found under the ``field`` level of the hierarchy in the
``dictionary`` object.
This is useful to render a field in a Jinja template without worrying that
the hierarchy might not exist. For example if we do the following in Jinja:
``{{ interfaces.interface.Ethernet5.config.description }}`` for the
following object:
``{'interfaces': {'interface': {'Ethernet1': {'config': {'enabled': True}}}}}``
it would error, as the ``Ethernet5`` key does not exist.
With this helper, we can skip this and avoid existence checks. This must be
however used with care.
dictionary
The dictionary to traverse.
field
The key name or part to traverse in the ``dictionary``.
prepend: ``None``
The text to prepend in front of the text. Usually, we need to have the
name of the field too when generating the configuration.
append: ``None``
Text to append at the end.
quotes: ``False``
Whether should wrap the text around quotes.
CLI Example:
.. code-block:: bash
salt '*' napalm_formula.render_field "{'enabled': True}" enabled
# This would return the value of the ``enabled`` leaf key
salt '*' napalm_formula.render_field "{'enabled': True}" description
# This would not error
Jinja usage example:
.. code-block:: jinja
{%- set config = {'enabled': True, 'description': 'Interface description'} %}
{{ salt.napalm_formula.render_field(config, 'description', quotes=True) }}
The example above would be rendered on Arista / Cisco as:
.. code-block:: text
description "Interface description"
While on Junos (the semicolon is important to be added, otherwise the
configuration won't be accepted by Junos):
.. code-block:: text
description "Interface description";
"""
value = traverse(dictionary, field)
if value is None:
return '' # depends on [control=['if'], data=[]]
if prepend is None:
prepend = field.replace('_', '-') # depends on [control=['if'], data=['prepend']]
if append is None:
if __grains__['os'] in ('junos',):
append = ';' # depends on [control=['if'], data=[]]
else:
append = '' # depends on [control=['if'], data=['append']]
if quotes:
value = '"{value}"'.format(value=value) # depends on [control=['if'], data=[]]
return '{prepend} {value}{append}'.format(prepend=prepend, value=value, append=append) |
def do_until(self, arg):
    """unt(il) [lineno]
    Without argument, continue execution until the line with a
    number greater than the current one is reached. With a line
    number, continue execution until a line with a number greater
    or equal to that is reached. In both cases, also stop when
    the current frame returns.
    """
    # NOTE: the docstring above doubles as the debugger's interactive help
    # text, so it is kept verbatim.
    lineno = None
    if arg:
        try:
            lineno = int(arg)
        except ValueError:
            self.error('Error in argument: %r' % arg)
            return
        # A target at or before the current line can never be reached by
        # "until"; reject it instead of silently running to frame return.
        if lineno <= self.curframe.f_lineno:
            self.error('"until" line number is smaller than current '
                       'line number')
            return
    self.set_until(self.curframe, lineno)
    self.set_sigint_handler()
    return 1
constant[unt(il) [lineno]
Without argument, continue execution until the line with a
number greater than the current one is reached. With a line
number, continue execution until a line with a number greater
or equal to that is reached. In both cases, also stop when
the current frame returns.
]
if name[arg] begin[:]
<ast.Try object at 0x7da1b0ea7f10>
if compare[name[lineno] less_or_equal[<=] name[self].curframe.f_lineno] begin[:]
call[name[self].error, parameter[constant["until" line number is smaller than current line number]]]
return[None]
call[name[self].set_until, parameter[name[self].curframe, name[lineno]]]
call[name[self].set_sigint_handler, parameter[]]
return[constant[1]] | keyword[def] identifier[do_until] ( identifier[self] , identifier[arg] ):
literal[string]
keyword[if] identifier[arg] :
keyword[try] :
identifier[lineno] = identifier[int] ( identifier[arg] )
keyword[except] identifier[ValueError] :
identifier[self] . identifier[error] ( literal[string] % identifier[arg] )
keyword[return]
keyword[if] identifier[lineno] <= identifier[self] . identifier[curframe] . identifier[f_lineno] :
identifier[self] . identifier[error] ( literal[string]
literal[string] )
keyword[return]
keyword[else] :
identifier[lineno] = keyword[None]
identifier[self] . identifier[set_until] ( identifier[self] . identifier[curframe] , identifier[lineno] )
identifier[self] . identifier[set_sigint_handler] ()
keyword[return] literal[int] | def do_until(self, arg):
"""unt(il) [lineno]
Without argument, continue execution until the line with a
number greater than the current one is reached. With a line
number, continue execution until a line with a number greater
or equal to that is reached. In both cases, also stop when
the current frame returns.
"""
if arg:
try:
lineno = int(arg) # depends on [control=['try'], data=[]]
except ValueError:
self.error('Error in argument: %r' % arg)
return # depends on [control=['except'], data=[]]
if lineno <= self.curframe.f_lineno:
self.error('"until" line number is smaller than current line number')
return # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
lineno = None
self.set_until(self.curframe, lineno)
self.set_sigint_handler()
return 1 |
def daft_link(self):
    """
    Return the URL of the listing, or None if it cannot be determined.
    Prefers the anchor found in the search-result snippet
    (``_data_from_search``); otherwise falls back to the canonical ``<link>``
    of the full ad page (``_ad_page_content``). Any failure is swallowed
    (best-effort), optionally logged when ``self._debug`` is set.
    :return: str URL or None
    """
    try:
        if self._data_from_search:
            link = self._data_from_search.find('a', href=True)
            # Search snippets carry site-relative hrefs; prefix the host.
            return 'http://www.daft.ie' + link['href']
        else:
            return self._ad_page_content.find('link', {'rel': 'canonical'})['href']
    except Exception as e:
        if self._debug:
            # Lazy %s formatting: the previous `... + e.args[0]` crashed the
            # handler with IndexError/TypeError for exceptions whose args are
            # empty or not strings.
            logging.error("Error getting daft_link. Error message: %s", e)
        return
constant[
This method returns the url of the listing.
:return:
]
<ast.Try object at 0x7da18eb553c0> | keyword[def] identifier[daft_link] ( identifier[self] ):
literal[string]
keyword[try] :
keyword[if] identifier[self] . identifier[_data_from_search] :
identifier[link] = identifier[self] . identifier[_data_from_search] . identifier[find] ( literal[string] , identifier[href] = keyword[True] )
keyword[return] literal[string] + identifier[link] [ literal[string] ]
keyword[else] :
keyword[return] identifier[self] . identifier[_ad_page_content] . identifier[find] ( literal[string] ,{ literal[string] : literal[string] })[ literal[string] ]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[if] identifier[self] . identifier[_debug] :
identifier[logging] . identifier[error] (
literal[string] + identifier[e] . identifier[args] [ literal[int] ])
keyword[return] | def daft_link(self):
"""
This method returns the url of the listing.
:return:
"""
try:
if self._data_from_search:
link = self._data_from_search.find('a', href=True)
return 'http://www.daft.ie' + link['href'] # depends on [control=['if'], data=[]]
else:
return self._ad_page_content.find('link', {'rel': 'canonical'})['href'] # depends on [control=['try'], data=[]]
except Exception as e:
if self._debug:
logging.error('Error getting daft_link. Error message: ' + e.args[0]) # depends on [control=['if'], data=[]]
return # depends on [control=['except'], data=['e']] |
def _get_question_map(self, question_id):
    """Return the question map matching ``question_id``.
    Accepts both Section-assigned Ids (authority == ASSESSMENT_AUTHORITY,
    matched against the map's ``_id`` ObjectId) and normal Question/Item
    Ids (matched against ``questionId`` as a string).
    Raises ``errors.NotFound`` when no question matches.
    """
    if question_id.get_authority() == ASSESSMENT_AUTHORITY:
        key, wanted = '_id', ObjectId(question_id.get_identifier())
    else:
        key, wanted = 'questionId', str(question_id)
    for candidate in self._my_map['questions']:
        if candidate[key] == wanted:
            return candidate
    raise errors.NotFound()
constant[get question map from questions matching question_id
This can make sense of both Section assigned Ids or normal Question/Item Ids
]
if compare[call[name[question_id].get_authority, parameter[]] equal[==] name[ASSESSMENT_AUTHORITY]] begin[:]
variable[key] assign[=] constant[_id]
variable[match_value] assign[=] call[name[ObjectId], parameter[call[name[question_id].get_identifier, parameter[]]]]
for taget[name[question_map]] in starred[call[name[self]._my_map][constant[questions]]] begin[:]
if compare[call[name[question_map]][name[key]] equal[==] name[match_value]] begin[:]
return[name[question_map]]
<ast.Raise object at 0x7da20c993370> | keyword[def] identifier[_get_question_map] ( identifier[self] , identifier[question_id] ):
literal[string]
keyword[if] identifier[question_id] . identifier[get_authority] ()== identifier[ASSESSMENT_AUTHORITY] :
identifier[key] = literal[string]
identifier[match_value] = identifier[ObjectId] ( identifier[question_id] . identifier[get_identifier] ())
keyword[else] :
identifier[key] = literal[string]
identifier[match_value] = identifier[str] ( identifier[question_id] )
keyword[for] identifier[question_map] keyword[in] identifier[self] . identifier[_my_map] [ literal[string] ]:
keyword[if] identifier[question_map] [ identifier[key] ]== identifier[match_value] :
keyword[return] identifier[question_map]
keyword[raise] identifier[errors] . identifier[NotFound] () | def _get_question_map(self, question_id):
"""get question map from questions matching question_id
This can make sense of both Section assigned Ids or normal Question/Item Ids
"""
if question_id.get_authority() == ASSESSMENT_AUTHORITY:
key = '_id'
match_value = ObjectId(question_id.get_identifier()) # depends on [control=['if'], data=[]]
else:
key = 'questionId'
match_value = str(question_id)
for question_map in self._my_map['questions']:
if question_map[key] == match_value:
return question_map # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['question_map']]
raise errors.NotFound() |
def __get_subscript_delete(self, name):
    """
    Returns `del <data_var>["<name>"]` as an AST node.
    """
    # Build the subscript target in Del context, then wrap it in a Delete
    # statement node.
    target = self.__get_subscript(name, ast.Del())
    return ast.Delete(targets=[target])
constant[
Returns `del <data_var>["<name>"]`.
]
return[call[name[ast].Delete, parameter[]]] | keyword[def] identifier[__get_subscript_delete] ( identifier[self] , identifier[name] ):
literal[string]
keyword[return] identifier[ast] . identifier[Delete] ( identifier[targets] =[ identifier[self] . identifier[__get_subscript] ( identifier[name] , identifier[ast] . identifier[Del] ())]) | def __get_subscript_delete(self, name):
"""
Returns `del <data_var>["<name>"]`.
"""
return ast.Delete(targets=[self.__get_subscript(name, ast.Del())]) |
def checkUserManage(self):
    """ Return True when the current user may both access and manage this
    worksheet. Management is granted to the worksheet's assigned analyst,
    or to any user holding edit plus ManageWorksheets permissions.
    """
    # No access at all -> no management.
    if self.checkUserAccess() is not True:
        return False
    pm = getToolByName(self, 'portal_membership')
    if not pm.checkPermission(EditWorksheet, self):
        return False
    member = pm.getAuthenticatedMember()
    if self.getAnalyst().strip() == _c(member.getId()):
        # The worksheet's current analyst can always manage it.
        return True
    # Otherwise explicit management privileges are required.
    return bool(pm.checkPermission(ManageWorksheets, self))
constant[ Checks if the current user has granted access to this worksheet
and if has also privileges for managing it.
]
variable[granted] assign[=] constant[False]
variable[can_access] assign[=] call[name[self].checkUserAccess, parameter[]]
if compare[name[can_access] is constant[True]] begin[:]
variable[pm] assign[=] call[name[getToolByName], parameter[name[self], constant[portal_membership]]]
variable[edit_allowed] assign[=] call[name[pm].checkPermission, parameter[name[EditWorksheet], name[self]]]
if name[edit_allowed] begin[:]
variable[member] assign[=] call[name[pm].getAuthenticatedMember, parameter[]]
variable[analyst] assign[=] call[call[name[self].getAnalyst, parameter[]].strip, parameter[]]
if compare[name[analyst] not_equal[!=] call[name[_c], parameter[call[name[member].getId, parameter[]]]]] begin[:]
if call[name[pm].checkPermission, parameter[name[ManageWorksheets], name[self]]] begin[:]
variable[granted] assign[=] constant[True]
return[name[granted]] | keyword[def] identifier[checkUserManage] ( identifier[self] ):
literal[string]
identifier[granted] = keyword[False]
identifier[can_access] = identifier[self] . identifier[checkUserAccess] ()
keyword[if] identifier[can_access] keyword[is] keyword[True] :
identifier[pm] = identifier[getToolByName] ( identifier[self] , literal[string] )
identifier[edit_allowed] = identifier[pm] . identifier[checkPermission] ( identifier[EditWorksheet] , identifier[self] )
keyword[if] identifier[edit_allowed] :
identifier[member] = identifier[pm] . identifier[getAuthenticatedMember] ()
identifier[analyst] = identifier[self] . identifier[getAnalyst] (). identifier[strip] ()
keyword[if] identifier[analyst] != identifier[_c] ( identifier[member] . identifier[getId] ()):
keyword[if] identifier[pm] . identifier[checkPermission] ( identifier[ManageWorksheets] , identifier[self] ):
identifier[granted] = keyword[True]
keyword[else] :
identifier[granted] = keyword[True]
keyword[return] identifier[granted] | def checkUserManage(self):
""" Checks if the current user has granted access to this worksheet
and if has also privileges for managing it.
"""
granted = False
can_access = self.checkUserAccess()
if can_access is True:
pm = getToolByName(self, 'portal_membership')
edit_allowed = pm.checkPermission(EditWorksheet, self)
if edit_allowed:
# Check if the current user is the WS's current analyst
member = pm.getAuthenticatedMember()
analyst = self.getAnalyst().strip()
if analyst != _c(member.getId()):
# Has management privileges?
if pm.checkPermission(ManageWorksheets, self):
granted = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
granted = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return granted |
def open_bucket(bucket_name,
                aws_access_key_id=None, aws_secret_access_key=None,
                aws_profile=None):
    """Open an S3 Bucket resource.
    Parameters
    ----------
    bucket_name : `str`
        Name of the S3 bucket.
    aws_access_key_id : `str`, optional
        The access key for your AWS account; pair with
        ``aws_secret_access_key``.
    aws_secret_access_key : `str`, optional
        The secret key for your AWS account.
    aws_profile : `str`, optional
        Name of an AWS profile in :file:`~/.aws/credentials`; use instead of
        the key pair for file-based credentials.
    Returns
    -------
    bucket : Boto3 S3 Bucket instance
        The S3 bucket as a Boto3 instance.
    """
    session = boto3.session.Session(
        profile_name=aws_profile,
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key)
    # Resolve the bucket through the session's S3 resource.
    return session.resource('s3').Bucket(bucket_name)
constant[Open an S3 Bucket resource.
Parameters
----------
bucket_name : `str`
Name of the S3 bucket.
aws_access_key_id : `str`, optional
The access key for your AWS account. Also set
``aws_secret_access_key``.
aws_secret_access_key : `str`, optional
The secret key for your AWS account.
aws_profile : `str`, optional
Name of AWS profile in :file:`~/.aws/credentials`. Use this instead
of ``aws_access_key_id`` and ``aws_secret_access_key`` for file-based
credentials.
Returns
-------
bucket : Boto3 S3 Bucket instance
The S3 bucket as a Boto3 instance.
]
variable[session] assign[=] call[name[boto3].session.Session, parameter[]]
variable[s3] assign[=] call[name[session].resource, parameter[constant[s3]]]
variable[bucket] assign[=] call[name[s3].Bucket, parameter[name[bucket_name]]]
return[name[bucket]] | keyword[def] identifier[open_bucket] ( identifier[bucket_name] ,
identifier[aws_access_key_id] = keyword[None] , identifier[aws_secret_access_key] = keyword[None] ,
identifier[aws_profile] = keyword[None] ):
literal[string]
identifier[session] = identifier[boto3] . identifier[session] . identifier[Session] (
identifier[profile_name] = identifier[aws_profile] ,
identifier[aws_access_key_id] = identifier[aws_access_key_id] ,
identifier[aws_secret_access_key] = identifier[aws_secret_access_key] )
identifier[s3] = identifier[session] . identifier[resource] ( literal[string] )
identifier[bucket] = identifier[s3] . identifier[Bucket] ( identifier[bucket_name] )
keyword[return] identifier[bucket] | def open_bucket(bucket_name, aws_access_key_id=None, aws_secret_access_key=None, aws_profile=None):
"""Open an S3 Bucket resource.
Parameters
----------
bucket_name : `str`
Name of the S3 bucket.
aws_access_key_id : `str`, optional
The access key for your AWS account. Also set
``aws_secret_access_key``.
aws_secret_access_key : `str`, optional
The secret key for your AWS account.
aws_profile : `str`, optional
Name of AWS profile in :file:`~/.aws/credentials`. Use this instead
of ``aws_access_key_id`` and ``aws_secret_access_key`` for file-based
credentials.
Returns
-------
bucket : Boto3 S3 Bucket instance
The S3 bucket as a Boto3 instance.
"""
session = boto3.session.Session(profile_name=aws_profile, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key)
s3 = session.resource('s3')
bucket = s3.Bucket(bucket_name)
return bucket |
def basepath(self) -> str:
    """Absolute path pointing to the available working directories.
    >>> from hydpy.core.filetools import FileManager
    >>> filemanager = FileManager()
    >>> filemanager.BASEDIR = 'basename'
    >>> filemanager.projectdir = 'projectname'
    >>> from hydpy import repr_, TestIO
    >>> with TestIO():
    ...     repr_(filemanager.basepath)    # doctest: +ELLIPSIS
    '...hydpy/tests/iotesting/projectname/basename'
    """
    # Join the project directory with the manager's base directory, then
    # resolve it relative to the current working directory.
    joined = os.path.join(self.projectdir, self.BASEDIR)
    return os.path.abspath(joined)
constant[Absolute path pointing to the available working directories.
>>> from hydpy.core.filetools import FileManager
>>> filemanager = FileManager()
>>> filemanager.BASEDIR = 'basename'
>>> filemanager.projectdir = 'projectname'
>>> from hydpy import repr_, TestIO
>>> with TestIO():
... repr_(filemanager.basepath) # doctest: +ELLIPSIS
'...hydpy/tests/iotesting/projectname/basename'
]
return[call[name[os].path.abspath, parameter[call[name[os].path.join, parameter[name[self].projectdir, name[self].BASEDIR]]]]] | keyword[def] identifier[basepath] ( identifier[self] )-> identifier[str] :
literal[string]
keyword[return] identifier[os] . identifier[path] . identifier[abspath] (
identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[projectdir] , identifier[self] . identifier[BASEDIR] )) | def basepath(self) -> str:
"""Absolute path pointing to the available working directories.
>>> from hydpy.core.filetools import FileManager
>>> filemanager = FileManager()
>>> filemanager.BASEDIR = 'basename'
>>> filemanager.projectdir = 'projectname'
>>> from hydpy import repr_, TestIO
>>> with TestIO():
... repr_(filemanager.basepath) # doctest: +ELLIPSIS
'...hydpy/tests/iotesting/projectname/basename'
"""
return os.path.abspath(os.path.join(self.projectdir, self.BASEDIR)) |
def template_file(
    task: Task,
    template: str,
    path: str,
    jinja_filters: FiltersDict = None,
    **kwargs: Any
) -> Result:
    """
    Renders contents of a file with jinja2. All the host data is available
    in the template.
    Arguments:
        template: filename
        path: path to dir with templates
        jinja_filters: jinja filters to enable. Defaults to nornir.config.jinja2.filters
        **kwargs: additional data to pass to the template
    Returns:
        Result object with the following attributes set:
          * result (``string``): rendered string
    """
    # Fix: the previous `jinja_filters or {} or task...filters` had a dead
    # `{}` operand (an empty dict is falsy, so evaluation always fell through
    # to the configured filters); the effective default is made explicit here.
    jinja_filters = jinja_filters or task.nornir.config.jinja2.filters
    text = jinja_helper.render_from_file(
        template=template,
        path=path,
        host=task.host,
        jinja_filters=jinja_filters,
        **kwargs
    )
    return Result(host=task.host, result=text)
constant[
Renders contants of a file with jinja2. All the host data is available in the template
Arguments:
template: filename
path: path to dir with templates
jinja_filters: jinja filters to enable. Defaults to nornir.config.jinja2.filters
**kwargs: additional data to pass to the template
Returns:
Result object with the following attributes set:
* result (``string``): rendered string
]
variable[jinja_filters] assign[=] <ast.BoolOp object at 0x7da1b1c6f370>
variable[text] assign[=] call[name[jinja_helper].render_from_file, parameter[]]
return[call[name[Result], parameter[]]] | keyword[def] identifier[template_file] (
identifier[task] : identifier[Task] ,
identifier[template] : identifier[str] ,
identifier[path] : identifier[str] ,
identifier[jinja_filters] : identifier[FiltersDict] = keyword[None] ,
** identifier[kwargs] : identifier[Any]
)-> identifier[Result] :
literal[string]
identifier[jinja_filters] = identifier[jinja_filters] keyword[or] {} keyword[or] identifier[task] . identifier[nornir] . identifier[config] . identifier[jinja2] . identifier[filters]
identifier[text] = identifier[jinja_helper] . identifier[render_from_file] (
identifier[template] = identifier[template] ,
identifier[path] = identifier[path] ,
identifier[host] = identifier[task] . identifier[host] ,
identifier[jinja_filters] = identifier[jinja_filters] ,
** identifier[kwargs]
)
keyword[return] identifier[Result] ( identifier[host] = identifier[task] . identifier[host] , identifier[result] = identifier[text] ) | def template_file(task: Task, template: str, path: str, jinja_filters: FiltersDict=None, **kwargs: Any) -> Result:
"""
Renders contants of a file with jinja2. All the host data is available in the template
Arguments:
template: filename
path: path to dir with templates
jinja_filters: jinja filters to enable. Defaults to nornir.config.jinja2.filters
**kwargs: additional data to pass to the template
Returns:
Result object with the following attributes set:
* result (``string``): rendered string
"""
jinja_filters = jinja_filters or {} or task.nornir.config.jinja2.filters
text = jinja_helper.render_from_file(template=template, path=path, host=task.host, jinja_filters=jinja_filters, **kwargs)
return Result(host=task.host, result=text) |
def text_width(string, font_name, font_size):
    """Return the width of ``string`` rendered with the given font name and size."""
    width = stringWidth(string, fontName=font_name, fontSize=font_size)
    return width
constant[Determine with width in pixels of string.]
return[call[name[stringWidth], parameter[name[string]]]] | keyword[def] identifier[text_width] ( identifier[string] , identifier[font_name] , identifier[font_size] ):
literal[string]
keyword[return] identifier[stringWidth] ( identifier[string] , identifier[fontName] = identifier[font_name] , identifier[fontSize] = identifier[font_size] ) | def text_width(string, font_name, font_size):
"""Determine with width in pixels of string."""
return stringWidth(string, fontName=font_name, fontSize=font_size) |
def set_python(self, value):
    """Store record instance(s) as a SortedDict keyed by record id.
    Accepts a single record (single-select fields) or a list of records;
    each is validated before being stored. The raw record payload is
    refreshed so the change is reflected in what Swimlane persists.
    """
    if not self.multiselect and value and not isinstance(value, list):
        # Single-select fields may receive a bare record; normalise to a
        # list for uniform handling below.
        value = [value]
    selected = SortedDict()
    for rec in (value or []):
        self.validate_value(rec)
        selected[rec.id] = rec
    result = self._set(selected)
    # Mirror the new selection into the raw API representation.
    self.record._raw['values'][self.id] = self.get_swimlane()
    return result
constant[Expect list of record instances, convert to a SortedDict for internal representation]
if <ast.UnaryOp object at 0x7da18ede6e30> begin[:]
if <ast.BoolOp object at 0x7da18ede6a40> begin[:]
variable[value] assign[=] list[[<ast.Name object at 0x7da18ede4be0>]]
variable[value] assign[=] <ast.BoolOp object at 0x7da18ede44c0>
variable[records] assign[=] call[name[SortedDict], parameter[]]
for taget[name[record]] in starred[name[value]] begin[:]
call[name[self].validate_value, parameter[name[record]]]
call[name[records]][name[record].id] assign[=] name[record]
variable[return_value] assign[=] call[name[self]._set, parameter[name[records]]]
call[call[name[self].record._raw][constant[values]]][name[self].id] assign[=] call[name[self].get_swimlane, parameter[]]
return[name[return_value]] | keyword[def] identifier[set_python] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[multiselect] :
keyword[if] identifier[value] keyword[and] keyword[not] identifier[isinstance] ( identifier[value] , identifier[list] ):
identifier[value] =[ identifier[value] ]
identifier[value] = identifier[value] keyword[or] []
identifier[records] = identifier[SortedDict] ()
keyword[for] identifier[record] keyword[in] identifier[value] :
identifier[self] . identifier[validate_value] ( identifier[record] )
identifier[records] [ identifier[record] . identifier[id] ]= identifier[record]
identifier[return_value] = identifier[self] . identifier[_set] ( identifier[records] )
identifier[self] . identifier[record] . identifier[_raw] [ literal[string] ][ identifier[self] . identifier[id] ]= identifier[self] . identifier[get_swimlane] ()
keyword[return] identifier[return_value] | def set_python(self, value):
"""Expect list of record instances, convert to a SortedDict for internal representation"""
if not self.multiselect:
if value and (not isinstance(value, list)):
value = [value] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
value = value or []
records = SortedDict()
for record in value:
self.validate_value(record)
records[record.id] = record # depends on [control=['for'], data=['record']]
return_value = self._set(records)
self.record._raw['values'][self.id] = self.get_swimlane()
return return_value |
def save_loop(filename, framerate=30, time=3.0, axis=np.array([0.,0.,1.]), clf=True, **kwargs):
"""Off-screen save a GIF of one rotation about the scene.
Parameters
----------
filename : str
The filename in which to save the output image (should have extension .gif)
framerate : int
The frame rate at which to animate motion.
time : float
The number of seconds for one rotation.
axis : (3,) float or None
If present, the animation will rotate about the given axis in world coordinates.
Otherwise, the animation will rotate in azimuth.
clf : bool
If true, the Visualizer is cleared after rendering the figure.
kwargs : dict
Other keyword arguments for the SceneViewer instance.
"""
n_frames = framerate * time
az = 2.0 * np.pi / n_frames
Visualizer3D.save(filename, n_frames=n_frames, axis=axis, clf=clf,
animate_rate=framerate, animate_az=az)
if clf:
Visualizer3D.clf() | def function[save_loop, parameter[filename, framerate, time, axis, clf]]:
constant[Off-screen save a GIF of one rotation about the scene.
Parameters
----------
filename : str
The filename in which to save the output image (should have extension .gif)
framerate : int
The frame rate at which to animate motion.
time : float
The number of seconds for one rotation.
axis : (3,) float or None
If present, the animation will rotate about the given axis in world coordinates.
Otherwise, the animation will rotate in azimuth.
clf : bool
If true, the Visualizer is cleared after rendering the figure.
kwargs : dict
Other keyword arguments for the SceneViewer instance.
]
variable[n_frames] assign[=] binary_operation[name[framerate] * name[time]]
variable[az] assign[=] binary_operation[binary_operation[constant[2.0] * name[np].pi] / name[n_frames]]
call[name[Visualizer3D].save, parameter[name[filename]]]
if name[clf] begin[:]
call[name[Visualizer3D].clf, parameter[]] | keyword[def] identifier[save_loop] ( identifier[filename] , identifier[framerate] = literal[int] , identifier[time] = literal[int] , identifier[axis] = identifier[np] . identifier[array] ([ literal[int] , literal[int] , literal[int] ]), identifier[clf] = keyword[True] ,** identifier[kwargs] ):
literal[string]
identifier[n_frames] = identifier[framerate] * identifier[time]
identifier[az] = literal[int] * identifier[np] . identifier[pi] / identifier[n_frames]
identifier[Visualizer3D] . identifier[save] ( identifier[filename] , identifier[n_frames] = identifier[n_frames] , identifier[axis] = identifier[axis] , identifier[clf] = identifier[clf] ,
identifier[animate_rate] = identifier[framerate] , identifier[animate_az] = identifier[az] )
keyword[if] identifier[clf] :
identifier[Visualizer3D] . identifier[clf] () | def save_loop(filename, framerate=30, time=3.0, axis=np.array([0.0, 0.0, 1.0]), clf=True, **kwargs):
"""Off-screen save a GIF of one rotation about the scene.
Parameters
----------
filename : str
The filename in which to save the output image (should have extension .gif)
framerate : int
The frame rate at which to animate motion.
time : float
The number of seconds for one rotation.
axis : (3,) float or None
If present, the animation will rotate about the given axis in world coordinates.
Otherwise, the animation will rotate in azimuth.
clf : bool
If true, the Visualizer is cleared after rendering the figure.
kwargs : dict
Other keyword arguments for the SceneViewer instance.
"""
n_frames = framerate * time
az = 2.0 * np.pi / n_frames
Visualizer3D.save(filename, n_frames=n_frames, axis=axis, clf=clf, animate_rate=framerate, animate_az=az)
if clf:
Visualizer3D.clf() # depends on [control=['if'], data=[]] |
def set_mock_engine(self, engine):
"""
Sets a custom mock engine, replacing the built-in one.
This is particularly useful if you want to replace the built-in
HTTP traffic mock interceptor engine with your custom one.
For mock engine implementation details, see `pook.MockEngine`.
Arguments:
engine (pook.MockEngine): custom mock engine to use.
"""
if not engine:
raise TypeError('engine must be a valid object')
# Instantiate mock engine
mock_engine = engine(self)
# Validate minimum viable interface
methods = ('activate', 'disable')
if not all([hasattr(mock_engine, method) for method in methods]):
raise NotImplementedError('engine must implementent the '
'required methods')
# Use the custom mock engine
self.mock_engine = mock_engine
# Enable mock engine, if needed
if self.active:
self.mock_engine.activate() | def function[set_mock_engine, parameter[self, engine]]:
constant[
Sets a custom mock engine, replacing the built-in one.
This is particularly useful if you want to replace the built-in
HTTP traffic mock interceptor engine with your custom one.
For mock engine implementation details, see `pook.MockEngine`.
Arguments:
engine (pook.MockEngine): custom mock engine to use.
]
if <ast.UnaryOp object at 0x7da1b03e0160> begin[:]
<ast.Raise object at 0x7da1b03e2890>
variable[mock_engine] assign[=] call[name[engine], parameter[name[self]]]
variable[methods] assign[=] tuple[[<ast.Constant object at 0x7da1b03e1fc0>, <ast.Constant object at 0x7da1b03e01f0>]]
if <ast.UnaryOp object at 0x7da1b0553190> begin[:]
<ast.Raise object at 0x7da1b02a5660>
name[self].mock_engine assign[=] name[mock_engine]
if name[self].active begin[:]
call[name[self].mock_engine.activate, parameter[]] | keyword[def] identifier[set_mock_engine] ( identifier[self] , identifier[engine] ):
literal[string]
keyword[if] keyword[not] identifier[engine] :
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[mock_engine] = identifier[engine] ( identifier[self] )
identifier[methods] =( literal[string] , literal[string] )
keyword[if] keyword[not] identifier[all] ([ identifier[hasattr] ( identifier[mock_engine] , identifier[method] ) keyword[for] identifier[method] keyword[in] identifier[methods] ]):
keyword[raise] identifier[NotImplementedError] ( literal[string]
literal[string] )
identifier[self] . identifier[mock_engine] = identifier[mock_engine]
keyword[if] identifier[self] . identifier[active] :
identifier[self] . identifier[mock_engine] . identifier[activate] () | def set_mock_engine(self, engine):
"""
Sets a custom mock engine, replacing the built-in one.
This is particularly useful if you want to replace the built-in
HTTP traffic mock interceptor engine with your custom one.
For mock engine implementation details, see `pook.MockEngine`.
Arguments:
engine (pook.MockEngine): custom mock engine to use.
"""
if not engine:
raise TypeError('engine must be a valid object') # depends on [control=['if'], data=[]]
# Instantiate mock engine
mock_engine = engine(self)
# Validate minimum viable interface
methods = ('activate', 'disable')
if not all([hasattr(mock_engine, method) for method in methods]):
raise NotImplementedError('engine must implementent the required methods') # depends on [control=['if'], data=[]]
# Use the custom mock engine
self.mock_engine = mock_engine
# Enable mock engine, if needed
if self.active:
self.mock_engine.activate() # depends on [control=['if'], data=[]] |
def generate_data(self, data_dir, tmp_dir=None, task_id=-1):
"""Saves the current epoch rollouts to disk, split into train/dev sets."""
if not self._rollouts_by_epoch_and_split[self.current_epoch]:
# Data not loaded from disk.
self._split_current_epoch()
rollouts_by_split = self._rollouts_by_epoch_and_split[self.current_epoch]
splits_and_paths = self.splits_and_paths(data_dir)
for (split, paths) in splits_and_paths:
rollouts = rollouts_by_split[split]
num_frames = self._calc_num_frames(rollouts)
shard_size = num_frames // len(paths)
frame_gen = self._generate_frames(rollouts)
for (path_index, path) in enumerate(paths):
limit = shard_size
# Put the remainder in the last shard to preserve the ordering.
if path_index == len(paths) - 1:
limit = None
generator_utils.generate_files(
itertools.islice(frame_gen, limit), [path],
cycle_every_n=float("inf")
) | def function[generate_data, parameter[self, data_dir, tmp_dir, task_id]]:
constant[Saves the current epoch rollouts to disk, split into train/dev sets.]
if <ast.UnaryOp object at 0x7da1b1e17190> begin[:]
call[name[self]._split_current_epoch, parameter[]]
variable[rollouts_by_split] assign[=] call[name[self]._rollouts_by_epoch_and_split][name[self].current_epoch]
variable[splits_and_paths] assign[=] call[name[self].splits_and_paths, parameter[name[data_dir]]]
for taget[tuple[[<ast.Name object at 0x7da18f00dd80>, <ast.Name object at 0x7da18f00f490>]]] in starred[name[splits_and_paths]] begin[:]
variable[rollouts] assign[=] call[name[rollouts_by_split]][name[split]]
variable[num_frames] assign[=] call[name[self]._calc_num_frames, parameter[name[rollouts]]]
variable[shard_size] assign[=] binary_operation[name[num_frames] <ast.FloorDiv object at 0x7da2590d6bc0> call[name[len], parameter[name[paths]]]]
variable[frame_gen] assign[=] call[name[self]._generate_frames, parameter[name[rollouts]]]
for taget[tuple[[<ast.Name object at 0x7da18f00d120>, <ast.Name object at 0x7da18f00e740>]]] in starred[call[name[enumerate], parameter[name[paths]]]] begin[:]
variable[limit] assign[=] name[shard_size]
if compare[name[path_index] equal[==] binary_operation[call[name[len], parameter[name[paths]]] - constant[1]]] begin[:]
variable[limit] assign[=] constant[None]
call[name[generator_utils].generate_files, parameter[call[name[itertools].islice, parameter[name[frame_gen], name[limit]]], list[[<ast.Name object at 0x7da18f00f940>]]]] | keyword[def] identifier[generate_data] ( identifier[self] , identifier[data_dir] , identifier[tmp_dir] = keyword[None] , identifier[task_id] =- literal[int] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_rollouts_by_epoch_and_split] [ identifier[self] . identifier[current_epoch] ]:
identifier[self] . identifier[_split_current_epoch] ()
identifier[rollouts_by_split] = identifier[self] . identifier[_rollouts_by_epoch_and_split] [ identifier[self] . identifier[current_epoch] ]
identifier[splits_and_paths] = identifier[self] . identifier[splits_and_paths] ( identifier[data_dir] )
keyword[for] ( identifier[split] , identifier[paths] ) keyword[in] identifier[splits_and_paths] :
identifier[rollouts] = identifier[rollouts_by_split] [ identifier[split] ]
identifier[num_frames] = identifier[self] . identifier[_calc_num_frames] ( identifier[rollouts] )
identifier[shard_size] = identifier[num_frames] // identifier[len] ( identifier[paths] )
identifier[frame_gen] = identifier[self] . identifier[_generate_frames] ( identifier[rollouts] )
keyword[for] ( identifier[path_index] , identifier[path] ) keyword[in] identifier[enumerate] ( identifier[paths] ):
identifier[limit] = identifier[shard_size]
keyword[if] identifier[path_index] == identifier[len] ( identifier[paths] )- literal[int] :
identifier[limit] = keyword[None]
identifier[generator_utils] . identifier[generate_files] (
identifier[itertools] . identifier[islice] ( identifier[frame_gen] , identifier[limit] ),[ identifier[path] ],
identifier[cycle_every_n] = identifier[float] ( literal[string] )
) | def generate_data(self, data_dir, tmp_dir=None, task_id=-1):
"""Saves the current epoch rollouts to disk, split into train/dev sets."""
if not self._rollouts_by_epoch_and_split[self.current_epoch]:
# Data not loaded from disk.
self._split_current_epoch() # depends on [control=['if'], data=[]]
rollouts_by_split = self._rollouts_by_epoch_and_split[self.current_epoch]
splits_and_paths = self.splits_and_paths(data_dir)
for (split, paths) in splits_and_paths:
rollouts = rollouts_by_split[split]
num_frames = self._calc_num_frames(rollouts)
shard_size = num_frames // len(paths)
frame_gen = self._generate_frames(rollouts)
for (path_index, path) in enumerate(paths):
limit = shard_size
# Put the remainder in the last shard to preserve the ordering.
if path_index == len(paths) - 1:
limit = None # depends on [control=['if'], data=[]]
generator_utils.generate_files(itertools.islice(frame_gen, limit), [path], cycle_every_n=float('inf')) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]] |
def _build_request(self, type, commands):
'''
Build NX-API JSON request.
'''
request = {}
headers = {
'content-type': 'application/json',
}
if self.nxargs['connect_over_uds']:
user = self.nxargs['cookie']
headers['cookie'] = 'nxapi_auth=' + user + ':local'
request['url'] = self.NXAPI_UDS_URI_PATH
else:
request['url'] = '{transport}://{host}:{port}{uri}'.format(
transport=self.nxargs['transport'],
host=self.nxargs['host'],
port=self.nxargs['port'],
uri=self.NXAPI_REMOTE_URI_PATH,
)
if isinstance(commands, (list, set, tuple)):
commands = ' ; '.join(commands)
payload = {}
payload['ins_api'] = {
'version': self.NXAPI_VERSION,
'type': type,
'chunk': '0',
'sid': '1',
'input': commands,
'output_format': 'json',
}
request['headers'] = headers
request['payload'] = json.dumps(payload)
request['opts'] = {
'http_request_timeout': self.nxargs['timeout']
}
log.info('request: %s', request)
return request | def function[_build_request, parameter[self, type, commands]]:
constant[
Build NX-API JSON request.
]
variable[request] assign[=] dictionary[[], []]
variable[headers] assign[=] dictionary[[<ast.Constant object at 0x7da1b2113910>], [<ast.Constant object at 0x7da1b2113850>]]
if call[name[self].nxargs][constant[connect_over_uds]] begin[:]
variable[user] assign[=] call[name[self].nxargs][constant[cookie]]
call[name[headers]][constant[cookie]] assign[=] binary_operation[binary_operation[constant[nxapi_auth=] + name[user]] + constant[:local]]
call[name[request]][constant[url]] assign[=] name[self].NXAPI_UDS_URI_PATH
if call[name[isinstance], parameter[name[commands], tuple[[<ast.Name object at 0x7da1b2113010>, <ast.Name object at 0x7da1b2112f50>, <ast.Name object at 0x7da1b2113040>]]]] begin[:]
variable[commands] assign[=] call[constant[ ; ].join, parameter[name[commands]]]
variable[payload] assign[=] dictionary[[], []]
call[name[payload]][constant[ins_api]] assign[=] dictionary[[<ast.Constant object at 0x7da1b2112e30>, <ast.Constant object at 0x7da1b21109a0>, <ast.Constant object at 0x7da1b21134f0>, <ast.Constant object at 0x7da1b2113640>, <ast.Constant object at 0x7da1b2112620>, <ast.Constant object at 0x7da1b21125f0>], [<ast.Attribute object at 0x7da1b2110e20>, <ast.Name object at 0x7da1b2110b50>, <ast.Constant object at 0x7da1b2110a90>, <ast.Constant object at 0x7da1b2113dc0>, <ast.Name object at 0x7da1b2111e40>, <ast.Constant object at 0x7da1b2112740>]]
call[name[request]][constant[headers]] assign[=] name[headers]
call[name[request]][constant[payload]] assign[=] call[name[json].dumps, parameter[name[payload]]]
call[name[request]][constant[opts]] assign[=] dictionary[[<ast.Constant object at 0x7da1b2110bb0>], [<ast.Subscript object at 0x7da1b21123b0>]]
call[name[log].info, parameter[constant[request: %s], name[request]]]
return[name[request]] | keyword[def] identifier[_build_request] ( identifier[self] , identifier[type] , identifier[commands] ):
literal[string]
identifier[request] ={}
identifier[headers] ={
literal[string] : literal[string] ,
}
keyword[if] identifier[self] . identifier[nxargs] [ literal[string] ]:
identifier[user] = identifier[self] . identifier[nxargs] [ literal[string] ]
identifier[headers] [ literal[string] ]= literal[string] + identifier[user] + literal[string]
identifier[request] [ literal[string] ]= identifier[self] . identifier[NXAPI_UDS_URI_PATH]
keyword[else] :
identifier[request] [ literal[string] ]= literal[string] . identifier[format] (
identifier[transport] = identifier[self] . identifier[nxargs] [ literal[string] ],
identifier[host] = identifier[self] . identifier[nxargs] [ literal[string] ],
identifier[port] = identifier[self] . identifier[nxargs] [ literal[string] ],
identifier[uri] = identifier[self] . identifier[NXAPI_REMOTE_URI_PATH] ,
)
keyword[if] identifier[isinstance] ( identifier[commands] ,( identifier[list] , identifier[set] , identifier[tuple] )):
identifier[commands] = literal[string] . identifier[join] ( identifier[commands] )
identifier[payload] ={}
identifier[payload] [ literal[string] ]={
literal[string] : identifier[self] . identifier[NXAPI_VERSION] ,
literal[string] : identifier[type] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : identifier[commands] ,
literal[string] : literal[string] ,
}
identifier[request] [ literal[string] ]= identifier[headers]
identifier[request] [ literal[string] ]= identifier[json] . identifier[dumps] ( identifier[payload] )
identifier[request] [ literal[string] ]={
literal[string] : identifier[self] . identifier[nxargs] [ literal[string] ]
}
identifier[log] . identifier[info] ( literal[string] , identifier[request] )
keyword[return] identifier[request] | def _build_request(self, type, commands):
"""
Build NX-API JSON request.
"""
request = {}
headers = {'content-type': 'application/json'}
if self.nxargs['connect_over_uds']:
user = self.nxargs['cookie']
headers['cookie'] = 'nxapi_auth=' + user + ':local'
request['url'] = self.NXAPI_UDS_URI_PATH # depends on [control=['if'], data=[]]
else:
request['url'] = '{transport}://{host}:{port}{uri}'.format(transport=self.nxargs['transport'], host=self.nxargs['host'], port=self.nxargs['port'], uri=self.NXAPI_REMOTE_URI_PATH)
if isinstance(commands, (list, set, tuple)):
commands = ' ; '.join(commands) # depends on [control=['if'], data=[]]
payload = {}
payload['ins_api'] = {'version': self.NXAPI_VERSION, 'type': type, 'chunk': '0', 'sid': '1', 'input': commands, 'output_format': 'json'}
request['headers'] = headers
request['payload'] = json.dumps(payload)
request['opts'] = {'http_request_timeout': self.nxargs['timeout']}
log.info('request: %s', request)
return request |
def matches(self, address, name=None):
"""Check if this slot identifier matches the given tile.
Matching can happen either by address or by module name (not currently implemented).
Returns:
bool: True if there is a match, otherwise False.
"""
if self.controller:
return address == 8
return self.address == address | def function[matches, parameter[self, address, name]]:
constant[Check if this slot identifier matches the given tile.
Matching can happen either by address or by module name (not currently implemented).
Returns:
bool: True if there is a match, otherwise False.
]
if name[self].controller begin[:]
return[compare[name[address] equal[==] constant[8]]]
return[compare[name[self].address equal[==] name[address]]] | keyword[def] identifier[matches] ( identifier[self] , identifier[address] , identifier[name] = keyword[None] ):
literal[string]
keyword[if] identifier[self] . identifier[controller] :
keyword[return] identifier[address] == literal[int]
keyword[return] identifier[self] . identifier[address] == identifier[address] | def matches(self, address, name=None):
"""Check if this slot identifier matches the given tile.
Matching can happen either by address or by module name (not currently implemented).
Returns:
bool: True if there is a match, otherwise False.
"""
if self.controller:
return address == 8 # depends on [control=['if'], data=[]]
return self.address == address |
def bvlpdu_contents(self, use_dict=None, as_class=dict):
"""Return the contents of an object as a dict."""
foreign_device_table = []
for fdte in self.bvlciFDT:
foreign_device_table.append(fdte.dict_contents(as_class=as_class))
return key_value_contents(use_dict=use_dict, as_class=as_class,
key_values=(
('function', 'ReadForeignDeviceTableAck'),
('foreign_device_table', foreign_device_table),
)) | def function[bvlpdu_contents, parameter[self, use_dict, as_class]]:
constant[Return the contents of an object as a dict.]
variable[foreign_device_table] assign[=] list[[]]
for taget[name[fdte]] in starred[name[self].bvlciFDT] begin[:]
call[name[foreign_device_table].append, parameter[call[name[fdte].dict_contents, parameter[]]]]
return[call[name[key_value_contents], parameter[]]] | keyword[def] identifier[bvlpdu_contents] ( identifier[self] , identifier[use_dict] = keyword[None] , identifier[as_class] = identifier[dict] ):
literal[string]
identifier[foreign_device_table] =[]
keyword[for] identifier[fdte] keyword[in] identifier[self] . identifier[bvlciFDT] :
identifier[foreign_device_table] . identifier[append] ( identifier[fdte] . identifier[dict_contents] ( identifier[as_class] = identifier[as_class] ))
keyword[return] identifier[key_value_contents] ( identifier[use_dict] = identifier[use_dict] , identifier[as_class] = identifier[as_class] ,
identifier[key_values] =(
( literal[string] , literal[string] ),
( literal[string] , identifier[foreign_device_table] ),
)) | def bvlpdu_contents(self, use_dict=None, as_class=dict):
"""Return the contents of an object as a dict."""
foreign_device_table = []
for fdte in self.bvlciFDT:
foreign_device_table.append(fdte.dict_contents(as_class=as_class)) # depends on [control=['for'], data=['fdte']]
return key_value_contents(use_dict=use_dict, as_class=as_class, key_values=(('function', 'ReadForeignDeviceTableAck'), ('foreign_device_table', foreign_device_table))) |
def show_lbaas_member(self, lbaas_member, lbaas_pool, **_params):
"""Fetches information of a certain lbaas_member."""
return self.get(self.lbaas_member_path % (lbaas_pool, lbaas_member),
params=_params) | def function[show_lbaas_member, parameter[self, lbaas_member, lbaas_pool]]:
constant[Fetches information of a certain lbaas_member.]
return[call[name[self].get, parameter[binary_operation[name[self].lbaas_member_path <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f00cf70>, <ast.Name object at 0x7da18f00c190>]]]]]] | keyword[def] identifier[show_lbaas_member] ( identifier[self] , identifier[lbaas_member] , identifier[lbaas_pool] ,** identifier[_params] ):
literal[string]
keyword[return] identifier[self] . identifier[get] ( identifier[self] . identifier[lbaas_member_path] %( identifier[lbaas_pool] , identifier[lbaas_member] ),
identifier[params] = identifier[_params] ) | def show_lbaas_member(self, lbaas_member, lbaas_pool, **_params):
"""Fetches information of a certain lbaas_member."""
return self.get(self.lbaas_member_path % (lbaas_pool, lbaas_member), params=_params) |
def _replace_on_id(self, new_object):
"""Replace an object by another with the same id."""
the_id = new_object.id
the_index = self._dict[the_id]
list.__setitem__(self, the_index, new_object) | def function[_replace_on_id, parameter[self, new_object]]:
constant[Replace an object by another with the same id.]
variable[the_id] assign[=] name[new_object].id
variable[the_index] assign[=] call[name[self]._dict][name[the_id]]
call[name[list].__setitem__, parameter[name[self], name[the_index], name[new_object]]] | keyword[def] identifier[_replace_on_id] ( identifier[self] , identifier[new_object] ):
literal[string]
identifier[the_id] = identifier[new_object] . identifier[id]
identifier[the_index] = identifier[self] . identifier[_dict] [ identifier[the_id] ]
identifier[list] . identifier[__setitem__] ( identifier[self] , identifier[the_index] , identifier[new_object] ) | def _replace_on_id(self, new_object):
"""Replace an object by another with the same id."""
the_id = new_object.id
the_index = self._dict[the_id]
list.__setitem__(self, the_index, new_object) |
def _assemble_agent_str(agent):
"""Assemble an Agent object to text."""
agent_str = agent.name
# Only do the more detailed assembly for molecular agents
if not isinstance(agent, ist.Agent):
return agent_str
# Handle mutation conditions
if agent.mutations:
is_generic = False
mut_strs = []
for mut in agent.mutations:
res_to = mut.residue_to if mut.residue_to else ''
res_from = mut.residue_from if mut.residue_from else ''
pos = mut.position if mut.position else ''
mut_str = '%s%s%s' % (res_from, pos, res_to)
# If this is the only mutation and there are no details
# then this is a generic mutant
if not mut_str and len(agent.mutations) == 1:
is_generic = True
break
mut_strs.append(mut_str)
if is_generic:
agent_str = 'mutated ' + agent_str
else:
mut_strs = '/'.join(mut_strs)
agent_str = '%s-%s' % (agent_str, mut_strs)
# Handle location
if agent.location is not None:
agent_str += ' in the ' + agent.location
if not agent.mods and not agent.bound_conditions and not agent.activity:
return agent_str
# Handle bound conditions
bound_to = [bc.agent.name for bc in
agent.bound_conditions if bc.is_bound]
not_bound_to = [bc.agent.name for bc in
agent.bound_conditions if not bc.is_bound]
if bound_to:
agent_str += ' bound to ' + _join_list(bound_to)
if not_bound_to:
agent_str += ' and not bound to ' +\
_join_list(not_bound_to)
else:
if not_bound_to:
agent_str += ' not bound to ' +\
_join_list(not_bound_to)
# Handle modification conditions
if agent.mods:
# Special case
if len(agent.mods) == 1 and agent.mods[0].position is None:
prefix = _mod_state_str(agent.mods[0].mod_type)
if agent.mods[0].residue is not None:
residue_str =\
ist.amino_acids[agent.mods[0].residue]['full_name']
prefix = residue_str + '-' + prefix
agent_str = prefix + ' ' + agent_str
else:
if agent.bound_conditions:
agent_str += ' and'
agent_str += ' %s on ' % _mod_state_str(agent.mods[0].mod_type)
mod_lst = []
for m in agent.mods:
if m.position is None:
if m.residue is not None:
residue_str =\
ist.amino_acids[m.residue]['full_name']
mod_lst.append(residue_str)
else:
mod_lst.append('an unknown residue')
elif m.position is not None and m.residue is None:
mod_lst.append('amino acid %s' % m.position)
else:
mod_lst.append(m.residue + m.position)
agent_str += _join_list(mod_lst)
# Handle activity conditions
if agent.activity is not None:
# Get the modifier specific to the activity type, if any
pre_prefix = \
activity_type_prefix.get(agent.activity.activity_type, '')
if agent.activity.is_active:
prefix = pre_prefix + 'active'
else:
# See if there is a special override for the inactive form
if agent.activity.activity_type in inactivity_type_prefix_override:
pre_prefix = inactivity_type_prefix_override[
agent.activity.activity_type]
prefix = pre_prefix + 'inactive'
agent_str = prefix + ' ' + agent_str
return agent_str | def function[_assemble_agent_str, parameter[agent]]:
constant[Assemble an Agent object to text.]
variable[agent_str] assign[=] name[agent].name
if <ast.UnaryOp object at 0x7da1b0f07d90> begin[:]
return[name[agent_str]]
if name[agent].mutations begin[:]
variable[is_generic] assign[=] constant[False]
variable[mut_strs] assign[=] list[[]]
for taget[name[mut]] in starred[name[agent].mutations] begin[:]
variable[res_to] assign[=] <ast.IfExp object at 0x7da1b0f078e0>
variable[res_from] assign[=] <ast.IfExp object at 0x7da1b0f07760>
variable[pos] assign[=] <ast.IfExp object at 0x7da1b0f075e0>
variable[mut_str] assign[=] binary_operation[constant[%s%s%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0f073d0>, <ast.Name object at 0x7da1b0f073a0>, <ast.Name object at 0x7da1b0f07370>]]]
if <ast.BoolOp object at 0x7da1b0f07310> begin[:]
variable[is_generic] assign[=] constant[True]
break
call[name[mut_strs].append, parameter[name[mut_str]]]
if name[is_generic] begin[:]
variable[agent_str] assign[=] binary_operation[constant[mutated ] + name[agent_str]]
if compare[name[agent].location is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da1b0d3eb90>
if <ast.BoolOp object at 0x7da1b0d3d450> begin[:]
return[name[agent_str]]
variable[bound_to] assign[=] <ast.ListComp object at 0x7da1b0d3d390>
variable[not_bound_to] assign[=] <ast.ListComp object at 0x7da1b0d3f400>
if name[bound_to] begin[:]
<ast.AugAssign object at 0x7da207f009d0>
if name[not_bound_to] begin[:]
<ast.AugAssign object at 0x7da207f004c0>
if name[agent].mods begin[:]
if <ast.BoolOp object at 0x7da207f03130> begin[:]
variable[prefix] assign[=] call[name[_mod_state_str], parameter[call[name[agent].mods][constant[0]].mod_type]]
if compare[call[name[agent].mods][constant[0]].residue is_not constant[None]] begin[:]
variable[residue_str] assign[=] call[call[name[ist].amino_acids][call[name[agent].mods][constant[0]].residue]][constant[full_name]]
variable[prefix] assign[=] binary_operation[binary_operation[name[residue_str] + constant[-]] + name[prefix]]
variable[agent_str] assign[=] binary_operation[binary_operation[name[prefix] + constant[ ]] + name[agent_str]]
if compare[name[agent].activity is_not constant[None]] begin[:]
variable[pre_prefix] assign[=] call[name[activity_type_prefix].get, parameter[name[agent].activity.activity_type, constant[]]]
if name[agent].activity.is_active begin[:]
variable[prefix] assign[=] binary_operation[name[pre_prefix] + constant[active]]
variable[agent_str] assign[=] binary_operation[binary_operation[name[prefix] + constant[ ]] + name[agent_str]]
return[name[agent_str]] | keyword[def] identifier[_assemble_agent_str] ( identifier[agent] ):
literal[string]
identifier[agent_str] = identifier[agent] . identifier[name]
keyword[if] keyword[not] identifier[isinstance] ( identifier[agent] , identifier[ist] . identifier[Agent] ):
keyword[return] identifier[agent_str]
keyword[if] identifier[agent] . identifier[mutations] :
identifier[is_generic] = keyword[False]
identifier[mut_strs] =[]
keyword[for] identifier[mut] keyword[in] identifier[agent] . identifier[mutations] :
identifier[res_to] = identifier[mut] . identifier[residue_to] keyword[if] identifier[mut] . identifier[residue_to] keyword[else] literal[string]
identifier[res_from] = identifier[mut] . identifier[residue_from] keyword[if] identifier[mut] . identifier[residue_from] keyword[else] literal[string]
identifier[pos] = identifier[mut] . identifier[position] keyword[if] identifier[mut] . identifier[position] keyword[else] literal[string]
identifier[mut_str] = literal[string] %( identifier[res_from] , identifier[pos] , identifier[res_to] )
keyword[if] keyword[not] identifier[mut_str] keyword[and] identifier[len] ( identifier[agent] . identifier[mutations] )== literal[int] :
identifier[is_generic] = keyword[True]
keyword[break]
identifier[mut_strs] . identifier[append] ( identifier[mut_str] )
keyword[if] identifier[is_generic] :
identifier[agent_str] = literal[string] + identifier[agent_str]
keyword[else] :
identifier[mut_strs] = literal[string] . identifier[join] ( identifier[mut_strs] )
identifier[agent_str] = literal[string] %( identifier[agent_str] , identifier[mut_strs] )
keyword[if] identifier[agent] . identifier[location] keyword[is] keyword[not] keyword[None] :
identifier[agent_str] += literal[string] + identifier[agent] . identifier[location]
keyword[if] keyword[not] identifier[agent] . identifier[mods] keyword[and] keyword[not] identifier[agent] . identifier[bound_conditions] keyword[and] keyword[not] identifier[agent] . identifier[activity] :
keyword[return] identifier[agent_str]
identifier[bound_to] =[ identifier[bc] . identifier[agent] . identifier[name] keyword[for] identifier[bc] keyword[in]
identifier[agent] . identifier[bound_conditions] keyword[if] identifier[bc] . identifier[is_bound] ]
identifier[not_bound_to] =[ identifier[bc] . identifier[agent] . identifier[name] keyword[for] identifier[bc] keyword[in]
identifier[agent] . identifier[bound_conditions] keyword[if] keyword[not] identifier[bc] . identifier[is_bound] ]
keyword[if] identifier[bound_to] :
identifier[agent_str] += literal[string] + identifier[_join_list] ( identifier[bound_to] )
keyword[if] identifier[not_bound_to] :
identifier[agent_str] += literal[string] + identifier[_join_list] ( identifier[not_bound_to] )
keyword[else] :
keyword[if] identifier[not_bound_to] :
identifier[agent_str] += literal[string] + identifier[_join_list] ( identifier[not_bound_to] )
keyword[if] identifier[agent] . identifier[mods] :
keyword[if] identifier[len] ( identifier[agent] . identifier[mods] )== literal[int] keyword[and] identifier[agent] . identifier[mods] [ literal[int] ]. identifier[position] keyword[is] keyword[None] :
identifier[prefix] = identifier[_mod_state_str] ( identifier[agent] . identifier[mods] [ literal[int] ]. identifier[mod_type] )
keyword[if] identifier[agent] . identifier[mods] [ literal[int] ]. identifier[residue] keyword[is] keyword[not] keyword[None] :
identifier[residue_str] = identifier[ist] . identifier[amino_acids] [ identifier[agent] . identifier[mods] [ literal[int] ]. identifier[residue] ][ literal[string] ]
identifier[prefix] = identifier[residue_str] + literal[string] + identifier[prefix]
identifier[agent_str] = identifier[prefix] + literal[string] + identifier[agent_str]
keyword[else] :
keyword[if] identifier[agent] . identifier[bound_conditions] :
identifier[agent_str] += literal[string]
identifier[agent_str] += literal[string] % identifier[_mod_state_str] ( identifier[agent] . identifier[mods] [ literal[int] ]. identifier[mod_type] )
identifier[mod_lst] =[]
keyword[for] identifier[m] keyword[in] identifier[agent] . identifier[mods] :
keyword[if] identifier[m] . identifier[position] keyword[is] keyword[None] :
keyword[if] identifier[m] . identifier[residue] keyword[is] keyword[not] keyword[None] :
identifier[residue_str] = identifier[ist] . identifier[amino_acids] [ identifier[m] . identifier[residue] ][ literal[string] ]
identifier[mod_lst] . identifier[append] ( identifier[residue_str] )
keyword[else] :
identifier[mod_lst] . identifier[append] ( literal[string] )
keyword[elif] identifier[m] . identifier[position] keyword[is] keyword[not] keyword[None] keyword[and] identifier[m] . identifier[residue] keyword[is] keyword[None] :
identifier[mod_lst] . identifier[append] ( literal[string] % identifier[m] . identifier[position] )
keyword[else] :
identifier[mod_lst] . identifier[append] ( identifier[m] . identifier[residue] + identifier[m] . identifier[position] )
identifier[agent_str] += identifier[_join_list] ( identifier[mod_lst] )
keyword[if] identifier[agent] . identifier[activity] keyword[is] keyword[not] keyword[None] :
identifier[pre_prefix] = identifier[activity_type_prefix] . identifier[get] ( identifier[agent] . identifier[activity] . identifier[activity_type] , literal[string] )
keyword[if] identifier[agent] . identifier[activity] . identifier[is_active] :
identifier[prefix] = identifier[pre_prefix] + literal[string]
keyword[else] :
keyword[if] identifier[agent] . identifier[activity] . identifier[activity_type] keyword[in] identifier[inactivity_type_prefix_override] :
identifier[pre_prefix] = identifier[inactivity_type_prefix_override] [
identifier[agent] . identifier[activity] . identifier[activity_type] ]
identifier[prefix] = identifier[pre_prefix] + literal[string]
identifier[agent_str] = identifier[prefix] + literal[string] + identifier[agent_str]
keyword[return] identifier[agent_str] | def _assemble_agent_str(agent):
"""Assemble an Agent object to text."""
agent_str = agent.name
# Only do the more detailed assembly for molecular agents
if not isinstance(agent, ist.Agent):
return agent_str # depends on [control=['if'], data=[]]
# Handle mutation conditions
if agent.mutations:
is_generic = False
mut_strs = []
for mut in agent.mutations:
res_to = mut.residue_to if mut.residue_to else ''
res_from = mut.residue_from if mut.residue_from else ''
pos = mut.position if mut.position else ''
mut_str = '%s%s%s' % (res_from, pos, res_to)
# If this is the only mutation and there are no details
# then this is a generic mutant
if not mut_str and len(agent.mutations) == 1:
is_generic = True
break # depends on [control=['if'], data=[]]
mut_strs.append(mut_str) # depends on [control=['for'], data=['mut']]
if is_generic:
agent_str = 'mutated ' + agent_str # depends on [control=['if'], data=[]]
else:
mut_strs = '/'.join(mut_strs)
agent_str = '%s-%s' % (agent_str, mut_strs) # depends on [control=['if'], data=[]]
# Handle location
if agent.location is not None:
agent_str += ' in the ' + agent.location # depends on [control=['if'], data=[]]
if not agent.mods and (not agent.bound_conditions) and (not agent.activity):
return agent_str # depends on [control=['if'], data=[]]
# Handle bound conditions
bound_to = [bc.agent.name for bc in agent.bound_conditions if bc.is_bound]
not_bound_to = [bc.agent.name for bc in agent.bound_conditions if not bc.is_bound]
if bound_to:
agent_str += ' bound to ' + _join_list(bound_to)
if not_bound_to:
agent_str += ' and not bound to ' + _join_list(not_bound_to) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif not_bound_to:
agent_str += ' not bound to ' + _join_list(not_bound_to) # depends on [control=['if'], data=[]]
# Handle modification conditions
if agent.mods:
# Special case
if len(agent.mods) == 1 and agent.mods[0].position is None:
prefix = _mod_state_str(agent.mods[0].mod_type)
if agent.mods[0].residue is not None:
residue_str = ist.amino_acids[agent.mods[0].residue]['full_name']
prefix = residue_str + '-' + prefix # depends on [control=['if'], data=[]]
agent_str = prefix + ' ' + agent_str # depends on [control=['if'], data=[]]
else:
if agent.bound_conditions:
agent_str += ' and' # depends on [control=['if'], data=[]]
agent_str += ' %s on ' % _mod_state_str(agent.mods[0].mod_type)
mod_lst = []
for m in agent.mods:
if m.position is None:
if m.residue is not None:
residue_str = ist.amino_acids[m.residue]['full_name']
mod_lst.append(residue_str) # depends on [control=['if'], data=[]]
else:
mod_lst.append('an unknown residue') # depends on [control=['if'], data=[]]
elif m.position is not None and m.residue is None:
mod_lst.append('amino acid %s' % m.position) # depends on [control=['if'], data=[]]
else:
mod_lst.append(m.residue + m.position) # depends on [control=['for'], data=['m']]
agent_str += _join_list(mod_lst) # depends on [control=['if'], data=[]]
# Handle activity conditions
if agent.activity is not None:
# Get the modifier specific to the activity type, if any
pre_prefix = activity_type_prefix.get(agent.activity.activity_type, '')
if agent.activity.is_active:
prefix = pre_prefix + 'active' # depends on [control=['if'], data=[]]
else:
# See if there is a special override for the inactive form
if agent.activity.activity_type in inactivity_type_prefix_override:
pre_prefix = inactivity_type_prefix_override[agent.activity.activity_type] # depends on [control=['if'], data=['inactivity_type_prefix_override']]
prefix = pre_prefix + 'inactive'
agent_str = prefix + ' ' + agent_str # depends on [control=['if'], data=[]]
return agent_str |
def _rjust(expr, width, fillchar=' '):
"""
Filling left side of strings in the sequence or scalar with an additional character.
Equivalent to str.rjust().
:param expr:
:param width: Minimum width of resulting string; additional characters will be filled with `fillchar`
:param fillchar: Additional character for filling, default is whitespace.
:return: sequence or scalar
"""
return _string_op(expr, Rjust, _width=width, _fillchar=fillchar) | def function[_rjust, parameter[expr, width, fillchar]]:
constant[
Filling left side of strings in the sequence or scalar with an additional character.
Equivalent to str.rjust().
:param expr:
:param width: Minimum width of resulting string; additional characters will be filled with `fillchar`
:param fillchar: Additional character for filling, default is whitespace.
:return: sequence or scalar
]
return[call[name[_string_op], parameter[name[expr], name[Rjust]]]] | keyword[def] identifier[_rjust] ( identifier[expr] , identifier[width] , identifier[fillchar] = literal[string] ):
literal[string]
keyword[return] identifier[_string_op] ( identifier[expr] , identifier[Rjust] , identifier[_width] = identifier[width] , identifier[_fillchar] = identifier[fillchar] ) | def _rjust(expr, width, fillchar=' '):
"""
Filling left side of strings in the sequence or scalar with an additional character.
Equivalent to str.rjust().
:param expr:
:param width: Minimum width of resulting string; additional characters will be filled with `fillchar`
:param fillchar: Additional character for filling, default is whitespace.
:return: sequence or scalar
"""
return _string_op(expr, Rjust, _width=width, _fillchar=fillchar) |
def sys_mmap_pgoff(self, address, size, prot, flags, fd, offset):
"""Wrapper for mmap2"""
return self.sys_mmap2(address, size, prot, flags, fd, offset) | def function[sys_mmap_pgoff, parameter[self, address, size, prot, flags, fd, offset]]:
constant[Wrapper for mmap2]
return[call[name[self].sys_mmap2, parameter[name[address], name[size], name[prot], name[flags], name[fd], name[offset]]]] | keyword[def] identifier[sys_mmap_pgoff] ( identifier[self] , identifier[address] , identifier[size] , identifier[prot] , identifier[flags] , identifier[fd] , identifier[offset] ):
literal[string]
keyword[return] identifier[self] . identifier[sys_mmap2] ( identifier[address] , identifier[size] , identifier[prot] , identifier[flags] , identifier[fd] , identifier[offset] ) | def sys_mmap_pgoff(self, address, size, prot, flags, fd, offset):
"""Wrapper for mmap2"""
return self.sys_mmap2(address, size, prot, flags, fd, offset) |
def _compute_mean_on_rock(self, C, mag, rrup, F, HW):
"""
Compute mean value on rock (that is eq.1, page 105 with S = 0)
"""
f1 = self._compute_f1(C, mag, rrup)
f3 = self._compute_f3(C, mag)
f4 = self._compute_f4(C, mag, rrup)
return f1 + F * f3 + HW * f4 | def function[_compute_mean_on_rock, parameter[self, C, mag, rrup, F, HW]]:
constant[
Compute mean value on rock (that is eq.1, page 105 with S = 0)
]
variable[f1] assign[=] call[name[self]._compute_f1, parameter[name[C], name[mag], name[rrup]]]
variable[f3] assign[=] call[name[self]._compute_f3, parameter[name[C], name[mag]]]
variable[f4] assign[=] call[name[self]._compute_f4, parameter[name[C], name[mag], name[rrup]]]
return[binary_operation[binary_operation[name[f1] + binary_operation[name[F] * name[f3]]] + binary_operation[name[HW] * name[f4]]]] | keyword[def] identifier[_compute_mean_on_rock] ( identifier[self] , identifier[C] , identifier[mag] , identifier[rrup] , identifier[F] , identifier[HW] ):
literal[string]
identifier[f1] = identifier[self] . identifier[_compute_f1] ( identifier[C] , identifier[mag] , identifier[rrup] )
identifier[f3] = identifier[self] . identifier[_compute_f3] ( identifier[C] , identifier[mag] )
identifier[f4] = identifier[self] . identifier[_compute_f4] ( identifier[C] , identifier[mag] , identifier[rrup] )
keyword[return] identifier[f1] + identifier[F] * identifier[f3] + identifier[HW] * identifier[f4] | def _compute_mean_on_rock(self, C, mag, rrup, F, HW):
"""
Compute mean value on rock (that is eq.1, page 105 with S = 0)
"""
f1 = self._compute_f1(C, mag, rrup)
f3 = self._compute_f3(C, mag)
f4 = self._compute_f4(C, mag, rrup)
return f1 + F * f3 + HW * f4 |
def coldesc(self, columnname, actual=True):
"""Make the description of a column.
Make the description object of the given column as
:func:`makecoldesc` is doing with the description given by
:func:`getcoldesc`.
"""
import casacore.tables.tableutil as pt
return pt.makecoldesc(columnname, self.getcoldesc(columnname, actual)) | def function[coldesc, parameter[self, columnname, actual]]:
constant[Make the description of a column.
Make the description object of the given column as
:func:`makecoldesc` is doing with the description given by
:func:`getcoldesc`.
]
import module[casacore.tables.tableutil] as alias[pt]
return[call[name[pt].makecoldesc, parameter[name[columnname], call[name[self].getcoldesc, parameter[name[columnname], name[actual]]]]]] | keyword[def] identifier[coldesc] ( identifier[self] , identifier[columnname] , identifier[actual] = keyword[True] ):
literal[string]
keyword[import] identifier[casacore] . identifier[tables] . identifier[tableutil] keyword[as] identifier[pt]
keyword[return] identifier[pt] . identifier[makecoldesc] ( identifier[columnname] , identifier[self] . identifier[getcoldesc] ( identifier[columnname] , identifier[actual] )) | def coldesc(self, columnname, actual=True):
"""Make the description of a column.
Make the description object of the given column as
:func:`makecoldesc` is doing with the description given by
:func:`getcoldesc`.
"""
import casacore.tables.tableutil as pt
return pt.makecoldesc(columnname, self.getcoldesc(columnname, actual)) |
def view_rect(self) -> QRectF:
"""
Return the boundaries of the view in scene coordinates
"""
top_left = self.mapToScene(0, 0)
bottom_right = self.mapToScene(self.viewport().width() - 1, self.viewport().height() - 1)
return QRectF(top_left, bottom_right) | def function[view_rect, parameter[self]]:
constant[
Return the boundaries of the view in scene coordinates
]
variable[top_left] assign[=] call[name[self].mapToScene, parameter[constant[0], constant[0]]]
variable[bottom_right] assign[=] call[name[self].mapToScene, parameter[binary_operation[call[call[name[self].viewport, parameter[]].width, parameter[]] - constant[1]], binary_operation[call[call[name[self].viewport, parameter[]].height, parameter[]] - constant[1]]]]
return[call[name[QRectF], parameter[name[top_left], name[bottom_right]]]] | keyword[def] identifier[view_rect] ( identifier[self] )-> identifier[QRectF] :
literal[string]
identifier[top_left] = identifier[self] . identifier[mapToScene] ( literal[int] , literal[int] )
identifier[bottom_right] = identifier[self] . identifier[mapToScene] ( identifier[self] . identifier[viewport] (). identifier[width] ()- literal[int] , identifier[self] . identifier[viewport] (). identifier[height] ()- literal[int] )
keyword[return] identifier[QRectF] ( identifier[top_left] , identifier[bottom_right] ) | def view_rect(self) -> QRectF:
"""
Return the boundaries of the view in scene coordinates
"""
top_left = self.mapToScene(0, 0)
bottom_right = self.mapToScene(self.viewport().width() - 1, self.viewport().height() - 1)
return QRectF(top_left, bottom_right) |
def find_run():
"""Finds the last good run of the given name for a release."""
build = g.build
last_good_release, last_good_run = _find_last_good_run(build)
if last_good_run:
return flask.jsonify(
success=True,
build_id=build.id,
release_name=last_good_release.name,
release_number=last_good_release.number,
run_name=last_good_run.name,
url=last_good_run.url,
image=last_good_run.image,
log=last_good_run.log,
config=last_good_run.config)
return utils.jsonify_error('Run not found') | def function[find_run, parameter[]]:
constant[Finds the last good run of the given name for a release.]
variable[build] assign[=] name[g].build
<ast.Tuple object at 0x7da18bc72fe0> assign[=] call[name[_find_last_good_run], parameter[name[build]]]
if name[last_good_run] begin[:]
return[call[name[flask].jsonify, parameter[]]]
return[call[name[utils].jsonify_error, parameter[constant[Run not found]]]] | keyword[def] identifier[find_run] ():
literal[string]
identifier[build] = identifier[g] . identifier[build]
identifier[last_good_release] , identifier[last_good_run] = identifier[_find_last_good_run] ( identifier[build] )
keyword[if] identifier[last_good_run] :
keyword[return] identifier[flask] . identifier[jsonify] (
identifier[success] = keyword[True] ,
identifier[build_id] = identifier[build] . identifier[id] ,
identifier[release_name] = identifier[last_good_release] . identifier[name] ,
identifier[release_number] = identifier[last_good_release] . identifier[number] ,
identifier[run_name] = identifier[last_good_run] . identifier[name] ,
identifier[url] = identifier[last_good_run] . identifier[url] ,
identifier[image] = identifier[last_good_run] . identifier[image] ,
identifier[log] = identifier[last_good_run] . identifier[log] ,
identifier[config] = identifier[last_good_run] . identifier[config] )
keyword[return] identifier[utils] . identifier[jsonify_error] ( literal[string] ) | def find_run():
"""Finds the last good run of the given name for a release."""
build = g.build
(last_good_release, last_good_run) = _find_last_good_run(build)
if last_good_run:
return flask.jsonify(success=True, build_id=build.id, release_name=last_good_release.name, release_number=last_good_release.number, run_name=last_good_run.name, url=last_good_run.url, image=last_good_run.image, log=last_good_run.log, config=last_good_run.config) # depends on [control=['if'], data=[]]
return utils.jsonify_error('Run not found') |
def augknt(knots,order):
"""Augment knot sequence such that some boundary conditions
are met."""
a = []
[a.append(knots[0]) for t in range(0,order)]
[a.append(k) for k in knots]
[a.append(knots[-1]) for t in range(0,order)]
return np.array(a) | def function[augknt, parameter[knots, order]]:
constant[Augment knot sequence such that some boundary conditions
are met.]
variable[a] assign[=] list[[]]
<ast.ListComp object at 0x7da1b0fa5210>
<ast.ListComp object at 0x7da20cabf2e0>
<ast.ListComp object at 0x7da20cabdb10>
return[call[name[np].array, parameter[name[a]]]] | keyword[def] identifier[augknt] ( identifier[knots] , identifier[order] ):
literal[string]
identifier[a] =[]
[ identifier[a] . identifier[append] ( identifier[knots] [ literal[int] ]) keyword[for] identifier[t] keyword[in] identifier[range] ( literal[int] , identifier[order] )]
[ identifier[a] . identifier[append] ( identifier[k] ) keyword[for] identifier[k] keyword[in] identifier[knots] ]
[ identifier[a] . identifier[append] ( identifier[knots] [- literal[int] ]) keyword[for] identifier[t] keyword[in] identifier[range] ( literal[int] , identifier[order] )]
keyword[return] identifier[np] . identifier[array] ( identifier[a] ) | def augknt(knots, order):
"""Augment knot sequence such that some boundary conditions
are met."""
a = []
[a.append(knots[0]) for t in range(0, order)]
[a.append(k) for k in knots]
[a.append(knots[-1]) for t in range(0, order)]
return np.array(a) |
def create_asset_content(self, asset_content_form=None):
"""Creates new ``AssetContent`` for a given asset.
arg: asset_content_form (osid.repository.AssetContentForm):
the form for this ``AssetContent``
return: (osid.repository.AssetContent) - the new
``AssetContent``
raise: IllegalState - ``asset_content_form`` already used in a
create transaction
raise: InvalidArgument - one or more of the form elements is
invalid
raise: NullArgument - ``asset_content_form`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``asset_content_form`` did not originate
from ``get_asset_content_form_for_create()``
*compliance: mandatory -- This method must be implemented.*
"""
if isinstance(asset_content_form, AssetContentForm):
asset_content = self._provider_session.create_asset_content(
asset_content_form._payload)
else:
asset_content = self._provider_session.create_asset_content(
asset_content_form)
try:
if asset_content.has_url() and 'amazonaws.com' in asset_content.get_url():
return AssetContent(asset_content, self._config_map)
except TypeError:
pass
return asset_content | def function[create_asset_content, parameter[self, asset_content_form]]:
constant[Creates new ``AssetContent`` for a given asset.
arg: asset_content_form (osid.repository.AssetContentForm):
the form for this ``AssetContent``
return: (osid.repository.AssetContent) - the new
``AssetContent``
raise: IllegalState - ``asset_content_form`` already used in a
create transaction
raise: InvalidArgument - one or more of the form elements is
invalid
raise: NullArgument - ``asset_content_form`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``asset_content_form`` did not originate
from ``get_asset_content_form_for_create()``
*compliance: mandatory -- This method must be implemented.*
]
if call[name[isinstance], parameter[name[asset_content_form], name[AssetContentForm]]] begin[:]
variable[asset_content] assign[=] call[name[self]._provider_session.create_asset_content, parameter[name[asset_content_form]._payload]]
<ast.Try object at 0x7da20e9577f0>
return[name[asset_content]] | keyword[def] identifier[create_asset_content] ( identifier[self] , identifier[asset_content_form] = keyword[None] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[asset_content_form] , identifier[AssetContentForm] ):
identifier[asset_content] = identifier[self] . identifier[_provider_session] . identifier[create_asset_content] (
identifier[asset_content_form] . identifier[_payload] )
keyword[else] :
identifier[asset_content] = identifier[self] . identifier[_provider_session] . identifier[create_asset_content] (
identifier[asset_content_form] )
keyword[try] :
keyword[if] identifier[asset_content] . identifier[has_url] () keyword[and] literal[string] keyword[in] identifier[asset_content] . identifier[get_url] ():
keyword[return] identifier[AssetContent] ( identifier[asset_content] , identifier[self] . identifier[_config_map] )
keyword[except] identifier[TypeError] :
keyword[pass]
keyword[return] identifier[asset_content] | def create_asset_content(self, asset_content_form=None):
"""Creates new ``AssetContent`` for a given asset.
arg: asset_content_form (osid.repository.AssetContentForm):
the form for this ``AssetContent``
return: (osid.repository.AssetContent) - the new
``AssetContent``
raise: IllegalState - ``asset_content_form`` already used in a
create transaction
raise: InvalidArgument - one or more of the form elements is
invalid
raise: NullArgument - ``asset_content_form`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``asset_content_form`` did not originate
from ``get_asset_content_form_for_create()``
*compliance: mandatory -- This method must be implemented.*
"""
if isinstance(asset_content_form, AssetContentForm):
asset_content = self._provider_session.create_asset_content(asset_content_form._payload) # depends on [control=['if'], data=[]]
else:
asset_content = self._provider_session.create_asset_content(asset_content_form)
try:
if asset_content.has_url() and 'amazonaws.com' in asset_content.get_url():
return AssetContent(asset_content, self._config_map) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except TypeError:
pass # depends on [control=['except'], data=[]]
return asset_content |
def user_in_all_groups(user, groups):
"""Returns True if the given user is in all given groups"""
return user_is_superuser(user) or all(user_in_group(user, group) for group in groups) | def function[user_in_all_groups, parameter[user, groups]]:
constant[Returns True if the given user is in all given groups]
return[<ast.BoolOp object at 0x7da1b04d0b20>] | keyword[def] identifier[user_in_all_groups] ( identifier[user] , identifier[groups] ):
literal[string]
keyword[return] identifier[user_is_superuser] ( identifier[user] ) keyword[or] identifier[all] ( identifier[user_in_group] ( identifier[user] , identifier[group] ) keyword[for] identifier[group] keyword[in] identifier[groups] ) | def user_in_all_groups(user, groups):
"""Returns True if the given user is in all given groups"""
return user_is_superuser(user) or all((user_in_group(user, group) for group in groups)) |
def create_metadata(self, resource, keys_vals):
"""
Associates new key-value pairs with the given resource.
Will attempt to add all key-value pairs even if some fail.
Args:
resource (intern.resource.boss.BossResource)
keys_vals (dictionary): Collection of key-value pairs to assign to
given resource.
Raises:
HTTPErrorList on failure.
"""
self.metadata_service.set_auth(self._token_metadata)
self.metadata_service.create(resource, keys_vals) | def function[create_metadata, parameter[self, resource, keys_vals]]:
constant[
Associates new key-value pairs with the given resource.
Will attempt to add all key-value pairs even if some fail.
Args:
resource (intern.resource.boss.BossResource)
keys_vals (dictionary): Collection of key-value pairs to assign to
given resource.
Raises:
HTTPErrorList on failure.
]
call[name[self].metadata_service.set_auth, parameter[name[self]._token_metadata]]
call[name[self].metadata_service.create, parameter[name[resource], name[keys_vals]]] | keyword[def] identifier[create_metadata] ( identifier[self] , identifier[resource] , identifier[keys_vals] ):
literal[string]
identifier[self] . identifier[metadata_service] . identifier[set_auth] ( identifier[self] . identifier[_token_metadata] )
identifier[self] . identifier[metadata_service] . identifier[create] ( identifier[resource] , identifier[keys_vals] ) | def create_metadata(self, resource, keys_vals):
"""
Associates new key-value pairs with the given resource.
Will attempt to add all key-value pairs even if some fail.
Args:
resource (intern.resource.boss.BossResource)
keys_vals (dictionary): Collection of key-value pairs to assign to
given resource.
Raises:
HTTPErrorList on failure.
"""
self.metadata_service.set_auth(self._token_metadata)
self.metadata_service.create(resource, keys_vals) |
def verify(self, message, signature, do_hash=True):
""" Verifies that message was appropriately signed.
Args:
message (bytes): The message to be verified.
signature (Signature): A signature object.
do_hash (bool): True if the message should be hashed prior
to signing, False if not. This should always be left as
True except in special situations which require doing
the hash outside (e.g. handling Bitcoin bugs).
Returns:
verified (bool): True if the signature is verified, False
otherwise.
"""
return self._key.verify(message, signature, do_hash) | def function[verify, parameter[self, message, signature, do_hash]]:
constant[ Verifies that message was appropriately signed.
Args:
message (bytes): The message to be verified.
signature (Signature): A signature object.
do_hash (bool): True if the message should be hashed prior
to signing, False if not. This should always be left as
True except in special situations which require doing
the hash outside (e.g. handling Bitcoin bugs).
Returns:
verified (bool): True if the signature is verified, False
otherwise.
]
return[call[name[self]._key.verify, parameter[name[message], name[signature], name[do_hash]]]] | keyword[def] identifier[verify] ( identifier[self] , identifier[message] , identifier[signature] , identifier[do_hash] = keyword[True] ):
literal[string]
keyword[return] identifier[self] . identifier[_key] . identifier[verify] ( identifier[message] , identifier[signature] , identifier[do_hash] ) | def verify(self, message, signature, do_hash=True):
""" Verifies that message was appropriately signed.
Args:
message (bytes): The message to be verified.
signature (Signature): A signature object.
do_hash (bool): True if the message should be hashed prior
to signing, False if not. This should always be left as
True except in special situations which require doing
the hash outside (e.g. handling Bitcoin bugs).
Returns:
verified (bool): True if the signature is verified, False
otherwise.
"""
return self._key.verify(message, signature, do_hash) |
def transmit(self, bytes, protocol=None):
"""Transmit an apdu. Internally calls doTransmit() class method
and notify observers upon command/response APDU events.
Subclasses must override the doTransmit() class method.
@param bytes: list of bytes to transmit
@param protocol: the transmission protocol, from
CardConnection.T0_protocol,
CardConnection.T1_protocol, or
CardConnection.RAW_protocol
"""
Observable.setChanged(self)
Observable.notifyObservers(self,
CardConnectionEvent(
'command',
[bytes, protocol]))
data, sw1, sw2 = self.doTransmit(bytes, protocol)
Observable.setChanged(self)
Observable.notifyObservers(self,
CardConnectionEvent(
'response',
[data, sw1, sw2]))
if self.errorcheckingchain is not None:
self.errorcheckingchain[0](data, sw1, sw2)
return data, sw1, sw2 | def function[transmit, parameter[self, bytes, protocol]]:
constant[Transmit an apdu. Internally calls doTransmit() class method
and notify observers upon command/response APDU events.
Subclasses must override the doTransmit() class method.
@param bytes: list of bytes to transmit
@param protocol: the transmission protocol, from
CardConnection.T0_protocol,
CardConnection.T1_protocol, or
CardConnection.RAW_protocol
]
call[name[Observable].setChanged, parameter[name[self]]]
call[name[Observable].notifyObservers, parameter[name[self], call[name[CardConnectionEvent], parameter[constant[command], list[[<ast.Name object at 0x7da1b23ef550>, <ast.Name object at 0x7da1b23ecd00>]]]]]]
<ast.Tuple object at 0x7da1b23eddb0> assign[=] call[name[self].doTransmit, parameter[name[bytes], name[protocol]]]
call[name[Observable].setChanged, parameter[name[self]]]
call[name[Observable].notifyObservers, parameter[name[self], call[name[CardConnectionEvent], parameter[constant[response], list[[<ast.Name object at 0x7da1b23f8970>, <ast.Name object at 0x7da1b23f91e0>, <ast.Name object at 0x7da1b23f8460>]]]]]]
if compare[name[self].errorcheckingchain is_not constant[None]] begin[:]
call[call[name[self].errorcheckingchain][constant[0]], parameter[name[data], name[sw1], name[sw2]]]
return[tuple[[<ast.Name object at 0x7da1b23f9540>, <ast.Name object at 0x7da1b23f8130>, <ast.Name object at 0x7da1b23f9150>]]] | keyword[def] identifier[transmit] ( identifier[self] , identifier[bytes] , identifier[protocol] = keyword[None] ):
literal[string]
identifier[Observable] . identifier[setChanged] ( identifier[self] )
identifier[Observable] . identifier[notifyObservers] ( identifier[self] ,
identifier[CardConnectionEvent] (
literal[string] ,
[ identifier[bytes] , identifier[protocol] ]))
identifier[data] , identifier[sw1] , identifier[sw2] = identifier[self] . identifier[doTransmit] ( identifier[bytes] , identifier[protocol] )
identifier[Observable] . identifier[setChanged] ( identifier[self] )
identifier[Observable] . identifier[notifyObservers] ( identifier[self] ,
identifier[CardConnectionEvent] (
literal[string] ,
[ identifier[data] , identifier[sw1] , identifier[sw2] ]))
keyword[if] identifier[self] . identifier[errorcheckingchain] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[errorcheckingchain] [ literal[int] ]( identifier[data] , identifier[sw1] , identifier[sw2] )
keyword[return] identifier[data] , identifier[sw1] , identifier[sw2] | def transmit(self, bytes, protocol=None):
"""Transmit an apdu. Internally calls doTransmit() class method
and notify observers upon command/response APDU events.
Subclasses must override the doTransmit() class method.
@param bytes: list of bytes to transmit
@param protocol: the transmission protocol, from
CardConnection.T0_protocol,
CardConnection.T1_protocol, or
CardConnection.RAW_protocol
"""
Observable.setChanged(self)
Observable.notifyObservers(self, CardConnectionEvent('command', [bytes, protocol]))
(data, sw1, sw2) = self.doTransmit(bytes, protocol)
Observable.setChanged(self)
Observable.notifyObservers(self, CardConnectionEvent('response', [data, sw1, sw2]))
if self.errorcheckingchain is not None:
self.errorcheckingchain[0](data, sw1, sw2) # depends on [control=['if'], data=[]]
return (data, sw1, sw2) |
def run(self):
"""Run the :class:`aiohttp.web.Application` for the exporter."""
run_app(
self.app,
host=self.host,
port=self.port,
print=lambda *args, **kargs: None,
access_log_format='%a "%r" %s %b "%{Referrer}i" "%{User-Agent}i"') | def function[run, parameter[self]]:
constant[Run the :class:`aiohttp.web.Application` for the exporter.]
call[name[run_app], parameter[name[self].app]] | keyword[def] identifier[run] ( identifier[self] ):
literal[string]
identifier[run_app] (
identifier[self] . identifier[app] ,
identifier[host] = identifier[self] . identifier[host] ,
identifier[port] = identifier[self] . identifier[port] ,
identifier[print] = keyword[lambda] * identifier[args] ,** identifier[kargs] : keyword[None] ,
identifier[access_log_format] = literal[string] ) | def run(self):
"""Run the :class:`aiohttp.web.Application` for the exporter."""
run_app(self.app, host=self.host, port=self.port, print=lambda *args, **kargs: None, access_log_format='%a "%r" %s %b "%{Referrer}i" "%{User-Agent}i"') |
def signal_alias_exists(alias: str) -> bool:
"""
Checks if signal alias exists.
:param alias: Signal alias.
:return:
"""
if SignalDispatcher.signals.get(alias):
return True
return False | def function[signal_alias_exists, parameter[alias]]:
constant[
Checks if signal alias exists.
:param alias: Signal alias.
:return:
]
if call[name[SignalDispatcher].signals.get, parameter[name[alias]]] begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[signal_alias_exists] ( identifier[alias] : identifier[str] )-> identifier[bool] :
literal[string]
keyword[if] identifier[SignalDispatcher] . identifier[signals] . identifier[get] ( identifier[alias] ):
keyword[return] keyword[True]
keyword[return] keyword[False] | def signal_alias_exists(alias: str) -> bool:
"""
Checks if signal alias exists.
:param alias: Signal alias.
:return:
"""
if SignalDispatcher.signals.get(alias):
return True # depends on [control=['if'], data=[]]
return False |
def HashFilePath(self, path, byte_count):
"""Updates underlying hashers with file on a given path.
Args:
path: A path to the file that is going to be fed to the hashers.
byte_count: A maximum numbers of bytes that are going to be processed.
"""
with open(path, "rb") as fd:
self.HashFile(fd, byte_count) | def function[HashFilePath, parameter[self, path, byte_count]]:
constant[Updates underlying hashers with file on a given path.
Args:
path: A path to the file that is going to be fed to the hashers.
byte_count: A maximum numbers of bytes that are going to be processed.
]
with call[name[open], parameter[name[path], constant[rb]]] begin[:]
call[name[self].HashFile, parameter[name[fd], name[byte_count]]] | keyword[def] identifier[HashFilePath] ( identifier[self] , identifier[path] , identifier[byte_count] ):
literal[string]
keyword[with] identifier[open] ( identifier[path] , literal[string] ) keyword[as] identifier[fd] :
identifier[self] . identifier[HashFile] ( identifier[fd] , identifier[byte_count] ) | def HashFilePath(self, path, byte_count):
"""Updates underlying hashers with file on a given path.
Args:
path: A path to the file that is going to be fed to the hashers.
byte_count: A maximum numbers of bytes that are going to be processed.
"""
with open(path, 'rb') as fd:
self.HashFile(fd, byte_count) # depends on [control=['with'], data=['fd']] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.