code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def create_user(self, name, password, database_names, host=None):
    """
    Create a user with the given name and password and grant it access
    to the specified database(s).

    If a user with that name already exists, a BadRequest (400)
    exception will be raised.
    """
    # Accept a single database (name or object) as well as a sequence.
    if not isinstance(database_names, (list, tuple)):
        database_names = [database_names]
    # The API only accepts names, not DB objects.
    db_names = []
    for db in database_names:
        db_names.append(db if isinstance(db, six.string_types) else db.name)
    self._user_manager.create(name=name, password=password,
            database_names=db_names, host=host, return_none=True)
    # The create call returns no user info, so look the user up
    # afterwards to build the return object manually.
    return self._user_manager.find(name=name)
constant[
Creates a user with the specified name and password, and gives that
user access to the specified database(s).
If a user with that name already exists, a BadRequest (400) exception
will be raised.
]
if <ast.UnaryOp object at 0x7da1b055ae90> begin[:]
variable[database_names] assign[=] list[[<ast.Name object at 0x7da1b0558070>]]
variable[database_names] assign[=] <ast.ListComp object at 0x7da1b0558df0>
call[name[self]._user_manager.create, parameter[]]
return[call[name[self]._user_manager.find, parameter[]]] | keyword[def] identifier[create_user] ( identifier[self] , identifier[name] , identifier[password] , identifier[database_names] , identifier[host] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[database_names] ,( identifier[list] , identifier[tuple] )):
identifier[database_names] =[ identifier[database_names] ]
identifier[database_names] =[ identifier[db] keyword[if] identifier[isinstance] ( identifier[db] , identifier[six] . identifier[string_types] ) keyword[else] identifier[db] . identifier[name]
keyword[for] identifier[db] keyword[in] identifier[database_names] ]
identifier[self] . identifier[_user_manager] . identifier[create] ( identifier[name] = identifier[name] , identifier[password] = identifier[password] ,
identifier[database_names] = identifier[database_names] , identifier[host] = identifier[host] , identifier[return_none] = keyword[True] )
keyword[return] identifier[self] . identifier[_user_manager] . identifier[find] ( identifier[name] = identifier[name] ) | def create_user(self, name, password, database_names, host=None):
"""
Creates a user with the specified name and password, and gives that
user access to the specified database(s).
If a user with that name already exists, a BadRequest (400) exception
will be raised.
"""
if not isinstance(database_names, (list, tuple)):
database_names = [database_names] # depends on [control=['if'], data=[]]
# The API only accepts names, not DB objects
database_names = [db if isinstance(db, six.string_types) else db.name for db in database_names]
self._user_manager.create(name=name, password=password, database_names=database_names, host=host, return_none=True)
# Since the API doesn't return the info for creating the user object,
# we have to do it manually.
return self._user_manager.find(name=name) |
def from_bson_voronoi_list(bson_nb_voro_list, structure):
    """
    Returns the voronoi_list needed for the VoronoiContainer object from a
    bson-encoded voronoi_list (composed of vlist and bson_nb_voro_list).

    :param bson_nb_voro_list: List of periodic sites involved in the Voronoi
    :param structure: Structure whose sites are referenced by index in each
        entry's ``dd['index']``
    :return: The voronoi_list needed for the VoronoiContainer (with
        PeriodicSites as keys of the dictionary - not allowed in the BSON
        format)
    """
    voronoi_list = [None] * len(bson_nb_voro_list)
    for site_index, site_voro in enumerate(bson_nb_voro_list):
        # Sites without Voronoi data may be stored as None (or the
        # string 'None'); leave those entries as None in the output.
        if site_voro is None or site_voro == 'None':
            continue
        entries = []
        for psd, dd in site_voro:
            src_site = structure[dd['index']]
            periodic_site = PeriodicSite(src_site._species,
                                         src_site.frac_coords + psd[1],
                                         src_site._lattice,
                                         properties=src_site.properties)
            entries.append((periodic_site, dd))
        voronoi_list[site_index] = entries
    return voronoi_list
constant[
Returns the voronoi_list needed for the VoronoiContainer object from a bson-encoded voronoi_list (composed of
vlist and bson_nb_voro_list).
:param vlist: List of voronoi objects
:param bson_nb_voro_list: List of periodic sites involved in the Voronoi
:return: The voronoi_list needed for the VoronoiContainer (with PeriodicSites as keys of the dictionary - not
allowed in the BSON format)
]
variable[voronoi_list] assign[=] binary_operation[list[[<ast.Constant object at 0x7da204344670>]] * call[name[len], parameter[name[bson_nb_voro_list]]]]
for taget[tuple[[<ast.Name object at 0x7da204346dd0>, <ast.Name object at 0x7da204347490>]]] in starred[call[name[enumerate], parameter[name[bson_nb_voro_list]]]] begin[:]
if <ast.BoolOp object at 0x7da204344850> begin[:]
continue
call[name[voronoi_list]][name[isite]] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da18f00f400>, <ast.Name object at 0x7da18f00fd30>]]] in starred[name[voro]] begin[:]
variable[struct_site] assign[=] call[name[structure]][call[name[dd]][constant[index]]]
variable[periodic_site] assign[=] call[name[PeriodicSite], parameter[name[struct_site]._species, binary_operation[name[struct_site].frac_coords + call[name[psd]][constant[1]]], name[struct_site]._lattice]]
call[call[name[voronoi_list]][name[isite]].append, parameter[tuple[[<ast.Name object at 0x7da18f00d750>, <ast.Name object at 0x7da18f00d6c0>]]]]
return[name[voronoi_list]] | keyword[def] identifier[from_bson_voronoi_list] ( identifier[bson_nb_voro_list] , identifier[structure] ):
literal[string]
identifier[voronoi_list] =[ keyword[None] ]* identifier[len] ( identifier[bson_nb_voro_list] )
keyword[for] identifier[isite] , identifier[voro] keyword[in] identifier[enumerate] ( identifier[bson_nb_voro_list] ):
keyword[if] identifier[voro] keyword[is] keyword[None] keyword[or] identifier[voro] == literal[string] :
keyword[continue]
identifier[voronoi_list] [ identifier[isite] ]=[]
keyword[for] identifier[psd] , identifier[dd] keyword[in] identifier[voro] :
identifier[struct_site] = identifier[structure] [ identifier[dd] [ literal[string] ]]
identifier[periodic_site] = identifier[PeriodicSite] ( identifier[struct_site] . identifier[_species] , identifier[struct_site] . identifier[frac_coords] + identifier[psd] [ literal[int] ],
identifier[struct_site] . identifier[_lattice] , identifier[properties] = identifier[struct_site] . identifier[properties] )
identifier[voronoi_list] [ identifier[isite] ]. identifier[append] (( identifier[periodic_site] , identifier[dd] ))
keyword[return] identifier[voronoi_list] | def from_bson_voronoi_list(bson_nb_voro_list, structure):
"""
Returns the voronoi_list needed for the VoronoiContainer object from a bson-encoded voronoi_list (composed of
vlist and bson_nb_voro_list).
:param vlist: List of voronoi objects
:param bson_nb_voro_list: List of periodic sites involved in the Voronoi
:return: The voronoi_list needed for the VoronoiContainer (with PeriodicSites as keys of the dictionary - not
allowed in the BSON format)
"""
voronoi_list = [None] * len(bson_nb_voro_list)
for (isite, voro) in enumerate(bson_nb_voro_list):
if voro is None or voro == 'None':
continue # depends on [control=['if'], data=[]]
voronoi_list[isite] = []
for (psd, dd) in voro:
struct_site = structure[dd['index']]
periodic_site = PeriodicSite(struct_site._species, struct_site.frac_coords + psd[1], struct_site._lattice, properties=struct_site.properties)
voronoi_list[isite].append((periodic_site, dd)) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
return voronoi_list |
def apply_realms(self):
    """
    Propagate ``self.realms`` to this object's collaborators.

    ``self.realms`` is an immutable collection (tuple) of one or more
    realms; it is handed to both the authenticator and the authorizer so
    that each can initialize its realm-dependent state.
    """
    self.authenticator.init_realms(self.realms)
    self.authorizer.init_realms(self.realms)
constant[
:realm_s: an immutable collection of one or more realms
:type realm_s: tuple
]
call[name[self].authenticator.init_realms, parameter[name[self].realms]]
call[name[self].authorizer.init_realms, parameter[name[self].realms]] | keyword[def] identifier[apply_realms] ( identifier[self] ):
literal[string]
identifier[self] . identifier[authenticator] . identifier[init_realms] ( identifier[self] . identifier[realms] )
identifier[self] . identifier[authorizer] . identifier[init_realms] ( identifier[self] . identifier[realms] ) | def apply_realms(self):
"""
:realm_s: an immutable collection of one or more realms
:type realm_s: tuple
"""
self.authenticator.init_realms(self.realms)
self.authorizer.init_realms(self.realms) |
def to_file( Class, dict, file, is_little_endian=True ):
    """
    For constructing a CDB structure in a file. Able to calculate size on
    disk and write to a file.

    :param dict: mapping of byte-string keys to byte-string values.
        (The name shadows the builtin but is kept for caller
        compatibility.)
    :param file: writable binary file object positioned at the start of
        the region the CDB should occupy
    :param is_little_endian: byte order used for every uint32 field
    """
    io = BinaryFileWriter( file, is_little_endian=is_little_endian )
    start_offset = io.tell()
    # Header is of fixed length: 256 (subtable offset, cell count) pairs.
    io.seek( start_offset + ( 8 * 256 ) )
    # For each item, key and value length (written as length prefixed
    # strings). We also calculate the subtables on this pass.
    # NOTE: This requires the key and value be byte strings, support for
    # dealing with encoding specific value types should be
    # added to this wrapper
    subtables = [ [] for i in range(256) ]
    for key, value in dict.items():
        pair_offset = io.tell()
        io.write_uint32( len( key ) )
        io.write_uint32( len( value ) )
        io.write( key )
        io.write( value )
        hash = cdbhash( key )
        subtables[ hash % 256 ].append( ( hash, pair_offset ) )
    # Save the offset where the subtables will start
    subtable_offset = io.tell()
    # Write subtables
    for subtable in subtables:
        if len( subtable ) > 0:
            # Construct hashtable to be twice the size of the number
            # of items in the subtable, and build it in memory
            ncells = len( subtable ) * 2
            cells = [ (0,0) for i in range( ncells ) ]
            for hash, pair_offset in subtable:
                index = ( hash >> 8 ) % ncells
                while cells[index][1] != 0:
                    index = ( index + 1 ) % ncells
                # Guaranteed to find an empty cell, since the table has
                # twice as many cells as entries
                cells[index] = ( hash, pair_offset )
            # Write subtable
            for hash, pair_offset in cells:
                io.write_uint32( hash )
                io.write_uint32( pair_offset )
    # Go back and write the header
    end_offset = io.tell()
    io.seek( start_offset )
    index = subtable_offset
    for subtable in subtables:
        io.write_uint32( index )
        # FIX: was len( subtable * 2 ), which concatenated a throwaway
        # doubled list just to measure it; the cell count is simply
        # twice the number of items (same value, no extra allocation).
        io.write_uint32( len( subtable ) * 2 )
        # For each cell in the subtable, a hash and a pointer to a value
        index += ( len( subtable ) * 2 ) * 8
    # Leave fp at end of cdb
    io.seek( end_offset )
constant[
For constructing a CDB structure in a file. Able to calculate size on
disk and write to a file
]
variable[io] assign[=] call[name[BinaryFileWriter], parameter[name[file]]]
variable[start_offset] assign[=] call[name[io].tell, parameter[]]
call[name[io].seek, parameter[binary_operation[name[start_offset] + binary_operation[constant[8] * constant[256]]]]]
variable[subtables] assign[=] <ast.ListComp object at 0x7da1b0e26200>
for taget[tuple[[<ast.Name object at 0x7da1b0e261d0>, <ast.Name object at 0x7da1b0e245e0>]]] in starred[call[name[dict].items, parameter[]]] begin[:]
variable[pair_offset] assign[=] call[name[io].tell, parameter[]]
call[name[io].write_uint32, parameter[call[name[len], parameter[name[key]]]]]
call[name[io].write_uint32, parameter[call[name[len], parameter[name[value]]]]]
call[name[io].write, parameter[name[key]]]
call[name[io].write, parameter[name[value]]]
variable[hash] assign[=] call[name[cdbhash], parameter[name[key]]]
call[call[name[subtables]][binary_operation[name[hash] <ast.Mod object at 0x7da2590d6920> constant[256]]].append, parameter[tuple[[<ast.Name object at 0x7da1b0e25b70>, <ast.Name object at 0x7da1b0e26260>]]]]
variable[subtable_offset] assign[=] call[name[io].tell, parameter[]]
for taget[name[subtable]] in starred[name[subtables]] begin[:]
if compare[call[name[len], parameter[name[subtable]]] greater[>] constant[0]] begin[:]
variable[ncells] assign[=] binary_operation[call[name[len], parameter[name[subtable]]] * constant[2]]
variable[cells] assign[=] <ast.ListComp object at 0x7da1b0efe6e0>
for taget[tuple[[<ast.Name object at 0x7da1b0efe7d0>, <ast.Name object at 0x7da1b0efc3a0>]]] in starred[name[subtable]] begin[:]
variable[index] assign[=] binary_operation[binary_operation[name[hash] <ast.RShift object at 0x7da2590d6a40> constant[8]] <ast.Mod object at 0x7da2590d6920> name[ncells]]
while compare[call[call[name[cells]][name[index]]][constant[1]] not_equal[!=] constant[0]] begin[:]
variable[index] assign[=] binary_operation[binary_operation[name[index] + constant[1]] <ast.Mod object at 0x7da2590d6920> name[ncells]]
call[name[cells]][name[index]] assign[=] tuple[[<ast.Name object at 0x7da1b0effb80>, <ast.Name object at 0x7da1b0effd90>]]
for taget[tuple[[<ast.Name object at 0x7da1b0efdff0>, <ast.Name object at 0x7da1b0eff790>]]] in starred[name[cells]] begin[:]
call[name[io].write_uint32, parameter[name[hash]]]
call[name[io].write_uint32, parameter[name[pair_offset]]]
variable[end_offset] assign[=] call[name[io].tell, parameter[]]
call[name[io].seek, parameter[name[start_offset]]]
variable[index] assign[=] name[subtable_offset]
for taget[name[subtable]] in starred[name[subtables]] begin[:]
call[name[io].write_uint32, parameter[name[index]]]
call[name[io].write_uint32, parameter[call[name[len], parameter[binary_operation[name[subtable] * constant[2]]]]]]
<ast.AugAssign object at 0x7da1b0d89b10>
call[name[io].seek, parameter[name[end_offset]]] | keyword[def] identifier[to_file] ( identifier[Class] , identifier[dict] , identifier[file] , identifier[is_little_endian] = keyword[True] ):
literal[string]
identifier[io] = identifier[BinaryFileWriter] ( identifier[file] , identifier[is_little_endian] = identifier[is_little_endian] )
identifier[start_offset] = identifier[io] . identifier[tell] ()
identifier[io] . identifier[seek] ( identifier[start_offset] +( literal[int] * literal[int] ))
identifier[subtables] =[[] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] )]
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[dict] . identifier[items] ():
identifier[pair_offset] = identifier[io] . identifier[tell] ()
identifier[io] . identifier[write_uint32] ( identifier[len] ( identifier[key] ))
identifier[io] . identifier[write_uint32] ( identifier[len] ( identifier[value] ))
identifier[io] . identifier[write] ( identifier[key] )
identifier[io] . identifier[write] ( identifier[value] )
identifier[hash] = identifier[cdbhash] ( identifier[key] )
identifier[subtables] [ identifier[hash] % literal[int] ]. identifier[append] (( identifier[hash] , identifier[pair_offset] ))
identifier[subtable_offset] = identifier[io] . identifier[tell] ()
keyword[for] identifier[subtable] keyword[in] identifier[subtables] :
keyword[if] identifier[len] ( identifier[subtable] )> literal[int] :
identifier[ncells] = identifier[len] ( identifier[subtable] )* literal[int]
identifier[cells] =[( literal[int] , literal[int] ) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[ncells] )]
keyword[for] identifier[hash] , identifier[pair_offset] keyword[in] identifier[subtable] :
identifier[index] =( identifier[hash] >> literal[int] )% identifier[ncells]
keyword[while] identifier[cells] [ identifier[index] ][ literal[int] ]!= literal[int] :
identifier[index] =( identifier[index] + literal[int] )% identifier[ncells]
identifier[cells] [ identifier[index] ]=( identifier[hash] , identifier[pair_offset] )
keyword[for] identifier[hash] , identifier[pair_offset] keyword[in] identifier[cells] :
identifier[io] . identifier[write_uint32] ( identifier[hash] )
identifier[io] . identifier[write_uint32] ( identifier[pair_offset] )
identifier[end_offset] = identifier[io] . identifier[tell] ()
identifier[io] . identifier[seek] ( identifier[start_offset] )
identifier[index] = identifier[subtable_offset]
keyword[for] identifier[subtable] keyword[in] identifier[subtables] :
identifier[io] . identifier[write_uint32] ( identifier[index] )
identifier[io] . identifier[write_uint32] ( identifier[len] ( identifier[subtable] * literal[int] ))
identifier[index] +=( identifier[len] ( identifier[subtable] )* literal[int] )* literal[int]
identifier[io] . identifier[seek] ( identifier[end_offset] ) | def to_file(Class, dict, file, is_little_endian=True):
"""
For constructing a CDB structure in a file. Able to calculate size on
disk and write to a file
"""
io = BinaryFileWriter(file, is_little_endian=is_little_endian)
start_offset = io.tell()
# Header is of fixed length
io.seek(start_offset + 8 * 256)
# For each item, key and value length (written as length prefixed
# strings). We also calculate the subtables on this pass.
# NOTE: This requires the key and value be byte strings, support for
# dealing with encoding specific value types should be
# added to this wrapper
subtables = [[] for i in range(256)]
for (key, value) in dict.items():
pair_offset = io.tell()
io.write_uint32(len(key))
io.write_uint32(len(value))
io.write(key)
io.write(value)
hash = cdbhash(key)
subtables[hash % 256].append((hash, pair_offset)) # depends on [control=['for'], data=[]]
# Save the offset where the subtables will start
subtable_offset = io.tell()
# Write subtables
for subtable in subtables:
if len(subtable) > 0:
# Construct hashtable to be twice the size of the number
# of items in the subtable, and built it in memory
ncells = len(subtable) * 2
cells = [(0, 0) for i in range(ncells)]
for (hash, pair_offset) in subtable:
index = (hash >> 8) % ncells
while cells[index][1] != 0:
index = (index + 1) % ncells # depends on [control=['while'], data=[]]
# Guaranteed to find a non-empty cell
cells[index] = (hash, pair_offset) # depends on [control=['for'], data=[]]
# Write subtable
for (hash, pair_offset) in cells:
io.write_uint32(hash)
io.write_uint32(pair_offset) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['subtable']]
# Go back and write the header
end_offset = io.tell()
io.seek(start_offset)
index = subtable_offset
for subtable in subtables:
io.write_uint32(index)
io.write_uint32(len(subtable * 2))
# For each cell in the subtable, a hash and a pointer to a value
index += len(subtable) * 2 * 8 # depends on [control=['for'], data=['subtable']]
# Leave fp at end of cdb
io.seek(end_offset) |
def _to_ctfile(self):
"""Convert :class:`~ctfile.ctfile.CTfile` into `CTfile` formatted string.
:return: ``CTfile`` formatted string.
:rtype: :py:class:`str`.
"""
output = io.StringIO()
for key in self:
if key == 'HeaderBlock':
for line in self[key].values():
output.write(line)
output.write('\n')
elif key == 'Ctab':
ctab_str = self[key]._to_ctfile()
output.write(ctab_str)
else:
raise KeyError('Molfile object does not supposed to have any other information: "{}".'.format(key))
return output.getvalue() | def function[_to_ctfile, parameter[self]]:
constant[Convert :class:`~ctfile.ctfile.CTfile` into `CTfile` formatted string.
:return: ``CTfile`` formatted string.
:rtype: :py:class:`str`.
]
variable[output] assign[=] call[name[io].StringIO, parameter[]]
for taget[name[key]] in starred[name[self]] begin[:]
if compare[name[key] equal[==] constant[HeaderBlock]] begin[:]
for taget[name[line]] in starred[call[call[name[self]][name[key]].values, parameter[]]] begin[:]
call[name[output].write, parameter[name[line]]]
call[name[output].write, parameter[constant[
]]]
return[call[name[output].getvalue, parameter[]]] | keyword[def] identifier[_to_ctfile] ( identifier[self] ):
literal[string]
identifier[output] = identifier[io] . identifier[StringIO] ()
keyword[for] identifier[key] keyword[in] identifier[self] :
keyword[if] identifier[key] == literal[string] :
keyword[for] identifier[line] keyword[in] identifier[self] [ identifier[key] ]. identifier[values] ():
identifier[output] . identifier[write] ( identifier[line] )
identifier[output] . identifier[write] ( literal[string] )
keyword[elif] identifier[key] == literal[string] :
identifier[ctab_str] = identifier[self] [ identifier[key] ]. identifier[_to_ctfile] ()
identifier[output] . identifier[write] ( identifier[ctab_str] )
keyword[else] :
keyword[raise] identifier[KeyError] ( literal[string] . identifier[format] ( identifier[key] ))
keyword[return] identifier[output] . identifier[getvalue] () | def _to_ctfile(self):
"""Convert :class:`~ctfile.ctfile.CTfile` into `CTfile` formatted string.
:return: ``CTfile`` formatted string.
:rtype: :py:class:`str`.
"""
output = io.StringIO()
for key in self:
if key == 'HeaderBlock':
for line in self[key].values():
output.write(line)
output.write('\n') # depends on [control=['for'], data=['line']] # depends on [control=['if'], data=['key']]
elif key == 'Ctab':
ctab_str = self[key]._to_ctfile()
output.write(ctab_str) # depends on [control=['if'], data=['key']]
else:
raise KeyError('Molfile object does not supposed to have any other information: "{}".'.format(key)) # depends on [control=['for'], data=['key']]
return output.getvalue() |
def _replicate(n, tensor):
  """Replicate the input tensor n times along a new (major) dimension."""
  # TODO(axch) Does this already exist somewhere? Should it get contributed?
  # Tile counts: n copies along the new leading axis, one copy along
  # every existing axis.
  tile_counts = tf.concat([[n], tf.ones_like(tensor.shape)], axis=0)
  expanded = tf.expand_dims(tensor, axis=0)
  return tf.tile(expanded, tile_counts)
constant[Replicate the input tensor n times along a new (major) dimension.]
variable[multiples] assign[=] call[name[tf].concat, parameter[list[[<ast.List object at 0x7da1b0322f80>, <ast.Call object at 0x7da1b0322dd0>]]]]
return[call[name[tf].tile, parameter[call[name[tf].expand_dims, parameter[name[tensor]]], name[multiples]]]] | keyword[def] identifier[_replicate] ( identifier[n] , identifier[tensor] ):
literal[string]
identifier[multiples] = identifier[tf] . identifier[concat] ([[ identifier[n] ], identifier[tf] . identifier[ones_like] ( identifier[tensor] . identifier[shape] )], identifier[axis] = literal[int] )
keyword[return] identifier[tf] . identifier[tile] ( identifier[tf] . identifier[expand_dims] ( identifier[tensor] , identifier[axis] = literal[int] ), identifier[multiples] ) | def _replicate(n, tensor):
"""Replicate the input tensor n times along a new (major) dimension."""
# TODO(axch) Does this already exist somewhere? Should it get contributed?
multiples = tf.concat([[n], tf.ones_like(tensor.shape)], axis=0)
return tf.tile(tf.expand_dims(tensor, axis=0), multiples) |
def _resolve_deps(self, formula_def):
    '''
    Return a list of packages which need to be installed, to resolve all
    dependencies
    '''
    # Look up the package's own install info; normalize a non-dict
    # result to an empty dict.
    # NOTE(review): pkg_info is normalized but otherwise unused here.
    pkg_info = self.pkgdb['{0}.info'.format(self.db_prov)](formula_def['name'])
    if not isinstance(pkg_info, dict):
        pkg_info = {}
    # can_has maps resolvable dependency name -> repo that provides it;
    # cant_has collects names no configured repo can satisfy.
    can_has = {}
    cant_has = []
    if 'dependencies' in formula_def and formula_def['dependencies'] is None:
        formula_def['dependencies'] = ''
    # Dependencies are a comma-separated string of package names.
    for dep in formula_def.get('dependencies', '').split(','):
        dep = dep.strip()
        if not dep:
            continue
        # Skip dependencies that are already installed locally.
        if self.pkgdb['{0}.info'.format(self.db_prov)](dep):
            continue
        if dep in self.avail_pkgs:
            can_has[dep] = self.avail_pkgs[dep]
        else:
            cant_has.append(dep)
    optional = formula_def.get('optional', '').split(',')
    recommended = formula_def.get('recommended', '').split(',')
    inspected = []
    to_inspect = can_has.copy()
    # Recursively resolve transitive dependencies of each resolvable
    # dependency, merging their results into the accumulators above.
    while to_inspect:
        dep = next(six.iterkeys(to_inspect))
        del to_inspect[dep]
        # Don't try to resolve the same package more than once
        if dep in inspected:
            continue
        inspected.append(dep)
        # Pull the dependency's own formula out of its repo's metadata.
        repo_contents = self.repo_metadata.get(can_has[dep], {})
        repo_packages = repo_contents.get('packages', {})
        dep_formula = repo_packages.get(dep, {}).get('info', {})
        also_can, also_cant, opt_dep, rec_dep = self._resolve_deps(dep_formula)
        can_has.update(also_can)
        # Keep the accumulated lists deduplicated and sorted.
        cant_has = sorted(set(cant_has + also_cant))
        optional = sorted(set(optional + opt_dep))
        recommended = sorted(set(recommended + rec_dep))
    return can_has, cant_has, optional, recommended
constant[
Return a list of packages which need to be installed, to resolve all
dependencies
]
variable[pkg_info] assign[=] call[call[name[self].pkgdb][call[constant[{0}.info].format, parameter[name[self].db_prov]]], parameter[call[name[formula_def]][constant[name]]]]
if <ast.UnaryOp object at 0x7da18f722cb0> begin[:]
variable[pkg_info] assign[=] dictionary[[], []]
variable[can_has] assign[=] dictionary[[], []]
variable[cant_has] assign[=] list[[]]
if <ast.BoolOp object at 0x7da18f721600> begin[:]
call[name[formula_def]][constant[dependencies]] assign[=] constant[]
for taget[name[dep]] in starred[call[call[name[formula_def].get, parameter[constant[dependencies], constant[]]].split, parameter[constant[,]]]] begin[:]
variable[dep] assign[=] call[name[dep].strip, parameter[]]
if <ast.UnaryOp object at 0x7da18f7219f0> begin[:]
continue
if call[call[name[self].pkgdb][call[constant[{0}.info].format, parameter[name[self].db_prov]]], parameter[name[dep]]] begin[:]
continue
if compare[name[dep] in name[self].avail_pkgs] begin[:]
call[name[can_has]][name[dep]] assign[=] call[name[self].avail_pkgs][name[dep]]
variable[optional] assign[=] call[call[name[formula_def].get, parameter[constant[optional], constant[]]].split, parameter[constant[,]]]
variable[recommended] assign[=] call[call[name[formula_def].get, parameter[constant[recommended], constant[]]].split, parameter[constant[,]]]
variable[inspected] assign[=] list[[]]
variable[to_inspect] assign[=] call[name[can_has].copy, parameter[]]
while name[to_inspect] begin[:]
variable[dep] assign[=] call[name[next], parameter[call[name[six].iterkeys, parameter[name[to_inspect]]]]]
<ast.Delete object at 0x7da20c796a70>
if compare[name[dep] in name[inspected]] begin[:]
continue
call[name[inspected].append, parameter[name[dep]]]
variable[repo_contents] assign[=] call[name[self].repo_metadata.get, parameter[call[name[can_has]][name[dep]], dictionary[[], []]]]
variable[repo_packages] assign[=] call[name[repo_contents].get, parameter[constant[packages], dictionary[[], []]]]
variable[dep_formula] assign[=] call[call[name[repo_packages].get, parameter[name[dep], dictionary[[], []]]].get, parameter[constant[info], dictionary[[], []]]]
<ast.Tuple object at 0x7da20cabdf90> assign[=] call[name[self]._resolve_deps, parameter[name[dep_formula]]]
call[name[can_has].update, parameter[name[also_can]]]
variable[cant_has] assign[=] call[name[sorted], parameter[call[name[set], parameter[binary_operation[name[cant_has] + name[also_cant]]]]]]
variable[optional] assign[=] call[name[sorted], parameter[call[name[set], parameter[binary_operation[name[optional] + name[opt_dep]]]]]]
variable[recommended] assign[=] call[name[sorted], parameter[call[name[set], parameter[binary_operation[name[recommended] + name[rec_dep]]]]]]
return[tuple[[<ast.Name object at 0x7da20e9b1570>, <ast.Name object at 0x7da20e9b31f0>, <ast.Name object at 0x7da20e9b1ea0>, <ast.Name object at 0x7da20e9b0c10>]]] | keyword[def] identifier[_resolve_deps] ( identifier[self] , identifier[formula_def] ):
literal[string]
identifier[pkg_info] = identifier[self] . identifier[pkgdb] [ literal[string] . identifier[format] ( identifier[self] . identifier[db_prov] )]( identifier[formula_def] [ literal[string] ])
keyword[if] keyword[not] identifier[isinstance] ( identifier[pkg_info] , identifier[dict] ):
identifier[pkg_info] ={}
identifier[can_has] ={}
identifier[cant_has] =[]
keyword[if] literal[string] keyword[in] identifier[formula_def] keyword[and] identifier[formula_def] [ literal[string] ] keyword[is] keyword[None] :
identifier[formula_def] [ literal[string] ]= literal[string]
keyword[for] identifier[dep] keyword[in] identifier[formula_def] . identifier[get] ( literal[string] , literal[string] ). identifier[split] ( literal[string] ):
identifier[dep] = identifier[dep] . identifier[strip] ()
keyword[if] keyword[not] identifier[dep] :
keyword[continue]
keyword[if] identifier[self] . identifier[pkgdb] [ literal[string] . identifier[format] ( identifier[self] . identifier[db_prov] )]( identifier[dep] ):
keyword[continue]
keyword[if] identifier[dep] keyword[in] identifier[self] . identifier[avail_pkgs] :
identifier[can_has] [ identifier[dep] ]= identifier[self] . identifier[avail_pkgs] [ identifier[dep] ]
keyword[else] :
identifier[cant_has] . identifier[append] ( identifier[dep] )
identifier[optional] = identifier[formula_def] . identifier[get] ( literal[string] , literal[string] ). identifier[split] ( literal[string] )
identifier[recommended] = identifier[formula_def] . identifier[get] ( literal[string] , literal[string] ). identifier[split] ( literal[string] )
identifier[inspected] =[]
identifier[to_inspect] = identifier[can_has] . identifier[copy] ()
keyword[while] identifier[to_inspect] :
identifier[dep] = identifier[next] ( identifier[six] . identifier[iterkeys] ( identifier[to_inspect] ))
keyword[del] identifier[to_inspect] [ identifier[dep] ]
keyword[if] identifier[dep] keyword[in] identifier[inspected] :
keyword[continue]
identifier[inspected] . identifier[append] ( identifier[dep] )
identifier[repo_contents] = identifier[self] . identifier[repo_metadata] . identifier[get] ( identifier[can_has] [ identifier[dep] ],{})
identifier[repo_packages] = identifier[repo_contents] . identifier[get] ( literal[string] ,{})
identifier[dep_formula] = identifier[repo_packages] . identifier[get] ( identifier[dep] ,{}). identifier[get] ( literal[string] ,{})
identifier[also_can] , identifier[also_cant] , identifier[opt_dep] , identifier[rec_dep] = identifier[self] . identifier[_resolve_deps] ( identifier[dep_formula] )
identifier[can_has] . identifier[update] ( identifier[also_can] )
identifier[cant_has] = identifier[sorted] ( identifier[set] ( identifier[cant_has] + identifier[also_cant] ))
identifier[optional] = identifier[sorted] ( identifier[set] ( identifier[optional] + identifier[opt_dep] ))
identifier[recommended] = identifier[sorted] ( identifier[set] ( identifier[recommended] + identifier[rec_dep] ))
keyword[return] identifier[can_has] , identifier[cant_has] , identifier[optional] , identifier[recommended] | def _resolve_deps(self, formula_def):
"""
Return a list of packages which need to be installed, to resolve all
dependencies
"""
pkg_info = self.pkgdb['{0}.info'.format(self.db_prov)](formula_def['name'])
if not isinstance(pkg_info, dict):
pkg_info = {} # depends on [control=['if'], data=[]]
can_has = {}
cant_has = []
if 'dependencies' in formula_def and formula_def['dependencies'] is None:
formula_def['dependencies'] = '' # depends on [control=['if'], data=[]]
for dep in formula_def.get('dependencies', '').split(','):
dep = dep.strip()
if not dep:
continue # depends on [control=['if'], data=[]]
if self.pkgdb['{0}.info'.format(self.db_prov)](dep):
continue # depends on [control=['if'], data=[]]
if dep in self.avail_pkgs:
can_has[dep] = self.avail_pkgs[dep] # depends on [control=['if'], data=['dep']]
else:
cant_has.append(dep) # depends on [control=['for'], data=['dep']]
optional = formula_def.get('optional', '').split(',')
recommended = formula_def.get('recommended', '').split(',')
inspected = []
to_inspect = can_has.copy()
while to_inspect:
dep = next(six.iterkeys(to_inspect))
del to_inspect[dep]
# Don't try to resolve the same package more than once
if dep in inspected:
continue # depends on [control=['if'], data=[]]
inspected.append(dep)
repo_contents = self.repo_metadata.get(can_has[dep], {})
repo_packages = repo_contents.get('packages', {})
dep_formula = repo_packages.get(dep, {}).get('info', {})
(also_can, also_cant, opt_dep, rec_dep) = self._resolve_deps(dep_formula)
can_has.update(also_can)
cant_has = sorted(set(cant_has + also_cant))
optional = sorted(set(optional + opt_dep))
recommended = sorted(set(recommended + rec_dep)) # depends on [control=['while'], data=[]]
return (can_has, cant_has, optional, recommended) |
def t_OPEN_TAG(t):
r'<[?%]((php[ \t\r\n]?)|=)?'
if '=' in t.value: t.type = 'OPEN_TAG_WITH_ECHO'
t.lexer.lineno += t.value.count("\n")
t.lexer.begin('php')
return t | def function[t_OPEN_TAG, parameter[t]]:
constant[<[?%]((php[ \t\r\n]?)|=)?]
if compare[constant[=] in name[t].value] begin[:]
name[t].type assign[=] constant[OPEN_TAG_WITH_ECHO]
<ast.AugAssign object at 0x7da2054a6ec0>
call[name[t].lexer.begin, parameter[constant[php]]]
return[name[t]] | keyword[def] identifier[t_OPEN_TAG] ( identifier[t] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[t] . identifier[value] : identifier[t] . identifier[type] = literal[string]
identifier[t] . identifier[lexer] . identifier[lineno] += identifier[t] . identifier[value] . identifier[count] ( literal[string] )
identifier[t] . identifier[lexer] . identifier[begin] ( literal[string] )
keyword[return] identifier[t] | def t_OPEN_TAG(t):
"""<[?%]((php[ \\t\\r\\n]?)|=)?"""
if '=' in t.value:
t.type = 'OPEN_TAG_WITH_ECHO' # depends on [control=['if'], data=[]]
t.lexer.lineno += t.value.count('\n')
t.lexer.begin('php')
return t |
def find_local_id(self, name_id):
"""
Only find persistent IDs
:param name_id:
:return:
"""
try:
return self.db[name_id.text]
except KeyError:
logger.debug("name: %s", name_id.text)
#logger.debug("id sub keys: %s", self.subkeys())
return None | def function[find_local_id, parameter[self, name_id]]:
constant[
Only find persistent IDs
:param name_id:
:return:
]
<ast.Try object at 0x7da20e9602e0> | keyword[def] identifier[find_local_id] ( identifier[self] , identifier[name_id] ):
literal[string]
keyword[try] :
keyword[return] identifier[self] . identifier[db] [ identifier[name_id] . identifier[text] ]
keyword[except] identifier[KeyError] :
identifier[logger] . identifier[debug] ( literal[string] , identifier[name_id] . identifier[text] )
keyword[return] keyword[None] | def find_local_id(self, name_id):
"""
Only find persistent IDs
:param name_id:
:return:
"""
try:
return self.db[name_id.text] # depends on [control=['try'], data=[]]
except KeyError:
logger.debug('name: %s', name_id.text)
#logger.debug("id sub keys: %s", self.subkeys())
return None # depends on [control=['except'], data=[]] |
def _process_generic_param(pval, def_unit, equivalencies=[]):
"""Process generic model parameter."""
if isinstance(pval, u.Quantity):
outval = pval.to(def_unit, equivalencies).value
else: # Assume already in desired unit
outval = pval
return outval | def function[_process_generic_param, parameter[pval, def_unit, equivalencies]]:
constant[Process generic model parameter.]
if call[name[isinstance], parameter[name[pval], name[u].Quantity]] begin[:]
variable[outval] assign[=] call[name[pval].to, parameter[name[def_unit], name[equivalencies]]].value
return[name[outval]] | keyword[def] identifier[_process_generic_param] ( identifier[pval] , identifier[def_unit] , identifier[equivalencies] =[]):
literal[string]
keyword[if] identifier[isinstance] ( identifier[pval] , identifier[u] . identifier[Quantity] ):
identifier[outval] = identifier[pval] . identifier[to] ( identifier[def_unit] , identifier[equivalencies] ). identifier[value]
keyword[else] :
identifier[outval] = identifier[pval]
keyword[return] identifier[outval] | def _process_generic_param(pval, def_unit, equivalencies=[]):
"""Process generic model parameter."""
if isinstance(pval, u.Quantity):
outval = pval.to(def_unit, equivalencies).value # depends on [control=['if'], data=[]]
else: # Assume already in desired unit
outval = pval
return outval |
def _precompile_substitution(self, kind, pattern):
"""Pre-compile the regexp for a substitution pattern.
This will speed up the substitutions that happen at the beginning of
the reply fetching process. With the default brain, this took the
time for _substitute down from 0.08s to 0.02s
:param str kind: One of ``sub``, ``person``.
:param str pattern: The substitution pattern.
"""
if pattern not in self._regexc[kind]:
qm = re.escape(pattern)
self._regexc[kind][pattern] = {
"qm": qm,
"sub1": re.compile(r'^' + qm + r'$'),
"sub2": re.compile(r'^' + qm + r'(\W+)'),
"sub3": re.compile(r'(\W+)' + qm + r'(\W+)'),
"sub4": re.compile(r'(\W+)' + qm + r'$'),
} | def function[_precompile_substitution, parameter[self, kind, pattern]]:
constant[Pre-compile the regexp for a substitution pattern.
This will speed up the substitutions that happen at the beginning of
the reply fetching process. With the default brain, this took the
time for _substitute down from 0.08s to 0.02s
:param str kind: One of ``sub``, ``person``.
:param str pattern: The substitution pattern.
]
if compare[name[pattern] <ast.NotIn object at 0x7da2590d7190> call[name[self]._regexc][name[kind]]] begin[:]
variable[qm] assign[=] call[name[re].escape, parameter[name[pattern]]]
call[call[name[self]._regexc][name[kind]]][name[pattern]] assign[=] dictionary[[<ast.Constant object at 0x7da18fe93190>, <ast.Constant object at 0x7da18fe90700>, <ast.Constant object at 0x7da18fe91180>, <ast.Constant object at 0x7da18fe90310>, <ast.Constant object at 0x7da18fe92740>], [<ast.Name object at 0x7da18fe92350>, <ast.Call object at 0x7da18fe92200>, <ast.Call object at 0x7da20c6c4880>, <ast.Call object at 0x7da20c6c4190>, <ast.Call object at 0x7da20c6c46a0>]] | keyword[def] identifier[_precompile_substitution] ( identifier[self] , identifier[kind] , identifier[pattern] ):
literal[string]
keyword[if] identifier[pattern] keyword[not] keyword[in] identifier[self] . identifier[_regexc] [ identifier[kind] ]:
identifier[qm] = identifier[re] . identifier[escape] ( identifier[pattern] )
identifier[self] . identifier[_regexc] [ identifier[kind] ][ identifier[pattern] ]={
literal[string] : identifier[qm] ,
literal[string] : identifier[re] . identifier[compile] ( literal[string] + identifier[qm] + literal[string] ),
literal[string] : identifier[re] . identifier[compile] ( literal[string] + identifier[qm] + literal[string] ),
literal[string] : identifier[re] . identifier[compile] ( literal[string] + identifier[qm] + literal[string] ),
literal[string] : identifier[re] . identifier[compile] ( literal[string] + identifier[qm] + literal[string] ),
} | def _precompile_substitution(self, kind, pattern):
"""Pre-compile the regexp for a substitution pattern.
This will speed up the substitutions that happen at the beginning of
the reply fetching process. With the default brain, this took the
time for _substitute down from 0.08s to 0.02s
:param str kind: One of ``sub``, ``person``.
:param str pattern: The substitution pattern.
"""
if pattern not in self._regexc[kind]:
qm = re.escape(pattern)
self._regexc[kind][pattern] = {'qm': qm, 'sub1': re.compile('^' + qm + '$'), 'sub2': re.compile('^' + qm + '(\\W+)'), 'sub3': re.compile('(\\W+)' + qm + '(\\W+)'), 'sub4': re.compile('(\\W+)' + qm + '$')} # depends on [control=['if'], data=['pattern']] |
def check_field_multiplicity(tag, previous_tags):
"""
Check the multiplicity of a 'field' for an object.
"""
fail = False
#If the field is single
if not tag.field.multiple:
#If the tag is being created...
if not tag.id:
#... and the new field was already included in the previous tags,
#fail
fail = previous_tags.filter(field=tag.field)
#If the tag is being modifying...
else:
#... but there is only one previous tag (the one that is being
#modifying), do not fail
fail = previous_tags.filter(field=tag.field).count() > 1
return fail | def function[check_field_multiplicity, parameter[tag, previous_tags]]:
constant[
Check the multiplicity of a 'field' for an object.
]
variable[fail] assign[=] constant[False]
if <ast.UnaryOp object at 0x7da1b09675b0> begin[:]
if <ast.UnaryOp object at 0x7da1b0967790> begin[:]
variable[fail] assign[=] call[name[previous_tags].filter, parameter[]]
return[name[fail]] | keyword[def] identifier[check_field_multiplicity] ( identifier[tag] , identifier[previous_tags] ):
literal[string]
identifier[fail] = keyword[False]
keyword[if] keyword[not] identifier[tag] . identifier[field] . identifier[multiple] :
keyword[if] keyword[not] identifier[tag] . identifier[id] :
identifier[fail] = identifier[previous_tags] . identifier[filter] ( identifier[field] = identifier[tag] . identifier[field] )
keyword[else] :
identifier[fail] = identifier[previous_tags] . identifier[filter] ( identifier[field] = identifier[tag] . identifier[field] ). identifier[count] ()> literal[int]
keyword[return] identifier[fail] | def check_field_multiplicity(tag, previous_tags):
"""
Check the multiplicity of a 'field' for an object.
"""
fail = False
#If the field is single
if not tag.field.multiple:
#If the tag is being created...
if not tag.id:
#... and the new field was already included in the previous tags,
#fail
fail = previous_tags.filter(field=tag.field) # depends on [control=['if'], data=[]]
else:
#If the tag is being modifying...
#... but there is only one previous tag (the one that is being
#modifying), do not fail
fail = previous_tags.filter(field=tag.field).count() > 1 # depends on [control=['if'], data=[]]
return fail |
def make_error_response(self, cond):
"""Create error response for the any non-error presence stanza.
:Parameters:
- `cond`: error condition name, as defined in XMPP specification.
:Types:
- `cond`: `unicode`
:return: new presence stanza.
:returntype: `Presence`
"""
if self.stanza_type == "error":
raise ValueError("Errors may not be generated in response"
" to errors")
stanza = Presence(stanza_type = "error", from_jid = self.from_jid,
to_jid = self.to_jid, stanza_id = self.stanza_id,
status = self._status, show = self._show,
priority = self._priority, error_cond = cond)
if self._payload is None:
self.decode_payload()
for payload in self._payload:
stanza.add_payload(payload)
return stanza | def function[make_error_response, parameter[self, cond]]:
constant[Create error response for the any non-error presence stanza.
:Parameters:
- `cond`: error condition name, as defined in XMPP specification.
:Types:
- `cond`: `unicode`
:return: new presence stanza.
:returntype: `Presence`
]
if compare[name[self].stanza_type equal[==] constant[error]] begin[:]
<ast.Raise object at 0x7da2041d9360>
variable[stanza] assign[=] call[name[Presence], parameter[]]
if compare[name[self]._payload is constant[None]] begin[:]
call[name[self].decode_payload, parameter[]]
for taget[name[payload]] in starred[name[self]._payload] begin[:]
call[name[stanza].add_payload, parameter[name[payload]]]
return[name[stanza]] | keyword[def] identifier[make_error_response] ( identifier[self] , identifier[cond] ):
literal[string]
keyword[if] identifier[self] . identifier[stanza_type] == literal[string] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
identifier[stanza] = identifier[Presence] ( identifier[stanza_type] = literal[string] , identifier[from_jid] = identifier[self] . identifier[from_jid] ,
identifier[to_jid] = identifier[self] . identifier[to_jid] , identifier[stanza_id] = identifier[self] . identifier[stanza_id] ,
identifier[status] = identifier[self] . identifier[_status] , identifier[show] = identifier[self] . identifier[_show] ,
identifier[priority] = identifier[self] . identifier[_priority] , identifier[error_cond] = identifier[cond] )
keyword[if] identifier[self] . identifier[_payload] keyword[is] keyword[None] :
identifier[self] . identifier[decode_payload] ()
keyword[for] identifier[payload] keyword[in] identifier[self] . identifier[_payload] :
identifier[stanza] . identifier[add_payload] ( identifier[payload] )
keyword[return] identifier[stanza] | def make_error_response(self, cond):
"""Create error response for the any non-error presence stanza.
:Parameters:
- `cond`: error condition name, as defined in XMPP specification.
:Types:
- `cond`: `unicode`
:return: new presence stanza.
:returntype: `Presence`
"""
if self.stanza_type == 'error':
raise ValueError('Errors may not be generated in response to errors') # depends on [control=['if'], data=[]]
stanza = Presence(stanza_type='error', from_jid=self.from_jid, to_jid=self.to_jid, stanza_id=self.stanza_id, status=self._status, show=self._show, priority=self._priority, error_cond=cond)
if self._payload is None:
self.decode_payload() # depends on [control=['if'], data=[]]
for payload in self._payload:
stanza.add_payload(payload) # depends on [control=['for'], data=['payload']]
return stanza |
def _GetRealImagArray(Array):
"""
Returns the real and imaginary components of each element in an array and returns them in 2 resulting arrays.
Parameters
----------
Array : ndarray
Input array
Returns
-------
RealArray : ndarray
The real components of the input array
ImagArray : ndarray
The imaginary components of the input array
"""
ImagArray = _np.array([num.imag for num in Array])
RealArray = _np.array([num.real for num in Array])
return RealArray, ImagArray | def function[_GetRealImagArray, parameter[Array]]:
constant[
Returns the real and imaginary components of each element in an array and returns them in 2 resulting arrays.
Parameters
----------
Array : ndarray
Input array
Returns
-------
RealArray : ndarray
The real components of the input array
ImagArray : ndarray
The imaginary components of the input array
]
variable[ImagArray] assign[=] call[name[_np].array, parameter[<ast.ListComp object at 0x7da1b28acaf0>]]
variable[RealArray] assign[=] call[name[_np].array, parameter[<ast.ListComp object at 0x7da1b28af640>]]
return[tuple[[<ast.Name object at 0x7da1b28af280>, <ast.Name object at 0x7da1b28af040>]]] | keyword[def] identifier[_GetRealImagArray] ( identifier[Array] ):
literal[string]
identifier[ImagArray] = identifier[_np] . identifier[array] ([ identifier[num] . identifier[imag] keyword[for] identifier[num] keyword[in] identifier[Array] ])
identifier[RealArray] = identifier[_np] . identifier[array] ([ identifier[num] . identifier[real] keyword[for] identifier[num] keyword[in] identifier[Array] ])
keyword[return] identifier[RealArray] , identifier[ImagArray] | def _GetRealImagArray(Array):
"""
Returns the real and imaginary components of each element in an array and returns them in 2 resulting arrays.
Parameters
----------
Array : ndarray
Input array
Returns
-------
RealArray : ndarray
The real components of the input array
ImagArray : ndarray
The imaginary components of the input array
"""
ImagArray = _np.array([num.imag for num in Array])
RealArray = _np.array([num.real for num in Array])
return (RealArray, ImagArray) |
def from_shape(cls, shape):
"""Try to linearize a curve (or an already linearized curve).
Args:
shape (Union[SubdividedCurve, \
~bezier._geometric_intersection.Linearization]): A curve or an
already linearized curve.
Returns:
Union[SubdividedCurve, \
~bezier._geometric_intersection.Linearization]: The
(potentially linearized) curve.
"""
# NOTE: In the below we replace ``isinstance(a, B)`` with
# ``a.__class__ is B``, which is a 3-3.5x speedup.
if shape.__class__ is cls:
return shape
else:
error = linearization_error(shape.nodes)
if error < _ERROR_VAL:
linearized = cls(shape, error)
return linearized
else:
return shape | def function[from_shape, parameter[cls, shape]]:
constant[Try to linearize a curve (or an already linearized curve).
Args:
shape (Union[SubdividedCurve, ~bezier._geometric_intersection.Linearization]): A curve or an
already linearized curve.
Returns:
Union[SubdividedCurve, ~bezier._geometric_intersection.Linearization]: The
(potentially linearized) curve.
]
if compare[name[shape].__class__ is name[cls]] begin[:]
return[name[shape]] | keyword[def] identifier[from_shape] ( identifier[cls] , identifier[shape] ):
literal[string]
keyword[if] identifier[shape] . identifier[__class__] keyword[is] identifier[cls] :
keyword[return] identifier[shape]
keyword[else] :
identifier[error] = identifier[linearization_error] ( identifier[shape] . identifier[nodes] )
keyword[if] identifier[error] < identifier[_ERROR_VAL] :
identifier[linearized] = identifier[cls] ( identifier[shape] , identifier[error] )
keyword[return] identifier[linearized]
keyword[else] :
keyword[return] identifier[shape] | def from_shape(cls, shape):
"""Try to linearize a curve (or an already linearized curve).
Args:
shape (Union[SubdividedCurve, ~bezier._geometric_intersection.Linearization]): A curve or an
already linearized curve.
Returns:
Union[SubdividedCurve, ~bezier._geometric_intersection.Linearization]: The
(potentially linearized) curve.
"""
# NOTE: In the below we replace ``isinstance(a, B)`` with
# ``a.__class__ is B``, which is a 3-3.5x speedup.
if shape.__class__ is cls:
return shape # depends on [control=['if'], data=[]]
else:
error = linearization_error(shape.nodes)
if error < _ERROR_VAL:
linearized = cls(shape, error)
return linearized # depends on [control=['if'], data=['error']]
else:
return shape |
def getAllSensors(self):
"""
Retrieve all the user's own sensors by iterating over the SensorsGet function
@return (list) - Array of sensors
"""
j = 0
sensors = []
parameters = {'page':0, 'per_page':1000, 'owned':1}
while True:
parameters['page'] = j
if self.SensorsGet(parameters):
s = json.loads(self.getResponse())['sensors']
sensors.extend(s)
else:
# if any of the calls fails, we cannot be cannot be sure about the sensors in CommonSense
return None
if len(s) < 1000:
break
j += 1
return sensors | def function[getAllSensors, parameter[self]]:
constant[
Retrieve all the user's own sensors by iterating over the SensorsGet function
@return (list) - Array of sensors
]
variable[j] assign[=] constant[0]
variable[sensors] assign[=] list[[]]
variable[parameters] assign[=] dictionary[[<ast.Constant object at 0x7da18f00ce20>, <ast.Constant object at 0x7da18f00d8d0>, <ast.Constant object at 0x7da18f00c5e0>], [<ast.Constant object at 0x7da18f00e230>, <ast.Constant object at 0x7da18f00f520>, <ast.Constant object at 0x7da18f00d7e0>]]
while constant[True] begin[:]
call[name[parameters]][constant[page]] assign[=] name[j]
if call[name[self].SensorsGet, parameter[name[parameters]]] begin[:]
variable[s] assign[=] call[call[name[json].loads, parameter[call[name[self].getResponse, parameter[]]]]][constant[sensors]]
call[name[sensors].extend, parameter[name[s]]]
if compare[call[name[len], parameter[name[s]]] less[<] constant[1000]] begin[:]
break
<ast.AugAssign object at 0x7da18f00c490>
return[name[sensors]] | keyword[def] identifier[getAllSensors] ( identifier[self] ):
literal[string]
identifier[j] = literal[int]
identifier[sensors] =[]
identifier[parameters] ={ literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] }
keyword[while] keyword[True] :
identifier[parameters] [ literal[string] ]= identifier[j]
keyword[if] identifier[self] . identifier[SensorsGet] ( identifier[parameters] ):
identifier[s] = identifier[json] . identifier[loads] ( identifier[self] . identifier[getResponse] ())[ literal[string] ]
identifier[sensors] . identifier[extend] ( identifier[s] )
keyword[else] :
keyword[return] keyword[None]
keyword[if] identifier[len] ( identifier[s] )< literal[int] :
keyword[break]
identifier[j] += literal[int]
keyword[return] identifier[sensors] | def getAllSensors(self):
"""
Retrieve all the user's own sensors by iterating over the SensorsGet function
@return (list) - Array of sensors
"""
j = 0
sensors = []
parameters = {'page': 0, 'per_page': 1000, 'owned': 1}
while True:
parameters['page'] = j
if self.SensorsGet(parameters):
s = json.loads(self.getResponse())['sensors']
sensors.extend(s) # depends on [control=['if'], data=[]]
else: # if any of the calls fails, we cannot be cannot be sure about the sensors in CommonSense
return None
if len(s) < 1000:
break # depends on [control=['if'], data=[]]
j += 1 # depends on [control=['while'], data=[]]
return sensors |
def render_file(self, context, result):
"""Perform appropriate metadata wrangling for returned open file handles."""
if __debug__:
log.debug("Processing file-like object.", extra=dict(request=id(context), result=repr(result)))
response = context.response
response.conditional_response = True
modified = mktime(gmtime(getmtime(result.name)))
response.last_modified = datetime.fromtimestamp(modified)
ct, ce = guess_type(result.name)
if not ct: ct = 'application/octet-stream'
response.content_type, response.content_encoding = ct, ce
response.etag = unicode(modified)
result.seek(0, 2) # Seek to the end of the file.
response.content_length = result.tell()
result.seek(0) # Seek back to the start of the file.
response.body_file = result
return True | def function[render_file, parameter[self, context, result]]:
constant[Perform appropriate metadata wrangling for returned open file handles.]
if name[__debug__] begin[:]
call[name[log].debug, parameter[constant[Processing file-like object.]]]
variable[response] assign[=] name[context].response
name[response].conditional_response assign[=] constant[True]
variable[modified] assign[=] call[name[mktime], parameter[call[name[gmtime], parameter[call[name[getmtime], parameter[name[result].name]]]]]]
name[response].last_modified assign[=] call[name[datetime].fromtimestamp, parameter[name[modified]]]
<ast.Tuple object at 0x7da1b0e3e7a0> assign[=] call[name[guess_type], parameter[name[result].name]]
if <ast.UnaryOp object at 0x7da1b0e3c190> begin[:]
variable[ct] assign[=] constant[application/octet-stream]
<ast.Tuple object at 0x7da1b0e3d090> assign[=] tuple[[<ast.Name object at 0x7da1b0e3e950>, <ast.Name object at 0x7da1b0e3c2e0>]]
name[response].etag assign[=] call[name[unicode], parameter[name[modified]]]
call[name[result].seek, parameter[constant[0], constant[2]]]
name[response].content_length assign[=] call[name[result].tell, parameter[]]
call[name[result].seek, parameter[constant[0]]]
name[response].body_file assign[=] name[result]
return[constant[True]] | keyword[def] identifier[render_file] ( identifier[self] , identifier[context] , identifier[result] ):
literal[string]
keyword[if] identifier[__debug__] :
identifier[log] . identifier[debug] ( literal[string] , identifier[extra] = identifier[dict] ( identifier[request] = identifier[id] ( identifier[context] ), identifier[result] = identifier[repr] ( identifier[result] )))
identifier[response] = identifier[context] . identifier[response]
identifier[response] . identifier[conditional_response] = keyword[True]
identifier[modified] = identifier[mktime] ( identifier[gmtime] ( identifier[getmtime] ( identifier[result] . identifier[name] )))
identifier[response] . identifier[last_modified] = identifier[datetime] . identifier[fromtimestamp] ( identifier[modified] )
identifier[ct] , identifier[ce] = identifier[guess_type] ( identifier[result] . identifier[name] )
keyword[if] keyword[not] identifier[ct] : identifier[ct] = literal[string]
identifier[response] . identifier[content_type] , identifier[response] . identifier[content_encoding] = identifier[ct] , identifier[ce]
identifier[response] . identifier[etag] = identifier[unicode] ( identifier[modified] )
identifier[result] . identifier[seek] ( literal[int] , literal[int] )
identifier[response] . identifier[content_length] = identifier[result] . identifier[tell] ()
identifier[result] . identifier[seek] ( literal[int] )
identifier[response] . identifier[body_file] = identifier[result]
keyword[return] keyword[True] | def render_file(self, context, result):
"""Perform appropriate metadata wrangling for returned open file handles."""
if __debug__:
log.debug('Processing file-like object.', extra=dict(request=id(context), result=repr(result))) # depends on [control=['if'], data=[]]
response = context.response
response.conditional_response = True
modified = mktime(gmtime(getmtime(result.name)))
response.last_modified = datetime.fromtimestamp(modified)
(ct, ce) = guess_type(result.name)
if not ct:
ct = 'application/octet-stream' # depends on [control=['if'], data=[]]
(response.content_type, response.content_encoding) = (ct, ce)
response.etag = unicode(modified)
result.seek(0, 2) # Seek to the end of the file.
response.content_length = result.tell()
result.seek(0) # Seek back to the start of the file.
response.body_file = result
return True |
def append_if_local_or_in_imports(self, definition):
"""Add definition to list.
Handles local definitions and adds to project_definitions.
"""
if isinstance(definition, LocalModuleDefinition):
self.definitions.append(definition)
elif self.import_names == ["*"]:
self.definitions.append(definition)
elif self.import_names and definition.name in self.import_names:
self.definitions.append(definition)
elif (self.import_alias_mapping and definition.name in
self.import_alias_mapping.values()):
self.definitions.append(definition)
if definition.parent_module_name:
self.definitions.append(definition)
if definition.node not in project_definitions:
project_definitions[definition.node] = definition | def function[append_if_local_or_in_imports, parameter[self, definition]]:
constant[Add definition to list.
Handles local definitions and adds to project_definitions.
]
if call[name[isinstance], parameter[name[definition], name[LocalModuleDefinition]]] begin[:]
call[name[self].definitions.append, parameter[name[definition]]]
if name[definition].parent_module_name begin[:]
call[name[self].definitions.append, parameter[name[definition]]]
if compare[name[definition].node <ast.NotIn object at 0x7da2590d7190> name[project_definitions]] begin[:]
call[name[project_definitions]][name[definition].node] assign[=] name[definition] | keyword[def] identifier[append_if_local_or_in_imports] ( identifier[self] , identifier[definition] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[definition] , identifier[LocalModuleDefinition] ):
identifier[self] . identifier[definitions] . identifier[append] ( identifier[definition] )
keyword[elif] identifier[self] . identifier[import_names] ==[ literal[string] ]:
identifier[self] . identifier[definitions] . identifier[append] ( identifier[definition] )
keyword[elif] identifier[self] . identifier[import_names] keyword[and] identifier[definition] . identifier[name] keyword[in] identifier[self] . identifier[import_names] :
identifier[self] . identifier[definitions] . identifier[append] ( identifier[definition] )
keyword[elif] ( identifier[self] . identifier[import_alias_mapping] keyword[and] identifier[definition] . identifier[name] keyword[in]
identifier[self] . identifier[import_alias_mapping] . identifier[values] ()):
identifier[self] . identifier[definitions] . identifier[append] ( identifier[definition] )
keyword[if] identifier[definition] . identifier[parent_module_name] :
identifier[self] . identifier[definitions] . identifier[append] ( identifier[definition] )
keyword[if] identifier[definition] . identifier[node] keyword[not] keyword[in] identifier[project_definitions] :
identifier[project_definitions] [ identifier[definition] . identifier[node] ]= identifier[definition] | def append_if_local_or_in_imports(self, definition):
"""Add definition to list.
Handles local definitions and adds to project_definitions.
"""
if isinstance(definition, LocalModuleDefinition):
self.definitions.append(definition) # depends on [control=['if'], data=[]]
elif self.import_names == ['*']:
self.definitions.append(definition) # depends on [control=['if'], data=[]]
elif self.import_names and definition.name in self.import_names:
self.definitions.append(definition) # depends on [control=['if'], data=[]]
elif self.import_alias_mapping and definition.name in self.import_alias_mapping.values():
self.definitions.append(definition) # depends on [control=['if'], data=[]]
if definition.parent_module_name:
self.definitions.append(definition) # depends on [control=['if'], data=[]]
if definition.node not in project_definitions:
project_definitions[definition.node] = definition # depends on [control=['if'], data=['project_definitions']] |
def s_bird(X, scales, n_runs, p_above, p_active=1, max_iter=100,
random_state=None, n_jobs=1, memory=Memory(None), verbose=False):
""" Multichannel version of BIRD (S-BIRD) seeking Structured Sparsity
Parameters
----------
X : array, shape (n_channels, n_times)
The numpy n_channels-vy-n_samples array to be denoised where n_channels
is the number of sensors and n_samples the dimension
scales : list of int
The list of MDCT scales that will be used to built the
dictionary Phi
n_runs : int
the number of runs (n_runs in the paper)
p_above : float
probability of appearance of the max above which the noise hypothesis
is considered false
p_active : float
proportion of active channels (l in the paper)
max_iter : int
The maximum number of iterations in one pursuit.
random_state : None | int | np.random.RandomState
To specify the random generator state (seed).
n_jobs : int
The number of jobs to run in parallel.
memory : instance of Memory
The object to use to cache some computations. If cachedir is None, no
caching is performed.
verbose : bool
verbose mode
Returns
-------
X_denoise : array, shape (n_channels, n_times)
The denoised data.
"""
X, prepad = _pad(X)
# Computing Lambda_W(Phi, p_above)
n_channels = X.shape[0]
n_samples = float(X.shape[1])
# size of the full shift-invariant dictionary
M = np.sum(np.array(scales) / 2) * n_samples
sigma = sqrt((1.0 - (2.0 / np.pi)) / float(n_samples))
Lambda_W = sigma * sqrt(2.0) * erfinv((1.0 - p_above) ** (1.0 / float(M)))
lint = int(n_channels * p_active)
this_stop_crit = partial(stop_crit, lint=lint) # XXX : check lint here
this_selection_rule = partial(selection_rule, lint=lint)
print("Starting S-BIRD with MDCT dictionary of %d Atoms."
" Lambda_W=%1.3f, n_runs=%d, p_active=%1.1f" % (M, Lambda_W,
n_runs, p_active))
denoised = _bird_core(X, scales, n_runs, Lambda_W, verbose=verbose,
stop_crit=this_stop_crit, n_jobs=n_jobs,
selection_rule=this_selection_rule,
max_iter=max_iter,
indep=False, memory=memory)
return denoised[:, prepad:] | def function[s_bird, parameter[X, scales, n_runs, p_above, p_active, max_iter, random_state, n_jobs, memory, verbose]]:
constant[ Multichannel version of BIRD (S-BIRD) seeking Structured Sparsity
Parameters
----------
X : array, shape (n_channels, n_times)
The numpy n_channels-vy-n_samples array to be denoised where n_channels
is the number of sensors and n_samples the dimension
scales : list of int
The list of MDCT scales that will be used to built the
dictionary Phi
n_runs : int
the number of runs (n_runs in the paper)
p_above : float
probability of appearance of the max above which the noise hypothesis
is considered false
p_active : float
proportion of active channels (l in the paper)
max_iter : int
The maximum number of iterations in one pursuit.
random_state : None | int | np.random.RandomState
To specify the random generator state (seed).
n_jobs : int
The number of jobs to run in parallel.
memory : instance of Memory
The object to use to cache some computations. If cachedir is None, no
caching is performed.
verbose : bool
verbose mode
Returns
-------
X_denoise : array, shape (n_channels, n_times)
The denoised data.
]
<ast.Tuple object at 0x7da204623040> assign[=] call[name[_pad], parameter[name[X]]]
variable[n_channels] assign[=] call[name[X].shape][constant[0]]
variable[n_samples] assign[=] call[name[float], parameter[call[name[X].shape][constant[1]]]]
variable[M] assign[=] binary_operation[call[name[np].sum, parameter[binary_operation[call[name[np].array, parameter[name[scales]]] / constant[2]]]] * name[n_samples]]
variable[sigma] assign[=] call[name[sqrt], parameter[binary_operation[binary_operation[constant[1.0] - binary_operation[constant[2.0] / name[np].pi]] / call[name[float], parameter[name[n_samples]]]]]]
variable[Lambda_W] assign[=] binary_operation[binary_operation[name[sigma] * call[name[sqrt], parameter[constant[2.0]]]] * call[name[erfinv], parameter[binary_operation[binary_operation[constant[1.0] - name[p_above]] ** binary_operation[constant[1.0] / call[name[float], parameter[name[M]]]]]]]]
variable[lint] assign[=] call[name[int], parameter[binary_operation[name[n_channels] * name[p_active]]]]
variable[this_stop_crit] assign[=] call[name[partial], parameter[name[stop_crit]]]
variable[this_selection_rule] assign[=] call[name[partial], parameter[name[selection_rule]]]
call[name[print], parameter[binary_operation[constant[Starting S-BIRD with MDCT dictionary of %d Atoms. Lambda_W=%1.3f, n_runs=%d, p_active=%1.1f] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da204620070>, <ast.Name object at 0x7da2046233d0>, <ast.Name object at 0x7da2046236a0>, <ast.Name object at 0x7da204622b90>]]]]]
variable[denoised] assign[=] call[name[_bird_core], parameter[name[X], name[scales], name[n_runs], name[Lambda_W]]]
return[call[name[denoised]][tuple[[<ast.Slice object at 0x7da2046203d0>, <ast.Slice object at 0x7da204620160>]]]] | keyword[def] identifier[s_bird] ( identifier[X] , identifier[scales] , identifier[n_runs] , identifier[p_above] , identifier[p_active] = literal[int] , identifier[max_iter] = literal[int] ,
identifier[random_state] = keyword[None] , identifier[n_jobs] = literal[int] , identifier[memory] = identifier[Memory] ( keyword[None] ), identifier[verbose] = keyword[False] ):
literal[string]
identifier[X] , identifier[prepad] = identifier[_pad] ( identifier[X] )
identifier[n_channels] = identifier[X] . identifier[shape] [ literal[int] ]
identifier[n_samples] = identifier[float] ( identifier[X] . identifier[shape] [ literal[int] ])
identifier[M] = identifier[np] . identifier[sum] ( identifier[np] . identifier[array] ( identifier[scales] )/ literal[int] )* identifier[n_samples]
identifier[sigma] = identifier[sqrt] (( literal[int] -( literal[int] / identifier[np] . identifier[pi] ))/ identifier[float] ( identifier[n_samples] ))
identifier[Lambda_W] = identifier[sigma] * identifier[sqrt] ( literal[int] )* identifier[erfinv] (( literal[int] - identifier[p_above] )**( literal[int] / identifier[float] ( identifier[M] )))
identifier[lint] = identifier[int] ( identifier[n_channels] * identifier[p_active] )
identifier[this_stop_crit] = identifier[partial] ( identifier[stop_crit] , identifier[lint] = identifier[lint] )
identifier[this_selection_rule] = identifier[partial] ( identifier[selection_rule] , identifier[lint] = identifier[lint] )
identifier[print] ( literal[string]
literal[string] %( identifier[M] , identifier[Lambda_W] ,
identifier[n_runs] , identifier[p_active] ))
identifier[denoised] = identifier[_bird_core] ( identifier[X] , identifier[scales] , identifier[n_runs] , identifier[Lambda_W] , identifier[verbose] = identifier[verbose] ,
identifier[stop_crit] = identifier[this_stop_crit] , identifier[n_jobs] = identifier[n_jobs] ,
identifier[selection_rule] = identifier[this_selection_rule] ,
identifier[max_iter] = identifier[max_iter] ,
identifier[indep] = keyword[False] , identifier[memory] = identifier[memory] )
keyword[return] identifier[denoised] [:, identifier[prepad] :] | def s_bird(X, scales, n_runs, p_above, p_active=1, max_iter=100, random_state=None, n_jobs=1, memory=Memory(None), verbose=False):
""" Multichannel version of BIRD (S-BIRD) seeking Structured Sparsity
Parameters
----------
X : array, shape (n_channels, n_times)
The numpy n_channels-vy-n_samples array to be denoised where n_channels
is the number of sensors and n_samples the dimension
scales : list of int
The list of MDCT scales that will be used to built the
dictionary Phi
n_runs : int
the number of runs (n_runs in the paper)
p_above : float
probability of appearance of the max above which the noise hypothesis
is considered false
p_active : float
proportion of active channels (l in the paper)
max_iter : int
The maximum number of iterations in one pursuit.
random_state : None | int | np.random.RandomState
To specify the random generator state (seed).
n_jobs : int
The number of jobs to run in parallel.
memory : instance of Memory
The object to use to cache some computations. If cachedir is None, no
caching is performed.
verbose : bool
verbose mode
Returns
-------
X_denoise : array, shape (n_channels, n_times)
The denoised data.
"""
(X, prepad) = _pad(X)
# Computing Lambda_W(Phi, p_above)
n_channels = X.shape[0]
n_samples = float(X.shape[1])
# size of the full shift-invariant dictionary
M = np.sum(np.array(scales) / 2) * n_samples
sigma = sqrt((1.0 - 2.0 / np.pi) / float(n_samples))
Lambda_W = sigma * sqrt(2.0) * erfinv((1.0 - p_above) ** (1.0 / float(M)))
lint = int(n_channels * p_active)
this_stop_crit = partial(stop_crit, lint=lint) # XXX : check lint here
this_selection_rule = partial(selection_rule, lint=lint)
print('Starting S-BIRD with MDCT dictionary of %d Atoms. Lambda_W=%1.3f, n_runs=%d, p_active=%1.1f' % (M, Lambda_W, n_runs, p_active))
denoised = _bird_core(X, scales, n_runs, Lambda_W, verbose=verbose, stop_crit=this_stop_crit, n_jobs=n_jobs, selection_rule=this_selection_rule, max_iter=max_iter, indep=False, memory=memory)
return denoised[:, prepad:] |
def arg(*args, **kwargs):
    """Return an attrib() that can be fed as a command-line argument.

    Wraps ``attr.attrib`` so that the attribute doubles as an argparse
    argument: call it with exactly the arguments you would pass to
    argparse's ``add_argument()``. Those arguments are stashed in the
    attribute metadata so the CLI layer can rebuild the argument later.

    Example:
        >>> @attrs
        ... class MyFeature(Feature):
        ...     my_number = arg('-n', '--number', default=3)
        ...     def run(self):
        ...         print('Your number:', self.my_number)

    Now you could run it like `firefed myfeature --number 5`.
    """
    default_value = arg_default(*args, **kwargs)
    return attrib(
        default=default_value,
        metadata={'arg_params': (args, kwargs)},
    )
constant[Return an attrib() that can be fed as a command-line argument.
This function is a wrapper for an attr.attrib to create a corresponding
command line argument for it. Use it with the same arguments as argparse's
add_argument().
Example:
>>> @attrs
... class MyFeature(Feature):
... my_number = arg('-n', '--number', default=3)
... def run(self):
... print('Your number:', self.my_number)
Now you could run it like `firefed myfeature --number 5`.
]
variable[metadata] assign[=] dictionary[[<ast.Constant object at 0x7da1b26a5930>], [<ast.Tuple object at 0x7da1b26a5990>]]
return[call[name[attrib], parameter[]]] | keyword[def] identifier[arg] (* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[metadata] ={ literal[string] :( identifier[args] , identifier[kwargs] )}
keyword[return] identifier[attrib] ( identifier[default] = identifier[arg_default] (* identifier[args] ,** identifier[kwargs] ), identifier[metadata] = identifier[metadata] ) | def arg(*args, **kwargs):
"""Return an attrib() that can be fed as a command-line argument.
This function is a wrapper for an attr.attrib to create a corresponding
command line argument for it. Use it with the same arguments as argparse's
add_argument().
Example:
>>> @attrs
... class MyFeature(Feature):
... my_number = arg('-n', '--number', default=3)
... def run(self):
... print('Your number:', self.my_number)
Now you could run it like `firefed myfeature --number 5`.
"""
metadata = {'arg_params': (args, kwargs)}
return attrib(default=arg_default(*args, **kwargs), metadata=metadata) |
def pathway_enrichment_no_overlap_correction(feature_weight_vector,
                                             pathway_definitions,
                                             gene_signature_definition,
                                             alpha=0.05,
                                             metadata=False):
    """Identify pathways overrepresented in a constructed feature
    according to a user-specified criterion (see `gene_signature_definitions`
    in the parameters list below) for identifying the feature's gene
    signature. Overlap-correction is not applied to the pathway definitions.
    Parameters
    -----------
    feature_weight_vector : pandas.Series(float), shape = n
        A vector containing gene weights
    pathway_definitions : dict(str -> set(str))
        A pathway (key) is defined by a set of genes (value).
    gene_signature_definition : functools.partial callable,
                                returns (set(), set())
        Accepts the `feature_weight_vector` as input. Provide a function to
        distinguish positive and/or negative gene signatures.
        Both a positive & negative signature may be appropriate if the
        feature's gene weight distribution spans positive and negative
        values. If this is not the case, a user can just specify a single
        gene signature by returning one or the other as an empty set.
    alpha : float (default=0.05)
        Significance level for pathway enrichment.
    metadata : bool (default=False)
        Return information about the gene signature(s)
    Returns
    -----------
    tup([pandas.DataFrame|None], dict())
        tup[0] : pandas.DataFrame: dataframe of significant pathways
                 | None if the gene signature does not contain any genes in
                 the pathway definitions
        tup[1] : if `metadata`:
                 {"positive_signature": <set() positive gene signature>,
                  "negative_signature": <set() negative gene signature>,
                  "pathway_definitions": <dict(str -> set()) the pathway
                  genes that are in the gene signature(s)>}
                 else: {}
    """
    # Local import: pandas is only needed here to concatenate the two
    # per-side p-value Series.
    import pandas as pd

    genes_in_pathway_definitions = set.union(*pathway_definitions.values())
    positive_gene_signature, negative_gene_signature = \
        gene_signature_definition(feature_weight_vector)
    # Only signature genes that appear in at least one pathway can
    # contribute to enrichment.
    gene_signature = ((positive_gene_signature | negative_gene_signature) &
                      genes_in_pathway_definitions)
    if not gene_signature:
        return (None, {})
    additional_information = {}
    n_genes = len(feature_weight_vector)
    if metadata:
        additional_information["positive_signature"] = positive_gene_signature
        additional_information["negative_signature"] = negative_gene_signature
        # Restrict each pathway definition to the genes that are actually
        # in the signature; pathways with no overlap are dropped.
        collect_signature_pathway_definitions = {}
        for pathway, definition in pathway_definitions.items():
            signature_definition = gene_signature & definition
            if signature_definition:
                collect_signature_pathway_definitions[pathway] = (
                    signature_definition)
        additional_information["pathway_definitions"] = (
            collect_signature_pathway_definitions)
    # Positive and negative signatures are tested separately, then their
    # per-pathway p-values are stacked into one Series.
    pathway_positive_series = single_side_pathway_enrichment(
        pathway_definitions, positive_gene_signature, n_genes)
    pathway_negative_series = single_side_pathway_enrichment(
        pathway_definitions, negative_gene_signature, n_genes)
    # `Series.append` was deprecated in pandas 1.4 and removed in 2.0;
    # `pd.concat` produces the identical stacked Series.
    pvalue_information = pd.concat(
        [pathway_positive_series, pathway_negative_series])
    side_information = _pathway_side_information(
        pathway_positive_series, pathway_negative_series,
        pvalue_information.index)
    significant_pathways = _significant_pathways_dataframe(
        pvalue_information, side_information, alpha)
    return significant_pathways, additional_information
constant[Identify pathways overrepresented in a constructed feature
according to a user-specified criterion (see `gene_signature_definitions`
in the parameters list below) for identifying the feature's gene
signature. Overlap-correction is not applied to the pathway definitions.
Parameters
-----------
feature_weight_vector : pandas.Series(float), shape = n
A vector containing gene weights
pathway_definitions : dict(str -> set(str))
A pathway (key) is defined by a set of genes (value).
gene_signature_definition : functools.partial callable,
returns (set(), set())
Accepts the `feature_weight_vector` as input. Provide a function to
distinguish positive and/or negative gene signatures.
Both a positive & negative signature may be appropriate if the feature's
gene weight distribution spans positive and negative values. If this
is not the case, a user can just specify a single gene signature by
returning one or the other as an empty set.
alpha : float (default=0.05)
Significance level for pathway enrichment.
metadata : bool (default=False)
Return information about the gene signature(s)
Returns
-----------
tup([pandas.DataFrame|None], dict())
tup[0] : pandas.DataFrame: dataframe of significant pathways
| None if the gene signature does not contain any genes in the
pathway definitions
tup[1] : if `metadata`:
{"positive signature": <set() positive gene signature>,
"negative signature": <set() negative gene signature>,
"pathway definitions": <dict(str -> set()) the pathway genes
that are in the gene signature(s)>}
else: {}
]
variable[genes_in_pathway_definitions] assign[=] call[name[set].union, parameter[<ast.Starred object at 0x7da18f812860>]]
<ast.Tuple object at 0x7da1b26ada80> assign[=] call[name[gene_signature_definition], parameter[name[feature_weight_vector]]]
variable[gene_signature] assign[=] binary_operation[binary_operation[name[positive_gene_signature] <ast.BitOr object at 0x7da2590d6aa0> name[negative_gene_signature]] <ast.BitAnd object at 0x7da2590d6b60> name[genes_in_pathway_definitions]]
if <ast.UnaryOp object at 0x7da1b26af6d0> begin[:]
return[tuple[[<ast.Constant object at 0x7da1b26af5b0>, <ast.Dict object at 0x7da1b26adfc0>]]]
variable[additional_information] assign[=] dictionary[[], []]
variable[n_genes] assign[=] call[name[len], parameter[name[feature_weight_vector]]]
if name[metadata] begin[:]
call[name[additional_information]][constant[positive_signature]] assign[=] name[positive_gene_signature]
call[name[additional_information]][constant[negative_signature]] assign[=] name[negative_gene_signature]
variable[collect_signature_pathway_definitions] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b26ad060>, <ast.Name object at 0x7da1b26acd60>]]] in starred[call[name[pathway_definitions].items, parameter[]]] begin[:]
variable[signature_definition] assign[=] binary_operation[name[gene_signature] <ast.BitAnd object at 0x7da2590d6b60> name[definition]]
if name[signature_definition] begin[:]
call[name[collect_signature_pathway_definitions]][name[pathway]] assign[=] name[signature_definition]
call[name[additional_information]][constant[pathway_definitions]] assign[=] name[collect_signature_pathway_definitions]
variable[pathway_positive_series] assign[=] call[name[single_side_pathway_enrichment], parameter[name[pathway_definitions], name[positive_gene_signature], name[n_genes]]]
variable[pathway_negative_series] assign[=] call[name[single_side_pathway_enrichment], parameter[name[pathway_definitions], name[negative_gene_signature], name[n_genes]]]
variable[pvalue_information] assign[=] call[name[pathway_positive_series].append, parameter[name[pathway_negative_series]]]
variable[side_information] assign[=] call[name[_pathway_side_information], parameter[name[pathway_positive_series], name[pathway_negative_series], name[pvalue_information].index]]
variable[significant_pathways] assign[=] call[name[_significant_pathways_dataframe], parameter[name[pvalue_information], name[side_information], name[alpha]]]
return[tuple[[<ast.Name object at 0x7da18dc99180>, <ast.Name object at 0x7da18dc9ae30>]]] | keyword[def] identifier[pathway_enrichment_no_overlap_correction] ( identifier[feature_weight_vector] ,
identifier[pathway_definitions] ,
identifier[gene_signature_definition] ,
identifier[alpha] = literal[int] ,
identifier[metadata] = keyword[False] ):
literal[string]
identifier[genes_in_pathway_definitions] = identifier[set] . identifier[union] (* identifier[pathway_definitions] . identifier[values] ())
identifier[positive_gene_signature] , identifier[negative_gene_signature] = identifier[gene_signature_definition] ( identifier[feature_weight_vector] )
identifier[gene_signature] =(( identifier[positive_gene_signature] | identifier[negative_gene_signature] )&
identifier[genes_in_pathway_definitions] )
keyword[if] keyword[not] identifier[gene_signature] :
keyword[return] ( keyword[None] ,{})
identifier[additional_information] ={}
identifier[n_genes] = identifier[len] ( identifier[feature_weight_vector] )
keyword[if] identifier[metadata] :
identifier[additional_information] [ literal[string] ]= identifier[positive_gene_signature]
identifier[additional_information] [ literal[string] ]= identifier[negative_gene_signature]
identifier[collect_signature_pathway_definitions] ={}
keyword[for] identifier[pathway] , identifier[definition] keyword[in] identifier[pathway_definitions] . identifier[items] ():
identifier[signature_definition] = identifier[gene_signature] & identifier[definition]
keyword[if] identifier[signature_definition] :
identifier[collect_signature_pathway_definitions] [ identifier[pathway] ]=(
identifier[signature_definition] )
identifier[additional_information] [ literal[string] ]=(
identifier[collect_signature_pathway_definitions] )
identifier[pathway_positive_series] = identifier[single_side_pathway_enrichment] (
identifier[pathway_definitions] , identifier[positive_gene_signature] , identifier[n_genes] )
identifier[pathway_negative_series] = identifier[single_side_pathway_enrichment] (
identifier[pathway_definitions] , identifier[negative_gene_signature] , identifier[n_genes] )
identifier[pvalue_information] = identifier[pathway_positive_series] . identifier[append] (
identifier[pathway_negative_series] )
identifier[side_information] = identifier[_pathway_side_information] (
identifier[pathway_positive_series] , identifier[pathway_negative_series] ,
identifier[pvalue_information] . identifier[index] )
identifier[significant_pathways] = identifier[_significant_pathways_dataframe] (
identifier[pvalue_information] , identifier[side_information] , identifier[alpha] )
keyword[return] identifier[significant_pathways] , identifier[additional_information] | def pathway_enrichment_no_overlap_correction(feature_weight_vector, pathway_definitions, gene_signature_definition, alpha=0.05, metadata=False):
"""Identify pathways overrepresented in a constructed feature
according to a user-specified criterion (see `gene_signature_definitions`
in the parameters list below) for identifying the feature's gene
signature. Overlap-correction is not applied to the pathway definitions.
Parameters
-----------
feature_weight_vector : pandas.Series(float), shape = n
A vector containing gene weights
pathway_definitions : dict(str -> set(str))
A pathway (key) is defined by a set of genes (value).
gene_signature_definition : functools.partial callable,
returns (set(), set())
Accepts the `feature_weight_vector` as input. Provide a function to
distinguish positive and/or negative gene signatures.
Both a positive & negative signature may be appropriate if the feature's
gene weight distribution spans positive and negative values. If this
is not the case, a user can just specify a single gene signature by
returning one or the other as an empty set.
alpha : float (default=0.05)
Significance level for pathway enrichment.
metadata : bool (default=False)
Return information about the gene signature(s)
Returns
-----------
tup([pandas.DataFrame|None], dict())
tup[0] : pandas.DataFrame: dataframe of significant pathways
| None if the gene signature does not contain any genes in the
pathway definitions
tup[1] : if `metadata`:
{"positive signature": <set() positive gene signature>,
"negative signature": <set() negative gene signature>,
"pathway definitions": <dict(str -> set()) the pathway genes
that are in the gene signature(s)>}
else: {}
"""
genes_in_pathway_definitions = set.union(*pathway_definitions.values())
(positive_gene_signature, negative_gene_signature) = gene_signature_definition(feature_weight_vector)
gene_signature = (positive_gene_signature | negative_gene_signature) & genes_in_pathway_definitions
if not gene_signature:
return (None, {}) # depends on [control=['if'], data=[]]
additional_information = {}
n_genes = len(feature_weight_vector)
if metadata:
additional_information['positive_signature'] = positive_gene_signature
additional_information['negative_signature'] = negative_gene_signature
collect_signature_pathway_definitions = {}
for (pathway, definition) in pathway_definitions.items():
signature_definition = gene_signature & definition
if signature_definition:
collect_signature_pathway_definitions[pathway] = signature_definition # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
additional_information['pathway_definitions'] = collect_signature_pathway_definitions # depends on [control=['if'], data=[]]
pathway_positive_series = single_side_pathway_enrichment(pathway_definitions, positive_gene_signature, n_genes)
pathway_negative_series = single_side_pathway_enrichment(pathway_definitions, negative_gene_signature, n_genes)
pvalue_information = pathway_positive_series.append(pathway_negative_series)
side_information = _pathway_side_information(pathway_positive_series, pathway_negative_series, pvalue_information.index)
significant_pathways = _significant_pathways_dataframe(pvalue_information, side_information, alpha)
return (significant_pathways, additional_information) |
def create_single_dialog_train_example(context_dialog_path, candidate_dialog_paths, rng, positive_probability,
                                       minimum_context_length=2, max_context_length=20):
    """
    Creates a single example for training set.
    :param context_dialog_path:
    :param candidate_dialog_paths:
    :param rng:
    :param positive_probability:
    :return:
    """
    utterances = translate_dialog_to_lists(context_dialog_path)
    context_str, next_utterance_ix = create_random_context(
        utterances, rng,
        minimum_context_length=minimum_context_length,
        max_context_length=max_context_length)
    draw_positive = positive_probability > rng.random()
    if draw_positive:
        # Positive example: the true next utterance of this dialog.
        label = 1.0
        response = singe_user_utterances_to_string(utterances[next_utterance_ix])
    else:
        # Negative example: one utterance sampled at random from the corpus.
        label = 0.0
        response = get_random_utterances_from_corpus(
            candidate_dialog_paths, rng, 1,
            min_turn=minimum_context_length + 1,
            max_turn=max_context_length)[0]
    return context_str, response, label
constant[
Creates a single example for training set.
:param context_dialog_path:
:param candidate_dialog_paths:
:param rng:
:param positive_probability:
:return:
]
variable[dialog] assign[=] call[name[translate_dialog_to_lists], parameter[name[context_dialog_path]]]
<ast.Tuple object at 0x7da20c6aa710> assign[=] call[name[create_random_context], parameter[name[dialog], name[rng]]]
if compare[name[positive_probability] greater[>] call[name[rng].random, parameter[]]] begin[:]
variable[response] assign[=] call[name[singe_user_utterances_to_string], parameter[call[name[dialog]][name[next_utterance_ix]]]]
variable[label] assign[=] constant[1.0]
return[tuple[[<ast.Name object at 0x7da20c6aa320>, <ast.Name object at 0x7da20c6a8bb0>, <ast.Name object at 0x7da20c6a9720>]]] | keyword[def] identifier[create_single_dialog_train_example] ( identifier[context_dialog_path] , identifier[candidate_dialog_paths] , identifier[rng] , identifier[positive_probability] ,
identifier[minimum_context_length] = literal[int] , identifier[max_context_length] = literal[int] ):
literal[string]
identifier[dialog] = identifier[translate_dialog_to_lists] ( identifier[context_dialog_path] )
identifier[context_str] , identifier[next_utterance_ix] = identifier[create_random_context] ( identifier[dialog] , identifier[rng] ,
identifier[minimum_context_length] = identifier[minimum_context_length] ,
identifier[max_context_length] = identifier[max_context_length] )
keyword[if] identifier[positive_probability] > identifier[rng] . identifier[random] ():
identifier[response] = identifier[singe_user_utterances_to_string] ( identifier[dialog] [ identifier[next_utterance_ix] ])
identifier[label] = literal[int]
keyword[else] :
identifier[response] = identifier[get_random_utterances_from_corpus] ( identifier[candidate_dialog_paths] , identifier[rng] , literal[int] ,
identifier[min_turn] = identifier[minimum_context_length] + literal[int] ,
identifier[max_turn] = identifier[max_context_length] )[ literal[int] ]
identifier[label] = literal[int]
keyword[return] identifier[context_str] , identifier[response] , identifier[label] | def create_single_dialog_train_example(context_dialog_path, candidate_dialog_paths, rng, positive_probability, minimum_context_length=2, max_context_length=20):
"""
Creates a single example for training set.
:param context_dialog_path:
:param candidate_dialog_paths:
:param rng:
:param positive_probability:
:return:
"""
dialog = translate_dialog_to_lists(context_dialog_path)
(context_str, next_utterance_ix) = create_random_context(dialog, rng, minimum_context_length=minimum_context_length, max_context_length=max_context_length)
if positive_probability > rng.random():
# use the next utterance as positive example
response = singe_user_utterances_to_string(dialog[next_utterance_ix])
label = 1.0 # depends on [control=['if'], data=[]]
else:
response = get_random_utterances_from_corpus(candidate_dialog_paths, rng, 1, min_turn=minimum_context_length + 1, max_turn=max_context_length)[0]
label = 0.0
return (context_str, response, label) |
def _npy2fits(d, table_type='binary', write_bitcols=False):
    """
    d is the full element from the descr

    Dispatch to the string or numeric converter based on the numpy type
    character (the dtype string with its byte-order prefix stripped).
    """
    type_char = d[1][1:][0]
    if type_char in ('S', 'U'):
        return _npy_string2fits(d, table_type=table_type)
    return _npy_num2fits(
        d, table_type=table_type, write_bitcols=write_bitcols)
constant[
d is the full element from the descr
]
variable[npy_dtype] assign[=] call[call[name[d]][constant[1]]][<ast.Slice object at 0x7da204347760>]
if <ast.BoolOp object at 0x7da204344ac0> begin[:]
<ast.Tuple object at 0x7da204346740> assign[=] call[name[_npy_string2fits], parameter[name[d]]]
return[tuple[[<ast.Name object at 0x7da18f00cd60>, <ast.Name object at 0x7da18f00d9c0>, <ast.Name object at 0x7da18f00fe80>]]] | keyword[def] identifier[_npy2fits] ( identifier[d] , identifier[table_type] = literal[string] , identifier[write_bitcols] = keyword[False] ):
literal[string]
identifier[npy_dtype] = identifier[d] [ literal[int] ][ literal[int] :]
keyword[if] identifier[npy_dtype] [ literal[int] ]== literal[string] keyword[or] identifier[npy_dtype] [ literal[int] ]== literal[string] :
identifier[name] , identifier[form] , identifier[dim] = identifier[_npy_string2fits] ( identifier[d] , identifier[table_type] = identifier[table_type] )
keyword[else] :
identifier[name] , identifier[form] , identifier[dim] = identifier[_npy_num2fits] (
identifier[d] , identifier[table_type] = identifier[table_type] , identifier[write_bitcols] = identifier[write_bitcols] )
keyword[return] identifier[name] , identifier[form] , identifier[dim] | def _npy2fits(d, table_type='binary', write_bitcols=False):
"""
d is the full element from the descr
"""
npy_dtype = d[1][1:]
if npy_dtype[0] == 'S' or npy_dtype[0] == 'U':
(name, form, dim) = _npy_string2fits(d, table_type=table_type) # depends on [control=['if'], data=[]]
else:
(name, form, dim) = _npy_num2fits(d, table_type=table_type, write_bitcols=write_bitcols)
return (name, form, dim) |
def _read_openjp2(self, rlevel=0, layer=None, area=None, tile=None,
                  verbose=False):
    """Read a JPEG 2000 image using libopenjp2.
    Parameters
    ----------
    layer : int, optional
        Number of quality layer to decode.
    rlevel : int, optional
        Factor by which to rlevel output resolution. Use -1 to get the
        lowest resolution thumbnail.
    area : tuple, optional
        Specifies decoding image area,
        (first_row, first_col, last_row, last_col)
    tile : int, optional
        Number of tile to decode.
    verbose : bool, optional
        Print informational messages produced by the OpenJPEG library.
    Returns
    -------
    ndarray
        The image data.
    Raises
    ------
    RuntimeError
        If the image has differing subsample factors.
    """
    self.layer = layer
    # Reject images whose components use differing subsample factors.
    self._subsampling_sanity_check()
    # Translate the requested resolution/area/tile into decode parameters.
    self._populate_dparams(rlevel, tile=tile, area=area)
    return self._read_openjp2_common()
constant[Read a JPEG 2000 image using libopenjp2.
Parameters
----------
layer : int, optional
Number of quality layer to decode.
rlevel : int, optional
Factor by which to rlevel output resolution. Use -1 to get the
lowest resolution thumbnail.
area : tuple, optional
Specifies decoding image area,
(first_row, first_col, last_row, last_col)
tile : int, optional
Number of tile to decode.
verbose : bool, optional
Print informational messages produced by the OpenJPEG library.
Returns
-------
ndarray
The image data.
Raises
------
RuntimeError
If the image has differing subsample factors.
]
name[self].layer assign[=] name[layer]
call[name[self]._subsampling_sanity_check, parameter[]]
call[name[self]._populate_dparams, parameter[name[rlevel]]]
variable[image] assign[=] call[name[self]._read_openjp2_common, parameter[]]
return[name[image]] | keyword[def] identifier[_read_openjp2] ( identifier[self] , identifier[rlevel] = literal[int] , identifier[layer] = keyword[None] , identifier[area] = keyword[None] , identifier[tile] = keyword[None] ,
identifier[verbose] = keyword[False] ):
literal[string]
identifier[self] . identifier[layer] = identifier[layer]
identifier[self] . identifier[_subsampling_sanity_check] ()
identifier[self] . identifier[_populate_dparams] ( identifier[rlevel] , identifier[tile] = identifier[tile] , identifier[area] = identifier[area] )
identifier[image] = identifier[self] . identifier[_read_openjp2_common] ()
keyword[return] identifier[image] | def _read_openjp2(self, rlevel=0, layer=None, area=None, tile=None, verbose=False):
"""Read a JPEG 2000 image using libopenjp2.
Parameters
----------
layer : int, optional
Number of quality layer to decode.
rlevel : int, optional
Factor by which to rlevel output resolution. Use -1 to get the
lowest resolution thumbnail.
area : tuple, optional
Specifies decoding image area,
(first_row, first_col, last_row, last_col)
tile : int, optional
Number of tile to decode.
verbose : bool, optional
Print informational messages produced by the OpenJPEG library.
Returns
-------
ndarray
The image data.
Raises
------
RuntimeError
If the image has differing subsample factors.
"""
self.layer = layer
self._subsampling_sanity_check()
self._populate_dparams(rlevel, tile=tile, area=area)
image = self._read_openjp2_common()
return image |
def discover_connections(self, x=255, y=255):
    """Attempt to discover all available Ethernet connections to a machine.
    After calling this method, :py:class:`.MachineController` will attempt
    to communicate via the Ethernet connection on the same board as the
    destination chip for all commands.
    If called multiple times, existing connections will be retained in
    preference to new ones.
    .. note::
        The system must be booted for this command to succeed.
    .. note::
        Currently, only systems comprised of multiple Ethernet-connected
        SpiNN-5 boards are supported.
    Parameters
    ----------
    x : int
    y : int
        (Optional) The coordinates of the chip to initially use to query
        the system for the set of live chips.
    Returns
    -------
    int
        The number of new connections established.
    """
    # A chip counts as "working" when it has a live (non-none) entry in
    # the P2P routing table fetched via chip (x, y).
    working_chips = set(
        (x, y)
        for (x, y), route in iteritems(self.get_p2p_routing_table(x, y))
        if route != consts.P2PTableEntry.none)
    # Machine dimensions are inferred from the furthest live chip
    # (coordinates are zero-based, hence the +1).
    self._width = max(x for x, y in working_chips) + 1
    self._height = max(y for x, y in working_chips) + 1
    num_new_connections = 0
    # NOTE(review): spinn5_eth_coords presumably yields the coordinates of
    # the Ethernet-attached chip on each SpiNN-5 board — confirm against
    # its definition.
    for x, y in spinn5_eth_coords(self._width, self._height,
                                  *self.root_chip):
        # Skip dead chips and chips we already hold a connection for
        # (existing connections are retained in preference to new ones).
        if (x, y) in working_chips and (x, y) not in self.connections:
            # Discover the chip's IP address
            try:
                ip = self.get_ip_address(x, y)
            except SCPError:
                # Chip did not respond to the IP query; move on.
                continue
            if ip is not None:
                # Create a connection to the IP
                self.connections[(x, y)] = \
                    SCPConnection(ip, self.scp_port,
                                  self.n_tries, self.timeout)
                # Attempt to use the connection (and remove it if it
                # doesn't work)
                try:
                    self.get_software_version(x, y, 0)
                    num_new_connections += 1
                except SCPError:
                    # Probe failed: drop and close the dead connection so
                    # it is not used for later commands.
                    self.connections.pop((x, y)).close()
    return num_new_connections
constant[Attempt to discover all available Ethernet connections to a machine.
After calling this method, :py:class:`.MachineController` will attempt
to communicate via the Ethernet connection on the same board as the
destination chip for all commands.
If called multiple times, existing connections will be retained in
preference to new ones.
.. note::
The system must be booted for this command to succeed.
.. note::
Currently, only systems comprised of multiple Ethernet-connected
SpiNN-5 boards are supported.
Parameters
----------
x : int
y : int
(Optional) The coordinates of the chip to initially use to query
the system for the set of live chips.
Returns
-------
int
The number of new connections established.
]
variable[working_chips] assign[=] call[name[set], parameter[<ast.GeneratorExp object at 0x7da1b19ca7d0>]]
name[self]._width assign[=] binary_operation[call[name[max], parameter[<ast.GeneratorExp object at 0x7da1b19c8b50>]] + constant[1]]
name[self]._height assign[=] binary_operation[call[name[max], parameter[<ast.GeneratorExp object at 0x7da1b19caf80>]] + constant[1]]
variable[num_new_connections] assign[=] constant[0]
for taget[tuple[[<ast.Name object at 0x7da1b19cb550>, <ast.Name object at 0x7da1b19c9c00>]]] in starred[call[name[spinn5_eth_coords], parameter[name[self]._width, name[self]._height, <ast.Starred object at 0x7da1b19cb5e0>]]] begin[:]
if <ast.BoolOp object at 0x7da1b19c8af0> begin[:]
<ast.Try object at 0x7da1b19cbfa0>
if compare[name[ip] is_not constant[None]] begin[:]
call[name[self].connections][tuple[[<ast.Name object at 0x7da1b19cb8b0>, <ast.Name object at 0x7da1b19cb310>]]] assign[=] call[name[SCPConnection], parameter[name[ip], name[self].scp_port, name[self].n_tries, name[self].timeout]]
<ast.Try object at 0x7da1b19c8220>
return[name[num_new_connections]] | keyword[def] identifier[discover_connections] ( identifier[self] , identifier[x] = literal[int] , identifier[y] = literal[int] ):
literal[string]
identifier[working_chips] = identifier[set] (
( identifier[x] , identifier[y] )
keyword[for] ( identifier[x] , identifier[y] ), identifier[route] keyword[in] identifier[iteritems] ( identifier[self] . identifier[get_p2p_routing_table] ( identifier[x] , identifier[y] ))
keyword[if] identifier[route] != identifier[consts] . identifier[P2PTableEntry] . identifier[none] )
identifier[self] . identifier[_width] = identifier[max] ( identifier[x] keyword[for] identifier[x] , identifier[y] keyword[in] identifier[working_chips] )+ literal[int]
identifier[self] . identifier[_height] = identifier[max] ( identifier[y] keyword[for] identifier[x] , identifier[y] keyword[in] identifier[working_chips] )+ literal[int]
identifier[num_new_connections] = literal[int]
keyword[for] identifier[x] , identifier[y] keyword[in] identifier[spinn5_eth_coords] ( identifier[self] . identifier[_width] , identifier[self] . identifier[_height] ,
* identifier[self] . identifier[root_chip] ):
keyword[if] ( identifier[x] , identifier[y] ) keyword[in] identifier[working_chips] keyword[and] ( identifier[x] , identifier[y] ) keyword[not] keyword[in] identifier[self] . identifier[connections] :
keyword[try] :
identifier[ip] = identifier[self] . identifier[get_ip_address] ( identifier[x] , identifier[y] )
keyword[except] identifier[SCPError] :
keyword[continue]
keyword[if] identifier[ip] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[connections] [( identifier[x] , identifier[y] )]= identifier[SCPConnection] ( identifier[ip] , identifier[self] . identifier[scp_port] ,
identifier[self] . identifier[n_tries] , identifier[self] . identifier[timeout] )
keyword[try] :
identifier[self] . identifier[get_software_version] ( identifier[x] , identifier[y] , literal[int] )
identifier[num_new_connections] += literal[int]
keyword[except] identifier[SCPError] :
identifier[self] . identifier[connections] . identifier[pop] (( identifier[x] , identifier[y] )). identifier[close] ()
keyword[return] identifier[num_new_connections] | def discover_connections(self, x=255, y=255):
"""Attempt to discover all available Ethernet connections to a machine.
After calling this method, :py:class:`.MachineController` will attempt
to communicate via the Ethernet connection on the same board as the
destination chip for all commands.
If called multiple times, existing connections will be retained in
preference to new ones.
.. note::
The system must be booted for this command to succeed.
.. note::
Currently, only systems comprised of multiple Ethernet-connected
SpiNN-5 boards are supported.
Parameters
----------
x : int
y : int
(Optional) The coordinates of the chip to initially use to query
the system for the set of live chips.
Returns
-------
int
The number of new connections established.
"""
working_chips = set(((x, y) for ((x, y), route) in iteritems(self.get_p2p_routing_table(x, y)) if route != consts.P2PTableEntry.none))
self._width = max((x for (x, y) in working_chips)) + 1
self._height = max((y for (x, y) in working_chips)) + 1
num_new_connections = 0
for (x, y) in spinn5_eth_coords(self._width, self._height, *self.root_chip):
if (x, y) in working_chips and (x, y) not in self.connections:
# Discover the chip's IP address
try:
ip = self.get_ip_address(x, y) # depends on [control=['try'], data=[]]
except SCPError:
continue # depends on [control=['except'], data=[]]
if ip is not None:
# Create a connection to the IP
self.connections[x, y] = SCPConnection(ip, self.scp_port, self.n_tries, self.timeout)
# Attempt to use the connection (and remove it if it
# doesn't work)
try:
self.get_software_version(x, y, 0)
num_new_connections += 1 # depends on [control=['try'], data=[]]
except SCPError:
self.connections.pop((x, y)).close() # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['ip']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return num_new_connections |
def _subproc(scons_env, cmd, error = 'ignore', **kw):
    """Do common setup for a subprocess.Popen() call

    This function is still in draft mode. We're going to need something
    like it in the long run as more and more places use subprocess, but
    it'll likely keep being tweaked until the full desired functionality
    emerges.

    One special keyword argument (so far?), 'error', tells what to do
    with exceptions: 'raise' re-raises them; anything else swallows them
    and returns a dummy Popen-like object that only reports the error.
    """
    # The caller may pass the literal string 'devnull' for any of the
    # three standard streams; translate that into a real file handle on
    # os.devnull (read mode for stdin, write mode for stdout/stderr).
    for stream_name, mode in (('stdin', 'r'), ('stdout', 'w'), ('stderr', 'w')):
        stream = kw.get(stream_name)
        if is_String(stream) and stream == 'devnull':
            kw[stream_name] = open(os.devnull, mode)

    # Figure out what shell environment to use.
    ENV = kw.get('env', None)
    if ENV is None:
        ENV = get_default_ENV(scons_env)

    # subprocess.Popen() requires every environment value to be a plain
    # string, so coerce them all.
    stringified = {}
    for name, val in ENV.items():
        if is_List(val):
            # A list-valued entry is assumed to be a path list, because
            # that's a pretty common list-like value to stick in an
            # environment variable; flatten it and join with os.pathsep.
            flattened = SCons.Util.flatten_sequence(val)
            stringified[name] = os.pathsep.join(map(str, flattened))
        else:
            # str() is still wanted for actual strings, since a *Unicode*
            # string makes subprocess.Popen() gag; for anything else
            # (e.g. Dir and File instances) it produces something
            # reasonable.
            stringified[name] = str(val)
    kw['env'] = stringified

    try:
        return subprocess.Popen(cmd, **kw)
    except EnvironmentError as e:
        if error == 'raise':
            raise
        # Hand back a stand-in object that mimics just enough of the
        # Popen interface to report the captured error.
        class dummyPopen(object):
            def __init__(self, e):
                self.exception = e
            def communicate(self, input=None):
                return ('', '')
            def wait(self):
                return -self.exception.errno
            stdin = None
            class _NullFile(object):
                def read(self): return ''
                def readline(self): return ''
                def __iter__(self): return iter(())
            stdout = stderr = _NullFile()
        return dummyPopen(e)
constant[Do common setup for a subprocess.Popen() call
This function is still in draft mode. We're going to need something like
it in the long run as more and more places use subprocess, but I'm sure
it'll have to be tweaked to get the full desired functionality.
one special arg (so far?), 'error', to tell what to do with exceptions.
]
variable[io] assign[=] call[name[kw].get, parameter[constant[stdin]]]
if <ast.BoolOp object at 0x7da18f58dc00> begin[:]
call[name[kw]][constant[stdin]] assign[=] call[name[open], parameter[name[os].devnull]]
variable[io] assign[=] call[name[kw].get, parameter[constant[stdout]]]
if <ast.BoolOp object at 0x7da18f58cca0> begin[:]
call[name[kw]][constant[stdout]] assign[=] call[name[open], parameter[name[os].devnull, constant[w]]]
variable[io] assign[=] call[name[kw].get, parameter[constant[stderr]]]
if <ast.BoolOp object at 0x7da18f58e9b0> begin[:]
call[name[kw]][constant[stderr]] assign[=] call[name[open], parameter[name[os].devnull, constant[w]]]
variable[ENV] assign[=] call[name[kw].get, parameter[constant[env], constant[None]]]
if compare[name[ENV] is constant[None]] begin[:]
variable[ENV] assign[=] call[name[get_default_ENV], parameter[name[scons_env]]]
variable[new_env] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da18f58e260>, <ast.Name object at 0x7da18f58d5a0>]]] in starred[call[name[ENV].items, parameter[]]] begin[:]
if call[name[is_List], parameter[name[value]]] begin[:]
variable[value] assign[=] call[name[SCons].Util.flatten_sequence, parameter[name[value]]]
call[name[new_env]][name[key]] assign[=] call[name[os].pathsep.join, parameter[call[name[map], parameter[name[str], name[value]]]]]
call[name[kw]][constant[env]] assign[=] name[new_env]
<ast.Try object at 0x7da18f58f7f0> | keyword[def] identifier[_subproc] ( identifier[scons_env] , identifier[cmd] , identifier[error] = literal[string] ,** identifier[kw] ):
literal[string]
identifier[io] = identifier[kw] . identifier[get] ( literal[string] )
keyword[if] identifier[is_String] ( identifier[io] ) keyword[and] identifier[io] == literal[string] :
identifier[kw] [ literal[string] ]= identifier[open] ( identifier[os] . identifier[devnull] )
identifier[io] = identifier[kw] . identifier[get] ( literal[string] )
keyword[if] identifier[is_String] ( identifier[io] ) keyword[and] identifier[io] == literal[string] :
identifier[kw] [ literal[string] ]= identifier[open] ( identifier[os] . identifier[devnull] , literal[string] )
identifier[io] = identifier[kw] . identifier[get] ( literal[string] )
keyword[if] identifier[is_String] ( identifier[io] ) keyword[and] identifier[io] == literal[string] :
identifier[kw] [ literal[string] ]= identifier[open] ( identifier[os] . identifier[devnull] , literal[string] )
identifier[ENV] = identifier[kw] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[ENV] keyword[is] keyword[None] : identifier[ENV] = identifier[get_default_ENV] ( identifier[scons_env] )
identifier[new_env] ={}
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[ENV] . identifier[items] ():
keyword[if] identifier[is_List] ( identifier[value] ):
identifier[value] = identifier[SCons] . identifier[Util] . identifier[flatten_sequence] ( identifier[value] )
identifier[new_env] [ identifier[key] ]= identifier[os] . identifier[pathsep] . identifier[join] ( identifier[map] ( identifier[str] , identifier[value] ))
keyword[else] :
identifier[new_env] [ identifier[key] ]= identifier[str] ( identifier[value] )
identifier[kw] [ literal[string] ]= identifier[new_env]
keyword[try] :
keyword[return] identifier[subprocess] . identifier[Popen] ( identifier[cmd] ,** identifier[kw] )
keyword[except] identifier[EnvironmentError] keyword[as] identifier[e] :
keyword[if] identifier[error] == literal[string] : keyword[raise]
keyword[class] identifier[dummyPopen] ( identifier[object] ):
keyword[def] identifier[__init__] ( identifier[self] , identifier[e] ): identifier[self] . identifier[exception] = identifier[e]
keyword[def] identifier[communicate] ( identifier[self] , identifier[input] = keyword[None] ): keyword[return] ( literal[string] , literal[string] )
keyword[def] identifier[wait] ( identifier[self] ): keyword[return] - identifier[self] . identifier[exception] . identifier[errno]
identifier[stdin] = keyword[None]
keyword[class] identifier[f] ( identifier[object] ):
keyword[def] identifier[read] ( identifier[self] ): keyword[return] literal[string]
keyword[def] identifier[readline] ( identifier[self] ): keyword[return] literal[string]
keyword[def] identifier[__iter__] ( identifier[self] ): keyword[return] identifier[iter] (())
identifier[stdout] = identifier[stderr] = identifier[f] ()
keyword[return] identifier[dummyPopen] ( identifier[e] ) | def _subproc(scons_env, cmd, error='ignore', **kw):
"""Do common setup for a subprocess.Popen() call
This function is still in draft mode. We're going to need something like
it in the long run as more and more places use subprocess, but I'm sure
it'll have to be tweaked to get the full desired functionality.
one special arg (so far?), 'error', to tell what to do with exceptions.
"""
# allow std{in,out,err} to be "'devnull'"
io = kw.get('stdin')
if is_String(io) and io == 'devnull':
kw['stdin'] = open(os.devnull) # depends on [control=['if'], data=[]]
io = kw.get('stdout')
if is_String(io) and io == 'devnull':
kw['stdout'] = open(os.devnull, 'w') # depends on [control=['if'], data=[]]
io = kw.get('stderr')
if is_String(io) and io == 'devnull':
kw['stderr'] = open(os.devnull, 'w') # depends on [control=['if'], data=[]]
# Figure out what shell environment to use
ENV = kw.get('env', None)
if ENV is None:
ENV = get_default_ENV(scons_env) # depends on [control=['if'], data=['ENV']]
# Ensure that the ENV values are all strings:
new_env = {}
for (key, value) in ENV.items():
if is_List(value):
# If the value is a list, then we assume it is a path list,
# because that's a pretty common list-like value to stick
# in an environment variable:
value = SCons.Util.flatten_sequence(value)
new_env[key] = os.pathsep.join(map(str, value)) # depends on [control=['if'], data=[]]
else:
# It's either a string or something else. If it's a string,
# we still want to call str() because it might be a *Unicode*
# string, which makes subprocess.Popen() gag. If it isn't a
# string or a list, then we just coerce it to a string, which
# is the proper way to handle Dir and File instances and will
# produce something reasonable for just about everything else:
new_env[key] = str(value) # depends on [control=['for'], data=[]]
kw['env'] = new_env
try:
return subprocess.Popen(cmd, **kw) # depends on [control=['try'], data=[]]
except EnvironmentError as e:
if error == 'raise':
raise # depends on [control=['if'], data=[]]
# return a dummy Popen instance that only returns error
class dummyPopen(object):
def __init__(self, e):
self.exception = e
def communicate(self, input=None):
return ('', '')
def wait(self):
return -self.exception.errno
stdin = None
class f(object):
def read(self):
return ''
def readline(self):
return ''
def __iter__(self):
return iter(())
stdout = stderr = f()
return dummyPopen(e) # depends on [control=['except'], data=['e']] |
def get_disabled(self):
    """Sorted list of (username, napp_name) of disabled napps.

    A napp counts as disabled when it is installed but not enabled,
    i.e. the set difference of installed and enabled.
    """
    return sorted(set(self.get_installed()) - set(self.get_enabled()))
constant[Sorted list of (username, napp_name) of disabled napps.
The difference of installed and enabled.
]
variable[installed] assign[=] call[name[set], parameter[call[name[self].get_installed, parameter[]]]]
variable[enabled] assign[=] call[name[set], parameter[call[name[self].get_enabled, parameter[]]]]
return[call[name[sorted], parameter[binary_operation[name[installed] - name[enabled]]]]] | keyword[def] identifier[get_disabled] ( identifier[self] ):
literal[string]
identifier[installed] = identifier[set] ( identifier[self] . identifier[get_installed] ())
identifier[enabled] = identifier[set] ( identifier[self] . identifier[get_enabled] ())
keyword[return] identifier[sorted] ( identifier[installed] - identifier[enabled] ) | def get_disabled(self):
"""Sorted list of (username, napp_name) of disabled napps.
The difference of installed and enabled.
"""
installed = set(self.get_installed())
enabled = set(self.get_enabled())
return sorted(installed - enabled) |
def update_pull_request_iteration_statuses(self, patch_document, repository_id, pull_request_id, iteration_id, project=None):
    """UpdatePullRequestIterationStatuses.
    [Preview API] Update pull request iteration statuses collection. The only supported operation type is `remove`.
    :param :class:`<[JsonPatchOperation]> <azure.devops.v5_1.git.models.[JsonPatchOperation]>` patch_document: Operations to apply to the pull request statuses in JSON Patch format.
    :param str repository_id: The repository ID of the pull request's target branch.
    :param int pull_request_id: ID of the pull request.
    :param int iteration_id: ID of the pull request iteration.
    :param str project: Project ID or project name
    """
    # Build the URL route values, serializing each argument and skipping
    # any the caller left as None.
    route_specs = (
        ('project', 'project', project, 'str'),
        ('repositoryId', 'repository_id', repository_id, 'str'),
        ('pullRequestId', 'pull_request_id', pull_request_id, 'int'),
        ('iterationId', 'iteration_id', iteration_id, 'int'),
    )
    route_values = {}
    for route_key, arg_name, value, arg_type in route_specs:
        if value is not None:
            route_values[route_key] = self._serialize.url(arg_name, value, arg_type)
    # Serialize the JSON Patch document as the request body and fire the
    # PATCH request; the API returns no payload for this operation.
    content = self._serialize.body(patch_document, '[JsonPatchOperation]')
    self._send(http_method='PATCH',
               location_id='75cf11c5-979f-4038-a76e-058a06adf2bf',
               version='5.1-preview.1',
               route_values=route_values,
               content=content,
               media_type='application/json-patch+json')
constant[UpdatePullRequestIterationStatuses.
[Preview API] Update pull request iteration statuses collection. The only supported operation type is `remove`.
:param :class:`<[JsonPatchOperation]> <azure.devops.v5_1.git.models.[JsonPatchOperation]>` patch_document: Operations to apply to the pull request statuses in JSON Patch format.
:param str repository_id: The repository ID of the pull request’s target branch.
:param int pull_request_id: ID of the pull request.
:param int iteration_id: ID of the pull request iteration.
:param str project: Project ID or project name
]
variable[route_values] assign[=] dictionary[[], []]
if compare[name[project] is_not constant[None]] begin[:]
call[name[route_values]][constant[project]] assign[=] call[name[self]._serialize.url, parameter[constant[project], name[project], constant[str]]]
if compare[name[repository_id] is_not constant[None]] begin[:]
call[name[route_values]][constant[repositoryId]] assign[=] call[name[self]._serialize.url, parameter[constant[repository_id], name[repository_id], constant[str]]]
if compare[name[pull_request_id] is_not constant[None]] begin[:]
call[name[route_values]][constant[pullRequestId]] assign[=] call[name[self]._serialize.url, parameter[constant[pull_request_id], name[pull_request_id], constant[int]]]
if compare[name[iteration_id] is_not constant[None]] begin[:]
call[name[route_values]][constant[iterationId]] assign[=] call[name[self]._serialize.url, parameter[constant[iteration_id], name[iteration_id], constant[int]]]
variable[content] assign[=] call[name[self]._serialize.body, parameter[name[patch_document], constant[[JsonPatchOperation]]]]
call[name[self]._send, parameter[]] | keyword[def] identifier[update_pull_request_iteration_statuses] ( identifier[self] , identifier[patch_document] , identifier[repository_id] , identifier[pull_request_id] , identifier[iteration_id] , identifier[project] = keyword[None] ):
literal[string]
identifier[route_values] ={}
keyword[if] identifier[project] keyword[is] keyword[not] keyword[None] :
identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[project] , literal[string] )
keyword[if] identifier[repository_id] keyword[is] keyword[not] keyword[None] :
identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[repository_id] , literal[string] )
keyword[if] identifier[pull_request_id] keyword[is] keyword[not] keyword[None] :
identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[pull_request_id] , literal[string] )
keyword[if] identifier[iteration_id] keyword[is] keyword[not] keyword[None] :
identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[iteration_id] , literal[string] )
identifier[content] = identifier[self] . identifier[_serialize] . identifier[body] ( identifier[patch_document] , literal[string] )
identifier[self] . identifier[_send] ( identifier[http_method] = literal[string] ,
identifier[location_id] = literal[string] ,
identifier[version] = literal[string] ,
identifier[route_values] = identifier[route_values] ,
identifier[content] = identifier[content] ,
identifier[media_type] = literal[string] ) | def update_pull_request_iteration_statuses(self, patch_document, repository_id, pull_request_id, iteration_id, project=None):
"""UpdatePullRequestIterationStatuses.
[Preview API] Update pull request iteration statuses collection. The only supported operation type is `remove`.
:param :class:`<[JsonPatchOperation]> <azure.devops.v5_1.git.models.[JsonPatchOperation]>` patch_document: Operations to apply to the pull request statuses in JSON Patch format.
:param str repository_id: The repository ID of the pull request’s target branch.
:param int pull_request_id: ID of the pull request.
:param int iteration_id: ID of the pull request iteration.
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str') # depends on [control=['if'], data=['project']]
if repository_id is not None:
route_values['repositoryId'] = self._serialize.url('repository_id', repository_id, 'str') # depends on [control=['if'], data=['repository_id']]
if pull_request_id is not None:
route_values['pullRequestId'] = self._serialize.url('pull_request_id', pull_request_id, 'int') # depends on [control=['if'], data=['pull_request_id']]
if iteration_id is not None:
route_values['iterationId'] = self._serialize.url('iteration_id', iteration_id, 'int') # depends on [control=['if'], data=['iteration_id']]
content = self._serialize.body(patch_document, '[JsonPatchOperation]')
self._send(http_method='PATCH', location_id='75cf11c5-979f-4038-a76e-058a06adf2bf', version='5.1-preview.1', route_values=route_values, content=content, media_type='application/json-patch+json') |
def _get_nits(self, filename):
    """Iterate over the instances style checker and yield Nits.

    :param filename: str pointing to a file within the buildroot.
    """
    try:
        python_file = PythonFile.parse(filename, root=self._root_dir)
    except CheckSyntaxError as e:
        # An unparseable file produces exactly one syntax-error nit.
        yield e.as_nit()
        return
    if noqa_file_filter(python_file):
        # The whole file is marked noqa: nothing to report.
        return
    plugins = self._plugin_factories.items()
    if self._excluder:
        # Drop any plugins that are suppressed for this particular file.
        plugins = [(name, factory) for name, factory in plugins
                   if self._excluder.should_include(filename, name)]
    for plugin_name, plugin_factory in plugins:
        logged_header = False
        for nit in plugin_factory(python_file):
            if not logged_header:
                # NB: Emit the per-plugin debug header only once, and only
                # if the plugin actually produced at least one nit.
                self.log.debug('Nits from plugin {} for {}'.format(plugin_name, filename))
                logged_header = True
            if not nit.has_lines_to_display:
                yield nit
            elif all(not line_contains_noqa(line) for line in nit.lines):
                # Suppress nits whose every displayed line carries a noqa.
                yield nit
constant[Iterate over the instances style checker and yield Nits.
:param filename: str pointing to a file within the buildroot.
]
<ast.Try object at 0x7da1b224ba60>
if call[name[noqa_file_filter], parameter[name[python_file]]] begin[:]
return[None]
if name[self]._excluder begin[:]
variable[check_plugins] assign[=] <ast.ListComp object at 0x7da1b2249660>
for taget[tuple[[<ast.Name object at 0x7da1b224a200>, <ast.Name object at 0x7da1b2248b50>]]] in starred[name[check_plugins]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b22496f0>, <ast.Name object at 0x7da1b224a2c0>]]] in starred[call[name[enumerate], parameter[call[name[plugin_factory], parameter[name[python_file]]]]]] begin[:]
if compare[name[i] equal[==] constant[0]] begin[:]
call[name[self].log.debug, parameter[call[constant[Nits from plugin {} for {}].format, parameter[name[plugin_name], name[filename]]]]]
if <ast.UnaryOp object at 0x7da1b224bbe0> begin[:]
<ast.Yield object at 0x7da1b224bfd0>
continue
if call[name[all], parameter[<ast.GeneratorExp object at 0x7da1b2248be0>]] begin[:]
<ast.Yield object at 0x7da1b22482b0> | keyword[def] identifier[_get_nits] ( identifier[self] , identifier[filename] ):
literal[string]
keyword[try] :
identifier[python_file] = identifier[PythonFile] . identifier[parse] ( identifier[filename] , identifier[root] = identifier[self] . identifier[_root_dir] )
keyword[except] identifier[CheckSyntaxError] keyword[as] identifier[e] :
keyword[yield] identifier[e] . identifier[as_nit] ()
keyword[return]
keyword[if] identifier[noqa_file_filter] ( identifier[python_file] ):
keyword[return]
keyword[if] identifier[self] . identifier[_excluder] :
identifier[check_plugins] =[( identifier[plugin_name] , identifier[plugin_factory] )
keyword[for] identifier[plugin_name] , identifier[plugin_factory] keyword[in] identifier[self] . identifier[_plugin_factories] . identifier[items] ()
keyword[if] identifier[self] . identifier[_excluder] . identifier[should_include] ( identifier[filename] , identifier[plugin_name] )]
keyword[else] :
identifier[check_plugins] = identifier[self] . identifier[_plugin_factories] . identifier[items] ()
keyword[for] identifier[plugin_name] , identifier[plugin_factory] keyword[in] identifier[check_plugins] :
keyword[for] identifier[i] , identifier[nit] keyword[in] identifier[enumerate] ( identifier[plugin_factory] ( identifier[python_file] )):
keyword[if] identifier[i] == literal[int] :
identifier[self] . identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[plugin_name] , identifier[filename] ))
keyword[if] keyword[not] identifier[nit] . identifier[has_lines_to_display] :
keyword[yield] identifier[nit]
keyword[continue]
keyword[if] identifier[all] ( keyword[not] identifier[line_contains_noqa] ( identifier[line] ) keyword[for] identifier[line] keyword[in] identifier[nit] . identifier[lines] ):
keyword[yield] identifier[nit] | def _get_nits(self, filename):
"""Iterate over the instances style checker and yield Nits.
:param filename: str pointing to a file within the buildroot.
"""
try:
python_file = PythonFile.parse(filename, root=self._root_dir) # depends on [control=['try'], data=[]]
except CheckSyntaxError as e:
yield e.as_nit()
return # depends on [control=['except'], data=['e']]
if noqa_file_filter(python_file):
return # depends on [control=['if'], data=[]]
if self._excluder:
# Filter out any suppressed plugins
check_plugins = [(plugin_name, plugin_factory) for (plugin_name, plugin_factory) in self._plugin_factories.items() if self._excluder.should_include(filename, plugin_name)] # depends on [control=['if'], data=[]]
else:
check_plugins = self._plugin_factories.items()
for (plugin_name, plugin_factory) in check_plugins:
for (i, nit) in enumerate(plugin_factory(python_file)):
if i == 0:
# NB: Add debug log header for nits from each plugin, but only if there are nits from it.
self.log.debug('Nits from plugin {} for {}'.format(plugin_name, filename)) # depends on [control=['if'], data=[]]
if not nit.has_lines_to_display:
yield nit
continue # depends on [control=['if'], data=[]]
if all((not line_contains_noqa(line) for line in nit.lines)):
yield nit # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]] |
def make_module_spec(vocabulary_file, vocab_size, embeddings_dim,
                     num_oov_buckets, preprocess_text):
  """Makes a module spec to simply perform token to embedding lookups.

  Input of this module is a 1-D list of string tokens. For T tokens input and
  an M dimensional embedding table, the lookup result is a [T, M] shaped Tensor.

  Args:
    vocabulary_file: Text file where each line is a key in the vocabulary.
    vocab_size: The number of tokens contained in the vocabulary.
    embeddings_dim: The embedding dimension.
    num_oov_buckets: The number of out-of-vocabulary buckets.
    preprocess_text: Whether to preprocess the input tensor by removing
      punctuation and splitting on spaces.

  Returns:
    A module spec object used for constructing a TF-Hub module.
  """

  def module_fn():
    """Spec function for a token embedding module."""
    # Module input: a 1-D batch of already-tokenized strings.
    tokens = tf.placeholder(shape=[None], dtype=tf.string, name="tokens")
    # Embedding table of shape [vocab_size + num_oov_buckets, embeddings_dim].
    # It is zero-initialized here; the real values are presumably assigned
    # after module construction (e.g. restored from a checkpoint under
    # EMBEDDINGS_VAR_NAME) -- TODO confirm with the export pipeline.
    embeddings_var = tf.get_variable(
        initializer=tf.zeros([vocab_size + num_oov_buckets, embeddings_dim]),
        name=EMBEDDINGS_VAR_NAME,
        dtype=tf.float32)
    # Maps each token string to an integer id; tokens missing from the
    # vocabulary file hash into one of the num_oov_buckets trailing rows.
    lookup_table = tf.contrib.lookup.index_table_from_file(
        vocabulary_file=vocabulary_file,
        num_oov_buckets=num_oov_buckets,
    )
    ids = lookup_table.lookup(tokens)
    # Dense per-token lookup: result shape is [T, embeddings_dim].
    combined_embedding = tf.nn.embedding_lookup(params=embeddings_var, ids=ids)
    hub.add_signature("default", {"tokens": tokens},
                      {"default": combined_embedding})

  def module_fn_with_preprocessing():
    """Spec function for a full-text embedding module with preprocessing."""
    # Module input: a 1-D batch of raw (untokenized) sentences.
    sentences = tf.placeholder(shape=[None], dtype=tf.string, name="sentences")
    # Perform a minimalistic text preprocessing by removing punctuation and
    # splitting on spaces. (r"\pP" is presumably the RE2 Unicode-punctuation
    # character class -- verify against the tf.regex_replace backend.)
    normalized_sentences = tf.regex_replace(
        input=sentences, pattern=r"\pP", rewrite="")
    # string_split yields a SparseTensor of tokens, one row per sentence.
    tokens = tf.string_split(normalized_sentences, " ")
    # Same zero-initialized table as in module_fn; values are presumably
    # filled in later under EMBEDDINGS_VAR_NAME -- TODO confirm.
    embeddings_var = tf.get_variable(
        initializer=tf.zeros([vocab_size + num_oov_buckets, embeddings_dim]),
        name=EMBEDDINGS_VAR_NAME,
        dtype=tf.float32)
    lookup_table = tf.contrib.lookup.index_table_from_file(
        vocabulary_file=vocabulary_file,
        num_oov_buckets=num_oov_buckets,
    )
    # Replace the sparse string tokens with their integer ids, keeping the
    # same sparsity pattern.
    sparse_ids = tf.SparseTensor(
        indices=tokens.indices,
        values=lookup_table.lookup(tokens.values),
        dense_shape=tokens.dense_shape)

    # In case some of the input sentences are empty before or after
    # normalization, we will end up with empty rows. We do however want to
    # return embedding for every row, so we have to fill in the empty rows with
    # a default.
    sparse_ids, _ = tf.sparse_fill_empty_rows(
        sparse_ids, lookup_table.lookup(tf.constant("")))
    # In case all of the input sentences are empty before or after
    # normalization, we will end up with a SparseTensor with shape [?, 0]. After
    # filling in the empty rows we must ensure the shape is set properly to
    # [?, 1]. At this point, there are no empty rows, so the new shape will be
    # [sparse_ids.dense_shape[0], max(1, sparse_ids.dense_shape[1])].
    sparse_ids = tf.sparse_reset_shape(sparse_ids)

    # Combine the per-token embeddings into one vector per sentence; the
    # "sqrtn" combiner scales each row's sum by 1/sqrt(#tokens) per the
    # tf.nn.embedding_lookup_sparse documentation.
    combined_embedding = tf.nn.embedding_lookup_sparse(
        params=embeddings_var,
        sp_ids=sparse_ids,
        sp_weights=None,
        combiner="sqrtn")

    hub.add_signature("default", {"sentences": sentences},
                      {"default": combined_embedding})

  if preprocess_text:
    return hub.create_module_spec(module_fn_with_preprocessing)
  else:
    return hub.create_module_spec(module_fn)
constant[Makes a module spec to simply perform token to embedding lookups.
Input of this module is a 1-D list of string tokens. For T tokens input and
an M dimensional embedding table, the lookup result is a [T, M] shaped Tensor.
Args:
vocabulary_file: Text file where each line is a key in the vocabulary.
vocab_size: The number of tokens contained in the vocabulary.
embeddings_dim: The embedding dimension.
num_oov_buckets: The number of out-of-vocabulary buckets.
preprocess_text: Whether to preprocess the input tensor by removing
punctuation and splitting on spaces.
Returns:
A module spec object used for constructing a TF-Hub module.
]
def function[module_fn, parameter[]]:
constant[Spec function for a token embedding module.]
variable[tokens] assign[=] call[name[tf].placeholder, parameter[]]
variable[embeddings_var] assign[=] call[name[tf].get_variable, parameter[]]
variable[lookup_table] assign[=] call[name[tf].contrib.lookup.index_table_from_file, parameter[]]
variable[ids] assign[=] call[name[lookup_table].lookup, parameter[name[tokens]]]
variable[combined_embedding] assign[=] call[name[tf].nn.embedding_lookup, parameter[]]
call[name[hub].add_signature, parameter[constant[default], dictionary[[<ast.Constant object at 0x7da1b1fe44c0>], [<ast.Name object at 0x7da1b1fe6050>]], dictionary[[<ast.Constant object at 0x7da1b1fe5180>], [<ast.Name object at 0x7da1b1fe61a0>]]]]
def function[module_fn_with_preprocessing, parameter[]]:
constant[Spec function for a full-text embedding module with preprocessing.]
variable[sentences] assign[=] call[name[tf].placeholder, parameter[]]
variable[normalized_sentences] assign[=] call[name[tf].regex_replace, parameter[]]
variable[tokens] assign[=] call[name[tf].string_split, parameter[name[normalized_sentences], constant[ ]]]
variable[embeddings_var] assign[=] call[name[tf].get_variable, parameter[]]
variable[lookup_table] assign[=] call[name[tf].contrib.lookup.index_table_from_file, parameter[]]
variable[sparse_ids] assign[=] call[name[tf].SparseTensor, parameter[]]
<ast.Tuple object at 0x7da2044c0c70> assign[=] call[name[tf].sparse_fill_empty_rows, parameter[name[sparse_ids], call[name[lookup_table].lookup, parameter[call[name[tf].constant, parameter[constant[]]]]]]]
variable[sparse_ids] assign[=] call[name[tf].sparse_reset_shape, parameter[name[sparse_ids]]]
variable[combined_embedding] assign[=] call[name[tf].nn.embedding_lookup_sparse, parameter[]]
call[name[hub].add_signature, parameter[constant[default], dictionary[[<ast.Constant object at 0x7da2044c3dc0>], [<ast.Name object at 0x7da2044c3250>]], dictionary[[<ast.Constant object at 0x7da2044c02e0>], [<ast.Name object at 0x7da2044c3ac0>]]]]
if name[preprocess_text] begin[:]
return[call[name[hub].create_module_spec, parameter[name[module_fn_with_preprocessing]]]] | keyword[def] identifier[make_module_spec] ( identifier[vocabulary_file] , identifier[vocab_size] , identifier[embeddings_dim] ,
identifier[num_oov_buckets] , identifier[preprocess_text] ):
literal[string]
keyword[def] identifier[module_fn] ():
literal[string]
identifier[tokens] = identifier[tf] . identifier[placeholder] ( identifier[shape] =[ keyword[None] ], identifier[dtype] = identifier[tf] . identifier[string] , identifier[name] = literal[string] )
identifier[embeddings_var] = identifier[tf] . identifier[get_variable] (
identifier[initializer] = identifier[tf] . identifier[zeros] ([ identifier[vocab_size] + identifier[num_oov_buckets] , identifier[embeddings_dim] ]),
identifier[name] = identifier[EMBEDDINGS_VAR_NAME] ,
identifier[dtype] = identifier[tf] . identifier[float32] )
identifier[lookup_table] = identifier[tf] . identifier[contrib] . identifier[lookup] . identifier[index_table_from_file] (
identifier[vocabulary_file] = identifier[vocabulary_file] ,
identifier[num_oov_buckets] = identifier[num_oov_buckets] ,
)
identifier[ids] = identifier[lookup_table] . identifier[lookup] ( identifier[tokens] )
identifier[combined_embedding] = identifier[tf] . identifier[nn] . identifier[embedding_lookup] ( identifier[params] = identifier[embeddings_var] , identifier[ids] = identifier[ids] )
identifier[hub] . identifier[add_signature] ( literal[string] ,{ literal[string] : identifier[tokens] },
{ literal[string] : identifier[combined_embedding] })
keyword[def] identifier[module_fn_with_preprocessing] ():
literal[string]
identifier[sentences] = identifier[tf] . identifier[placeholder] ( identifier[shape] =[ keyword[None] ], identifier[dtype] = identifier[tf] . identifier[string] , identifier[name] = literal[string] )
identifier[normalized_sentences] = identifier[tf] . identifier[regex_replace] (
identifier[input] = identifier[sentences] , identifier[pattern] = literal[string] , identifier[rewrite] = literal[string] )
identifier[tokens] = identifier[tf] . identifier[string_split] ( identifier[normalized_sentences] , literal[string] )
identifier[embeddings_var] = identifier[tf] . identifier[get_variable] (
identifier[initializer] = identifier[tf] . identifier[zeros] ([ identifier[vocab_size] + identifier[num_oov_buckets] , identifier[embeddings_dim] ]),
identifier[name] = identifier[EMBEDDINGS_VAR_NAME] ,
identifier[dtype] = identifier[tf] . identifier[float32] )
identifier[lookup_table] = identifier[tf] . identifier[contrib] . identifier[lookup] . identifier[index_table_from_file] (
identifier[vocabulary_file] = identifier[vocabulary_file] ,
identifier[num_oov_buckets] = identifier[num_oov_buckets] ,
)
identifier[sparse_ids] = identifier[tf] . identifier[SparseTensor] (
identifier[indices] = identifier[tokens] . identifier[indices] ,
identifier[values] = identifier[lookup_table] . identifier[lookup] ( identifier[tokens] . identifier[values] ),
identifier[dense_shape] = identifier[tokens] . identifier[dense_shape] )
identifier[sparse_ids] , identifier[_] = identifier[tf] . identifier[sparse_fill_empty_rows] (
identifier[sparse_ids] , identifier[lookup_table] . identifier[lookup] ( identifier[tf] . identifier[constant] ( literal[string] )))
identifier[sparse_ids] = identifier[tf] . identifier[sparse_reset_shape] ( identifier[sparse_ids] )
identifier[combined_embedding] = identifier[tf] . identifier[nn] . identifier[embedding_lookup_sparse] (
identifier[params] = identifier[embeddings_var] ,
identifier[sp_ids] = identifier[sparse_ids] ,
identifier[sp_weights] = keyword[None] ,
identifier[combiner] = literal[string] )
identifier[hub] . identifier[add_signature] ( literal[string] ,{ literal[string] : identifier[sentences] },
{ literal[string] : identifier[combined_embedding] })
keyword[if] identifier[preprocess_text] :
keyword[return] identifier[hub] . identifier[create_module_spec] ( identifier[module_fn_with_preprocessing] )
keyword[else] :
keyword[return] identifier[hub] . identifier[create_module_spec] ( identifier[module_fn] ) | def make_module_spec(vocabulary_file, vocab_size, embeddings_dim, num_oov_buckets, preprocess_text):
"""Makes a module spec to simply perform token to embedding lookups.
Input of this module is a 1-D list of string tokens. For T tokens input and
an M dimensional embedding table, the lookup result is a [T, M] shaped Tensor.
Args:
vocabulary_file: Text file where each line is a key in the vocabulary.
vocab_size: The number of tokens contained in the vocabulary.
embeddings_dim: The embedding dimension.
num_oov_buckets: The number of out-of-vocabulary buckets.
preprocess_text: Whether to preprocess the input tensor by removing
punctuation and splitting on spaces.
Returns:
A module spec object used for constructing a TF-Hub module.
"""
def module_fn():
"""Spec function for a token embedding module."""
tokens = tf.placeholder(shape=[None], dtype=tf.string, name='tokens')
embeddings_var = tf.get_variable(initializer=tf.zeros([vocab_size + num_oov_buckets, embeddings_dim]), name=EMBEDDINGS_VAR_NAME, dtype=tf.float32)
lookup_table = tf.contrib.lookup.index_table_from_file(vocabulary_file=vocabulary_file, num_oov_buckets=num_oov_buckets)
ids = lookup_table.lookup(tokens)
combined_embedding = tf.nn.embedding_lookup(params=embeddings_var, ids=ids)
hub.add_signature('default', {'tokens': tokens}, {'default': combined_embedding})
def module_fn_with_preprocessing():
"""Spec function for a full-text embedding module with preprocessing."""
sentences = tf.placeholder(shape=[None], dtype=tf.string, name='sentences')
# Perform a minimalistic text preprocessing by removing punctuation and
# splitting on spaces.
normalized_sentences = tf.regex_replace(input=sentences, pattern='\\pP', rewrite='')
tokens = tf.string_split(normalized_sentences, ' ')
embeddings_var = tf.get_variable(initializer=tf.zeros([vocab_size + num_oov_buckets, embeddings_dim]), name=EMBEDDINGS_VAR_NAME, dtype=tf.float32)
lookup_table = tf.contrib.lookup.index_table_from_file(vocabulary_file=vocabulary_file, num_oov_buckets=num_oov_buckets)
sparse_ids = tf.SparseTensor(indices=tokens.indices, values=lookup_table.lookup(tokens.values), dense_shape=tokens.dense_shape)
# In case some of the input sentences are empty before or after
# normalization, we will end up with empty rows. We do however want to
# return embedding for every row, so we have to fill in the empty rows with
# a default.
(sparse_ids, _) = tf.sparse_fill_empty_rows(sparse_ids, lookup_table.lookup(tf.constant('')))
# In case all of the input sentences are empty before or after
# normalization, we will end up with a SparseTensor with shape [?, 0]. After
# filling in the empty rows we must ensure the shape is set properly to
# [?, 1]. At this point, there are no empty rows, so the new shape will be
# [sparse_ids.dense_shape[0], max(1, sparse_ids.dense_shape[1])].
sparse_ids = tf.sparse_reset_shape(sparse_ids)
combined_embedding = tf.nn.embedding_lookup_sparse(params=embeddings_var, sp_ids=sparse_ids, sp_weights=None, combiner='sqrtn')
hub.add_signature('default', {'sentences': sentences}, {'default': combined_embedding})
if preprocess_text:
return hub.create_module_spec(module_fn_with_preprocessing) # depends on [control=['if'], data=[]]
else:
return hub.create_module_spec(module_fn) |
def next_flightmode_colour(self):
    '''allocate a colour to be used for a flight mode'''
    # Wrap *before* indexing. The previous test used ``>`` which let the
    # index reach len(flightmode_colours): the lookup below then raised
    # IndexError before the reset branch could ever run.
    if self.flightmode_colour_index >= len(flightmode_colours):
        print("Out of colours; reusing")
        self.flightmode_colour_index = 0
    ret = flightmode_colours[self.flightmode_colour_index]
    self.flightmode_colour_index += 1
    return ret
constant[allocate a colour to be used for a flight mode]
if compare[name[self].flightmode_colour_index greater[>] call[name[len], parameter[name[flightmode_colours]]]] begin[:]
call[name[print], parameter[constant[Out of colours; reusing]]]
name[self].flightmode_colour_index assign[=] constant[0]
variable[ret] assign[=] call[name[flightmode_colours]][name[self].flightmode_colour_index]
<ast.AugAssign object at 0x7da1b23444f0>
return[name[ret]] | keyword[def] identifier[next_flightmode_colour] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[flightmode_colour_index] > identifier[len] ( identifier[flightmode_colours] ):
identifier[print] ( literal[string] )
identifier[self] . identifier[flightmode_colour_index] = literal[int]
identifier[ret] = identifier[flightmode_colours] [ identifier[self] . identifier[flightmode_colour_index] ]
identifier[self] . identifier[flightmode_colour_index] += literal[int]
keyword[return] identifier[ret] | def next_flightmode_colour(self):
"""allocate a colour to be used for a flight mode"""
if self.flightmode_colour_index > len(flightmode_colours):
print('Out of colours; reusing')
self.flightmode_colour_index = 0 # depends on [control=['if'], data=[]]
ret = flightmode_colours[self.flightmode_colour_index]
self.flightmode_colour_index += 1
return ret |
def store(self, mutagen_file, pictures):
    """Replace the file's embedded art with ``pictures``.
    ``pictures`` is a list of mutagen.flac.Picture instances.
    """
    # Drop whatever pictures are currently attached, then re-add ours
    # in the order given.
    mutagen_file.clear_pictures()
    add = mutagen_file.add_picture
    for picture in pictures:
        add(picture)
constant[``pictures`` is a list of mutagen.flac.Picture instances.
]
call[name[mutagen_file].clear_pictures, parameter[]]
for taget[name[pic]] in starred[name[pictures]] begin[:]
call[name[mutagen_file].add_picture, parameter[name[pic]]] | keyword[def] identifier[store] ( identifier[self] , identifier[mutagen_file] , identifier[pictures] ):
literal[string]
identifier[mutagen_file] . identifier[clear_pictures] ()
keyword[for] identifier[pic] keyword[in] identifier[pictures] :
identifier[mutagen_file] . identifier[add_picture] ( identifier[pic] ) | def store(self, mutagen_file, pictures):
"""``pictures`` is a list of mutagen.flac.Picture instances.
"""
mutagen_file.clear_pictures()
for pic in pictures:
mutagen_file.add_picture(pic) # depends on [control=['for'], data=['pic']] |
def build_usage_logger(self, log):
    '''Return a method that logs progress/mem usage.'''
    def _log_usage():
        # Peak RSS first, then the alignment-pair backlog counters.
        log.debug("{}mb peak memory", peak_memory())
        log.debug(
            "{} pending alignment pairs; {} peak pairs",
            self.pending_pair_count,
            self.pending_pair_peak_count,
        )
    return _log_usage
constant[Return a method that logs progress/mem usage.]
def function[_log_usage, parameter[]]:
call[name[log].debug, parameter[constant[{}mb peak memory], call[name[peak_memory], parameter[]]]]
call[name[log].debug, parameter[constant[{} pending alignment pairs; {} peak pairs], name[self].pending_pair_count, name[self].pending_pair_peak_count]]
return[name[_log_usage]] | keyword[def] identifier[build_usage_logger] ( identifier[self] , identifier[log] ):
literal[string]
keyword[def] identifier[_log_usage] ():
identifier[log] . identifier[debug] ( literal[string] , identifier[peak_memory] ())
identifier[log] . identifier[debug] ( literal[string] ,
identifier[self] . identifier[pending_pair_count] ,
identifier[self] . identifier[pending_pair_peak_count] )
keyword[return] identifier[_log_usage] | def build_usage_logger(self, log):
"""Return a method that logs progress/mem usage."""
def _log_usage():
log.debug('{}mb peak memory', peak_memory())
log.debug('{} pending alignment pairs; {} peak pairs', self.pending_pair_count, self.pending_pair_peak_count)
return _log_usage |
def dehtml(text):
    '''Remove HTML tag in input text and format the texts
    accordingly.
    Block-level tags (<p>, <br>, <ul>, <li>) are turned into blank
    lines / bullet markers; runs of whitespace inside text nodes are
    collapsed to single spaces. On any parsing failure the raw input
    is returned unchanged (deliberate best-effort behaviour).
    '''
    # added by BoPeng to handle html output from kernel
    #
    # Do not understand why, but I cannot define the class outside of the
    # function.
    try:
        # python 2
        from HTMLParser import HTMLParser
    except ImportError:
        # python 3
        from html.parser import HTMLParser
    # added by BoPeng to handle html output from kernel
    class _DeHTMLParser(HTMLParser):
        '''This parser analyzes input text, removes HTML tags such as
        <p>, <br>, <ul>, <li> etc and returns properly formatted texts.
        '''
        def __init__(self):
            HTMLParser.__init__(self)
            self.__text = []
        def handle_data(self, data):
            # Collapse internal runs of whitespace to single spaces and
            # append a trailing separator space for the next text node.
            text = data.strip()
            if len(text) > 0:
                text = re.sub('[ \t\r\n]+', ' ', text)
                self.__text.append(text + ' ')
        def handle_starttag(self, tag, attrs):
            if tag == 'p':
                self.__text.append('\n\n\n\n')
            elif tag == 'br':
                self.__text.append('\n\n')
            elif tag == 'ul':
                self.__text.append('')
            elif tag == 'li':
                self.__text.append('\n\n * ')
        def handle_endtag(self, tag):
            if tag == 'ul':
                self.__text.append('\n\n')
            if tag == 'li':
                self.__text.append('\n\n')
        def handle_startendtag(self, tag, attrs):
            # Self-closing <br/> gets the same treatment as <br>.
            if tag == 'br':
                self.__text.append('\n\n')
        def text(self):
            return ''.join(self.__text).strip()
    try:
        parser = _DeHTMLParser()
        parser.feed(text)
        parser.close()
        return parser.text()
    except Exception:
        # Was ``except Exception as e`` with *e* unused (F841); the broad
        # catch itself is intentional: fall back to the raw input.
        return text
constant[Remove HTML tag in input text and format the texts
accordingly. ]
<ast.Try object at 0x7da1b122e590>
class class[_DeHTMLParser, parameter[]] begin[:]
constant[This parser analyzes input text, removes HTML tags such as
<p>, <br>, <ul>, <li> etc and returns properly formatted texts.
]
def function[__init__, parameter[self]]:
call[name[HTMLParser].__init__, parameter[name[self]]]
name[self].__text assign[=] list[[]]
def function[handle_data, parameter[self, data]]:
variable[text] assign[=] call[name[data].strip, parameter[]]
if compare[call[name[len], parameter[name[text]]] greater[>] constant[0]] begin[:]
variable[text] assign[=] call[name[re].sub, parameter[constant[[
]+], constant[ ], name[text]]]
call[name[self].__text.append, parameter[binary_operation[name[text] + constant[ ]]]]
def function[handle_starttag, parameter[self, tag, attrs]]:
if compare[name[tag] equal[==] constant[p]] begin[:]
call[name[self].__text.append, parameter[constant[
]]]
def function[handle_endtag, parameter[self, tag]]:
if compare[name[tag] equal[==] constant[ul]] begin[:]
call[name[self].__text.append, parameter[constant[
]]]
if compare[name[tag] equal[==] constant[li]] begin[:]
call[name[self].__text.append, parameter[constant[
]]]
def function[handle_startendtag, parameter[self, tag, attrs]]:
if compare[name[tag] equal[==] constant[br]] begin[:]
call[name[self].__text.append, parameter[constant[
]]]
def function[text, parameter[self]]:
return[call[call[constant[].join, parameter[name[self].__text]].strip, parameter[]]]
<ast.Try object at 0x7da20c7c8af0> | keyword[def] identifier[dehtml] ( identifier[text] ):
literal[string]
keyword[try] :
keyword[from] identifier[HTMLParser] keyword[import] identifier[HTMLParser]
keyword[except] identifier[ImportError] :
keyword[from] identifier[html] . identifier[parser] keyword[import] identifier[HTMLParser]
keyword[class] identifier[_DeHTMLParser] ( identifier[HTMLParser] ):
literal[string]
keyword[def] identifier[__init__] ( identifier[self] ):
identifier[HTMLParser] . identifier[__init__] ( identifier[self] )
identifier[self] . identifier[__text] =[]
keyword[def] identifier[handle_data] ( identifier[self] , identifier[data] ):
identifier[text] = identifier[data] . identifier[strip] ()
keyword[if] identifier[len] ( identifier[text] )> literal[int] :
identifier[text] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[text] )
identifier[self] . identifier[__text] . identifier[append] ( identifier[text] + literal[string] )
keyword[def] identifier[handle_starttag] ( identifier[self] , identifier[tag] , identifier[attrs] ):
keyword[if] identifier[tag] == literal[string] :
identifier[self] . identifier[__text] . identifier[append] ( literal[string] )
keyword[elif] identifier[tag] == literal[string] :
identifier[self] . identifier[__text] . identifier[append] ( literal[string] )
keyword[elif] identifier[tag] == literal[string] :
identifier[self] . identifier[__text] . identifier[append] ( literal[string] )
keyword[elif] identifier[tag] == literal[string] :
identifier[self] . identifier[__text] . identifier[append] ( literal[string] )
keyword[def] identifier[handle_endtag] ( identifier[self] , identifier[tag] ):
keyword[if] identifier[tag] == literal[string] :
identifier[self] . identifier[__text] . identifier[append] ( literal[string] )
keyword[if] identifier[tag] == literal[string] :
identifier[self] . identifier[__text] . identifier[append] ( literal[string] )
keyword[def] identifier[handle_startendtag] ( identifier[self] , identifier[tag] , identifier[attrs] ):
keyword[if] identifier[tag] == literal[string] :
identifier[self] . identifier[__text] . identifier[append] ( literal[string] )
keyword[def] identifier[text] ( identifier[self] ):
keyword[return] literal[string] . identifier[join] ( identifier[self] . identifier[__text] ). identifier[strip] ()
keyword[try] :
identifier[parser] = identifier[_DeHTMLParser] ()
identifier[parser] . identifier[feed] ( identifier[text] )
identifier[parser] . identifier[close] ()
keyword[return] identifier[parser] . identifier[text] ()
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[return] identifier[text] | def dehtml(text):
"""Remove HTML tag in input text and format the texts
accordingly. """
# added by BoPeng to handle html output from kernel
#
# Do not understand why, but I cannot define the class outside of the
# function.
try:
# python 2
from HTMLParser import HTMLParser # depends on [control=['try'], data=[]]
except ImportError:
# python 3
from html.parser import HTMLParser # depends on [control=['except'], data=[]]
# added by BoPeng to handle html output from kernel
class _DeHTMLParser(HTMLParser):
"""This parser analyzes input text, removes HTML tags such as
<p>, <br>, <ul>, <li> etc and returns properly formatted texts.
"""
def __init__(self):
HTMLParser.__init__(self)
self.__text = []
def handle_data(self, data):
text = data.strip()
if len(text) > 0:
text = re.sub('[ \t\r\n]+', ' ', text)
self.__text.append(text + ' ') # depends on [control=['if'], data=[]]
def handle_starttag(self, tag, attrs):
if tag == 'p':
self.__text.append('\n\n\n\n') # depends on [control=['if'], data=[]]
elif tag == 'br':
self.__text.append('\n\n') # depends on [control=['if'], data=[]]
elif tag == 'ul':
self.__text.append('') # depends on [control=['if'], data=[]]
elif tag == 'li':
self.__text.append('\n\n * ') # depends on [control=['if'], data=[]]
def handle_endtag(self, tag):
if tag == 'ul':
self.__text.append('\n\n') # depends on [control=['if'], data=[]]
if tag == 'li':
self.__text.append('\n\n') # depends on [control=['if'], data=[]]
def handle_startendtag(self, tag, attrs):
if tag == 'br':
self.__text.append('\n\n') # depends on [control=['if'], data=[]]
def text(self):
return ''.join(self.__text).strip()
try:
parser = _DeHTMLParser()
parser.feed(text)
parser.close()
return parser.text() # depends on [control=['try'], data=[]]
except Exception as e:
return text # depends on [control=['except'], data=[]] |
def stream_execute(self, code: str = '', *,
                   mode: str = 'query',
                   opts: dict = None) -> WebSocketResponse:
    '''
    Executes a code snippet in the streaming mode.
    Since the returned websocket represents a run loop, there is no need to
    specify *run_id* explicitly.
    '''
    # Normalise the per-mode execution options first.
    if mode == 'query':
        # Query mode takes no options; ignore whatever was supplied.
        opts = {}
    elif mode == 'batch':
        opts = {} if opts is None else opts
        opts = {
            'clean': opts.get('clean', None),
            'build': opts.get('build', None),
            'buildLog': bool(opts.get('buildLog', False)),
            'exec': opts.get('exec', None),
        }
    else:
        raise BackendClientError(
            'Invalid stream-execution mode: {0}'.format(mode))
    params = {}
    if self.owner_access_key:
        params['owner_access_key'] = self.owner_access_key
    request = Request(self.session,
                      'GET', '/stream/kernel/{}/execute'.format(self.kernel_id),
                      params=params)
    async def send_code(ws):
        # Kick off the run loop by pushing the code snippet once the
        # websocket is established.
        await ws.send_json({
            'code': code,
            'mode': mode,
            'options': opts,
        })
    return request.connect_websocket(on_enter=send_code)
constant[
Executes a code snippet in the streaming mode.
Since the returned websocket represents a run loop, there is no need to
specify *run_id* explicitly.
]
variable[params] assign[=] dictionary[[], []]
if name[self].owner_access_key begin[:]
call[name[params]][constant[owner_access_key]] assign[=] name[self].owner_access_key
variable[opts] assign[=] <ast.IfExp object at 0x7da207f9a740>
if compare[name[mode] equal[==] constant[query]] begin[:]
variable[opts] assign[=] dictionary[[], []]
variable[request] assign[=] call[name[Request], parameter[name[self].session, constant[GET], call[constant[/stream/kernel/{}/execute].format, parameter[name[self].kernel_id]]]]
<ast.AsyncFunctionDef object at 0x7da20c6c7fd0>
return[call[name[request].connect_websocket, parameter[]]] | keyword[def] identifier[stream_execute] ( identifier[self] , identifier[code] : identifier[str] = literal[string] ,*,
identifier[mode] : identifier[str] = literal[string] ,
identifier[opts] : identifier[dict] = keyword[None] )-> identifier[WebSocketResponse] :
literal[string]
identifier[params] ={}
keyword[if] identifier[self] . identifier[owner_access_key] :
identifier[params] [ literal[string] ]= identifier[self] . identifier[owner_access_key]
identifier[opts] ={} keyword[if] identifier[opts] keyword[is] keyword[None] keyword[else] identifier[opts]
keyword[if] identifier[mode] == literal[string] :
identifier[opts] ={}
keyword[elif] identifier[mode] == literal[string] :
identifier[opts] ={
literal[string] : identifier[opts] . identifier[get] ( literal[string] , keyword[None] ),
literal[string] : identifier[opts] . identifier[get] ( literal[string] , keyword[None] ),
literal[string] : identifier[bool] ( identifier[opts] . identifier[get] ( literal[string] , keyword[False] )),
literal[string] : identifier[opts] . identifier[get] ( literal[string] , keyword[None] ),
}
keyword[else] :
identifier[msg] = literal[string] . identifier[format] ( identifier[mode] )
keyword[raise] identifier[BackendClientError] ( identifier[msg] )
identifier[request] = identifier[Request] ( identifier[self] . identifier[session] ,
literal[string] , literal[string] . identifier[format] ( identifier[self] . identifier[kernel_id] ),
identifier[params] = identifier[params] )
keyword[async] keyword[def] identifier[send_code] ( identifier[ws] ):
keyword[await] identifier[ws] . identifier[send_json] ({
literal[string] : identifier[code] ,
literal[string] : identifier[mode] ,
literal[string] : identifier[opts] ,
})
keyword[return] identifier[request] . identifier[connect_websocket] ( identifier[on_enter] = identifier[send_code] ) | def stream_execute(self, code: str='', *, mode: str='query', opts: dict=None) -> WebSocketResponse:
"""
Executes a code snippet in the streaming mode.
Since the returned websocket represents a run loop, there is no need to
specify *run_id* explicitly.
"""
params = {}
if self.owner_access_key:
params['owner_access_key'] = self.owner_access_key # depends on [control=['if'], data=[]]
opts = {} if opts is None else opts
if mode == 'query':
opts = {} # depends on [control=['if'], data=[]]
elif mode == 'batch':
opts = {'clean': opts.get('clean', None), 'build': opts.get('build', None), 'buildLog': bool(opts.get('buildLog', False)), 'exec': opts.get('exec', None)} # depends on [control=['if'], data=[]]
else:
msg = 'Invalid stream-execution mode: {0}'.format(mode)
raise BackendClientError(msg)
request = Request(self.session, 'GET', '/stream/kernel/{}/execute'.format(self.kernel_id), params=params)
async def send_code(ws):
await ws.send_json({'code': code, 'mode': mode, 'options': opts})
return request.connect_websocket(on_enter=send_code) |
def process(self, senderKeyName, senderKeyDistributionMessage):
    """
    Record an incoming sender-key distribution message in the key store.
    :type senderKeyName: SenderKeyName
    :type senderKeyDistributionMessage: SenderKeyDistributionMessage
    """
    message = senderKeyDistributionMessage
    # Load (or create) the record, fold the advertised state into it,
    # then persist it back under the same name.
    record = self.senderKeyStore.loadSenderKey(senderKeyName)
    record.addSenderKeyState(message.getId(),
                             message.getIteration(),
                             message.getChainKey(),
                             message.getSignatureKey())
    self.senderKeyStore.storeSenderKey(senderKeyName, record)
constant[
:type senderKeyName: SenderKeyName
:type senderKeyDistributionMessage: SenderKeyDistributionMessage
]
variable[senderKeyRecord] assign[=] call[name[self].senderKeyStore.loadSenderKey, parameter[name[senderKeyName]]]
call[name[senderKeyRecord].addSenderKeyState, parameter[call[name[senderKeyDistributionMessage].getId, parameter[]], call[name[senderKeyDistributionMessage].getIteration, parameter[]], call[name[senderKeyDistributionMessage].getChainKey, parameter[]], call[name[senderKeyDistributionMessage].getSignatureKey, parameter[]]]]
call[name[self].senderKeyStore.storeSenderKey, parameter[name[senderKeyName], name[senderKeyRecord]]] | keyword[def] identifier[process] ( identifier[self] , identifier[senderKeyName] , identifier[senderKeyDistributionMessage] ):
literal[string]
identifier[senderKeyRecord] = identifier[self] . identifier[senderKeyStore] . identifier[loadSenderKey] ( identifier[senderKeyName] )
identifier[senderKeyRecord] . identifier[addSenderKeyState] ( identifier[senderKeyDistributionMessage] . identifier[getId] (),
identifier[senderKeyDistributionMessage] . identifier[getIteration] (),
identifier[senderKeyDistributionMessage] . identifier[getChainKey] (),
identifier[senderKeyDistributionMessage] . identifier[getSignatureKey] ())
identifier[self] . identifier[senderKeyStore] . identifier[storeSenderKey] ( identifier[senderKeyName] , identifier[senderKeyRecord] ) | def process(self, senderKeyName, senderKeyDistributionMessage):
"""
:type senderKeyName: SenderKeyName
:type senderKeyDistributionMessage: SenderKeyDistributionMessage
"""
senderKeyRecord = self.senderKeyStore.loadSenderKey(senderKeyName)
senderKeyRecord.addSenderKeyState(senderKeyDistributionMessage.getId(), senderKeyDistributionMessage.getIteration(), senderKeyDistributionMessage.getChainKey(), senderKeyDistributionMessage.getSignatureKey())
self.senderKeyStore.storeSenderKey(senderKeyName, senderKeyRecord) |
def list_products(all=False, refresh=False, root=None):
    '''
    List all available or installed SUSE products.
    all
        List all products available or only installed. Default is False.
    refresh
        force a refresh if set to True.
        If set to False (default) it depends on zypper if a refresh is
        executed.
    root
        operate on a different root directory.
    Includes handling for OEM products, which read the OEM productline file
    and overwrite the release value.
    CLI Examples:
    .. code-block:: bash
        salt '*' pkg.list_products
        salt '*' pkg.list_products all=True
    '''
    if refresh:
        refresh_db(root)
    ret = list()
    OEM_PATH = '/var/lib/suseRegister/OEM'
    if root:
        # Re-anchor the OEM directory under the alternative root.
        OEM_PATH = os.path.join(root, os.path.relpath(OEM_PATH, os.path.sep))
    cmd = list()
    if not all:
        cmd.append('--disable-repos')
    cmd.append('products')
    if not all:
        # '-i' limits zypper's output to installed products only.
        cmd.append('-i')
    product_list = __zypper__(root=root).nolock.xml.call(*cmd).getElementsByTagName('product-list')
    if not product_list:
        return ret # No products found
    for prd in product_list[0].getElementsByTagName('product'):
        p_nfo = dict()
        for k_p_nfo, v_p_nfo in prd.attributes.items():
            if k_p_nfo in ['isbase', 'installed']:
                # Boolean attributes arrive as 'true'/'1' strings.
                p_nfo[k_p_nfo] = bool(v_p_nfo in ['true', '1'])
            elif v_p_nfo:
                p_nfo[k_p_nfo] = v_p_nfo
        eol = prd.getElementsByTagName('endoflife')
        if eol:
            p_nfo['eol'] = eol[0].getAttribute('text')
            # 'time_t' may be missing/empty; fall back to 0.
            p_nfo['eol_t'] = int(eol[0].getAttribute('time_t') or 0)
        # Flatten the multi-line <description> into a single line.
        p_nfo['description'] = " ".join(
            [line.strip() for line in _get_first_aggregate_text(
                prd.getElementsByTagName('description')
            ).split(os.linesep)]
        )
        if 'productline' in p_nfo and p_nfo['productline']:
            # OEM override: the productline file's first line, when
            # present, replaces the reported release value.
            oem_file = os.path.join(OEM_PATH, p_nfo['productline'])
            if os.path.isfile(oem_file):
                with salt.utils.files.fopen(oem_file, 'r') as rfile:
                    oem_release = salt.utils.stringutils.to_unicode(rfile.readline()).strip()
                    if oem_release:
                        p_nfo['release'] = oem_release
        ret.append(p_nfo)
    return ret
constant[
List all available or installed SUSE products.
all
List all products available or only installed. Default is False.
refresh
force a refresh if set to True.
If set to False (default) it depends on zypper if a refresh is
executed.
root
operate on a different root directory.
Includes handling for OEM products, which read the OEM productline file
and overwrite the release value.
CLI Examples:
.. code-block:: bash
salt '*' pkg.list_products
salt '*' pkg.list_products all=True
]
if name[refresh] begin[:]
call[name[refresh_db], parameter[name[root]]]
variable[ret] assign[=] call[name[list], parameter[]]
variable[OEM_PATH] assign[=] constant[/var/lib/suseRegister/OEM]
if name[root] begin[:]
variable[OEM_PATH] assign[=] call[name[os].path.join, parameter[name[root], call[name[os].path.relpath, parameter[name[OEM_PATH], name[os].path.sep]]]]
variable[cmd] assign[=] call[name[list], parameter[]]
if <ast.UnaryOp object at 0x7da1b1c1b4c0> begin[:]
call[name[cmd].append, parameter[constant[--disable-repos]]]
call[name[cmd].append, parameter[constant[products]]]
if <ast.UnaryOp object at 0x7da1b1c19d20> begin[:]
call[name[cmd].append, parameter[constant[-i]]]
variable[product_list] assign[=] call[call[call[name[__zypper__], parameter[]].nolock.xml.call, parameter[<ast.Starred object at 0x7da1b1c19f00>]].getElementsByTagName, parameter[constant[product-list]]]
if <ast.UnaryOp object at 0x7da1b1c19d80> begin[:]
return[name[ret]]
for taget[name[prd]] in starred[call[call[name[product_list]][constant[0]].getElementsByTagName, parameter[constant[product]]]] begin[:]
variable[p_nfo] assign[=] call[name[dict], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b1c18820>, <ast.Name object at 0x7da1b1c19690>]]] in starred[call[name[prd].attributes.items, parameter[]]] begin[:]
if compare[name[k_p_nfo] in list[[<ast.Constant object at 0x7da1b1ca0f70>, <ast.Constant object at 0x7da1b1ca2ec0>]]] begin[:]
call[name[p_nfo]][name[k_p_nfo]] assign[=] call[name[bool], parameter[compare[name[v_p_nfo] in list[[<ast.Constant object at 0x7da1b1ca2380>, <ast.Constant object at 0x7da1b1ca2050>]]]]]
variable[eol] assign[=] call[name[prd].getElementsByTagName, parameter[constant[endoflife]]]
if name[eol] begin[:]
call[name[p_nfo]][constant[eol]] assign[=] call[call[name[eol]][constant[0]].getAttribute, parameter[constant[text]]]
call[name[p_nfo]][constant[eol_t]] assign[=] call[name[int], parameter[<ast.BoolOp object at 0x7da1b1ca32e0>]]
call[name[p_nfo]][constant[description]] assign[=] call[constant[ ].join, parameter[<ast.ListComp object at 0x7da1b1ca3d30>]]
if <ast.BoolOp object at 0x7da1b1ca07f0> begin[:]
variable[oem_file] assign[=] call[name[os].path.join, parameter[name[OEM_PATH], call[name[p_nfo]][constant[productline]]]]
if call[name[os].path.isfile, parameter[name[oem_file]]] begin[:]
with call[name[salt].utils.files.fopen, parameter[name[oem_file], constant[r]]] begin[:]
variable[oem_release] assign[=] call[call[name[salt].utils.stringutils.to_unicode, parameter[call[name[rfile].readline, parameter[]]]].strip, parameter[]]
if name[oem_release] begin[:]
call[name[p_nfo]][constant[release]] assign[=] name[oem_release]
call[name[ret].append, parameter[name[p_nfo]]]
return[name[ret]] | keyword[def] identifier[list_products] ( identifier[all] = keyword[False] , identifier[refresh] = keyword[False] , identifier[root] = keyword[None] ):
literal[string]
keyword[if] identifier[refresh] :
identifier[refresh_db] ( identifier[root] )
identifier[ret] = identifier[list] ()
identifier[OEM_PATH] = literal[string]
keyword[if] identifier[root] :
identifier[OEM_PATH] = identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[os] . identifier[path] . identifier[relpath] ( identifier[OEM_PATH] , identifier[os] . identifier[path] . identifier[sep] ))
identifier[cmd] = identifier[list] ()
keyword[if] keyword[not] identifier[all] :
identifier[cmd] . identifier[append] ( literal[string] )
identifier[cmd] . identifier[append] ( literal[string] )
keyword[if] keyword[not] identifier[all] :
identifier[cmd] . identifier[append] ( literal[string] )
identifier[product_list] = identifier[__zypper__] ( identifier[root] = identifier[root] ). identifier[nolock] . identifier[xml] . identifier[call] (* identifier[cmd] ). identifier[getElementsByTagName] ( literal[string] )
keyword[if] keyword[not] identifier[product_list] :
keyword[return] identifier[ret]
keyword[for] identifier[prd] keyword[in] identifier[product_list] [ literal[int] ]. identifier[getElementsByTagName] ( literal[string] ):
identifier[p_nfo] = identifier[dict] ()
keyword[for] identifier[k_p_nfo] , identifier[v_p_nfo] keyword[in] identifier[prd] . identifier[attributes] . identifier[items] ():
keyword[if] identifier[k_p_nfo] keyword[in] [ literal[string] , literal[string] ]:
identifier[p_nfo] [ identifier[k_p_nfo] ]= identifier[bool] ( identifier[v_p_nfo] keyword[in] [ literal[string] , literal[string] ])
keyword[elif] identifier[v_p_nfo] :
identifier[p_nfo] [ identifier[k_p_nfo] ]= identifier[v_p_nfo]
identifier[eol] = identifier[prd] . identifier[getElementsByTagName] ( literal[string] )
keyword[if] identifier[eol] :
identifier[p_nfo] [ literal[string] ]= identifier[eol] [ literal[int] ]. identifier[getAttribute] ( literal[string] )
identifier[p_nfo] [ literal[string] ]= identifier[int] ( identifier[eol] [ literal[int] ]. identifier[getAttribute] ( literal[string] ) keyword[or] literal[int] )
identifier[p_nfo] [ literal[string] ]= literal[string] . identifier[join] (
[ identifier[line] . identifier[strip] () keyword[for] identifier[line] keyword[in] identifier[_get_first_aggregate_text] (
identifier[prd] . identifier[getElementsByTagName] ( literal[string] )
). identifier[split] ( identifier[os] . identifier[linesep] )]
)
keyword[if] literal[string] keyword[in] identifier[p_nfo] keyword[and] identifier[p_nfo] [ literal[string] ]:
identifier[oem_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[OEM_PATH] , identifier[p_nfo] [ literal[string] ])
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[oem_file] ):
keyword[with] identifier[salt] . identifier[utils] . identifier[files] . identifier[fopen] ( identifier[oem_file] , literal[string] ) keyword[as] identifier[rfile] :
identifier[oem_release] = identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_unicode] ( identifier[rfile] . identifier[readline] ()). identifier[strip] ()
keyword[if] identifier[oem_release] :
identifier[p_nfo] [ literal[string] ]= identifier[oem_release]
identifier[ret] . identifier[append] ( identifier[p_nfo] )
keyword[return] identifier[ret] | def list_products(all=False, refresh=False, root=None):
"""
List all available or installed SUSE products.
all
List all products available or only installed. Default is False.
refresh
force a refresh if set to True.
If set to False (default) it depends on zypper if a refresh is
executed.
root
operate on a different root directory.
Includes handling for OEM products, which read the OEM productline file
and overwrite the release value.
CLI Examples:
.. code-block:: bash
salt '*' pkg.list_products
salt '*' pkg.list_products all=True
"""
if refresh:
refresh_db(root) # depends on [control=['if'], data=[]]
ret = list()
OEM_PATH = '/var/lib/suseRegister/OEM'
if root:
OEM_PATH = os.path.join(root, os.path.relpath(OEM_PATH, os.path.sep)) # depends on [control=['if'], data=[]]
cmd = list()
if not all:
cmd.append('--disable-repos') # depends on [control=['if'], data=[]]
cmd.append('products')
if not all:
cmd.append('-i') # depends on [control=['if'], data=[]]
product_list = __zypper__(root=root).nolock.xml.call(*cmd).getElementsByTagName('product-list')
if not product_list:
return ret # No products found # depends on [control=['if'], data=[]]
for prd in product_list[0].getElementsByTagName('product'):
p_nfo = dict()
for (k_p_nfo, v_p_nfo) in prd.attributes.items():
if k_p_nfo in ['isbase', 'installed']:
p_nfo[k_p_nfo] = bool(v_p_nfo in ['true', '1']) # depends on [control=['if'], data=['k_p_nfo']]
elif v_p_nfo:
p_nfo[k_p_nfo] = v_p_nfo # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
eol = prd.getElementsByTagName('endoflife')
if eol:
p_nfo['eol'] = eol[0].getAttribute('text')
p_nfo['eol_t'] = int(eol[0].getAttribute('time_t') or 0) # depends on [control=['if'], data=[]]
p_nfo['description'] = ' '.join([line.strip() for line in _get_first_aggregate_text(prd.getElementsByTagName('description')).split(os.linesep)])
if 'productline' in p_nfo and p_nfo['productline']:
oem_file = os.path.join(OEM_PATH, p_nfo['productline'])
if os.path.isfile(oem_file):
with salt.utils.files.fopen(oem_file, 'r') as rfile:
oem_release = salt.utils.stringutils.to_unicode(rfile.readline()).strip()
if oem_release:
p_nfo['release'] = oem_release # depends on [control=['if'], data=[]] # depends on [control=['with'], data=['rfile']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
ret.append(p_nfo) # depends on [control=['for'], data=['prd']]
return ret |
def execute(self, i, o):
    """
    Executes the command.

    Asks for confirmation, then rolls back migration batches in a loop
    until nothing is left to undo.

    :type i: cleo.inputs.input.Input
    :type o: cleo.outputs.output.Output
    """
    super(ResetCommand, self).execute(i, o)

    confirmed = self.get_helper('dialog').ask_confirmation(
        o,
        '<question>Are you sure you want to reset all of the migrations?</question> ',
        False
    )
    if not confirmed:
        return

    repository = DatabaseMigrationRepository(self._resolver, 'migrations')
    migrator = Migrator(repository, self._resolver)
    self._prepare_database(migrator, i.get_option('database'), i, o)

    pretend = bool(i.get_option('pretend'))
    path = i.get_option('path')
    if path is None:
        path = self._get_migration_path()

    # Keep rolling back until a pass undoes no migrations.
    while True:
        rolled_back = migrator.rollback(path, pretend)
        for note in migrator.get_notes():
            o.writeln(note)
        if rolled_back == 0:
            break
constant[
Executes the command.
:type i: cleo.inputs.input.Input
:type o: cleo.outputs.output.Output
]
call[call[name[super], parameter[name[ResetCommand], name[self]]].execute, parameter[name[i], name[o]]]
variable[dialog] assign[=] call[name[self].get_helper, parameter[constant[dialog]]]
variable[confirm] assign[=] call[name[dialog].ask_confirmation, parameter[name[o], constant[<question>Are you sure you want to reset all of the migrations?</question> ], constant[False]]]
if <ast.UnaryOp object at 0x7da1b10f7ca0> begin[:]
return[None]
variable[database] assign[=] call[name[i].get_option, parameter[constant[database]]]
variable[repository] assign[=] call[name[DatabaseMigrationRepository], parameter[name[self]._resolver, constant[migrations]]]
variable[migrator] assign[=] call[name[Migrator], parameter[name[repository], name[self]._resolver]]
call[name[self]._prepare_database, parameter[name[migrator], name[database], name[i], name[o]]]
variable[pretend] assign[=] call[name[bool], parameter[call[name[i].get_option, parameter[constant[pretend]]]]]
variable[path] assign[=] call[name[i].get_option, parameter[constant[path]]]
if compare[name[path] is constant[None]] begin[:]
variable[path] assign[=] call[name[self]._get_migration_path, parameter[]]
while constant[True] begin[:]
variable[count] assign[=] call[name[migrator].rollback, parameter[name[path], name[pretend]]]
for taget[name[note]] in starred[call[name[migrator].get_notes, parameter[]]] begin[:]
call[name[o].writeln, parameter[name[note]]]
if compare[name[count] equal[==] constant[0]] begin[:]
break | keyword[def] identifier[execute] ( identifier[self] , identifier[i] , identifier[o] ):
literal[string]
identifier[super] ( identifier[ResetCommand] , identifier[self] ). identifier[execute] ( identifier[i] , identifier[o] )
identifier[dialog] = identifier[self] . identifier[get_helper] ( literal[string] )
identifier[confirm] = identifier[dialog] . identifier[ask_confirmation] (
identifier[o] ,
literal[string] ,
keyword[False]
)
keyword[if] keyword[not] identifier[confirm] :
keyword[return]
identifier[database] = identifier[i] . identifier[get_option] ( literal[string] )
identifier[repository] = identifier[DatabaseMigrationRepository] ( identifier[self] . identifier[_resolver] , literal[string] )
identifier[migrator] = identifier[Migrator] ( identifier[repository] , identifier[self] . identifier[_resolver] )
identifier[self] . identifier[_prepare_database] ( identifier[migrator] , identifier[database] , identifier[i] , identifier[o] )
identifier[pretend] = identifier[bool] ( identifier[i] . identifier[get_option] ( literal[string] ))
identifier[path] = identifier[i] . identifier[get_option] ( literal[string] )
keyword[if] identifier[path] keyword[is] keyword[None] :
identifier[path] = identifier[self] . identifier[_get_migration_path] ()
keyword[while] keyword[True] :
identifier[count] = identifier[migrator] . identifier[rollback] ( identifier[path] , identifier[pretend] )
keyword[for] identifier[note] keyword[in] identifier[migrator] . identifier[get_notes] ():
identifier[o] . identifier[writeln] ( identifier[note] )
keyword[if] identifier[count] == literal[int] :
keyword[break] | def execute(self, i, o):
"""
Executes the command.
:type i: cleo.inputs.input.Input
:type o: cleo.outputs.output.Output
"""
super(ResetCommand, self).execute(i, o)
dialog = self.get_helper('dialog')
confirm = dialog.ask_confirmation(o, '<question>Are you sure you want to reset all of the migrations?</question> ', False)
if not confirm:
return # depends on [control=['if'], data=[]]
database = i.get_option('database')
repository = DatabaseMigrationRepository(self._resolver, 'migrations')
migrator = Migrator(repository, self._resolver)
self._prepare_database(migrator, database, i, o)
pretend = bool(i.get_option('pretend'))
path = i.get_option('path')
if path is None:
path = self._get_migration_path() # depends on [control=['if'], data=['path']]
while True:
count = migrator.rollback(path, pretend)
for note in migrator.get_notes():
o.writeln(note) # depends on [control=['for'], data=['note']]
if count == 0:
break # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] |
def _from_stream(cls, stream, blob, filename=None):
    """
    Return an instance of the |Image| subclass corresponding to the
    format of the image in *stream*.
    """
    header = _ImageHeaderFactory(stream)
    # Fall back to a generic name built from the format's default extension.
    name = filename if filename is not None else 'image.%s' % header.default_ext
    return cls(blob, name, header)
constant[
Return an instance of the |Image| subclass corresponding to the
format of the image in *stream*.
]
variable[image_header] assign[=] call[name[_ImageHeaderFactory], parameter[name[stream]]]
if compare[name[filename] is constant[None]] begin[:]
variable[filename] assign[=] binary_operation[constant[image.%s] <ast.Mod object at 0x7da2590d6920> name[image_header].default_ext]
return[call[name[cls], parameter[name[blob], name[filename], name[image_header]]]] | keyword[def] identifier[_from_stream] ( identifier[cls] , identifier[stream] , identifier[blob] , identifier[filename] = keyword[None] ):
literal[string]
identifier[image_header] = identifier[_ImageHeaderFactory] ( identifier[stream] )
keyword[if] identifier[filename] keyword[is] keyword[None] :
identifier[filename] = literal[string] % identifier[image_header] . identifier[default_ext]
keyword[return] identifier[cls] ( identifier[blob] , identifier[filename] , identifier[image_header] ) | def _from_stream(cls, stream, blob, filename=None):
"""
Return an instance of the |Image| subclass corresponding to the
format of the image in *stream*.
"""
image_header = _ImageHeaderFactory(stream)
if filename is None:
filename = 'image.%s' % image_header.default_ext # depends on [control=['if'], data=['filename']]
return cls(blob, filename, image_header) |
def lnprior(self, X):
    """
    Use a uniform, bounded prior.

    Returns 0.0 (a flat log-density, up to a constant) when every
    component of ``X`` lies within the box spanned by
    ``self._lower_left`` and ``self._upper_right``; -inf otherwise.
    """
    out_of_bounds = np.any(X < self._lower_left) or np.any(X > self._upper_right)
    return -np.inf if out_of_bounds else 0.0
constant[
Use a uniform, bounded prior.
]
if <ast.BoolOp object at 0x7da1b25360b0> begin[:]
return[<ast.UnaryOp object at 0x7da1b258aef0>] | keyword[def] identifier[lnprior] ( identifier[self] , identifier[X] ):
literal[string]
keyword[if] identifier[np] . identifier[any] ( identifier[X] < identifier[self] . identifier[_lower_left] ) keyword[or] identifier[np] . identifier[any] ( identifier[X] > identifier[self] . identifier[_upper_right] ):
keyword[return] - identifier[np] . identifier[inf]
keyword[else] :
keyword[return] literal[int] | def lnprior(self, X):
"""
Use a uniform, bounded prior.
"""
if np.any(X < self._lower_left) or np.any(X > self._upper_right):
return -np.inf # depends on [control=['if'], data=[]]
else:
return 0.0 |
def generate_python_config(self):
    """
    Update Python server configuration with the options saved in our
    config system.

    Builds a fresh copy of PYTHON_CONFIG and fills in the server launch
    options plus the per-plugin settings (pycodestyle, pyflakes,
    pydocstyle, jedi_*) read via ``self.get_option``.

    Returns the complete pyls server configuration dict.
    """
    python_config = PYTHON_CONFIG.copy()

    # Server options
    cmd = self.get_option('advanced/command_launch')
    host = self.get_option('advanced/host')
    port = self.get_option('advanced/port')

    # Pycodestyle
    # Comma-separated option strings are split into lists here; each
    # entry is whitespace-stripped when the dicts below are built.
    cs_exclude = self.get_option('pycodestyle/exclude').split(',')
    cs_filename = self.get_option('pycodestyle/filename').split(',')
    cs_select = self.get_option('pycodestyle/select').split(',')
    cs_ignore = self.get_option('pycodestyle/ignore').split(',')
    cs_max_line_length = self.get_option('pycodestyle/max_line_length')

    pycodestyle = {
        'enabled': self.get_option('pycodestyle'),
        'exclude': [exclude.strip() for exclude in cs_exclude],
        'filename': [filename.strip() for filename in cs_filename],
        'select': [select.strip() for select in cs_select],
        'ignore': [ignore.strip() for ignore in cs_ignore],
        'hangClosing': False,
        'maxLineLength': cs_max_line_length
    }

    # Linting - Pyflakes
    pyflakes = {
        'enabled': self.get_option('pyflakes')
    }

    # Pydocstyle
    convention = self.get_option('pydocstyle/convention')

    # With the 'Custom' convention the user-provided ignore/select
    # lists replace the defaults wholesale ('ignore'/'select' keys);
    # otherwise they are layered on top of the chosen convention via
    # 'addIgnore'/'addSelect'.
    if convention == 'Custom':
        ds_ignore = self.get_option('pydocstyle/ignore').split(',')
        ds_select = self.get_option('pydocstyle/select').split(',')
        ds_add_ignore = []
        ds_add_select = []
    else:
        ds_ignore = []
        ds_select = []
        ds_add_ignore = self.get_option('pydocstyle/ignore').split(',')
        ds_add_select = self.get_option('pydocstyle/select').split(',')

    pydocstyle = {
        'enabled': self.get_option('pydocstyle'),
        'convention': convention,
        'addIgnore': [ignore.strip() for ignore in ds_add_ignore],
        'addSelect': [select.strip() for select in ds_add_select],
        'ignore': [ignore.strip() for ignore in ds_ignore],
        'select': [select.strip() for select in ds_select],
        'match': self.get_option('pydocstyle/match'),
        'matchDir': self.get_option('pydocstyle/match_dir')
    }

    # Code completion
    jedi_completion = {
        'enabled': self.get_option('code_completion'),
        'include_params': False
    }

    jedi_signature_help = {
        'enabled': self.get_option('jedi_signature_help')
    }

    jedi_definition = {
        'enabled': self.get_option('jedi_definition'),
        'follow_imports': self.get_option('jedi_definition/follow_imports')
    }

    # Advanced
    external_server = self.get_option('advanced/external')

    # Setup options in json
    python_config['cmd'] = cmd
    if host in self.LOCALHOST:
        # '{host}'/'{port}' are literal placeholders — presumably
        # substituted later by the launcher; TODO confirm against caller.
        python_config['args'] = '--host {host} --port {port} --tcp'
    else:
        python_config['args'] = ''
    python_config['external'] = external_server
    python_config['host'] = host
    python_config['port'] = port

    plugins = python_config['configurations']['pyls']['plugins']
    plugins['pycodestyle'] = pycodestyle
    plugins['pyflakes'] = pyflakes
    plugins['pydocstyle'] = pydocstyle
    plugins['jedi_completion'] = jedi_completion
    plugins['jedi_signature_help'] = jedi_signature_help
    plugins['preload']['modules'] = self.get_option('preload_modules')
    plugins['jedi_definition'] = jedi_definition

    return python_config
constant[
Update Python server configuration with the options saved in our
config system.
]
variable[python_config] assign[=] call[name[PYTHON_CONFIG].copy, parameter[]]
variable[cmd] assign[=] call[name[self].get_option, parameter[constant[advanced/command_launch]]]
variable[host] assign[=] call[name[self].get_option, parameter[constant[advanced/host]]]
variable[port] assign[=] call[name[self].get_option, parameter[constant[advanced/port]]]
variable[cs_exclude] assign[=] call[call[name[self].get_option, parameter[constant[pycodestyle/exclude]]].split, parameter[constant[,]]]
variable[cs_filename] assign[=] call[call[name[self].get_option, parameter[constant[pycodestyle/filename]]].split, parameter[constant[,]]]
variable[cs_select] assign[=] call[call[name[self].get_option, parameter[constant[pycodestyle/select]]].split, parameter[constant[,]]]
variable[cs_ignore] assign[=] call[call[name[self].get_option, parameter[constant[pycodestyle/ignore]]].split, parameter[constant[,]]]
variable[cs_max_line_length] assign[=] call[name[self].get_option, parameter[constant[pycodestyle/max_line_length]]]
variable[pycodestyle] assign[=] dictionary[[<ast.Constant object at 0x7da20c6e7e50>, <ast.Constant object at 0x7da20c6e4ac0>, <ast.Constant object at 0x7da20c6e6c80>, <ast.Constant object at 0x7da20c6e7160>, <ast.Constant object at 0x7da20c6e58a0>, <ast.Constant object at 0x7da20c6e55a0>, <ast.Constant object at 0x7da20c6e6830>], [<ast.Call object at 0x7da20c6e5930>, <ast.ListComp object at 0x7da20c6e70a0>, <ast.ListComp object at 0x7da20c6e6080>, <ast.ListComp object at 0x7da20c6e62f0>, <ast.ListComp object at 0x7da20c6e5750>, <ast.Constant object at 0x7da20c6e43a0>, <ast.Name object at 0x7da20c6e5570>]]
variable[pyflakes] assign[=] dictionary[[<ast.Constant object at 0x7da20c6e6110>], [<ast.Call object at 0x7da20c6e5c90>]]
variable[convention] assign[=] call[name[self].get_option, parameter[constant[pydocstyle/convention]]]
if compare[name[convention] equal[==] constant[Custom]] begin[:]
variable[ds_ignore] assign[=] call[call[name[self].get_option, parameter[constant[pydocstyle/ignore]]].split, parameter[constant[,]]]
variable[ds_select] assign[=] call[call[name[self].get_option, parameter[constant[pydocstyle/select]]].split, parameter[constant[,]]]
variable[ds_add_ignore] assign[=] list[[]]
variable[ds_add_select] assign[=] list[[]]
variable[pydocstyle] assign[=] dictionary[[<ast.Constant object at 0x7da1b2346980>, <ast.Constant object at 0x7da1b2345060>, <ast.Constant object at 0x7da1b2346350>, <ast.Constant object at 0x7da1b2347d60>, <ast.Constant object at 0x7da1b23451e0>, <ast.Constant object at 0x7da1b23459f0>, <ast.Constant object at 0x7da1b2346560>, <ast.Constant object at 0x7da1b23450c0>], [<ast.Call object at 0x7da1b23452a0>, <ast.Name object at 0x7da1b2346e00>, <ast.ListComp object at 0x7da1b2345510>, <ast.ListComp object at 0x7da1b2346860>, <ast.ListComp object at 0x7da1b2345180>, <ast.ListComp object at 0x7da1b2344310>, <ast.Call object at 0x7da1b2344b20>, <ast.Call object at 0x7da1b2345990>]]
variable[jedi_completion] assign[=] dictionary[[<ast.Constant object at 0x7da1b2347400>, <ast.Constant object at 0x7da1b2345720>], [<ast.Call object at 0x7da1b2344880>, <ast.Constant object at 0x7da1b2344df0>]]
variable[jedi_signature_help] assign[=] dictionary[[<ast.Constant object at 0x7da1b2344730>], [<ast.Call object at 0x7da1b2346800>]]
variable[jedi_definition] assign[=] dictionary[[<ast.Constant object at 0x7da1b2344fa0>, <ast.Constant object at 0x7da1b23445e0>], [<ast.Call object at 0x7da1b2345c90>, <ast.Call object at 0x7da1b2345300>]]
variable[external_server] assign[=] call[name[self].get_option, parameter[constant[advanced/external]]]
call[name[python_config]][constant[cmd]] assign[=] name[cmd]
if compare[name[host] in name[self].LOCALHOST] begin[:]
call[name[python_config]][constant[args]] assign[=] constant[--host {host} --port {port} --tcp]
call[name[python_config]][constant[external]] assign[=] name[external_server]
call[name[python_config]][constant[host]] assign[=] name[host]
call[name[python_config]][constant[port]] assign[=] name[port]
variable[plugins] assign[=] call[call[call[name[python_config]][constant[configurations]]][constant[pyls]]][constant[plugins]]
call[name[plugins]][constant[pycodestyle]] assign[=] name[pycodestyle]
call[name[plugins]][constant[pyflakes]] assign[=] name[pyflakes]
call[name[plugins]][constant[pydocstyle]] assign[=] name[pydocstyle]
call[name[plugins]][constant[jedi_completion]] assign[=] name[jedi_completion]
call[name[plugins]][constant[jedi_signature_help]] assign[=] name[jedi_signature_help]
call[call[name[plugins]][constant[preload]]][constant[modules]] assign[=] call[name[self].get_option, parameter[constant[preload_modules]]]
call[name[plugins]][constant[jedi_definition]] assign[=] name[jedi_definition]
return[name[python_config]] | keyword[def] identifier[generate_python_config] ( identifier[self] ):
literal[string]
identifier[python_config] = identifier[PYTHON_CONFIG] . identifier[copy] ()
identifier[cmd] = identifier[self] . identifier[get_option] ( literal[string] )
identifier[host] = identifier[self] . identifier[get_option] ( literal[string] )
identifier[port] = identifier[self] . identifier[get_option] ( literal[string] )
identifier[cs_exclude] = identifier[self] . identifier[get_option] ( literal[string] ). identifier[split] ( literal[string] )
identifier[cs_filename] = identifier[self] . identifier[get_option] ( literal[string] ). identifier[split] ( literal[string] )
identifier[cs_select] = identifier[self] . identifier[get_option] ( literal[string] ). identifier[split] ( literal[string] )
identifier[cs_ignore] = identifier[self] . identifier[get_option] ( literal[string] ). identifier[split] ( literal[string] )
identifier[cs_max_line_length] = identifier[self] . identifier[get_option] ( literal[string] )
identifier[pycodestyle] ={
literal[string] : identifier[self] . identifier[get_option] ( literal[string] ),
literal[string] :[ identifier[exclude] . identifier[strip] () keyword[for] identifier[exclude] keyword[in] identifier[cs_exclude] ],
literal[string] :[ identifier[filename] . identifier[strip] () keyword[for] identifier[filename] keyword[in] identifier[cs_filename] ],
literal[string] :[ identifier[select] . identifier[strip] () keyword[for] identifier[select] keyword[in] identifier[cs_select] ],
literal[string] :[ identifier[ignore] . identifier[strip] () keyword[for] identifier[ignore] keyword[in] identifier[cs_ignore] ],
literal[string] : keyword[False] ,
literal[string] : identifier[cs_max_line_length]
}
identifier[pyflakes] ={
literal[string] : identifier[self] . identifier[get_option] ( literal[string] )
}
identifier[convention] = identifier[self] . identifier[get_option] ( literal[string] )
keyword[if] identifier[convention] == literal[string] :
identifier[ds_ignore] = identifier[self] . identifier[get_option] ( literal[string] ). identifier[split] ( literal[string] )
identifier[ds_select] = identifier[self] . identifier[get_option] ( literal[string] ). identifier[split] ( literal[string] )
identifier[ds_add_ignore] =[]
identifier[ds_add_select] =[]
keyword[else] :
identifier[ds_ignore] =[]
identifier[ds_select] =[]
identifier[ds_add_ignore] = identifier[self] . identifier[get_option] ( literal[string] ). identifier[split] ( literal[string] )
identifier[ds_add_select] = identifier[self] . identifier[get_option] ( literal[string] ). identifier[split] ( literal[string] )
identifier[pydocstyle] ={
literal[string] : identifier[self] . identifier[get_option] ( literal[string] ),
literal[string] : identifier[convention] ,
literal[string] :[ identifier[ignore] . identifier[strip] () keyword[for] identifier[ignore] keyword[in] identifier[ds_add_ignore] ],
literal[string] :[ identifier[select] . identifier[strip] () keyword[for] identifier[select] keyword[in] identifier[ds_add_select] ],
literal[string] :[ identifier[ignore] . identifier[strip] () keyword[for] identifier[ignore] keyword[in] identifier[ds_ignore] ],
literal[string] :[ identifier[select] . identifier[strip] () keyword[for] identifier[select] keyword[in] identifier[ds_select] ],
literal[string] : identifier[self] . identifier[get_option] ( literal[string] ),
literal[string] : identifier[self] . identifier[get_option] ( literal[string] )
}
identifier[jedi_completion] ={
literal[string] : identifier[self] . identifier[get_option] ( literal[string] ),
literal[string] : keyword[False]
}
identifier[jedi_signature_help] ={
literal[string] : identifier[self] . identifier[get_option] ( literal[string] )
}
identifier[jedi_definition] ={
literal[string] : identifier[self] . identifier[get_option] ( literal[string] ),
literal[string] : identifier[self] . identifier[get_option] ( literal[string] )
}
identifier[external_server] = identifier[self] . identifier[get_option] ( literal[string] )
identifier[python_config] [ literal[string] ]= identifier[cmd]
keyword[if] identifier[host] keyword[in] identifier[self] . identifier[LOCALHOST] :
identifier[python_config] [ literal[string] ]= literal[string]
keyword[else] :
identifier[python_config] [ literal[string] ]= literal[string]
identifier[python_config] [ literal[string] ]= identifier[external_server]
identifier[python_config] [ literal[string] ]= identifier[host]
identifier[python_config] [ literal[string] ]= identifier[port]
identifier[plugins] = identifier[python_config] [ literal[string] ][ literal[string] ][ literal[string] ]
identifier[plugins] [ literal[string] ]= identifier[pycodestyle]
identifier[plugins] [ literal[string] ]= identifier[pyflakes]
identifier[plugins] [ literal[string] ]= identifier[pydocstyle]
identifier[plugins] [ literal[string] ]= identifier[jedi_completion]
identifier[plugins] [ literal[string] ]= identifier[jedi_signature_help]
identifier[plugins] [ literal[string] ][ literal[string] ]= identifier[self] . identifier[get_option] ( literal[string] )
identifier[plugins] [ literal[string] ]= identifier[jedi_definition]
keyword[return] identifier[python_config] | def generate_python_config(self):
"""
Update Python server configuration with the options saved in our
config system.
"""
python_config = PYTHON_CONFIG.copy()
# Server options
cmd = self.get_option('advanced/command_launch')
host = self.get_option('advanced/host')
port = self.get_option('advanced/port')
# Pycodestyle
cs_exclude = self.get_option('pycodestyle/exclude').split(',')
cs_filename = self.get_option('pycodestyle/filename').split(',')
cs_select = self.get_option('pycodestyle/select').split(',')
cs_ignore = self.get_option('pycodestyle/ignore').split(',')
cs_max_line_length = self.get_option('pycodestyle/max_line_length')
pycodestyle = {'enabled': self.get_option('pycodestyle'), 'exclude': [exclude.strip() for exclude in cs_exclude], 'filename': [filename.strip() for filename in cs_filename], 'select': [select.strip() for select in cs_select], 'ignore': [ignore.strip() for ignore in cs_ignore], 'hangClosing': False, 'maxLineLength': cs_max_line_length}
# Linting - Pyflakes
pyflakes = {'enabled': self.get_option('pyflakes')}
# Pydocstyle
convention = self.get_option('pydocstyle/convention')
if convention == 'Custom':
ds_ignore = self.get_option('pydocstyle/ignore').split(',')
ds_select = self.get_option('pydocstyle/select').split(',')
ds_add_ignore = []
ds_add_select = [] # depends on [control=['if'], data=[]]
else:
ds_ignore = []
ds_select = []
ds_add_ignore = self.get_option('pydocstyle/ignore').split(',')
ds_add_select = self.get_option('pydocstyle/select').split(',')
pydocstyle = {'enabled': self.get_option('pydocstyle'), 'convention': convention, 'addIgnore': [ignore.strip() for ignore in ds_add_ignore], 'addSelect': [select.strip() for select in ds_add_select], 'ignore': [ignore.strip() for ignore in ds_ignore], 'select': [select.strip() for select in ds_select], 'match': self.get_option('pydocstyle/match'), 'matchDir': self.get_option('pydocstyle/match_dir')}
# Code completion
jedi_completion = {'enabled': self.get_option('code_completion'), 'include_params': False}
jedi_signature_help = {'enabled': self.get_option('jedi_signature_help')}
jedi_definition = {'enabled': self.get_option('jedi_definition'), 'follow_imports': self.get_option('jedi_definition/follow_imports')}
# Advanced
external_server = self.get_option('advanced/external')
# Setup options in json
python_config['cmd'] = cmd
if host in self.LOCALHOST:
python_config['args'] = '--host {host} --port {port} --tcp' # depends on [control=['if'], data=[]]
else:
python_config['args'] = ''
python_config['external'] = external_server
python_config['host'] = host
python_config['port'] = port
plugins = python_config['configurations']['pyls']['plugins']
plugins['pycodestyle'] = pycodestyle
plugins['pyflakes'] = pyflakes
plugins['pydocstyle'] = pydocstyle
plugins['jedi_completion'] = jedi_completion
plugins['jedi_signature_help'] = jedi_signature_help
plugins['preload']['modules'] = self.get_option('preload_modules')
plugins['jedi_definition'] = jedi_definition
return python_config |
def all_entity_classes():
    """Return the list of all concrete persistent classes that are subclasses
    of Entity."""
    # Since sqlalchemy 0.8 the registry may also hold non-class objects,
    # so filter with isclass() before the issubclass() test.
    registered = Entity._decl_class_registry.values()
    entity_classes = []
    for candidate in registered:
        if isclass(candidate) and issubclass(candidate, Entity):
            entity_classes.append(candidate)
    return entity_classes
constant[Return the list of all concrete persistent classes that are subclasses
of Entity.]
variable[persistent_classes] assign[=] call[name[Entity]._decl_class_registry.values, parameter[]]
return[<ast.ListComp object at 0x7da20c6c5db0>] | keyword[def] identifier[all_entity_classes] ():
literal[string]
identifier[persistent_classes] = identifier[Entity] . identifier[_decl_class_registry] . identifier[values] ()
keyword[return] [
identifier[cls] keyword[for] identifier[cls] keyword[in] identifier[persistent_classes] keyword[if] identifier[isclass] ( identifier[cls] ) keyword[and] identifier[issubclass] ( identifier[cls] , identifier[Entity] )
] | def all_entity_classes():
"""Return the list of all concrete persistent classes that are subclasses
of Entity."""
persistent_classes = Entity._decl_class_registry.values()
# with sqlalchemy 0.8 _decl_class_registry holds object that are not
# classes
return [cls for cls in persistent_classes if isclass(cls) and issubclass(cls, Entity)] |
def _find_bounds(py_line_no, py_by_line_no, cheetah_by_line_no):
    """Searches before and after in the python source to find comments which
    denote cheetah line numbers. If a lower bound is not found, 0 is
    substituted. If an upper bound is not found, len(cheetah lines) is
    returned. The result is a lower-inclusive upper-exclusive range:
    [..., ...)
    """
    # Scan backwards from py_line_no for the nearest line-number comment;
    # fall back to 0 when none is found before the start of the file.
    lower_bound = 0
    for candidate in range(py_line_no, 0, -1):
        marker = _get_line_no_from_comments(py_by_line_no[candidate])
        if marker != 0:
            lower_bound = marker
            break
    # Scan forwards for the next line-number comment; make it exclusive by
    # adding one, and fall back to the cheetah line count when absent.
    upper_bound = len(cheetah_by_line_no)
    for candidate in range(py_line_no, len(py_by_line_no)):
        marker = _get_line_no_from_comments(py_by_line_no[candidate])
        if marker != 0:
            upper_bound = marker + 1
            break
    return lower_bound, upper_bound
constant[Searches before and after in the python source to find comments which
denote cheetah line numbers. If a lower bound is not found, 0 is
substituted. If an upper bound is not found, len(cheetah lines) is
returned. The result is a lower-inclusive upper-exclusive range:
[..., ...)
]
for taget[name[line_no]] in starred[call[name[range], parameter[name[py_line_no], constant[0], <ast.UnaryOp object at 0x7da2044c0f40>]]] begin[:]
variable[lower_bound] assign[=] call[name[_get_line_no_from_comments], parameter[call[name[py_by_line_no]][name[line_no]]]]
if compare[name[lower_bound] not_equal[!=] constant[0]] begin[:]
break
for taget[name[line_no]] in starred[call[name[range], parameter[name[py_line_no], call[name[len], parameter[name[py_by_line_no]]]]]] begin[:]
variable[upper_bound] assign[=] call[name[_get_line_no_from_comments], parameter[call[name[py_by_line_no]][name[line_no]]]]
if compare[name[upper_bound] not_equal[!=] constant[0]] begin[:]
<ast.AugAssign object at 0x7da2044c2920>
break
return[tuple[[<ast.Name object at 0x7da2044c1180>, <ast.Name object at 0x7da2044c2110>]]] | keyword[def] identifier[_find_bounds] ( identifier[py_line_no] , identifier[py_by_line_no] , identifier[cheetah_by_line_no] ):
literal[string]
keyword[for] identifier[line_no] keyword[in] identifier[range] ( identifier[py_line_no] , literal[int] ,- literal[int] ):
identifier[lower_bound] = identifier[_get_line_no_from_comments] ( identifier[py_by_line_no] [ identifier[line_no] ])
keyword[if] identifier[lower_bound] != literal[int] :
keyword[break]
keyword[else] :
identifier[lower_bound] = literal[int]
keyword[for] identifier[line_no] keyword[in] identifier[range] ( identifier[py_line_no] , identifier[len] ( identifier[py_by_line_no] )):
identifier[upper_bound] = identifier[_get_line_no_from_comments] ( identifier[py_by_line_no] [ identifier[line_no] ])
keyword[if] identifier[upper_bound] != literal[int] :
identifier[upper_bound] += literal[int]
keyword[break]
keyword[else] :
identifier[upper_bound] = identifier[len] ( identifier[cheetah_by_line_no] )
keyword[return] identifier[lower_bound] , identifier[upper_bound] | def _find_bounds(py_line_no, py_by_line_no, cheetah_by_line_no):
"""Searches before and after in the python source to find comments which
denote cheetah line numbers. If a lower bound is not found, 0 is
substituted. If an upper bound is not found, len(cheetah lines) is
returned. The result is a lower-inclusive upper-exclusive range:
[..., ...)
"""
# Find lower bound
for line_no in range(py_line_no, 0, -1):
lower_bound = _get_line_no_from_comments(py_by_line_no[line_no])
if lower_bound != 0:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line_no']]
else:
lower_bound = 0
# Find upper bound
for line_no in range(py_line_no, len(py_by_line_no)):
upper_bound = _get_line_no_from_comments(py_by_line_no[line_no])
if upper_bound != 0:
# Since we'll eventually be building a range(), let's make this
# the non-inclusive upper-bound
upper_bound += 1
break # depends on [control=['if'], data=['upper_bound']] # depends on [control=['for'], data=['line_no']]
else:
upper_bound = len(cheetah_by_line_no)
return (lower_bound, upper_bound) |
def wait(self, timeout=None):
    """wait to be woken up by the condition
    .. note::
        this method will block the current coroutine until a :meth:`notify`
        wakes it back up.
    :param timeout:
        maximum number of seconds to block before giving up; ``None``
        (the default) blocks indefinitely
    :returns:
        ``True`` if the wait timed out, ``False`` if it was woken by a
        notification (``False`` is always returned when no timeout was
        given)
    :raises:
        `RuntimeError` if the underlying lock hasn't been
        :meth:`acquired <Lock.acquire>`
    """
    if not self._is_owned():
        raise RuntimeError("cannot wait on un-acquired lock")
    # the coroutine to suspend now and resume on notify/timeout
    current = compat.getcurrent()
    waketime = None if timeout is None else time.time() + timeout
    if timeout is not None:
        # arrange for the scheduler to wake us at the deadline
        scheduler.schedule_at(waketime, current)
    self._waiters.append((current, waketime))
    # release the lock while suspended so notifiers can acquire it, then
    # yield control to the scheduler's main loop; we resume here after a
    # notify or a timeout and re-acquire the lock before returning
    self._lock.release()
    scheduler.state.mainloop.switch()
    self._lock.acquire()
    if timeout is not None:
        # if the timer could not be removed, it already fired: we were
        # woken by the deadline rather than by notify()
        timedout = not scheduler._remove_timer(waketime, current)
        if timedout:
            # never notified, so our waiter entry is still queued; drop it
            self._waiters.remove((current, waketime))
        return timedout
    return False
constant[wait to be woken up by the condition
.. note::
this method will block the current coroutine until a :meth:`notify`
wakes it back up.
:raises:
`RuntimeError` if the underlying lock hasn't been
:meth:`acquired <Lock.acquire>`
]
if <ast.UnaryOp object at 0x7da18bc70820> begin[:]
<ast.Raise object at 0x7da18bc72a10>
variable[current] assign[=] call[name[compat].getcurrent, parameter[]]
variable[waketime] assign[=] <ast.IfExp object at 0x7da18bc70fa0>
if compare[name[timeout] is_not constant[None]] begin[:]
call[name[scheduler].schedule_at, parameter[name[waketime], name[current]]]
call[name[self]._waiters.append, parameter[tuple[[<ast.Name object at 0x7da18bc72d10>, <ast.Name object at 0x7da18bc730d0>]]]]
call[name[self]._lock.release, parameter[]]
call[name[scheduler].state.mainloop.switch, parameter[]]
call[name[self]._lock.acquire, parameter[]]
if compare[name[timeout] is_not constant[None]] begin[:]
variable[timedout] assign[=] <ast.UnaryOp object at 0x7da18bc721d0>
if name[timedout] begin[:]
call[name[self]._waiters.remove, parameter[tuple[[<ast.Name object at 0x7da18bc70af0>, <ast.Name object at 0x7da18bc722c0>]]]]
return[name[timedout]]
return[constant[False]] | keyword[def] identifier[wait] ( identifier[self] , identifier[timeout] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_is_owned] ():
keyword[raise] identifier[RuntimeError] ( literal[string] )
identifier[current] = identifier[compat] . identifier[getcurrent] ()
identifier[waketime] = keyword[None] keyword[if] identifier[timeout] keyword[is] keyword[None] keyword[else] identifier[time] . identifier[time] ()+ identifier[timeout]
keyword[if] identifier[timeout] keyword[is] keyword[not] keyword[None] :
identifier[scheduler] . identifier[schedule_at] ( identifier[waketime] , identifier[current] )
identifier[self] . identifier[_waiters] . identifier[append] (( identifier[current] , identifier[waketime] ))
identifier[self] . identifier[_lock] . identifier[release] ()
identifier[scheduler] . identifier[state] . identifier[mainloop] . identifier[switch] ()
identifier[self] . identifier[_lock] . identifier[acquire] ()
keyword[if] identifier[timeout] keyword[is] keyword[not] keyword[None] :
identifier[timedout] = keyword[not] identifier[scheduler] . identifier[_remove_timer] ( identifier[waketime] , identifier[current] )
keyword[if] identifier[timedout] :
identifier[self] . identifier[_waiters] . identifier[remove] (( identifier[current] , identifier[waketime] ))
keyword[return] identifier[timedout]
keyword[return] keyword[False] | def wait(self, timeout=None):
"""wait to be woken up by the condition
.. note::
this method will block the current coroutine until a :meth:`notify`
wakes it back up.
:raises:
`RuntimeError` if the underlying lock hasn't been
:meth:`acquired <Lock.acquire>`
"""
if not self._is_owned():
raise RuntimeError('cannot wait on un-acquired lock') # depends on [control=['if'], data=[]]
current = compat.getcurrent()
waketime = None if timeout is None else time.time() + timeout
if timeout is not None:
scheduler.schedule_at(waketime, current) # depends on [control=['if'], data=[]]
self._waiters.append((current, waketime))
self._lock.release()
scheduler.state.mainloop.switch()
self._lock.acquire()
if timeout is not None:
timedout = not scheduler._remove_timer(waketime, current)
if timedout:
self._waiters.remove((current, waketime)) # depends on [control=['if'], data=[]]
return timedout # depends on [control=['if'], data=[]]
return False |
def derive_fields(self):
    """
    Derives our fields.
    """
    # Prefer the explicitly configured field list; otherwise collect the
    # field names straight off the form.
    if self.fields is None:
        fields = [form_field.name for form_field in self.form]
    else:
        fields = list(self.fields)
    # Readonly fields still need to be displayed, so append them too.
    readonly = self.derive_readonly()
    if readonly:
        fields += readonly
    # Drop each excluded name (first occurrence only, as list.remove does).
    for excluded in self.derive_exclude():
        if excluded in fields:
            fields.remove(excluded)
    return fields
constant[
Derives our fields.
]
if compare[name[self].fields is_not constant[None]] begin[:]
variable[fields] assign[=] call[name[list], parameter[name[self].fields]]
for taget[name[exclude]] in starred[call[name[self].derive_exclude, parameter[]]] begin[:]
if compare[name[exclude] in name[fields]] begin[:]
call[name[fields].remove, parameter[name[exclude]]]
return[name[fields]] | keyword[def] identifier[derive_fields] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[fields] keyword[is] keyword[not] keyword[None] :
identifier[fields] = identifier[list] ( identifier[self] . identifier[fields] )
keyword[else] :
identifier[form] = identifier[self] . identifier[form]
identifier[fields] =[]
keyword[for] identifier[field] keyword[in] identifier[form] :
identifier[fields] . identifier[append] ( identifier[field] . identifier[name] )
identifier[readonly] = identifier[self] . identifier[derive_readonly] ()
keyword[if] identifier[readonly] :
identifier[fields] += identifier[readonly]
keyword[for] identifier[exclude] keyword[in] identifier[self] . identifier[derive_exclude] ():
keyword[if] identifier[exclude] keyword[in] identifier[fields] :
identifier[fields] . identifier[remove] ( identifier[exclude] )
keyword[return] identifier[fields] | def derive_fields(self):
"""
Derives our fields.
"""
if self.fields is not None:
fields = list(self.fields) # depends on [control=['if'], data=[]]
else:
form = self.form
fields = []
for field in form:
fields.append(field.name) # depends on [control=['for'], data=['field']]
# this is slightly confusing but we add in readonly fields here because they will still
# need to be displayed
readonly = self.derive_readonly()
if readonly:
fields += readonly # depends on [control=['if'], data=[]]
# remove any excluded fields
for exclude in self.derive_exclude():
if exclude in fields:
fields.remove(exclude) # depends on [control=['if'], data=['exclude', 'fields']] # depends on [control=['for'], data=['exclude']]
return fields |
def find_position(edges, prow, bstart, bend, total=5):
    """Find a EMIR CSU bar position in a edge image.
    Parameters
    ==========
    edges; ndarray,
        a 2d image with 1 where is a border, 0 otherwise
    prow: int,
        reference 'row' of the bars
    bstart: int,
        minimum 'x' position of a bar (0-based)
    bend: int
        maximum 'x' position of a bar (0 based)
    total: int
        number of rows to check near `prow`
    Return
    ======
    list of (x, y) centroids
    """
    half = total // 2
    # Bars whose row window would fall off the image are skipped entirely.
    if prow - half < 0 or prow + half >= edges.shape[0]:
        return []
    window = edges[prow - half:prow + half + 1, bstart:bend]
    # Label 8-connected edge fragments inside the window.
    eight_connectivity = morph.generate_binary_structure(2, 2)
    labeled, nlabels = mes.label(window, structure=eight_connectivity)
    centroids = mes.center_of_mass(window, labels=labeled,
                                   index=range(1, nlabels + 1))
    # center_of_mass yields (y, x) pairs; translate them back into
    # full-image (x, y) coordinates.
    return [(x + bstart, prow - half + y) for y, x in centroids]
constant[Find a EMIR CSU bar position in a edge image.
Parameters
==========
edges; ndarray,
a 2d image with 1 where is a border, 0 otherwise
prow: int,
reference 'row' of the bars
bstart: int,
minimum 'x' position of a bar (0-based)
bend: int
maximum 'x' position of a bar (0 based)
total: int
number of rows to check near `prow`
Return
======
list of (x, y) centroids
]
variable[nt] assign[=] binary_operation[name[total] <ast.FloorDiv object at 0x7da2590d6bc0> constant[2]]
if <ast.BoolOp object at 0x7da1b1a1d6c0> begin[:]
return[list[[]]]
variable[s2edges] assign[=] call[name[edges]][tuple[[<ast.Slice object at 0x7da1b1a1c3a0>, <ast.Slice object at 0x7da1b1a1e440>]]]
variable[structure] assign[=] call[name[morph].generate_binary_structure, parameter[constant[2], constant[2]]]
<ast.Tuple object at 0x7da1b1a1e890> assign[=] call[name[mes].label, parameter[name[s2edges]]]
variable[cen_of_mass] assign[=] call[name[mes].center_of_mass, parameter[name[s2edges]]]
variable[cen_of_mass_off] assign[=] <ast.ListComp object at 0x7da20c6e4e20>
return[name[cen_of_mass_off]] | keyword[def] identifier[find_position] ( identifier[edges] , identifier[prow] , identifier[bstart] , identifier[bend] , identifier[total] = literal[int] ):
literal[string]
identifier[nt] = identifier[total] // literal[int]
keyword[if] identifier[prow] - identifier[nt] < literal[int] keyword[or] identifier[prow] + identifier[nt] >= identifier[edges] . identifier[shape] [ literal[int] ]:
keyword[return] []
identifier[s2edges] = identifier[edges] [ identifier[prow] - identifier[nt] : identifier[prow] + identifier[nt] + literal[int] , identifier[bstart] : identifier[bend] ]
identifier[structure] = identifier[morph] . identifier[generate_binary_structure] ( literal[int] , literal[int] )
identifier[har] , identifier[num_f] = identifier[mes] . identifier[label] ( identifier[s2edges] , identifier[structure] = identifier[structure] )
identifier[cen_of_mass] = identifier[mes] . identifier[center_of_mass] ( identifier[s2edges] , identifier[labels] = identifier[har] , identifier[index] = identifier[range] ( literal[int] , identifier[num_f] + literal[int] ))
identifier[cen_of_mass_off] =[( identifier[x] + identifier[bstart] , identifier[prow] - identifier[nt] + identifier[y] ) keyword[for] identifier[y] , identifier[x] keyword[in] identifier[cen_of_mass] ]
keyword[return] identifier[cen_of_mass_off] | def find_position(edges, prow, bstart, bend, total=5):
"""Find a EMIR CSU bar position in a edge image.
Parameters
==========
edges; ndarray,
a 2d image with 1 where is a border, 0 otherwise
prow: int,
reference 'row' of the bars
bstart: int,
minimum 'x' position of a bar (0-based)
bend: int
maximum 'x' position of a bar (0 based)
total: int
number of rows to check near `prow`
Return
======
list of (x, y) centroids
"""
nt = total // 2
# This bar is too near the border
if prow - nt < 0 or prow + nt >= edges.shape[0]:
return [] # depends on [control=['if'], data=[]]
s2edges = edges[prow - nt:prow + nt + 1, bstart:bend]
structure = morph.generate_binary_structure(2, 2) # 8 way conection
(har, num_f) = mes.label(s2edges, structure=structure)
cen_of_mass = mes.center_of_mass(s2edges, labels=har, index=range(1, num_f + 1))
# center_of_mass returns y, x coordinates
cen_of_mass_off = [(x + bstart, prow - nt + y) for (y, x) in cen_of_mass]
return cen_of_mass_off |
def copy_file(
        host,
        file_path,
        remote_path='.',
        username=None,
        key_path=None,
        action='put'
):
    """ Copy a file via SCP, proxied through the mesos master
    :param host: host or IP of the machine to execute the command on
    :type host: str
    :param file_path: the local path to the file to be copied
    :type file_path: str
    :param remote_path: the remote path to copy the file to
    :type remote_path: str
    :param username: SSH username (falls back to the CLI default)
    :type username: str
    :param key_path: path to the SSH private key to use for SSH
        authentication (falls back to the CLI default)
    :type key_path: str
    :param action: 'put' to upload file_path, 'get' to download remote_path
    :type action: str
    :return: True if successful, False otherwise
    :rtype: bool
    """
    # Fall back to the shakedown CLI defaults when credentials are omitted.
    username = username or shakedown.cli.ssh_user
    key_path = key_path or shakedown.cli.ssh_key_file
    key = validate_key(key_path)
    transport = start_transport(get_transport(host, username, key),
                                username, key)
    # Bail out early when SSH authentication did not succeed.
    if not transport.is_authenticated():
        print("error: unable to authenticate {}@{} with key {}".format(username, host, key_path))
        return False
    start = time.time()
    channel = scp.SCPClient(transport)
    if action == 'get':
        print("\n{}scp {}:{} {}\n".format(shakedown.cli.helpers.fchr('>>'), host, remote_path, file_path))
        channel.get(remote_path, file_path)
    else:
        print("\n{}scp {} {}:{}\n".format(shakedown.cli.helpers.fchr('>>'), file_path, host, remote_path))
        channel.put(file_path, remote_path)
    print("{} bytes copied in {} seconds.".format(str(os.path.getsize(file_path)), str(round(time.time() - start, 2))))
    try_close(channel)
    try_close(transport)
    return True
constant[ Copy a file via SCP, proxied through the mesos master
:param host: host or IP of the machine to execute the command on
:type host: str
:param file_path: the local path to the file to be copied
:type file_path: str
:param remote_path: the remote path to copy the file to
:type remote_path: str
:param username: SSH username
:type username: str
:param key_path: path to the SSH private key to use for SSH authentication
:type key_path: str
:return: True if successful, False otherwise
:rtype: bool
]
if <ast.UnaryOp object at 0x7da1b1924130> begin[:]
variable[username] assign[=] name[shakedown].cli.ssh_user
if <ast.UnaryOp object at 0x7da1b1977a30> begin[:]
variable[key_path] assign[=] name[shakedown].cli.ssh_key_file
variable[key] assign[=] call[name[validate_key], parameter[name[key_path]]]
variable[transport] assign[=] call[name[get_transport], parameter[name[host], name[username], name[key]]]
variable[transport] assign[=] call[name[start_transport], parameter[name[transport], name[username], name[key]]]
if call[name[transport].is_authenticated, parameter[]] begin[:]
variable[start] assign[=] call[name[time].time, parameter[]]
variable[channel] assign[=] call[name[scp].SCPClient, parameter[name[transport]]]
if compare[name[action] equal[==] constant[get]] begin[:]
call[name[print], parameter[call[constant[
{}scp {}:{} {}
].format, parameter[call[name[shakedown].cli.helpers.fchr, parameter[constant[>>]]], name[host], name[remote_path], name[file_path]]]]]
call[name[channel].get, parameter[name[remote_path], name[file_path]]]
call[name[print], parameter[call[constant[{} bytes copied in {} seconds.].format, parameter[call[name[str], parameter[call[name[os].path.getsize, parameter[name[file_path]]]]], call[name[str], parameter[call[name[round], parameter[binary_operation[call[name[time].time, parameter[]] - name[start]], constant[2]]]]]]]]]
call[name[try_close], parameter[name[channel]]]
call[name[try_close], parameter[name[transport]]]
return[constant[True]] | keyword[def] identifier[copy_file] (
identifier[host] ,
identifier[file_path] ,
identifier[remote_path] = literal[string] ,
identifier[username] = keyword[None] ,
identifier[key_path] = keyword[None] ,
identifier[action] = literal[string]
):
literal[string]
keyword[if] keyword[not] identifier[username] :
identifier[username] = identifier[shakedown] . identifier[cli] . identifier[ssh_user]
keyword[if] keyword[not] identifier[key_path] :
identifier[key_path] = identifier[shakedown] . identifier[cli] . identifier[ssh_key_file]
identifier[key] = identifier[validate_key] ( identifier[key_path] )
identifier[transport] = identifier[get_transport] ( identifier[host] , identifier[username] , identifier[key] )
identifier[transport] = identifier[start_transport] ( identifier[transport] , identifier[username] , identifier[key] )
keyword[if] identifier[transport] . identifier[is_authenticated] ():
identifier[start] = identifier[time] . identifier[time] ()
identifier[channel] = identifier[scp] . identifier[SCPClient] ( identifier[transport] )
keyword[if] identifier[action] == literal[string] :
identifier[print] ( literal[string] . identifier[format] ( identifier[shakedown] . identifier[cli] . identifier[helpers] . identifier[fchr] ( literal[string] ), identifier[host] , identifier[remote_path] , identifier[file_path] ))
identifier[channel] . identifier[get] ( identifier[remote_path] , identifier[file_path] )
keyword[else] :
identifier[print] ( literal[string] . identifier[format] ( identifier[shakedown] . identifier[cli] . identifier[helpers] . identifier[fchr] ( literal[string] ), identifier[file_path] , identifier[host] , identifier[remote_path] ))
identifier[channel] . identifier[put] ( identifier[file_path] , identifier[remote_path] )
identifier[print] ( literal[string] . identifier[format] ( identifier[str] ( identifier[os] . identifier[path] . identifier[getsize] ( identifier[file_path] )), identifier[str] ( identifier[round] ( identifier[time] . identifier[time] ()- identifier[start] , literal[int] ))))
identifier[try_close] ( identifier[channel] )
identifier[try_close] ( identifier[transport] )
keyword[return] keyword[True]
keyword[else] :
identifier[print] ( literal[string] . identifier[format] ( identifier[username] , identifier[host] , identifier[key_path] ))
keyword[return] keyword[False] | def copy_file(host, file_path, remote_path='.', username=None, key_path=None, action='put'):
""" Copy a file via SCP, proxied through the mesos master
:param host: host or IP of the machine to execute the command on
:type host: str
:param file_path: the local path to the file to be copied
:type file_path: str
:param remote_path: the remote path to copy the file to
:type remote_path: str
:param username: SSH username
:type username: str
:param key_path: path to the SSH private key to use for SSH authentication
:type key_path: str
:return: True if successful, False otherwise
:rtype: bool
"""
if not username:
username = shakedown.cli.ssh_user # depends on [control=['if'], data=[]]
if not key_path:
key_path = shakedown.cli.ssh_key_file # depends on [control=['if'], data=[]]
key = validate_key(key_path)
transport = get_transport(host, username, key)
transport = start_transport(transport, username, key)
if transport.is_authenticated():
start = time.time()
channel = scp.SCPClient(transport)
if action == 'get':
print('\n{}scp {}:{} {}\n'.format(shakedown.cli.helpers.fchr('>>'), host, remote_path, file_path))
channel.get(remote_path, file_path) # depends on [control=['if'], data=[]]
else:
print('\n{}scp {} {}:{}\n'.format(shakedown.cli.helpers.fchr('>>'), file_path, host, remote_path))
channel.put(file_path, remote_path)
print('{} bytes copied in {} seconds.'.format(str(os.path.getsize(file_path)), str(round(time.time() - start, 2))))
try_close(channel)
try_close(transport)
return True # depends on [control=['if'], data=[]]
else:
print('error: unable to authenticate {}@{} with key {}'.format(username, host, key_path))
return False |
def confirm(text='', title='', buttons=(OK_TEXT, CANCEL_TEXT), root=None, timeout=None):
    """Displays a message box with OK and Cancel buttons. Number and text of buttons can be customized. Returns the text of the button clicked on.

    :param text: message body shown in the dialog
    :param title: window title
    :param buttons: sequence of button labels; each is rendered via str()
    :param root: optional root window the dialog is attached to
    :param timeout: optional auto-dismiss timeout, passed through to _buttonbox
    """
    # The default is a tuple rather than a list to avoid the shared
    # mutable-default-argument pitfall; callers may still pass a list.
    assert TKINTER_IMPORT_SUCCEEDED, 'Tkinter is required for pymsgbox'
    return _buttonbox(msg=text, title=title, choices=[str(b) for b in buttons], root=root, timeout=timeout)
constant[Displays a message box with OK and Cancel buttons. Number and text of buttons can be customized. Returns the text of the button clicked on.]
assert[name[TKINTER_IMPORT_SUCCEEDED]]
return[call[name[_buttonbox], parameter[]]] | keyword[def] identifier[confirm] ( identifier[text] = literal[string] , identifier[title] = literal[string] , identifier[buttons] =[ identifier[OK_TEXT] , identifier[CANCEL_TEXT] ], identifier[root] = keyword[None] , identifier[timeout] = keyword[None] ):
literal[string]
keyword[assert] identifier[TKINTER_IMPORT_SUCCEEDED] , literal[string]
keyword[return] identifier[_buttonbox] ( identifier[msg] = identifier[text] , identifier[title] = identifier[title] , identifier[choices] =[ identifier[str] ( identifier[b] ) keyword[for] identifier[b] keyword[in] identifier[buttons] ], identifier[root] = identifier[root] , identifier[timeout] = identifier[timeout] ) | def confirm(text='', title='', buttons=[OK_TEXT, CANCEL_TEXT], root=None, timeout=None):
"""Displays a message box with OK and Cancel buttons. Number and text of buttons can be customized. Returns the text of the button clicked on."""
assert TKINTER_IMPORT_SUCCEEDED, 'Tkinter is required for pymsgbox'
return _buttonbox(msg=text, title=title, choices=[str(b) for b in buttons], root=root, timeout=timeout) |
def get_template_namespace(self) -> Dict[str, Any]:
    """Returns a dictionary to be used as the default template namespace.
    May be overridden by subclasses to add or modify values.
    The results of this method will be combined with additional
    defaults in the `tornado.template` module and keyword arguments
    to `render` or `render_string`.
    """
    namespace: Dict[str, Any] = {
        "handler": self,
        "request": self.request,
        "current_user": self.current_user,
        "locale": self.locale,
        "_": self.locale.translate,
        "pgettext": self.locale.pgettext,
        "static_url": self.static_url,
        "xsrf_form_html": self.xsrf_form_html,
        "reverse_url": self.reverse_url,
    }
    # User-supplied UI entries override the defaults above.
    namespace.update(self.ui)
    return namespace
constant[Returns a dictionary to be used as the default template namespace.
May be overridden by subclasses to add or modify values.
The results of this method will be combined with additional
defaults in the `tornado.template` module and keyword arguments
to `render` or `render_string`.
]
variable[namespace] assign[=] call[name[dict], parameter[]]
call[name[namespace].update, parameter[name[self].ui]]
return[name[namespace]] | keyword[def] identifier[get_template_namespace] ( identifier[self] )-> identifier[Dict] [ identifier[str] , identifier[Any] ]:
literal[string]
identifier[namespace] = identifier[dict] (
identifier[handler] = identifier[self] ,
identifier[request] = identifier[self] . identifier[request] ,
identifier[current_user] = identifier[self] . identifier[current_user] ,
identifier[locale] = identifier[self] . identifier[locale] ,
identifier[_] = identifier[self] . identifier[locale] . identifier[translate] ,
identifier[pgettext] = identifier[self] . identifier[locale] . identifier[pgettext] ,
identifier[static_url] = identifier[self] . identifier[static_url] ,
identifier[xsrf_form_html] = identifier[self] . identifier[xsrf_form_html] ,
identifier[reverse_url] = identifier[self] . identifier[reverse_url] ,
)
identifier[namespace] . identifier[update] ( identifier[self] . identifier[ui] )
keyword[return] identifier[namespace] | def get_template_namespace(self) -> Dict[str, Any]:
"""Returns a dictionary to be used as the default template namespace.
May be overridden by subclasses to add or modify values.
The results of this method will be combined with additional
defaults in the `tornado.template` module and keyword arguments
to `render` or `render_string`.
"""
namespace = dict(handler=self, request=self.request, current_user=self.current_user, locale=self.locale, _=self.locale.translate, pgettext=self.locale.pgettext, static_url=self.static_url, xsrf_form_html=self.xsrf_form_html, reverse_url=self.reverse_url)
namespace.update(self.ui)
return namespace |
def as_quote(self, quote):
    """ Returns the price instance so that the quote asset is ``quote``.
    Note: This makes a copy of the object!
    """
    # Already quoted in the requested asset: a plain copy suffices.
    if quote == self["quote"]["symbol"]:
        return self.copy()
    # Quoted the other way around: invert a copy of the price.
    if quote == self["base"]["symbol"]:
        return self.copy().invert()
    # The requested asset is on neither side of this price.
    raise InvalidAssetException
constant[ Returns the price instance so that the quote asset is ``quote``.
Note: This makes a copy of the object!
]
if compare[name[quote] equal[==] call[call[name[self]][constant[quote]]][constant[symbol]]] begin[:]
return[call[name[self].copy, parameter[]]] | keyword[def] identifier[as_quote] ( identifier[self] , identifier[quote] ):
literal[string]
keyword[if] identifier[quote] == identifier[self] [ literal[string] ][ literal[string] ]:
keyword[return] identifier[self] . identifier[copy] ()
keyword[elif] identifier[quote] == identifier[self] [ literal[string] ][ literal[string] ]:
keyword[return] identifier[self] . identifier[copy] (). identifier[invert] ()
keyword[else] :
keyword[raise] identifier[InvalidAssetException] | def as_quote(self, quote):
""" Returns the price instance so that the quote asset is ``quote``.
Note: This makes a copy of the object!
"""
if quote == self['quote']['symbol']:
return self.copy() # depends on [control=['if'], data=[]]
elif quote == self['base']['symbol']:
return self.copy().invert() # depends on [control=['if'], data=[]]
else:
raise InvalidAssetException |
async def cli(ctx, endpoint, debug, websocket, post):
    """Songpal CLI.

    Group callback: configures logging, resolves the forced protocol (if
    any) and attaches a connected Device to the click context for the
    subcommands to use.
    """
    lvl = logging.INFO
    if debug:
        lvl = logging.DEBUG
        click.echo("Setting debug level to %s" % debug)
    logging.basicConfig(level=lvl)
    # 'discover' needs no endpoint; pass only the debug flag along.
    if ctx.invoked_subcommand == "discover":
        ctx.obj = {"debug": debug}
        return
    if endpoint is None:
        err("Endpoint is required except when with 'discover'!")
        return
    protocol = None
    # --post and --websocket are mutually exclusive overrides; with neither
    # set, protocol stays None and the Device picks for itself.
    if post and websocket:
        err("You can force either --post or --websocket")
        return
    elif websocket:
        protocol = ProtocolType.WebSocket
    elif post:
        protocol = ProtocolType.XHRPost
    logging.debug("Using endpoint %s", endpoint)
    x = Device(endpoint, force_protocol=protocol, debug=debug)
    try:
        # Fetching the supported methods doubles as a connectivity check.
        await x.get_supported_methods()
    except (requests.exceptions.ConnectionError, SongpalException) as ex:
        err("Unable to get supported methods: %s" % ex)
        sys.exit(-1)
    ctx.obj = x
literal[string]
identifier[lvl] = identifier[logging] . identifier[INFO]
keyword[if] identifier[debug] :
identifier[lvl] = identifier[logging] . identifier[DEBUG]
identifier[click] . identifier[echo] ( literal[string] % identifier[debug] )
identifier[logging] . identifier[basicConfig] ( identifier[level] = identifier[lvl] )
keyword[if] identifier[ctx] . identifier[invoked_subcommand] == literal[string] :
identifier[ctx] . identifier[obj] ={ literal[string] : identifier[debug] }
keyword[return]
keyword[if] identifier[endpoint] keyword[is] keyword[None] :
identifier[err] ( literal[string] )
keyword[return]
identifier[protocol] = keyword[None]
keyword[if] identifier[post] keyword[and] identifier[websocket] :
identifier[err] ( literal[string] )
keyword[return]
keyword[elif] identifier[websocket] :
identifier[protocol] = identifier[ProtocolType] . identifier[WebSocket]
keyword[elif] identifier[post] :
identifier[protocol] = identifier[ProtocolType] . identifier[XHRPost]
identifier[logging] . identifier[debug] ( literal[string] , identifier[endpoint] )
identifier[x] = identifier[Device] ( identifier[endpoint] , identifier[force_protocol] = identifier[protocol] , identifier[debug] = identifier[debug] )
keyword[try] :
keyword[await] identifier[x] . identifier[get_supported_methods] ()
keyword[except] ( identifier[requests] . identifier[exceptions] . identifier[ConnectionError] , identifier[SongpalException] ) keyword[as] identifier[ex] :
identifier[err] ( literal[string] % identifier[ex] )
identifier[sys] . identifier[exit] (- literal[int] )
identifier[ctx] . identifier[obj] = identifier[x] | async def cli(ctx, endpoint, debug, websocket, post):
"""Songpal CLI."""
lvl = logging.INFO
if debug:
lvl = logging.DEBUG
click.echo('Setting debug level to %s' % debug) # depends on [control=['if'], data=[]]
logging.basicConfig(level=lvl)
if ctx.invoked_subcommand == 'discover':
ctx.obj = {'debug': debug}
return # depends on [control=['if'], data=[]]
if endpoint is None:
err("Endpoint is required except when with 'discover'!")
return # depends on [control=['if'], data=[]]
protocol = None
if post and websocket:
err('You can force either --post or --websocket')
return # depends on [control=['if'], data=[]]
elif websocket:
protocol = ProtocolType.WebSocket # depends on [control=['if'], data=[]]
elif post:
protocol = ProtocolType.XHRPost # depends on [control=['if'], data=[]]
logging.debug('Using endpoint %s', endpoint)
x = Device(endpoint, force_protocol=protocol, debug=debug)
try:
await x.get_supported_methods() # depends on [control=['try'], data=[]]
except (requests.exceptions.ConnectionError, SongpalException) as ex:
err('Unable to get supported methods: %s' % ex)
sys.exit(-1) # depends on [control=['except'], data=['ex']]
ctx.obj = x |
def parse_path(path):
'''Parses an address into directory and port parts.
The last segment of the address will be checked to see if it matches a port
specification (i.e. contains a colon followed by text). This will be
returned separately from the directory parts.
If a leading / is given, that will be returned as the first directory
component. All other / characters are removed.
All leading / characters are condensed into a single leading /.
Any path components that are . will be removed, as they just point to the
previous path component. For example, '/localhost/.' will become
'/localhost'. Any path components that are .. will be removed, along with
the previous path component. If this renders the path empty, it will be
replaced with '/'.
Examples:
>>> parse_path('localhost:30000/manager/comp0.rtc')
(['localhost:30000', 'manager', 'comp0.rtc'], None)
>>> parse_path('localhost/manager/comp0.rtc:in')
(['localhost', 'manager', 'comp0.rtc'], 'in')
>>> parse_path('/localhost/manager/comp0.rtc')
(['/', 'localhost', 'manager', 'comp0.rtc'], None)
>>> parse_path('/localhost/manager/comp0.rtc:in')
(['/', 'localhost', 'manager', 'comp0.rtc'], 'in')
>>> parse_path('manager/comp0.rtc')
(['manager', 'comp0.rtc'], None)
>>> parse_path('comp0.rtc')
(['comp0.rtc'], None)
'''
bits = path.lstrip('/').split('/')
if not bits:
raise exceptions.BadPathError(path)
if bits[-1]:
bits[-1], port = get_port(bits[-1])
else:
port = None
if path[0] == '/':
bits = ['/'] + bits
condensed_bits = []
for bit in bits:
if bit == '.':
continue
if bit == '..':
condensed_bits = condensed_bits[:-1]
continue
condensed_bits.append(bit)
if not condensed_bits:
condensed_bits = ['/']
return condensed_bits, port | def function[parse_path, parameter[path]]:
constant[Parses an address into directory and port parts.
The last segment of the address will be checked to see if it matches a port
specification (i.e. contains a colon followed by text). This will be
returned separately from the directory parts.
If a leading / is given, that will be returned as the first directory
component. All other / characters are removed.
All leading / characters are condensed into a single leading /.
Any path components that are . will be removed, as they just point to the
previous path component. For example, '/localhost/.' will become
'/localhost'. Any path components that are .. will be removed, along with
the previous path component. If this renders the path empty, it will be
replaced with '/'.
Examples:
>>> parse_path('localhost:30000/manager/comp0.rtc')
(['localhost:30000', 'manager', 'comp0.rtc'], None)
>>> parse_path('localhost/manager/comp0.rtc:in')
(['localhost', 'manager', 'comp0.rtc'], 'in')
>>> parse_path('/localhost/manager/comp0.rtc')
(['/', 'localhost', 'manager', 'comp0.rtc'], None)
>>> parse_path('/localhost/manager/comp0.rtc:in')
(['/', 'localhost', 'manager', 'comp0.rtc'], 'in')
>>> parse_path('manager/comp0.rtc')
(['manager', 'comp0.rtc'], None)
>>> parse_path('comp0.rtc')
(['comp0.rtc'], None)
]
variable[bits] assign[=] call[call[name[path].lstrip, parameter[constant[/]]].split, parameter[constant[/]]]
if <ast.UnaryOp object at 0x7da1b0bb0fd0> begin[:]
<ast.Raise object at 0x7da1b0bb0040>
if call[name[bits]][<ast.UnaryOp object at 0x7da1b0bb38b0>] begin[:]
<ast.Tuple object at 0x7da1b0bb05b0> assign[=] call[name[get_port], parameter[call[name[bits]][<ast.UnaryOp object at 0x7da1b0bb0c70>]]]
if compare[call[name[path]][constant[0]] equal[==] constant[/]] begin[:]
variable[bits] assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b0bb0940>]] + name[bits]]
variable[condensed_bits] assign[=] list[[]]
for taget[name[bit]] in starred[name[bits]] begin[:]
if compare[name[bit] equal[==] constant[.]] begin[:]
continue
if compare[name[bit] equal[==] constant[..]] begin[:]
variable[condensed_bits] assign[=] call[name[condensed_bits]][<ast.Slice object at 0x7da1b0bb0bb0>]
continue
call[name[condensed_bits].append, parameter[name[bit]]]
if <ast.UnaryOp object at 0x7da1b0bb0eb0> begin[:]
variable[condensed_bits] assign[=] list[[<ast.Constant object at 0x7da1b0bb0820>]]
return[tuple[[<ast.Name object at 0x7da18f09d930>, <ast.Name object at 0x7da18f09c9a0>]]] | keyword[def] identifier[parse_path] ( identifier[path] ):
literal[string]
identifier[bits] = identifier[path] . identifier[lstrip] ( literal[string] ). identifier[split] ( literal[string] )
keyword[if] keyword[not] identifier[bits] :
keyword[raise] identifier[exceptions] . identifier[BadPathError] ( identifier[path] )
keyword[if] identifier[bits] [- literal[int] ]:
identifier[bits] [- literal[int] ], identifier[port] = identifier[get_port] ( identifier[bits] [- literal[int] ])
keyword[else] :
identifier[port] = keyword[None]
keyword[if] identifier[path] [ literal[int] ]== literal[string] :
identifier[bits] =[ literal[string] ]+ identifier[bits]
identifier[condensed_bits] =[]
keyword[for] identifier[bit] keyword[in] identifier[bits] :
keyword[if] identifier[bit] == literal[string] :
keyword[continue]
keyword[if] identifier[bit] == literal[string] :
identifier[condensed_bits] = identifier[condensed_bits] [:- literal[int] ]
keyword[continue]
identifier[condensed_bits] . identifier[append] ( identifier[bit] )
keyword[if] keyword[not] identifier[condensed_bits] :
identifier[condensed_bits] =[ literal[string] ]
keyword[return] identifier[condensed_bits] , identifier[port] | def parse_path(path):
"""Parses an address into directory and port parts.
The last segment of the address will be checked to see if it matches a port
specification (i.e. contains a colon followed by text). This will be
returned separately from the directory parts.
If a leading / is given, that will be returned as the first directory
component. All other / characters are removed.
All leading / characters are condensed into a single leading /.
Any path components that are . will be removed, as they just point to the
previous path component. For example, '/localhost/.' will become
'/localhost'. Any path components that are .. will be removed, along with
the previous path component. If this renders the path empty, it will be
replaced with '/'.
Examples:
>>> parse_path('localhost:30000/manager/comp0.rtc')
(['localhost:30000', 'manager', 'comp0.rtc'], None)
>>> parse_path('localhost/manager/comp0.rtc:in')
(['localhost', 'manager', 'comp0.rtc'], 'in')
>>> parse_path('/localhost/manager/comp0.rtc')
(['/', 'localhost', 'manager', 'comp0.rtc'], None)
>>> parse_path('/localhost/manager/comp0.rtc:in')
(['/', 'localhost', 'manager', 'comp0.rtc'], 'in')
>>> parse_path('manager/comp0.rtc')
(['manager', 'comp0.rtc'], None)
>>> parse_path('comp0.rtc')
(['comp0.rtc'], None)
"""
bits = path.lstrip('/').split('/')
if not bits:
raise exceptions.BadPathError(path) # depends on [control=['if'], data=[]]
if bits[-1]:
(bits[-1], port) = get_port(bits[-1]) # depends on [control=['if'], data=[]]
else:
port = None
if path[0] == '/':
bits = ['/'] + bits # depends on [control=['if'], data=[]]
condensed_bits = []
for bit in bits:
if bit == '.':
continue # depends on [control=['if'], data=[]]
if bit == '..':
condensed_bits = condensed_bits[:-1]
continue # depends on [control=['if'], data=[]]
condensed_bits.append(bit) # depends on [control=['for'], data=['bit']]
if not condensed_bits:
condensed_bits = ['/'] # depends on [control=['if'], data=[]]
return (condensed_bits, port) |
def export_freq_band(xfreq, bands, filename):
"""Write frequency analysis data to CSV by pre-defined band."""
heading_row_1 = ['Segment index',
'Start time',
'End time',
'Duration',
'Stitches',
'Stage',
'Cycle',
'Event type',
'Channel',
]
spacer = [''] * (len(heading_row_1) - 1)
band_hdr = [str(b1) + '-' + str(b2) for b1, b2 in bands]
xband = xfreq.copy()
for seg in xband:
bandlist = []
for i, b in enumerate(bands):
pwr, _ = band_power(seg['data'], b)
bandlist.append(pwr)
seg['band'] = bandlist
as_matrix = asarray([
[x['band'][y][chan] for y in range(len(x['band']))] \
for x in xband for chan in x['band'][0].keys()])
desc = get_descriptives(as_matrix)
with open(filename, 'w', newline='') as f:
lg.info('Writing to ' + str(filename))
csv_file = writer(f)
csv_file.writerow(['Wonambi v{}'.format(__version__)])
csv_file.writerow(heading_row_1 + band_hdr)
csv_file.writerow(['Mean'] + spacer + list(desc['mean']))
csv_file.writerow(['SD'] + spacer + list(desc['sd']))
csv_file.writerow(['Mean of ln'] + spacer + list(desc['mean_log']))
csv_file.writerow(['SD of ln'] + spacer + list(desc['sd_log']))
idx = 0
for seg in xband:
for chan in seg['band'][0].keys():
idx += 1
cyc = None
if seg['cycle'] is not None:
cyc = seg['cycle'][2]
data_row = list(
[seg['band'][x][chan] for x in range(
len(seg['band']))])
csv_file.writerow([idx,
seg['start'],
seg['end'],
seg['duration'],
seg['n_stitch'],
seg['stage'],
cyc,
seg['name'],
chan,
] + data_row) | def function[export_freq_band, parameter[xfreq, bands, filename]]:
constant[Write frequency analysis data to CSV by pre-defined band.]
variable[heading_row_1] assign[=] list[[<ast.Constant object at 0x7da18bccbc40>, <ast.Constant object at 0x7da18bcc9300>, <ast.Constant object at 0x7da18bcc8340>, <ast.Constant object at 0x7da18bccabc0>, <ast.Constant object at 0x7da18bcc9e40>, <ast.Constant object at 0x7da18bcca860>, <ast.Constant object at 0x7da18bcc9f60>, <ast.Constant object at 0x7da18bcc9060>, <ast.Constant object at 0x7da18bcc9600>]]
variable[spacer] assign[=] binary_operation[list[[<ast.Constant object at 0x7da18bcca5f0>]] * binary_operation[call[name[len], parameter[name[heading_row_1]]] - constant[1]]]
variable[band_hdr] assign[=] <ast.ListComp object at 0x7da18bcca920>
variable[xband] assign[=] call[name[xfreq].copy, parameter[]]
for taget[name[seg]] in starred[name[xband]] begin[:]
variable[bandlist] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da18bcc8a60>, <ast.Name object at 0x7da18bccb7f0>]]] in starred[call[name[enumerate], parameter[name[bands]]]] begin[:]
<ast.Tuple object at 0x7da18bcc9240> assign[=] call[name[band_power], parameter[call[name[seg]][constant[data]], name[b]]]
call[name[bandlist].append, parameter[name[pwr]]]
call[name[seg]][constant[band]] assign[=] name[bandlist]
variable[as_matrix] assign[=] call[name[asarray], parameter[<ast.ListComp object at 0x7da18bccab30>]]
variable[desc] assign[=] call[name[get_descriptives], parameter[name[as_matrix]]]
with call[name[open], parameter[name[filename], constant[w]]] begin[:]
call[name[lg].info, parameter[binary_operation[constant[Writing to ] + call[name[str], parameter[name[filename]]]]]]
variable[csv_file] assign[=] call[name[writer], parameter[name[f]]]
call[name[csv_file].writerow, parameter[list[[<ast.Call object at 0x7da18bcc9ba0>]]]]
call[name[csv_file].writerow, parameter[binary_operation[name[heading_row_1] + name[band_hdr]]]]
call[name[csv_file].writerow, parameter[binary_operation[binary_operation[list[[<ast.Constant object at 0x7da18bcc8d00>]] + name[spacer]] + call[name[list], parameter[call[name[desc]][constant[mean]]]]]]]
call[name[csv_file].writerow, parameter[binary_operation[binary_operation[list[[<ast.Constant object at 0x7da18bcc9360>]] + name[spacer]] + call[name[list], parameter[call[name[desc]][constant[sd]]]]]]]
call[name[csv_file].writerow, parameter[binary_operation[binary_operation[list[[<ast.Constant object at 0x7da18bcc8670>]] + name[spacer]] + call[name[list], parameter[call[name[desc]][constant[mean_log]]]]]]]
call[name[csv_file].writerow, parameter[binary_operation[binary_operation[list[[<ast.Constant object at 0x7da18bccb100>]] + name[spacer]] + call[name[list], parameter[call[name[desc]][constant[sd_log]]]]]]]
variable[idx] assign[=] constant[0]
for taget[name[seg]] in starred[name[xband]] begin[:]
for taget[name[chan]] in starred[call[call[call[name[seg]][constant[band]]][constant[0]].keys, parameter[]]] begin[:]
<ast.AugAssign object at 0x7da18c4cf730>
variable[cyc] assign[=] constant[None]
if compare[call[name[seg]][constant[cycle]] is_not constant[None]] begin[:]
variable[cyc] assign[=] call[call[name[seg]][constant[cycle]]][constant[2]]
variable[data_row] assign[=] call[name[list], parameter[<ast.ListComp object at 0x7da1b0dee3b0>]]
call[name[csv_file].writerow, parameter[binary_operation[list[[<ast.Name object at 0x7da1b0deee00>, <ast.Subscript object at 0x7da1b0dee980>, <ast.Subscript object at 0x7da1b0dee5c0>, <ast.Subscript object at 0x7da1b0dee620>, <ast.Subscript object at 0x7da1b0dee9e0>, <ast.Subscript object at 0x7da1b0deeb60>, <ast.Name object at 0x7da1b0deee30>, <ast.Subscript object at 0x7da1b0deeec0>, <ast.Name object at 0x7da1b0dee410>]] + name[data_row]]]] | keyword[def] identifier[export_freq_band] ( identifier[xfreq] , identifier[bands] , identifier[filename] ):
literal[string]
identifier[heading_row_1] =[ literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
]
identifier[spacer] =[ literal[string] ]*( identifier[len] ( identifier[heading_row_1] )- literal[int] )
identifier[band_hdr] =[ identifier[str] ( identifier[b1] )+ literal[string] + identifier[str] ( identifier[b2] ) keyword[for] identifier[b1] , identifier[b2] keyword[in] identifier[bands] ]
identifier[xband] = identifier[xfreq] . identifier[copy] ()
keyword[for] identifier[seg] keyword[in] identifier[xband] :
identifier[bandlist] =[]
keyword[for] identifier[i] , identifier[b] keyword[in] identifier[enumerate] ( identifier[bands] ):
identifier[pwr] , identifier[_] = identifier[band_power] ( identifier[seg] [ literal[string] ], identifier[b] )
identifier[bandlist] . identifier[append] ( identifier[pwr] )
identifier[seg] [ literal[string] ]= identifier[bandlist]
identifier[as_matrix] = identifier[asarray] ([
[ identifier[x] [ literal[string] ][ identifier[y] ][ identifier[chan] ] keyword[for] identifier[y] keyword[in] identifier[range] ( identifier[len] ( identifier[x] [ literal[string] ]))] keyword[for] identifier[x] keyword[in] identifier[xband] keyword[for] identifier[chan] keyword[in] identifier[x] [ literal[string] ][ literal[int] ]. identifier[keys] ()])
identifier[desc] = identifier[get_descriptives] ( identifier[as_matrix] )
keyword[with] identifier[open] ( identifier[filename] , literal[string] , identifier[newline] = literal[string] ) keyword[as] identifier[f] :
identifier[lg] . identifier[info] ( literal[string] + identifier[str] ( identifier[filename] ))
identifier[csv_file] = identifier[writer] ( identifier[f] )
identifier[csv_file] . identifier[writerow] ([ literal[string] . identifier[format] ( identifier[__version__] )])
identifier[csv_file] . identifier[writerow] ( identifier[heading_row_1] + identifier[band_hdr] )
identifier[csv_file] . identifier[writerow] ([ literal[string] ]+ identifier[spacer] + identifier[list] ( identifier[desc] [ literal[string] ]))
identifier[csv_file] . identifier[writerow] ([ literal[string] ]+ identifier[spacer] + identifier[list] ( identifier[desc] [ literal[string] ]))
identifier[csv_file] . identifier[writerow] ([ literal[string] ]+ identifier[spacer] + identifier[list] ( identifier[desc] [ literal[string] ]))
identifier[csv_file] . identifier[writerow] ([ literal[string] ]+ identifier[spacer] + identifier[list] ( identifier[desc] [ literal[string] ]))
identifier[idx] = literal[int]
keyword[for] identifier[seg] keyword[in] identifier[xband] :
keyword[for] identifier[chan] keyword[in] identifier[seg] [ literal[string] ][ literal[int] ]. identifier[keys] ():
identifier[idx] += literal[int]
identifier[cyc] = keyword[None]
keyword[if] identifier[seg] [ literal[string] ] keyword[is] keyword[not] keyword[None] :
identifier[cyc] = identifier[seg] [ literal[string] ][ literal[int] ]
identifier[data_row] = identifier[list] (
[ identifier[seg] [ literal[string] ][ identifier[x] ][ identifier[chan] ] keyword[for] identifier[x] keyword[in] identifier[range] (
identifier[len] ( identifier[seg] [ literal[string] ]))])
identifier[csv_file] . identifier[writerow] ([ identifier[idx] ,
identifier[seg] [ literal[string] ],
identifier[seg] [ literal[string] ],
identifier[seg] [ literal[string] ],
identifier[seg] [ literal[string] ],
identifier[seg] [ literal[string] ],
identifier[cyc] ,
identifier[seg] [ literal[string] ],
identifier[chan] ,
]+ identifier[data_row] ) | def export_freq_band(xfreq, bands, filename):
"""Write frequency analysis data to CSV by pre-defined band."""
heading_row_1 = ['Segment index', 'Start time', 'End time', 'Duration', 'Stitches', 'Stage', 'Cycle', 'Event type', 'Channel']
spacer = [''] * (len(heading_row_1) - 1)
band_hdr = [str(b1) + '-' + str(b2) for (b1, b2) in bands]
xband = xfreq.copy()
for seg in xband:
bandlist = []
for (i, b) in enumerate(bands):
(pwr, _) = band_power(seg['data'], b)
bandlist.append(pwr) # depends on [control=['for'], data=[]]
seg['band'] = bandlist # depends on [control=['for'], data=['seg']]
as_matrix = asarray([[x['band'][y][chan] for y in range(len(x['band']))] for x in xband for chan in x['band'][0].keys()])
desc = get_descriptives(as_matrix)
with open(filename, 'w', newline='') as f:
lg.info('Writing to ' + str(filename))
csv_file = writer(f)
csv_file.writerow(['Wonambi v{}'.format(__version__)])
csv_file.writerow(heading_row_1 + band_hdr)
csv_file.writerow(['Mean'] + spacer + list(desc['mean']))
csv_file.writerow(['SD'] + spacer + list(desc['sd']))
csv_file.writerow(['Mean of ln'] + spacer + list(desc['mean_log']))
csv_file.writerow(['SD of ln'] + spacer + list(desc['sd_log']))
idx = 0
for seg in xband:
for chan in seg['band'][0].keys():
idx += 1
cyc = None
if seg['cycle'] is not None:
cyc = seg['cycle'][2] # depends on [control=['if'], data=[]]
data_row = list([seg['band'][x][chan] for x in range(len(seg['band']))])
csv_file.writerow([idx, seg['start'], seg['end'], seg['duration'], seg['n_stitch'], seg['stage'], cyc, seg['name'], chan] + data_row) # depends on [control=['for'], data=['chan']] # depends on [control=['for'], data=['seg']] # depends on [control=['with'], data=['f']] |
def load_jinja_template(file_name):
"""
Loads the jinja2 HTML template from the given file.
Assumes that the file is in the same directory as the script.
"""
original_script_path = sys.argv[0]
#script_path = os.path.dirname(os.path.realpath(__file__))
script_dir = os.path.dirname(original_script_path)
# file_path = os.path.join(script_path, file_name)
# with open(file_path, 'r') as template_file:
# return template_file.read()
from jinja2 import Environment, FileSystemLoader
env = Environment(loader=FileSystemLoader(script_dir))
template = env.get_template(file_name)
return template | def function[load_jinja_template, parameter[file_name]]:
constant[
Loads the jinja2 HTML template from the given file.
Assumes that the file is in the same directory as the script.
]
variable[original_script_path] assign[=] call[name[sys].argv][constant[0]]
variable[script_dir] assign[=] call[name[os].path.dirname, parameter[name[original_script_path]]]
from relative_module[jinja2] import module[Environment], module[FileSystemLoader]
variable[env] assign[=] call[name[Environment], parameter[]]
variable[template] assign[=] call[name[env].get_template, parameter[name[file_name]]]
return[name[template]] | keyword[def] identifier[load_jinja_template] ( identifier[file_name] ):
literal[string]
identifier[original_script_path] = identifier[sys] . identifier[argv] [ literal[int] ]
identifier[script_dir] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[original_script_path] )
keyword[from] identifier[jinja2] keyword[import] identifier[Environment] , identifier[FileSystemLoader]
identifier[env] = identifier[Environment] ( identifier[loader] = identifier[FileSystemLoader] ( identifier[script_dir] ))
identifier[template] = identifier[env] . identifier[get_template] ( identifier[file_name] )
keyword[return] identifier[template] | def load_jinja_template(file_name):
"""
Loads the jinja2 HTML template from the given file.
Assumes that the file is in the same directory as the script.
"""
original_script_path = sys.argv[0]
#script_path = os.path.dirname(os.path.realpath(__file__))
script_dir = os.path.dirname(original_script_path)
# file_path = os.path.join(script_path, file_name)
# with open(file_path, 'r') as template_file:
# return template_file.read()
from jinja2 import Environment, FileSystemLoader
env = Environment(loader=FileSystemLoader(script_dir))
template = env.get_template(file_name)
return template |
def clear_source(self):
"""stub"""
if (self.get_source_metadata().is_read_only() or
self.get_source_metadata().is_required()):
raise NoAccess()
self.my_osid_object_form._my_map['texts']['source'] = \
self._source_metadata['default_string_values'][0] | def function[clear_source, parameter[self]]:
constant[stub]
if <ast.BoolOp object at 0x7da20c7967d0> begin[:]
<ast.Raise object at 0x7da20c7962c0>
call[call[name[self].my_osid_object_form._my_map][constant[texts]]][constant[source]] assign[=] call[call[name[self]._source_metadata][constant[default_string_values]]][constant[0]] | keyword[def] identifier[clear_source] ( identifier[self] ):
literal[string]
keyword[if] ( identifier[self] . identifier[get_source_metadata] (). identifier[is_read_only] () keyword[or]
identifier[self] . identifier[get_source_metadata] (). identifier[is_required] ()):
keyword[raise] identifier[NoAccess] ()
identifier[self] . identifier[my_osid_object_form] . identifier[_my_map] [ literal[string] ][ literal[string] ]= identifier[self] . identifier[_source_metadata] [ literal[string] ][ literal[int] ] | def clear_source(self):
"""stub"""
if self.get_source_metadata().is_read_only() or self.get_source_metadata().is_required():
raise NoAccess() # depends on [control=['if'], data=[]]
self.my_osid_object_form._my_map['texts']['source'] = self._source_metadata['default_string_values'][0] |
def modify_order(self, modify_order_op, order_id, qty, price, adjust_limit=0, trd_env=TrdEnv.REAL, acc_id=0, acc_index=0):
"""
详细说明见基类接口说明,但有以下不同:不支持改单。 可撤单。删除订单是本地操作。
:param modify_order_op:
:param order_id:
:param qty:
:param price:
:param adjust_limit:
:param trd_env:
:param acc_id:
:return:
"""
return super(OpenHKCCTradeContext, self).modify_order(modify_order_op=modify_order_op,
order_id=order_id,
qty=qty,
price=price,
adjust_limit=adjust_limit,
trd_env=trd_env,
acc_id=acc_id,
acc_index=acc_index) | def function[modify_order, parameter[self, modify_order_op, order_id, qty, price, adjust_limit, trd_env, acc_id, acc_index]]:
constant[
详细说明见基类接口说明,但有以下不同:不支持改单。 可撤单。删除订单是本地操作。
:param modify_order_op:
:param order_id:
:param qty:
:param price:
:param adjust_limit:
:param trd_env:
:param acc_id:
:return:
]
return[call[call[name[super], parameter[name[OpenHKCCTradeContext], name[self]]].modify_order, parameter[]]] | keyword[def] identifier[modify_order] ( identifier[self] , identifier[modify_order_op] , identifier[order_id] , identifier[qty] , identifier[price] , identifier[adjust_limit] = literal[int] , identifier[trd_env] = identifier[TrdEnv] . identifier[REAL] , identifier[acc_id] = literal[int] , identifier[acc_index] = literal[int] ):
literal[string]
keyword[return] identifier[super] ( identifier[OpenHKCCTradeContext] , identifier[self] ). identifier[modify_order] ( identifier[modify_order_op] = identifier[modify_order_op] ,
identifier[order_id] = identifier[order_id] ,
identifier[qty] = identifier[qty] ,
identifier[price] = identifier[price] ,
identifier[adjust_limit] = identifier[adjust_limit] ,
identifier[trd_env] = identifier[trd_env] ,
identifier[acc_id] = identifier[acc_id] ,
identifier[acc_index] = identifier[acc_index] ) | def modify_order(self, modify_order_op, order_id, qty, price, adjust_limit=0, trd_env=TrdEnv.REAL, acc_id=0, acc_index=0):
"""
详细说明见基类接口说明,但有以下不同:不支持改单。 可撤单。删除订单是本地操作。
:param modify_order_op:
:param order_id:
:param qty:
:param price:
:param adjust_limit:
:param trd_env:
:param acc_id:
:return:
"""
return super(OpenHKCCTradeContext, self).modify_order(modify_order_op=modify_order_op, order_id=order_id, qty=qty, price=price, adjust_limit=adjust_limit, trd_env=trd_env, acc_id=acc_id, acc_index=acc_index) |
def lesspager(lines):
"""
Use for streaming writes to a less process
Taken from pydoc.pipepager:
/usr/lib/python2.7/pydoc.py
and
/usr/lib/python3.5/pydoc.py
"""
cmd = "less -S"
if sys.version_info[0] >= 3:
"""Page through text by feeding it to another program."""
import subprocess
proc = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE)
try:
with io.TextIOWrapper(proc.stdin, errors='backslashreplace') as pipe:
try:
for l in lines:
pipe.write(l)
except KeyboardInterrupt:
# We've hereby abandoned whatever text hasn't been written,
# but the pager is still in control of the terminal.
pass
except OSError:
pass # Ignore broken pipes caused by quitting the pager program.
while True:
try:
proc.wait()
break
except KeyboardInterrupt:
# Ignore ctl-c like the pager itself does. Otherwise the pager is
# left running and the terminal is in raw mode and unusable.
pass
else:
proc = os.popen(cmd, 'w')
try:
for l in lines:
proc.write(l)
except IOError:
proc.close()
sys.exit() | def function[lesspager, parameter[lines]]:
constant[
Use for streaming writes to a less process
Taken from pydoc.pipepager:
/usr/lib/python2.7/pydoc.py
and
/usr/lib/python3.5/pydoc.py
]
variable[cmd] assign[=] constant[less -S]
if compare[call[name[sys].version_info][constant[0]] greater_or_equal[>=] constant[3]] begin[:]
constant[Page through text by feeding it to another program.]
import module[subprocess]
variable[proc] assign[=] call[name[subprocess].Popen, parameter[name[cmd]]]
<ast.Try object at 0x7da1afe3a590>
while constant[True] begin[:]
<ast.Try object at 0x7da1afe3b280> | keyword[def] identifier[lesspager] ( identifier[lines] ):
literal[string]
identifier[cmd] = literal[string]
keyword[if] identifier[sys] . identifier[version_info] [ literal[int] ]>= literal[int] :
literal[string]
keyword[import] identifier[subprocess]
identifier[proc] = identifier[subprocess] . identifier[Popen] ( identifier[cmd] , identifier[shell] = keyword[True] , identifier[stdin] = identifier[subprocess] . identifier[PIPE] )
keyword[try] :
keyword[with] identifier[io] . identifier[TextIOWrapper] ( identifier[proc] . identifier[stdin] , identifier[errors] = literal[string] ) keyword[as] identifier[pipe] :
keyword[try] :
keyword[for] identifier[l] keyword[in] identifier[lines] :
identifier[pipe] . identifier[write] ( identifier[l] )
keyword[except] identifier[KeyboardInterrupt] :
keyword[pass]
keyword[except] identifier[OSError] :
keyword[pass]
keyword[while] keyword[True] :
keyword[try] :
identifier[proc] . identifier[wait] ()
keyword[break]
keyword[except] identifier[KeyboardInterrupt] :
keyword[pass]
keyword[else] :
identifier[proc] = identifier[os] . identifier[popen] ( identifier[cmd] , literal[string] )
keyword[try] :
keyword[for] identifier[l] keyword[in] identifier[lines] :
identifier[proc] . identifier[write] ( identifier[l] )
keyword[except] identifier[IOError] :
identifier[proc] . identifier[close] ()
identifier[sys] . identifier[exit] () | def lesspager(lines):
"""
Use for streaming writes to a less process
Taken from pydoc.pipepager:
/usr/lib/python2.7/pydoc.py
and
/usr/lib/python3.5/pydoc.py
"""
cmd = 'less -S'
if sys.version_info[0] >= 3:
'Page through text by feeding it to another program.'
import subprocess
proc = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE)
try:
with io.TextIOWrapper(proc.stdin, errors='backslashreplace') as pipe:
try:
for l in lines:
pipe.write(l) # depends on [control=['for'], data=['l']] # depends on [control=['try'], data=[]]
except KeyboardInterrupt:
# We've hereby abandoned whatever text hasn't been written,
# but the pager is still in control of the terminal.
pass # depends on [control=['except'], data=[]] # depends on [control=['with'], data=['pipe']] # depends on [control=['try'], data=[]]
except OSError:
pass # Ignore broken pipes caused by quitting the pager program. # depends on [control=['except'], data=[]]
while True:
try:
proc.wait()
break # depends on [control=['try'], data=[]]
except KeyboardInterrupt:
# Ignore ctl-c like the pager itself does. Otherwise the pager is
# left running and the terminal is in raw mode and unusable.
pass # depends on [control=['except'], data=[]] # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]]
else:
proc = os.popen(cmd, 'w')
try:
for l in lines:
proc.write(l) # depends on [control=['for'], data=['l']] # depends on [control=['try'], data=[]]
except IOError:
proc.close()
sys.exit() # depends on [control=['except'], data=[]] |
def view_structure(self, only_chains=None, opacity=1.0, recolor=False, gui=False):
"""Use NGLviewer to display a structure in a Jupyter notebook
Args:
only_chains (str, list): Chain ID or IDs to display
opacity (float): Opacity of the structure
recolor (bool): If structure should be cleaned and recolored to silver
gui (bool): If the NGLview GUI should show up
Returns:
NGLviewer object
"""
# TODO: show_structure_file does not work for MMTF files - need to check for that and load accordingly
if ssbio.utils.is_ipynb():
import nglview as nv
else:
raise EnvironmentError('Unable to display structure - not running in a Jupyter notebook environment')
if not self.structure_file:
raise ValueError("Structure file not loaded")
only_chains = ssbio.utils.force_list(only_chains)
to_show_chains = '( '
for c in only_chains:
to_show_chains += ':{} or'.format(c)
to_show_chains = to_show_chains.strip(' or ')
to_show_chains += ' )'
if self.file_type == 'mmtf' or self.file_type == 'mmtf.gz':
view = nv.NGLWidget()
view.add_component(self.structure_path)
else:
view = nv.show_structure_file(self.structure_path, gui=gui)
if recolor:
view.clear_representations()
if only_chains:
view.add_cartoon(selection='{} and (not hydrogen)'.format(to_show_chains), color='silver', opacity=opacity)
else:
view.add_cartoon(selection='protein', color='silver', opacity=opacity)
elif only_chains:
view.clear_representations()
view.add_cartoon(selection='{} and (not hydrogen)'.format(to_show_chains), color='silver', opacity=opacity)
return view | def function[view_structure, parameter[self, only_chains, opacity, recolor, gui]]:
constant[Use NGLviewer to display a structure in a Jupyter notebook
Args:
only_chains (str, list): Chain ID or IDs to display
opacity (float): Opacity of the structure
recolor (bool): If structure should be cleaned and recolored to silver
gui (bool): If the NGLview GUI should show up
Returns:
NGLviewer object
]
if call[name[ssbio].utils.is_ipynb, parameter[]] begin[:]
import module[nglview] as alias[nv]
if <ast.UnaryOp object at 0x7da20c76f6d0> begin[:]
<ast.Raise object at 0x7da20c76dd80>
variable[only_chains] assign[=] call[name[ssbio].utils.force_list, parameter[name[only_chains]]]
variable[to_show_chains] assign[=] constant[( ]
for taget[name[c]] in starred[name[only_chains]] begin[:]
<ast.AugAssign object at 0x7da20c76fc40>
variable[to_show_chains] assign[=] call[name[to_show_chains].strip, parameter[constant[ or ]]]
<ast.AugAssign object at 0x7da20c76e980>
if <ast.BoolOp object at 0x7da20c76f0a0> begin[:]
variable[view] assign[=] call[name[nv].NGLWidget, parameter[]]
call[name[view].add_component, parameter[name[self].structure_path]]
if name[recolor] begin[:]
call[name[view].clear_representations, parameter[]]
if name[only_chains] begin[:]
call[name[view].add_cartoon, parameter[]]
return[name[view]] | keyword[def] identifier[view_structure] ( identifier[self] , identifier[only_chains] = keyword[None] , identifier[opacity] = literal[int] , identifier[recolor] = keyword[False] , identifier[gui] = keyword[False] ):
literal[string]
keyword[if] identifier[ssbio] . identifier[utils] . identifier[is_ipynb] ():
keyword[import] identifier[nglview] keyword[as] identifier[nv]
keyword[else] :
keyword[raise] identifier[EnvironmentError] ( literal[string] )
keyword[if] keyword[not] identifier[self] . identifier[structure_file] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[only_chains] = identifier[ssbio] . identifier[utils] . identifier[force_list] ( identifier[only_chains] )
identifier[to_show_chains] = literal[string]
keyword[for] identifier[c] keyword[in] identifier[only_chains] :
identifier[to_show_chains] += literal[string] . identifier[format] ( identifier[c] )
identifier[to_show_chains] = identifier[to_show_chains] . identifier[strip] ( literal[string] )
identifier[to_show_chains] += literal[string]
keyword[if] identifier[self] . identifier[file_type] == literal[string] keyword[or] identifier[self] . identifier[file_type] == literal[string] :
identifier[view] = identifier[nv] . identifier[NGLWidget] ()
identifier[view] . identifier[add_component] ( identifier[self] . identifier[structure_path] )
keyword[else] :
identifier[view] = identifier[nv] . identifier[show_structure_file] ( identifier[self] . identifier[structure_path] , identifier[gui] = identifier[gui] )
keyword[if] identifier[recolor] :
identifier[view] . identifier[clear_representations] ()
keyword[if] identifier[only_chains] :
identifier[view] . identifier[add_cartoon] ( identifier[selection] = literal[string] . identifier[format] ( identifier[to_show_chains] ), identifier[color] = literal[string] , identifier[opacity] = identifier[opacity] )
keyword[else] :
identifier[view] . identifier[add_cartoon] ( identifier[selection] = literal[string] , identifier[color] = literal[string] , identifier[opacity] = identifier[opacity] )
keyword[elif] identifier[only_chains] :
identifier[view] . identifier[clear_representations] ()
identifier[view] . identifier[add_cartoon] ( identifier[selection] = literal[string] . identifier[format] ( identifier[to_show_chains] ), identifier[color] = literal[string] , identifier[opacity] = identifier[opacity] )
keyword[return] identifier[view] | def view_structure(self, only_chains=None, opacity=1.0, recolor=False, gui=False):
"""Use NGLviewer to display a structure in a Jupyter notebook
Args:
only_chains (str, list): Chain ID or IDs to display
opacity (float): Opacity of the structure
recolor (bool): If structure should be cleaned and recolored to silver
gui (bool): If the NGLview GUI should show up
Returns:
NGLviewer object
"""
# TODO: show_structure_file does not work for MMTF files - need to check for that and load accordingly
if ssbio.utils.is_ipynb():
import nglview as nv # depends on [control=['if'], data=[]]
else:
raise EnvironmentError('Unable to display structure - not running in a Jupyter notebook environment')
if not self.structure_file:
raise ValueError('Structure file not loaded') # depends on [control=['if'], data=[]]
only_chains = ssbio.utils.force_list(only_chains)
to_show_chains = '( '
for c in only_chains:
to_show_chains += ':{} or'.format(c) # depends on [control=['for'], data=['c']]
to_show_chains = to_show_chains.strip(' or ')
to_show_chains += ' )'
if self.file_type == 'mmtf' or self.file_type == 'mmtf.gz':
view = nv.NGLWidget()
view.add_component(self.structure_path) # depends on [control=['if'], data=[]]
else:
view = nv.show_structure_file(self.structure_path, gui=gui)
if recolor:
view.clear_representations()
if only_chains:
view.add_cartoon(selection='{} and (not hydrogen)'.format(to_show_chains), color='silver', opacity=opacity) # depends on [control=['if'], data=[]]
else:
view.add_cartoon(selection='protein', color='silver', opacity=opacity) # depends on [control=['if'], data=[]]
elif only_chains:
view.clear_representations()
view.add_cartoon(selection='{} and (not hydrogen)'.format(to_show_chains), color='silver', opacity=opacity) # depends on [control=['if'], data=[]]
return view |
def install_from_exchange():
    '''Install from experiment exchange.

    Parses the experiment id from the command line and delegates the
    download to ExperimentExchangeServices.
    '''
    # FIX: the original used backslash line-continuations *inside* the string
    # literals, which embedded the source indentation into the user-visible
    # help text; adjacent string literals keep the text clean.
    parser = argparse.ArgumentParser(
        description='Download experiment from the psiturk.org experiment '
                    'exchange (http://psiturk.org/ee).'
    )
    parser.add_argument(
        'exp_id', metavar='exp_id', type=str,
        help='the id number of the experiment in the exchange'
    )
    args = parser.parse_args()
    exp_exch = ExperimentExchangeServices()
    exp_exch.download_experiment(args.exp_id)
constant[ Install from experiment exchange. ]
variable[parser] assign[=] call[name[argparse].ArgumentParser, parameter[]]
call[name[parser].add_argument, parameter[constant[exp_id]]]
variable[args] assign[=] call[name[parser].parse_args, parameter[]]
variable[exp_exch] assign[=] call[name[ExperimentExchangeServices], parameter[]]
call[name[exp_exch].download_experiment, parameter[name[args].exp_id]] | keyword[def] identifier[install_from_exchange] ():
literal[string]
identifier[parser] = identifier[argparse] . identifier[ArgumentParser] (
identifier[description] = literal[string]
)
identifier[parser] . identifier[add_argument] (
literal[string] , identifier[metavar] = literal[string] , identifier[type] = identifier[str] , identifier[help] = literal[string]
)
identifier[args] = identifier[parser] . identifier[parse_args] ()
identifier[exp_exch] = identifier[ExperimentExchangeServices] ()
identifier[exp_exch] . identifier[download_experiment] ( identifier[args] . identifier[exp_id] ) | def install_from_exchange():
""" Install from experiment exchange. """
parser = argparse.ArgumentParser(description='Download experiment from the psiturk.org experiment exchange (http://psiturk.org/ee).')
parser.add_argument('exp_id', metavar='exp_id', type=str, help='the id number of the experiment in the exchange')
args = parser.parse_args()
exp_exch = ExperimentExchangeServices()
exp_exch.download_experiment(args.exp_id) |
def runcode(code):
    """Run the given code line by line with printing, as list of lines, and return variable 'ans'.

    Each line is echoed to stdout with a '# ' prefix before being executed
    in this module's global namespace, so the output reads like a commented
    session transcript.

    :param code: iterable of Python source lines (strings), executed in order.
    :returns: the value bound to the global name ``ans`` by the executed code;
        if the code never assigns ``ans``, the final ``return ans`` raises
        ``NameError``.

    .. warning:: executes arbitrary code via ``exec`` and mutates
       ``globals()``; only call with trusted input.
    """
    for line in code:
        print('# '+line)
        # Execute in globals() so assignments (including 'ans') persist
        # across lines and are visible to the return statement below.
        exec(line,globals())
    print('# return ans')
    return ans
constant[Run the given code line by line with printing, as list of lines, and return variable 'ans'.]
for taget[name[line]] in starred[name[code]] begin[:]
call[name[print], parameter[binary_operation[constant[# ] + name[line]]]]
call[name[exec], parameter[name[line], call[name[globals], parameter[]]]]
call[name[print], parameter[constant[# return ans]]]
return[name[ans]] | keyword[def] identifier[runcode] ( identifier[code] ):
literal[string]
keyword[for] identifier[line] keyword[in] identifier[code] :
identifier[print] ( literal[string] + identifier[line] )
identifier[exec] ( identifier[line] , identifier[globals] ())
identifier[print] ( literal[string] )
keyword[return] identifier[ans] | def runcode(code):
"""Run the given code line by line with printing, as list of lines, and return variable 'ans'."""
for line in code:
print('# ' + line)
exec(line, globals()) # depends on [control=['for'], data=['line']]
print('# return ans')
return ans |
def _callback(self, *dummy):
"""
This gets called on any attempt to change the value
"""
# retrieve the value from the Entry
value = self._variable.get()
# run the validation. Returns None if no good
newvalue = self.validate(value)
if newvalue is None:
# Invalid: restores previously stored value
# no checker run.
self._variable.set(self._value)
elif newvalue != value:
# If the value is different update appropriately
# Store new value.
self._value = newvalue
self._variable.set(self.newvalue)
else:
# Store new value
self._value = value | def function[_callback, parameter[self]]:
constant[
This gets called on any attempt to change the value
]
variable[value] assign[=] call[name[self]._variable.get, parameter[]]
variable[newvalue] assign[=] call[name[self].validate, parameter[name[value]]]
if compare[name[newvalue] is constant[None]] begin[:]
call[name[self]._variable.set, parameter[name[self]._value]] | keyword[def] identifier[_callback] ( identifier[self] ,* identifier[dummy] ):
literal[string]
identifier[value] = identifier[self] . identifier[_variable] . identifier[get] ()
identifier[newvalue] = identifier[self] . identifier[validate] ( identifier[value] )
keyword[if] identifier[newvalue] keyword[is] keyword[None] :
identifier[self] . identifier[_variable] . identifier[set] ( identifier[self] . identifier[_value] )
keyword[elif] identifier[newvalue] != identifier[value] :
identifier[self] . identifier[_value] = identifier[newvalue]
identifier[self] . identifier[_variable] . identifier[set] ( identifier[self] . identifier[newvalue] )
keyword[else] :
identifier[self] . identifier[_value] = identifier[value] | def _callback(self, *dummy):
"""
This gets called on any attempt to change the value
"""
# retrieve the value from the Entry
value = self._variable.get()
# run the validation. Returns None if no good
newvalue = self.validate(value)
if newvalue is None:
# Invalid: restores previously stored value
# no checker run.
self._variable.set(self._value) # depends on [control=['if'], data=[]]
elif newvalue != value:
# If the value is different update appropriately
# Store new value.
self._value = newvalue
self._variable.set(self.newvalue) # depends on [control=['if'], data=['newvalue']]
else:
# Store new value
self._value = value |
def augmentTextWithCONLLstr(conll_str_array, text):
    '''Augments the given Text object with information from Maltparser's output.

    Walks the Text sentence by sentence in lockstep with the CONLL lines and
    copies SYNTAX_LABEL, SYNTAX_HEAD and DEPREL onto each token.
    '''
    conll_index = 0
    for sentence in text.divide(layer=WORDS, by=SENTENCES):
        sentence = __sort_analyses(sentence)
        for estnltkToken in sentence:
            maltparserToken = conll_str_array[conll_index]
            if len(maltparserToken) > 1:
                fields = maltparserToken.split('\t')
                # Guard: the surface form must line up with the CONLL row.
                if estnltkToken[TEXT] != fields[1]:
                    raise Exception("A misalignment between Text and Maltparser's output: ",
                                    estnltkToken, maltparserToken)
                # Copy the syntactic relation information onto the token.
                estnltkToken[SYNTAX_LABEL] = fields[0]
                estnltkToken[SYNTAX_HEAD] = fields[6]
                # Name of the surface syntactic relation.
                estnltkToken[DEPREL] = fields[7]
            conll_index += 1
        # Skip the blank separator line between sentences in CONLL output.
        conll_index += 1
j += 1 | def function[augmentTextWithCONLLstr, parameter[conll_str_array, text]]:
constant[ Augments given Text object with the information from Maltparser's output.
More specifically, adds information about SYNTAX_LABEL, SYNTAX_HEAD and
DEPREL to each token in the Text object;
]
variable[j] assign[=] constant[0]
for taget[name[sentence]] in starred[call[name[text].divide, parameter[]]] begin[:]
variable[sentence] assign[=] call[name[__sort_analyses], parameter[name[sentence]]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[sentence]]]]]] begin[:]
variable[estnltkToken] assign[=] call[name[sentence]][name[i]]
variable[maltparserToken] assign[=] call[name[conll_str_array]][name[j]]
if compare[call[name[len], parameter[name[maltparserToken]]] greater[>] constant[1]] begin[:]
variable[maltParserAnalysis] assign[=] call[name[maltparserToken].split, parameter[constant[ ]]]
if compare[call[name[estnltkToken]][name[TEXT]] equal[==] call[name[maltParserAnalysis]][constant[1]]] begin[:]
call[name[estnltkToken]][name[SYNTAX_LABEL]] assign[=] call[name[maltParserAnalysis]][constant[0]]
call[name[estnltkToken]][name[SYNTAX_HEAD]] assign[=] call[name[maltParserAnalysis]][constant[6]]
call[name[estnltkToken]][name[DEPREL]] assign[=] call[name[maltParserAnalysis]][constant[7]]
<ast.AugAssign object at 0x7da18fe93f70>
<ast.AugAssign object at 0x7da18fe93010> | keyword[def] identifier[augmentTextWithCONLLstr] ( identifier[conll_str_array] , identifier[text] ):
literal[string]
identifier[j] = literal[int]
keyword[for] identifier[sentence] keyword[in] identifier[text] . identifier[divide] ( identifier[layer] = identifier[WORDS] , identifier[by] = identifier[SENTENCES] ):
identifier[sentence] = identifier[__sort_analyses] ( identifier[sentence] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[sentence] )):
identifier[estnltkToken] = identifier[sentence] [ identifier[i] ]
identifier[maltparserToken] = identifier[conll_str_array] [ identifier[j] ]
keyword[if] identifier[len] ( identifier[maltparserToken] )> literal[int] :
identifier[maltParserAnalysis] = identifier[maltparserToken] . identifier[split] ( literal[string] )
keyword[if] identifier[estnltkToken] [ identifier[TEXT] ]== identifier[maltParserAnalysis] [ literal[int] ]:
identifier[estnltkToken] [ identifier[SYNTAX_LABEL] ]= identifier[maltParserAnalysis] [ literal[int] ]
identifier[estnltkToken] [ identifier[SYNTAX_HEAD] ]= identifier[maltParserAnalysis] [ literal[int] ]
identifier[estnltkToken] [ identifier[DEPREL] ]= identifier[maltParserAnalysis] [ literal[int] ]
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] , identifier[estnltkToken] , identifier[maltparserToken] )
identifier[j] += literal[int]
identifier[j] += literal[int] | def augmentTextWithCONLLstr(conll_str_array, text):
""" Augments given Text object with the information from Maltparser's output.
More specifically, adds information about SYNTAX_LABEL, SYNTAX_HEAD and
DEPREL to each token in the Text object;
"""
j = 0
for sentence in text.divide(layer=WORDS, by=SENTENCES):
sentence = __sort_analyses(sentence)
for i in range(len(sentence)):
estnltkToken = sentence[i]
maltparserToken = conll_str_array[j]
if len(maltparserToken) > 1:
maltParserAnalysis = maltparserToken.split('\t')
if estnltkToken[TEXT] == maltParserAnalysis[1]:
# Fetch information about the syntactic relation:
estnltkToken[SYNTAX_LABEL] = maltParserAnalysis[0]
estnltkToken[SYNTAX_HEAD] = maltParserAnalysis[6]
# Fetch the name of the surface syntactic relation
estnltkToken[DEPREL] = maltParserAnalysis[7] # depends on [control=['if'], data=[]]
else:
raise Exception("A misalignment between Text and Maltparser's output: ", estnltkToken, maltparserToken) # depends on [control=['if'], data=[]]
j += 1 # depends on [control=['for'], data=['i']]
j += 1 # depends on [control=['for'], data=['sentence']] |
def locked_get(self):
    """Retrieve Credential from datastore.

    Returns:
        oauth2client.Credentials
    """
    def _from_cache():
        # Cheap path: deserialize from the cache when one is configured.
        if not self._cache:
            return None
        raw = self._cache.get(self._key_name)
        return client.Credentials.new_from_json(raw) if raw else None

    def _from_datastore():
        # Slow path: load the entity and, if found, warm the cache.
        entity = self._get_entity()
        if entity is None:
            return None
        found = getattr(entity, self._property_name)
        if self._cache:
            self._cache.set(self._key_name, found.to_json())
        return found

    credentials = _from_cache()
    if credentials is None:
        credentials = _from_datastore()
    # Attach ourselves as the backing store when the object supports it.
    if credentials and hasattr(credentials, 'set_store'):
        credentials.set_store(self)
    return credentials
constant[Retrieve Credential from datastore.
Returns:
oauth2client.Credentials
]
variable[credentials] assign[=] constant[None]
if name[self]._cache begin[:]
variable[json] assign[=] call[name[self]._cache.get, parameter[name[self]._key_name]]
if name[json] begin[:]
variable[credentials] assign[=] call[name[client].Credentials.new_from_json, parameter[name[json]]]
if compare[name[credentials] is constant[None]] begin[:]
variable[entity] assign[=] call[name[self]._get_entity, parameter[]]
if compare[name[entity] is_not constant[None]] begin[:]
variable[credentials] assign[=] call[name[getattr], parameter[name[entity], name[self]._property_name]]
if name[self]._cache begin[:]
call[name[self]._cache.set, parameter[name[self]._key_name, call[name[credentials].to_json, parameter[]]]]
if <ast.BoolOp object at 0x7da1b0152560> begin[:]
call[name[credentials].set_store, parameter[name[self]]]
return[name[credentials]] | keyword[def] identifier[locked_get] ( identifier[self] ):
literal[string]
identifier[credentials] = keyword[None]
keyword[if] identifier[self] . identifier[_cache] :
identifier[json] = identifier[self] . identifier[_cache] . identifier[get] ( identifier[self] . identifier[_key_name] )
keyword[if] identifier[json] :
identifier[credentials] = identifier[client] . identifier[Credentials] . identifier[new_from_json] ( identifier[json] )
keyword[if] identifier[credentials] keyword[is] keyword[None] :
identifier[entity] = identifier[self] . identifier[_get_entity] ()
keyword[if] identifier[entity] keyword[is] keyword[not] keyword[None] :
identifier[credentials] = identifier[getattr] ( identifier[entity] , identifier[self] . identifier[_property_name] )
keyword[if] identifier[self] . identifier[_cache] :
identifier[self] . identifier[_cache] . identifier[set] ( identifier[self] . identifier[_key_name] , identifier[credentials] . identifier[to_json] ())
keyword[if] identifier[credentials] keyword[and] identifier[hasattr] ( identifier[credentials] , literal[string] ):
identifier[credentials] . identifier[set_store] ( identifier[self] )
keyword[return] identifier[credentials] | def locked_get(self):
"""Retrieve Credential from datastore.
Returns:
oauth2client.Credentials
"""
credentials = None
if self._cache:
json = self._cache.get(self._key_name)
if json:
credentials = client.Credentials.new_from_json(json) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if credentials is None:
entity = self._get_entity()
if entity is not None:
credentials = getattr(entity, self._property_name)
if self._cache:
self._cache.set(self._key_name, credentials.to_json()) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['entity']] # depends on [control=['if'], data=['credentials']]
if credentials and hasattr(credentials, 'set_store'):
credentials.set_store(self) # depends on [control=['if'], data=[]]
return credentials |
def get_uid(brain_or_object):
    """Get the Plone UID for this object.

    :param brain_or_object: A single catalog brain or content object
    :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
    :returns: Plone UID
    :rtype: string
    """
    # The portal object itself has no real UID; by convention it is '0'.
    if is_portal(brain_or_object):
        return '0'
    # Brains expose the UID as a metadata attribute -- no object wake-up needed.
    if is_brain(brain_or_object) and base_hasattr(brain_or_object, "UID"):
        return brain_or_object.UID
    # Fall back to waking the full object and calling its UID() accessor.
    return get_object(brain_or_object).UID()
constant[Get the Plone UID for this object
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:returns: Plone UID
:rtype: string
]
if call[name[is_portal], parameter[name[brain_or_object]]] begin[:]
return[constant[0]]
if <ast.BoolOp object at 0x7da1b246bf40> begin[:]
return[name[brain_or_object].UID]
return[call[call[name[get_object], parameter[name[brain_or_object]]].UID, parameter[]]] | keyword[def] identifier[get_uid] ( identifier[brain_or_object] ):
literal[string]
keyword[if] identifier[is_portal] ( identifier[brain_or_object] ):
keyword[return] literal[string]
keyword[if] identifier[is_brain] ( identifier[brain_or_object] ) keyword[and] identifier[base_hasattr] ( identifier[brain_or_object] , literal[string] ):
keyword[return] identifier[brain_or_object] . identifier[UID]
keyword[return] identifier[get_object] ( identifier[brain_or_object] ). identifier[UID] () | def get_uid(brain_or_object):
"""Get the Plone UID for this object
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:returns: Plone UID
:rtype: string
"""
if is_portal(brain_or_object):
return '0' # depends on [control=['if'], data=[]]
if is_brain(brain_or_object) and base_hasattr(brain_or_object, 'UID'):
return brain_or_object.UID # depends on [control=['if'], data=[]]
return get_object(brain_or_object).UID() |
def flavor_create(name,  # pylint: disable=C0103
                  flavor_id=0,  # pylint: disable=C0103
                  ram=0,
                  disk=0,
                  vcpus=1,
                  is_public=True,
                  profile=None, **kwargs):
    '''
    Add a flavor to nova (nova flavor-create). The following parameters are
    required:

    name
        Name of the new flavor (must be first)
    flavor_id
        Unique integer ID for the new flavor
    ram
        Memory size in MB
    disk
        Disk size in GB
    vcpus
        Number of vcpus
    is_public
        Whether flavor is public. Default is True.

    CLI Example:

    .. code-block:: bash

        salt '*' nova.flavor_create myflavor flavor_id=6 ram=4096 disk=10 vcpus=1
    '''
    # Authenticate against nova, then delegate the actual creation call.
    connection = _auth(profile, **kwargs)
    return connection.flavor_create(name, flavor_id, ram, disk, vcpus, is_public)
constant[
Add a flavor to nova (nova flavor-create). The following parameters are
required:
name
Name of the new flavor (must be first)
flavor_id
Unique integer ID for the new flavor
ram
Memory size in MB
disk
Disk size in GB
vcpus
Number of vcpus
is_public
Whether flavor is public. Default is True.
CLI Example:
.. code-block:: bash
salt '*' nova.flavor_create myflavor flavor_id=6 ram=4096 disk=10 vcpus=1
]
variable[conn] assign[=] call[name[_auth], parameter[name[profile]]]
return[call[name[conn].flavor_create, parameter[name[name], name[flavor_id], name[ram], name[disk], name[vcpus], name[is_public]]]] | keyword[def] identifier[flavor_create] ( identifier[name] ,
identifier[flavor_id] = literal[int] ,
identifier[ram] = literal[int] ,
identifier[disk] = literal[int] ,
identifier[vcpus] = literal[int] ,
identifier[is_public] = keyword[True] ,
identifier[profile] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[conn] = identifier[_auth] ( identifier[profile] ,** identifier[kwargs] )
keyword[return] identifier[conn] . identifier[flavor_create] (
identifier[name] ,
identifier[flavor_id] ,
identifier[ram] ,
identifier[disk] ,
identifier[vcpus] ,
identifier[is_public]
) | def flavor_create(name, flavor_id=0, ram=0, disk=0, vcpus=1, is_public=True, profile=None, **kwargs): # pylint: disable=C0103
# pylint: disable=C0103
"\n Add a flavor to nova (nova flavor-create). The following parameters are\n required:\n\n name\n Name of the new flavor (must be first)\n flavor_id\n Unique integer ID for the new flavor\n ram\n Memory size in MB\n disk\n Disk size in GB\n vcpus\n Number of vcpus\n is_public\n Whether flavor is public. Default is True.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' nova.flavor_create myflavor flavor_id=6 ram=4096 disk=10 vcpus=1\n "
conn = _auth(profile, **kwargs)
return conn.flavor_create(name, flavor_id, ram, disk, vcpus, is_public) |
def set_toolbar_children_widgets(self):
    """
    Sets the toolBar children widgets.

    :return: Method success.
    :rtype: bool
    """
    # Leading widgets: application logo followed by a spacer.
    for message, getter in (
            ("> Adding 'Application_Logo_label' widget!", self.get_application_logo_label),
            ("> Adding 'Spacer_label' widget!", self.get_spacer_label)):
        LOGGER.debug(message)
        self.addWidget(getter())

    # Layout-switching labels come as a group from a single accessor.
    LOGGER.debug("> Adding 'Development_active_label', 'Preferences_active_label' widgets!")
    for layout_active_label in self.get_layouts_active_labels():
        self.addWidget(layout_active_label)

    # Trailing widgets, added in display order.
    for message, getter in (
            ("> Adding 'Custom_Layouts_active_label' widget!", self.get_custom_layouts_active_label),
            ("> Adding 'Miscellaneous_active_label' widget!", self.get_miscellaneous_active_label),
            ("> Adding 'Closure_Spacer_label' widget!", self.get_closure_spacer_label)):
        LOGGER.debug(message)
        self.addWidget(getter())

    return True
constant[
Sets the toolBar children widgets.
:return: Method success.
:rtype: bool
]
call[name[LOGGER].debug, parameter[constant[> Adding 'Application_Logo_label' widget!]]]
call[name[self].addWidget, parameter[call[name[self].get_application_logo_label, parameter[]]]]
call[name[LOGGER].debug, parameter[constant[> Adding 'Spacer_label' widget!]]]
call[name[self].addWidget, parameter[call[name[self].get_spacer_label, parameter[]]]]
call[name[LOGGER].debug, parameter[constant[> Adding 'Development_active_label', 'Preferences_active_label' widgets!]]]
for taget[name[layout_active_label]] in starred[call[name[self].get_layouts_active_labels, parameter[]]] begin[:]
call[name[self].addWidget, parameter[name[layout_active_label]]]
call[name[LOGGER].debug, parameter[constant[> Adding 'Custom_Layouts_active_label' widget!]]]
call[name[self].addWidget, parameter[call[name[self].get_custom_layouts_active_label, parameter[]]]]
call[name[LOGGER].debug, parameter[constant[> Adding 'Miscellaneous_active_label' widget!]]]
call[name[self].addWidget, parameter[call[name[self].get_miscellaneous_active_label, parameter[]]]]
call[name[LOGGER].debug, parameter[constant[> Adding 'Closure_Spacer_label' widget!]]]
call[name[self].addWidget, parameter[call[name[self].get_closure_spacer_label, parameter[]]]]
return[constant[True]] | keyword[def] identifier[set_toolbar_children_widgets] ( identifier[self] ):
literal[string]
identifier[LOGGER] . identifier[debug] ( literal[string] )
identifier[self] . identifier[addWidget] ( identifier[self] . identifier[get_application_logo_label] ())
identifier[LOGGER] . identifier[debug] ( literal[string] )
identifier[self] . identifier[addWidget] ( identifier[self] . identifier[get_spacer_label] ())
identifier[LOGGER] . identifier[debug] ( literal[string] )
keyword[for] identifier[layout_active_label] keyword[in] identifier[self] . identifier[get_layouts_active_labels] ():
identifier[self] . identifier[addWidget] ( identifier[layout_active_label] )
identifier[LOGGER] . identifier[debug] ( literal[string] )
identifier[self] . identifier[addWidget] ( identifier[self] . identifier[get_custom_layouts_active_label] ())
identifier[LOGGER] . identifier[debug] ( literal[string] )
identifier[self] . identifier[addWidget] ( identifier[self] . identifier[get_miscellaneous_active_label] ())
identifier[LOGGER] . identifier[debug] ( literal[string] )
identifier[self] . identifier[addWidget] ( identifier[self] . identifier[get_closure_spacer_label] ())
keyword[return] keyword[True] | def set_toolbar_children_widgets(self):
"""
Sets the toolBar children widgets.
:return: Method success.
:rtype: bool
"""
LOGGER.debug("> Adding 'Application_Logo_label' widget!")
self.addWidget(self.get_application_logo_label())
LOGGER.debug("> Adding 'Spacer_label' widget!")
self.addWidget(self.get_spacer_label())
LOGGER.debug("> Adding 'Development_active_label', 'Preferences_active_label' widgets!")
for layout_active_label in self.get_layouts_active_labels():
self.addWidget(layout_active_label) # depends on [control=['for'], data=['layout_active_label']]
LOGGER.debug("> Adding 'Custom_Layouts_active_label' widget!")
self.addWidget(self.get_custom_layouts_active_label())
LOGGER.debug("> Adding 'Miscellaneous_active_label' widget!")
self.addWidget(self.get_miscellaneous_active_label())
LOGGER.debug("> Adding 'Closure_Spacer_label' widget!")
self.addWidget(self.get_closure_spacer_label())
return True |
def onTagDel(self, name, func):
    '''
    Register a callback for tag deletion.

    Args:
        name (str): The name of the tag or tag glob.
        func (function): The callback func(node, tagname, tagval).
    '''
    # Exact tag names and glob patterns are tracked in separate registries.
    if '*' not in name:
        self.ontagdels[name].append(func)
        return
    self.ontagdelglobs.add(name, func)
constant[
Register a callback for tag deletion.
Args:
name (str): The name of the tag or tag glob.
func (function): The callback func(node, tagname, tagval).
]
if compare[constant[*] in name[name]] begin[:]
call[name[self].ontagdelglobs.add, parameter[name[name], name[func]]] | keyword[def] identifier[onTagDel] ( identifier[self] , identifier[name] , identifier[func] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[name] :
identifier[self] . identifier[ontagdelglobs] . identifier[add] ( identifier[name] , identifier[func] )
keyword[else] :
identifier[self] . identifier[ontagdels] [ identifier[name] ]. identifier[append] ( identifier[func] ) | def onTagDel(self, name, func):
"""
Register a callback for tag deletion.
Args:
name (str): The name of the tag or tag glob.
func (function): The callback func(node, tagname, tagval).
"""
if '*' in name:
self.ontagdelglobs.add(name, func) # depends on [control=['if'], data=['name']]
else:
self.ontagdels[name].append(func) |
def until_state(self, state, timeout=None):
    """Future that resolves when a certain client state is attained.

    Parameters
    ----------
    state : str
        Desired state, one of ("disconnected", "syncing", "synced")
    timeout: float
        Timeout for operation in seconds.

    Returns
    -------
    Whatever ``self._state.until_state`` returns -- per the summary above, a
    future that resolves once the requested state is reached. This method is
    a thin delegation to the underlying state object.
    """
    return self._state.until_state(state, timeout=timeout)
constant[Future that resolves when a certain client state is attained
Parameters
----------
state : str
Desired state, one of ("disconnected", "syncing", "synced")
timeout: float
Timeout for operation in seconds.
]
return[call[name[self]._state.until_state, parameter[name[state]]]] | keyword[def] identifier[until_state] ( identifier[self] , identifier[state] , identifier[timeout] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[_state] . identifier[until_state] ( identifier[state] , identifier[timeout] = identifier[timeout] ) | def until_state(self, state, timeout=None):
"""Future that resolves when a certain client state is attained
Parameters
----------
state : str
Desired state, one of ("disconnected", "syncing", "synced")
timeout: float
Timeout for operation in seconds.
"""
return self._state.until_state(state, timeout=timeout) |
def read_array(self, infile, var_name):
    """Directly return a numpy array for a given variable name.

    :param infile: path of the netCDF file handed to ``self.read_cdf``.
    :param var_name: name of the variable to extract.
    :returns: the variable's full data (the ``[:]`` slice of the variable).
    :raises KeyError: if ``var_name`` is not present in the file.
    """
    file_handle = self.read_cdf(infile)
    try:
        # return the data array
        return file_handle.variables[var_name][:]
    except KeyError:
        print("Cannot find variable: {0}".format(var_name))
        # BUG FIX: re-raise the original KeyError (which carries the
        # variable name and traceback) instead of `raise KeyError`, which
        # raised a brand-new, message-less exception.
        raise
constant[Directly return a numpy array for a given variable name]
variable[file_handle] assign[=] call[name[self].read_cdf, parameter[name[infile]]]
<ast.Try object at 0x7da1b27e2140> | keyword[def] identifier[read_array] ( identifier[self] , identifier[infile] , identifier[var_name] ):
literal[string]
identifier[file_handle] = identifier[self] . identifier[read_cdf] ( identifier[infile] )
keyword[try] :
keyword[return] identifier[file_handle] . identifier[variables] [ identifier[var_name] ][:]
keyword[except] identifier[KeyError] :
identifier[print] ( literal[string] . identifier[format] ( identifier[var_name] ))
keyword[raise] identifier[KeyError] | def read_array(self, infile, var_name):
"""Directly return a numpy array for a given variable name"""
file_handle = self.read_cdf(infile)
try:
# return the data array
return file_handle.variables[var_name][:] # depends on [control=['try'], data=[]]
except KeyError:
print('Cannot find variable: {0}'.format(var_name))
raise KeyError # depends on [control=['except'], data=[]] |
def tangelo_import(*args, **kwargs):
    """
    When we are asked to import a module, if we get an import error and the
    calling script is one we are serving (not one in the python libraries), try
    again in the same directory as the script that is calling import.

    It seems like we should use sys.meta_path and combine our path with the
    path sent to imp.find_module.  This requires duplicating a bunch of logic
    from the imp module and is actually heavier than this technique.

    :params: see __builtin__.__import__
    """
    try:
        # Fast path: let the ordinary import machinery try first.
        return builtin_import(*args, **kwargs)
    except ImportError:
        # Only retry for imports issued by a served script; the serving code
        # presumably records that script's directory in thread_data.modulepath
        # (set elsewhere -- not visible here).
        if not hasattr(cherrypy.thread_data, "modulepath"):
            raise
        path = os.path.abspath(cherrypy.thread_data.modulepath)
        root = os.path.abspath(cherrypy.config.get("webroot"))
        result = None
        # Hold the global import lock while temporarily mutating sys.path so
        # concurrent imports on other threads don't observe the change.
        imp.acquire_lock()
        oldpath = sys.path
        try:
            # If the module's path isn't in the system path but is in our
            # serving area, temporarily add it and try the import again.
            # The startswith(root + os.path.sep) check confines the retry to
            # directories strictly inside the web root.
            if path not in sys.path and (path == root or path.startswith(root + os.path.sep)):
                sys.path = [path] + sys.path
                result = builtin_import(*args, **kwargs)
        finally:
            # Always restore sys.path and release the lock, even if the
            # retried import itself raises.
            sys.path = oldpath
            imp.release_lock()
        if result is not None:
            return result
        # The retry did not apply or did not run; surface the original error.
        raise
constant[
When we are asked to import a module, if we get an import error and the
calling script is one we are serving (not one in the python libraries), try
again in the same directory as the script that is calling import.
It seems like we should use sys.meta_path and combine our path with the
path sent to imp.find_module. This requires duplicating a bunch of logic
from the imp module and is actually heavier than this technique.
:params: see __builtin__.__import__
]
<ast.Try object at 0x7da18bc72dd0> | keyword[def] identifier[tangelo_import] (* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[try] :
keyword[return] identifier[builtin_import] (* identifier[args] ,** identifier[kwargs] )
keyword[except] identifier[ImportError] :
keyword[if] keyword[not] identifier[hasattr] ( identifier[cherrypy] . identifier[thread_data] , literal[string] ):
keyword[raise]
identifier[path] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[cherrypy] . identifier[thread_data] . identifier[modulepath] )
identifier[root] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[cherrypy] . identifier[config] . identifier[get] ( literal[string] ))
identifier[result] = keyword[None]
identifier[imp] . identifier[acquire_lock] ()
identifier[oldpath] = identifier[sys] . identifier[path]
keyword[try] :
keyword[if] identifier[path] keyword[not] keyword[in] identifier[sys] . identifier[path] keyword[and] ( identifier[path] == identifier[root] keyword[or] identifier[path] . identifier[startswith] ( identifier[root] + identifier[os] . identifier[path] . identifier[sep] )):
identifier[sys] . identifier[path] =[ identifier[path] ]+ identifier[sys] . identifier[path]
identifier[result] = identifier[builtin_import] (* identifier[args] ,** identifier[kwargs] )
keyword[finally] :
identifier[sys] . identifier[path] = identifier[oldpath]
identifier[imp] . identifier[release_lock] ()
keyword[if] identifier[result] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[result]
keyword[raise] | def tangelo_import(*args, **kwargs):
"""
When we are asked to import a module, if we get an import error and the
calling script is one we are serving (not one in the python libraries), try
again in the same directory as the script that is calling import.
It seems like we should use sys.meta_path and combine our path with the
path sent to imp.find_module. This requires duplicating a bunch of logic
from the imp module and is actually heavier than this technique.
:params: see __builtin__.__import__
"""
try:
return builtin_import(*args, **kwargs) # depends on [control=['try'], data=[]]
except ImportError:
if not hasattr(cherrypy.thread_data, 'modulepath'):
raise # depends on [control=['if'], data=[]]
path = os.path.abspath(cherrypy.thread_data.modulepath)
root = os.path.abspath(cherrypy.config.get('webroot'))
result = None
imp.acquire_lock()
oldpath = sys.path
try:
# If the module's path isn't in the system path but is in our
# serving area, temporarily add it and try the import again.
if path not in sys.path and (path == root or path.startswith(root + os.path.sep)):
sys.path = [path] + sys.path
result = builtin_import(*args, **kwargs) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
finally:
sys.path = oldpath
imp.release_lock()
if result is not None:
return result # depends on [control=['if'], data=['result']]
raise # depends on [control=['except'], data=[]] |
def register_classes():
    """Register each of this module's link classes with the `LinkFactory`."""
    link_classes = (
        CopyBaseROI,
        CopyBaseROI_SG,
        SimulateROI,
        SimulateROI_SG,
        RandomDirGen,
        RandomDirGen_SG,
    )
    for link_class in link_classes:
        link_class.register_class()
constant[Register these classes with the `LinkFactory` ]
call[name[CopyBaseROI].register_class, parameter[]]
call[name[CopyBaseROI_SG].register_class, parameter[]]
call[name[SimulateROI].register_class, parameter[]]
call[name[SimulateROI_SG].register_class, parameter[]]
call[name[RandomDirGen].register_class, parameter[]]
call[name[RandomDirGen_SG].register_class, parameter[]] | keyword[def] identifier[register_classes] ():
literal[string]
identifier[CopyBaseROI] . identifier[register_class] ()
identifier[CopyBaseROI_SG] . identifier[register_class] ()
identifier[SimulateROI] . identifier[register_class] ()
identifier[SimulateROI_SG] . identifier[register_class] ()
identifier[RandomDirGen] . identifier[register_class] ()
identifier[RandomDirGen_SG] . identifier[register_class] () | def register_classes():
"""Register these classes with the `LinkFactory` """
CopyBaseROI.register_class()
CopyBaseROI_SG.register_class()
SimulateROI.register_class()
SimulateROI_SG.register_class()
RandomDirGen.register_class()
RandomDirGen_SG.register_class() |
def is_in_manifest(dynamodb_client, table_name, run_id):
    """Return True when *run_id* is already recorded in the DynamoDB
    manifest table, False otherwise.
    Arguments:
    dynamodb_client - boto3 DynamoDB client (not service)
    table_name - string representing existing table name
    run_id - string representing run_id to store
    """
    lookup_key = {DYNAMODB_RUNID_ATTRIBUTE: {'S': run_id}}
    response = dynamodb_client.get_item(TableName=table_name, Key=lookup_key)
    # get_item returns an 'Item' entry only when the key exists.
    return response.get('Item') is not None
constant[Check if run_id is stored in DynamoDB table.
Return True if run_id is stored or False otherwise.
Arguments:
dynamodb_client - boto3 DynamoDB client (not service)
table_name - string representing existing table name
run_id - string representing run_id to store
]
variable[response] assign[=] call[name[dynamodb_client].get_item, parameter[]]
return[compare[call[name[response].get, parameter[constant[Item]]] is_not constant[None]]] | keyword[def] identifier[is_in_manifest] ( identifier[dynamodb_client] , identifier[table_name] , identifier[run_id] ):
literal[string]
identifier[response] = identifier[dynamodb_client] . identifier[get_item] (
identifier[TableName] = identifier[table_name] ,
identifier[Key] ={
identifier[DYNAMODB_RUNID_ATTRIBUTE] :{
literal[string] : identifier[run_id]
}
}
)
keyword[return] identifier[response] . identifier[get] ( literal[string] ) keyword[is] keyword[not] keyword[None] | def is_in_manifest(dynamodb_client, table_name, run_id):
"""Check if run_id is stored in DynamoDB table.
Return True if run_id is stored or False otherwise.
Arguments:
dynamodb_client - boto3 DynamoDB client (not service)
table_name - string representing existing table name
run_id - string representing run_id to store
"""
response = dynamodb_client.get_item(TableName=table_name, Key={DYNAMODB_RUNID_ATTRIBUTE: {'S': run_id}})
return response.get('Item') is not None |
def extract_keywords(func):
    """
    Parses the keyword (defaulted) argument names from the given function.
    :param func | <function> or <method>
    :return <tuple> of the parameter names that carry default values; an
            empty tuple when there are none or *func* is not introspectable.
    """
    # Unwrap bound/unbound methods to the underlying function object
    # (Python 2 exposes it as ``im_func``, Python 3 as ``__func__``).
    if hasattr(func, 'im_func'):
        func = func.im_func
    elif hasattr(func, '__func__'):
        func = func.__func__
    try:
        code = func.__code__
        defaults = func.__defaults__ or ()
        # Keyword arguments are the *trailing* declared positional
        # parameters, i.e. the last ``len(defaults)`` names within the
        # first ``co_argcount`` entries of ``co_varnames``.  A bare
        # negative slice of ``co_varnames`` would also pick up local
        # variables, which is why the slice is bounded by co_argcount.
        return code.co_varnames[code.co_argcount - len(defaults):code.co_argcount]
    except (AttributeError, TypeError, ValueError, IndexError):
        return tuple()
constant[
Parses the keywords from the given function.
:param func | <function>
]
if call[name[hasattr], parameter[name[func], constant[im_func]]] begin[:]
variable[func] assign[=] name[func].im_func
<ast.Try object at 0x7da18f721f00> | keyword[def] identifier[extract_keywords] ( identifier[func] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[func] , literal[string] ):
identifier[func] = identifier[func] . identifier[im_func]
keyword[try] :
keyword[return] identifier[func] . identifier[func_code] . identifier[co_varnames] [- identifier[len] ( identifier[func] . identifier[func_defaults] ):]
keyword[except] ( identifier[TypeError] , identifier[ValueError] , identifier[IndexError] ):
keyword[return] identifier[tuple] () | def extract_keywords(func):
"""
Parses the keywords from the given function.
:param func | <function>
"""
if hasattr(func, 'im_func'):
func = func.im_func # depends on [control=['if'], data=[]]
try:
return func.func_code.co_varnames[-len(func.func_defaults):] # depends on [control=['try'], data=[]]
except (TypeError, ValueError, IndexError):
return tuple() # depends on [control=['except'], data=[]] |
def default_change_form_template(self):
    """
    Determine what the actual `change_form_template` should be.
    Falls back from the model-specific template to the app-level one and
    finally to the generic admin template.
    """
    opts = self.model._meta
    app_label = opts.app_label
    model_name = opts.object_name.lower()
    candidates = (
        "admin/{0}/{1}/change_form.html".format(app_label, model_name),
        "admin/{0}/change_form.html".format(app_label),
        "admin/change_form.html",
    )
    return select_template_name(candidates)
constant[
Determine what the actual `change_form_template` should be.
]
variable[opts] assign[=] name[self].model._meta
variable[app_label] assign[=] name[opts].app_label
return[call[name[select_template_name], parameter[tuple[[<ast.Call object at 0x7da18f09dd80>, <ast.Call object at 0x7da18f09ceb0>, <ast.Constant object at 0x7da18f09e9b0>]]]]] | keyword[def] identifier[default_change_form_template] ( identifier[self] ):
literal[string]
identifier[opts] = identifier[self] . identifier[model] . identifier[_meta]
identifier[app_label] = identifier[opts] . identifier[app_label]
keyword[return] identifier[select_template_name] ((
literal[string] . identifier[format] ( identifier[app_label] , identifier[opts] . identifier[object_name] . identifier[lower] ()),
literal[string] . identifier[format] ( identifier[app_label] ),
literal[string]
)) | def default_change_form_template(self):
"""
Determine what the actual `change_form_template` should be.
"""
opts = self.model._meta
app_label = opts.app_label
return select_template_name(('admin/{0}/{1}/change_form.html'.format(app_label, opts.object_name.lower()), 'admin/{0}/change_form.html'.format(app_label), 'admin/change_form.html')) |
def popFile(title="Lackey Open File"):
    """ Creates a file selection dialog with the specified message and options.
    Returns the selected file. """
    root = tk.Tk()
    # Hide the blank root window that Tk creates behind the dialog.
    root.withdraw()
    try:
        return str(tkFileDialog.askopenfilename(title=title))
    finally:
        # Destroy the hidden root so repeated calls do not accumulate
        # live Tk interpreter instances.
        root.destroy()
constant[ Creates a file selection dialog with the specified message and options.
Returns the selected file. ]
variable[root] assign[=] call[name[tk].Tk, parameter[]]
call[name[root].withdraw, parameter[]]
return[call[name[str], parameter[call[name[tkFileDialog].askopenfilename, parameter[]]]]] | keyword[def] identifier[popFile] ( identifier[title] = literal[string] ):
literal[string]
identifier[root] = identifier[tk] . identifier[Tk] ()
identifier[root] . identifier[withdraw] ()
keyword[return] identifier[str] ( identifier[tkFileDialog] . identifier[askopenfilename] ( identifier[title] = identifier[title] )) | def popFile(title='Lackey Open File'):
""" Creates a file selection dialog with the specified message and options.
Returns the selected file. """
root = tk.Tk()
root.withdraw()
return str(tkFileDialog.askopenfilename(title=title)) |
def closest_cell_center(self,x,y):
    """
    Snap arbitrary sheet coordinates (x, y) to the sheet coordinates of
    the center of the closest unit.
    """
    matrix_idx = self.sheet2matrixidx(x, y)
    return self.matrixidx2sheet(*matrix_idx)
constant[
Given arbitrary sheet coordinates, return the sheet coordinates
of the center of the closest unit.
]
return[call[name[self].matrixidx2sheet, parameter[<ast.Starred object at 0x7da18fe909a0>]]] | keyword[def] identifier[closest_cell_center] ( identifier[self] , identifier[x] , identifier[y] ):
literal[string]
keyword[return] identifier[self] . identifier[matrixidx2sheet] (* identifier[self] . identifier[sheet2matrixidx] ( identifier[x] , identifier[y] )) | def closest_cell_center(self, x, y):
"""
Given arbitrary sheet coordinates, return the sheet coordinates
of the center of the closest unit.
"""
return self.matrixidx2sheet(*self.sheet2matrixidx(x, y)) |
def get_table_objects_by_name(self, dbname, tbl_names):
    """
    Retrieve the table objects for the named tables of one database.
    Client RPC wrapper (Thrift-style send/recv pair): serializes the
    request, sends it over the transport, then blocks until the matching
    response arrives and returns its result.
    Parameters:
    - dbname
    - tbl_names
    """
    self.send_get_table_objects_by_name(dbname, tbl_names)
    return self.recv_get_table_objects_by_name()
constant[
Parameters:
- dbname
- tbl_names
]
call[name[self].send_get_table_objects_by_name, parameter[name[dbname], name[tbl_names]]]
return[call[name[self].recv_get_table_objects_by_name, parameter[]]] | keyword[def] identifier[get_table_objects_by_name] ( identifier[self] , identifier[dbname] , identifier[tbl_names] ):
literal[string]
identifier[self] . identifier[send_get_table_objects_by_name] ( identifier[dbname] , identifier[tbl_names] )
keyword[return] identifier[self] . identifier[recv_get_table_objects_by_name] () | def get_table_objects_by_name(self, dbname, tbl_names):
"""
Parameters:
- dbname
- tbl_names
"""
self.send_get_table_objects_by_name(dbname, tbl_names)
return self.recv_get_table_objects_by_name() |
def replace_entities(entities, pattern):
    """
    Fill a path pattern's ``{entity}`` placeholders with concrete values.
    A placeholder may carry an accepted-value regex in angle brackets
    and/or a fallback value after a pipe, e.g. ``{subject<01|02>|99}``.
    Args:
        entities (dict): Maps entity names to entity values.  NOTE: when a
            value fails its regex and a fallback exists, the fallback is
            written back into this dict (the caller's dict is mutated).
        pattern (str): A path pattern that contains entity names denoted
            by curly braces, e.g. 'sub-{subject}/[var-{name}/]{id}.csv'.
    Returns:
        The pattern with every placeholder substituted, or None when any
        placeholder cannot be resolved (missing value with no fallback,
        a malformed placeholder, or a failed validation with no fallback).
    """
    result = pattern
    for spec in re.findall(r'\{(.*?)\}', pattern):
        parsed = re.search(r'([^|<]+)(<.*?>)?(\|.*)?', spec)
        if parsed is None:
            return None
        name, valid, fallback = parsed.groups()
        if fallback is not None:
            fallback = fallback[1:]  # drop the leading '|'
        if valid is not None and name in entities:
            # Validate against the regex between the angle brackets.
            if not re.match(valid[1:-1], str(entities[name])):
                if fallback is None:
                    return None
                entities[name] = fallback
        value = entities.get(name, fallback)
        if value is None:
            return None
        result = result.replace('{%s}' % spec, str(value))
    return result
constant[
Replaces all entity names in a given pattern with the corresponding
values provided by entities.
Args:
entities (dict): A dictionary mapping entity names to entity values.
pattern (str): A path pattern that contains entity names denoted
by curly braces. Optional portions denoted by square braces.
For example: 'sub-{subject}/[var-{name}/]{id}.csv'
Accepted entity values, using regex matching, denoted within angle
brackets.
For example: 'sub-{subject<01|02>}/{task}.csv'
Returns:
A new string with the entity values inserted where entity names
were denoted in the provided pattern.
]
variable[ents] assign[=] call[name[re].findall, parameter[constant[\{(.*?)\}], name[pattern]]]
variable[new_path] assign[=] name[pattern]
for taget[name[ent]] in starred[name[ents]] begin[:]
variable[match] assign[=] call[name[re].search, parameter[constant[([^|<]+)(<.*?>)?(\|.*)?], name[ent]]]
if compare[name[match] is constant[None]] begin[:]
return[constant[None]]
<ast.Tuple object at 0x7da1b101a0e0> assign[=] call[name[match].groups, parameter[]]
variable[default] assign[=] <ast.IfExp object at 0x7da1b101b700>
if <ast.BoolOp object at 0x7da1b101a920> begin[:]
variable[ent_val] assign[=] call[name[str], parameter[call[name[entities]][name[name]]]]
if <ast.UnaryOp object at 0x7da1b1083b50> begin[:]
if compare[name[default] is constant[None]] begin[:]
return[constant[None]]
call[name[entities]][name[name]] assign[=] name[default]
variable[ent_val] assign[=] call[name[entities].get, parameter[name[name], name[default]]]
if compare[name[ent_val] is constant[None]] begin[:]
return[constant[None]]
variable[new_path] assign[=] call[name[new_path].replace, parameter[binary_operation[constant[{%s}] <ast.Mod object at 0x7da2590d6920> name[ent]], call[name[str], parameter[name[ent_val]]]]]
return[name[new_path]] | keyword[def] identifier[replace_entities] ( identifier[entities] , identifier[pattern] ):
literal[string]
identifier[ents] = identifier[re] . identifier[findall] ( literal[string] , identifier[pattern] )
identifier[new_path] = identifier[pattern]
keyword[for] identifier[ent] keyword[in] identifier[ents] :
identifier[match] = identifier[re] . identifier[search] ( literal[string] , identifier[ent] )
keyword[if] identifier[match] keyword[is] keyword[None] :
keyword[return] keyword[None]
identifier[name] , identifier[valid] , identifier[default] = identifier[match] . identifier[groups] ()
identifier[default] = identifier[default] [ literal[int] :] keyword[if] identifier[default] keyword[is] keyword[not] keyword[None] keyword[else] identifier[default]
keyword[if] identifier[name] keyword[in] identifier[entities] keyword[and] identifier[valid] keyword[is] keyword[not] keyword[None] :
identifier[ent_val] = identifier[str] ( identifier[entities] [ identifier[name] ])
keyword[if] keyword[not] identifier[re] . identifier[match] ( identifier[valid] [ literal[int] :- literal[int] ], identifier[ent_val] ):
keyword[if] identifier[default] keyword[is] keyword[None] :
keyword[return] keyword[None]
identifier[entities] [ identifier[name] ]= identifier[default]
identifier[ent_val] = identifier[entities] . identifier[get] ( identifier[name] , identifier[default] )
keyword[if] identifier[ent_val] keyword[is] keyword[None] :
keyword[return] keyword[None]
identifier[new_path] = identifier[new_path] . identifier[replace] ( literal[string] % identifier[ent] , identifier[str] ( identifier[ent_val] ))
keyword[return] identifier[new_path] | def replace_entities(entities, pattern):
"""
Replaces all entity names in a given pattern with the corresponding
values provided by entities.
Args:
entities (dict): A dictionary mapping entity names to entity values.
pattern (str): A path pattern that contains entity names denoted
by curly braces. Optional portions denoted by square braces.
For example: 'sub-{subject}/[var-{name}/]{id}.csv'
Accepted entity values, using regex matching, denoted within angle
brackets.
For example: 'sub-{subject<01|02>}/{task}.csv'
Returns:
A new string with the entity values inserted where entity names
were denoted in the provided pattern.
"""
ents = re.findall('\\{(.*?)\\}', pattern)
new_path = pattern
for ent in ents:
match = re.search('([^|<]+)(<.*?>)?(\\|.*)?', ent)
if match is None:
return None # depends on [control=['if'], data=[]]
(name, valid, default) = match.groups()
default = default[1:] if default is not None else default
if name in entities and valid is not None:
ent_val = str(entities[name])
if not re.match(valid[1:-1], ent_val):
if default is None:
return None # depends on [control=['if'], data=[]]
entities[name] = default # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
ent_val = entities.get(name, default)
if ent_val is None:
return None # depends on [control=['if'], data=[]]
new_path = new_path.replace('{%s}' % ent, str(ent_val)) # depends on [control=['for'], data=['ent']]
return new_path |
def reset(self):
    """Toggle the drone's emergency state."""
    # Set the emergency flag of the AT*REF command, pause briefly so the
    # drone registers the transition, then clear it again.
    self.at(ardrone.at.ref, False, True)
    time.sleep(0.1)
    self.at(ardrone.at.ref, False, False)
constant[Toggle the drone's emergency state.]
call[name[self].at, parameter[name[ardrone].at.ref, constant[False], constant[True]]]
call[name[time].sleep, parameter[constant[0.1]]]
call[name[self].at, parameter[name[ardrone].at.ref, constant[False], constant[False]]] | keyword[def] identifier[reset] ( identifier[self] ):
literal[string]
identifier[self] . identifier[at] ( identifier[ardrone] . identifier[at] . identifier[ref] , keyword[False] , keyword[True] )
identifier[time] . identifier[sleep] ( literal[int] )
identifier[self] . identifier[at] ( identifier[ardrone] . identifier[at] . identifier[ref] , keyword[False] , keyword[False] ) | def reset(self):
"""Toggle the drone's emergency state."""
self.at(ardrone.at.ref, False, True)
time.sleep(0.1)
self.at(ardrone.at.ref, False, False) |
def process(self):
    """ Receive data from socket and process request """
    response = None
    try:
        payload = self.receive()
        method, args, ref = self.parse(payload)
        response = self.execute(method, args, ref)
    except AuthenticateError as exception:
        # Authentication failed; log and fall through so the peer still
        # gets the empty reply below instead of hanging.
        logging.error(
            'Service error while authenticating request: {}'
            .format(exception), exc_info=1)
    except AuthenticatorInvalidSignature as exception:
        # Signature did not verify; handled the same as a failed login.
        logging.error(
            'Service error while authenticating request: {}'
            .format(exception), exc_info=1)
    except DecodeError as exception:
        # Payload could not be decoded into a request structure.
        logging.error(
            'Service error while decoding request: {}'
            .format(exception), exc_info=1)
    except RequestParseError as exception:
        # Payload decoded but did not contain a well-formed request.
        logging.error(
            'Service error while parsing request: {}'
            .format(exception), exc_info=1)
    else:
        # Success path only: log the raw payload for debugging.
        logging.debug('Service received payload: {}'.format(payload))
    # NOTE(review): after any handled error (or a falsy handler result)
    # an empty string is sent back rather than an error indication.
    if response:
        self.send(response)
    else:
        self.send('')
constant[ Receive data from socket and process request ]
variable[response] assign[=] constant[None]
<ast.Try object at 0x7da1b1020820>
if name[response] begin[:]
call[name[self].send, parameter[name[response]]] | keyword[def] identifier[process] ( identifier[self] ):
literal[string]
identifier[response] = keyword[None]
keyword[try] :
identifier[payload] = identifier[self] . identifier[receive] ()
identifier[method] , identifier[args] , identifier[ref] = identifier[self] . identifier[parse] ( identifier[payload] )
identifier[response] = identifier[self] . identifier[execute] ( identifier[method] , identifier[args] , identifier[ref] )
keyword[except] identifier[AuthenticateError] keyword[as] identifier[exception] :
identifier[logging] . identifier[error] (
literal[string]
. identifier[format] ( identifier[exception] ), identifier[exc_info] = literal[int] )
keyword[except] identifier[AuthenticatorInvalidSignature] keyword[as] identifier[exception] :
identifier[logging] . identifier[error] (
literal[string]
. identifier[format] ( identifier[exception] ), identifier[exc_info] = literal[int] )
keyword[except] identifier[DecodeError] keyword[as] identifier[exception] :
identifier[logging] . identifier[error] (
literal[string]
. identifier[format] ( identifier[exception] ), identifier[exc_info] = literal[int] )
keyword[except] identifier[RequestParseError] keyword[as] identifier[exception] :
identifier[logging] . identifier[error] (
literal[string]
. identifier[format] ( identifier[exception] ), identifier[exc_info] = literal[int] )
keyword[else] :
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[payload] ))
keyword[if] identifier[response] :
identifier[self] . identifier[send] ( identifier[response] )
keyword[else] :
identifier[self] . identifier[send] ( literal[string] ) | def process(self):
""" Receive data from socket and process request """
response = None
try:
payload = self.receive()
(method, args, ref) = self.parse(payload)
response = self.execute(method, args, ref) # depends on [control=['try'], data=[]]
except AuthenticateError as exception:
logging.error('Service error while authenticating request: {}'.format(exception), exc_info=1) # depends on [control=['except'], data=['exception']]
except AuthenticatorInvalidSignature as exception:
logging.error('Service error while authenticating request: {}'.format(exception), exc_info=1) # depends on [control=['except'], data=['exception']]
except DecodeError as exception:
logging.error('Service error while decoding request: {}'.format(exception), exc_info=1) # depends on [control=['except'], data=['exception']]
except RequestParseError as exception:
logging.error('Service error while parsing request: {}'.format(exception), exc_info=1) # depends on [control=['except'], data=['exception']]
else:
logging.debug('Service received payload: {}'.format(payload))
if response:
self.send(response) # depends on [control=['if'], data=[]]
else:
self.send('') |
def WebLookup(url, urlQuery=None, utf8=True):
  """
  Look up webpage at given url with optional query string
  Parameters
  ----------
  url : string
    Web url.
  urlQuery : dictionary [optional: default = None]
    Parameter to be passed to GET method of requests module
  utf8 : boolean [optional: default = True]
    Set response encoding
  Returns
  ----------
  string
    GET response text
  """
  goodlogging.Log.Info("UTIL", "Looking up info from URL:{0} with QUERY:{1})".format(url, urlQuery), verbosity=goodlogging.Verbosity.MINIMAL)
  response = requests.get(url, params=urlQuery)
  goodlogging.Log.Info("UTIL", "Full url: {0}".format(response.url), verbosity=goodlogging.Verbosity.MINIMAL)
  # Identity check: only the literal True forces UTF-8 decoding; truthy
  # non-True values (e.g. 1) keep requests' detected encoding.
  if utf8 is True:
    response.encoding = 'utf-8'
  if(response.status_code == requests.codes.ok):
    return(response.text)
  else:
    # Raises requests.HTTPError for 4xx/5xx responses.  NOTE(review): a
    # non-200 success code (e.g. 204) falls through here without raising
    # and the function implicitly returns None.
    response.raise_for_status()
constant[
Look up webpage at given url with optional query string
Parameters
----------
url : string
Web url.
urlQuery : dictionary [optional: default = None]
Parameter to be passed to GET method of requests module
utf8 : boolean [optional: default = True]
Set response encoding
Returns
----------
string
GET response text
]
call[name[goodlogging].Log.Info, parameter[constant[UTIL], call[constant[Looking up info from URL:{0} with QUERY:{1})].format, parameter[name[url], name[urlQuery]]]]]
variable[response] assign[=] call[name[requests].get, parameter[name[url]]]
call[name[goodlogging].Log.Info, parameter[constant[UTIL], call[constant[Full url: {0}].format, parameter[name[response].url]]]]
if compare[name[utf8] is constant[True]] begin[:]
name[response].encoding assign[=] constant[utf-8]
if compare[name[response].status_code equal[==] name[requests].codes.ok] begin[:]
return[name[response].text] | keyword[def] identifier[WebLookup] ( identifier[url] , identifier[urlQuery] = keyword[None] , identifier[utf8] = keyword[True] ):
literal[string]
identifier[goodlogging] . identifier[Log] . identifier[Info] ( literal[string] , literal[string] . identifier[format] ( identifier[url] , identifier[urlQuery] ), identifier[verbosity] = identifier[goodlogging] . identifier[Verbosity] . identifier[MINIMAL] )
identifier[response] = identifier[requests] . identifier[get] ( identifier[url] , identifier[params] = identifier[urlQuery] )
identifier[goodlogging] . identifier[Log] . identifier[Info] ( literal[string] , literal[string] . identifier[format] ( identifier[response] . identifier[url] ), identifier[verbosity] = identifier[goodlogging] . identifier[Verbosity] . identifier[MINIMAL] )
keyword[if] identifier[utf8] keyword[is] keyword[True] :
identifier[response] . identifier[encoding] = literal[string]
keyword[if] ( identifier[response] . identifier[status_code] == identifier[requests] . identifier[codes] . identifier[ok] ):
keyword[return] ( identifier[response] . identifier[text] )
keyword[else] :
identifier[response] . identifier[raise_for_status] () | def WebLookup(url, urlQuery=None, utf8=True):
"""
Look up webpage at given url with optional query string
Parameters
----------
url : string
Web url.
urlQuery : dictionary [optional: default = None]
Parameter to be passed to GET method of requests module
utf8 : boolean [optional: default = True]
Set response encoding
Returns
----------
string
GET response text
"""
goodlogging.Log.Info('UTIL', 'Looking up info from URL:{0} with QUERY:{1})'.format(url, urlQuery), verbosity=goodlogging.Verbosity.MINIMAL)
response = requests.get(url, params=urlQuery)
goodlogging.Log.Info('UTIL', 'Full url: {0}'.format(response.url), verbosity=goodlogging.Verbosity.MINIMAL)
if utf8 is True:
response.encoding = 'utf-8' # depends on [control=['if'], data=[]]
if response.status_code == requests.codes.ok:
return response.text # depends on [control=['if'], data=[]]
else:
response.raise_for_status() |
def Extract(self, components):
    """Extracts interesting paths from a given path.
    Args:
      components: Source string represented as a list of components.
    Returns:
      A list of extracted paths (as strings); empty when no rundll32.exe
      component is present or nothing follows it.
    """
    # Find the *last* component that ends with rundll32.exe
    # (case-insensitive).
    rundll_index = -1
    for index, component in enumerate(components):
      if component.lower().endswith("rundll32.exe"):
        rundll_index = index
    if rundll_index == -1:
      return []
    components = components[(rundll_index + 1):]
    if not components:
      # rundll32.exe was the final component, so there is no DLL path to
      # extract (indexing components[-1] below would raise IndexError).
      return []
    # We expect components after "rundll32.exe" to point at a DLL and a
    # function. For example:
    # rundll32.exe "C:\Windows\system32\advpack.dll",DelNodeRunDLL32
    last_component = components[-1].rsplit(",", 1)[0]
    extracted_path = " ".join(components[0:-1] + [last_component])
    return [extracted_path]
constant[Extracts interesting paths from a given path.
Args:
components: Source string represented as a list of components.
Returns:
A list of extracted paths (as strings).
]
variable[rundll_index] assign[=] <ast.UnaryOp object at 0x7da1b1ce6740>
for taget[tuple[[<ast.Name object at 0x7da1b1ce7cd0>, <ast.Name object at 0x7da1b1ce59c0>]]] in starred[call[name[enumerate], parameter[name[components]]]] begin[:]
if call[call[name[component].lower, parameter[]].endswith, parameter[constant[rundll32.exe]]] begin[:]
variable[rundll_index] assign[=] name[index]
if compare[name[rundll_index] equal[==] <ast.UnaryOp object at 0x7da1b1ce5fc0>] begin[:]
return[list[[]]]
variable[components] assign[=] call[name[components]][<ast.Slice object at 0x7da1b1cc3d90>]
variable[last_component] assign[=] call[call[call[name[components]][<ast.UnaryOp object at 0x7da1b1cc3220>].rsplit, parameter[constant[,], constant[1]]]][constant[0]]
variable[extracted_path] assign[=] call[constant[ ].join, parameter[binary_operation[call[name[components]][<ast.Slice object at 0x7da1b1cc3070>] + list[[<ast.Name object at 0x7da1b1cc3940>]]]]]
return[list[[<ast.Name object at 0x7da1b1cc3eb0>]]] | keyword[def] identifier[Extract] ( identifier[self] , identifier[components] ):
literal[string]
identifier[rundll_index] =- literal[int]
keyword[for] identifier[index] , identifier[component] keyword[in] identifier[enumerate] ( identifier[components] ):
keyword[if] identifier[component] . identifier[lower] (). identifier[endswith] ( literal[string] ):
identifier[rundll_index] = identifier[index]
keyword[if] identifier[rundll_index] ==- literal[int] :
keyword[return] []
identifier[components] = identifier[components] [( identifier[rundll_index] + literal[int] ):]
identifier[last_component] = identifier[components] [- literal[int] ]. identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ]
identifier[extracted_path] = literal[string] . identifier[join] ( identifier[components] [ literal[int] :- literal[int] ]+[ identifier[last_component] ])
keyword[return] [ identifier[extracted_path] ] | def Extract(self, components):
"""Extracts interesting paths from a given path.
Args:
components: Source string represented as a list of components.
Returns:
A list of extracted paths (as strings).
"""
rundll_index = -1
for (index, component) in enumerate(components):
if component.lower().endswith('rundll32.exe'):
rundll_index = index # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if rundll_index == -1:
return [] # depends on [control=['if'], data=[]]
components = components[rundll_index + 1:]
# We expect components after "rundll32.exe" to point at a DLL and a
# function. For example:
# rundll32.exe "C:\Windows\system32\advpack.dll",DelNodeRunDLL32
last_component = components[-1].rsplit(',', 1)[0]
extracted_path = ' '.join(components[0:-1] + [last_component])
return [extracted_path] |
def find_commons(lists):
"""Finds common values
:param lists: List of lists
:return: List of values that are in common between inner lists
"""
others = lists[1:]
return [
val
for val in lists[0]
if is_in_all(val, others)
] | def function[find_commons, parameter[lists]]:
constant[Finds common values
:param lists: List of lists
:return: List of values that are in common between inner lists
]
variable[others] assign[=] call[name[lists]][<ast.Slice object at 0x7da2044c1240>]
return[<ast.ListComp object at 0x7da2044c2140>] | keyword[def] identifier[find_commons] ( identifier[lists] ):
literal[string]
identifier[others] = identifier[lists] [ literal[int] :]
keyword[return] [
identifier[val]
keyword[for] identifier[val] keyword[in] identifier[lists] [ literal[int] ]
keyword[if] identifier[is_in_all] ( identifier[val] , identifier[others] )
] | def find_commons(lists):
"""Finds common values
:param lists: List of lists
:return: List of values that are in common between inner lists
"""
others = lists[1:]
return [val for val in lists[0] if is_in_all(val, others)] |
def send_rpc(self, service, routing_id, method, args=None, kwargs=None,
broadcast=False):
'''Send out an RPC request
:param service: the service name (the routing top level)
:type service: anything hash-able
:param routing_id:
The id used for routing within the registered handlers of the
service.
:type routing_id: int
:param method: the method name to call
:type method: string
:param args: the positional arguments to send along with the request
:type args: tuple
:param kwargs: keyword arguments to send along with the request
:type kwargs: dict
:param broadcast:
if ``True``, send to all peers with matching subscriptions
:type broadcast: bool
:returns:
a :class:`RPC <junction.futures.RPC>` object representing the
RPC and its future response.
:raises:
:class:`Unroutable <junction.errors.Unroutable>` if the client
doesn't have a connection to a hub
'''
if not self._peer.up:
raise errors.Unroutable()
return self._dispatcher.send_proxied_rpc(service, routing_id, method,
args or (), kwargs or {}, not broadcast) | def function[send_rpc, parameter[self, service, routing_id, method, args, kwargs, broadcast]]:
constant[Send out an RPC request
:param service: the service name (the routing top level)
:type service: anything hash-able
:param routing_id:
The id used for routing within the registered handlers of the
service.
:type routing_id: int
:param method: the method name to call
:type method: string
:param args: the positional arguments to send along with the request
:type args: tuple
:param kwargs: keyword arguments to send along with the request
:type kwargs: dict
:param broadcast:
if ``True``, send to all peers with matching subscriptions
:type broadcast: bool
:returns:
a :class:`RPC <junction.futures.RPC>` object representing the
RPC and its future response.
:raises:
:class:`Unroutable <junction.errors.Unroutable>` if the client
doesn't have a connection to a hub
]
if <ast.UnaryOp object at 0x7da1b11edb10> begin[:]
<ast.Raise object at 0x7da1b11ecdc0>
return[call[name[self]._dispatcher.send_proxied_rpc, parameter[name[service], name[routing_id], name[method], <ast.BoolOp object at 0x7da18f09ff40>, <ast.BoolOp object at 0x7da18f09c7f0>, <ast.UnaryOp object at 0x7da18f09f880>]]] | keyword[def] identifier[send_rpc] ( identifier[self] , identifier[service] , identifier[routing_id] , identifier[method] , identifier[args] = keyword[None] , identifier[kwargs] = keyword[None] ,
identifier[broadcast] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_peer] . identifier[up] :
keyword[raise] identifier[errors] . identifier[Unroutable] ()
keyword[return] identifier[self] . identifier[_dispatcher] . identifier[send_proxied_rpc] ( identifier[service] , identifier[routing_id] , identifier[method] ,
identifier[args] keyword[or] (), identifier[kwargs] keyword[or] {}, keyword[not] identifier[broadcast] ) | def send_rpc(self, service, routing_id, method, args=None, kwargs=None, broadcast=False):
"""Send out an RPC request
:param service: the service name (the routing top level)
:type service: anything hash-able
:param routing_id:
The id used for routing within the registered handlers of the
service.
:type routing_id: int
:param method: the method name to call
:type method: string
:param args: the positional arguments to send along with the request
:type args: tuple
:param kwargs: keyword arguments to send along with the request
:type kwargs: dict
:param broadcast:
if ``True``, send to all peers with matching subscriptions
:type broadcast: bool
:returns:
a :class:`RPC <junction.futures.RPC>` object representing the
RPC and its future response.
:raises:
:class:`Unroutable <junction.errors.Unroutable>` if the client
doesn't have a connection to a hub
"""
if not self._peer.up:
raise errors.Unroutable() # depends on [control=['if'], data=[]]
return self._dispatcher.send_proxied_rpc(service, routing_id, method, args or (), kwargs or {}, not broadcast) |
def _set_es_workers(self, **kwargs):
        """
        Creates index worker instances for each class to index
        kwargs:
        -------
        idx_only_base[bool]: True will only index the base class
        """
        def make_es_worker(search_conn, es_index, es_doc_type, class_name):
            """
            Returns a new es_worker instance
            args:
            -----
            search_conn: the connection to elasticsearch
            es_index: the name of the elasticsearch index
            es_doc_type: the name of the elasticsearch doctype
            class_name: name of the rdf class that is being indexed
            """
            # Shallow copy: the worker shares the underlying connection state
            # but carries its own es_index/doc_type settings.
            new_esbase = copy.copy(search_conn)
            new_esbase.es_index = es_index
            new_esbase.doc_type = es_doc_type
            log.info("Indexing '%s' into ES index '%s' doctype '%s'",
                     class_name.pyuri,
                     es_index,
                     es_doc_type)
            return new_esbase
        def additional_indexers(rdf_class):
            """
            returns additional classes to index based off of the es definitions
            """
            rtn_list = rdf_class.es_indexers()
            # The base class gets its own worker (self.es_worker below), so it
            # is removed from the "additional" list.
            rtn_list.remove(rdf_class)
            return rtn_list
        # Worker for the base rdf_class itself.
        self.es_worker = make_es_worker(self.search_conn,
                                        self.es_index,
                                        self.es_doc_type,
                                        self.rdf_class.__name__)
        if not kwargs.get("idx_only_base"):
            # One worker per additional class, keyed by class name; index and
            # doctype come from each class's own es_defs entries.
            self.other_indexers = {item.__name__: make_es_worker(
                self.search_conn,
                item.es_defs.get('kds_esIndex')[0],
                item.es_defs.get('kds_esDocType')[0],
                item.__name__)
                for item in additional_indexers(self.rdf_class)}
        else:
self.other_indexers = {} | def function[_set_es_workers, parameter[self]]:
constant[
Creates index worker instances for each class to index
kwargs:
-------
idx_only_base[bool]: True will only index the base class
]
def function[make_es_worker, parameter[search_conn, es_index, es_doc_type, class_name]]:
constant[
Returns a new es_worker instance
args:
-----
search_conn: the connection to elasticsearch
es_index: the name of the elasticsearch index
es_doc_type: the name of the elasticsearch doctype
class_name: name of the rdf class that is being indexed
]
variable[new_esbase] assign[=] call[name[copy].copy, parameter[name[search_conn]]]
name[new_esbase].es_index assign[=] name[es_index]
name[new_esbase].doc_type assign[=] name[es_doc_type]
call[name[log].info, parameter[constant[Indexing '%s' into ES index '%s' doctype '%s'], name[class_name].pyuri, name[es_index], name[es_doc_type]]]
return[name[new_esbase]]
def function[additional_indexers, parameter[rdf_class]]:
constant[
returns additional classes to index based off of the es definitions
]
variable[rtn_list] assign[=] call[name[rdf_class].es_indexers, parameter[]]
call[name[rtn_list].remove, parameter[name[rdf_class]]]
return[name[rtn_list]]
name[self].es_worker assign[=] call[name[make_es_worker], parameter[name[self].search_conn, name[self].es_index, name[self].es_doc_type, name[self].rdf_class.__name__]]
if <ast.UnaryOp object at 0x7da1b15f2e90> begin[:]
name[self].other_indexers assign[=] <ast.DictComp object at 0x7da1b15f0a00> | keyword[def] identifier[_set_es_workers] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
keyword[def] identifier[make_es_worker] ( identifier[search_conn] , identifier[es_index] , identifier[es_doc_type] , identifier[class_name] ):
literal[string]
identifier[new_esbase] = identifier[copy] . identifier[copy] ( identifier[search_conn] )
identifier[new_esbase] . identifier[es_index] = identifier[es_index]
identifier[new_esbase] . identifier[doc_type] = identifier[es_doc_type]
identifier[log] . identifier[info] ( literal[string] ,
identifier[class_name] . identifier[pyuri] ,
identifier[es_index] ,
identifier[es_doc_type] )
keyword[return] identifier[new_esbase]
keyword[def] identifier[additional_indexers] ( identifier[rdf_class] ):
literal[string]
identifier[rtn_list] = identifier[rdf_class] . identifier[es_indexers] ()
identifier[rtn_list] . identifier[remove] ( identifier[rdf_class] )
keyword[return] identifier[rtn_list]
identifier[self] . identifier[es_worker] = identifier[make_es_worker] ( identifier[self] . identifier[search_conn] ,
identifier[self] . identifier[es_index] ,
identifier[self] . identifier[es_doc_type] ,
identifier[self] . identifier[rdf_class] . identifier[__name__] )
keyword[if] keyword[not] identifier[kwargs] . identifier[get] ( literal[string] ):
identifier[self] . identifier[other_indexers] ={ identifier[item] . identifier[__name__] : identifier[make_es_worker] (
identifier[self] . identifier[search_conn] ,
identifier[item] . identifier[es_defs] . identifier[get] ( literal[string] )[ literal[int] ],
identifier[item] . identifier[es_defs] . identifier[get] ( literal[string] )[ literal[int] ],
identifier[item] . identifier[__name__] )
keyword[for] identifier[item] keyword[in] identifier[additional_indexers] ( identifier[self] . identifier[rdf_class] )}
keyword[else] :
identifier[self] . identifier[other_indexers] ={} | def _set_es_workers(self, **kwargs):
"""
Creates index worker instances for each class to index
kwargs:
-------
idx_only_base[bool]: True will only index the base class
"""
def make_es_worker(search_conn, es_index, es_doc_type, class_name):
"""
Returns a new es_worker instance
args:
-----
search_conn: the connection to elasticsearch
es_index: the name of the elasticsearch index
es_doc_type: the name of the elasticsearch doctype
class_name: name of the rdf class that is being indexed
"""
new_esbase = copy.copy(search_conn)
new_esbase.es_index = es_index
new_esbase.doc_type = es_doc_type
log.info("Indexing '%s' into ES index '%s' doctype '%s'", class_name.pyuri, es_index, es_doc_type)
return new_esbase
def additional_indexers(rdf_class):
"""
returns additional classes to index based off of the es definitions
"""
rtn_list = rdf_class.es_indexers()
rtn_list.remove(rdf_class)
return rtn_list
self.es_worker = make_es_worker(self.search_conn, self.es_index, self.es_doc_type, self.rdf_class.__name__)
if not kwargs.get('idx_only_base'):
self.other_indexers = {item.__name__: make_es_worker(self.search_conn, item.es_defs.get('kds_esIndex')[0], item.es_defs.get('kds_esDocType')[0], item.__name__) for item in additional_indexers(self.rdf_class)} # depends on [control=['if'], data=[]]
else:
self.other_indexers = {} |
def pdb_downloader_and_metadata(self, outdir=None, pdb_file_type=None, force_rerun=False):
        """Download ALL mapped experimental structures to the protein structures directory.
        Args:
            outdir (str): Path to output directory, if protein structures directory not set or other output directory is
                desired
            pdb_file_type (str): Type of PDB file to download, if not already set or other format is desired
            force_rerun (bool): If files should be re-downloaded if they already exist
        Returns:
            list: List of PDB IDs that were downloaded
        Todo:
            * Parse mmtf or PDB file for header information, rather than always getting the cif file for header info
        """
        if not outdir:
            # Fall back to the instance-level directory; with neither set there
            # is nowhere to write, so fail fast.
            outdir = self.structure_dir
            if not outdir:
                raise ValueError('Output directory must be specified')
        if not pdb_file_type:
            pdb_file_type = self.pdb_file_type
        # Check if we have any PDBs
        # NOTE(review): this early exit returns None while the normal path
        # returns a list -- callers should not assume a list is always returned.
        if self.num_structures_experimental == 0:
            log.debug('{}: no structures available - nothing will be downloaded'.format(self.id))
            return
        downloaded_pdb_ids = []
        # Download the PDBs
        for s in self.get_experimental_structures():
            log.debug('{}: downloading structure file from the PDB...'.format(s.id))
            s.download_structure_file(outdir=outdir, file_type=pdb_file_type, force_rerun=force_rerun, load_header_metadata=True)
            downloaded_pdb_ids.append(s.id)
return downloaded_pdb_ids | def function[pdb_downloader_and_metadata, parameter[self, outdir, pdb_file_type, force_rerun]]:
constant[Download ALL mapped experimental structures to the protein structures directory.
Args:
outdir (str): Path to output directory, if protein structures directory not set or other output directory is
desired
pdb_file_type (str): Type of PDB file to download, if not already set or other format is desired
force_rerun (bool): If files should be re-downloaded if they already exist
Returns:
list: List of PDB IDs that were downloaded
Todo:
* Parse mmtf or PDB file for header information, rather than always getting the cif file for header info
]
if <ast.UnaryOp object at 0x7da1b0ea2650> begin[:]
variable[outdir] assign[=] name[self].structure_dir
if <ast.UnaryOp object at 0x7da1b0ea2e00> begin[:]
<ast.Raise object at 0x7da1b0ea2dd0>
if <ast.UnaryOp object at 0x7da1b0ea2e90> begin[:]
variable[pdb_file_type] assign[=] name[self].pdb_file_type
if compare[name[self].num_structures_experimental equal[==] constant[0]] begin[:]
call[name[log].debug, parameter[call[constant[{}: no structures available - nothing will be downloaded].format, parameter[name[self].id]]]]
return[None]
variable[downloaded_pdb_ids] assign[=] list[[]]
for taget[name[s]] in starred[call[name[self].get_experimental_structures, parameter[]]] begin[:]
call[name[log].debug, parameter[call[constant[{}: downloading structure file from the PDB...].format, parameter[name[s].id]]]]
call[name[s].download_structure_file, parameter[]]
call[name[downloaded_pdb_ids].append, parameter[name[s].id]]
return[name[downloaded_pdb_ids]] | keyword[def] identifier[pdb_downloader_and_metadata] ( identifier[self] , identifier[outdir] = keyword[None] , identifier[pdb_file_type] = keyword[None] , identifier[force_rerun] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[outdir] :
identifier[outdir] = identifier[self] . identifier[structure_dir]
keyword[if] keyword[not] identifier[outdir] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] keyword[not] identifier[pdb_file_type] :
identifier[pdb_file_type] = identifier[self] . identifier[pdb_file_type]
keyword[if] identifier[self] . identifier[num_structures_experimental] == literal[int] :
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[self] . identifier[id] ))
keyword[return]
identifier[downloaded_pdb_ids] =[]
keyword[for] identifier[s] keyword[in] identifier[self] . identifier[get_experimental_structures] ():
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[s] . identifier[id] ))
identifier[s] . identifier[download_structure_file] ( identifier[outdir] = identifier[outdir] , identifier[file_type] = identifier[pdb_file_type] , identifier[force_rerun] = identifier[force_rerun] , identifier[load_header_metadata] = keyword[True] )
identifier[downloaded_pdb_ids] . identifier[append] ( identifier[s] . identifier[id] )
keyword[return] identifier[downloaded_pdb_ids] | def pdb_downloader_and_metadata(self, outdir=None, pdb_file_type=None, force_rerun=False):
"""Download ALL mapped experimental structures to the protein structures directory.
Args:
outdir (str): Path to output directory, if protein structures directory not set or other output directory is
desired
pdb_file_type (str): Type of PDB file to download, if not already set or other format is desired
force_rerun (bool): If files should be re-downloaded if they already exist
Returns:
list: List of PDB IDs that were downloaded
Todo:
* Parse mmtf or PDB file for header information, rather than always getting the cif file for header info
"""
if not outdir:
outdir = self.structure_dir
if not outdir:
raise ValueError('Output directory must be specified') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if not pdb_file_type:
pdb_file_type = self.pdb_file_type # depends on [control=['if'], data=[]]
# Check if we have any PDBs
if self.num_structures_experimental == 0:
log.debug('{}: no structures available - nothing will be downloaded'.format(self.id))
return # depends on [control=['if'], data=[]]
downloaded_pdb_ids = []
# Download the PDBs
for s in self.get_experimental_structures():
log.debug('{}: downloading structure file from the PDB...'.format(s.id))
s.download_structure_file(outdir=outdir, file_type=pdb_file_type, force_rerun=force_rerun, load_header_metadata=True)
downloaded_pdb_ids.append(s.id) # depends on [control=['for'], data=['s']]
return downloaded_pdb_ids |
def send(self, message=None, api_key=None, secure=None, test=None,
             **request_args):
    '''Post a message to the Postmark API.

    The result of :func:`requests.post` is returned.

    :param message: Postmark message data (`dict` or :class:`Message`).
    :param api_key: Postmark API key string.
    :param test: When truthy, make a test request to the Postmark API.
    :param secure: When truthy, use the https Postmark API.
    :param \*\*request_args: Forwarded on to :func:`requests.post`.
    :rtype: :class:`requests.Response`
    '''
    # NOTE(review): _get_headers receives request_args and is called before
    # request_args is forwarded to requests.post -- keep this ordering.
    headers = self._get_headers(api_key=api_key, test=test,
                                request_args=request_args)
    data = self._get_request_content(message)
    url = self._get_api_url(secure=secure)
return self._request(url, data=data, headers=headers, **request_args) | def function[send, parameter[self, message, api_key, secure, test]]:
constant[Send request to Postmark API.
Returns result of :func:`requests.post`.
:param message: Your Postmark message data.
:type message: `dict` or :class:`Message`
:param api_key: Your Postmark API key.
:type api_key: `str`
:param test: Make a test request to the Postmark API.
:param secure: Use the https Postmark API.
:param \*\*request_args: Passed to :func:`requests.post`
:rtype: :class:`requests.Response`
]
variable[headers] assign[=] call[name[self]._get_headers, parameter[]]
variable[data] assign[=] call[name[self]._get_request_content, parameter[name[message]]]
variable[url] assign[=] call[name[self]._get_api_url, parameter[]]
return[call[name[self]._request, parameter[name[url]]]] | keyword[def] identifier[send] ( identifier[self] , identifier[message] = keyword[None] , identifier[api_key] = keyword[None] , identifier[secure] = keyword[None] , identifier[test] = keyword[None] ,
** identifier[request_args] ):
literal[string]
identifier[headers] = identifier[self] . identifier[_get_headers] ( identifier[api_key] = identifier[api_key] , identifier[test] = identifier[test] ,
identifier[request_args] = identifier[request_args] )
identifier[data] = identifier[self] . identifier[_get_request_content] ( identifier[message] )
identifier[url] = identifier[self] . identifier[_get_api_url] ( identifier[secure] = identifier[secure] )
keyword[return] identifier[self] . identifier[_request] ( identifier[url] , identifier[data] = identifier[data] , identifier[headers] = identifier[headers] ,** identifier[request_args] ) | def send(self, message=None, api_key=None, secure=None, test=None, **request_args):
"""Send request to Postmark API.
Returns result of :func:`requests.post`.
:param message: Your Postmark message data.
:type message: `dict` or :class:`Message`
:param api_key: Your Postmark API key.
:type api_key: `str`
:param test: Make a test request to the Postmark API.
:param secure: Use the https Postmark API.
:param \\*\\*request_args: Passed to :func:`requests.post`
:rtype: :class:`requests.Response`
"""
headers = self._get_headers(api_key=api_key, test=test, request_args=request_args)
data = self._get_request_content(message)
url = self._get_api_url(secure=secure)
return self._request(url, data=data, headers=headers, **request_args) |
def depth(self):
        """Number of hierarchy levels shared by every category in this graph.

        Returns 0 when the graph holds no categories; raises ValueError when
        the stored categories disagree about their depth.
        """
        cats = self._categories
        if not cats:
            return 0
        first_depth = cats[0].depth
        # Every remaining category must report the same depth as the first.
        if any(node.depth != first_depth for node in cats[1:]):
            raise ValueError('category depth not uniform')
return first_depth | def function[depth, parameter[self]]:
constant[
The number of hierarchy levels in this category graph. Returns 0 if
it contains no categories.
]
variable[categories] assign[=] name[self]._categories
if <ast.UnaryOp object at 0x7da204961c60> begin[:]
return[constant[0]]
variable[first_depth] assign[=] call[name[categories]][constant[0]].depth
for taget[name[category]] in starred[call[name[categories]][<ast.Slice object at 0x7da2049607f0>]] begin[:]
if compare[name[category].depth not_equal[!=] name[first_depth]] begin[:]
<ast.Raise object at 0x7da204963c70>
return[name[first_depth]] | keyword[def] identifier[depth] ( identifier[self] ):
literal[string]
identifier[categories] = identifier[self] . identifier[_categories]
keyword[if] keyword[not] identifier[categories] :
keyword[return] literal[int]
identifier[first_depth] = identifier[categories] [ literal[int] ]. identifier[depth]
keyword[for] identifier[category] keyword[in] identifier[categories] [ literal[int] :]:
keyword[if] identifier[category] . identifier[depth] != identifier[first_depth] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] identifier[first_depth] | def depth(self):
"""
The number of hierarchy levels in this category graph. Returns 0 if
it contains no categories.
"""
categories = self._categories
if not categories:
return 0 # depends on [control=['if'], data=[]]
first_depth = categories[0].depth
for category in categories[1:]:
if category.depth != first_depth:
raise ValueError('category depth not uniform') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['category']]
return first_depth |
def detectWebOSTablet(self):
        """Return detection of an HP WebOS tablet
        Detects if the current browser is on an HP tablet running WebOS.
        True only when the user-agent string contains both the HP WebOS
        marker and the generic tablet marker.
        """
        return UAgentInfo.deviceWebOShp in self.__userAgent \
and UAgentInfo.deviceTablet in self.__userAgent | def function[detectWebOSTablet, parameter[self]]:
constant[Return detection of an HP WebOS tablet
Detects if the current browser is on an HP tablet running WebOS.
]
return[<ast.BoolOp object at 0x7da1b0aa47c0>] | keyword[def] identifier[detectWebOSTablet] ( identifier[self] ):
literal[string]
keyword[return] identifier[UAgentInfo] . identifier[deviceWebOShp] keyword[in] identifier[self] . identifier[__userAgent] keyword[and] identifier[UAgentInfo] . identifier[deviceTablet] keyword[in] identifier[self] . identifier[__userAgent] | def detectWebOSTablet(self):
"""Return detection of an HP WebOS tablet
Detects if the current browser is on an HP tablet running WebOS.
"""
return UAgentInfo.deviceWebOShp in self.__userAgent and UAgentInfo.deviceTablet in self.__userAgent |
def match_mstring(self, tokens, item, use_bytes=None):
        """Match prefix and suffix string.

        ``tokens`` unpacks to a (prefix, name, suffix) triple; ``item`` is the
        expression being matched, handled here as source-code text (the checks
        and definitions emitted below are strings of generated code).  When
        ``use_bytes`` is None it is inferred from b-prefixes on the literals.
        """
        prefix, name, suffix = tokens
        if use_bytes is None:
            if prefix.startswith("b") or suffix.startswith("b"):
                # Only an all-bytes pattern (both ends b-prefixed) is allowed;
                # mixing a str side with a bytes side is a syntax error.
                if prefix.startswith("b") and suffix.startswith("b"):
                    use_bytes = True
                else:
                    raise CoconutDeferredSyntaxError("string literals and byte literals cannot be added in patterns", self.loc)
        if use_bytes:
            self.add_check("_coconut.isinstance(" + item + ", _coconut.bytes)")
        else:
            self.add_check("_coconut.isinstance(" + item + ", _coconut.str)")
        if prefix is not None:
            self.add_check(item + ".startswith(" + prefix + ")")
        if suffix is not None:
            self.add_check(item + ".endswith(" + suffix + ")")
        # Bind the middle of the string (item minus prefix/suffix) to name,
        # unless the pattern used the wildcard placeholder.
        if name != wildcard:
            self.add_def(
                name + " = " + item + "[" +
                ("" if prefix is None else "_coconut.len(" + prefix + ")") + ":"
                + ("" if suffix is None else "-_coconut.len(" + suffix + ")") + "]",
) | def function[match_mstring, parameter[self, tokens, item, use_bytes]]:
constant[Match prefix and suffix string.]
<ast.Tuple object at 0x7da20e962ce0> assign[=] name[tokens]
if compare[name[use_bytes] is constant[None]] begin[:]
if <ast.BoolOp object at 0x7da20e963280> begin[:]
if <ast.BoolOp object at 0x7da20e961ed0> begin[:]
variable[use_bytes] assign[=] constant[True]
if name[use_bytes] begin[:]
call[name[self].add_check, parameter[binary_operation[binary_operation[constant[_coconut.isinstance(] + name[item]] + constant[, _coconut.bytes)]]]]
if compare[name[prefix] is_not constant[None]] begin[:]
call[name[self].add_check, parameter[binary_operation[binary_operation[binary_operation[name[item] + constant[.startswith(]] + name[prefix]] + constant[)]]]]
if compare[name[suffix] is_not constant[None]] begin[:]
call[name[self].add_check, parameter[binary_operation[binary_operation[binary_operation[name[item] + constant[.endswith(]] + name[suffix]] + constant[)]]]]
if compare[name[name] not_equal[!=] name[wildcard]] begin[:]
call[name[self].add_def, parameter[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[name[name] + constant[ = ]] + name[item]] + constant[[]] + <ast.IfExp object at 0x7da20c6e5600>] + constant[:]] + <ast.IfExp object at 0x7da20c6e60e0>] + constant[]]]]] | keyword[def] identifier[match_mstring] ( identifier[self] , identifier[tokens] , identifier[item] , identifier[use_bytes] = keyword[None] ):
literal[string]
identifier[prefix] , identifier[name] , identifier[suffix] = identifier[tokens]
keyword[if] identifier[use_bytes] keyword[is] keyword[None] :
keyword[if] identifier[prefix] . identifier[startswith] ( literal[string] ) keyword[or] identifier[suffix] . identifier[startswith] ( literal[string] ):
keyword[if] identifier[prefix] . identifier[startswith] ( literal[string] ) keyword[and] identifier[suffix] . identifier[startswith] ( literal[string] ):
identifier[use_bytes] = keyword[True]
keyword[else] :
keyword[raise] identifier[CoconutDeferredSyntaxError] ( literal[string] , identifier[self] . identifier[loc] )
keyword[if] identifier[use_bytes] :
identifier[self] . identifier[add_check] ( literal[string] + identifier[item] + literal[string] )
keyword[else] :
identifier[self] . identifier[add_check] ( literal[string] + identifier[item] + literal[string] )
keyword[if] identifier[prefix] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[add_check] ( identifier[item] + literal[string] + identifier[prefix] + literal[string] )
keyword[if] identifier[suffix] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[add_check] ( identifier[item] + literal[string] + identifier[suffix] + literal[string] )
keyword[if] identifier[name] != identifier[wildcard] :
identifier[self] . identifier[add_def] (
identifier[name] + literal[string] + identifier[item] + literal[string] +
( literal[string] keyword[if] identifier[prefix] keyword[is] keyword[None] keyword[else] literal[string] + identifier[prefix] + literal[string] )+ literal[string]
+( literal[string] keyword[if] identifier[suffix] keyword[is] keyword[None] keyword[else] literal[string] + identifier[suffix] + literal[string] )+ literal[string] ,
) | def match_mstring(self, tokens, item, use_bytes=None):
"""Match prefix and suffix string."""
(prefix, name, suffix) = tokens
if use_bytes is None:
if prefix.startswith('b') or suffix.startswith('b'):
if prefix.startswith('b') and suffix.startswith('b'):
use_bytes = True # depends on [control=['if'], data=[]]
else:
raise CoconutDeferredSyntaxError('string literals and byte literals cannot be added in patterns', self.loc) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['use_bytes']]
if use_bytes:
self.add_check('_coconut.isinstance(' + item + ', _coconut.bytes)') # depends on [control=['if'], data=[]]
else:
self.add_check('_coconut.isinstance(' + item + ', _coconut.str)')
if prefix is not None:
self.add_check(item + '.startswith(' + prefix + ')') # depends on [control=['if'], data=['prefix']]
if suffix is not None:
self.add_check(item + '.endswith(' + suffix + ')') # depends on [control=['if'], data=['suffix']]
if name != wildcard:
self.add_def(name + ' = ' + item + '[' + ('' if prefix is None else '_coconut.len(' + prefix + ')') + ':' + ('' if suffix is None else '-_coconut.len(' + suffix + ')') + ']') # depends on [control=['if'], data=['name']] |
def role_update(auth=None, **kwargs):
    '''
    Update a role
    CLI Example:
    .. code-block:: bash
        salt '*' keystoneng.role_update name=role1 new_name=newrole
        salt '*' keystoneng.role_update name=1eb6edd5525e4ac39af571adee673559 new_name=newrole
    '''
    cloud = get_operator_cloud(auth)
    kwargs = _clean_kwargs(**kwargs)
    try:
        # Callers pass the new value as ``new_name``; the SDK expects ``name``.
        kwargs['name'] = kwargs.pop('new_name')
    except KeyError:
        pass
return cloud.update_role(**kwargs) | def function[role_update, parameter[auth]]:
constant[
Update a role
CLI Example:
.. code-block:: bash
salt '*' keystoneng.role_update name=role1 new_name=newrole
salt '*' keystoneng.role_update name=1eb6edd5525e4ac39af571adee673559 new_name=newrole
]
variable[cloud] assign[=] call[name[get_operator_cloud], parameter[name[auth]]]
variable[kwargs] assign[=] call[name[_clean_kwargs], parameter[]]
if compare[constant[new_name] in name[kwargs]] begin[:]
call[name[kwargs]][constant[name]] assign[=] call[name[kwargs].pop, parameter[constant[new_name]]]
return[call[name[cloud].update_role, parameter[]]] | keyword[def] identifier[role_update] ( identifier[auth] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[cloud] = identifier[get_operator_cloud] ( identifier[auth] )
identifier[kwargs] = identifier[_clean_kwargs] (** identifier[kwargs] )
keyword[if] literal[string] keyword[in] identifier[kwargs] :
identifier[kwargs] [ literal[string] ]= identifier[kwargs] . identifier[pop] ( literal[string] )
keyword[return] identifier[cloud] . identifier[update_role] (** identifier[kwargs] ) | def role_update(auth=None, **kwargs):
"""
Update a role
CLI Example:
.. code-block:: bash
salt '*' keystoneng.role_update name=role1 new_name=newrole
salt '*' keystoneng.role_update name=1eb6edd5525e4ac39af571adee673559 new_name=newrole
"""
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(**kwargs)
if 'new_name' in kwargs:
kwargs['name'] = kwargs.pop('new_name') # depends on [control=['if'], data=['kwargs']]
return cloud.update_role(**kwargs) |
def execute( self, conn, daoinput, transaction = False ):
    """
    Insert file-parentage rows for the files of one block.

    daoinput must be validated to have the following keys:
    child_parent_id_list ([[cid, pid], ...]) and block_name.
    Validation failures are reported via dbsExceptionHandler.
    """
    binds = {}
    bindlist=[]
    if isinstance(daoinput, dict) and "block_name" in daoinput.keys():
        # Look up the ids of all files already registered in this block.
        binds = {"block_name": daoinput["block_name"]}
        r = self.dbi.processData(self.sql_sel, binds, conn, False)
        bfile = self.format(r)
        bfile_list = []
        for f in bfile:
            bfile_list.append(f[0])
        if "child_parent_id_list" in daoinput.keys():
            # Collect the child file ids from the [child, parent] pairs.
            files = []
            for i in daoinput["child_parent_id_list"]:
                files.append(i[0])
            # Every child file must already belong to the given block.
            if set(files)-set(bfile_list):
                dbsExceptionHandler('dbsException-invalid-input2', "Files required in the same block for FileParent/insert2 dao.", self.logger.exception)
        else:
            dbsExceptionHandler('dbsException-invalid-input2', "child_parent_id_list required for FileParent/insert2 dao.", self.logger.exception)
    else:
        dbsExceptionHandler('dbsException-invalid-input2', "Block_name required in the same block for FileParent/insert2 dao.", self.logger.exception)
    # NOTE(review): if dbsExceptionHandler does not raise, control falls
    # through to the insert below -- confirm it always raises on error.
    binds = {}
    # Build one bind dict per [child, parent] pair and insert them in bulk.
    for pf in daoinput["child_parent_id_list"]:
        binds = {"this_file_id":pf[0], "parent_file_id": pf[1]}
        bindlist.append(binds)
    self.dbi.processData(self.sql, bindlist, conn, transaction)
constant[
daoinput must be validated to have the following keys:
child_parent_id__list[[cid, pid],...], block_name
]
variable[binds] assign[=] dictionary[[], []]
variable[bindlist] assign[=] list[[]]
if <ast.BoolOp object at 0x7da1b10c0a60> begin[:]
variable[binds] assign[=] dictionary[[<ast.Constant object at 0x7da1b10c2890>], [<ast.Subscript object at 0x7da1b10c3c70>]]
variable[r] assign[=] call[name[self].dbi.processData, parameter[name[self].sql_sel, name[binds], name[conn], constant[False]]]
variable[bfile] assign[=] call[name[self].format, parameter[name[r]]]
variable[bfile_list] assign[=] list[[]]
for taget[name[f]] in starred[name[bfile]] begin[:]
call[name[bfile_list].append, parameter[call[name[f]][constant[0]]]]
if compare[constant[child_parent_id_list] in call[name[daoinput].keys, parameter[]]] begin[:]
variable[files] assign[=] list[[]]
for taget[name[i]] in starred[call[name[daoinput]][constant[child_parent_id_list]]] begin[:]
call[name[files].append, parameter[call[name[i]][constant[0]]]]
if binary_operation[call[name[set], parameter[name[files]]] - call[name[set], parameter[name[bfile_list]]]] begin[:]
call[name[dbsExceptionHandler], parameter[constant[dbsException-invalid-input2], constant[Files required in the same block for FileParent/insert2 dao.], name[self].logger.exception]]
variable[binds] assign[=] dictionary[[], []]
for taget[name[pf]] in starred[call[name[daoinput]][constant[child_parent_id_list]]] begin[:]
variable[binds] assign[=] dictionary[[<ast.Constant object at 0x7da1b10c3e50>, <ast.Constant object at 0x7da1b10c34c0>], [<ast.Subscript object at 0x7da1b10c2b60>, <ast.Subscript object at 0x7da1b10c2e60>]]
call[name[bindlist].append, parameter[name[binds]]]
call[name[self].dbi.processData, parameter[name[self].sql, name[bindlist], name[conn], name[transaction]]] | keyword[def] identifier[execute] ( identifier[self] , identifier[conn] , identifier[daoinput] , identifier[transaction] = keyword[False] ):
literal[string]
identifier[binds] ={}
identifier[bindlist] =[]
keyword[if] identifier[isinstance] ( identifier[daoinput] , identifier[dict] ) keyword[and] literal[string] keyword[in] identifier[daoinput] . identifier[keys] ():
identifier[binds] ={ literal[string] : identifier[daoinput] [ literal[string] ]}
identifier[r] = identifier[self] . identifier[dbi] . identifier[processData] ( identifier[self] . identifier[sql_sel] , identifier[binds] , identifier[conn] , keyword[False] )
identifier[bfile] = identifier[self] . identifier[format] ( identifier[r] )
identifier[bfile_list] =[]
keyword[for] identifier[f] keyword[in] identifier[bfile] :
identifier[bfile_list] . identifier[append] ( identifier[f] [ literal[int] ])
keyword[if] literal[string] keyword[in] identifier[daoinput] . identifier[keys] ():
identifier[files] =[]
keyword[for] identifier[i] keyword[in] identifier[daoinput] [ literal[string] ]:
identifier[files] . identifier[append] ( identifier[i] [ literal[int] ])
keyword[if] identifier[set] ( identifier[files] )- identifier[set] ( identifier[bfile_list] ):
identifier[dbsExceptionHandler] ( literal[string] , literal[string] , identifier[self] . identifier[logger] . identifier[exception] )
keyword[else] :
identifier[dbsExceptionHandler] ( literal[string] , literal[string] , identifier[self] . identifier[logger] . identifier[exception] )
keyword[else] :
identifier[dbsExceptionHandler] ( literal[string] , literal[string] , identifier[self] . identifier[logger] . identifier[exception] )
identifier[binds] ={}
keyword[for] identifier[pf] keyword[in] identifier[daoinput] [ literal[string] ]:
identifier[binds] ={ literal[string] : identifier[pf] [ literal[int] ], literal[string] : identifier[pf] [ literal[int] ]}
identifier[bindlist] . identifier[append] ( identifier[binds] )
identifier[self] . identifier[dbi] . identifier[processData] ( identifier[self] . identifier[sql] , identifier[bindlist] , identifier[conn] , identifier[transaction] ) | def execute(self, conn, daoinput, transaction=False):
"""
daoinput must be validated to have the following keys:
child_parent_id__list[[cid, pid],...], block_name
"""
binds = {}
bindlist = []
if isinstance(daoinput, dict) and 'block_name' in daoinput.keys():
binds = {'block_name': daoinput['block_name']}
r = self.dbi.processData(self.sql_sel, binds, conn, False)
bfile = self.format(r)
bfile_list = []
for f in bfile:
bfile_list.append(f[0]) # depends on [control=['for'], data=['f']]
if 'child_parent_id_list' in daoinput.keys():
files = []
for i in daoinput['child_parent_id_list']:
files.append(i[0]) # depends on [control=['for'], data=['i']]
if set(files) - set(bfile_list):
dbsExceptionHandler('dbsException-invalid-input2', 'Files required in the same block for FileParent/insert2 dao.', self.logger.exception) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
dbsExceptionHandler('dbsException-invalid-input2', 'child_parent_id_list required for FileParent/insert2 dao.', self.logger.exception) # depends on [control=['if'], data=[]]
else:
dbsExceptionHandler('dbsException-invalid-input2', 'Block_name required in the same block for FileParent/insert2 dao.', self.logger.exception)
binds = {}
for pf in daoinput['child_parent_id_list']:
binds = {'this_file_id': pf[0], 'parent_file_id': pf[1]}
bindlist.append(binds) # depends on [control=['for'], data=['pf']]
self.dbi.processData(self.sql, bindlist, conn, transaction) |
def fromkeys(cls, seq, value=None, **kwargs):
    """
    Build a new collection whose keys come from *seq*, each mapped to
    *value*. The keyword arguments are passed to the persistent ``Dict``.
    """
    collection = cls(**kwargs)
    pairs = ((key, value) for key in seq)
    collection.update(pairs)
    return collection
constant[
Create a new collection with keys from *seq* and values set to
*value*. The keyword arguments are passed to the persistent ``Dict``.
]
variable[other] assign[=] call[name[cls], parameter[]]
call[name[other].update, parameter[<ast.GeneratorExp object at 0x7da20c6c4850>]]
return[name[other]] | keyword[def] identifier[fromkeys] ( identifier[cls] , identifier[seq] , identifier[value] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[other] = identifier[cls] (** identifier[kwargs] )
identifier[other] . identifier[update] ((( identifier[key] , identifier[value] ) keyword[for] identifier[key] keyword[in] identifier[seq] ))
keyword[return] identifier[other] | def fromkeys(cls, seq, value=None, **kwargs):
"""
Create a new collection with keys from *seq* and values set to
*value*. The keyword arguments are passed to the persistent ``Dict``.
"""
other = cls(**kwargs)
other.update(((key, value) for key in seq))
return other |
def split_number_and_unit(s):
    """Parse a string consisting of an integer followed by an optional unit.

    Parameters
    ----------
    s : str
        Non-empty string starting with an integer, optionally followed by
        non-digit unit characters (e.g. ``"10 MB"``).

    Returns
    -------
    tuple
        A pair ``(number, unit)`` where ``number`` is an int and ``unit``
        is the (stripped) trailing string, possibly empty.

    Raises
    ------
    ValueError
        If *s* is empty or contains no leading integer.
    """
    if not s:
        raise ValueError('empty value')
    s = s.strip()
    pos = len(s)
    # Walk backwards past the unit characters to find where the digits end.
    while pos and not s[pos-1].isdigit():
        pos -= 1
    if pos == 0:
        # Previously this fell through to int('') with an opaque message.
        raise ValueError('no leading number found in %r' % s)
    number = int(s[:pos])
    unit = s[pos:].strip()
    return (number, unit)
constant[Parse a string that consists of a integer number and an optional unit.
@param s a non-empty string that starts with an int and is followed by some letters
@return a triple of the number (as int) and the unit
]
if <ast.UnaryOp object at 0x7da18f813be0> begin[:]
<ast.Raise object at 0x7da18f8117e0>
variable[s] assign[=] call[name[s].strip, parameter[]]
variable[pos] assign[=] call[name[len], parameter[name[s]]]
while <ast.BoolOp object at 0x7da18f813c70> begin[:]
<ast.AugAssign object at 0x7da18f811000>
variable[number] assign[=] call[name[int], parameter[call[name[s]][<ast.Slice object at 0x7da18f8131c0>]]]
variable[unit] assign[=] call[call[name[s]][<ast.Slice object at 0x7da18bccbaf0>].strip, parameter[]]
return[tuple[[<ast.Name object at 0x7da18bcc9e40>, <ast.Name object at 0x7da18bccb8e0>]]] | keyword[def] identifier[split_number_and_unit] ( identifier[s] ):
literal[string]
keyword[if] keyword[not] identifier[s] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[s] = identifier[s] . identifier[strip] ()
identifier[pos] = identifier[len] ( identifier[s] )
keyword[while] identifier[pos] keyword[and] keyword[not] identifier[s] [ identifier[pos] - literal[int] ]. identifier[isdigit] ():
identifier[pos] -= literal[int]
identifier[number] = identifier[int] ( identifier[s] [: identifier[pos] ])
identifier[unit] = identifier[s] [ identifier[pos] :]. identifier[strip] ()
keyword[return] ( identifier[number] , identifier[unit] ) | def split_number_and_unit(s):
"""Parse a string that consists of a integer number and an optional unit.
@param s a non-empty string that starts with an int and is followed by some letters
@return a triple of the number (as int) and the unit
"""
if not s:
raise ValueError('empty value') # depends on [control=['if'], data=[]]
s = s.strip()
pos = len(s)
while pos and (not s[pos - 1].isdigit()):
pos -= 1 # depends on [control=['while'], data=[]]
number = int(s[:pos])
unit = s[pos:].strip()
return (number, unit) |
def _parse_attributes(self, value):
    """Decode the proprietary attribute blob into a name->value dict."""
    from zigpy.zcl import foundation as f
    known_tags = {
        1: BATTERY_VOLTAGE_MV,
        3: TEMPERATURE,
        4: XIAOMI_ATTR_4,
        5: XIAOMI_ATTR_5,
        6: XIAOMI_ATTR_6,
        10: PATH,
    }
    # First pass: split the byte stream into (tag, value) entries.
    raw = {}
    remaining = value
    while remaining:
        tag = int(remaining[0])
        parsed, remaining = f.TypeValue.deserialize(remaining[1:])
        raw[tag] = parsed.value
    # Second pass: map numeric tags onto readable attribute names;
    # unknown tags keep a "0xff01-<tag>" fallback name.
    attributes = {}
    for tag, val in raw.items():
        if tag in known_tags:
            attributes[known_tags[tag]] = val
        else:
            attributes["0xff01-" + str(tag)] = val
    if BATTERY_VOLTAGE_MV in attributes:
        attributes[BATTERY_LEVEL] = int(
            self._calculate_remaining_battery_percentage(
                attributes[BATTERY_VOLTAGE_MV]
            )
        )
    return attributes
constant[Parse non standard atrributes.]
from relative_module[zigpy.zcl] import module[foundation]
variable[attributes] assign[=] dictionary[[], []]
variable[attribute_names] assign[=] dictionary[[<ast.Constant object at 0x7da20e962bc0>, <ast.Constant object at 0x7da20e9606a0>, <ast.Constant object at 0x7da20e961ab0>, <ast.Constant object at 0x7da20e962dd0>, <ast.Constant object at 0x7da20e9600a0>, <ast.Constant object at 0x7da20e963700>], [<ast.Name object at 0x7da20e9627d0>, <ast.Name object at 0x7da20e961060>, <ast.Name object at 0x7da20e963b50>, <ast.Name object at 0x7da20e963fa0>, <ast.Name object at 0x7da20e962680>, <ast.Name object at 0x7da20e963af0>]]
variable[result] assign[=] dictionary[[], []]
while name[value] begin[:]
variable[skey] assign[=] call[name[int], parameter[call[name[value]][constant[0]]]]
<ast.Tuple object at 0x7da2041dbe50> assign[=] call[name[f].TypeValue.deserialize, parameter[call[name[value]][<ast.Slice object at 0x7da2041d8d60>]]]
call[name[result]][name[skey]] assign[=] name[svalue].value
for taget[tuple[[<ast.Name object at 0x7da2041db790>, <ast.Name object at 0x7da2041d86d0>]]] in starred[call[name[result].items, parameter[]]] begin[:]
variable[key] assign[=] <ast.IfExp object at 0x7da2041da800>
call[name[attributes]][name[key]] assign[=] name[value]
if compare[name[BATTERY_VOLTAGE_MV] in name[attributes]] begin[:]
call[name[attributes]][name[BATTERY_LEVEL]] assign[=] call[name[int], parameter[call[name[self]._calculate_remaining_battery_percentage, parameter[call[name[attributes]][name[BATTERY_VOLTAGE_MV]]]]]]
return[name[attributes]] | keyword[def] identifier[_parse_attributes] ( identifier[self] , identifier[value] ):
literal[string]
keyword[from] identifier[zigpy] . identifier[zcl] keyword[import] identifier[foundation] keyword[as] identifier[f]
identifier[attributes] ={}
identifier[attribute_names] ={
literal[int] : identifier[BATTERY_VOLTAGE_MV] ,
literal[int] : identifier[TEMPERATURE] ,
literal[int] : identifier[XIAOMI_ATTR_4] ,
literal[int] : identifier[XIAOMI_ATTR_5] ,
literal[int] : identifier[XIAOMI_ATTR_6] ,
literal[int] : identifier[PATH]
}
identifier[result] ={}
keyword[while] identifier[value] :
identifier[skey] = identifier[int] ( identifier[value] [ literal[int] ])
identifier[svalue] , identifier[value] = identifier[f] . identifier[TypeValue] . identifier[deserialize] ( identifier[value] [ literal[int] :])
identifier[result] [ identifier[skey] ]= identifier[svalue] . identifier[value]
keyword[for] identifier[item] , identifier[value] keyword[in] identifier[result] . identifier[items] ():
identifier[key] = identifier[attribute_names] [ identifier[item] ] keyword[if] identifier[item] keyword[in] identifier[attribute_names] keyword[else] literal[string] + identifier[str] ( identifier[item] )
identifier[attributes] [ identifier[key] ]= identifier[value]
keyword[if] identifier[BATTERY_VOLTAGE_MV] keyword[in] identifier[attributes] :
identifier[attributes] [ identifier[BATTERY_LEVEL] ]= identifier[int] (
identifier[self] . identifier[_calculate_remaining_battery_percentage] (
identifier[attributes] [ identifier[BATTERY_VOLTAGE_MV] ]
)
)
keyword[return] identifier[attributes] | def _parse_attributes(self, value):
"""Parse non standard atrributes."""
from zigpy.zcl import foundation as f
attributes = {}
attribute_names = {1: BATTERY_VOLTAGE_MV, 3: TEMPERATURE, 4: XIAOMI_ATTR_4, 5: XIAOMI_ATTR_5, 6: XIAOMI_ATTR_6, 10: PATH}
result = {}
while value:
skey = int(value[0])
(svalue, value) = f.TypeValue.deserialize(value[1:])
result[skey] = svalue.value # depends on [control=['while'], data=[]]
for (item, value) in result.items():
key = attribute_names[item] if item in attribute_names else '0xff01-' + str(item)
attributes[key] = value # depends on [control=['for'], data=[]]
if BATTERY_VOLTAGE_MV in attributes:
attributes[BATTERY_LEVEL] = int(self._calculate_remaining_battery_percentage(attributes[BATTERY_VOLTAGE_MV])) # depends on [control=['if'], data=['BATTERY_VOLTAGE_MV', 'attributes']]
return attributes |
def simple_pattern_exists_in_gcs(file_pattern, credentials=None):
  """True iff an object exists matching the input GCS pattern.

  The GCS pattern must be a full object reference or a "simple pattern"
  per the dsub input/output parameter restrictions: no ``**``, ``?``, or
  ``[]`` character ranges, and wildcards only in the file name.

  Args:
    file_pattern: eg. 'gs://foo/ba*'
    credentials: Optional credential to be used to load the file from gcs.

  Raises:
    ValueError: if file_pattern breaks the rules.

  Returns:
    True iff a file exists that matches that pattern.
  """
  if '*' not in file_pattern:
    # No wildcard: a plain existence check suffices.
    return _file_exists_in_gcs(file_pattern, credentials)
  if not file_pattern.startswith('gs://'):
    raise ValueError('file name must start with gs://')
  gcs_service = _get_storage_service(credentials)
  bucket_name, prefix = file_pattern[len('gs://'):].split('/', 1)
  if '*' in bucket_name:
    raise ValueError('Wildcards may not appear in the bucket name')
  # The wildcard is somewhere in the object prefix at this point.
  assert '*' in prefix
  # List objects under the literal part before the first wildcard, then
  # match the full pattern client-side.
  literal_prefix = prefix.split('*', 1)[0]
  response = gcs_service.objects().list(
      bucket=bucket_name, prefix=literal_prefix).execute()
  names = (item['name'] for item in response.get('items', []))
  return any(fnmatch.fnmatch(name, prefix) for name in names)
constant[True iff an object exists matching the input GCS pattern.
The GCS pattern must be a full object reference or a "simple pattern" that
conforms to the dsub input and output parameter restrictions:
* No support for **, ? wildcards or [] character ranges
* Wildcards may only appear in the file name
Args:
file_pattern: eg. 'gs://foo/ba*'
credentials: Optional credential to be used to load the file from gcs.
Raises:
ValueError: if file_pattern breaks the rules.
Returns:
True iff a file exists that matches that pattern.
]
if compare[constant[*] <ast.NotIn object at 0x7da2590d7190> name[file_pattern]] begin[:]
return[call[name[_file_exists_in_gcs], parameter[name[file_pattern], name[credentials]]]]
if <ast.UnaryOp object at 0x7da1b00172b0> begin[:]
<ast.Raise object at 0x7da1b0141360>
variable[gcs_service] assign[=] call[name[_get_storage_service], parameter[name[credentials]]]
<ast.Tuple object at 0x7da1b0141840> assign[=] call[call[name[file_pattern]][<ast.Slice object at 0x7da1b0141060>].split, parameter[constant[/], constant[1]]]
if compare[constant[*] in name[bucket_name]] begin[:]
<ast.Raise object at 0x7da1b013fe50>
assert[compare[constant[*] in name[prefix]]]
variable[prefix_no_wildcard] assign[=] call[name[prefix]][<ast.Slice object at 0x7da1b013d690>]
variable[request] assign[=] call[call[name[gcs_service].objects, parameter[]].list, parameter[]]
variable[response] assign[=] call[name[request].execute, parameter[]]
if compare[constant[items] <ast.NotIn object at 0x7da2590d7190> name[response]] begin[:]
return[constant[False]]
variable[items_list] assign[=] <ast.ListComp object at 0x7da1b013e0b0>
return[call[name[any], parameter[<ast.GeneratorExp object at 0x7da1b013c700>]]] | keyword[def] identifier[simple_pattern_exists_in_gcs] ( identifier[file_pattern] , identifier[credentials] = keyword[None] ):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[file_pattern] :
keyword[return] identifier[_file_exists_in_gcs] ( identifier[file_pattern] , identifier[credentials] )
keyword[if] keyword[not] identifier[file_pattern] . identifier[startswith] ( literal[string] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[gcs_service] = identifier[_get_storage_service] ( identifier[credentials] )
identifier[bucket_name] , identifier[prefix] = identifier[file_pattern] [ identifier[len] ( literal[string] ):]. identifier[split] ( literal[string] , literal[int] )
keyword[if] literal[string] keyword[in] identifier[bucket_name] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[assert] literal[string] keyword[in] identifier[prefix]
identifier[prefix_no_wildcard] = identifier[prefix] [: identifier[prefix] . identifier[index] ( literal[string] )]
identifier[request] = identifier[gcs_service] . identifier[objects] (). identifier[list] (
identifier[bucket] = identifier[bucket_name] , identifier[prefix] = identifier[prefix_no_wildcard] )
identifier[response] = identifier[request] . identifier[execute] ()
keyword[if] literal[string] keyword[not] keyword[in] identifier[response] :
keyword[return] keyword[False]
identifier[items_list] =[ identifier[i] [ literal[string] ] keyword[for] identifier[i] keyword[in] identifier[response] [ literal[string] ]]
keyword[return] identifier[any] ( identifier[fnmatch] . identifier[fnmatch] ( identifier[i] , identifier[prefix] ) keyword[for] identifier[i] keyword[in] identifier[items_list] ) | def simple_pattern_exists_in_gcs(file_pattern, credentials=None):
"""True iff an object exists matching the input GCS pattern.
The GCS pattern must be a full object reference or a "simple pattern" that
conforms to the dsub input and output parameter restrictions:
* No support for **, ? wildcards or [] character ranges
* Wildcards may only appear in the file name
Args:
file_pattern: eg. 'gs://foo/ba*'
credentials: Optional credential to be used to load the file from gcs.
Raises:
ValueError: if file_pattern breaks the rules.
Returns:
True iff a file exists that matches that pattern.
"""
if '*' not in file_pattern:
return _file_exists_in_gcs(file_pattern, credentials) # depends on [control=['if'], data=['file_pattern']]
if not file_pattern.startswith('gs://'):
raise ValueError('file name must start with gs://') # depends on [control=['if'], data=[]]
gcs_service = _get_storage_service(credentials)
(bucket_name, prefix) = file_pattern[len('gs://'):].split('/', 1)
if '*' in bucket_name:
raise ValueError('Wildcards may not appear in the bucket name') # depends on [control=['if'], data=[]]
# There is a '*' in prefix because we checked there's one in file_pattern
# and there isn't one in bucket_name. Hence it must be in prefix.
assert '*' in prefix
prefix_no_wildcard = prefix[:prefix.index('*')]
request = gcs_service.objects().list(bucket=bucket_name, prefix=prefix_no_wildcard)
response = request.execute()
if 'items' not in response:
return False # depends on [control=['if'], data=[]]
items_list = [i['name'] for i in response['items']]
return any((fnmatch.fnmatch(i, prefix) for i in items_list)) |
def dec2dms(dec):
    """Convert decimal degrees into a (degrees, minutes, seconds) tuple.

    ADW: This should really be replaced by astropy

    Parameters
    ----------
    dec : float
        Angle in decimal degrees.

    Returns
    -------
    tuple
        ``(deg, minute, second)`` with ``deg`` and ``minute`` as ints and
        ``second`` as a float; the sign is carried on ``deg`` only.

    NOTE(review): for values in the open interval (-1, 0) the degree part
    is 0, so the negative sign is lost entirely -- confirm callers never
    pass such values, or carry the sign separately.
    """
    MINUTE = 60.
    dec = float(dec)
    sign = np.copysign(1.0, dec)
    fdeg = np.abs(dec)
    deg = int(fdeg)
    fminute = (fdeg - deg) * MINUTE
    minute = int(fminute)
    # Fractional minutes rescaled by 60 give the seconds.
    second = (fminute - minute) * MINUTE
    deg = int(deg * sign)
    return (deg, minute, second)
constant[
ADW: This should really be replaced by astropy
]
variable[DEGREE] assign[=] constant[360.0]
variable[HOUR] assign[=] constant[24.0]
variable[MINUTE] assign[=] constant[60.0]
variable[SECOND] assign[=] constant[3600.0]
variable[dec] assign[=] call[name[float], parameter[name[dec]]]
variable[sign] assign[=] call[name[np].copysign, parameter[constant[1.0], name[dec]]]
variable[fdeg] assign[=] call[name[np].abs, parameter[name[dec]]]
variable[deg] assign[=] call[name[int], parameter[name[fdeg]]]
variable[fminute] assign[=] binary_operation[binary_operation[name[fdeg] - name[deg]] * name[MINUTE]]
variable[minute] assign[=] call[name[int], parameter[name[fminute]]]
variable[second] assign[=] binary_operation[binary_operation[name[fminute] - name[minute]] * name[MINUTE]]
variable[deg] assign[=] call[name[int], parameter[binary_operation[name[deg] * name[sign]]]]
return[tuple[[<ast.Name object at 0x7da18bcc86d0>, <ast.Name object at 0x7da18bcca950>, <ast.Name object at 0x7da18bcca380>]]] | keyword[def] identifier[dec2dms] ( identifier[dec] ):
literal[string]
identifier[DEGREE] = literal[int]
identifier[HOUR] = literal[int]
identifier[MINUTE] = literal[int]
identifier[SECOND] = literal[int]
identifier[dec] = identifier[float] ( identifier[dec] )
identifier[sign] = identifier[np] . identifier[copysign] ( literal[int] , identifier[dec] )
identifier[fdeg] = identifier[np] . identifier[abs] ( identifier[dec] )
identifier[deg] = identifier[int] ( identifier[fdeg] )
identifier[fminute] =( identifier[fdeg] - identifier[deg] )* identifier[MINUTE]
identifier[minute] = identifier[int] ( identifier[fminute] )
identifier[second] =( identifier[fminute] - identifier[minute] )* identifier[MINUTE]
identifier[deg] = identifier[int] ( identifier[deg] * identifier[sign] )
keyword[return] ( identifier[deg] , identifier[minute] , identifier[second] ) | def dec2dms(dec):
"""
ADW: This should really be replaced by astropy
"""
DEGREE = 360.0
HOUR = 24.0
MINUTE = 60.0
SECOND = 3600.0
dec = float(dec)
sign = np.copysign(1.0, dec)
fdeg = np.abs(dec)
deg = int(fdeg)
fminute = (fdeg - deg) * MINUTE
minute = int(fminute)
second = (fminute - minute) * MINUTE
deg = int(deg * sign)
return (deg, minute, second) |
def fix_sign_with_K(dataframe):
    """Swap electrode denotations so that geometrical (K) factors become
    positive. Also, swap signs of all parameters affected by this process.

    Affected parameters, at the moment, are:

    * K
    * r
    * Vmn
    * Zt
    * rho_a
    * rpha

    Parameters
    ----------
    dataframe : pandas.DataFrame
        dataframe holding the data (modified in place)

    Returns
    -------
    dataframe : pandas.DataFrame
        the fixed dataframe

    Raises
    ------
    Exception
        if the required 'k' and 'r' columns are missing
    """
    # check for required columns
    if 'k' not in dataframe or 'r' not in dataframe:
        raise Exception('k and r columns required!')
    indices_negative = (dataframe['k'] < 0) & (dataframe['r'] < 0)
    if np.where(indices_negative)[0].size == 0:
        # nothing to do here
        return dataframe
    # `.ix` was removed in pandas 1.0; `.loc` is the label/boolean-mask
    # equivalent used throughout below.
    dataframe.loc[indices_negative, ['k', 'r']] *= -1
    # switch potential electrodes
    # NOTE(review): the m/n swap is gated on the a/b ordering, not on m/n.
    # This mirrors the original logic -- confirm it is intentional.
    indices_switched_ab = indices_negative & (dataframe['a'] > dataframe['b'])
    indices_switched_mn = indices_negative & (dataframe['a'] < dataframe['b'])
    dataframe.loc[indices_switched_ab, ['a', 'b']] = dataframe.loc[
        indices_switched_ab, ['b', 'a']
    ].values
    dataframe.loc[indices_switched_mn, ['m', 'n']] = dataframe.loc[
        indices_switched_mn, ['n', 'm']
    ].values
    # switch sign of voltages
    if 'Vmn' in dataframe:
        dataframe.loc[indices_negative, 'Vmn'] *= -1
    if 'Zt' in dataframe:
        dataframe.loc[indices_negative, 'Zt'] *= -1
    if 'rho_a' in dataframe:
        # recompute apparent resistivity for all rows from the fixed k/r
        dataframe['rho_a'] = dataframe['r'] * dataframe['k']
    if 'Mx' in dataframe:
        # numpy arrays are stored within each cell, so loop row by row
        # NOTE(review): np.where yields positional indices while `.at`
        # expects labels -- this assumes a default RangeIndex.
        for index in np.where(indices_negative)[0]:
            dataframe.at[index, 'Mx'] *= -1
    # recompute phase values
    if 'rpha' in dataframe:
        if 'Zt' in dataframe:
            # Series.real/.imag were removed from pandas; go through numpy.
            zt = dataframe['Zt'].to_numpy()
            dataframe['rpha'] = np.arctan2(zt.imag, zt.real) * 1e3
        else:
            raise Exception(
                'Recomputation of phase without Zt not implemented yet. ' +
                'See source code for more information'
            )
    # When the complex number is located in the fourth sector instead of
    # the first, this corresponds to a phase shift by pi. For all values
    # where magnitude < 0 and phase < 3000 mrad reverse this shift by pi
    # by multiplying the complex number by -1:
    #     new_value = -1 * (Magnitude * exp(i phi))
    # Test this function by setting one measurement to
    # -85.02069 -183.25 in radic column 6 and 7; should get -58 mrad when
    # converted.
    # Make sure a, b, m, n stay integers.
    for col in ('a', 'b', 'm', 'n'):
        dataframe[col] = dataframe[col].astype(int)
    return dataframe
constant[Swap electrode denotations so that geometrical (K) factors become
positive. Also, swap signs of all parameters affected by this process.
Affected parameters, at the moment, are:
* K
* r
* Vmn
* Zt
* rho_a
* rpha
Parameters
----------
dataframe : pandas.DateFrame
dataframe holding the data
Returns
-------
dataframe : pandas.DateFrame
the fixed dataframe
]
if <ast.BoolOp object at 0x7da20eb2b9d0> begin[:]
<ast.Raise object at 0x7da20eb29de0>
variable[indices_negative] assign[=] binary_operation[compare[call[name[dataframe]][constant[k]] less[<] constant[0]] <ast.BitAnd object at 0x7da2590d6b60> compare[call[name[dataframe]][constant[r]] less[<] constant[0]]]
if compare[call[call[name[np].where, parameter[name[indices_negative]]]][constant[0]].size equal[==] constant[0]] begin[:]
return[name[dataframe]]
<ast.AugAssign object at 0x7da20c6e6aa0>
variable[indices_switched_ab] assign[=] binary_operation[name[indices_negative] <ast.BitAnd object at 0x7da2590d6b60> compare[call[name[dataframe]][constant[a]] greater[>] call[name[dataframe]][constant[b]]]]
variable[indices_switched_mn] assign[=] binary_operation[name[indices_negative] <ast.BitAnd object at 0x7da2590d6b60> compare[call[name[dataframe]][constant[a]] less[<] call[name[dataframe]][constant[b]]]]
call[name[dataframe].ix][tuple[[<ast.Name object at 0x7da20c6e4b50>, <ast.List object at 0x7da20c6e6020>]]] assign[=] call[name[dataframe].ix][tuple[[<ast.Name object at 0x7da20c6e73a0>, <ast.List object at 0x7da20c6e79d0>]]].values
call[name[dataframe].ix][tuple[[<ast.Name object at 0x7da20c6e7bb0>, <ast.List object at 0x7da20c6e50f0>]]] assign[=] call[name[dataframe].ix][tuple[[<ast.Name object at 0x7da20c6e76a0>, <ast.List object at 0x7da20c6e5c60>]]].values
if compare[constant[Vmn] in name[dataframe]] begin[:]
<ast.AugAssign object at 0x7da20c6e62f0>
if compare[constant[Zt] in name[dataframe]] begin[:]
<ast.AugAssign object at 0x7da1b2587520>
if compare[constant[rho_a] in name[dataframe]] begin[:]
call[name[dataframe]][constant[rho_a]] assign[=] binary_operation[call[name[dataframe]][constant[r]] * call[name[dataframe]][constant[k]]]
if compare[constant[Mx] in name[dataframe]] begin[:]
for taget[name[index]] in starred[call[call[name[np].where, parameter[name[indices_negative]]]][constant[0]]] begin[:]
<ast.AugAssign object at 0x7da1b25851e0>
if compare[constant[rpha] in name[dataframe]] begin[:]
if compare[constant[Zt] in name[dataframe]] begin[:]
call[name[dataframe]][constant[rpha]] assign[=] binary_operation[call[name[np].arctan2, parameter[call[name[dataframe]][constant[Zt]].imag, call[name[dataframe]][constant[Zt]].real]] * constant[1000.0]]
for taget[name[col]] in starred[tuple[[<ast.Constant object at 0x7da1b2587790>, <ast.Constant object at 0x7da1b2585210>, <ast.Constant object at 0x7da1b2585330>, <ast.Constant object at 0x7da1b2585450>]]] begin[:]
call[name[dataframe]][name[col]] assign[=] call[call[name[dataframe]][name[col]].astype, parameter[name[int]]]
return[name[dataframe]] | keyword[def] identifier[fix_sign_with_K] ( identifier[dataframe] ):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[dataframe] keyword[or] literal[string] keyword[not] keyword[in] identifier[dataframe] :
keyword[raise] identifier[Exception] ( literal[string] )
identifier[indices_negative] =( identifier[dataframe] [ literal[string] ]< literal[int] )&( identifier[dataframe] [ literal[string] ]< literal[int] )
keyword[if] identifier[np] . identifier[where] ( identifier[indices_negative] )[ literal[int] ]. identifier[size] == literal[int] :
keyword[return] identifier[dataframe]
identifier[dataframe] . identifier[ix] [ identifier[indices_negative] ,[ literal[string] , literal[string] ]]*=- literal[int]
identifier[indices_switched_ab] = identifier[indices_negative] &( identifier[dataframe] [ literal[string] ]> identifier[dataframe] [ literal[string] ])
identifier[indices_switched_mn] = identifier[indices_negative] &( identifier[dataframe] [ literal[string] ]< identifier[dataframe] [ literal[string] ])
identifier[dataframe] . identifier[ix] [ identifier[indices_switched_ab] ,[ literal[string] , literal[string] ]]= identifier[dataframe] . identifier[ix] [
identifier[indices_switched_ab] ,[ literal[string] , literal[string] ]
]. identifier[values]
identifier[dataframe] . identifier[ix] [ identifier[indices_switched_mn] ,[ literal[string] , literal[string] ]]= identifier[dataframe] . identifier[ix] [
identifier[indices_switched_mn] ,[ literal[string] , literal[string] ]
]. identifier[values]
keyword[if] literal[string] keyword[in] identifier[dataframe] :
identifier[dataframe] . identifier[ix] [ identifier[indices_negative] , literal[string] ]*=- literal[int]
keyword[if] literal[string] keyword[in] identifier[dataframe] :
identifier[dataframe] . identifier[ix] [ identifier[indices_negative] , literal[string] ]*=- literal[int]
keyword[if] literal[string] keyword[in] identifier[dataframe] :
identifier[dataframe] [ literal[string] ]= identifier[dataframe] [ literal[string] ]* identifier[dataframe] [ literal[string] ]
keyword[if] literal[string] keyword[in] identifier[dataframe] :
keyword[for] identifier[index] keyword[in] identifier[np] . identifier[where] ( identifier[indices_negative] )[ literal[int] ]:
identifier[dataframe] . identifier[at] [ identifier[index] , literal[string] ]*=- literal[int]
keyword[if] literal[string] keyword[in] identifier[dataframe] :
keyword[if] literal[string] keyword[in] identifier[dataframe] :
identifier[dataframe] [ literal[string] ]= identifier[np] . identifier[arctan2] (
identifier[dataframe] [ literal[string] ]. identifier[imag] , identifier[dataframe] [ literal[string] ]. identifier[real]
)* literal[int]
keyword[else] :
keyword[raise] identifier[Exception] (
literal[string] +
literal[string]
)
literal[string]
keyword[for] identifier[col] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] ):
identifier[dataframe] [ identifier[col] ]= identifier[dataframe] [ identifier[col] ]. identifier[astype] ( identifier[int] )
keyword[return] identifier[dataframe] | def fix_sign_with_K(dataframe):
"""Swap electrode denotations so that geometrical (K) factors become
positive. Also, swap signs of all parameters affected by this process.
Affected parameters, at the moment, are:
* K
* r
* Vmn
* Zt
* rho_a
* rpha
Parameters
----------
dataframe : pandas.DateFrame
dataframe holding the data
Returns
-------
dataframe : pandas.DateFrame
the fixed dataframe
"""
# check for required columns
if 'k' not in dataframe or 'r' not in dataframe:
raise Exception('k and r columns required!') # depends on [control=['if'], data=[]]
indices_negative = (dataframe['k'] < 0) & (dataframe['r'] < 0)
if np.where(indices_negative)[0].size == 0:
# nothing to do here
return dataframe # depends on [control=['if'], data=[]]
dataframe.ix[indices_negative, ['k', 'r']] *= -1
# switch potential electrodes
indices_switched_ab = indices_negative & (dataframe['a'] > dataframe['b'])
indices_switched_mn = indices_negative & (dataframe['a'] < dataframe['b'])
dataframe.ix[indices_switched_ab, ['a', 'b']] = dataframe.ix[indices_switched_ab, ['b', 'a']].values
dataframe.ix[indices_switched_mn, ['m', 'n']] = dataframe.ix[indices_switched_mn, ['n', 'm']].values
# switch sign of voltages
if 'Vmn' in dataframe:
dataframe.ix[indices_negative, 'Vmn'] *= -1 # depends on [control=['if'], data=['dataframe']]
if 'Zt' in dataframe:
dataframe.ix[indices_negative, 'Zt'] *= -1 # depends on [control=['if'], data=['dataframe']]
if 'rho_a' in dataframe:
dataframe['rho_a'] = dataframe['r'] * dataframe['k'] # depends on [control=['if'], data=['dataframe']]
if 'Mx' in dataframe:
# for now we have to loop here because we store numpy arrays within
# each cell
for index in np.where(indices_negative)[0]:
# import IPython
# IPython.embed()
# exit()
dataframe.at[index, 'Mx'] *= -1 # depends on [control=['for'], data=['index']] # depends on [control=['if'], data=['dataframe']]
# recompute phase values
if 'rpha' in dataframe:
if 'Zt' in dataframe:
# recompute
dataframe['rpha'] = np.arctan2(dataframe['Zt'].imag, dataframe['Zt'].real) * 1000.0 # depends on [control=['if'], data=['dataframe']]
else:
raise Exception('Recomputation of phase without Zt not implemented yet. ' + 'See source code for more information')
'\n when the complex number is located in the fourth sector instead of\n the first, this corresponds to a phase shift by pi. For all values\n where magnitude < 0 and phase < 3000 mrad reverse this shift by pi\n by multiplying the complex number by -1:\n new_value = - 1 * (Magnitude * exp(i phi))\n Test this function by setting one measurement to\n -85.02069 -183.25 in radic column 6 and 7, should get -58 mrad when\n converted\n ' # depends on [control=['if'], data=['dataframe']]
# Make sure a, b, m, n stay integers.
for col in ('a', 'b', 'm', 'n'):
dataframe[col] = dataframe[col].astype(int) # depends on [control=['for'], data=['col']]
return dataframe |
def _change_mode(self, mode, major, minor):
""" Change mode of operation, with some sanity checks. """
if self._mode:
if self._mode != mode:
raise RuntimeError('Can\'t change mode (from %s to %s)' % (self._mode, mode))
self._require_version(major=major, minor=minor)
self._mode = mode
# when setting mode, we reset all flags
self.ticket_flags = YubiKeyConfigBits(0x0)
self.config_flags = YubiKeyConfigBits(0x0)
self.extended_flags = YubiKeyConfigBits(0x0)
if mode != 'YUBIKEY_OTP':
self.ticket_flag(mode, True) | def function[_change_mode, parameter[self, mode, major, minor]]:
constant[ Change mode of operation, with some sanity checks. ]
if name[self]._mode begin[:]
if compare[name[self]._mode not_equal[!=] name[mode]] begin[:]
<ast.Raise object at 0x7da1b086d330>
call[name[self]._require_version, parameter[]]
name[self]._mode assign[=] name[mode]
name[self].ticket_flags assign[=] call[name[YubiKeyConfigBits], parameter[constant[0]]]
name[self].config_flags assign[=] call[name[YubiKeyConfigBits], parameter[constant[0]]]
name[self].extended_flags assign[=] call[name[YubiKeyConfigBits], parameter[constant[0]]]
if compare[name[mode] not_equal[!=] constant[YUBIKEY_OTP]] begin[:]
call[name[self].ticket_flag, parameter[name[mode], constant[True]]] | keyword[def] identifier[_change_mode] ( identifier[self] , identifier[mode] , identifier[major] , identifier[minor] ):
literal[string]
keyword[if] identifier[self] . identifier[_mode] :
keyword[if] identifier[self] . identifier[_mode] != identifier[mode] :
keyword[raise] identifier[RuntimeError] ( literal[string] %( identifier[self] . identifier[_mode] , identifier[mode] ))
identifier[self] . identifier[_require_version] ( identifier[major] = identifier[major] , identifier[minor] = identifier[minor] )
identifier[self] . identifier[_mode] = identifier[mode]
identifier[self] . identifier[ticket_flags] = identifier[YubiKeyConfigBits] ( literal[int] )
identifier[self] . identifier[config_flags] = identifier[YubiKeyConfigBits] ( literal[int] )
identifier[self] . identifier[extended_flags] = identifier[YubiKeyConfigBits] ( literal[int] )
keyword[if] identifier[mode] != literal[string] :
identifier[self] . identifier[ticket_flag] ( identifier[mode] , keyword[True] ) | def _change_mode(self, mode, major, minor):
""" Change mode of operation, with some sanity checks. """
if self._mode:
if self._mode != mode:
raise RuntimeError("Can't change mode (from %s to %s)" % (self._mode, mode)) # depends on [control=['if'], data=['mode']] # depends on [control=['if'], data=[]]
self._require_version(major=major, minor=minor)
self._mode = mode
# when setting mode, we reset all flags
self.ticket_flags = YubiKeyConfigBits(0)
self.config_flags = YubiKeyConfigBits(0)
self.extended_flags = YubiKeyConfigBits(0)
if mode != 'YUBIKEY_OTP':
self.ticket_flag(mode, True) # depends on [control=['if'], data=['mode']] |
def _getImpl(self, model):
    """Build the _IterationPhase-based implementation for this phase spec.

    model: Model instance the inference-only phase will operate on.

    Returns the constructed _IterationPhaseInferOnly instance.
    """
    return _IterationPhaseInferOnly(model=model,
                                    nIters=self.__nIters,
                                    inferenceArgs=self.__inferenceArgs)
constant[ Creates and returns the _IterationPhase-based instance corresponding
to this phase specification
model: Model instance
]
variable[impl] assign[=] call[name[_IterationPhaseInferOnly], parameter[]]
return[name[impl]] | keyword[def] identifier[_getImpl] ( identifier[self] , identifier[model] ):
literal[string]
identifier[impl] = identifier[_IterationPhaseInferOnly] ( identifier[model] = identifier[model] ,
identifier[nIters] = identifier[self] . identifier[__nIters] ,
identifier[inferenceArgs] = identifier[self] . identifier[__inferenceArgs] )
keyword[return] identifier[impl] | def _getImpl(self, model):
""" Creates and returns the _IterationPhase-based instance corresponding
to this phase specification
model: Model instance
"""
impl = _IterationPhaseInferOnly(model=model, nIters=self.__nIters, inferenceArgs=self.__inferenceArgs)
return impl |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.