code
stringlengths 75
104k
| code_sememe
stringlengths 47
309k
| token_type
stringlengths 215
214k
| code_dependency
stringlengths 75
155k
|
|---|---|---|---|
def tile_coords_to_bbox(cls, x, y, zoom):
    """
    Compute the lon/lat extrema of the bounding box covered by the given tile.
    Output coordinates are in degrees, in the Mercator projection
    (http://en.wikipedia.org/wiki/Mercator_projection).

    :param x: the x tile coordinate
    :param y: the y tile coordinate
    :param zoom: the zoom level
    :return: tuple with (lon_left, lat_bottom, lon_right, lat_top)
    """
    def tile_to_geocoords(tile_x, tile_y, zoom_level):
        # Number of tiles along one axis at this zoom level.
        tiles = 2. ** zoom_level
        lon = tile_x / tiles * 360. - 180.
        lat = math.degrees(math.atan(math.sinh(math.pi * (1 - 2 * tile_y / tiles))))
        return lat, lon

    # The tile's own north-west corner yields the top/left bounds; the
    # north-west corner of tile (x+1, y+1) yields the bottom/right bounds.
    top_lat, left_lon = tile_to_geocoords(x, y, zoom)
    bottom_lat, right_lon = tile_to_geocoords(x + 1, y + 1, zoom)
    return left_lon, bottom_lat, right_lon, top_lat
|
def function[tile_coords_to_bbox, parameter[cls, x, y, zoom]]:
constant[
Calculates the lon/lat estrema of the bounding box corresponding to specific tile coordinates. Output coodinates
are in degrees and in the Mercator Projection (http://en.wikipedia.org/wiki/Mercator_projection)
:param x: the x tile coordinates
:param y: the y tile coordinates
:param zoom: the zoom level
:return: tuple with (lon_left, lat_bottom, lon_right, lat_top)
]
def function[tile_to_geocoords, parameter[x, y, zoom]]:
variable[n] assign[=] binary_operation[constant[2.0] ** name[zoom]]
variable[lon] assign[=] binary_operation[binary_operation[binary_operation[name[x] / name[n]] * constant[360.0]] - constant[180.0]]
variable[lat] assign[=] call[name[math].degrees, parameter[call[name[math].atan, parameter[call[name[math].sinh, parameter[binary_operation[name[math].pi * binary_operation[constant[1] - binary_operation[binary_operation[constant[2] * name[y]] / name[n]]]]]]]]]]
return[tuple[[<ast.Name object at 0x7da18eb54730>, <ast.Name object at 0x7da18eb55240>]]]
variable[north_west_corner] assign[=] call[name[tile_to_geocoords], parameter[name[x], name[y], name[zoom]]]
variable[south_east_corner] assign[=] call[name[tile_to_geocoords], parameter[binary_operation[name[x] + constant[1]], binary_operation[name[y] + constant[1]], name[zoom]]]
return[tuple[[<ast.Subscript object at 0x7da18eb54670>, <ast.Subscript object at 0x7da18eb546d0>, <ast.Subscript object at 0x7da18eb544c0>, <ast.Subscript object at 0x7da2054a6b90>]]]
|
keyword[def] identifier[tile_coords_to_bbox] ( identifier[cls] , identifier[x] , identifier[y] , identifier[zoom] ):
literal[string]
keyword[def] identifier[tile_to_geocoords] ( identifier[x] , identifier[y] , identifier[zoom] ):
identifier[n] = literal[int] ** identifier[zoom]
identifier[lon] = identifier[x] / identifier[n] * literal[int] - literal[int]
identifier[lat] = identifier[math] . identifier[degrees] ( identifier[math] . identifier[atan] ( identifier[math] . identifier[sinh] ( identifier[math] . identifier[pi] *( literal[int] - literal[int] * identifier[y] / identifier[n] ))))
keyword[return] identifier[lat] , identifier[lon]
identifier[north_west_corner] = identifier[tile_to_geocoords] ( identifier[x] , identifier[y] , identifier[zoom] )
identifier[south_east_corner] = identifier[tile_to_geocoords] ( identifier[x] + literal[int] , identifier[y] + literal[int] , identifier[zoom] )
keyword[return] identifier[north_west_corner] [ literal[int] ], identifier[south_east_corner] [ literal[int] ], identifier[south_east_corner] [ literal[int] ], identifier[north_west_corner] [ literal[int] ]
|
def tile_coords_to_bbox(cls, x, y, zoom):
"""
Calculates the lon/lat estrema of the bounding box corresponding to specific tile coordinates. Output coodinates
are in degrees and in the Mercator Projection (http://en.wikipedia.org/wiki/Mercator_projection)
:param x: the x tile coordinates
:param y: the y tile coordinates
:param zoom: the zoom level
:return: tuple with (lon_left, lat_bottom, lon_right, lat_top)
"""
def tile_to_geocoords(x, y, zoom):
n = 2.0 ** zoom
lon = x / n * 360.0 - 180.0
lat = math.degrees(math.atan(math.sinh(math.pi * (1 - 2 * y / n))))
return (lat, lon)
north_west_corner = tile_to_geocoords(x, y, zoom)
south_east_corner = tile_to_geocoords(x + 1, y + 1, zoom)
return (north_west_corner[1], south_east_corner[0], south_east_corner[1], north_west_corner[0])
|
def _can_undo(comps, trans_list):
    """
    Return whether a components can be undone with one of the transformation in
    trans_list.
    """
    comps = list(comps)
    accent_chars = [accent.get_accent_char(char) for char in comps[1]]
    mark_chars = [mark.get_mark_char(char) for char in utils.join(comps)]
    actions = [_get_action(trans) for trans in trans_list]

    def undoes_component(action):
        """
        Check if the `action` created one of the marks, accents, or characters
        in `comps`.
        """
        kind = action[0]
        return ((kind == _Action.ADD_ACCENT and action[1] in accent_chars)
                or (kind == _Action.ADD_MARK and action[1] in mark_chars)
                or (kind == _Action.ADD_CHAR
                    and action[1] == accent.remove_accent_char(comps[1][-1])))  # ơ, ư

    return any(undoes_component(action) for action in actions)
|
def function[_can_undo, parameter[comps, trans_list]]:
constant[
Return whether a components can be undone with one of the transformation in
trans_list.
]
variable[comps] assign[=] call[name[list], parameter[name[comps]]]
variable[accent_list] assign[=] call[name[list], parameter[call[name[map], parameter[name[accent].get_accent_char, call[name[comps]][constant[1]]]]]]
variable[mark_list] assign[=] call[name[list], parameter[call[name[map], parameter[name[mark].get_mark_char, call[name[utils].join, parameter[name[comps]]]]]]]
variable[action_list] assign[=] call[name[list], parameter[call[name[map], parameter[<ast.Lambda object at 0x7da20e954310>, name[trans_list]]]]]
def function[atomic_check, parameter[action]]:
constant[
Check if the `action` created one of the marks, accents, or characters
in `comps`.
]
return[<ast.BoolOp object at 0x7da20e955210>]
return[call[name[any], parameter[call[name[map], parameter[name[atomic_check], name[action_list]]]]]]
|
keyword[def] identifier[_can_undo] ( identifier[comps] , identifier[trans_list] ):
literal[string]
identifier[comps] = identifier[list] ( identifier[comps] )
identifier[accent_list] = identifier[list] ( identifier[map] ( identifier[accent] . identifier[get_accent_char] , identifier[comps] [ literal[int] ]))
identifier[mark_list] = identifier[list] ( identifier[map] ( identifier[mark] . identifier[get_mark_char] , identifier[utils] . identifier[join] ( identifier[comps] )))
identifier[action_list] = identifier[list] ( identifier[map] ( keyword[lambda] identifier[x] : identifier[_get_action] ( identifier[x] ), identifier[trans_list] ))
keyword[def] identifier[atomic_check] ( identifier[action] ):
literal[string]
keyword[return] ( identifier[action] [ literal[int] ]== identifier[_Action] . identifier[ADD_ACCENT] keyword[and] identifier[action] [ literal[int] ] keyword[in] identifier[accent_list] ) keyword[or] ( identifier[action] [ literal[int] ]== identifier[_Action] . identifier[ADD_MARK] keyword[and] identifier[action] [ literal[int] ] keyword[in] identifier[mark_list] ) keyword[or] ( identifier[action] [ literal[int] ]== identifier[_Action] . identifier[ADD_CHAR] keyword[and] identifier[action] [ literal[int] ]== identifier[accent] . identifier[remove_accent_char] ( identifier[comps] [ literal[int] ][- literal[int] ]))
keyword[return] identifier[any] ( identifier[map] ( identifier[atomic_check] , identifier[action_list] ))
|
def _can_undo(comps, trans_list):
"""
Return whether a components can be undone with one of the transformation in
trans_list.
"""
comps = list(comps)
accent_list = list(map(accent.get_accent_char, comps[1]))
mark_list = list(map(mark.get_mark_char, utils.join(comps)))
action_list = list(map(lambda x: _get_action(x), trans_list))
def atomic_check(action):
"""
Check if the `action` created one of the marks, accents, or characters
in `comps`.
"""
return action[0] == _Action.ADD_ACCENT and action[1] in accent_list or (action[0] == _Action.ADD_MARK and action[1] in mark_list) or (action[0] == _Action.ADD_CHAR and action[1] == accent.remove_accent_char(comps[1][-1])) # ơ, ư
return any(map(atomic_check, action_list))
|
def _mark_context_complete(marker, context, has_errors):
"""Transactionally 'complete' the context."""
current = None
if marker:
current = marker.key.get()
if not current:
return False
if current and current.complete:
return False
current.complete = True
current.has_errors = has_errors
current.put()
# Kick off completion tasks.
_insert_post_complete_tasks(context)
return True
|
def function[_mark_context_complete, parameter[marker, context, has_errors]]:
constant[Transactionally 'complete' the context.]
variable[current] assign[=] constant[None]
if name[marker] begin[:]
variable[current] assign[=] call[name[marker].key.get, parameter[]]
if <ast.UnaryOp object at 0x7da20c7cace0> begin[:]
return[constant[False]]
if <ast.BoolOp object at 0x7da20c7c91e0> begin[:]
return[constant[False]]
name[current].complete assign[=] constant[True]
name[current].has_errors assign[=] name[has_errors]
call[name[current].put, parameter[]]
call[name[_insert_post_complete_tasks], parameter[name[context]]]
return[constant[True]]
|
keyword[def] identifier[_mark_context_complete] ( identifier[marker] , identifier[context] , identifier[has_errors] ):
literal[string]
identifier[current] = keyword[None]
keyword[if] identifier[marker] :
identifier[current] = identifier[marker] . identifier[key] . identifier[get] ()
keyword[if] keyword[not] identifier[current] :
keyword[return] keyword[False]
keyword[if] identifier[current] keyword[and] identifier[current] . identifier[complete] :
keyword[return] keyword[False]
identifier[current] . identifier[complete] = keyword[True]
identifier[current] . identifier[has_errors] = identifier[has_errors]
identifier[current] . identifier[put] ()
identifier[_insert_post_complete_tasks] ( identifier[context] )
keyword[return] keyword[True]
|
def _mark_context_complete(marker, context, has_errors):
"""Transactionally 'complete' the context."""
current = None
if marker:
current = marker.key.get() # depends on [control=['if'], data=[]]
if not current:
return False # depends on [control=['if'], data=[]]
if current and current.complete:
return False # depends on [control=['if'], data=[]]
current.complete = True
current.has_errors = has_errors
current.put()
# Kick off completion tasks.
_insert_post_complete_tasks(context)
return True
|
def is_float_array(l):
    r"""Return True if ``l`` is a numpy array of floats (any dimension)."""
    # dtype.kind 'f' covers all float widths (float16/32/64/...).
    return isinstance(l, np.ndarray) and l.dtype.kind == 'f'
|
def function[is_float_array, parameter[l]]:
constant[Checks if l is a numpy array of floats (any dimension
]
if call[name[isinstance], parameter[name[l], name[np].ndarray]] begin[:]
if compare[name[l].dtype.kind equal[==] constant[f]] begin[:]
return[constant[True]]
return[constant[False]]
|
keyword[def] identifier[is_float_array] ( identifier[l] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[l] , identifier[np] . identifier[ndarray] ):
keyword[if] identifier[l] . identifier[dtype] . identifier[kind] == literal[string] :
keyword[return] keyword[True]
keyword[return] keyword[False]
|
def is_float_array(l):
"""Checks if l is a numpy array of floats (any dimension
"""
if isinstance(l, np.ndarray):
if l.dtype.kind == 'f':
return True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return False
|
def connectdown(np, p, acc, outlet, wtsd=None, workingdir=None, mpiexedir=None,
                exedir=None, log_file=None, runtime_file=None, hostfile=None):
    """Reads an ad8 contributing area file,
    identifies the location of the largest ad8 value as the outlet of the largest watershed.

    :param np: number of MPI processes (forwarded as 'n' to the mpiexec launcher)
    :param p: input raster forwarded as TauDEM's '-p' argument (presumably the
        D8 flow-direction grid -- confirm against the TauDEM ConnectDown docs)
    :param acc: raster forwarded as '-ad8' (contributing area)
    :param outlet: output path forwarded as '-o' (the detected outlet)
    :param wtsd: optional watershed mask raster ('-w'); a default mask is
        generated when this is None or not an existing file
    :param workingdir: working directory for the TauDEM run
    :param mpiexedir: directory containing the mpiexec executable
    :param exedir: directory containing the TauDEM executables
    :param log_file: path for the execution log file
    :param runtime_file: path for the runtime statistics file
    :param hostfile: MPI hostfile
    :return: result of TauDEM.run for this invocation
    """
    # If watershed is not specified, generate a default mask layer.
    # NOTE(review): the mask is derived from `p` here even though the
    # original comment said `acc` -- confirm which input is intended.
    if wtsd is None or not os.path.isfile(wtsd):
        p, workingdir = TauDEM.check_infile_and_wp(p, workingdir)
        # Default mask file is placed inside the validated working directory.
        wtsd = workingdir + os.sep + 'wtsd_default.tif'
        RasterUtilClass.get_mask_from_raster(p, wtsd, True)
    fname = TauDEM.func_name('connectdown')
    return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
                      {'-p': p, '-ad8': acc, '-w': wtsd},
                      workingdir,
                      None,
                      {'-o': outlet},
                      {'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
                      {'logfile': log_file, 'runtimefile': runtime_file})
|
def function[connectdown, parameter[np, p, acc, outlet, wtsd, workingdir, mpiexedir, exedir, log_file, runtime_file, hostfile]]:
constant[Reads an ad8 contributing area file,
identifies the location of the largest ad8 value as the outlet of the largest watershed]
if <ast.BoolOp object at 0x7da1b2595660> begin[:]
<ast.Tuple object at 0x7da1b2596a10> assign[=] call[name[TauDEM].check_infile_and_wp, parameter[name[p], name[workingdir]]]
variable[wtsd] assign[=] binary_operation[binary_operation[name[workingdir] + name[os].sep] + constant[wtsd_default.tif]]
call[name[RasterUtilClass].get_mask_from_raster, parameter[name[p], name[wtsd], constant[True]]]
variable[fname] assign[=] call[name[TauDEM].func_name, parameter[constant[connectdown]]]
return[call[name[TauDEM].run, parameter[call[name[FileClass].get_executable_fullpath, parameter[name[fname], name[exedir]]], dictionary[[<ast.Constant object at 0x7da2054a4430>, <ast.Constant object at 0x7da2054a6770>, <ast.Constant object at 0x7da2054a4700>], [<ast.Name object at 0x7da2054a4ac0>, <ast.Name object at 0x7da2054a6050>, <ast.Name object at 0x7da2054a72e0>]], name[workingdir], constant[None], dictionary[[<ast.Constant object at 0x7da2054a6c50>], [<ast.Name object at 0x7da2054a7cd0>]], dictionary[[<ast.Constant object at 0x7da2054a45b0>, <ast.Constant object at 0x7da2054a7100>, <ast.Constant object at 0x7da2054a7a60>], [<ast.Name object at 0x7da2054a6530>, <ast.Name object at 0x7da2054a71f0>, <ast.Name object at 0x7da2054a7460>]], dictionary[[<ast.Constant object at 0x7da2054a7280>, <ast.Constant object at 0x7da2054a7970>], [<ast.Name object at 0x7da2054a5d80>, <ast.Name object at 0x7da2054a78b0>]]]]]
|
keyword[def] identifier[connectdown] ( identifier[np] , identifier[p] , identifier[acc] , identifier[outlet] , identifier[wtsd] = keyword[None] , identifier[workingdir] = keyword[None] , identifier[mpiexedir] = keyword[None] ,
identifier[exedir] = keyword[None] , identifier[log_file] = keyword[None] , identifier[runtime_file] = keyword[None] , identifier[hostfile] = keyword[None] ):
literal[string]
keyword[if] identifier[wtsd] keyword[is] keyword[None] keyword[or] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[wtsd] ):
identifier[p] , identifier[workingdir] = identifier[TauDEM] . identifier[check_infile_and_wp] ( identifier[p] , identifier[workingdir] )
identifier[wtsd] = identifier[workingdir] + identifier[os] . identifier[sep] + literal[string]
identifier[RasterUtilClass] . identifier[get_mask_from_raster] ( identifier[p] , identifier[wtsd] , keyword[True] )
identifier[fname] = identifier[TauDEM] . identifier[func_name] ( literal[string] )
keyword[return] identifier[TauDEM] . identifier[run] ( identifier[FileClass] . identifier[get_executable_fullpath] ( identifier[fname] , identifier[exedir] ),
{ literal[string] : identifier[p] , literal[string] : identifier[acc] , literal[string] : identifier[wtsd] },
identifier[workingdir] ,
keyword[None] ,
{ literal[string] : identifier[outlet] },
{ literal[string] : identifier[mpiexedir] , literal[string] : identifier[hostfile] , literal[string] : identifier[np] },
{ literal[string] : identifier[log_file] , literal[string] : identifier[runtime_file] })
|
def connectdown(np, p, acc, outlet, wtsd=None, workingdir=None, mpiexedir=None, exedir=None, log_file=None, runtime_file=None, hostfile=None):
"""Reads an ad8 contributing area file,
identifies the location of the largest ad8 value as the outlet of the largest watershed"""
# If watershed is not specified, use acc to generate a mask layer.
if wtsd is None or not os.path.isfile(wtsd):
(p, workingdir) = TauDEM.check_infile_and_wp(p, workingdir)
wtsd = workingdir + os.sep + 'wtsd_default.tif'
RasterUtilClass.get_mask_from_raster(p, wtsd, True) # depends on [control=['if'], data=[]]
fname = TauDEM.func_name('connectdown')
return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir), {'-p': p, '-ad8': acc, '-w': wtsd}, workingdir, None, {'-o': outlet}, {'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np}, {'logfile': log_file, 'runtimefile': runtime_file})
|
def mark_best_classifications(errors):
    """
    Convenience wrapper around mark_best_classification.

    For each TextLogError in `errors`, look up the best match (errors with no
    match meeting the cut-off score are skipped) and persist the result via
    mark_best_classification.
    """
    for error_line in errors:
        match = get_best_match(error_line)
        if match:
            mark_best_classification(error_line, match.classified_failure)
|
def function[mark_best_classifications, parameter[errors]]:
constant[
Convenience wrapper around mark_best_classification.
Finds the best match for each TextLogError in errors, handling no match
meeting the cut off score and then mark_best_classification to save that
information.
]
for taget[name[text_log_error]] in starred[name[errors]] begin[:]
variable[best_match] assign[=] call[name[get_best_match], parameter[name[text_log_error]]]
if <ast.UnaryOp object at 0x7da1b08a74c0> begin[:]
continue
call[name[mark_best_classification], parameter[name[text_log_error], name[best_match].classified_failure]]
|
keyword[def] identifier[mark_best_classifications] ( identifier[errors] ):
literal[string]
keyword[for] identifier[text_log_error] keyword[in] identifier[errors] :
identifier[best_match] = identifier[get_best_match] ( identifier[text_log_error] )
keyword[if] keyword[not] identifier[best_match] :
keyword[continue]
identifier[mark_best_classification] ( identifier[text_log_error] , identifier[best_match] . identifier[classified_failure] )
|
def mark_best_classifications(errors):
"""
Convenience wrapper around mark_best_classification.
Finds the best match for each TextLogError in errors, handling no match
meeting the cut off score and then mark_best_classification to save that
information.
"""
for text_log_error in errors:
best_match = get_best_match(text_log_error)
if not best_match:
continue # depends on [control=['if'], data=[]]
mark_best_classification(text_log_error, best_match.classified_failure) # depends on [control=['for'], data=['text_log_error']]
|
def parse(self, data):
    # type: (bytes) -> None
    '''
    Parse the passed in data into a UDF Partition Map.

    Parameters:
     data - The data to parse.
    Returns:
     Nothing.
    '''
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('UDF Partition Map already initialized')

    unpacked = struct.unpack_from(self.FMT, data, 0)
    (map_type, map_length, vol_seqnum, self.part_num) = unpacked

    # Only a fixed-value header is accepted; anything else is a bad ISO.
    if map_type != 1:
        raise pycdlibexception.PyCdlibInvalidISO('UDF Partition Map type is not 1')
    if map_length != 6:
        raise pycdlibexception.PyCdlibInvalidISO('UDF Partition Map length is not 6')
    if vol_seqnum != 1:
        raise pycdlibexception.PyCdlibInvalidISO('UDF Partition Volume Sequence Number is not 1')

    self._initialized = True
|
def function[parse, parameter[self, data]]:
constant[
Parse the passed in data into a UDF Partition Map.
Parameters:
data - The data to parse.
Returns:
Nothing.
]
if name[self]._initialized begin[:]
<ast.Raise object at 0x7da20c6c77c0>
<ast.Tuple object at 0x7da1b0f0df90> assign[=] call[name[struct].unpack_from, parameter[name[self].FMT, name[data], constant[0]]]
if compare[name[map_type] not_equal[!=] constant[1]] begin[:]
<ast.Raise object at 0x7da1b0f0e4a0>
if compare[name[map_length] not_equal[!=] constant[6]] begin[:]
<ast.Raise object at 0x7da1b0f0f490>
if compare[name[vol_seqnum] not_equal[!=] constant[1]] begin[:]
<ast.Raise object at 0x7da1b0f0e830>
name[self]._initialized assign[=] constant[True]
|
keyword[def] identifier[parse] ( identifier[self] , identifier[data] ):
literal[string]
keyword[if] identifier[self] . identifier[_initialized] :
keyword[raise] identifier[pycdlibexception] . identifier[PyCdlibInternalError] ( literal[string] )
( identifier[map_type] , identifier[map_length] , identifier[vol_seqnum] ,
identifier[self] . identifier[part_num] )= identifier[struct] . identifier[unpack_from] ( identifier[self] . identifier[FMT] , identifier[data] , literal[int] )
keyword[if] identifier[map_type] != literal[int] :
keyword[raise] identifier[pycdlibexception] . identifier[PyCdlibInvalidISO] ( literal[string] )
keyword[if] identifier[map_length] != literal[int] :
keyword[raise] identifier[pycdlibexception] . identifier[PyCdlibInvalidISO] ( literal[string] )
keyword[if] identifier[vol_seqnum] != literal[int] :
keyword[raise] identifier[pycdlibexception] . identifier[PyCdlibInvalidISO] ( literal[string] )
identifier[self] . identifier[_initialized] = keyword[True]
|
def parse(self, data):
# type: (bytes) -> None
'\n Parse the passed in data into a UDF Partition Map.\n\n Parameters:\n data - The data to parse.\n Returns:\n Nothing.\n '
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Partition Map already initialized') # depends on [control=['if'], data=[]]
(map_type, map_length, vol_seqnum, self.part_num) = struct.unpack_from(self.FMT, data, 0)
if map_type != 1:
raise pycdlibexception.PyCdlibInvalidISO('UDF Partition Map type is not 1') # depends on [control=['if'], data=[]]
if map_length != 6:
raise pycdlibexception.PyCdlibInvalidISO('UDF Partition Map length is not 6') # depends on [control=['if'], data=[]]
if vol_seqnum != 1:
raise pycdlibexception.PyCdlibInvalidISO('UDF Partition Volume Sequence Number is not 1') # depends on [control=['if'], data=[]]
self._initialized = True
|
def DbGetClassPropertyList(self, argin):
    """ Get property list for a given Tango class with a specified filter

    :param argin: The filter
    :type: tango.DevString
    :return: Property name list
    :rtype: tango.DevVarStringArray """
    self._log.debug("In DbGetClassPropertyList()")
    # An empty filter matches everything; otherwise translate the caller's
    # wildcards into the database's wildcard syntax.
    pattern = replace_wildcard(argin) if argin else "%"
    return self.db.get_class_property_list(pattern)
|
def function[DbGetClassPropertyList, parameter[self, argin]]:
constant[ Get property list for a given Tango class with a specified filter
:param argin: The filter
:type: tango.DevString
:return: Property name list
:rtype: tango.DevVarStringArray ]
call[name[self]._log.debug, parameter[constant[In DbGetClassPropertyList()]]]
if <ast.UnaryOp object at 0x7da2041d9cf0> begin[:]
variable[argin] assign[=] constant[%]
return[call[name[self].db.get_class_property_list, parameter[name[argin]]]]
|
keyword[def] identifier[DbGetClassPropertyList] ( identifier[self] , identifier[argin] ):
literal[string]
identifier[self] . identifier[_log] . identifier[debug] ( literal[string] )
keyword[if] keyword[not] identifier[argin] :
identifier[argin] = literal[string]
keyword[else] :
identifier[argin] = identifier[replace_wildcard] ( identifier[argin] )
keyword[return] identifier[self] . identifier[db] . identifier[get_class_property_list] ( identifier[argin] )
|
def DbGetClassPropertyList(self, argin):
""" Get property list for a given Tango class with a specified filter
:param argin: The filter
:type: tango.DevString
:return: Property name list
:rtype: tango.DevVarStringArray """
self._log.debug('In DbGetClassPropertyList()')
if not argin:
argin = '%' # depends on [control=['if'], data=[]]
else:
argin = replace_wildcard(argin)
return self.db.get_class_property_list(argin)
|
def from_sec(class_, sec):
    """
    Create a key from an sec bytestream (which is an encoding of a public pair).
    """
    pair = sec_to_public_pair(sec, class_._generator)
    compressed = is_sec_compressed(sec)
    return class_(public_pair=pair, is_compressed=compressed)
|
def function[from_sec, parameter[class_, sec]]:
constant[
Create a key from an sec bytestream (which is an encoding of a public pair).
]
variable[public_pair] assign[=] call[name[sec_to_public_pair], parameter[name[sec], name[class_]._generator]]
return[call[name[class_], parameter[]]]
|
keyword[def] identifier[from_sec] ( identifier[class_] , identifier[sec] ):
literal[string]
identifier[public_pair] = identifier[sec_to_public_pair] ( identifier[sec] , identifier[class_] . identifier[_generator] )
keyword[return] identifier[class_] ( identifier[public_pair] = identifier[public_pair] , identifier[is_compressed] = identifier[is_sec_compressed] ( identifier[sec] ))
|
def from_sec(class_, sec):
"""
Create a key from an sec bytestream (which is an encoding of a public pair).
"""
public_pair = sec_to_public_pair(sec, class_._generator)
return class_(public_pair=public_pair, is_compressed=is_sec_compressed(sec))
|
def read_tuple_ticks(self, symbol, start, end):
    ''' read ticks as tuple

    Queries Tick rows for `symbol` in the half-open time window
    [start, end) and converts each row via __sqlToTupleTick.
    '''
    # NOTE(review): sys.maxint exists only on Python 2; on Python 3 this
    # would raise AttributeError (sys.maxsize is the replacement) -- confirm
    # the target interpreter.
    if end is None:
        end=sys.maxint
    # getReadSession() returns a session factory/registry; calling it again
    # yields the actual session object.
    session=self.getReadSession()()
    try:
        rows=session.query(Tick).filter(and_(Tick.symbol == symbol,
                                 Tick.time >= int(start),
                                 Tick.time < int(end)))
    finally:
        # Always return the scoped session to the registry, even on error.
        self.getReadSession().remove()
    # NOTE(review): `rows` is a lazy query object iterated only after
    # remove(); verify results are still accessible after session teardown.
    return [self.__sqlToTupleTick(row) for row in rows]
|
def function[read_tuple_ticks, parameter[self, symbol, start, end]]:
constant[ read ticks as tuple ]
if compare[name[end] is constant[None]] begin[:]
variable[end] assign[=] name[sys].maxint
variable[session] assign[=] call[call[name[self].getReadSession, parameter[]], parameter[]]
<ast.Try object at 0x7da1b0bd45e0>
return[<ast.ListComp object at 0x7da1b0a4f100>]
|
keyword[def] identifier[read_tuple_ticks] ( identifier[self] , identifier[symbol] , identifier[start] , identifier[end] ):
literal[string]
keyword[if] identifier[end] keyword[is] keyword[None] :
identifier[end] = identifier[sys] . identifier[maxint]
identifier[session] = identifier[self] . identifier[getReadSession] ()()
keyword[try] :
identifier[rows] = identifier[session] . identifier[query] ( identifier[Tick] ). identifier[filter] ( identifier[and_] ( identifier[Tick] . identifier[symbol] == identifier[symbol] ,
identifier[Tick] . identifier[time] >= identifier[int] ( identifier[start] ),
identifier[Tick] . identifier[time] < identifier[int] ( identifier[end] )))
keyword[finally] :
identifier[self] . identifier[getReadSession] (). identifier[remove] ()
keyword[return] [ identifier[self] . identifier[__sqlToTupleTick] ( identifier[row] ) keyword[for] identifier[row] keyword[in] identifier[rows] ]
|
def read_tuple_ticks(self, symbol, start, end):
""" read ticks as tuple """
if end is None:
end = sys.maxint # depends on [control=['if'], data=['end']]
session = self.getReadSession()()
try:
rows = session.query(Tick).filter(and_(Tick.symbol == symbol, Tick.time >= int(start), Tick.time < int(end))) # depends on [control=['try'], data=[]]
finally:
self.getReadSession().remove()
return [self.__sqlToTupleTick(row) for row in rows]
|
def pointwise_free_energies(self, therm_state=None):
    r"""
    Computes the pointwise free energies :math:`-\log(\mu^k(x))` for all points x.

    :math:`\mu^k(x)` is the optimal estimate of the Boltzmann distribution
    of the k'th ensemble defined on the set of all samples.

    Parameters
    ----------
    therm_state : int or None, default=None
        Selects the thermodynamic state k for which to compute the
        pointwise free energies.
        None selects the "unbiased" state which is defined by having
        zero bias energy.

    Returns
    -------
    mu_k : list of numpy.ndarray(X_i, dtype=numpy.float64)
        list of the same layout as dtrajs (or ttrajs). mu_k[i][t]
        contains the pointwise free energy of the frame seen in
        trajectory i and time step t.
        Frames that are not in the connected sets get assiged an
        infinite pointwise free energy.
    """
    assert self.therm_energies is not None, \
        'MEMM has to be estimate()\'d before pointwise free energies can be calculated.'
    if therm_state is not None:
        assert therm_state<=self.nthermo
    # One zero-filled output buffer per trajectory, covering both the biased
    # and the equilibrium trajectories; lengths mirror the dtrajs.
    mu = [_np.zeros(d.shape[0], dtype=_np.float64) for d in self.dtrajs+self.equilibrium_dtrajs]
    if self.equilibrium is None:
        # No equilibrium data: plain TRAM estimator fills `mu` in place.
        _tram.get_pointwise_unbiased_free_energies(
            therm_state,
            self.log_lagrangian_mult, self.biased_conf_energies,
            self.therm_energies, self.count_matrices,
            self.btrajs, self.dtrajs,
            self.state_counts, None, None, mu)
    else:
        # Equilibrium data present: the TRAMMBAR variant additionally takes
        # the equilibrium trajectories and their state counts.
        # NOTE(review): overcounting_factor=1.0/self.lag presumably corrects
        # for lag-time overcounting of equilibrium frames -- confirm against
        # the TRAMMBAR implementation docs.
        _trammbar.get_pointwise_unbiased_free_energies(
            therm_state,
            self.log_lagrangian_mult, self.biased_conf_energies,
            self.therm_energies, self.count_matrices,
            self.btrajs+self.equilibrium_btrajs, self.dtrajs+self.equilibrium_dtrajs,
            self.state_counts, None, None, mu,
            equilibrium_therm_state_counts=self.equilibrium_state_counts.sum(axis=1).astype(_np.intc),
            overcounting_factor=1.0/self.lag)
    return mu
|
def function[pointwise_free_energies, parameter[self, therm_state]]:
constant[
Computes the pointwise free energies :math:`-\log(\mu^k(x))` for all points x.
:math:`\mu^k(x)` is the optimal estimate of the Boltzmann distribution
of the k'th ensemble defined on the set of all samples.
Parameters
----------
therm_state : int or None, default=None
Selects the thermodynamic state k for which to compute the
pointwise free energies.
None selects the "unbiased" state which is defined by having
zero bias energy.
Returns
-------
mu_k : list of numpy.ndarray(X_i, dtype=numpy.float64)
list of the same layout as dtrajs (or ttrajs). mu_k[i][t]
contains the pointwise free energy of the frame seen in
trajectory i and time step t.
Frames that are not in the connected sets get assiged an
infinite pointwise free energy.
]
assert[compare[name[self].therm_energies is_not constant[None]]]
if compare[name[therm_state] is_not constant[None]] begin[:]
assert[compare[name[therm_state] less_or_equal[<=] name[self].nthermo]]
variable[mu] assign[=] <ast.ListComp object at 0x7da1b07ff790>
if compare[name[self].equilibrium is constant[None]] begin[:]
call[name[_tram].get_pointwise_unbiased_free_energies, parameter[name[therm_state], name[self].log_lagrangian_mult, name[self].biased_conf_energies, name[self].therm_energies, name[self].count_matrices, name[self].btrajs, name[self].dtrajs, name[self].state_counts, constant[None], constant[None], name[mu]]]
return[name[mu]]
|
keyword[def] identifier[pointwise_free_energies] ( identifier[self] , identifier[therm_state] = keyword[None] ):
literal[string]
keyword[assert] identifier[self] . identifier[therm_energies] keyword[is] keyword[not] keyword[None] , literal[string]
keyword[if] identifier[therm_state] keyword[is] keyword[not] keyword[None] :
keyword[assert] identifier[therm_state] <= identifier[self] . identifier[nthermo]
identifier[mu] =[ identifier[_np] . identifier[zeros] ( identifier[d] . identifier[shape] [ literal[int] ], identifier[dtype] = identifier[_np] . identifier[float64] ) keyword[for] identifier[d] keyword[in] identifier[self] . identifier[dtrajs] + identifier[self] . identifier[equilibrium_dtrajs] ]
keyword[if] identifier[self] . identifier[equilibrium] keyword[is] keyword[None] :
identifier[_tram] . identifier[get_pointwise_unbiased_free_energies] (
identifier[therm_state] ,
identifier[self] . identifier[log_lagrangian_mult] , identifier[self] . identifier[biased_conf_energies] ,
identifier[self] . identifier[therm_energies] , identifier[self] . identifier[count_matrices] ,
identifier[self] . identifier[btrajs] , identifier[self] . identifier[dtrajs] ,
identifier[self] . identifier[state_counts] , keyword[None] , keyword[None] , identifier[mu] )
keyword[else] :
identifier[_trammbar] . identifier[get_pointwise_unbiased_free_energies] (
identifier[therm_state] ,
identifier[self] . identifier[log_lagrangian_mult] , identifier[self] . identifier[biased_conf_energies] ,
identifier[self] . identifier[therm_energies] , identifier[self] . identifier[count_matrices] ,
identifier[self] . identifier[btrajs] + identifier[self] . identifier[equilibrium_btrajs] , identifier[self] . identifier[dtrajs] + identifier[self] . identifier[equilibrium_dtrajs] ,
identifier[self] . identifier[state_counts] , keyword[None] , keyword[None] , identifier[mu] ,
identifier[equilibrium_therm_state_counts] = identifier[self] . identifier[equilibrium_state_counts] . identifier[sum] ( identifier[axis] = literal[int] ). identifier[astype] ( identifier[_np] . identifier[intc] ),
identifier[overcounting_factor] = literal[int] / identifier[self] . identifier[lag] )
keyword[return] identifier[mu]
|
def pointwise_free_energies(self, therm_state=None):
    r"""
    Compute the pointwise free energies :math:`-\log(\mu^k(x))` for all samples x.

    :math:`\mu^k(x)` is the optimal estimate of the Boltzmann distribution
    of the k'th thermodynamic ensemble, defined on the set of all samples.

    Parameters
    ----------
    therm_state : int or None, default=None
        Index of the thermodynamic state k for which the pointwise free
        energies are computed. None selects the "unbiased" state, i.e. the
        state with zero bias energy.

    Returns
    -------
    mu_k : list of numpy.ndarray(X_i, dtype=numpy.float64)
        List with the same layout as dtrajs (or ttrajs); element [i][t] is
        the pointwise free energy of the frame at time step t of trajectory
        i. Frames outside the connected sets are assigned an infinite
        pointwise free energy.
    """
    assert self.therm_energies is not None, "MEMM has to be estimate()'d before pointwise free energies can be calculated."
    if therm_state is not None:
        assert therm_state <= self.nthermo
    # One output buffer per trajectory; the low-level routines fill these in place.
    all_dtrajs = self.dtrajs + self.equilibrium_dtrajs
    pointwise = [_np.zeros(traj.shape[0], dtype=_np.float64) for traj in all_dtrajs]
    if self.equilibrium is None:
        # Plain TRAM estimate: no extra equilibrium data to fold in.
        _tram.get_pointwise_unbiased_free_energies(
            therm_state,
            self.log_lagrangian_mult, self.biased_conf_energies,
            self.therm_energies, self.count_matrices,
            self.btrajs, self.dtrajs,
            self.state_counts, None, None, pointwise)
    else:
        # TRAM+MBAR estimate: also pass the equilibrium trajectories and
        # per-thermodynamic-state equilibrium counts.
        eq_state_counts = self.equilibrium_state_counts.sum(axis=1).astype(_np.intc)
        _trammbar.get_pointwise_unbiased_free_energies(
            therm_state,
            self.log_lagrangian_mult, self.biased_conf_energies,
            self.therm_energies, self.count_matrices,
            self.btrajs + self.equilibrium_btrajs, all_dtrajs,
            self.state_counts, None, None, pointwise,
            equilibrium_therm_state_counts=eq_state_counts,
            overcounting_factor=1.0 / self.lag)
    return pointwise
|
def pgen_lsp(
        times,
        mags,
        errs,
        magsarefluxes=False,
        startp=None,
        endp=None,
        stepsize=1.0e-4,
        autofreq=True,
        nbestpeaks=5,
        periodepsilon=0.1,
        sigclip=10.0,
        nworkers=None,
        workchunksize=None,
        glspfunc=_glsp_worker_withtau,
        verbose=True
):
    '''This calculates the generalized Lomb-Scargle periodogram.

    Uses the algorithm from Zechmeister and Kurster (2009).

    Parameters
    ----------

    times,mags,errs : np.array
        The mag/flux time-series with associated measurement errors to run the
        period-finding on.

    magsarefluxes : bool
        If the input measurement values in `mags` and `errs` are in fluxes, set
        this to True.

    startp,endp : float or None
        The minimum and maximum periods to consider for the transit search.

    stepsize : float
        The step-size in frequency to use when constructing a frequency grid for
        the period search.

    autofreq : bool
        If this is True, the value of `stepsize` will be ignored and the
        :py:func:`astrobase.periodbase.get_frequency_grid` function will be used
        to generate a frequency grid based on `startp`, and `endp`. If these are
        None as well, `startp` will be set to 0.1 and `endp` will be set to
        `times.max() - times.min()`.

    nbestpeaks : int
        The number of 'best' peaks to return from the periodogram results,
        starting from the global maximum of the periodogram peak values.

    periodepsilon : float
        The fractional difference between successive values of 'best' periods
        when sorting by periodogram power to consider them as separate periods
        (as opposed to part of the same periodogram peak). This is used to avoid
        broad peaks in the periodogram and make sure the 'best' periods returned
        are all actually independent.

    sigclip : float or int or sequence of two floats/ints or None
        If a single float or int, a symmetric sigma-clip will be performed using
        the number provided as the sigma-multiplier to cut out from the input
        time-series.

        If a list of two ints/floats is provided, the function will perform an
        'asymmetric' sigma-clip. The first element in this list is the sigma
        value to use for fainter flux/mag values; the second element in this
        list is the sigma value to use for brighter flux/mag values. For
        example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
        dimmings and greater than 3-sigma brightenings. Here the meaning of
        "dimming" and "brightening" is set by *physics* (not the magnitude
        system), which is why the `magsarefluxes` kwarg must be correctly set.

        If `sigclip` is None, no sigma-clipping will be performed, and the
        time-series (with non-finite elems removed) will be passed through to
        the output.

    nworkers : int
        The number of parallel workers to use when calculating the periodogram.

    workchunksize : None or int
        If this is an int, will use chunks of the given size to break up the
        work for the parallel workers. If None, `multiprocessing.Pool.map`
        chooses a chunk size automatically.

    glspfunc : Python function
        The worker function to use to calculate the periodogram. This can be
        used to make this function calculate the time-series sampling window
        function instead of the time-series measurements' GLS periodogram by
        passing in `_glsp_worker_specwindow` instead of the default
        `_glsp_worker_withtau` function.

    verbose : bool
        If this is True, will indicate progress and details about the frequency
        grid used for the period search.

    Returns
    -------

    dict
        This function returns a dict, referred to as an `lspinfo` dict in other
        astrobase functions that operate on periodogram results. This is a
        standardized format across all astrobase period-finders, and is of the
        form below::

            {'bestperiod': the best period value in the periodogram,
             'bestlspval': the periodogram peak associated with the best period,
             'nbestpeaks': the input value of nbestpeaks,
             'nbestlspvals': nbestpeaks-size list of best period peak values,
             'nbestperiods': nbestpeaks-size list of best periods,
             'lspvals': the full array of periodogram powers,
             'periods': the full array of periods considered,
             'method':'gls' -> the name of the period-finder method,
             'kwargs':{ dict of all of the input kwargs for record-keeping}}

    '''

    # get rid of nans first and sigclip
    stimes, smags, serrs = sigclip_magseries(times,
                                             mags,
                                             errs,
                                             magsarefluxes=magsarefluxes,
                                             sigclip=sigclip)

    # get rid of zero errs
    nzind = npnonzero(serrs)
    stimes, smags, serrs = stimes[nzind], smags[nzind], serrs[nzind]

    # make sure there are enough points to calculate a spectrum
    if len(stimes) > 9 and len(smags) > 9 and len(serrs) > 9:

        # get the frequencies to use
        if startp:
            endf = 1.0/startp
        else:
            # default start period is 0.1 day
            endf = 1.0/0.1

        if endp:
            startf = 1.0/endp
        else:
            # default end period is length of time series
            startf = 1.0/(stimes.max() - stimes.min())

        # if we're not using autofreq, then use the provided frequencies
        if not autofreq:
            omegas = 2*pi_value*nparange(startf, endf, stepsize)
            if verbose:
                LOGINFO(
                    'using %s frequency points, start P = %.3f, end P = %.3f' %
                    (omegas.size, 1.0/endf, 1.0/startf)
                )
        else:
            # this gets an automatic grid of frequencies to use
            freqs = get_frequency_grid(stimes,
                                       minfreq=startf,
                                       maxfreq=endf)
            omegas = 2*pi_value*freqs
            if verbose:
                LOGINFO(
                    'using autofreq with %s frequency points, '
                    'start P = %.3f, end P = %.3f' %
                    (omegas.size, 1.0/freqs.max(), 1.0/freqs.min())
                )

        # map to parallel workers
        if (not nworkers) or (nworkers > NCPUS):
            nworkers = NCPUS
            if verbose:
                LOGINFO('using %s workers...' % nworkers)

        # BUGFIX: use the pool as a context manager so it is always torn down,
        # even if a worker raises; the previous close()/join() calls were
        # skipped on exceptions, leaking worker processes
        tasks = [(stimes, smags, serrs, x) for x in omegas]
        with Pool(nworkers) as pool:
            if workchunksize:
                lsp = pool.map(glspfunc, tasks, chunksize=workchunksize)
            else:
                lsp = pool.map(glspfunc, tasks)

        lsp = nparray(lsp)
        periods = 2.0*pi_value/omegas

        # find the nbestpeaks for the periodogram: 1. sort the lsp array by
        # highest value first 2. go down the values until we find five
        # values that are separated by at least periodepsilon in period

        # make sure to filter out non-finite values of lsp
        finitepeakind = npisfinite(lsp)
        finlsp = lsp[finitepeakind]
        finperiods = periods[finitepeakind]

        # make sure that finlsp has finite values before we work on it
        try:

            bestperiodind = npargmax(finlsp)

        except ValueError:

            LOGERROR('no finite periodogram values '
                     'for this mag series, skipping...')
            return {'bestperiod':npnan,
                    'bestlspval':npnan,
                    'nbestpeaks':nbestpeaks,
                    'nbestlspvals':None,
                    'nbestperiods':None,
                    'lspvals':None,
                    'omegas':omegas,
                    'periods':None,
                    'method':'gls',
                    'kwargs':{'startp':startp,
                              'endp':endp,
                              'stepsize':stepsize,
                              'autofreq':autofreq,
                              'periodepsilon':periodepsilon,
                              'nbestpeaks':nbestpeaks,
                              'sigclip':sigclip}}

        sortedlspind = npargsort(finlsp)[::-1]
        sortedlspperiods = finperiods[sortedlspind]
        sortedlspvals = finlsp[sortedlspind]

        # now get the nbestpeaks
        nbestperiods, nbestlspvals, peakcount = (
            [finperiods[bestperiodind]],
            [finlsp[bestperiodind]],
            1
        )
        prevperiod = sortedlspperiods[0]

        # find the best nbestpeaks in the lsp and their periods
        for period, lspval in zip(sortedlspperiods, sortedlspvals):

            if peakcount == nbestpeaks:
                break
            perioddiff = abs(period - prevperiod)
            bestperiodsdiff = [abs(period - x) for x in nbestperiods]

            # this ensures that this period is different from the last
            # period and from all the other existing best periods by
            # periodepsilon to make sure we jump to an entire different peak
            # in the periodogram
            if (perioddiff > (periodepsilon*prevperiod) and
                all(x > (periodepsilon*period) for x in bestperiodsdiff)):
                nbestperiods.append(period)
                nbestlspvals.append(lspval)
                peakcount = peakcount + 1

            prevperiod = period

        return {'bestperiod':finperiods[bestperiodind],
                'bestlspval':finlsp[bestperiodind],
                'nbestpeaks':nbestpeaks,
                'nbestlspvals':nbestlspvals,
                'nbestperiods':nbestperiods,
                'lspvals':lsp,
                'omegas':omegas,
                'periods':periods,
                'method':'gls',
                'kwargs':{'startp':startp,
                          'endp':endp,
                          'stepsize':stepsize,
                          'autofreq':autofreq,
                          'periodepsilon':periodepsilon,
                          'nbestpeaks':nbestpeaks,
                          'sigclip':sigclip}}

    else:

        LOGERROR('no good detections for these times and mags, skipping...')
        return {'bestperiod':npnan,
                'bestlspval':npnan,
                'nbestpeaks':nbestpeaks,
                'nbestlspvals':None,
                'nbestperiods':None,
                'lspvals':None,
                'omegas':None,
                'periods':None,
                'method':'gls',
                'kwargs':{'startp':startp,
                          'endp':endp,
                          'stepsize':stepsize,
                          'autofreq':autofreq,
                          'periodepsilon':periodepsilon,
                          'nbestpeaks':nbestpeaks,
                          'sigclip':sigclip}}
|
def function[pgen_lsp, parameter[times, mags, errs, magsarefluxes, startp, endp, stepsize, autofreq, nbestpeaks, periodepsilon, sigclip, nworkers, workchunksize, glspfunc, verbose]]:
constant[This calculates the generalized Lomb-Scargle periodogram.
Uses the algorithm from Zechmeister and Kurster (2009).
Parameters
----------
times,mags,errs : np.array
The mag/flux time-series with associated measurement errors to run the
period-finding on.
magsarefluxes : bool
If the input measurement values in `mags` and `errs` are in fluxes, set
this to True.
startp,endp : float or None
The minimum and maximum periods to consider for the transit search.
stepsize : float
The step-size in frequency to use when constructing a frequency grid for
the period search.
autofreq : bool
If this is True, the value of `stepsize` will be ignored and the
:py:func:`astrobase.periodbase.get_frequency_grid` function will be used
to generate a frequency grid based on `startp`, and `endp`. If these are
None as well, `startp` will be set to 0.1 and `endp` will be set to
`times.max() - times.min()`.
nbestpeaks : int
The number of 'best' peaks to return from the periodogram results,
starting from the global maximum of the periodogram peak values.
periodepsilon : float
The fractional difference between successive values of 'best' periods
when sorting by periodogram power to consider them as separate periods
(as opposed to part of the same periodogram peak). This is used to avoid
broad peaks in the periodogram and make sure the 'best' periods returned
are all actually independent.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
nworkers : int
The number of parallel workers to use when calculating the periodogram.
workchunksize : None or int
If this is an int, will use chunks of the given size to break up the
work for the parallel workers. If None, the chunk size is set to 1.
glspfunc : Python function
The worker function to use to calculate the periodogram. This can be
used to make this function calculate the time-series sampling window
function instead of the time-series measurements' GLS periodogram by
passing in `_glsp_worker_specwindow` instead of the default
`_glsp_worker_withtau` function.
verbose : bool
If this is True, will indicate progress and details about the frequency
grid used for the period search.
Returns
-------
dict
This function returns a dict, referred to as an `lspinfo` dict in other
astrobase functions that operate on periodogram results. This is a
standardized format across all astrobase period-finders, and is of the
form below::
{'bestperiod': the best period value in the periodogram,
'bestlspval': the periodogram peak associated with the best period,
'nbestpeaks': the input value of nbestpeaks,
'nbestlspvals': nbestpeaks-size list of best period peak values,
'nbestperiods': nbestpeaks-size list of best periods,
'lspvals': the full array of periodogram powers,
'periods': the full array of periods considered,
'method':'gls' -> the name of the period-finder method,
'kwargs':{ dict of all of the input kwargs for record-keeping}}
]
<ast.Tuple object at 0x7da1b01e6ce0> assign[=] call[name[sigclip_magseries], parameter[name[times], name[mags], name[errs]]]
variable[nzind] assign[=] call[name[npnonzero], parameter[name[serrs]]]
<ast.Tuple object at 0x7da2041d96c0> assign[=] tuple[[<ast.Subscript object at 0x7da2041d8f40>, <ast.Subscript object at 0x7da2041db250>, <ast.Subscript object at 0x7da2041dadd0>]]
if <ast.BoolOp object at 0x7da2041db580> begin[:]
if name[startp] begin[:]
variable[endf] assign[=] binary_operation[constant[1.0] / name[startp]]
if name[endp] begin[:]
variable[startf] assign[=] binary_operation[constant[1.0] / name[endp]]
if <ast.UnaryOp object at 0x7da1b01e6800> begin[:]
variable[omegas] assign[=] binary_operation[binary_operation[constant[2] * name[pi_value]] * call[name[nparange], parameter[name[startf], name[endf], name[stepsize]]]]
if name[verbose] begin[:]
call[name[LOGINFO], parameter[binary_operation[constant[using %s frequency points, start P = %.3f, end P = %.3f] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b01e74f0>, <ast.BinOp object at 0x7da1b01e67d0>, <ast.BinOp object at 0x7da1b01e6530>]]]]]
if <ast.BoolOp object at 0x7da20e954a00> begin[:]
variable[nworkers] assign[=] name[NCPUS]
if name[verbose] begin[:]
call[name[LOGINFO], parameter[binary_operation[constant[using %s workers...] <ast.Mod object at 0x7da2590d6920> name[nworkers]]]]
variable[pool] assign[=] call[name[Pool], parameter[name[nworkers]]]
variable[tasks] assign[=] <ast.ListComp object at 0x7da204962b00>
if name[workchunksize] begin[:]
variable[lsp] assign[=] call[name[pool].map, parameter[name[glspfunc], name[tasks]]]
call[name[pool].close, parameter[]]
call[name[pool].join, parameter[]]
<ast.Delete object at 0x7da2049633d0>
variable[lsp] assign[=] call[name[nparray], parameter[name[lsp]]]
variable[periods] assign[=] binary_operation[binary_operation[constant[2.0] * name[pi_value]] / name[omegas]]
variable[finitepeakind] assign[=] call[name[npisfinite], parameter[name[lsp]]]
variable[finlsp] assign[=] call[name[lsp]][name[finitepeakind]]
variable[finperiods] assign[=] call[name[periods]][name[finitepeakind]]
<ast.Try object at 0x7da1b01185e0>
variable[sortedlspind] assign[=] call[call[name[npargsort], parameter[name[finlsp]]]][<ast.Slice object at 0x7da1b0065d20>]
variable[sortedlspperiods] assign[=] call[name[finperiods]][name[sortedlspind]]
variable[sortedlspvals] assign[=] call[name[finlsp]][name[sortedlspind]]
<ast.Tuple object at 0x7da1b0064370> assign[=] tuple[[<ast.List object at 0x7da1b0067df0>, <ast.List object at 0x7da1b0067ee0>, <ast.Constant object at 0x7da1b0064220>]]
variable[prevperiod] assign[=] call[name[sortedlspperiods]][constant[0]]
for taget[tuple[[<ast.Name object at 0x7da1b0064d00>, <ast.Name object at 0x7da1b0064d90>]]] in starred[call[name[zip], parameter[name[sortedlspperiods], name[sortedlspvals]]]] begin[:]
if compare[name[peakcount] equal[==] name[nbestpeaks]] begin[:]
break
variable[perioddiff] assign[=] call[name[abs], parameter[binary_operation[name[period] - name[prevperiod]]]]
variable[bestperiodsdiff] assign[=] <ast.ListComp object at 0x7da1b0066770>
if <ast.BoolOp object at 0x7da1b0066aa0> begin[:]
call[name[nbestperiods].append, parameter[name[period]]]
call[name[nbestlspvals].append, parameter[name[lspval]]]
variable[peakcount] assign[=] binary_operation[name[peakcount] + constant[1]]
variable[prevperiod] assign[=] name[period]
return[dictionary[[<ast.Constant object at 0x7da1b0064a00>, <ast.Constant object at 0x7da1b0064640>, <ast.Constant object at 0x7da1b0064670>, <ast.Constant object at 0x7da1b0064730>, <ast.Constant object at 0x7da1b00648b0>, <ast.Constant object at 0x7da1b00661d0>, <ast.Constant object at 0x7da1b00646d0>, <ast.Constant object at 0x7da1b0066680>, <ast.Constant object at 0x7da1b0066620>, <ast.Constant object at 0x7da1b0066260>], [<ast.Subscript object at 0x7da1b0067d30>, <ast.Subscript object at 0x7da1b0067d00>, <ast.Name object at 0x7da1b0067dc0>, <ast.Name object at 0x7da1b00664a0>, <ast.Name object at 0x7da1b0066440>, <ast.Name object at 0x7da1b00663e0>, <ast.Name object at 0x7da1b0066e90>, <ast.Name object at 0x7da1b00662f0>, <ast.Constant object at 0x7da1b00669b0>, <ast.Dict object at 0x7da1b0067b80>]]]
|
keyword[def] identifier[pgen_lsp] (
identifier[times] ,
identifier[mags] ,
identifier[errs] ,
identifier[magsarefluxes] = keyword[False] ,
identifier[startp] = keyword[None] ,
identifier[endp] = keyword[None] ,
identifier[stepsize] = literal[int] ,
identifier[autofreq] = keyword[True] ,
identifier[nbestpeaks] = literal[int] ,
identifier[periodepsilon] = literal[int] ,
identifier[sigclip] = literal[int] ,
identifier[nworkers] = keyword[None] ,
identifier[workchunksize] = keyword[None] ,
identifier[glspfunc] = identifier[_glsp_worker_withtau] ,
identifier[verbose] = keyword[True]
):
literal[string]
identifier[stimes] , identifier[smags] , identifier[serrs] = identifier[sigclip_magseries] ( identifier[times] ,
identifier[mags] ,
identifier[errs] ,
identifier[magsarefluxes] = identifier[magsarefluxes] ,
identifier[sigclip] = identifier[sigclip] )
identifier[nzind] = identifier[npnonzero] ( identifier[serrs] )
identifier[stimes] , identifier[smags] , identifier[serrs] = identifier[stimes] [ identifier[nzind] ], identifier[smags] [ identifier[nzind] ], identifier[serrs] [ identifier[nzind] ]
keyword[if] identifier[len] ( identifier[stimes] )> literal[int] keyword[and] identifier[len] ( identifier[smags] )> literal[int] keyword[and] identifier[len] ( identifier[serrs] )> literal[int] :
keyword[if] identifier[startp] :
identifier[endf] = literal[int] / identifier[startp]
keyword[else] :
identifier[endf] = literal[int] / literal[int]
keyword[if] identifier[endp] :
identifier[startf] = literal[int] / identifier[endp]
keyword[else] :
identifier[startf] = literal[int] /( identifier[stimes] . identifier[max] ()- identifier[stimes] . identifier[min] ())
keyword[if] keyword[not] identifier[autofreq] :
identifier[omegas] = literal[int] * identifier[pi_value] * identifier[nparange] ( identifier[startf] , identifier[endf] , identifier[stepsize] )
keyword[if] identifier[verbose] :
identifier[LOGINFO] (
literal[string] %
( identifier[omegas] . identifier[size] , literal[int] / identifier[endf] , literal[int] / identifier[startf] )
)
keyword[else] :
identifier[freqs] = identifier[get_frequency_grid] ( identifier[stimes] ,
identifier[minfreq] = identifier[startf] ,
identifier[maxfreq] = identifier[endf] )
identifier[omegas] = literal[int] * identifier[pi_value] * identifier[freqs]
keyword[if] identifier[verbose] :
identifier[LOGINFO] (
literal[string]
literal[string] %
( identifier[omegas] . identifier[size] , literal[int] / identifier[freqs] . identifier[max] (), literal[int] / identifier[freqs] . identifier[min] ())
)
keyword[if] ( keyword[not] identifier[nworkers] ) keyword[or] ( identifier[nworkers] > identifier[NCPUS] ):
identifier[nworkers] = identifier[NCPUS]
keyword[if] identifier[verbose] :
identifier[LOGINFO] ( literal[string] % identifier[nworkers] )
identifier[pool] = identifier[Pool] ( identifier[nworkers] )
identifier[tasks] =[( identifier[stimes] , identifier[smags] , identifier[serrs] , identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[omegas] ]
keyword[if] identifier[workchunksize] :
identifier[lsp] = identifier[pool] . identifier[map] ( identifier[glspfunc] , identifier[tasks] , identifier[chunksize] = identifier[workchunksize] )
keyword[else] :
identifier[lsp] = identifier[pool] . identifier[map] ( identifier[glspfunc] , identifier[tasks] )
identifier[pool] . identifier[close] ()
identifier[pool] . identifier[join] ()
keyword[del] identifier[pool]
identifier[lsp] = identifier[nparray] ( identifier[lsp] )
identifier[periods] = literal[int] * identifier[pi_value] / identifier[omegas]
identifier[finitepeakind] = identifier[npisfinite] ( identifier[lsp] )
identifier[finlsp] = identifier[lsp] [ identifier[finitepeakind] ]
identifier[finperiods] = identifier[periods] [ identifier[finitepeakind] ]
keyword[try] :
identifier[bestperiodind] = identifier[npargmax] ( identifier[finlsp] )
keyword[except] identifier[ValueError] :
identifier[LOGERROR] ( literal[string]
literal[string] )
keyword[return] { literal[string] : identifier[npnan] ,
literal[string] : identifier[npnan] ,
literal[string] : identifier[nbestpeaks] ,
literal[string] : keyword[None] ,
literal[string] : keyword[None] ,
literal[string] : keyword[None] ,
literal[string] : identifier[omegas] ,
literal[string] : keyword[None] ,
literal[string] : literal[string] ,
literal[string] :{ literal[string] : identifier[startp] ,
literal[string] : identifier[endp] ,
literal[string] : identifier[stepsize] ,
literal[string] : identifier[autofreq] ,
literal[string] : identifier[periodepsilon] ,
literal[string] : identifier[nbestpeaks] ,
literal[string] : identifier[sigclip] }}
identifier[sortedlspind] = identifier[npargsort] ( identifier[finlsp] )[::- literal[int] ]
identifier[sortedlspperiods] = identifier[finperiods] [ identifier[sortedlspind] ]
identifier[sortedlspvals] = identifier[finlsp] [ identifier[sortedlspind] ]
identifier[nbestperiods] , identifier[nbestlspvals] , identifier[peakcount] =(
[ identifier[finperiods] [ identifier[bestperiodind] ]],
[ identifier[finlsp] [ identifier[bestperiodind] ]],
literal[int]
)
identifier[prevperiod] = identifier[sortedlspperiods] [ literal[int] ]
keyword[for] identifier[period] , identifier[lspval] keyword[in] identifier[zip] ( identifier[sortedlspperiods] , identifier[sortedlspvals] ):
keyword[if] identifier[peakcount] == identifier[nbestpeaks] :
keyword[break]
identifier[perioddiff] = identifier[abs] ( identifier[period] - identifier[prevperiod] )
identifier[bestperiodsdiff] =[ identifier[abs] ( identifier[period] - identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[nbestperiods] ]
keyword[if] ( identifier[perioddiff] >( identifier[periodepsilon] * identifier[prevperiod] ) keyword[and]
identifier[all] ( identifier[x] >( identifier[periodepsilon] * identifier[period] ) keyword[for] identifier[x] keyword[in] identifier[bestperiodsdiff] )):
identifier[nbestperiods] . identifier[append] ( identifier[period] )
identifier[nbestlspvals] . identifier[append] ( identifier[lspval] )
identifier[peakcount] = identifier[peakcount] + literal[int]
identifier[prevperiod] = identifier[period]
keyword[return] { literal[string] : identifier[finperiods] [ identifier[bestperiodind] ],
literal[string] : identifier[finlsp] [ identifier[bestperiodind] ],
literal[string] : identifier[nbestpeaks] ,
literal[string] : identifier[nbestlspvals] ,
literal[string] : identifier[nbestperiods] ,
literal[string] : identifier[lsp] ,
literal[string] : identifier[omegas] ,
literal[string] : identifier[periods] ,
literal[string] : literal[string] ,
literal[string] :{ literal[string] : identifier[startp] ,
literal[string] : identifier[endp] ,
literal[string] : identifier[stepsize] ,
literal[string] : identifier[autofreq] ,
literal[string] : identifier[periodepsilon] ,
literal[string] : identifier[nbestpeaks] ,
literal[string] : identifier[sigclip] }}
keyword[else] :
identifier[LOGERROR] ( literal[string] )
keyword[return] { literal[string] : identifier[npnan] ,
literal[string] : identifier[npnan] ,
literal[string] : identifier[nbestpeaks] ,
literal[string] : keyword[None] ,
literal[string] : keyword[None] ,
literal[string] : keyword[None] ,
literal[string] : keyword[None] ,
literal[string] : keyword[None] ,
literal[string] : literal[string] ,
literal[string] :{ literal[string] : identifier[startp] ,
literal[string] : identifier[endp] ,
literal[string] : identifier[stepsize] ,
literal[string] : identifier[autofreq] ,
literal[string] : identifier[periodepsilon] ,
literal[string] : identifier[nbestpeaks] ,
literal[string] : identifier[sigclip] }}
|
def pgen_lsp(times, mags, errs, magsarefluxes=False, startp=None, endp=None, stepsize=0.0001, autofreq=True, nbestpeaks=5, periodepsilon=0.1, sigclip=10.0, nworkers=None, workchunksize=None, glspfunc=_glsp_worker_withtau, verbose=True):
"""This calculates the generalized Lomb-Scargle periodogram.
Uses the algorithm from Zechmeister and Kurster (2009).
Parameters
----------
times,mags,errs : np.array
The mag/flux time-series with associated measurement errors to run the
period-finding on.
magsarefluxes : bool
If the input measurement values in `mags` and `errs` are in fluxes, set
this to True.
startp,endp : float or None
The minimum and maximum periods to consider for the transit search.
stepsize : float
The step-size in frequency to use when constructing a frequency grid for
the period search.
autofreq : bool
If this is True, the value of `stepsize` will be ignored and the
:py:func:`astrobase.periodbase.get_frequency_grid` function will be used
to generate a frequency grid based on `startp`, and `endp`. If these are
None as well, `startp` will be set to 0.1 and `endp` will be set to
`times.max() - times.min()`.
nbestpeaks : int
The number of 'best' peaks to return from the periodogram results,
starting from the global maximum of the periodogram peak values.
periodepsilon : float
The fractional difference between successive values of 'best' periods
when sorting by periodogram power to consider them as separate periods
(as opposed to part of the same periodogram peak). This is used to avoid
broad peaks in the periodogram and make sure the 'best' periods returned
are all actually independent.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
nworkers : int
The number of parallel workers to use when calculating the periodogram.
workchunksize : None or int
If this is an int, will use chunks of the given size to break up the
work for the parallel workers. If None, the chunk size is set to 1.
glspfunc : Python function
The worker function to use to calculate the periodogram. This can be
used to make this function calculate the time-series sampling window
function instead of the time-series measurements' GLS periodogram by
passing in `_glsp_worker_specwindow` instead of the default
`_glsp_worker_withtau` function.
verbose : bool
If this is True, will indicate progress and details about the frequency
grid used for the period search.
Returns
-------
dict
This function returns a dict, referred to as an `lspinfo` dict in other
astrobase functions that operate on periodogram results. This is a
standardized format across all astrobase period-finders, and is of the
form below::
{'bestperiod': the best period value in the periodogram,
'bestlspval': the periodogram peak associated with the best period,
'nbestpeaks': the input value of nbestpeaks,
'nbestlspvals': nbestpeaks-size list of best period peak values,
'nbestperiods': nbestpeaks-size list of best periods,
'lspvals': the full array of periodogram powers,
'periods': the full array of periods considered,
'method':'gls' -> the name of the period-finder method,
'kwargs':{ dict of all of the input kwargs for record-keeping}}
"""
# get rid of nans first and sigclip
(stimes, smags, serrs) = sigclip_magseries(times, mags, errs, magsarefluxes=magsarefluxes, sigclip=sigclip)
# get rid of zero errs
nzind = npnonzero(serrs)
(stimes, smags, serrs) = (stimes[nzind], smags[nzind], serrs[nzind])
# make sure there are enough points to calculate a spectrum
if len(stimes) > 9 and len(smags) > 9 and (len(serrs) > 9):
# get the frequencies to use
if startp:
endf = 1.0 / startp # depends on [control=['if'], data=[]]
else:
# default start period is 0.1 day
endf = 1.0 / 0.1
if endp:
startf = 1.0 / endp # depends on [control=['if'], data=[]]
else:
# default end period is length of time series
startf = 1.0 / (stimes.max() - stimes.min())
# if we're not using autofreq, then use the provided frequencies
if not autofreq:
omegas = 2 * pi_value * nparange(startf, endf, stepsize)
if verbose:
LOGINFO('using %s frequency points, start P = %.3f, end P = %.3f' % (omegas.size, 1.0 / endf, 1.0 / startf)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
# this gets an automatic grid of frequencies to use
freqs = get_frequency_grid(stimes, minfreq=startf, maxfreq=endf)
omegas = 2 * pi_value * freqs
if verbose:
LOGINFO('using autofreq with %s frequency points, start P = %.3f, end P = %.3f' % (omegas.size, 1.0 / freqs.max(), 1.0 / freqs.min())) # depends on [control=['if'], data=[]]
# map to parallel workers
if not nworkers or nworkers > NCPUS:
nworkers = NCPUS
if verbose:
LOGINFO('using %s workers...' % nworkers) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
pool = Pool(nworkers)
tasks = [(stimes, smags, serrs, x) for x in omegas]
if workchunksize:
lsp = pool.map(glspfunc, tasks, chunksize=workchunksize) # depends on [control=['if'], data=[]]
else:
lsp = pool.map(glspfunc, tasks)
pool.close()
pool.join()
del pool
lsp = nparray(lsp)
periods = 2.0 * pi_value / omegas
# find the nbestpeaks for the periodogram: 1. sort the lsp array by
# highest value first 2. go down the values until we find five
# values that are separated by at least periodepsilon in period
# make sure to filter out non-finite values of lsp
finitepeakind = npisfinite(lsp)
finlsp = lsp[finitepeakind]
finperiods = periods[finitepeakind]
# make sure that finlsp has finite values before we work on it
try:
bestperiodind = npargmax(finlsp) # depends on [control=['try'], data=[]]
except ValueError:
LOGERROR('no finite periodogram values for this mag series, skipping...')
return {'bestperiod': npnan, 'bestlspval': npnan, 'nbestpeaks': nbestpeaks, 'nbestlspvals': None, 'nbestperiods': None, 'lspvals': None, 'omegas': omegas, 'periods': None, 'method': 'gls', 'kwargs': {'startp': startp, 'endp': endp, 'stepsize': stepsize, 'autofreq': autofreq, 'periodepsilon': periodepsilon, 'nbestpeaks': nbestpeaks, 'sigclip': sigclip}} # depends on [control=['except'], data=[]]
sortedlspind = npargsort(finlsp)[::-1]
sortedlspperiods = finperiods[sortedlspind]
sortedlspvals = finlsp[sortedlspind]
# now get the nbestpeaks
(nbestperiods, nbestlspvals, peakcount) = ([finperiods[bestperiodind]], [finlsp[bestperiodind]], 1)
prevperiod = sortedlspperiods[0]
# find the best nbestpeaks in the lsp and their periods
for (period, lspval) in zip(sortedlspperiods, sortedlspvals):
if peakcount == nbestpeaks:
break # depends on [control=['if'], data=[]]
perioddiff = abs(period - prevperiod)
bestperiodsdiff = [abs(period - x) for x in nbestperiods]
# print('prevperiod = %s, thisperiod = %s, '
# 'perioddiff = %s, peakcount = %s' %
# (prevperiod, period, perioddiff, peakcount))
# this ensures that this period is different from the last
# period and from all the other existing best periods by
# periodepsilon to make sure we jump to an entire different peak
# in the periodogram
if perioddiff > periodepsilon * prevperiod and all((x > periodepsilon * period for x in bestperiodsdiff)):
nbestperiods.append(period)
nbestlspvals.append(lspval)
peakcount = peakcount + 1 # depends on [control=['if'], data=[]]
prevperiod = period # depends on [control=['for'], data=[]]
return {'bestperiod': finperiods[bestperiodind], 'bestlspval': finlsp[bestperiodind], 'nbestpeaks': nbestpeaks, 'nbestlspvals': nbestlspvals, 'nbestperiods': nbestperiods, 'lspvals': lsp, 'omegas': omegas, 'periods': periods, 'method': 'gls', 'kwargs': {'startp': startp, 'endp': endp, 'stepsize': stepsize, 'autofreq': autofreq, 'periodepsilon': periodepsilon, 'nbestpeaks': nbestpeaks, 'sigclip': sigclip}} # depends on [control=['if'], data=[]]
else:
LOGERROR('no good detections for these times and mags, skipping...')
return {'bestperiod': npnan, 'bestlspval': npnan, 'nbestpeaks': nbestpeaks, 'nbestlspvals': None, 'nbestperiods': None, 'lspvals': None, 'omegas': None, 'periods': None, 'method': 'gls', 'kwargs': {'startp': startp, 'endp': endp, 'stepsize': stepsize, 'autofreq': autofreq, 'periodepsilon': periodepsilon, 'nbestpeaks': nbestpeaks, 'sigclip': sigclip}}
|
def from_soup(self, tag_prof_header, tag_prof_nav):
    """
    Populate this user object from a scraped twitter profile page.

    :param tag_prof_header: soup tag capturing the left hand part of user info
    :param tag_prof_nav: soup tag capturing the upper part of user info
    :return: this User object with captured data filled in via beautifulsoup
    """
    # Both the handle (from href) and display name come from the same anchor
    name_link = tag_prof_header.find(
        'a', {'class': 'ProfileHeaderCard-nameLink u-textInheritColor js-nav'})
    self.user = name_link['href'].strip("/")
    self.full_name = name_link.text
    location = tag_prof_header.find(
        'span', {'class': 'ProfileHeaderCard-locationText u-dir'})
    if location is None:
        self.location = "None"
    else:
        self.location = location.text.strip()
    blog = tag_prof_header.find(
        'span', {'class': "ProfileHeaderCard-urlText u-dir"})
    if blog is None:
        # BUGFIX: previously assigned to the local name instead of
        # self.blog, leaving the attribute unset when no URL is present.
        self.blog = "None"
    else:
        self.blog = blog.text.strip()
    date_joined = tag_prof_header.find(
        'div', {'class': "ProfileHeaderCard-joinDate"}).find(
        'span',
        {'class': 'ProfileHeaderCard-joinDateText js-tooltip u-dir'})['title']
    if date_joined is None:
        # BUGFIX: was "self.data_joined" (typo), so the attribute read by
        # callers was never set on this path.
        self.date_joined = "Unknown"
    else:
        self.date_joined = date_joined.strip()
    self.id = tag_prof_nav.find('div', {'class': 'ProfileNav'})['data-user-id']
    tweets = tag_prof_nav.find(
        'span', {'class': "ProfileNav-value"})['data-count']
    self.tweets = 0 if tweets is None else int(tweets)
    following = tag_prof_nav.find(
        'li', {'class': "ProfileNav-item ProfileNav-item--following"}).find(
        'span', {'class': "ProfileNav-value"})['data-count']
    # BUGFIX: previously assigned 0 to the local name on the None path,
    # leaving self.following unset.
    self.following = 0 if following is None else int(following)
    followers = tag_prof_nav.find(
        'li', {'class': "ProfileNav-item ProfileNav-item--followers"}).find(
        'span', {'class': "ProfileNav-value"})['data-count']
    self.followers = 0 if followers is None else int(followers)
    likes = tag_prof_nav.find(
        'li', {'class': "ProfileNav-item ProfileNav-item--favorites"}).find(
        'span', {'class': "ProfileNav-value"})['data-count']
    self.likes = 0 if likes is None else int(likes)
    # Lists count is optional in the nav bar: both the <li> and its value
    # <span> may be absent, in which case the count defaults to 0.
    lists = tag_prof_nav.find(
        'li', {'class': "ProfileNav-item ProfileNav-item--lists"})
    if lists is None:
        self.lists = 0
    elif lists.find('span', {'class': "ProfileNav-value"}) is None:
        self.lists = 0
    else:
        self.lists = int(lists.find('span', {'class': "ProfileNav-value"}).text)
    return (self)
|
def function[from_soup, parameter[self, tag_prof_header, tag_prof_nav]]:
constant[
Returns the scraped user data from a twitter user page.
:param tag_prof_header: captures the left hand part of user info
:param tag_prof_nav: captures the upper part of user info
:return: Returns a User object with captured data via beautifulsoup
]
name[self].user assign[=] call[call[call[name[tag_prof_header].find, parameter[constant[a], dictionary[[<ast.Constant object at 0x7da1b1b100a0>], [<ast.Constant object at 0x7da1b1b131c0>]]]]][constant[href]].strip, parameter[constant[/]]]
name[self].full_name assign[=] call[name[tag_prof_header].find, parameter[constant[a], dictionary[[<ast.Constant object at 0x7da1b1b102e0>], [<ast.Constant object at 0x7da1b1b133a0>]]]].text
variable[location] assign[=] call[name[tag_prof_header].find, parameter[constant[span], dictionary[[<ast.Constant object at 0x7da1b1b10760>], [<ast.Constant object at 0x7da1b1b12590>]]]]
if compare[name[location] is constant[None]] begin[:]
name[self].location assign[=] constant[None]
variable[blog] assign[=] call[name[tag_prof_header].find, parameter[constant[span], dictionary[[<ast.Constant object at 0x7da1b1b12620>], [<ast.Constant object at 0x7da1b1b123b0>]]]]
if compare[name[blog] is constant[None]] begin[:]
variable[blog] assign[=] constant[None]
variable[date_joined] assign[=] call[call[call[name[tag_prof_header].find, parameter[constant[div], dictionary[[<ast.Constant object at 0x7da1b1b11bd0>], [<ast.Constant object at 0x7da1b1b120b0>]]]].find, parameter[constant[span], dictionary[[<ast.Constant object at 0x7da1b1b13ca0>], [<ast.Constant object at 0x7da1b1b12200>]]]]][constant[title]]
if compare[name[date_joined] is constant[None]] begin[:]
name[self].data_joined assign[=] constant[Unknown]
name[self].id assign[=] call[call[name[tag_prof_nav].find, parameter[constant[div], dictionary[[<ast.Constant object at 0x7da1b1b13f40>], [<ast.Constant object at 0x7da1b1b11840>]]]]][constant[data-user-id]]
variable[tweets] assign[=] call[call[name[tag_prof_nav].find, parameter[constant[span], dictionary[[<ast.Constant object at 0x7da1b1b12020>], [<ast.Constant object at 0x7da1b1b10e80>]]]]][constant[data-count]]
if compare[name[tweets] is constant[None]] begin[:]
name[self].tweets assign[=] constant[0]
variable[following] assign[=] call[call[call[name[tag_prof_nav].find, parameter[constant[li], dictionary[[<ast.Constant object at 0x7da1b1b108b0>], [<ast.Constant object at 0x7da1b1b11f30>]]]].find, parameter[constant[span], dictionary[[<ast.Constant object at 0x7da1b1b10160>], [<ast.Constant object at 0x7da1b1b10be0>]]]]][constant[data-count]]
if compare[name[following] is constant[None]] begin[:]
variable[following] assign[=] constant[0]
variable[followers] assign[=] call[call[call[name[tag_prof_nav].find, parameter[constant[li], dictionary[[<ast.Constant object at 0x7da1b1b10a30>], [<ast.Constant object at 0x7da1b1b12140>]]]].find, parameter[constant[span], dictionary[[<ast.Constant object at 0x7da18f811960>], [<ast.Constant object at 0x7da18c4cc370>]]]]][constant[data-count]]
if compare[name[followers] is constant[None]] begin[:]
name[self].followers assign[=] constant[0]
variable[likes] assign[=] call[call[call[name[tag_prof_nav].find, parameter[constant[li], dictionary[[<ast.Constant object at 0x7da18c4cc490>], [<ast.Constant object at 0x7da18c4cef20>]]]].find, parameter[constant[span], dictionary[[<ast.Constant object at 0x7da18c4cf6d0>], [<ast.Constant object at 0x7da18c4cc8e0>]]]]][constant[data-count]]
if compare[name[likes] is constant[None]] begin[:]
name[self].likes assign[=] constant[0]
variable[lists] assign[=] call[name[tag_prof_nav].find, parameter[constant[li], dictionary[[<ast.Constant object at 0x7da18bc70eb0>], [<ast.Constant object at 0x7da18bc73940>]]]]
if compare[name[lists] is constant[None]] begin[:]
name[self].lists assign[=] constant[0]
return[name[self]]
|
keyword[def] identifier[from_soup] ( identifier[self] , identifier[tag_prof_header] , identifier[tag_prof_nav] ):
literal[string]
identifier[self] . identifier[user] = identifier[tag_prof_header] . identifier[find] ( literal[string] ,{ literal[string] : literal[string] })[ literal[string] ]. identifier[strip] ( literal[string] )
identifier[self] . identifier[full_name] = identifier[tag_prof_header] . identifier[find] ( literal[string] ,{ literal[string] : literal[string] }). identifier[text]
identifier[location] = identifier[tag_prof_header] . identifier[find] ( literal[string] ,{ literal[string] : literal[string] })
keyword[if] identifier[location] keyword[is] keyword[None] :
identifier[self] . identifier[location] = literal[string]
keyword[else] :
identifier[self] . identifier[location] = identifier[location] . identifier[text] . identifier[strip] ()
identifier[blog] = identifier[tag_prof_header] . identifier[find] ( literal[string] ,{ literal[string] : literal[string] })
keyword[if] identifier[blog] keyword[is] keyword[None] :
identifier[blog] = literal[string]
keyword[else] :
identifier[self] . identifier[blog] = identifier[blog] . identifier[text] . identifier[strip] ()
identifier[date_joined] = identifier[tag_prof_header] . identifier[find] ( literal[string] ,{ literal[string] : literal[string] }). identifier[find] ( literal[string] ,{ literal[string] : literal[string] })[ literal[string] ]
keyword[if] identifier[date_joined] keyword[is] keyword[None] :
identifier[self] . identifier[data_joined] = literal[string]
keyword[else] :
identifier[self] . identifier[date_joined] = identifier[date_joined] . identifier[strip] ()
identifier[self] . identifier[id] = identifier[tag_prof_nav] . identifier[find] ( literal[string] ,{ literal[string] : literal[string] })[ literal[string] ]
identifier[tweets] = identifier[tag_prof_nav] . identifier[find] ( literal[string] ,{ literal[string] : literal[string] })[ literal[string] ]
keyword[if] identifier[tweets] keyword[is] keyword[None] :
identifier[self] . identifier[tweets] = literal[int]
keyword[else] :
identifier[self] . identifier[tweets] = identifier[int] ( identifier[tweets] )
identifier[following] = identifier[tag_prof_nav] . identifier[find] ( literal[string] ,{ literal[string] : literal[string] }). identifier[find] ( literal[string] ,{ literal[string] : literal[string] })[ literal[string] ]
keyword[if] identifier[following] keyword[is] keyword[None] :
identifier[following] = literal[int]
keyword[else] :
identifier[self] . identifier[following] = identifier[int] ( identifier[following] )
identifier[followers] = identifier[tag_prof_nav] . identifier[find] ( literal[string] ,{ literal[string] : literal[string] }). identifier[find] ( literal[string] ,{ literal[string] : literal[string] })[ literal[string] ]
keyword[if] identifier[followers] keyword[is] keyword[None] :
identifier[self] . identifier[followers] = literal[int]
keyword[else] :
identifier[self] . identifier[followers] = identifier[int] ( identifier[followers] )
identifier[likes] = identifier[tag_prof_nav] . identifier[find] ( literal[string] ,{ literal[string] : literal[string] }). identifier[find] ( literal[string] ,{ literal[string] : literal[string] })[ literal[string] ]
keyword[if] identifier[likes] keyword[is] keyword[None] :
identifier[self] . identifier[likes] = literal[int]
keyword[else] :
identifier[self] . identifier[likes] = identifier[int] ( identifier[likes] )
identifier[lists] = identifier[tag_prof_nav] . identifier[find] ( literal[string] ,{ literal[string] : literal[string] })
keyword[if] identifier[lists] keyword[is] keyword[None] :
identifier[self] . identifier[lists] = literal[int]
keyword[elif] identifier[lists] . identifier[find] ( literal[string] ,{ literal[string] : literal[string] }) keyword[is] keyword[None] :
identifier[self] . identifier[lists] = literal[int]
keyword[else] :
identifier[lists] = identifier[lists] . identifier[find] ( literal[string] ,{ literal[string] : literal[string] }). identifier[text]
identifier[self] . identifier[lists] = identifier[int] ( identifier[lists] )
keyword[return] ( identifier[self] )
|
def from_soup(self, tag_prof_header, tag_prof_nav):
"""
Returns the scraped user data from a twitter user page.
:param tag_prof_header: captures the left hand part of user info
:param tag_prof_nav: captures the upper part of user info
:return: Returns a User object with captured data via beautifulsoup
"""
self.user = tag_prof_header.find('a', {'class': 'ProfileHeaderCard-nameLink u-textInheritColor js-nav'})['href'].strip('/')
self.full_name = tag_prof_header.find('a', {'class': 'ProfileHeaderCard-nameLink u-textInheritColor js-nav'}).text
location = tag_prof_header.find('span', {'class': 'ProfileHeaderCard-locationText u-dir'})
if location is None:
self.location = 'None' # depends on [control=['if'], data=[]]
else:
self.location = location.text.strip()
blog = tag_prof_header.find('span', {'class': 'ProfileHeaderCard-urlText u-dir'})
if blog is None:
blog = 'None' # depends on [control=['if'], data=['blog']]
else:
self.blog = blog.text.strip()
date_joined = tag_prof_header.find('div', {'class': 'ProfileHeaderCard-joinDate'}).find('span', {'class': 'ProfileHeaderCard-joinDateText js-tooltip u-dir'})['title']
if date_joined is None:
self.data_joined = 'Unknown' # depends on [control=['if'], data=[]]
else:
self.date_joined = date_joined.strip()
self.id = tag_prof_nav.find('div', {'class': 'ProfileNav'})['data-user-id']
tweets = tag_prof_nav.find('span', {'class': 'ProfileNav-value'})['data-count']
if tweets is None:
self.tweets = 0 # depends on [control=['if'], data=[]]
else:
self.tweets = int(tweets)
following = tag_prof_nav.find('li', {'class': 'ProfileNav-item ProfileNav-item--following'}).find('span', {'class': 'ProfileNav-value'})['data-count']
if following is None:
following = 0 # depends on [control=['if'], data=['following']]
else:
self.following = int(following)
followers = tag_prof_nav.find('li', {'class': 'ProfileNav-item ProfileNav-item--followers'}).find('span', {'class': 'ProfileNav-value'})['data-count']
if followers is None:
self.followers = 0 # depends on [control=['if'], data=[]]
else:
self.followers = int(followers)
likes = tag_prof_nav.find('li', {'class': 'ProfileNav-item ProfileNav-item--favorites'}).find('span', {'class': 'ProfileNav-value'})['data-count']
if likes is None:
self.likes = 0 # depends on [control=['if'], data=[]]
else:
self.likes = int(likes)
lists = tag_prof_nav.find('li', {'class': 'ProfileNav-item ProfileNav-item--lists'})
if lists is None:
self.lists = 0 # depends on [control=['if'], data=[]]
elif lists.find('span', {'class': 'ProfileNav-value'}) is None:
self.lists = 0 # depends on [control=['if'], data=[]]
else:
lists = lists.find('span', {'class': 'ProfileNav-value'}).text
self.lists = int(lists)
return self
|
def to_bytes(self):
    '''
    Serialize this LCAF address to bytes.

    Layout: rsvd1 | flags | type (uint:8) | rsvd2 | length (uint:16) | data,
    where length is the payload size in bytes.

    :return: the packed representation as a bytes object
    '''
    # Validate properties before serializing
    self.sanitize()
    # Header: reserved bits, zeroed flags, the LCAF type, more reserved bits
    bitstream = self._to_rsvd1()
    bitstream += self._to_flags()
    bitstream += BitArray('uint:8=%d' % self.lcaf_type)
    bitstream += self._to_rsvd2()
    # Construct the payload
    data = self._to_data_bytes()
    # BUGFIX: use floor division -- "/" yields a float in Python 3;
    # the length field must be an integer byte count
    data_length = data.len // 8
    bitstream += BitArray('uint:16=%d' % data_length)
    return (bitstream + data).bytes
|
def function[to_bytes, parameter[self]]:
constant[
Create bytes from properties
]
call[name[self].sanitize, parameter[]]
variable[bitstream] assign[=] call[name[self]._to_rsvd1, parameter[]]
<ast.AugAssign object at 0x7da1b0ae2e00>
<ast.AugAssign object at 0x7da1b0ae1690>
<ast.AugAssign object at 0x7da1b0ae22c0>
variable[data] assign[=] call[name[self]._to_data_bytes, parameter[]]
variable[data_length] assign[=] binary_operation[name[data].len / constant[8]]
<ast.AugAssign object at 0x7da1b0ae34c0>
return[binary_operation[name[bitstream] + name[data]].bytes]
|
keyword[def] identifier[to_bytes] ( identifier[self] ):
literal[string]
identifier[self] . identifier[sanitize] ()
identifier[bitstream] = identifier[self] . identifier[_to_rsvd1] ()
identifier[bitstream] += identifier[self] . identifier[_to_flags] ()
identifier[bitstream] += identifier[BitArray] ( literal[string] % identifier[self] . identifier[lcaf_type] )
identifier[bitstream] += identifier[self] . identifier[_to_rsvd2] ()
identifier[data] = identifier[self] . identifier[_to_data_bytes] ()
identifier[data_length] = identifier[data] . identifier[len] / literal[int]
identifier[bitstream] += identifier[BitArray] ( literal[string] % identifier[data_length] )
keyword[return] ( identifier[bitstream] + identifier[data] ). identifier[bytes]
|
def to_bytes(self):
"""
Create bytes from properties
"""
# Check properties
self.sanitize()
# Start with reserved bits
bitstream = self._to_rsvd1()
# Add zeroes for the flags
bitstream += self._to_flags()
# Add the type
bitstream += BitArray('uint:8=%d' % self.lcaf_type)
# Some more reserved bits
bitstream += self._to_rsvd2()
# Construct the data
data = self._to_data_bytes()
# Add the length
data_length = data.len / 8
bitstream += BitArray('uint:16=%d' % data_length)
return (bitstream + data).bytes
|
def patch(self, url, data=None, **kwargs):
    """Issue a PATCH request and return the resulting :class:`Response`.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary or bytes to send in the body of the :class:`Request`.
    :param **kwargs: Optional arguments that ``request`` takes.
    """
    # Delegate to the generic request dispatcher with the PATCH verb.
    response = self.request('patch', url, data=data, **kwargs)
    return response
|
def function[patch, parameter[self, url, data]]:
constant[Sends a PATCH request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary or bytes to send in the body of the :class:`Request`.
:param **kwargs: Optional arguments that ``request`` takes.
]
return[call[name[self].request, parameter[constant[patch], name[url]]]]
|
keyword[def] identifier[patch] ( identifier[self] , identifier[url] , identifier[data] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[self] . identifier[request] ( literal[string] , identifier[url] , identifier[data] = identifier[data] ,** identifier[kwargs] )
|
def patch(self, url, data=None, **kwargs):
"""Sends a PATCH request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary or bytes to send in the body of the :class:`Request`.
:param **kwargs: Optional arguments that ``request`` takes.
"""
return self.request('patch', url, data=data, **kwargs)
|
def parse_cond_and_hall(path_dir, doping_levels=None):
    """
    Parses the conductivity and Hall tensors
    Args:
        path_dir: Path containing .condtens / .halltens files
        doping_levels: ([float]) - doping lvls, parse outtrans to get this
    Returns:
        mu_steps, cond, seebeck, kappa, hall, pn_doping_levels,
        mu_doping, seebeck_doping, cond_doping, kappa_doping,
        hall_doping, carrier_conc
    """
    # Step 1: parse raw data but do not convert to final format
    t_steps = set()
    mu_steps = set()
    data_full = []
    data_hall = []
    data_doping_full = []
    data_doping_hall = []
    doping_levels = doping_levels or []
    # parse the full conductivity/Seebeck/kappa0/etc data;
    # also collect the temperature and chemical-potential grids
    with open(os.path.join(path_dir, "boltztrap.condtens"), 'r') as f:
        for line in f:
            if not line.startswith("#"):
                # split once per line instead of three times
                fields = line.split()
                mu_steps.add(float(fields[0]))
                t_steps.add(int(float(fields[1])))
                data_full.append([float(c) for c in fields])
    # parse the full Hall tensor
    with open(os.path.join(path_dir, "boltztrap.halltens"), 'r') as f:
        for line in f:
            if not line.startswith("#"):
                data_hall.append([float(c) for c in line.split()])
    if len(doping_levels) != 0:
        # parse doping levels version of full cond. tensor, etc.
        with open(
                os.path.join(path_dir, "boltztrap.condtens_fixdoping"),
                'r') as f:
            for line in f:
                if not line.startswith("#") and len(line) > 2:
                    data_doping_full.append([float(c)
                                             for c in line.split()])
        # parse doping levels version of full hall tensor
        with open(
                os.path.join(path_dir, "boltztrap.halltens_fixdoping"),
                'r') as f:
            for line in f:
                if not line.startswith("#") and len(line) > 2:
                    data_doping_hall.append(
                        [float(c) for c in line.split()])
    # Step 2: convert raw data to final format
    # sort t and mu_steps (b/c they are sets not lists)
    # and convert chemical potential from Ry to eV
    t_steps = sorted(t_steps)
    mu_steps = sorted(Energy(m, "Ry").to("eV") for m in mu_steps)
    # initialize output containers keyed by temperature
    # (removed the unused "dos_full" local from the original)
    cond = {t: [] for t in t_steps}
    seebeck = {t: [] for t in t_steps}
    kappa = {t: [] for t in t_steps}
    hall = {t: [] for t in t_steps}
    carrier_conc = {t: [] for t in t_steps}
    mu_doping = {'p': {t: [] for t in t_steps},
                 'n': {t: [] for t in t_steps}}
    seebeck_doping = {'p': {t: [] for t in t_steps},
                      'n': {t: [] for t in t_steps}}
    cond_doping = {'p': {t: [] for t in t_steps},
                   'n': {t: [] for t in t_steps}}
    kappa_doping = {'p': {t: [] for t in t_steps},
                    'n': {t: [] for t in t_steps}}
    hall_doping = {'p': {t: [] for t in t_steps},
                   'n': {t: [] for t in t_steps}}
    # split doping levels by carrier sign (n-type values stored positive)
    pn_doping_levels = {'p': [], 'n': []}
    for d in doping_levels:
        if d > 0:
            pn_doping_levels['p'].append(d)
        else:
            pn_doping_levels['n'].append(-d)
    # process raw conductivity data: each row carries three flattened
    # 3x3 tensors (cond, seebeck, kappa) after the mu/T/doping columns
    for d in data_full:
        temp, doping = d[1], d[2]
        carrier_conc[temp].append(doping)
        cond[temp].append(np.reshape(d[3:12], (3, 3)).tolist())
        seebeck[temp].append(np.reshape(d[12:21], (3, 3)).tolist())
        kappa[temp].append(np.reshape(d[21:30], (3, 3)).tolist())
    # process raw Hall data: each row carries a flattened 3x3x3 tensor
    for d in data_hall:
        temp = d[1]
        hall_tens = [np.reshape(d[3:12], (3, 3)).tolist(),
                     np.reshape(d[12:21], (3, 3)).tolist(),
                     np.reshape(d[21:30], (3, 3)).tolist()]
        hall[temp].append(hall_tens)
    # process doping conductivity data, etc.
    for d in data_doping_full:
        temp, doping, mu = d[0], d[1], d[-1]
        pn = 'p' if doping > 0 else 'n'
        mu_doping[pn][temp].append(Energy(mu, "Ry").to("eV"))
        cond_doping[pn][temp].append(
            np.reshape(d[2:11], (3, 3)).tolist())
        seebeck_doping[pn][temp].append(
            np.reshape(d[11:20], (3, 3)).tolist())
        kappa_doping[pn][temp].append(
            np.reshape(d[20:29], (3, 3)).tolist())
    # process doping Hall data (the trailing mu column is not needed here)
    for d in data_doping_hall:
        temp, doping = d[0], d[1]
        pn = 'p' if doping > 0 else 'n'
        hall_tens = [np.reshape(d[2:11], (3, 3)).tolist(),
                     np.reshape(d[11:20], (3, 3)).tolist(),
                     np.reshape(d[20:29], (3, 3)).tolist()]
        hall_doping[pn][temp].append(hall_tens)
    return mu_steps, cond, seebeck, kappa, hall, pn_doping_levels, \
        mu_doping, seebeck_doping, cond_doping, kappa_doping, \
        hall_doping, carrier_conc
|
def function[parse_cond_and_hall, parameter[path_dir, doping_levels]]:
constant[
Parses the conductivity and Hall tensors
Args:
path_dir: Path containing .condtens / .halltens files
doping_levels: ([float]) - doping lvls, parse outtrans to get this
Returns:
mu_steps, cond, seebeck, kappa, hall, pn_doping_levels,
mu_doping, seebeck_doping, cond_doping, kappa_doping,
hall_doping, carrier_conc
]
variable[t_steps] assign[=] call[name[set], parameter[]]
variable[mu_steps] assign[=] call[name[set], parameter[]]
variable[data_full] assign[=] list[[]]
variable[data_hall] assign[=] list[[]]
variable[data_doping_full] assign[=] list[[]]
variable[data_doping_hall] assign[=] list[[]]
variable[doping_levels] assign[=] <ast.BoolOp object at 0x7da1b1ce7a00>
with call[name[open], parameter[call[name[os].path.join, parameter[name[path_dir], constant[boltztrap.condtens]]], constant[r]]] begin[:]
for taget[name[line]] in starred[name[f]] begin[:]
if <ast.UnaryOp object at 0x7da1b1ce7640> begin[:]
call[name[mu_steps].add, parameter[call[name[float], parameter[call[call[name[line].split, parameter[]]][constant[0]]]]]]
call[name[t_steps].add, parameter[call[name[int], parameter[call[name[float], parameter[call[call[name[line].split, parameter[]]][constant[1]]]]]]]]
call[name[data_full].append, parameter[<ast.ListComp object at 0x7da1b1ce6fb0>]]
with call[name[open], parameter[call[name[os].path.join, parameter[name[path_dir], constant[boltztrap.halltens]]], constant[r]]] begin[:]
for taget[name[line]] in starred[name[f]] begin[:]
if <ast.UnaryOp object at 0x7da1b1ce6a70> begin[:]
call[name[data_hall].append, parameter[<ast.ListComp object at 0x7da1b1ce68c0>]]
if compare[call[name[len], parameter[name[doping_levels]]] not_equal[!=] constant[0]] begin[:]
with call[name[open], parameter[call[name[os].path.join, parameter[name[path_dir], constant[boltztrap.condtens_fixdoping]]], constant[r]]] begin[:]
for taget[name[line]] in starred[name[f]] begin[:]
if <ast.BoolOp object at 0x7da1b1ce6260> begin[:]
call[name[data_doping_full].append, parameter[<ast.ListComp object at 0x7da1b1ce5f90>]]
with call[name[open], parameter[call[name[os].path.join, parameter[name[path_dir], constant[boltztrap.halltens_fixdoping]]], constant[r]]] begin[:]
for taget[name[line]] in starred[name[f]] begin[:]
if <ast.BoolOp object at 0x7da1b1ce5a50> begin[:]
call[name[data_doping_hall].append, parameter[<ast.ListComp object at 0x7da1b1ce5780>]]
variable[t_steps] assign[=] call[name[sorted], parameter[<ast.ListComp object at 0x7da1b1ce54b0>]]
variable[mu_steps] assign[=] call[name[sorted], parameter[<ast.ListComp object at 0x7da1b1ce52d0>]]
variable[cond] assign[=] <ast.DictComp object at 0x7da1b1ce5030>
variable[seebeck] assign[=] <ast.DictComp object at 0x7da1b1c23490>
variable[kappa] assign[=] <ast.DictComp object at 0x7da1b1c232e0>
variable[hall] assign[=] <ast.DictComp object at 0x7da1b1c23130>
variable[carrier_conc] assign[=] <ast.DictComp object at 0x7da1b1c22f80>
variable[dos_full] assign[=] dictionary[[<ast.Constant object at 0x7da1b1c22da0>, <ast.Constant object at 0x7da1b1c22d70>], [<ast.List object at 0x7da1b1c22d40>, <ast.List object at 0x7da1b1c22d10>]]
variable[mu_doping] assign[=] dictionary[[<ast.Constant object at 0x7da1b1c22c50>, <ast.Constant object at 0x7da1b1c22c20>], [<ast.DictComp object at 0x7da1b1c22bf0>, <ast.DictComp object at 0x7da1b1c22aa0>]]
variable[seebeck_doping] assign[=] dictionary[[<ast.Constant object at 0x7da1b1c228c0>, <ast.Constant object at 0x7da1b1c22890>], [<ast.DictComp object at 0x7da1b1c22860>, <ast.DictComp object at 0x7da1b1c22710>]]
variable[cond_doping] assign[=] dictionary[[<ast.Constant object at 0x7da1b1c22530>, <ast.Constant object at 0x7da1b1c22500>], [<ast.DictComp object at 0x7da1b1c224d0>, <ast.DictComp object at 0x7da1b1c22380>]]
variable[kappa_doping] assign[=] dictionary[[<ast.Constant object at 0x7da1b1c221a0>, <ast.Constant object at 0x7da1b1c22170>], [<ast.DictComp object at 0x7da1b1c22140>, <ast.DictComp object at 0x7da1b1c21ff0>]]
variable[hall_doping] assign[=] dictionary[[<ast.Constant object at 0x7da1b1c21e10>, <ast.Constant object at 0x7da1b1c21de0>], [<ast.DictComp object at 0x7da1b1c21db0>, <ast.DictComp object at 0x7da1b1c21c60>]]
variable[pn_doping_levels] assign[=] dictionary[[<ast.Constant object at 0x7da1b1c21a80>, <ast.Constant object at 0x7da1b1c21a50>], [<ast.List object at 0x7da1b1c21a20>, <ast.List object at 0x7da1b1c219f0>]]
for taget[name[d]] in starred[name[doping_levels]] begin[:]
if compare[name[d] greater[>] constant[0]] begin[:]
call[call[name[pn_doping_levels]][constant[p]].append, parameter[name[d]]]
for taget[name[d]] in starred[name[data_full]] begin[:]
<ast.Tuple object at 0x7da1b1c21450> assign[=] tuple[[<ast.Subscript object at 0x7da1b1c21390>, <ast.Subscript object at 0x7da1b1c21300>]]
call[call[name[carrier_conc]][name[temp]].append, parameter[name[doping]]]
call[call[name[cond]][name[temp]].append, parameter[call[call[name[np].reshape, parameter[call[name[d]][<ast.Slice object at 0x7da1b1c20e80>], tuple[[<ast.Constant object at 0x7da1b1c20dc0>, <ast.Constant object at 0x7da1b1c20d90>]]]].tolist, parameter[]]]]
call[call[name[seebeck]][name[temp]].append, parameter[call[call[name[np].reshape, parameter[call[name[d]][<ast.Slice object at 0x7da1b1c20ac0>], tuple[[<ast.Constant object at 0x7da1b1c20a00>, <ast.Constant object at 0x7da1b1c209d0>]]]].tolist, parameter[]]]]
call[call[name[kappa]][name[temp]].append, parameter[call[call[name[np].reshape, parameter[call[name[d]][<ast.Slice object at 0x7da1b1c20700>], tuple[[<ast.Constant object at 0x7da1b1c20640>, <ast.Constant object at 0x7da1b1c20610>]]]].tolist, parameter[]]]]
for taget[name[d]] in starred[name[data_hall]] begin[:]
<ast.Tuple object at 0x7da1b1c204f0> assign[=] tuple[[<ast.Subscript object at 0x7da1b1c20430>, <ast.Subscript object at 0x7da1b1c203a0>]]
variable[hall_tens] assign[=] list[[<ast.Call object at 0x7da1b1c20280>, <ast.Call object at 0x7da1b1c136a0>, <ast.Call object at 0x7da1b1c12fe0>]]
call[call[name[hall]][name[temp]].append, parameter[name[hall_tens]]]
for taget[name[d]] in starred[name[data_doping_full]] begin[:]
<ast.Tuple object at 0x7da1b1c10b80> assign[=] tuple[[<ast.Subscript object at 0x7da1b1c11060>, <ast.Subscript object at 0x7da1b1c10fd0>, <ast.Subscript object at 0x7da1b1c10f40>]]
variable[pn] assign[=] <ast.IfExp object at 0x7da1b1c13ee0>
call[call[call[name[mu_doping]][name[pn]]][name[temp]].append, parameter[call[call[name[Energy], parameter[name[mu], constant[Ry]]].to, parameter[constant[eV]]]]]
call[call[call[name[cond_doping]][name[pn]]][name[temp]].append, parameter[call[call[name[np].reshape, parameter[call[name[d]][<ast.Slice object at 0x7da1b1c138b0>], tuple[[<ast.Constant object at 0x7da1b1c10e80>, <ast.Constant object at 0x7da1b1c10eb0>]]]].tolist, parameter[]]]]
call[call[call[name[seebeck_doping]][name[pn]]][name[temp]].append, parameter[call[call[name[np].reshape, parameter[call[name[d]][<ast.Slice object at 0x7da1b1c13580>], tuple[[<ast.Constant object at 0x7da1b1c13700>, <ast.Constant object at 0x7da1b1c13730>]]]].tolist, parameter[]]]]
call[call[call[name[kappa_doping]][name[pn]]][name[temp]].append, parameter[call[call[name[np].reshape, parameter[call[name[d]][<ast.Slice object at 0x7da1b1c13220>], tuple[[<ast.Constant object at 0x7da1b1c10970>, <ast.Constant object at 0x7da1b1c109a0>]]]].tolist, parameter[]]]]
for taget[name[d]] in starred[name[data_doping_hall]] begin[:]
<ast.Tuple object at 0x7da1b1c10a30> assign[=] tuple[[<ast.Subscript object at 0x7da1b1c12920>, <ast.Subscript object at 0x7da1b1c12a40>, <ast.Subscript object at 0x7da1b1c12860>]]
variable[pn] assign[=] <ast.IfExp object at 0x7da1b1c10220>
variable[hall_tens] assign[=] list[[<ast.Call object at 0x7da1b1c11120>, <ast.Call object at 0x7da1b1c11750>, <ast.Call object at 0x7da1b1c115d0>]]
call[call[call[name[hall_doping]][name[pn]]][name[temp]].append, parameter[name[hall_tens]]]
return[tuple[[<ast.Name object at 0x7da1b1c12d10>, <ast.Name object at 0x7da1b1c12d40>, <ast.Name object at 0x7da1b1c12d70>, <ast.Name object at 0x7da1b1c12da0>, <ast.Name object at 0x7da1b1c10a90>, <ast.Name object at 0x7da1b1c12aa0>, <ast.Name object at 0x7da1b1c113c0>, <ast.Name object at 0x7da1b1c11300>, <ast.Name object at 0x7da1b1c112d0>, <ast.Name object at 0x7da1b1c11330>, <ast.Name object at 0x7da1b1c112a0>, <ast.Name object at 0x7da1b1c11360>]]]
|
keyword[def] identifier[parse_cond_and_hall] ( identifier[path_dir] , identifier[doping_levels] = keyword[None] ):
literal[string]
identifier[t_steps] = identifier[set] ()
identifier[mu_steps] = identifier[set] ()
identifier[data_full] =[]
identifier[data_hall] =[]
identifier[data_doping_full] =[]
identifier[data_doping_hall] =[]
identifier[doping_levels] = identifier[doping_levels] keyword[or] []
keyword[with] identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[path_dir] , literal[string] ), literal[string] ) keyword[as] identifier[f] :
keyword[for] identifier[line] keyword[in] identifier[f] :
keyword[if] keyword[not] identifier[line] . identifier[startswith] ( literal[string] ):
identifier[mu_steps] . identifier[add] ( identifier[float] ( identifier[line] . identifier[split] ()[ literal[int] ]))
identifier[t_steps] . identifier[add] ( identifier[int] ( identifier[float] ( identifier[line] . identifier[split] ()[ literal[int] ])))
identifier[data_full] . identifier[append] ([ identifier[float] ( identifier[c] ) keyword[for] identifier[c] keyword[in] identifier[line] . identifier[split] ()])
keyword[with] identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[path_dir] , literal[string] ), literal[string] ) keyword[as] identifier[f] :
keyword[for] identifier[line] keyword[in] identifier[f] :
keyword[if] keyword[not] identifier[line] . identifier[startswith] ( literal[string] ):
identifier[data_hall] . identifier[append] ([ identifier[float] ( identifier[c] ) keyword[for] identifier[c] keyword[in] identifier[line] . identifier[split] ()])
keyword[if] identifier[len] ( identifier[doping_levels] )!= literal[int] :
keyword[with] identifier[open] (
identifier[os] . identifier[path] . identifier[join] ( identifier[path_dir] , literal[string] ),
literal[string] ) keyword[as] identifier[f] :
keyword[for] identifier[line] keyword[in] identifier[f] :
keyword[if] keyword[not] identifier[line] . identifier[startswith] ( literal[string] ) keyword[and] identifier[len] ( identifier[line] )> literal[int] :
identifier[data_doping_full] . identifier[append] ([ identifier[float] ( identifier[c] )
keyword[for] identifier[c] keyword[in] identifier[line] . identifier[split] ()])
keyword[with] identifier[open] (
identifier[os] . identifier[path] . identifier[join] ( identifier[path_dir] , literal[string] ),
literal[string] ) keyword[as] identifier[f] :
keyword[for] identifier[line] keyword[in] identifier[f] :
keyword[if] keyword[not] identifier[line] . identifier[startswith] ( literal[string] ) keyword[and] identifier[len] ( identifier[line] )> literal[int] :
identifier[data_doping_hall] . identifier[append] (
[ identifier[float] ( identifier[c] ) keyword[for] identifier[c] keyword[in] identifier[line] . identifier[split] ()])
identifier[t_steps] = identifier[sorted] ([ identifier[t] keyword[for] identifier[t] keyword[in] identifier[t_steps] ])
identifier[mu_steps] = identifier[sorted] ([ identifier[Energy] ( identifier[m] , literal[string] ). identifier[to] ( literal[string] ) keyword[for] identifier[m] keyword[in] identifier[mu_steps] ])
identifier[cond] ={ identifier[t] :[] keyword[for] identifier[t] keyword[in] identifier[t_steps] }
identifier[seebeck] ={ identifier[t] :[] keyword[for] identifier[t] keyword[in] identifier[t_steps] }
identifier[kappa] ={ identifier[t] :[] keyword[for] identifier[t] keyword[in] identifier[t_steps] }
identifier[hall] ={ identifier[t] :[] keyword[for] identifier[t] keyword[in] identifier[t_steps] }
identifier[carrier_conc] ={ identifier[t] :[] keyword[for] identifier[t] keyword[in] identifier[t_steps] }
identifier[dos_full] ={ literal[string] :[], literal[string] :[]}
identifier[mu_doping] ={ literal[string] :{ identifier[t] :[] keyword[for] identifier[t] keyword[in] identifier[t_steps] },
literal[string] :{ identifier[t] :[] keyword[for] identifier[t] keyword[in] identifier[t_steps] }}
identifier[seebeck_doping] ={ literal[string] :{ identifier[t] :[] keyword[for] identifier[t] keyword[in] identifier[t_steps] },
literal[string] :{ identifier[t] :[] keyword[for] identifier[t] keyword[in] identifier[t_steps] }}
identifier[cond_doping] ={ literal[string] :{ identifier[t] :[] keyword[for] identifier[t] keyword[in] identifier[t_steps] },
literal[string] :{ identifier[t] :[] keyword[for] identifier[t] keyword[in] identifier[t_steps] }}
identifier[kappa_doping] ={ literal[string] :{ identifier[t] :[] keyword[for] identifier[t] keyword[in] identifier[t_steps] },
literal[string] :{ identifier[t] :[] keyword[for] identifier[t] keyword[in] identifier[t_steps] }}
identifier[hall_doping] ={ literal[string] :{ identifier[t] :[] keyword[for] identifier[t] keyword[in] identifier[t_steps] },
literal[string] :{ identifier[t] :[] keyword[for] identifier[t] keyword[in] identifier[t_steps] }}
identifier[pn_doping_levels] ={ literal[string] :[], literal[string] :[]}
keyword[for] identifier[d] keyword[in] identifier[doping_levels] :
keyword[if] identifier[d] > literal[int] :
identifier[pn_doping_levels] [ literal[string] ]. identifier[append] ( identifier[d] )
keyword[else] :
identifier[pn_doping_levels] [ literal[string] ]. identifier[append] (- identifier[d] )
keyword[for] identifier[d] keyword[in] identifier[data_full] :
identifier[temp] , identifier[doping] = identifier[d] [ literal[int] ], identifier[d] [ literal[int] ]
identifier[carrier_conc] [ identifier[temp] ]. identifier[append] ( identifier[doping] )
identifier[cond] [ identifier[temp] ]. identifier[append] ( identifier[np] . identifier[reshape] ( identifier[d] [ literal[int] : literal[int] ],( literal[int] , literal[int] )). identifier[tolist] ())
identifier[seebeck] [ identifier[temp] ]. identifier[append] ( identifier[np] . identifier[reshape] ( identifier[d] [ literal[int] : literal[int] ],( literal[int] , literal[int] )). identifier[tolist] ())
identifier[kappa] [ identifier[temp] ]. identifier[append] ( identifier[np] . identifier[reshape] ( identifier[d] [ literal[int] : literal[int] ],( literal[int] , literal[int] )). identifier[tolist] ())
keyword[for] identifier[d] keyword[in] identifier[data_hall] :
identifier[temp] , identifier[doping] = identifier[d] [ literal[int] ], identifier[d] [ literal[int] ]
identifier[hall_tens] =[ identifier[np] . identifier[reshape] ( identifier[d] [ literal[int] : literal[int] ],( literal[int] , literal[int] )). identifier[tolist] (),
identifier[np] . identifier[reshape] ( identifier[d] [ literal[int] : literal[int] ],( literal[int] , literal[int] )). identifier[tolist] (),
identifier[np] . identifier[reshape] ( identifier[d] [ literal[int] : literal[int] ],( literal[int] , literal[int] )). identifier[tolist] ()]
identifier[hall] [ identifier[temp] ]. identifier[append] ( identifier[hall_tens] )
keyword[for] identifier[d] keyword[in] identifier[data_doping_full] :
identifier[temp] , identifier[doping] , identifier[mu] = identifier[d] [ literal[int] ], identifier[d] [ literal[int] ], identifier[d] [- literal[int] ]
identifier[pn] = literal[string] keyword[if] identifier[doping] > literal[int] keyword[else] literal[string]
identifier[mu_doping] [ identifier[pn] ][ identifier[temp] ]. identifier[append] ( identifier[Energy] ( identifier[mu] , literal[string] ). identifier[to] ( literal[string] ))
identifier[cond_doping] [ identifier[pn] ][ identifier[temp] ]. identifier[append] (
identifier[np] . identifier[reshape] ( identifier[d] [ literal[int] : literal[int] ],( literal[int] , literal[int] )). identifier[tolist] ())
identifier[seebeck_doping] [ identifier[pn] ][ identifier[temp] ]. identifier[append] (
identifier[np] . identifier[reshape] ( identifier[d] [ literal[int] : literal[int] ],( literal[int] , literal[int] )). identifier[tolist] ())
identifier[kappa_doping] [ identifier[pn] ][ identifier[temp] ]. identifier[append] (
identifier[np] . identifier[reshape] ( identifier[d] [ literal[int] : literal[int] ],( literal[int] , literal[int] )). identifier[tolist] ())
keyword[for] identifier[d] keyword[in] identifier[data_doping_hall] :
identifier[temp] , identifier[doping] , identifier[mu] = identifier[d] [ literal[int] ], identifier[d] [ literal[int] ], identifier[d] [- literal[int] ]
identifier[pn] = literal[string] keyword[if] identifier[doping] > literal[int] keyword[else] literal[string]
identifier[hall_tens] =[ identifier[np] . identifier[reshape] ( identifier[d] [ literal[int] : literal[int] ],( literal[int] , literal[int] )). identifier[tolist] (),
identifier[np] . identifier[reshape] ( identifier[d] [ literal[int] : literal[int] ],( literal[int] , literal[int] )). identifier[tolist] (),
identifier[np] . identifier[reshape] ( identifier[d] [ literal[int] : literal[int] ],( literal[int] , literal[int] )). identifier[tolist] ()]
identifier[hall_doping] [ identifier[pn] ][ identifier[temp] ]. identifier[append] ( identifier[hall_tens] )
keyword[return] identifier[mu_steps] , identifier[cond] , identifier[seebeck] , identifier[kappa] , identifier[hall] , identifier[pn_doping_levels] , identifier[mu_doping] , identifier[seebeck_doping] , identifier[cond_doping] , identifier[kappa_doping] , identifier[hall_doping] , identifier[carrier_conc]
|
def parse_cond_and_hall(path_dir, doping_levels=None):
"""
Parses the conductivity and Hall tensors
Args:
path_dir: Path containing .condtens / .halltens files
doping_levels: ([float]) - doping lvls, parse outtrans to get this
Returns:
mu_steps, cond, seebeck, kappa, hall, pn_doping_levels,
mu_doping, seebeck_doping, cond_doping, kappa_doping,
hall_doping, carrier_conc
"""
# Step 1: parse raw data but do not convert to final format
t_steps = set()
mu_steps = set()
data_full = []
data_hall = []
data_doping_full = []
data_doping_hall = []
doping_levels = doping_levels or []
# parse the full conductivity/Seebeck/kappa0/etc data
## also initialize t_steps and mu_steps
with open(os.path.join(path_dir, 'boltztrap.condtens'), 'r') as f:
for line in f:
if not line.startswith('#'):
mu_steps.add(float(line.split()[0]))
t_steps.add(int(float(line.split()[1])))
data_full.append([float(c) for c in line.split()]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['f']]
# parse the full Hall tensor
with open(os.path.join(path_dir, 'boltztrap.halltens'), 'r') as f:
for line in f:
if not line.startswith('#'):
data_hall.append([float(c) for c in line.split()]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['f']]
if len(doping_levels) != 0:
# parse doping levels version of full cond. tensor, etc.
with open(os.path.join(path_dir, 'boltztrap.condtens_fixdoping'), 'r') as f:
for line in f:
if not line.startswith('#') and len(line) > 2:
data_doping_full.append([float(c) for c in line.split()]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['f']]
# parse doping levels version of full hall tensor
with open(os.path.join(path_dir, 'boltztrap.halltens_fixdoping'), 'r') as f:
for line in f:
if not line.startswith('#') and len(line) > 2:
data_doping_hall.append([float(c) for c in line.split()]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=[]]
# Step 2: convert raw data to final format
# sort t and mu_steps (b/c they are sets not lists)
# and convert to correct energy
t_steps = sorted([t for t in t_steps])
mu_steps = sorted([Energy(m, 'Ry').to('eV') for m in mu_steps])
# initialize output variables - could use defaultdict instead
# I am leaving things like this for clarity
cond = {t: [] for t in t_steps}
seebeck = {t: [] for t in t_steps}
kappa = {t: [] for t in t_steps}
hall = {t: [] for t in t_steps}
carrier_conc = {t: [] for t in t_steps}
dos_full = {'energy': [], 'density': []}
mu_doping = {'p': {t: [] for t in t_steps}, 'n': {t: [] for t in t_steps}}
seebeck_doping = {'p': {t: [] for t in t_steps}, 'n': {t: [] for t in t_steps}}
cond_doping = {'p': {t: [] for t in t_steps}, 'n': {t: [] for t in t_steps}}
kappa_doping = {'p': {t: [] for t in t_steps}, 'n': {t: [] for t in t_steps}}
hall_doping = {'p': {t: [] for t in t_steps}, 'n': {t: [] for t in t_steps}}
# process doping levels
pn_doping_levels = {'p': [], 'n': []}
for d in doping_levels:
if d > 0:
pn_doping_levels['p'].append(d) # depends on [control=['if'], data=['d']]
else:
pn_doping_levels['n'].append(-d) # depends on [control=['for'], data=['d']]
# process raw conductivity data, etc.
for d in data_full:
(temp, doping) = (d[1], d[2])
carrier_conc[temp].append(doping)
cond[temp].append(np.reshape(d[3:12], (3, 3)).tolist())
seebeck[temp].append(np.reshape(d[12:21], (3, 3)).tolist())
kappa[temp].append(np.reshape(d[21:30], (3, 3)).tolist()) # depends on [control=['for'], data=['d']]
# process raw Hall data
for d in data_hall:
(temp, doping) = (d[1], d[2])
hall_tens = [np.reshape(d[3:12], (3, 3)).tolist(), np.reshape(d[12:21], (3, 3)).tolist(), np.reshape(d[21:30], (3, 3)).tolist()]
hall[temp].append(hall_tens) # depends on [control=['for'], data=['d']]
# process doping conductivity data, etc.
for d in data_doping_full:
(temp, doping, mu) = (d[0], d[1], d[-1])
pn = 'p' if doping > 0 else 'n'
mu_doping[pn][temp].append(Energy(mu, 'Ry').to('eV'))
cond_doping[pn][temp].append(np.reshape(d[2:11], (3, 3)).tolist())
seebeck_doping[pn][temp].append(np.reshape(d[11:20], (3, 3)).tolist())
kappa_doping[pn][temp].append(np.reshape(d[20:29], (3, 3)).tolist()) # depends on [control=['for'], data=['d']]
# process doping Hall data
for d in data_doping_hall:
(temp, doping, mu) = (d[0], d[1], d[-1])
pn = 'p' if doping > 0 else 'n'
hall_tens = [np.reshape(d[2:11], (3, 3)).tolist(), np.reshape(d[11:20], (3, 3)).tolist(), np.reshape(d[20:29], (3, 3)).tolist()]
hall_doping[pn][temp].append(hall_tens) # depends on [control=['for'], data=['d']]
return (mu_steps, cond, seebeck, kappa, hall, pn_doping_levels, mu_doping, seebeck_doping, cond_doping, kappa_doping, hall_doping, carrier_conc)
|
def mobile_template(template):
    """
    Decorator factory that makes a view mobile-aware.

    The *template* string may contain a ``{...}`` segment, e.g.
    ``'a/{mobile/}b.html'``.  When the current request has a truthy
    ``MOBILE`` attribute the segment content is kept
    (``a/mobile/b.html``); otherwise it is stripped (``a/b.html``).
    The resolved template name is injected into the wrapped view as the
    ``template`` keyword argument.

    Useful when the mobile view shares its context with the desktop view
    and only the template differs.
    """
    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            # Only rewrite the template when an app context carrying a
            # request is available; otherwise call through unchanged.
            ctx = stack.top
            if ctx is None or not hasattr(ctx, 'request'):
                return f(*args, **kwargs)
            is_mobile = getattr(ctx.request, 'MOBILE', None)
            replacement = r'\1' if is_mobile else ''
            kwargs['template'] = re.sub(r'{(.+?)}', replacement, template)
            return f(*args, **kwargs)
        return wrapper
    return decorator
|
def function[mobile_template, parameter[template]]:
constant[
Mark a function as mobile-ready and pass a mobile template if MOBILE.
For example::
@mobile_template('a/{mobile/}b.html')
def view(template=None):
...
if ``request.MOBILE=True`` the template will be `a/mobile/b.html`.
if ``request.MOBILE=False`` the template will be `a/b.html`.
This function is useful if the mobile view uses the same context but a
different template.
]
def function[decorator, parameter[f]]:
def function[wrapper, parameter[]]:
variable[ctx] assign[=] name[stack].top
if <ast.BoolOp object at 0x7da1b0cf47f0> begin[:]
variable[request] assign[=] name[ctx].request
variable[is_mobile] assign[=] call[name[getattr], parameter[name[request], constant[MOBILE], constant[None]]]
call[name[kwargs]][constant[template]] assign[=] call[name[re].sub, parameter[constant[{(.+?)}], <ast.IfExp object at 0x7da1b0cf4c40>, name[template]]]
return[call[name[f], parameter[<ast.Starred object at 0x7da1b0cf5720>]]]
return[name[wrapper]]
return[name[decorator]]
|
keyword[def] identifier[mobile_template] ( identifier[template] ):
literal[string]
keyword[def] identifier[decorator] ( identifier[f] ):
@ identifier[functools] . identifier[wraps] ( identifier[f] )
keyword[def] identifier[wrapper] (* identifier[args] ,** identifier[kwargs] ):
identifier[ctx] = identifier[stack] . identifier[top]
keyword[if] identifier[ctx] keyword[is] keyword[not] keyword[None] keyword[and] identifier[hasattr] ( identifier[ctx] , literal[string] ):
identifier[request] = identifier[ctx] . identifier[request]
identifier[is_mobile] = identifier[getattr] ( identifier[request] , literal[string] , keyword[None] )
identifier[kwargs] [ literal[string] ]= identifier[re] . identifier[sub] ( literal[string] ,
literal[string] keyword[if] identifier[is_mobile] keyword[else] literal[string] ,
identifier[template] )
keyword[return] identifier[f] (* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[wrapper]
keyword[return] identifier[decorator]
|
def mobile_template(template):
"""
Mark a function as mobile-ready and pass a mobile template if MOBILE.
For example::
@mobile_template('a/{mobile/}b.html')
def view(template=None):
...
if ``request.MOBILE=True`` the template will be `a/mobile/b.html`.
if ``request.MOBILE=False`` the template will be `a/b.html`.
This function is useful if the mobile view uses the same context but a
different template.
"""
def decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
ctx = stack.top
if ctx is not None and hasattr(ctx, 'request'):
request = ctx.request
is_mobile = getattr(request, 'MOBILE', None)
kwargs['template'] = re.sub('{(.+?)}', '\\1' if is_mobile else '', template) # depends on [control=['if'], data=[]]
return f(*args, **kwargs)
return wrapper
return decorator
|
def load_known_hosts(self, filename=None):
    """Load host keys from an openssh :file:`known_hosts`-style file.

    May be called multiple times; each call merges keys into
    ``self._host_keys``.

    :param filename: path to the known-hosts file.  When omitted, the
        default locations are tried in order: :file:`~/.ssh/known_hosts`
        (POSIX) and then :file:`~/ssh/known_hosts` (Windows layout).
        Missing default files are silently ignored, but an explicit
        *filename* that fails to load lets the ``IOError`` propagate.
    """
    if filename is not None:
        # Explicit path: load as-is and let errors reach the caller.
        self._host_keys.load(filename)
        return
    # No path given: try the conventional locations, stopping at the
    # first one that loads successfully.
    for candidate in ('~/.ssh/known_hosts', '~/ssh/known_hosts'):
        try:
            self._host_keys.load(os.path.expanduser(candidate))
        except IOError:
            continue
        return
|
def function[load_known_hosts, parameter[self, filename]]:
constant[Load host keys from an openssh :file:`known_hosts`-style file. Can
be called multiple times.
If *filename* is not specified, looks in the default locations i.e. :file:`~/.ssh/known_hosts` and :file:`~/ssh/known_hosts` for Windows.
]
if compare[name[filename] is constant[None]] begin[:]
variable[filename] assign[=] call[name[os].path.expanduser, parameter[constant[~/.ssh/known_hosts]]]
<ast.Try object at 0x7da204566ad0>
|
keyword[def] identifier[load_known_hosts] ( identifier[self] , identifier[filename] = keyword[None] ):
literal[string]
keyword[if] identifier[filename] keyword[is] keyword[None] :
identifier[filename] = identifier[os] . identifier[path] . identifier[expanduser] ( literal[string] )
keyword[try] :
identifier[self] . identifier[_host_keys] . identifier[load] ( identifier[filename] )
keyword[except] identifier[IOError] :
identifier[filename] = identifier[os] . identifier[path] . identifier[expanduser] ( literal[string] )
keyword[try] :
identifier[self] . identifier[_host_keys] . identifier[load] ( identifier[filename] )
keyword[except] identifier[IOError] :
keyword[pass]
keyword[else] :
identifier[self] . identifier[_host_keys] . identifier[load] ( identifier[filename] )
|
def load_known_hosts(self, filename=None):
"""Load host keys from an openssh :file:`known_hosts`-style file. Can
be called multiple times.
If *filename* is not specified, looks in the default locations i.e. :file:`~/.ssh/known_hosts` and :file:`~/ssh/known_hosts` for Windows.
"""
if filename is None:
filename = os.path.expanduser('~/.ssh/known_hosts')
try:
self._host_keys.load(filename) # depends on [control=['try'], data=[]]
except IOError:
# for windows
filename = os.path.expanduser('~/ssh/known_hosts')
try:
self._host_keys.load(filename) # depends on [control=['try'], data=[]]
except IOError:
pass # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['filename']]
else:
self._host_keys.load(filename)
|
def remove_peer_from_bgp_speaker(self, speaker_id, body=None):
    """Issue a PUT request that detaches a BGP peer from the given speaker.

    :param speaker_id: identifier interpolated into ``bgp_speaker_path``.
    :param body: optional request body forwarded to ``self.put``.
    :returns: whatever ``self.put`` returns.
    """
    url = self.bgp_speaker_path % speaker_id
    return self.put(url + "/remove_bgp_peer", body=body)
|
def function[remove_peer_from_bgp_speaker, parameter[self, speaker_id, body]]:
constant[Removes a peer from BGP speaker.]
return[call[name[self].put, parameter[binary_operation[binary_operation[name[self].bgp_speaker_path <ast.Mod object at 0x7da2590d6920> name[speaker_id]] + constant[/remove_bgp_peer]]]]]
|
keyword[def] identifier[remove_peer_from_bgp_speaker] ( identifier[self] , identifier[speaker_id] , identifier[body] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[put] (( identifier[self] . identifier[bgp_speaker_path] % identifier[speaker_id] )+
literal[string] , identifier[body] = identifier[body] )
|
def remove_peer_from_bgp_speaker(self, speaker_id, body=None):
"""Removes a peer from BGP speaker."""
return self.put(self.bgp_speaker_path % speaker_id + '/remove_bgp_peer', body=body)
|
def from_file(cls, f, fname=None, readers=None):
    """Create a Document from a file.
    Usage::
        with open('paper.html', 'rb') as f:
            doc = Document.from_file(f)
    .. note::
        Always open files in binary mode by using the 'rb' parameter.
    :param file|string f: A file-like object or path to a file.
    :param string fname: (Optional) The filename. Used to help determine file format.
    :param list[chemdataextractor.reader.base.BaseReader] readers: (Optional) List of readers to use.
    """
    # A string argument is a filesystem path; open it in binary mode.
    fileobj = io.open(f, 'rb') if isinstance(f, six.string_types) else f
    # Fall back to the file object's own name when none was supplied.
    if not fname:
        fname = getattr(fileobj, 'name', fname)
    return cls.from_string(fileobj.read(), fname=fname, readers=readers)
|
def function[from_file, parameter[cls, f, fname, readers]]:
constant[Create a Document from a file.
Usage::
with open('paper.html', 'rb') as f:
doc = Document.from_file(f)
.. note::
Always open files in binary mode by using the 'rb' parameter.
:param file|string f: A file-like object or path to a file.
:param string fname: (Optional) The filename. Used to help determine file format.
:param list[chemdataextractor.reader.base.BaseReader] readers: (Optional) List of readers to use.
]
if call[name[isinstance], parameter[name[f], name[six].string_types]] begin[:]
variable[f] assign[=] call[name[io].open, parameter[name[f], constant[rb]]]
if <ast.BoolOp object at 0x7da18c4ce950> begin[:]
variable[fname] assign[=] name[f].name
return[call[name[cls].from_string, parameter[call[name[f].read, parameter[]]]]]
|
keyword[def] identifier[from_file] ( identifier[cls] , identifier[f] , identifier[fname] = keyword[None] , identifier[readers] = keyword[None] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[f] , identifier[six] . identifier[string_types] ):
identifier[f] = identifier[io] . identifier[open] ( identifier[f] , literal[string] )
keyword[if] keyword[not] identifier[fname] keyword[and] identifier[hasattr] ( identifier[f] , literal[string] ):
identifier[fname] = identifier[f] . identifier[name]
keyword[return] identifier[cls] . identifier[from_string] ( identifier[f] . identifier[read] (), identifier[fname] = identifier[fname] , identifier[readers] = identifier[readers] )
|
def from_file(cls, f, fname=None, readers=None):
"""Create a Document from a file.
Usage::
with open('paper.html', 'rb') as f:
doc = Document.from_file(f)
.. note::
Always open files in binary mode by using the 'rb' parameter.
:param file|string f: A file-like object or path to a file.
:param string fname: (Optional) The filename. Used to help determine file format.
:param list[chemdataextractor.reader.base.BaseReader] readers: (Optional) List of readers to use.
"""
if isinstance(f, six.string_types):
f = io.open(f, 'rb') # depends on [control=['if'], data=[]]
if not fname and hasattr(f, 'name'):
fname = f.name # depends on [control=['if'], data=[]]
return cls.from_string(f.read(), fname=fname, readers=readers)
|
def is_valid_email(email):
    """Return ``True`` iff *email* is a syntactically valid RFC 822 address.

    RFC822 Email Address Regex
    --------------------------
    Originally written by Cal Henderson
    c.f. http://iamcal.com/publish/articles/php/parsing_email/
    Translated to Python by Tim Fletcher, with changes suggested by Dan Kubb.
    Licensed under a Creative Commons Attribution-ShareAlike 2.5 License
    http://creativecommons.org/licenses/by-sa/2.5/

    :param email: candidate address string.
    :returns: bool - whether the whole string matches the RFC 822 grammar.
    """
    import re
    # Regex fragments for the RFC 822 grammar, built bottom-up.
    qtext = '[^\\x0d\\x22\\x5c\\x80-\\xff]'   # chars allowed in a quoted string
    dtext = '[^\\x0d\\x5b-\\x5d\\x80-\\xff]'  # chars allowed in a domain literal
    atom = '[^\\x00-\\x20\\x22\\x28\\x29\\x2c\\x2e\\x3a-\\x3c\\x3e\\x40\\x5b-\\x5d\\x7f-\\xff]+'
    quoted_pair = '\\x5c[\\x00-\\x7f]'        # backslash-escaped character
    domain_literal = "\\x5b(?:%s|%s)*\\x5d" % (dtext, quoted_pair)
    quoted_string = "\\x22(?:%s|%s)*\\x22" % (qtext, quoted_pair)
    domain_ref = atom
    sub_domain = "(?:%s|%s)" % (domain_ref, domain_literal)
    word = "(?:%s|%s)" % (atom, quoted_string)
    domain = "%s(?:\\x2e%s)*" % (sub_domain, sub_domain)
    local_part = "%s(?:\\x2e%s)*" % (word, word)
    addr_spec = "%s\\x40%s" % (local_part, domain)
    # Raw string for the anchors: bare '\A'/'\Z' in a plain str literal are
    # invalid escape sequences (SyntaxWarning on modern Python).
    email_address = re.compile(r'\A%s\Z' % addr_spec)
    return email_address.match(email) is not None
|
def function[is_valid_email, parameter[email]]:
constant[
RFC822 Email Address Regex
--------------------------
Originally written by Cal Henderson
c.f. http://iamcal.com/publish/articles/php/parsing_email/
Translated to Python by Tim Fletcher, with changes suggested by Dan Kubb.
Licensed under a Creative Commons Attribution-ShareAlike 2.5 License
http://creativecommons.org/licenses/by-sa/2.5/
]
import module[re]
variable[qtext] assign[=] constant[[^\x0d\x22\x5c\x80-\xff]]
variable[dtext] assign[=] constant[[^\x0d\x5b-\x5d\x80-\xff]]
variable[atom] assign[=] constant[[^\x00-\x20\x22\x28\x29\x2c\x2e\x3a-\x3c\x3e\x40\x5b-\x5d\x7f-\xff]+]
variable[quoted_pair] assign[=] constant[\x5c[\x00-\x7f]]
variable[domain_literal] assign[=] binary_operation[constant[\x5b(?:%s|%s)*\x5d] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f812080>, <ast.Name object at 0x7da18f810a90>]]]
variable[quoted_string] assign[=] binary_operation[constant[\x22(?:%s|%s)*\x22] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f8133a0>, <ast.Name object at 0x7da18f8112a0>]]]
variable[domain_ref] assign[=] name[atom]
variable[sub_domain] assign[=] binary_operation[constant[(?:%s|%s)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f810040>, <ast.Name object at 0x7da18f811a80>]]]
variable[word] assign[=] binary_operation[constant[(?:%s|%s)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f8130a0>, <ast.Name object at 0x7da18f810760>]]]
variable[domain] assign[=] binary_operation[constant[%s(?:\x2e%s)*] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f811a20>, <ast.Name object at 0x7da18f8120b0>]]]
variable[local_part] assign[=] binary_operation[constant[%s(?:\x2e%s)*] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f811bd0>, <ast.Name object at 0x7da18f8126b0>]]]
variable[addr_spec] assign[=] binary_operation[constant[%s\x40%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f810d30>, <ast.Name object at 0x7da18f810fa0>]]]
variable[email_address] assign[=] call[name[re].compile, parameter[binary_operation[constant[\A%s\Z] <ast.Mod object at 0x7da2590d6920> name[addr_spec]]]]
if call[name[email_address].match, parameter[name[email]]] begin[:]
return[constant[True]]
return[constant[False]]
|
keyword[def] identifier[is_valid_email] ( identifier[email] ):
literal[string]
keyword[import] identifier[re]
identifier[qtext] = literal[string]
identifier[dtext] = literal[string]
identifier[atom] = literal[string]
identifier[quoted_pair] = literal[string]
identifier[domain_literal] = literal[string] %( identifier[dtext] , identifier[quoted_pair] )
identifier[quoted_string] = literal[string] %( identifier[qtext] , identifier[quoted_pair] )
identifier[domain_ref] = identifier[atom]
identifier[sub_domain] = literal[string] %( identifier[domain_ref] , identifier[domain_literal] )
identifier[word] = literal[string] %( identifier[atom] , identifier[quoted_string] )
identifier[domain] = literal[string] %( identifier[sub_domain] , identifier[sub_domain] )
identifier[local_part] = literal[string] %( identifier[word] , identifier[word] )
identifier[addr_spec] = literal[string] %( identifier[local_part] , identifier[domain] )
identifier[email_address] = identifier[re] . identifier[compile] ( literal[string] % identifier[addr_spec] )
keyword[if] identifier[email_address] . identifier[match] ( identifier[email] ):
keyword[return] keyword[True]
keyword[return] keyword[False]
|
def is_valid_email(email):
"""
RFC822 Email Address Regex
--------------------------
Originally written by Cal Henderson
c.f. http://iamcal.com/publish/articles/php/parsing_email/
Translated to Python by Tim Fletcher, with changes suggested by Dan Kubb.
Licensed under a Creative Commons Attribution-ShareAlike 2.5 License
http://creativecommons.org/licenses/by-sa/2.5/
"""
import re
qtext = '[^\\x0d\\x22\\x5c\\x80-\\xff]'
dtext = '[^\\x0d\\x5b-\\x5d\\x80-\\xff]'
atom = '[^\\x00-\\x20\\x22\\x28\\x29\\x2c\\x2e\\x3a-\\x3c\\x3e\\x40\\x5b-\\x5d\\x7f-\\xff]+'
quoted_pair = '\\x5c[\\x00-\\x7f]'
domain_literal = '\\x5b(?:%s|%s)*\\x5d' % (dtext, quoted_pair)
quoted_string = '\\x22(?:%s|%s)*\\x22' % (qtext, quoted_pair)
domain_ref = atom
sub_domain = '(?:%s|%s)' % (domain_ref, domain_literal)
word = '(?:%s|%s)' % (atom, quoted_string)
domain = '%s(?:\\x2e%s)*' % (sub_domain, sub_domain)
local_part = '%s(?:\\x2e%s)*' % (word, word)
addr_spec = '%s\\x40%s' % (local_part, domain)
email_address = re.compile('\\A%s\\Z' % addr_spec)
if email_address.match(email):
return True # depends on [control=['if'], data=[]]
return False
|
def set_triggered_by_event(self, value):
    """Set the ``triggered_by_event`` field.

    :param value: new boolean value.  ``None`` (or any non-bool) is rejected.
    :raises TypeError: if *value* is not a ``bool``.
    """
    # `None` is not an instance of bool, so one isinstance guard covers
    # both the None check and the type check.
    if not isinstance(value, bool):
        raise TypeError("TriggeredByEvent must be set to a bool")
    self.__triggered_by_event = value
|
def function[set_triggered_by_event, parameter[self, value]]:
constant[
Setter for 'triggered_by_event' field.
:param value - a new value of 'triggered_by_event' field. Must be a boolean type. Does not accept None value.
]
if <ast.BoolOp object at 0x7da18ede6590> begin[:]
<ast.Raise object at 0x7da18ede5660>
|
keyword[def] identifier[set_triggered_by_event] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] identifier[value] keyword[is] keyword[None] keyword[or] keyword[not] identifier[isinstance] ( identifier[value] , identifier[bool] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[else] :
identifier[self] . identifier[__triggered_by_event] = identifier[value]
|
def set_triggered_by_event(self, value):
"""
Setter for 'triggered_by_event' field.
:param value - a new value of 'triggered_by_event' field. Must be a boolean type. Does not accept None value.
"""
if value is None or not isinstance(value, bool):
raise TypeError('TriggeredByEvent must be set to a bool') # depends on [control=['if'], data=[]]
else:
self.__triggered_by_event = value
|
def any_ends_with(self, string_list, pattern):
    """Return True iff *pattern* is a string that ends with at least one
    of the suffixes in *string_list*.

    (The check is on *pattern* ending with a list entry, not the list
    entries ending with *pattern*.)

    :param string_list: iterable of candidate suffix strings.
    :param pattern: value to test; non-string values return False.
    :returns: bool
    """
    # Python 2 compatibility: prefer `basestring` when it exists.
    try:
        string_type = basestring
    except NameError:
        string_type = str
    if not isinstance(pattern, string_type):
        return False
    # str.endswith accepts a tuple of suffixes, checked in order -
    # same semantics as the explicit loop, at C speed.
    return pattern.endswith(tuple(string_list))
|
def function[any_ends_with, parameter[self, string_list, pattern]]:
constant[Returns true iff one of the strings in string_list ends in
pattern.]
<ast.Try object at 0x7da18dc04c10>
variable[is_string] assign[=] call[name[isinstance], parameter[name[pattern], name[s_base]]]
if <ast.UnaryOp object at 0x7da18dc04220> begin[:]
return[constant[False]]
for taget[name[s]] in starred[name[string_list]] begin[:]
if call[name[pattern].endswith, parameter[name[s]]] begin[:]
return[constant[True]]
return[constant[False]]
|
keyword[def] identifier[any_ends_with] ( identifier[self] , identifier[string_list] , identifier[pattern] ):
literal[string]
keyword[try] :
identifier[s_base] = identifier[basestring]
keyword[except] :
identifier[s_base] = identifier[str]
identifier[is_string] = identifier[isinstance] ( identifier[pattern] , identifier[s_base] )
keyword[if] keyword[not] identifier[is_string] :
keyword[return] keyword[False]
keyword[for] identifier[s] keyword[in] identifier[string_list] :
keyword[if] identifier[pattern] . identifier[endswith] ( identifier[s] ):
keyword[return] keyword[True]
keyword[return] keyword[False]
|
def any_ends_with(self, string_list, pattern):
"""Returns true iff one of the strings in string_list ends in
pattern."""
try:
s_base = basestring # depends on [control=['try'], data=[]]
except:
s_base = str # depends on [control=['except'], data=[]]
is_string = isinstance(pattern, s_base)
if not is_string:
return False # depends on [control=['if'], data=[]]
for s in string_list:
if pattern.endswith(s):
return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['s']]
return False
|
def data_from_cluster_id(self, cluster_id, graph, data):
    """Return the original data rows of every member of a given cluster.

    Parameters
    ----------
    cluster_id : String
        ID of the cluster.
    graph : dict
        The resulting dictionary after applying map()
    data : Numpy Array
        Original dataset. Accepts both 1-D and 2-D array.

    Returns
    -------
    entries:
        rows of cluster member data as Numpy array; an empty array when
        the cluster ID is not present in the graph.
    """
    nodes = graph["nodes"]
    if cluster_id not in nodes:
        # Unknown cluster: return an empty array rather than raising.
        return np.array([])
    member_indices = nodes[cluster_id]
    return data[member_indices]
|
def function[data_from_cluster_id, parameter[self, cluster_id, graph, data]]:
constant[Returns the original data of each cluster member for a given cluster ID
Parameters
----------
cluster_id : String
ID of the cluster.
graph : dict
The resulting dictionary after applying map()
data : Numpy Array
Original dataset. Accepts both 1-D and 2-D array.
Returns
-------
entries:
rows of cluster member data as Numpy array.
]
if compare[name[cluster_id] in call[name[graph]][constant[nodes]]] begin[:]
variable[cluster_members] assign[=] call[call[name[graph]][constant[nodes]]][name[cluster_id]]
variable[cluster_members_data] assign[=] call[name[data]][name[cluster_members]]
return[name[cluster_members_data]]
|
keyword[def] identifier[data_from_cluster_id] ( identifier[self] , identifier[cluster_id] , identifier[graph] , identifier[data] ):
literal[string]
keyword[if] identifier[cluster_id] keyword[in] identifier[graph] [ literal[string] ]:
identifier[cluster_members] = identifier[graph] [ literal[string] ][ identifier[cluster_id] ]
identifier[cluster_members_data] = identifier[data] [ identifier[cluster_members] ]
keyword[return] identifier[cluster_members_data]
keyword[else] :
keyword[return] identifier[np] . identifier[array] ([])
|
def data_from_cluster_id(self, cluster_id, graph, data):
"""Returns the original data of each cluster member for a given cluster ID
Parameters
----------
cluster_id : String
ID of the cluster.
graph : dict
The resulting dictionary after applying map()
data : Numpy Array
Original dataset. Accepts both 1-D and 2-D array.
Returns
-------
entries:
rows of cluster member data as Numpy array.
"""
if cluster_id in graph['nodes']:
cluster_members = graph['nodes'][cluster_id]
cluster_members_data = data[cluster_members]
return cluster_members_data # depends on [control=['if'], data=['cluster_id']]
else:
return np.array([])
|
def start_new_fact(self, clone_selected=True, fallback=True):
    """Start now a new fact.

    clone_selected (bool): whether to start a clone of currently
        selected fact or to create a new fact from scratch.
    fallback (bool): if True, fall back to creating from scratch
        in case of no selected fact.
    """
    if not clone_selected:
        # Explicit request for a blank fact.
        dialogs.edit.show(self, base_fact=None)
        return
    current = self.fact_tree.current_fact
    if current or fallback:
        # Clone the selection; with fallback=True this may be a clone
        # of None, i.e. a fact created from scratch.
        dialogs.edit.show(self, base_fact=current)
|
def function[start_new_fact, parameter[self, clone_selected, fallback]]:
constant[Start now a new fact.
clone_selected (bool): whether to start a clone of currently
selected fact or to create a new fact from scratch.
fallback (bool): if True, fall back to creating from scratch
in case of no selected fact.
]
if <ast.UnaryOp object at 0x7da204960e50> begin[:]
call[name[dialogs].edit.show, parameter[name[self]]]
|
keyword[def] identifier[start_new_fact] ( identifier[self] , identifier[clone_selected] = keyword[True] , identifier[fallback] = keyword[True] ):
literal[string]
keyword[if] keyword[not] identifier[clone_selected] :
identifier[dialogs] . identifier[edit] . identifier[show] ( identifier[self] , identifier[base_fact] = keyword[None] )
keyword[elif] identifier[self] . identifier[fact_tree] . identifier[current_fact] keyword[or] identifier[fallback] :
identifier[dialogs] . identifier[edit] . identifier[show] ( identifier[self] , identifier[base_fact] = identifier[self] . identifier[fact_tree] . identifier[current_fact] )
|
def start_new_fact(self, clone_selected=True, fallback=True):
"""Start now a new fact.
clone_selected (bool): whether to start a clone of currently
selected fact or to create a new fact from scratch.
fallback (bool): if True, fall back to creating from scratch
in case of no selected fact.
"""
if not clone_selected:
dialogs.edit.show(self, base_fact=None) # depends on [control=['if'], data=[]]
elif self.fact_tree.current_fact or fallback:
dialogs.edit.show(self, base_fact=self.fact_tree.current_fact) # depends on [control=['if'], data=[]]
|
def coef(self):
    """
    Return the coefficients which can be applied to the non-standardized data.

    Note: standardize = True by default, if set to False then coef() return
    the coefficients which are fit directly.
    """
    table = self._model_json["output"]["coefficients_table"]
    if table is None:
        return None
    # Pair each coefficient name with its fitted value.
    return dict(zip(table["names"], table["coefficients"]))
|
def function[coef, parameter[self]]:
constant[
Return the coefficients which can be applied to the non-standardized data.
Note: standardize = True by default, if set to False then coef() return the coefficients which are fit directly.
]
variable[tbl] assign[=] call[call[name[self]._model_json][constant[output]]][constant[coefficients_table]]
if compare[name[tbl] is constant[None]] begin[:]
return[constant[None]]
return[<ast.DictComp object at 0x7da204346e90>]
|
keyword[def] identifier[coef] ( identifier[self] ):
literal[string]
identifier[tbl] = identifier[self] . identifier[_model_json] [ literal[string] ][ literal[string] ]
keyword[if] identifier[tbl] keyword[is] keyword[None] :
keyword[return] keyword[None]
keyword[return] { identifier[name] : identifier[coef] keyword[for] identifier[name] , identifier[coef] keyword[in] identifier[zip] ( identifier[tbl] [ literal[string] ], identifier[tbl] [ literal[string] ])}
|
def coef(self):
"""
Return the coefficients which can be applied to the non-standardized data.
Note: standardize = True by default, if set to False then coef() return the coefficients which are fit directly.
"""
tbl = self._model_json['output']['coefficients_table']
if tbl is None:
return None # depends on [control=['if'], data=[]]
return {name: coef for (name, coef) in zip(tbl['names'], tbl['coefficients'])}
|
def get_image_and_mask(self, label, positive_only=True, hide_rest=False,
                       num_features=5, min_weight=0.):
    """Return an image/mask pair highlighting the explanation for ``label``.

    Args:
        label: label to explain
        positive_only: if True, only take superpixels that contribute to
            the prediction of the label. Otherwise, use the top
            num_features superpixels, which can be positive or negative
            towards the label
        hide_rest: if True, make the non-explanation part of the return
            image gray
        num_features: number of superpixels to include in explanation
        min_weight: superpixels whose (absolute) weight is below this
            threshold are excluded from the explanation
    Returns:
        (image, mask), where image is a 3d numpy array and mask is a 2d
        numpy array that can be used with
        skimage.segmentation.mark_boundaries
    Raises:
        KeyError: if no explanation is stored for ``label``.
    """
    if label not in self.local_exp:
        raise KeyError('Label not in explanation')
    segments = self.segments
    image = self.image
    exp = self.local_exp[label]
    mask = np.zeros(segments.shape, segments.dtype)
    # Start from a blank canvas (hide_rest) or a copy of the original image.
    temp = np.zeros(self.image.shape) if hide_rest else self.image.copy()
    if positive_only:
        # Keep only superpixels pushing the prediction toward `label`.
        fs = [x[0] for x in exp
              if x[1] > 0 and x[1] > min_weight][:num_features]
        for f in fs:
            temp[segments == f] = image[segments == f].copy()
            mask[segments == f] = 1
        return temp, mask
    # Otherwise include both signs: mark negative superpixels with 1 and
    # positive ones with 2 in the mask, and saturate one color channel
    # (channel 0 for negative, channel 1 for positive) in the image.
    for f, w in exp[:num_features]:
        if np.abs(w) < min_weight:
            continue
        c = 0 if w < 0 else 1
        mask[segments == f] = 1 if w < 0 else 2
        temp[segments == f] = image[segments == f].copy()
        temp[segments == f, c] = np.max(image)
        # NOTE(review): the original code iterated the remaining channels
        # here with an empty (fully commented-out) body; that dead loop
        # has been removed — behavior is unchanged.
    return temp, mask
|
def function[get_image_and_mask, parameter[self, label, positive_only, hide_rest, num_features, min_weight]]:
constant[Init function.
Args:
label: label to explain
positive_only: if True, only take superpixels that contribute to
the prediction of the label. Otherwise, use the top
num_features superpixels, which can be positive or negative
towards the label
hide_rest: if True, make the non-explanation part of the return
image gray
num_features: number of superpixels to include in explanation
min_weight: TODO
Returns:
(image, mask), where image is a 3d numpy array and mask is a 2d
numpy array that can be used with
skimage.segmentation.mark_boundaries
]
if compare[name[label] <ast.NotIn object at 0x7da2590d7190> name[self].local_exp] begin[:]
<ast.Raise object at 0x7da1b20bbdf0>
variable[segments] assign[=] name[self].segments
variable[image] assign[=] name[self].image
variable[exp] assign[=] call[name[self].local_exp][name[label]]
variable[mask] assign[=] call[name[np].zeros, parameter[name[segments].shape, name[segments].dtype]]
if name[hide_rest] begin[:]
variable[temp] assign[=] call[name[np].zeros, parameter[name[self].image.shape]]
if name[positive_only] begin[:]
variable[fs] assign[=] call[<ast.ListComp object at 0x7da18f811210>][<ast.Slice object at 0x7da18f8109d0>]
for taget[name[f]] in starred[name[fs]] begin[:]
call[name[temp]][compare[name[segments] equal[==] name[f]]] assign[=] call[call[name[image]][compare[name[segments] equal[==] name[f]]].copy, parameter[]]
call[name[mask]][compare[name[segments] equal[==] name[f]]] assign[=] constant[1]
return[tuple[[<ast.Name object at 0x7da18f8102b0>, <ast.Name object at 0x7da18f813880>]]]
|
keyword[def] identifier[get_image_and_mask] ( identifier[self] , identifier[label] , identifier[positive_only] = keyword[True] , identifier[hide_rest] = keyword[False] ,
identifier[num_features] = literal[int] , identifier[min_weight] = literal[int] ):
literal[string]
keyword[if] identifier[label] keyword[not] keyword[in] identifier[self] . identifier[local_exp] :
keyword[raise] identifier[KeyError] ( literal[string] )
identifier[segments] = identifier[self] . identifier[segments]
identifier[image] = identifier[self] . identifier[image]
identifier[exp] = identifier[self] . identifier[local_exp] [ identifier[label] ]
identifier[mask] = identifier[np] . identifier[zeros] ( identifier[segments] . identifier[shape] , identifier[segments] . identifier[dtype] )
keyword[if] identifier[hide_rest] :
identifier[temp] = identifier[np] . identifier[zeros] ( identifier[self] . identifier[image] . identifier[shape] )
keyword[else] :
identifier[temp] = identifier[self] . identifier[image] . identifier[copy] ()
keyword[if] identifier[positive_only] :
identifier[fs] =[ identifier[x] [ literal[int] ] keyword[for] identifier[x] keyword[in] identifier[exp]
keyword[if] identifier[x] [ literal[int] ]> literal[int] keyword[and] identifier[x] [ literal[int] ]> identifier[min_weight] ][: identifier[num_features] ]
keyword[for] identifier[f] keyword[in] identifier[fs] :
identifier[temp] [ identifier[segments] == identifier[f] ]= identifier[image] [ identifier[segments] == identifier[f] ]. identifier[copy] ()
identifier[mask] [ identifier[segments] == identifier[f] ]= literal[int]
keyword[return] identifier[temp] , identifier[mask]
keyword[else] :
keyword[for] identifier[f] , identifier[w] keyword[in] identifier[exp] [: identifier[num_features] ]:
keyword[if] identifier[np] . identifier[abs] ( identifier[w] )< identifier[min_weight] :
keyword[continue]
identifier[c] = literal[int] keyword[if] identifier[w] < literal[int] keyword[else] literal[int]
identifier[mask] [ identifier[segments] == identifier[f] ]= literal[int] keyword[if] identifier[w] < literal[int] keyword[else] literal[int]
identifier[temp] [ identifier[segments] == identifier[f] ]= identifier[image] [ identifier[segments] == identifier[f] ]. identifier[copy] ()
identifier[temp] [ identifier[segments] == identifier[f] , identifier[c] ]= identifier[np] . identifier[max] ( identifier[image] )
keyword[for] identifier[cp] keyword[in] [ literal[int] , literal[int] , literal[int] ]:
keyword[if] identifier[c] == identifier[cp] :
keyword[continue]
keyword[return] identifier[temp] , identifier[mask]
|
def get_image_and_mask(self, label, positive_only=True, hide_rest=False, num_features=5, min_weight=0.0):
"""Init function.
Args:
label: label to explain
positive_only: if True, only take superpixels that contribute to
the prediction of the label. Otherwise, use the top
num_features superpixels, which can be positive or negative
towards the label
hide_rest: if True, make the non-explanation part of the return
image gray
num_features: number of superpixels to include in explanation
min_weight: TODO
Returns:
(image, mask), where image is a 3d numpy array and mask is a 2d
numpy array that can be used with
skimage.segmentation.mark_boundaries
"""
if label not in self.local_exp:
raise KeyError('Label not in explanation') # depends on [control=['if'], data=[]]
segments = self.segments
image = self.image
exp = self.local_exp[label]
mask = np.zeros(segments.shape, segments.dtype)
if hide_rest:
temp = np.zeros(self.image.shape) # depends on [control=['if'], data=[]]
else:
temp = self.image.copy()
if positive_only:
fs = [x[0] for x in exp if x[1] > 0 and x[1] > min_weight][:num_features]
for f in fs:
temp[segments == f] = image[segments == f].copy()
mask[segments == f] = 1 # depends on [control=['for'], data=['f']]
return (temp, mask) # depends on [control=['if'], data=[]]
else:
for (f, w) in exp[:num_features]:
if np.abs(w) < min_weight:
continue # depends on [control=['if'], data=[]]
c = 0 if w < 0 else 1
mask[segments == f] = 1 if w < 0 else 2
temp[segments == f] = image[segments == f].copy()
temp[segments == f, c] = np.max(image)
for cp in [0, 1, 2]:
if c == cp:
continue # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['cp']] # depends on [control=['for'], data=[]]
# temp[segments == f, cp] *= 0.5
return (temp, mask)
|
def has_field(mc, field_name):
    """
    Return True when model class ``mc`` declares a field named
    ``field_name``, False otherwise.

    :param mc: model class whose ``_meta`` is queried
    :param field_name: name of the field to look up
    :return: bool
    """
    try:
        # get_field raises FieldDoesNotExist for unknown field names.
        mc._meta.get_field(field_name)
        return True
    except FieldDoesNotExist:
        return False
|
def function[has_field, parameter[mc, field_name]]:
constant[
detect if a model has a given field has
:param field_name:
:param mc:
:return:
]
<ast.Try object at 0x7da204567a00>
return[constant[True]]
|
keyword[def] identifier[has_field] ( identifier[mc] , identifier[field_name] ):
literal[string]
keyword[try] :
identifier[mc] . identifier[_meta] . identifier[get_field] ( identifier[field_name] )
keyword[except] identifier[FieldDoesNotExist] :
keyword[return] keyword[False]
keyword[return] keyword[True]
|
def has_field(mc, field_name):
"""
detect if a model has a given field has
:param field_name:
:param mc:
:return:
"""
try:
mc._meta.get_field(field_name) # depends on [control=['try'], data=[]]
except FieldDoesNotExist:
return False # depends on [control=['except'], data=[]]
return True
|
def _parseKeyNames(lib):
"""
returns a dictionary mapping of human readable key names to their keycodes
this parses constants with the names of K_* and makes code=name pairs
this is for KeyEvent.key variable and that enables things like:
if (event.key == 'PAGEUP'):
"""
_keyNames = {}
for attr in dir(lib): # from the modules variables
if attr[:6] == 'TCODK_': # get the K_* constants
_keyNames[getattr(lib, attr)] = attr[6:] # and make CODE=NAME pairs
return _keyNames
|
def function[_parseKeyNames, parameter[lib]]:
constant[
returns a dictionary mapping of human readable key names to their keycodes
this parses constants with the names of K_* and makes code=name pairs
this is for KeyEvent.key variable and that enables things like:
if (event.key == 'PAGEUP'):
]
variable[_keyNames] assign[=] dictionary[[], []]
for taget[name[attr]] in starred[call[name[dir], parameter[name[lib]]]] begin[:]
if compare[call[name[attr]][<ast.Slice object at 0x7da1b117a8c0>] equal[==] constant[TCODK_]] begin[:]
call[name[_keyNames]][call[name[getattr], parameter[name[lib], name[attr]]]] assign[=] call[name[attr]][<ast.Slice object at 0x7da1b117b580>]
return[name[_keyNames]]
|
keyword[def] identifier[_parseKeyNames] ( identifier[lib] ):
literal[string]
identifier[_keyNames] ={}
keyword[for] identifier[attr] keyword[in] identifier[dir] ( identifier[lib] ):
keyword[if] identifier[attr] [: literal[int] ]== literal[string] :
identifier[_keyNames] [ identifier[getattr] ( identifier[lib] , identifier[attr] )]= identifier[attr] [ literal[int] :]
keyword[return] identifier[_keyNames]
|
def _parseKeyNames(lib):
"""
returns a dictionary mapping of human readable key names to their keycodes
this parses constants with the names of K_* and makes code=name pairs
this is for KeyEvent.key variable and that enables things like:
if (event.key == 'PAGEUP'):
"""
_keyNames = {}
for attr in dir(lib): # from the modules variables
if attr[:6] == 'TCODK_': # get the K_* constants
_keyNames[getattr(lib, attr)] = attr[6:] # and make CODE=NAME pairs # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['attr']]
return _keyNames
|
def scene_velocity(sequence_number, scene_id, velocity):
    """Create a scene.velocity message"""
    writer = MessageWriter().string("scene.velocity")
    writer = writer.uint64(sequence_number).uint32(scene_id)
    # Velocity travels on the wire as an integer in thousandths.
    return writer.uint32(int(velocity * 1000)).get()
|
def function[scene_velocity, parameter[sequence_number, scene_id, velocity]]:
constant[Create a scene.velocity message]
return[call[call[call[call[call[call[name[MessageWriter], parameter[]].string, parameter[constant[scene.velocity]]].uint64, parameter[name[sequence_number]]].uint32, parameter[name[scene_id]]].uint32, parameter[call[name[int], parameter[binary_operation[name[velocity] * constant[1000]]]]]].get, parameter[]]]
|
keyword[def] identifier[scene_velocity] ( identifier[sequence_number] , identifier[scene_id] , identifier[velocity] ):
literal[string]
keyword[return] identifier[MessageWriter] (). identifier[string] ( literal[string] ). identifier[uint64] ( identifier[sequence_number] ). identifier[uint32] ( identifier[scene_id] ). identifier[uint32] ( identifier[int] ( identifier[velocity] * literal[int] )). identifier[get] ()
|
def scene_velocity(sequence_number, scene_id, velocity):
"""Create a scene.velocity message"""
return MessageWriter().string('scene.velocity').uint64(sequence_number).uint32(scene_id).uint32(int(velocity * 1000)).get()
|
def attach(self, **kwargs):
    """
    Attach to this container.

    :py:meth:`logs` is a wrapper around this method, which you can
    use instead if you want to fetch/stream container output without first
    retrieving the entire backlog.

    Args:
        stdout (bool): Include stdout.
        stderr (bool): Include stderr.
        stream (bool): Return container output progressively as an iterator
            of strings, rather than a single string.
        logs (bool): Include the container's previous output.

    Returns:
        By default, the container's output as a single string.
        If ``stream=True``, an iterator of output strings.

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    # Delegate to the low-level API client, identified by this container's id.
    api = self.client.api
    return api.attach(self.id, **kwargs)
|
def function[attach, parameter[self]]:
constant[
Attach to this container.
:py:meth:`logs` is a wrapper around this method, which you can
use instead if you want to fetch/stream container output without first
retrieving the entire backlog.
Args:
stdout (bool): Include stdout.
stderr (bool): Include stderr.
stream (bool): Return container output progressively as an iterator
of strings, rather than a single string.
logs (bool): Include the container's previous output.
Returns:
By default, the container's output as a single string.
If ``stream=True``, an iterator of output strings.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
]
return[call[name[self].client.api.attach, parameter[name[self].id]]]
|
keyword[def] identifier[attach] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[self] . identifier[client] . identifier[api] . identifier[attach] ( identifier[self] . identifier[id] ,** identifier[kwargs] )
|
def attach(self, **kwargs):
"""
Attach to this container.
:py:meth:`logs` is a wrapper around this method, which you can
use instead if you want to fetch/stream container output without first
retrieving the entire backlog.
Args:
stdout (bool): Include stdout.
stderr (bool): Include stderr.
stream (bool): Return container output progressively as an iterator
of strings, rather than a single string.
logs (bool): Include the container's previous output.
Returns:
By default, the container's output as a single string.
If ``stream=True``, an iterator of output strings.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.attach(self.id, **kwargs)
|
def check_config_xml(self, contents):
    """
    Check whether the given XML config file contents
    is well-formed and it has all the required parameters.

    Validation runs in three stages, returning at the first failure:
    raw-bytes well-formedness, required job-level parameters, and
    required parameters for each task.

    :param string contents: the XML config file contents or XML config string
    :rtype: :class:`~aeneas.validator.ValidatorResult`
    """
    self.log(u"Checking contents XML config file")
    # Fresh result object for this validation run.
    self.result = ValidatorResult()
    if self._are_safety_checks_disabled(u"check_config_xml"):
        # Safety checks disabled: return the untouched (passing) result.
        return self.result
    # Normalize input to bytes before parsing.
    contents = gf.safe_bytes(contents)
    self.log(u"Checking that contents is well formed")
    self.check_raw_string(contents, is_bstring=True)
    if not self.result.passed:
        return self.result
    self.log(u"Checking required parameters for job")
    # parse_job=True extracts job-level parameters only.
    job_parameters = gf.config_xml_to_dict(contents, self.result, parse_job=True)
    self._check_required_parameters(self.XML_JOB_REQUIRED_PARAMETERS, job_parameters)
    if not self.result.passed:
        return self.result
    self.log(u"Checking required parameters for task")
    # parse_job=False yields one parameter dict per task.
    tasks_parameters = gf.config_xml_to_dict(contents, self.result, parse_job=False)
    for parameters in tasks_parameters:
        self.log([u"Checking required parameters for task: '%s'", parameters])
        self._check_required_parameters(self.XML_TASK_REQUIRED_PARAMETERS, parameters)
        if not self.result.passed:
            # Fail fast on the first task missing a required parameter.
            return self.result
    return self.result
|
def function[check_config_xml, parameter[self, contents]]:
constant[
Check whether the given XML config file contents
is well-formed and it has all the required parameters.
:param string contents: the XML config file contents or XML config string
:param bool is_config_string: if ``True``, contents is a config string
:rtype: :class:`~aeneas.validator.ValidatorResult`
]
call[name[self].log, parameter[constant[Checking contents XML config file]]]
name[self].result assign[=] call[name[ValidatorResult], parameter[]]
if call[name[self]._are_safety_checks_disabled, parameter[constant[check_config_xml]]] begin[:]
return[name[self].result]
variable[contents] assign[=] call[name[gf].safe_bytes, parameter[name[contents]]]
call[name[self].log, parameter[constant[Checking that contents is well formed]]]
call[name[self].check_raw_string, parameter[name[contents]]]
if <ast.UnaryOp object at 0x7da18f810370> begin[:]
return[name[self].result]
call[name[self].log, parameter[constant[Checking required parameters for job]]]
variable[job_parameters] assign[=] call[name[gf].config_xml_to_dict, parameter[name[contents], name[self].result]]
call[name[self]._check_required_parameters, parameter[name[self].XML_JOB_REQUIRED_PARAMETERS, name[job_parameters]]]
if <ast.UnaryOp object at 0x7da18f813d00> begin[:]
return[name[self].result]
call[name[self].log, parameter[constant[Checking required parameters for task]]]
variable[tasks_parameters] assign[=] call[name[gf].config_xml_to_dict, parameter[name[contents], name[self].result]]
for taget[name[parameters]] in starred[name[tasks_parameters]] begin[:]
call[name[self].log, parameter[list[[<ast.Constant object at 0x7da18f810ac0>, <ast.Name object at 0x7da18f8133a0>]]]]
call[name[self]._check_required_parameters, parameter[name[self].XML_TASK_REQUIRED_PARAMETERS, name[parameters]]]
if <ast.UnaryOp object at 0x7da18f812dd0> begin[:]
return[name[self].result]
return[name[self].result]
|
keyword[def] identifier[check_config_xml] ( identifier[self] , identifier[contents] ):
literal[string]
identifier[self] . identifier[log] ( literal[string] )
identifier[self] . identifier[result] = identifier[ValidatorResult] ()
keyword[if] identifier[self] . identifier[_are_safety_checks_disabled] ( literal[string] ):
keyword[return] identifier[self] . identifier[result]
identifier[contents] = identifier[gf] . identifier[safe_bytes] ( identifier[contents] )
identifier[self] . identifier[log] ( literal[string] )
identifier[self] . identifier[check_raw_string] ( identifier[contents] , identifier[is_bstring] = keyword[True] )
keyword[if] keyword[not] identifier[self] . identifier[result] . identifier[passed] :
keyword[return] identifier[self] . identifier[result]
identifier[self] . identifier[log] ( literal[string] )
identifier[job_parameters] = identifier[gf] . identifier[config_xml_to_dict] ( identifier[contents] , identifier[self] . identifier[result] , identifier[parse_job] = keyword[True] )
identifier[self] . identifier[_check_required_parameters] ( identifier[self] . identifier[XML_JOB_REQUIRED_PARAMETERS] , identifier[job_parameters] )
keyword[if] keyword[not] identifier[self] . identifier[result] . identifier[passed] :
keyword[return] identifier[self] . identifier[result]
identifier[self] . identifier[log] ( literal[string] )
identifier[tasks_parameters] = identifier[gf] . identifier[config_xml_to_dict] ( identifier[contents] , identifier[self] . identifier[result] , identifier[parse_job] = keyword[False] )
keyword[for] identifier[parameters] keyword[in] identifier[tasks_parameters] :
identifier[self] . identifier[log] ([ literal[string] , identifier[parameters] ])
identifier[self] . identifier[_check_required_parameters] ( identifier[self] . identifier[XML_TASK_REQUIRED_PARAMETERS] , identifier[parameters] )
keyword[if] keyword[not] identifier[self] . identifier[result] . identifier[passed] :
keyword[return] identifier[self] . identifier[result]
keyword[return] identifier[self] . identifier[result]
|
def check_config_xml(self, contents):
"""
Check whether the given XML config file contents
is well-formed and it has all the required parameters.
:param string contents: the XML config file contents or XML config string
:param bool is_config_string: if ``True``, contents is a config string
:rtype: :class:`~aeneas.validator.ValidatorResult`
"""
self.log(u'Checking contents XML config file')
self.result = ValidatorResult()
if self._are_safety_checks_disabled(u'check_config_xml'):
return self.result # depends on [control=['if'], data=[]]
contents = gf.safe_bytes(contents)
self.log(u'Checking that contents is well formed')
self.check_raw_string(contents, is_bstring=True)
if not self.result.passed:
return self.result # depends on [control=['if'], data=[]]
self.log(u'Checking required parameters for job')
job_parameters = gf.config_xml_to_dict(contents, self.result, parse_job=True)
self._check_required_parameters(self.XML_JOB_REQUIRED_PARAMETERS, job_parameters)
if not self.result.passed:
return self.result # depends on [control=['if'], data=[]]
self.log(u'Checking required parameters for task')
tasks_parameters = gf.config_xml_to_dict(contents, self.result, parse_job=False)
for parameters in tasks_parameters:
self.log([u"Checking required parameters for task: '%s'", parameters])
self._check_required_parameters(self.XML_TASK_REQUIRED_PARAMETERS, parameters)
if not self.result.passed:
return self.result # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['parameters']]
return self.result
|
def extract_shn (archive, compression, cmd, verbosity, interactive, outdir):
    """Decompress a SHN archive to a WAV file."""
    outfile = util.get_single_outfile(outdir, archive, extension=".wav")
    # Build the full shell command in one go; '<' redirection requires
    # the command to run through a shell.
    cmdlist = [
        util.shell_quote(cmd),
        '-x', '-',
        util.shell_quote(outfile),
        '<',
        util.shell_quote(archive),
    ]
    return (cmdlist, {'shell': True})
|
def function[extract_shn, parameter[archive, compression, cmd, verbosity, interactive, outdir]]:
constant[Decompress a SHN archive to a WAV file.]
variable[cmdlist] assign[=] list[[<ast.Call object at 0x7da1b07ae410>]]
variable[outfile] assign[=] call[name[util].get_single_outfile, parameter[name[outdir], name[archive]]]
call[name[cmdlist].extend, parameter[list[[<ast.Constant object at 0x7da1b07ac820>, <ast.Constant object at 0x7da1b07ae0b0>, <ast.Call object at 0x7da1b07acd90>, <ast.Constant object at 0x7da1b07af2b0>, <ast.Call object at 0x7da1b07acf40>]]]]
return[tuple[[<ast.Name object at 0x7da1b07ace20>, <ast.Dict object at 0x7da1b07ac160>]]]
|
keyword[def] identifier[extract_shn] ( identifier[archive] , identifier[compression] , identifier[cmd] , identifier[verbosity] , identifier[interactive] , identifier[outdir] ):
literal[string]
identifier[cmdlist] =[ identifier[util] . identifier[shell_quote] ( identifier[cmd] )]
identifier[outfile] = identifier[util] . identifier[get_single_outfile] ( identifier[outdir] , identifier[archive] , identifier[extension] = literal[string] )
identifier[cmdlist] . identifier[extend] ([ literal[string] , literal[string] , identifier[util] . identifier[shell_quote] ( identifier[outfile] ), literal[string] ,
identifier[util] . identifier[shell_quote] ( identifier[archive] )])
keyword[return] ( identifier[cmdlist] ,{ literal[string] : keyword[True] })
|
def extract_shn(archive, compression, cmd, verbosity, interactive, outdir):
"""Decompress a SHN archive to a WAV file."""
cmdlist = [util.shell_quote(cmd)]
outfile = util.get_single_outfile(outdir, archive, extension='.wav')
cmdlist.extend(['-x', '-', util.shell_quote(outfile), '<', util.shell_quote(archive)])
return (cmdlist, {'shell': True})
|
def _get_standalone_app_url(self, app_id, spark_master_address, requests_config, tags):
    """
    Return the application URL from the app info page on the Spark master.

    Due to a bug, we need to parse the HTML manually because we cannot
    fetch JSON data from HTTP interface.
    """
    response = self._rest_request(
        spark_master_address,
        SPARK_MASTER_APP_PATH,
        SPARK_STANDALONE_SERVICE_CHECK,
        requests_config,
        tags,
        appId=app_id,
    )
    parsed = BeautifulSoup(response.text, 'html.parser')
    links = parsed.find_all('a', string='Application Detail UI')
    # Exactly one "Application Detail UI" anchor is expected; otherwise
    # fall through and return None implicitly.
    if len(links) == 1:
        return links[0].attrs['href']
|
def function[_get_standalone_app_url, parameter[self, app_id, spark_master_address, requests_config, tags]]:
constant[
Return the application URL from the app info page on the Spark master.
Due to a bug, we need to parse the HTML manually because we cannot
fetch JSON data from HTTP interface.
]
variable[app_page] assign[=] call[name[self]._rest_request, parameter[name[spark_master_address], name[SPARK_MASTER_APP_PATH], name[SPARK_STANDALONE_SERVICE_CHECK], name[requests_config], name[tags]]]
variable[dom] assign[=] call[name[BeautifulSoup], parameter[name[app_page].text, constant[html.parser]]]
variable[app_detail_ui_links] assign[=] call[name[dom].find_all, parameter[constant[a]]]
if <ast.BoolOp object at 0x7da20c7c82b0> begin[:]
return[call[call[name[app_detail_ui_links]][constant[0]].attrs][constant[href]]]
|
keyword[def] identifier[_get_standalone_app_url] ( identifier[self] , identifier[app_id] , identifier[spark_master_address] , identifier[requests_config] , identifier[tags] ):
literal[string]
identifier[app_page] = identifier[self] . identifier[_rest_request] (
identifier[spark_master_address] ,
identifier[SPARK_MASTER_APP_PATH] ,
identifier[SPARK_STANDALONE_SERVICE_CHECK] ,
identifier[requests_config] ,
identifier[tags] ,
identifier[appId] = identifier[app_id] ,
)
identifier[dom] = identifier[BeautifulSoup] ( identifier[app_page] . identifier[text] , literal[string] )
identifier[app_detail_ui_links] = identifier[dom] . identifier[find_all] ( literal[string] , identifier[string] = literal[string] )
keyword[if] identifier[app_detail_ui_links] keyword[and] identifier[len] ( identifier[app_detail_ui_links] )== literal[int] :
keyword[return] identifier[app_detail_ui_links] [ literal[int] ]. identifier[attrs] [ literal[string] ]
|
def _get_standalone_app_url(self, app_id, spark_master_address, requests_config, tags):
"""
Return the application URL from the app info page on the Spark master.
Due to a bug, we need to parse the HTML manually because we cannot
fetch JSON data from HTTP interface.
"""
app_page = self._rest_request(spark_master_address, SPARK_MASTER_APP_PATH, SPARK_STANDALONE_SERVICE_CHECK, requests_config, tags, appId=app_id)
dom = BeautifulSoup(app_page.text, 'html.parser')
app_detail_ui_links = dom.find_all('a', string='Application Detail UI')
if app_detail_ui_links and len(app_detail_ui_links) == 1:
return app_detail_ui_links[0].attrs['href'] # depends on [control=['if'], data=[]]
|
def _reset_suffix_links(self):
'''
Reset all suffix links in all nodes in this trie.
'''
self._suffix_links_set = False
for current, _parent in self.dfs():
current.suffix = None
current.dict_suffix = None
current.longest_prefix = None
|
def function[_reset_suffix_links, parameter[self]]:
constant[
Reset all suffix links in all nodes in this trie.
]
name[self]._suffix_links_set assign[=] constant[False]
for taget[tuple[[<ast.Name object at 0x7da1b2407a30>, <ast.Name object at 0x7da1b2405f00>]]] in starred[call[name[self].dfs, parameter[]]] begin[:]
name[current].suffix assign[=] constant[None]
name[current].dict_suffix assign[=] constant[None]
name[current].longest_prefix assign[=] constant[None]
|
keyword[def] identifier[_reset_suffix_links] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_suffix_links_set] = keyword[False]
keyword[for] identifier[current] , identifier[_parent] keyword[in] identifier[self] . identifier[dfs] ():
identifier[current] . identifier[suffix] = keyword[None]
identifier[current] . identifier[dict_suffix] = keyword[None]
identifier[current] . identifier[longest_prefix] = keyword[None]
|
def _reset_suffix_links(self):
"""
Reset all suffix links in all nodes in this trie.
"""
self._suffix_links_set = False
for (current, _parent) in self.dfs():
current.suffix = None
current.dict_suffix = None
current.longest_prefix = None # depends on [control=['for'], data=[]]
|
def _configure_io_handler(self, handler):
    """Register an io-handler at the polling object.

    Prepares the handler if necessary, keeps ``self._handlers`` (a
    fd -> event-mask cache) in sync with what is registered on the
    Tornado ``io_loop``, and skips the ioloop call entirely when the
    requested event mask has not changed.
    """
    # pending events take precedence over (re)registering handlers
    if self.check_events():
        return
    if handler in self._unprepared_handlers:
        # handler was seen before but not ready then; remember the fd
        # it had so a changed fileno can be deregistered below
        old_fileno = self._unprepared_handlers[handler]
        prepared = self._prepare_io_handler(handler)
    else:
        old_fileno = None
        prepared = True
    fileno = handler.fileno()
    if old_fileno is not None and fileno != old_fileno:
        # fd changed while unprepared: drop the stale registration
        del self._handlers[old_fileno]
        # remove_handler won't raise something like KeyError if the fd
        # isn't registered; it will just print a debug log.
        self.io_loop.remove_handler(old_fileno)
    if not prepared:
        # still not ready -- track the current fd and retry later
        self._unprepared_handlers[handler] = fileno
    if not fileno:
        # no usable file descriptor yet; nothing to register
        return
    update = fileno in self._handlers
    events = ioloop.IOLoop.NONE
    if handler.is_readable():
        logger.debug(" {0!r} readable".format(handler))
        events |= ioloop.IOLoop.READ
    if handler.is_writable():
        logger.debug(" {0!r} writable".format(handler))
        events |= ioloop.IOLoop.WRITE
    if self._handlers.get(fileno, None) == events:
        # event mask unchanged; avoid a redundant ioloop call
        return
    self._handlers[fileno] = events
    if events:
        logger.debug(" registering {0!r} handler fileno {1} for"
                     " events {2}".format(handler, fileno, events))
        if update:
            self.io_loop.update_handler(fileno, events)
        else:
            self.io_loop.add_handler(
                fileno, partial(self._handle_event, handler), events
            )
|
def function[_configure_io_handler, parameter[self, handler]]:
constant[Register an io-handler at the polling object.]
if call[name[self].check_events, parameter[]] begin[:]
return[None]
if compare[name[handler] in name[self]._unprepared_handlers] begin[:]
variable[old_fileno] assign[=] call[name[self]._unprepared_handlers][name[handler]]
variable[prepared] assign[=] call[name[self]._prepare_io_handler, parameter[name[handler]]]
variable[fileno] assign[=] call[name[handler].fileno, parameter[]]
if <ast.BoolOp object at 0x7da18f722ec0> begin[:]
<ast.Delete object at 0x7da18f722500>
call[name[self].io_loop.remove_handler, parameter[name[old_fileno]]]
if <ast.UnaryOp object at 0x7da18f720100> begin[:]
call[name[self]._unprepared_handlers][name[handler]] assign[=] name[fileno]
if <ast.UnaryOp object at 0x7da18f722890> begin[:]
return[None]
variable[update] assign[=] compare[name[fileno] in name[self]._handlers]
variable[events] assign[=] name[ioloop].IOLoop.NONE
if call[name[handler].is_readable, parameter[]] begin[:]
call[name[logger].debug, parameter[call[constant[ {0!r} readable].format, parameter[name[handler]]]]]
<ast.AugAssign object at 0x7da18f7207c0>
if call[name[handler].is_writable, parameter[]] begin[:]
call[name[logger].debug, parameter[call[constant[ {0!r} writable].format, parameter[name[handler]]]]]
<ast.AugAssign object at 0x7da1b2346530>
if compare[call[name[self]._handlers.get, parameter[name[fileno], constant[None]]] equal[==] name[events]] begin[:]
return[None]
call[name[self]._handlers][name[fileno]] assign[=] name[events]
if name[events] begin[:]
call[name[logger].debug, parameter[call[constant[ registering {0!r} handler fileno {1} for events {2}].format, parameter[name[handler], name[fileno], name[events]]]]]
if name[update] begin[:]
call[name[self].io_loop.update_handler, parameter[name[fileno], name[events]]]
|
keyword[def] identifier[_configure_io_handler] ( identifier[self] , identifier[handler] ):
literal[string]
keyword[if] identifier[self] . identifier[check_events] ():
keyword[return]
keyword[if] identifier[handler] keyword[in] identifier[self] . identifier[_unprepared_handlers] :
identifier[old_fileno] = identifier[self] . identifier[_unprepared_handlers] [ identifier[handler] ]
identifier[prepared] = identifier[self] . identifier[_prepare_io_handler] ( identifier[handler] )
keyword[else] :
identifier[old_fileno] = keyword[None]
identifier[prepared] = keyword[True]
identifier[fileno] = identifier[handler] . identifier[fileno] ()
keyword[if] identifier[old_fileno] keyword[is] keyword[not] keyword[None] keyword[and] identifier[fileno] != identifier[old_fileno] :
keyword[del] identifier[self] . identifier[_handlers] [ identifier[old_fileno] ]
identifier[self] . identifier[io_loop] . identifier[remove_handler] ( identifier[old_fileno] )
keyword[if] keyword[not] identifier[prepared] :
identifier[self] . identifier[_unprepared_handlers] [ identifier[handler] ]= identifier[fileno]
keyword[if] keyword[not] identifier[fileno] :
keyword[return]
identifier[update] = identifier[fileno] keyword[in] identifier[self] . identifier[_handlers]
identifier[events] = identifier[ioloop] . identifier[IOLoop] . identifier[NONE]
keyword[if] identifier[handler] . identifier[is_readable] ():
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[handler] ))
identifier[events] |= identifier[ioloop] . identifier[IOLoop] . identifier[READ]
keyword[if] identifier[handler] . identifier[is_writable] ():
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[handler] ))
identifier[events] |= identifier[ioloop] . identifier[IOLoop] . identifier[WRITE]
keyword[if] identifier[self] . identifier[_handlers] . identifier[get] ( identifier[fileno] , keyword[None] )== identifier[events] :
keyword[return]
identifier[self] . identifier[_handlers] [ identifier[fileno] ]= identifier[events]
keyword[if] identifier[events] :
identifier[logger] . identifier[debug] ( literal[string]
literal[string] . identifier[format] ( identifier[handler] , identifier[fileno] , identifier[events] ))
keyword[if] identifier[update] :
identifier[self] . identifier[io_loop] . identifier[update_handler] ( identifier[fileno] , identifier[events] )
keyword[else] :
identifier[self] . identifier[io_loop] . identifier[add_handler] (
identifier[fileno] , identifier[partial] ( identifier[self] . identifier[_handle_event] , identifier[handler] ), identifier[events]
)
|
def _configure_io_handler(self, handler):
"""Register an io-handler at the polling object."""
if self.check_events():
return # depends on [control=['if'], data=[]]
if handler in self._unprepared_handlers:
old_fileno = self._unprepared_handlers[handler]
prepared = self._prepare_io_handler(handler) # depends on [control=['if'], data=['handler']]
else:
old_fileno = None
prepared = True
fileno = handler.fileno()
if old_fileno is not None and fileno != old_fileno:
del self._handlers[old_fileno]
# remove_handler won't raise something like KeyError if the fd
# isn't registered; it will just print a debug log.
self.io_loop.remove_handler(old_fileno) # depends on [control=['if'], data=[]]
if not prepared:
self._unprepared_handlers[handler] = fileno # depends on [control=['if'], data=[]]
if not fileno:
return # depends on [control=['if'], data=[]]
update = fileno in self._handlers
events = ioloop.IOLoop.NONE
if handler.is_readable():
logger.debug(' {0!r} readable'.format(handler))
events |= ioloop.IOLoop.READ # depends on [control=['if'], data=[]]
if handler.is_writable():
logger.debug(' {0!r} writable'.format(handler))
events |= ioloop.IOLoop.WRITE # depends on [control=['if'], data=[]]
if self._handlers.get(fileno, None) == events:
return # depends on [control=['if'], data=[]]
self._handlers[fileno] = events
if events:
logger.debug(' registering {0!r} handler fileno {1} for events {2}'.format(handler, fileno, events))
if update:
self.io_loop.update_handler(fileno, events) # depends on [control=['if'], data=[]]
else:
self.io_loop.add_handler(fileno, partial(self._handle_event, handler), events) # depends on [control=['if'], data=[]]
|
def multiplypub(pub, priv, outcompressed=True):
    """
    Multiply a public key point by a private key scalar.

    Input pubkey must be a hex string and a valid pubkey; it may be
    compressed or uncompressed (its compression does not affect the
    result in any way).  Use the validatepubkey() function to validate
    the public key first.  Input privkey must be a 64-char hex string.
    Returns the resulting public key, compressed unless
    ``outcompressed`` is False.
    """
    uncompressed = uncompress(pub) if len(pub) == 66 else pub
    point_x = int(uncompressed[2:66], 16)
    point_y = int(uncompressed[66:], 16)
    rx, ry = ecmultiply(point_x, point_y, int(priv, 16))
    result = '04' + dechex(rx, 32) + dechex(ry, 32)
    return compress(result) if outcompressed else result
|
def function[multiplypub, parameter[pub, priv, outcompressed]]:
constant[
Input pubkey must be hex string and valid pubkey.
Input privkey must be 64-char hex string.
Pubkey input can be compressed or uncompressed, as long as it's a
valid key and a hex string. Use the validatepubkey() function to
validate the public key first. The compression of the input
public key does not do anything or matter in any way.
]
if compare[call[name[len], parameter[name[pub]]] equal[==] constant[66]] begin[:]
variable[pub] assign[=] call[name[uncompress], parameter[name[pub]]]
<ast.Tuple object at 0x7da1b1437250> assign[=] call[name[ecmultiply], parameter[call[name[int], parameter[call[name[pub]][<ast.Slice object at 0x7da1b14374f0>], constant[16]]], call[name[int], parameter[call[name[pub]][<ast.Slice object at 0x7da1b1437790>], constant[16]]], call[name[int], parameter[name[priv], constant[16]]]]]
variable[x] assign[=] call[name[dechex], parameter[name[x], constant[32]]]
variable[y] assign[=] call[name[dechex], parameter[name[y], constant[32]]]
variable[o] assign[=] binary_operation[binary_operation[constant[04] + name[x]] + name[y]]
if name[outcompressed] begin[:]
return[call[name[compress], parameter[name[o]]]]
|
keyword[def] identifier[multiplypub] ( identifier[pub] , identifier[priv] , identifier[outcompressed] = keyword[True] ):
literal[string]
keyword[if] identifier[len] ( identifier[pub] )== literal[int] :
identifier[pub] = identifier[uncompress] ( identifier[pub] )
identifier[x] , identifier[y] = identifier[ecmultiply] ( identifier[int] ( identifier[pub] [ literal[int] : literal[int] ], literal[int] ), identifier[int] ( identifier[pub] [ literal[int] :], literal[int] ), identifier[int] ( identifier[priv] , literal[int] ))
identifier[x] = identifier[dechex] ( identifier[x] , literal[int] )
identifier[y] = identifier[dechex] ( identifier[y] , literal[int] )
identifier[o] = literal[string] + identifier[x] + identifier[y]
keyword[if] identifier[outcompressed] :
keyword[return] identifier[compress] ( identifier[o] )
keyword[else] :
keyword[return] identifier[o]
|
def multiplypub(pub, priv, outcompressed=True):
"""
Input pubkey must be hex string and valid pubkey.
Input privkey must be 64-char hex string.
Pubkey input can be compressed or uncompressed, as long as it's a
valid key and a hex string. Use the validatepubkey() function to
validate the public key first. The compression of the input
public key does not do anything or matter in any way.
"""
if len(pub) == 66:
pub = uncompress(pub) # depends on [control=['if'], data=[]]
(x, y) = ecmultiply(int(pub[2:66], 16), int(pub[66:], 16), int(priv, 16))
x = dechex(x, 32)
y = dechex(y, 32)
o = '04' + x + y
if outcompressed:
return compress(o) # depends on [control=['if'], data=[]]
else:
return o
|
def count(self, axis='major'):
    """
    Return number of observations over requested axis.
    Parameters
    ----------
    axis : {'items', 'major', 'minor'} or {0, 1, 2}
    Returns
    -------
    count : DataFrame
    """
    axis_number = self._get_axis_number(axis)
    finite_mask = np.isfinite(self.values)
    counts = finite_mask.sum(axis=axis_number, dtype='int64')
    return self._wrap_result(counts, axis)
|
def function[count, parameter[self, axis]]:
constant[
Return number of observations over requested axis.
Parameters
----------
axis : {'items', 'major', 'minor'} or {0, 1, 2}
Returns
-------
count : DataFrame
]
variable[i] assign[=] call[name[self]._get_axis_number, parameter[name[axis]]]
variable[values] assign[=] name[self].values
variable[mask] assign[=] call[name[np].isfinite, parameter[name[values]]]
variable[result] assign[=] call[name[mask].sum, parameter[]]
return[call[name[self]._wrap_result, parameter[name[result], name[axis]]]]
|
keyword[def] identifier[count] ( identifier[self] , identifier[axis] = literal[string] ):
literal[string]
identifier[i] = identifier[self] . identifier[_get_axis_number] ( identifier[axis] )
identifier[values] = identifier[self] . identifier[values]
identifier[mask] = identifier[np] . identifier[isfinite] ( identifier[values] )
identifier[result] = identifier[mask] . identifier[sum] ( identifier[axis] = identifier[i] , identifier[dtype] = literal[string] )
keyword[return] identifier[self] . identifier[_wrap_result] ( identifier[result] , identifier[axis] )
|
def count(self, axis='major'):
"""
Return number of observations over requested axis.
Parameters
----------
axis : {'items', 'major', 'minor'} or {0, 1, 2}
Returns
-------
count : DataFrame
"""
i = self._get_axis_number(axis)
values = self.values
mask = np.isfinite(values)
result = mask.sum(axis=i, dtype='int64')
return self._wrap_result(result, axis)
|
def export(self, file_type=None):
    """
    Export a snapshot of the current scene.

    Parameters
    ----------
    file_type: str, dict, or None
      Encoding to use for meshes, e.g. 'dict', 'dict64', 'stl'.
      May also be a dict mapping class names to per-class formats,
      or None to use the default {'Trimesh': 'ply', 'Path2D': 'dxf'}.

    Returns
    ----------
    export: dict with keys:
      graph: edge list of transforms, eg:
             ((u, v, {'matrix' : np.eye(4)}))
      geometry: dict of geometry name -> encoded geometry
      scene_cache: cached bounds/extents/centroid/scale
    """
    if file_type is not None:
        # only normalize real values: str(None) would become the string
        # 'none', which made the `file_type is None` default branch
        # below unreachable and passed 'none' to the mesh exporters
        file_type = str(file_type).strip().lower()
        if file_type == 'gltf':
            return gltf.export_gltf(self)
        elif file_type == 'glb':
            return gltf.export_glb(self)
    export = {'graph': self.graph.to_edgelist(),
              'geometry': {},
              'scene_cache': {'bounds': self.bounds.tolist(),
                              'extents': self.extents.tolist(),
                              'centroid': self.centroid.tolist(),
                              'scale': self.scale}}
    if file_type is None:
        file_type = {'Trimesh': 'ply',
                     'Path2D': 'dxf'}
    # if the mesh has an export method use it
    # otherwise put the mesh
    # itself into the export object
    for geometry_name, geometry in self.geometry.items():
        if hasattr(geometry, 'export'):
            if isinstance(file_type, dict):
                # case where we have export types that are different
                # for different classes of objects.
                export_type = None
                for query_class, query_format in file_type.items():
                    if util.is_instance_named(geometry, query_class):
                        export_type = query_format
                        break
                if export_type is None:
                    # previously a non-matching class silently reused the
                    # previous geometry's format (or raised NameError on
                    # the first iteration); fail loudly instead
                    raise ValueError(
                        'no export format specified for {}'.format(
                            geometry_name))
            else:
                # if file_type is not a dict, try to export everything in the
                # the scene as that value like 'ply'
                export_type = file_type
            exported = {'data': geometry.export(file_type=export_type),
                        'file_type': export_type}
            export['geometry'][geometry_name] = exported
        else:
            # case where mesh object doesn't have exporter
            # might be that someone replaced the mesh with a URL
            export['geometry'][geometry_name] = geometry
    return export
|
def function[export, parameter[self, file_type]]:
constant[
Export a snapshot of the current scene.
Parameters
----------
file_type: what encoding to use for meshes
ie: dict, dict64, stl
Returns
----------
export: dict with keys:
meshes: list of meshes, encoded as per file_type
transforms: edge list of transforms, eg:
((u, v, {'matrix' : np.eye(4)}))
]
variable[file_type] assign[=] call[call[call[name[str], parameter[name[file_type]]].strip, parameter[]].lower, parameter[]]
if compare[name[file_type] equal[==] constant[gltf]] begin[:]
return[call[name[gltf].export_gltf, parameter[name[self]]]]
variable[export] assign[=] dictionary[[<ast.Constant object at 0x7da1b22d1bd0>, <ast.Constant object at 0x7da1b22d2e60>, <ast.Constant object at 0x7da1b22d29e0>], [<ast.Call object at 0x7da1b22d3eb0>, <ast.Dict object at 0x7da1b22d3ca0>, <ast.Dict object at 0x7da1b22d3040>]]
if compare[name[file_type] is constant[None]] begin[:]
variable[file_type] assign[=] dictionary[[<ast.Constant object at 0x7da1b22d18a0>, <ast.Constant object at 0x7da1b22d3160>], [<ast.Constant object at 0x7da1b22d2f80>, <ast.Constant object at 0x7da1b22d1c90>]]
for taget[tuple[[<ast.Name object at 0x7da1b22d3280>, <ast.Name object at 0x7da1b22d1b40>]]] in starred[call[name[self].geometry.items, parameter[]]] begin[:]
if call[name[hasattr], parameter[name[geometry], constant[export]]] begin[:]
if call[name[isinstance], parameter[name[file_type], name[dict]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b22d1780>, <ast.Name object at 0x7da1b22d2bc0>]]] in starred[call[name[file_type].items, parameter[]]] begin[:]
if call[name[util].is_instance_named, parameter[name[geometry], name[query_class]]] begin[:]
variable[export_type] assign[=] name[query_format]
break
variable[exported] assign[=] dictionary[[<ast.Constant object at 0x7da1b22d2da0>, <ast.Constant object at 0x7da1b22d1300>], [<ast.Call object at 0x7da1b22d2500>, <ast.Name object at 0x7da1b22d11b0>]]
call[call[name[export]][constant[geometry]]][name[geometry_name]] assign[=] name[exported]
return[name[export]]
|
keyword[def] identifier[export] ( identifier[self] , identifier[file_type] = keyword[None] ):
literal[string]
identifier[file_type] = identifier[str] ( identifier[file_type] ). identifier[strip] (). identifier[lower] ()
keyword[if] identifier[file_type] == literal[string] :
keyword[return] identifier[gltf] . identifier[export_gltf] ( identifier[self] )
keyword[elif] identifier[file_type] == literal[string] :
keyword[return] identifier[gltf] . identifier[export_glb] ( identifier[self] )
identifier[export] ={ literal[string] : identifier[self] . identifier[graph] . identifier[to_edgelist] (),
literal[string] :{},
literal[string] :{ literal[string] : identifier[self] . identifier[bounds] . identifier[tolist] (),
literal[string] : identifier[self] . identifier[extents] . identifier[tolist] (),
literal[string] : identifier[self] . identifier[centroid] . identifier[tolist] (),
literal[string] : identifier[self] . identifier[scale] }}
keyword[if] identifier[file_type] keyword[is] keyword[None] :
identifier[file_type] ={ literal[string] : literal[string] ,
literal[string] : literal[string] }
keyword[for] identifier[geometry_name] , identifier[geometry] keyword[in] identifier[self] . identifier[geometry] . identifier[items] ():
keyword[if] identifier[hasattr] ( identifier[geometry] , literal[string] ):
keyword[if] identifier[isinstance] ( identifier[file_type] , identifier[dict] ):
keyword[for] identifier[query_class] , identifier[query_format] keyword[in] identifier[file_type] . identifier[items] ():
keyword[if] identifier[util] . identifier[is_instance_named] ( identifier[geometry] , identifier[query_class] ):
identifier[export_type] = identifier[query_format]
keyword[break]
keyword[else] :
identifier[export_type] = identifier[file_type]
identifier[exported] ={ literal[string] : identifier[geometry] . identifier[export] ( identifier[file_type] = identifier[export_type] ),
literal[string] : identifier[export_type] }
identifier[export] [ literal[string] ][ identifier[geometry_name] ]= identifier[exported]
keyword[else] :
identifier[export] [ literal[string] ][ identifier[geometry_name] ]= identifier[geometry]
keyword[return] identifier[export]
|
def export(self, file_type=None):
"""
Export a snapshot of the current scene.
Parameters
----------
file_type: what encoding to use for meshes
ie: dict, dict64, stl
Returns
----------
export: dict with keys:
meshes: list of meshes, encoded as per file_type
transforms: edge list of transforms, eg:
((u, v, {'matrix' : np.eye(4)}))
"""
file_type = str(file_type).strip().lower()
if file_type == 'gltf':
return gltf.export_gltf(self) # depends on [control=['if'], data=[]]
elif file_type == 'glb':
return gltf.export_glb(self) # depends on [control=['if'], data=[]]
export = {'graph': self.graph.to_edgelist(), 'geometry': {}, 'scene_cache': {'bounds': self.bounds.tolist(), 'extents': self.extents.tolist(), 'centroid': self.centroid.tolist(), 'scale': self.scale}}
if file_type is None:
file_type = {'Trimesh': 'ply', 'Path2D': 'dxf'} # depends on [control=['if'], data=['file_type']]
# if the mesh has an export method use it
# otherwise put the mesh
# itself into the export object
for (geometry_name, geometry) in self.geometry.items():
if hasattr(geometry, 'export'):
if isinstance(file_type, dict):
# case where we have export types that are different
# for different classes of objects.
for (query_class, query_format) in file_type.items():
if util.is_instance_named(geometry, query_class):
export_type = query_format
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
# if file_type is not a dict, try to export everything in the
# the scene as that value like 'ply'
export_type = file_type
exported = {'data': geometry.export(file_type=export_type), 'file_type': export_type}
export['geometry'][geometry_name] = exported # depends on [control=['if'], data=[]]
else:
# case where mesh object doesn't have exporter
# might be that someone replaced the mesh with a URL
export['geometry'][geometry_name] = geometry # depends on [control=['for'], data=[]]
return export
|
def search(search, **kwargs):
    """
    Search for models whose names matches the given pattern. Print the
    results to stdout.
    .. deprecated :: 1.0.0
        `search` will be moved to ``andeshelp`` in future versions.
    Parameters
    ----------
    search : str
        Partial or full name of the model to search for
    kwargs : dict
        Other keyword arguments.
    Returns
    -------
    list
        The list of model names that match the given pattern.
    """
    from .models import all_models
    matches = []
    if not search:
        return matches
    needle = search.lower()
    for file_name in sorted(all_models.keys()):
        # models within each file, in alphabetical order
        for model_name in sorted(all_models[file_name].keys()):
            if needle in model_name.lower():
                matches.append('{}.{}'.format(file_name, model_name))
    if matches:
        print('Search result: <file.model> containing <{}>'
              .format(search))
        print(' '.join(matches))
    else:
        print('No model containing <{:s}> found'.format(search))
    return matches
|
def function[search, parameter[search]]:
constant[
Search for models whose names matches the given pattern. Print the
results to stdout.
.. deprecated :: 1.0.0
`search` will be moved to ``andeshelp`` in future versions.
Parameters
----------
search : str
Partial or full name of the model to search for
kwargs : dict
Other keyword arguments.
Returns
-------
list
The list of model names that match the given pattern.
]
from relative_module[models] import module[all_models]
variable[out] assign[=] list[[]]
if <ast.UnaryOp object at 0x7da20cabc250> begin[:]
return[name[out]]
variable[keys] assign[=] call[name[sorted], parameter[call[name[list], parameter[call[name[all_models].keys, parameter[]]]]]]
for taget[name[key]] in starred[name[keys]] begin[:]
variable[vals] assign[=] call[name[all_models]][name[key]]
variable[val] assign[=] call[name[list], parameter[call[name[vals].keys, parameter[]]]]
variable[val] assign[=] call[name[sorted], parameter[name[val]]]
for taget[name[item]] in starred[name[val]] begin[:]
if compare[call[name[search].lower, parameter[]] in call[name[item].lower, parameter[]]] begin[:]
call[name[out].append, parameter[binary_operation[binary_operation[name[key] + constant[.]] + name[item]]]]
if name[out] begin[:]
call[name[print], parameter[call[constant[Search result: <file.model> containing <{}>].format, parameter[name[search]]]]]
call[name[print], parameter[call[constant[ ].join, parameter[name[out]]]]]
return[name[out]]
|
keyword[def] identifier[search] ( identifier[search] ,** identifier[kwargs] ):
literal[string]
keyword[from] . identifier[models] keyword[import] identifier[all_models]
identifier[out] =[]
keyword[if] keyword[not] identifier[search] :
keyword[return] identifier[out]
identifier[keys] = identifier[sorted] ( identifier[list] ( identifier[all_models] . identifier[keys] ()))
keyword[for] identifier[key] keyword[in] identifier[keys] :
identifier[vals] = identifier[all_models] [ identifier[key] ]
identifier[val] = identifier[list] ( identifier[vals] . identifier[keys] ())
identifier[val] = identifier[sorted] ( identifier[val] )
keyword[for] identifier[item] keyword[in] identifier[val] :
keyword[if] identifier[search] . identifier[lower] () keyword[in] identifier[item] . identifier[lower] ():
identifier[out] . identifier[append] ( identifier[key] + literal[string] + identifier[item] )
keyword[if] identifier[out] :
identifier[print] ( literal[string]
. identifier[format] ( identifier[search] ))
identifier[print] ( literal[string] . identifier[join] ( identifier[out] ))
keyword[else] :
identifier[print] ( literal[string] . identifier[format] ( identifier[search] ))
keyword[return] identifier[out]
|
def search(search, **kwargs):
"""
Search for models whose names matches the given pattern. Print the
results to stdout.
.. deprecated :: 1.0.0
`search` will be moved to ``andeshelp`` in future versions.
Parameters
----------
search : str
Partial or full name of the model to search for
kwargs : dict
Other keyword arguments.
Returns
-------
list
The list of model names that match the given pattern.
"""
from .models import all_models
out = []
if not search:
return out # depends on [control=['if'], data=[]]
keys = sorted(list(all_models.keys()))
for key in keys:
vals = all_models[key]
val = list(vals.keys())
val = sorted(val)
for item in val:
if search.lower() in item.lower():
out.append(key + '.' + item) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']] # depends on [control=['for'], data=['key']]
if out:
print('Search result: <file.model> containing <{}>'.format(search))
print(' '.join(out)) # depends on [control=['if'], data=[]]
else:
print('No model containing <{:s}> found'.format(search))
return out
|
def _wait_for_read_ready_or_timeout(self, timeout):
    """Returns tuple of whether stdin is ready to read and an event.

    If an event is returned, that event is more pressing than reading
    bytes on stdin to create a keyboard input event.
    If stdin is ready, either there are bytes to read or a SIGTSTP
    triggered by dsusp has been received.

    ``timeout`` is in seconds; None blocks indefinitely.
    """
    remaining_timeout = timeout
    t0 = time.time()
    while True:
        try:
            # wait on stdin plus any extra reader fds used to wake us up
            (rs, _, _) = select.select(
                [self.in_stream.fileno()] + self.readers,
                [], [], remaining_timeout)
            if not rs:
                # select timed out: nothing ready, no event
                return False, None
            r = rs[0]  # if there's more than one, get it in the next loop
            if r == self.in_stream.fileno():
                return True, None
            else:
                # a wakeup fd fired: drain it so it doesn't stay readable
                os.read(r, 1024)
            if self.queued_interrupting_events:
                return False, self.queued_interrupting_events.pop(0)
            elif remaining_timeout is not None:
                # shrink the timeout by the time already spent waiting
                remaining_timeout = max(0, t0 + timeout - time.time())
                continue
            else:
                continue
        except select.error:
            # select was interrupted, most likely by a signal (EINTR)
            if self.sigints:
                return False, self.sigints.pop()
            if remaining_timeout is not None:
                remaining_timeout = max(timeout - (time.time() - t0), 0)
|
def function[_wait_for_read_ready_or_timeout, parameter[self, timeout]]:
constant[Returns tuple of whether stdin is ready to read and an event.
If an event is returned, that event is more pressing than reading
bytes on stdin to create a keyboard input event.
If stdin is ready, either there are bytes to read or a SIGTSTP
triggered by dsusp has been received]
variable[remaining_timeout] assign[=] name[timeout]
variable[t0] assign[=] call[name[time].time, parameter[]]
while constant[True] begin[:]
<ast.Try object at 0x7da1b1040070>
|
keyword[def] identifier[_wait_for_read_ready_or_timeout] ( identifier[self] , identifier[timeout] ):
literal[string]
identifier[remaining_timeout] = identifier[timeout]
identifier[t0] = identifier[time] . identifier[time] ()
keyword[while] keyword[True] :
keyword[try] :
( identifier[rs] , identifier[_] , identifier[_] )= identifier[select] . identifier[select] (
[ identifier[self] . identifier[in_stream] . identifier[fileno] ()]+ identifier[self] . identifier[readers] ,
[],[], identifier[remaining_timeout] )
keyword[if] keyword[not] identifier[rs] :
keyword[return] keyword[False] , keyword[None]
identifier[r] = identifier[rs] [ literal[int] ]
keyword[if] identifier[r] == identifier[self] . identifier[in_stream] . identifier[fileno] ():
keyword[return] keyword[True] , keyword[None]
keyword[else] :
identifier[os] . identifier[read] ( identifier[r] , literal[int] )
keyword[if] identifier[self] . identifier[queued_interrupting_events] :
keyword[return] keyword[False] , identifier[self] . identifier[queued_interrupting_events] . identifier[pop] ( literal[int] )
keyword[elif] identifier[remaining_timeout] keyword[is] keyword[not] keyword[None] :
identifier[remaining_timeout] = identifier[max] ( literal[int] , identifier[t0] + identifier[timeout] - identifier[time] . identifier[time] ())
keyword[continue]
keyword[else] :
keyword[continue]
keyword[except] identifier[select] . identifier[error] :
keyword[if] identifier[self] . identifier[sigints] :
keyword[return] keyword[False] , identifier[self] . identifier[sigints] . identifier[pop] ()
keyword[if] identifier[remaining_timeout] keyword[is] keyword[not] keyword[None] :
identifier[remaining_timeout] = identifier[max] ( identifier[timeout] -( identifier[time] . identifier[time] ()- identifier[t0] ), literal[int] )
|
def _wait_for_read_ready_or_timeout(self, timeout):
"""Returns tuple of whether stdin is ready to read and an event.
If an event is returned, that event is more pressing than reading
bytes on stdin to create a keyboard input event.
If stdin is ready, either there are bytes to read or a SIGTSTP
triggered by dsusp has been received"""
remaining_timeout = timeout
t0 = time.time()
while True:
try:
(rs, _, _) = select.select([self.in_stream.fileno()] + self.readers, [], [], remaining_timeout)
if not rs:
return (False, None) # depends on [control=['if'], data=[]]
r = rs[0] # if there's more than one, get it in the next loop
if r == self.in_stream.fileno():
return (True, None) # depends on [control=['if'], data=[]]
else:
os.read(r, 1024)
if self.queued_interrupting_events:
return (False, self.queued_interrupting_events.pop(0)) # depends on [control=['if'], data=[]]
elif remaining_timeout is not None:
remaining_timeout = max(0, t0 + timeout - time.time())
continue # depends on [control=['if'], data=['remaining_timeout']]
else:
continue # depends on [control=['try'], data=[]]
except select.error:
if self.sigints:
return (False, self.sigints.pop()) # depends on [control=['if'], data=[]]
if remaining_timeout is not None:
remaining_timeout = max(timeout - (time.time() - t0), 0) # depends on [control=['if'], data=['remaining_timeout']] # depends on [control=['except'], data=[]] # depends on [control=['while'], data=[]]
|
def _run_smoove(full_bams, sr_bams, disc_bams, work_dir, items):
    """Run lumpy-sv using smoove.

    Calls structural variants for the batch of samples in ``items``,
    writing a genotyped, bgzipped VCF into ``work_dir``.

    Returns a tuple ``(out_file, sv_exclude_bed)`` of the genotyped
    VCF path and the exclusion BED file path.
    """
    batch = sshared.get_cur_batch(items)
    ext = "-%s-svs" % batch if batch else "-svs"
    name = "%s%s" % (dd.get_sample_name(items[0]), ext)
    out_file = os.path.join(work_dir, "%s-smoove.genotyped.vcf.gz" % name)
    sv_exclude_bed = sshared.prepare_exclude_file(items, out_file)
    # reuse output produced under an older naming scheme, if present
    old_out_file = os.path.join(work_dir, "%s%s-prep.vcf.gz"
                                % (os.path.splitext(os.path.basename(items[0]["align_bam"]))[0], ext))
    if utils.file_exists(old_out_file):
        return old_out_file, sv_exclude_bed
    if not utils.file_exists(out_file):
        with file_transaction(items[0], out_file) as tx_out_file:
            cores = dd.get_num_cores(items[0])
            out_dir = os.path.dirname(tx_out_file)
            ref_file = dd.get_ref_file(items[0])
            full_bams = " ".join(_prepare_smoove_bams(full_bams, sr_bams, disc_bams, items,
                                                      os.path.dirname(tx_out_file)))
            # patterns for non-primary contigs: unplaced, HLA, alts, decoys
            std_excludes = ["~^GL", "~^HLA", "~_random", "~^chrUn", "~alt", "~decoy"]
            def _is_std_exclude(n):
                # True if contig name matches one of the standard excludes
                clean_excludes = [x.replace("~", "").replace("^", "") for x in std_excludes]
                return any([n.startswith(x) or n.endswith(x) for x in clean_excludes])
            exclude_chrs = [c.name for c in ref.file_contigs(ref_file)
                            if not chromhacks.is_nonalt(c.name) and not _is_std_exclude(c.name)]
            exclude_chrs = "--excludechroms '%s'" % ",".join(std_excludes + exclude_chrs)
            exclude_bed = ("--exclude %s" % sv_exclude_bed) if utils.file_exists(sv_exclude_bed) else ""
            tempdir = os.path.dirname(tx_out_file)
            cmd = ("export TMPDIR={tempdir} && "
                   "smoove call --processes {cores} --genotype --removepr --fasta {ref_file} "
                   "--name {name} --outdir {out_dir} "
                   "{exclude_bed} {exclude_chrs} {full_bams}")
            with utils.chdir(tempdir):
                try:
                    do.run(cmd.format(**locals()), "smoove lumpy calling", items[0])
                except subprocess.CalledProcessError as msg:
                    if _allowed_errors(msg):
                        # known benign smoove failure: emit an empty VCF
                        vcfutils.write_empty_vcf(tx_out_file, config=items[0]["config"],
                                                 samples=[dd.get_sample_name(d) for d in items])
                    else:
                        # pass an explicit message: stdlib logging's
                        # Logger.exception() requires one, so the previous
                        # bare logger.exception() raised TypeError here and
                        # masked the real error (logbook tolerates no args --
                        # TODO confirm which logger backend this is)
                        logger.exception("smoove lumpy calling failed")
                        raise
    vcfutils.bgzip_and_index(out_file, items[0]["config"])
    return out_file, sv_exclude_bed
|
def function[_run_smoove, parameter[full_bams, sr_bams, disc_bams, work_dir, items]]:
constant[Run lumpy-sv using smoove.
]
variable[batch] assign[=] call[name[sshared].get_cur_batch, parameter[name[items]]]
variable[ext] assign[=] <ast.IfExp object at 0x7da18f00fb20>
variable[name] assign[=] binary_operation[constant[%s%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da18f00df00>, <ast.Name object at 0x7da1b23450f0>]]]
variable[out_file] assign[=] call[name[os].path.join, parameter[name[work_dir], binary_operation[constant[%s-smoove.genotyped.vcf.gz] <ast.Mod object at 0x7da2590d6920> name[name]]]]
variable[sv_exclude_bed] assign[=] call[name[sshared].prepare_exclude_file, parameter[name[items], name[out_file]]]
variable[old_out_file] assign[=] call[name[os].path.join, parameter[name[work_dir], binary_operation[constant[%s%s-prep.vcf.gz] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da1b2347f10>, <ast.Name object at 0x7da1b18bcc10>]]]]]
if call[name[utils].file_exists, parameter[name[old_out_file]]] begin[:]
return[tuple[[<ast.Name object at 0x7da1b18bd0f0>, <ast.Name object at 0x7da1b18be560>]]]
if <ast.UnaryOp object at 0x7da1b18bef20> begin[:]
with call[name[file_transaction], parameter[call[name[items]][constant[0]], name[out_file]]] begin[:]
variable[cores] assign[=] call[name[dd].get_num_cores, parameter[call[name[items]][constant[0]]]]
variable[out_dir] assign[=] call[name[os].path.dirname, parameter[name[tx_out_file]]]
variable[ref_file] assign[=] call[name[dd].get_ref_file, parameter[call[name[items]][constant[0]]]]
variable[full_bams] assign[=] call[constant[ ].join, parameter[call[name[_prepare_smoove_bams], parameter[name[full_bams], name[sr_bams], name[disc_bams], name[items], call[name[os].path.dirname, parameter[name[tx_out_file]]]]]]]
variable[std_excludes] assign[=] list[[<ast.Constant object at 0x7da1b18bf6a0>, <ast.Constant object at 0x7da1b18bfe20>, <ast.Constant object at 0x7da1b18bccd0>, <ast.Constant object at 0x7da1b18bf490>, <ast.Constant object at 0x7da1b18bd900>, <ast.Constant object at 0x7da1b18bd990>]]
def function[_is_std_exclude, parameter[n]]:
variable[clean_excludes] assign[=] <ast.ListComp object at 0x7da1b18be290>
return[call[name[any], parameter[<ast.ListComp object at 0x7da1b18be710>]]]
variable[exclude_chrs] assign[=] <ast.ListComp object at 0x7da1b18bc100>
variable[exclude_chrs] assign[=] binary_operation[constant[--excludechroms '%s'] <ast.Mod object at 0x7da2590d6920> call[constant[,].join, parameter[binary_operation[name[std_excludes] + name[exclude_chrs]]]]]
variable[exclude_bed] assign[=] <ast.IfExp object at 0x7da1b18bddb0>
variable[tempdir] assign[=] call[name[os].path.dirname, parameter[name[tx_out_file]]]
variable[cmd] assign[=] constant[export TMPDIR={tempdir} && smoove call --processes {cores} --genotype --removepr --fasta {ref_file} --name {name} --outdir {out_dir} {exclude_bed} {exclude_chrs} {full_bams}]
with call[name[utils].chdir, parameter[name[tempdir]]] begin[:]
<ast.Try object at 0x7da1b18be9e0>
call[name[vcfutils].bgzip_and_index, parameter[name[out_file], call[call[name[items]][constant[0]]][constant[config]]]]
return[tuple[[<ast.Name object at 0x7da1b18a36d0>, <ast.Name object at 0x7da1b18a3a60>]]]
|
keyword[def] identifier[_run_smoove] ( identifier[full_bams] , identifier[sr_bams] , identifier[disc_bams] , identifier[work_dir] , identifier[items] ):
literal[string]
identifier[batch] = identifier[sshared] . identifier[get_cur_batch] ( identifier[items] )
identifier[ext] = literal[string] % identifier[batch] keyword[if] identifier[batch] keyword[else] literal[string]
identifier[name] = literal[string] %( identifier[dd] . identifier[get_sample_name] ( identifier[items] [ literal[int] ]), identifier[ext] )
identifier[out_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[work_dir] , literal[string] % identifier[name] )
identifier[sv_exclude_bed] = identifier[sshared] . identifier[prepare_exclude_file] ( identifier[items] , identifier[out_file] )
identifier[old_out_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[work_dir] , literal[string]
%( identifier[os] . identifier[path] . identifier[splitext] ( identifier[os] . identifier[path] . identifier[basename] ( identifier[items] [ literal[int] ][ literal[string] ]))[ literal[int] ], identifier[ext] ))
keyword[if] identifier[utils] . identifier[file_exists] ( identifier[old_out_file] ):
keyword[return] identifier[old_out_file] , identifier[sv_exclude_bed]
keyword[if] keyword[not] identifier[utils] . identifier[file_exists] ( identifier[out_file] ):
keyword[with] identifier[file_transaction] ( identifier[items] [ literal[int] ], identifier[out_file] ) keyword[as] identifier[tx_out_file] :
identifier[cores] = identifier[dd] . identifier[get_num_cores] ( identifier[items] [ literal[int] ])
identifier[out_dir] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[tx_out_file] )
identifier[ref_file] = identifier[dd] . identifier[get_ref_file] ( identifier[items] [ literal[int] ])
identifier[full_bams] = literal[string] . identifier[join] ( identifier[_prepare_smoove_bams] ( identifier[full_bams] , identifier[sr_bams] , identifier[disc_bams] , identifier[items] ,
identifier[os] . identifier[path] . identifier[dirname] ( identifier[tx_out_file] )))
identifier[std_excludes] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]
keyword[def] identifier[_is_std_exclude] ( identifier[n] ):
identifier[clean_excludes] =[ identifier[x] . identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] ) keyword[for] identifier[x] keyword[in] identifier[std_excludes] ]
keyword[return] identifier[any] ([ identifier[n] . identifier[startswith] ( identifier[x] ) keyword[or] identifier[n] . identifier[endswith] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[clean_excludes] ])
identifier[exclude_chrs] =[ identifier[c] . identifier[name] keyword[for] identifier[c] keyword[in] identifier[ref] . identifier[file_contigs] ( identifier[ref_file] )
keyword[if] keyword[not] identifier[chromhacks] . identifier[is_nonalt] ( identifier[c] . identifier[name] ) keyword[and] keyword[not] identifier[_is_std_exclude] ( identifier[c] . identifier[name] )]
identifier[exclude_chrs] = literal[string] % literal[string] . identifier[join] ( identifier[std_excludes] + identifier[exclude_chrs] )
identifier[exclude_bed] =( literal[string] % identifier[sv_exclude_bed] ) keyword[if] identifier[utils] . identifier[file_exists] ( identifier[sv_exclude_bed] ) keyword[else] literal[string]
identifier[tempdir] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[tx_out_file] )
identifier[cmd] =( literal[string]
literal[string]
literal[string]
literal[string] )
keyword[with] identifier[utils] . identifier[chdir] ( identifier[tempdir] ):
keyword[try] :
identifier[do] . identifier[run] ( identifier[cmd] . identifier[format] (** identifier[locals] ()), literal[string] , identifier[items] [ literal[int] ])
keyword[except] identifier[subprocess] . identifier[CalledProcessError] keyword[as] identifier[msg] :
keyword[if] identifier[_allowed_errors] ( identifier[msg] ):
identifier[vcfutils] . identifier[write_empty_vcf] ( identifier[tx_out_file] , identifier[config] = identifier[items] [ literal[int] ][ literal[string] ],
identifier[samples] =[ identifier[dd] . identifier[get_sample_name] ( identifier[d] ) keyword[for] identifier[d] keyword[in] identifier[items] ])
keyword[else] :
identifier[logger] . identifier[exception] ()
keyword[raise]
identifier[vcfutils] . identifier[bgzip_and_index] ( identifier[out_file] , identifier[items] [ literal[int] ][ literal[string] ])
keyword[return] identifier[out_file] , identifier[sv_exclude_bed]
|
def _run_smoove(full_bams, sr_bams, disc_bams, work_dir, items):
"""Run lumpy-sv using smoove.
"""
batch = sshared.get_cur_batch(items)
ext = '-%s-svs' % batch if batch else '-svs'
name = '%s%s' % (dd.get_sample_name(items[0]), ext)
out_file = os.path.join(work_dir, '%s-smoove.genotyped.vcf.gz' % name)
sv_exclude_bed = sshared.prepare_exclude_file(items, out_file)
old_out_file = os.path.join(work_dir, '%s%s-prep.vcf.gz' % (os.path.splitext(os.path.basename(items[0]['align_bam']))[0], ext))
if utils.file_exists(old_out_file):
return (old_out_file, sv_exclude_bed) # depends on [control=['if'], data=[]]
if not utils.file_exists(out_file):
with file_transaction(items[0], out_file) as tx_out_file:
cores = dd.get_num_cores(items[0])
out_dir = os.path.dirname(tx_out_file)
ref_file = dd.get_ref_file(items[0])
full_bams = ' '.join(_prepare_smoove_bams(full_bams, sr_bams, disc_bams, items, os.path.dirname(tx_out_file)))
std_excludes = ['~^GL', '~^HLA', '~_random', '~^chrUn', '~alt', '~decoy']
def _is_std_exclude(n):
clean_excludes = [x.replace('~', '').replace('^', '') for x in std_excludes]
return any([n.startswith(x) or n.endswith(x) for x in clean_excludes])
exclude_chrs = [c.name for c in ref.file_contigs(ref_file) if not chromhacks.is_nonalt(c.name) and (not _is_std_exclude(c.name))]
exclude_chrs = "--excludechroms '%s'" % ','.join(std_excludes + exclude_chrs)
exclude_bed = '--exclude %s' % sv_exclude_bed if utils.file_exists(sv_exclude_bed) else ''
tempdir = os.path.dirname(tx_out_file)
cmd = 'export TMPDIR={tempdir} && smoove call --processes {cores} --genotype --removepr --fasta {ref_file} --name {name} --outdir {out_dir} {exclude_bed} {exclude_chrs} {full_bams}'
with utils.chdir(tempdir):
try:
do.run(cmd.format(**locals()), 'smoove lumpy calling', items[0]) # depends on [control=['try'], data=[]]
except subprocess.CalledProcessError as msg:
if _allowed_errors(msg):
vcfutils.write_empty_vcf(tx_out_file, config=items[0]['config'], samples=[dd.get_sample_name(d) for d in items]) # depends on [control=['if'], data=[]]
else:
logger.exception()
raise # depends on [control=['except'], data=['msg']] # depends on [control=['with'], data=[]] # depends on [control=['with'], data=['tx_out_file']] # depends on [control=['if'], data=[]]
vcfutils.bgzip_and_index(out_file, items[0]['config'])
return (out_file, sv_exclude_bed)
|
def _get_pos(self, key):
"""Get the index of the given key in the sorted key list.
We return the position with the nearest hash based on
the provided key unless we reach the end of the continuum/ring
in which case we return the 0 (beginning) index position.
:param key: the key to hash and look for.
"""
p = bisect(self.runtime._keys, self.hashi(key))
if p == len(self.runtime._keys):
return 0
else:
return p
|
def function[_get_pos, parameter[self, key]]:
constant[Get the index of the given key in the sorted key list.
We return the position with the nearest hash based on
the provided key unless we reach the end of the continuum/ring
in which case we return the 0 (beginning) index position.
:param key: the key to hash and look for.
]
variable[p] assign[=] call[name[bisect], parameter[name[self].runtime._keys, call[name[self].hashi, parameter[name[key]]]]]
if compare[name[p] equal[==] call[name[len], parameter[name[self].runtime._keys]]] begin[:]
return[constant[0]]
|
keyword[def] identifier[_get_pos] ( identifier[self] , identifier[key] ):
literal[string]
identifier[p] = identifier[bisect] ( identifier[self] . identifier[runtime] . identifier[_keys] , identifier[self] . identifier[hashi] ( identifier[key] ))
keyword[if] identifier[p] == identifier[len] ( identifier[self] . identifier[runtime] . identifier[_keys] ):
keyword[return] literal[int]
keyword[else] :
keyword[return] identifier[p]
|
def _get_pos(self, key):
"""Get the index of the given key in the sorted key list.
We return the position with the nearest hash based on
the provided key unless we reach the end of the continuum/ring
in which case we return the 0 (beginning) index position.
:param key: the key to hash and look for.
"""
p = bisect(self.runtime._keys, self.hashi(key))
if p == len(self.runtime._keys):
return 0 # depends on [control=['if'], data=[]]
else:
return p
|
def get_trunk_interfaces(devId, auth, url):
    """Function takes devId as input to RESTFULL call to HP IMC platform

    :param devId: output of get_dev_details
    :param auth: requests auth object (e.g. from IMCAuth)
    :param url: base URL of the IMC REST API
    :return: list of dictionaries of interfaces configured as an 802.1q
        trunk, or an error string on connection failure. A non-200 HTTP
        response returns None (preserving historical behaviour).

    Example:
    auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
    get_dev_asset_details("2", auth.creds, auth.url)
    """
    get_trunk_interfaces_url = "/imcrs/vlan/trunk?devId=" + str(devId) + "&start=1&size=5000&total=false"
    f_url = url + get_trunk_interfaces_url
    try:
        # BUG FIX: requests.get is the call that raises RequestException, so
        # it must live inside the try block. Previously it ran before the
        # try, meaning the handler below could never fire.
        r = requests.get(f_url, auth=auth, headers=HEADERS)
        if r.status_code == 200:
            dev_trunk_interfaces = json.loads(r.text)
            if len(dev_trunk_interfaces) == 2:
                return dev_trunk_interfaces['trunkIf']
            # API returned no trunk data; keep the historical placeholder
            # value (including its original spelling) for compatibility.
            dev_trunk_interfaces['trunkIf'] = ["No trunk inteface"]
            return dev_trunk_interfaces['trunkIf']
    except requests.exceptions.RequestException as e:
        return "Error:\n" + str(e) + ' get_trunk_interfaces: An Error has occured'
|
def function[get_trunk_interfaces, parameter[devId, auth, url]]:
constant[Function takes devId as input to RESTFULL call to HP IMC platform
:param devId: output of get_dev_details
:return: list of dictionaries containing of interfaces configured as an 802.1q trunk
Example:
auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
get_dev_asset_details("2", auth.creds, auth.url)
]
variable[get_trunk_interfaces_url] assign[=] binary_operation[binary_operation[constant[/imcrs/vlan/trunk?devId=] + call[name[str], parameter[name[devId]]]] + constant[&start=1&size=5000&total=false]]
variable[f_url] assign[=] binary_operation[name[url] + name[get_trunk_interfaces_url]]
variable[r] assign[=] call[name[requests].get, parameter[name[f_url]]]
<ast.Try object at 0x7da2043442e0>
|
keyword[def] identifier[get_trunk_interfaces] ( identifier[devId] , identifier[auth] , identifier[url] ):
literal[string]
identifier[get_trunk_interfaces_url] = literal[string] + identifier[str] ( identifier[devId] )+ literal[string]
identifier[f_url] = identifier[url] + identifier[get_trunk_interfaces_url]
identifier[r] = identifier[requests] . identifier[get] ( identifier[f_url] , identifier[auth] = identifier[auth] , identifier[headers] = identifier[HEADERS] )
keyword[try] :
keyword[if] identifier[r] . identifier[status_code] == literal[int] :
identifier[dev_trunk_interfaces] =( identifier[json] . identifier[loads] ( identifier[r] . identifier[text] ))
keyword[if] identifier[len] ( identifier[dev_trunk_interfaces] )== literal[int] :
keyword[return] identifier[dev_trunk_interfaces] [ literal[string] ]
keyword[else] :
identifier[dev_trunk_interfaces] [ literal[string] ]=[ literal[string] ]
keyword[return] identifier[dev_trunk_interfaces] [ literal[string] ]
keyword[except] identifier[requests] . identifier[exceptions] . identifier[RequestException] keyword[as] identifier[e] :
keyword[return] literal[string] + identifier[str] ( identifier[e] )+ literal[string]
|
def get_trunk_interfaces(devId, auth, url):
"""Function takes devId as input to RESTFULL call to HP IMC platform
:param devId: output of get_dev_details
:return: list of dictionaries containing of interfaces configured as an 802.1q trunk
Example:
auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
get_dev_asset_details("2", auth.creds, auth.url)
"""
# checks to see if the imc credentials are already available
get_trunk_interfaces_url = '/imcrs/vlan/trunk?devId=' + str(devId) + '&start=1&size=5000&total=false'
f_url = url + get_trunk_interfaces_url
r = requests.get(f_url, auth=auth, headers=HEADERS)
# r.status_code
try:
if r.status_code == 200:
dev_trunk_interfaces = json.loads(r.text) # depends on [control=['if'], data=[]]
if len(dev_trunk_interfaces) == 2:
return dev_trunk_interfaces['trunkIf'] # depends on [control=['if'], data=[]]
else:
dev_trunk_interfaces['trunkIf'] = ['No trunk inteface']
return dev_trunk_interfaces['trunkIf'] # depends on [control=['try'], data=[]]
except requests.exceptions.RequestException as e:
return 'Error:\n' + str(e) + ' get_trunk_interfaces: An Error has occured' # depends on [control=['except'], data=['e']]
|
def _outputLine(self, logevent, length=None, human=False):
        """
        Print the final line.

        Provides various options (length, human, datetime changes, ...).

        Args:
            logevent: parsed log event; its timestamp may be reformatted
                in place before printing.
            length (int, optional): truncate the printed line to roughly
                this many characters, keeping the head and tail.
            human (bool): apply millisecond and number reformatting helpers.
        """
        # adapt timezone output if necessary
        if self.args['timestamp_format'] != 'none':
            logevent._reformat_timestamp(self.args['timestamp_format'],
                                         force=True)
        if any(self.args['timezone']):
            if self.args['timestamp_format'] == 'none':
                # NOTE(review): mutates self.args so later events use the same
                # format — presumably intentional caching; confirm.
                self.args['timestamp_format'] = logevent.datetime_format
            logevent._reformat_timestamp(self.args['timestamp_format'],
                                         force=True)
        if self.args['json']:
            # JSON output short-circuits truncation and humanization below.
            print(logevent.to_json())
            return
        line = logevent.line_str
        if length:
            if len(line) > length:
                # Keep the head and tail of the line joined by '...'.
                line = (line[:int(length / 2 - 2)] + '...' +
                        line[int(-length / 2 + 1):])
        if human:
            line = self._changeMs(line)
            line = self._formatNumbers(line)
        print(line)
|
def function[_outputLine, parameter[self, logevent, length, human]]:
constant[
Print the final line.
Provides various options (length, human, datetime changes, ...).
]
if compare[call[name[self].args][constant[timestamp_format]] not_equal[!=] constant[none]] begin[:]
call[name[logevent]._reformat_timestamp, parameter[call[name[self].args][constant[timestamp_format]]]]
if call[name[any], parameter[call[name[self].args][constant[timezone]]]] begin[:]
if compare[call[name[self].args][constant[timestamp_format]] equal[==] constant[none]] begin[:]
call[name[self].args][constant[timestamp_format]] assign[=] name[logevent].datetime_format
call[name[logevent]._reformat_timestamp, parameter[call[name[self].args][constant[timestamp_format]]]]
if call[name[self].args][constant[json]] begin[:]
call[name[print], parameter[call[name[logevent].to_json, parameter[]]]]
return[None]
variable[line] assign[=] name[logevent].line_str
if name[length] begin[:]
if compare[call[name[len], parameter[name[line]]] greater[>] name[length]] begin[:]
variable[line] assign[=] binary_operation[binary_operation[call[name[line]][<ast.Slice object at 0x7da1b17f9f30>] + constant[...]] + call[name[line]][<ast.Slice object at 0x7da1b17f9d20>]]
if name[human] begin[:]
variable[line] assign[=] call[name[self]._changeMs, parameter[name[line]]]
variable[line] assign[=] call[name[self]._formatNumbers, parameter[name[line]]]
call[name[print], parameter[name[line]]]
|
keyword[def] identifier[_outputLine] ( identifier[self] , identifier[logevent] , identifier[length] = keyword[None] , identifier[human] = keyword[False] ):
literal[string]
keyword[if] identifier[self] . identifier[args] [ literal[string] ]!= literal[string] :
identifier[logevent] . identifier[_reformat_timestamp] ( identifier[self] . identifier[args] [ literal[string] ],
identifier[force] = keyword[True] )
keyword[if] identifier[any] ( identifier[self] . identifier[args] [ literal[string] ]):
keyword[if] identifier[self] . identifier[args] [ literal[string] ]== literal[string] :
identifier[self] . identifier[args] [ literal[string] ]= identifier[logevent] . identifier[datetime_format]
identifier[logevent] . identifier[_reformat_timestamp] ( identifier[self] . identifier[args] [ literal[string] ],
identifier[force] = keyword[True] )
keyword[if] identifier[self] . identifier[args] [ literal[string] ]:
identifier[print] ( identifier[logevent] . identifier[to_json] ())
keyword[return]
identifier[line] = identifier[logevent] . identifier[line_str]
keyword[if] identifier[length] :
keyword[if] identifier[len] ( identifier[line] )> identifier[length] :
identifier[line] =( identifier[line] [: identifier[int] ( identifier[length] / literal[int] - literal[int] )]+ literal[string] +
identifier[line] [ identifier[int] (- identifier[length] / literal[int] + literal[int] ):])
keyword[if] identifier[human] :
identifier[line] = identifier[self] . identifier[_changeMs] ( identifier[line] )
identifier[line] = identifier[self] . identifier[_formatNumbers] ( identifier[line] )
identifier[print] ( identifier[line] )
|
def _outputLine(self, logevent, length=None, human=False):
"""
Print the final line.
Provides various options (length, human, datetime changes, ...).
"""
# adapt timezone output if necessary
if self.args['timestamp_format'] != 'none':
logevent._reformat_timestamp(self.args['timestamp_format'], force=True) # depends on [control=['if'], data=[]]
if any(self.args['timezone']):
if self.args['timestamp_format'] == 'none':
self.args['timestamp_format'] = logevent.datetime_format # depends on [control=['if'], data=[]]
logevent._reformat_timestamp(self.args['timestamp_format'], force=True) # depends on [control=['if'], data=[]]
if self.args['json']:
print(logevent.to_json())
return # depends on [control=['if'], data=[]]
line = logevent.line_str
if length:
if len(line) > length:
line = line[:int(length / 2 - 2)] + '...' + line[int(-length / 2 + 1):] # depends on [control=['if'], data=['length']] # depends on [control=['if'], data=[]]
if human:
line = self._changeMs(line)
line = self._formatNumbers(line) # depends on [control=['if'], data=[]]
print(line)
|
def sync_proxy(self, mri, block):
        """Abstract method telling the ClientComms to sync this proxy Block
        with its remote counterpart. Should wait until it is connected

        Blocks until the first full structure arrives (or DEFAULT_TIMEOUT
        elapses, raising from the queue get).

        Args:
            mri (str): The mri for the remote block
            block (BlockModel): The local proxy Block to keep in sync
        """
        done_queue = Queue()
        # Stored so other code can wait on the first structure for this mri.
        self._queues[mri] = done_queue
        # Field names already materialized on the proxy; empty means the
        # Block structure must be (re)generated from the next update.
        update_fields = set()
        def callback(value=None):
            # Monitor callback: value is either a structure update or an
            # Exception signaling disconnect / cancellation / remote error.
            if isinstance(value, Exception):
                # Disconnect or Cancelled or RemoteError
                if isinstance(value, Disconnected):
                    # We will get a reconnect with a whole new structure
                    update_fields.clear()
                    block.health.set_value(
                        value="pvAccess disconnected",
                        alarm=Alarm.disconnected("pvAccess disconnected")
                    )
            else:
                with block.notifier.changes_squashed:
                    if not update_fields:
                        # First (or post-disconnect) update: rebuild the whole
                        # Block, then unblock the waiting sync_proxy call.
                        self.log.debug("Regenerating from %s", list(value))
                        self._regenerate_block(block, value, update_fields)
                        done_queue.put(None)
                    else:
                        self._update_block(block, value, update_fields)
        m = self._ctxt.monitor(mri, callback, notify_disconnect=True)
        # Keep a reference so the monitor can be cleaned up later.
        self._monitors.add(m)
        done_queue.get(timeout=DEFAULT_TIMEOUT)
|
def function[sync_proxy, parameter[self, mri, block]]:
constant[Abstract method telling the ClientComms to sync this proxy Block
with its remote counterpart. Should wait until it is connected
Args:
mri (str): The mri for the remote block
block (BlockModel): The local proxy Block to keep in sync
]
variable[done_queue] assign[=] call[name[Queue], parameter[]]
call[name[self]._queues][name[mri]] assign[=] name[done_queue]
variable[update_fields] assign[=] call[name[set], parameter[]]
def function[callback, parameter[value]]:
if call[name[isinstance], parameter[name[value], name[Exception]]] begin[:]
if call[name[isinstance], parameter[name[value], name[Disconnected]]] begin[:]
call[name[update_fields].clear, parameter[]]
call[name[block].health.set_value, parameter[]]
variable[m] assign[=] call[name[self]._ctxt.monitor, parameter[name[mri], name[callback]]]
call[name[self]._monitors.add, parameter[name[m]]]
call[name[done_queue].get, parameter[]]
|
keyword[def] identifier[sync_proxy] ( identifier[self] , identifier[mri] , identifier[block] ):
literal[string]
identifier[done_queue] = identifier[Queue] ()
identifier[self] . identifier[_queues] [ identifier[mri] ]= identifier[done_queue]
identifier[update_fields] = identifier[set] ()
keyword[def] identifier[callback] ( identifier[value] = keyword[None] ):
keyword[if] identifier[isinstance] ( identifier[value] , identifier[Exception] ):
keyword[if] identifier[isinstance] ( identifier[value] , identifier[Disconnected] ):
identifier[update_fields] . identifier[clear] ()
identifier[block] . identifier[health] . identifier[set_value] (
identifier[value] = literal[string] ,
identifier[alarm] = identifier[Alarm] . identifier[disconnected] ( literal[string] )
)
keyword[else] :
keyword[with] identifier[block] . identifier[notifier] . identifier[changes_squashed] :
keyword[if] keyword[not] identifier[update_fields] :
identifier[self] . identifier[log] . identifier[debug] ( literal[string] , identifier[list] ( identifier[value] ))
identifier[self] . identifier[_regenerate_block] ( identifier[block] , identifier[value] , identifier[update_fields] )
identifier[done_queue] . identifier[put] ( keyword[None] )
keyword[else] :
identifier[self] . identifier[_update_block] ( identifier[block] , identifier[value] , identifier[update_fields] )
identifier[m] = identifier[self] . identifier[_ctxt] . identifier[monitor] ( identifier[mri] , identifier[callback] , identifier[notify_disconnect] = keyword[True] )
identifier[self] . identifier[_monitors] . identifier[add] ( identifier[m] )
identifier[done_queue] . identifier[get] ( identifier[timeout] = identifier[DEFAULT_TIMEOUT] )
|
def sync_proxy(self, mri, block):
"""Abstract method telling the ClientComms to sync this proxy Block
with its remote counterpart. Should wait until it is connected
Args:
mri (str): The mri for the remote block
block (BlockModel): The local proxy Block to keep in sync
"""
done_queue = Queue()
self._queues[mri] = done_queue
update_fields = set()
def callback(value=None):
if isinstance(value, Exception):
# Disconnect or Cancelled or RemoteError
if isinstance(value, Disconnected):
# We will get a reconnect with a whole new structure
update_fields.clear()
block.health.set_value(value='pvAccess disconnected', alarm=Alarm.disconnected('pvAccess disconnected')) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
with block.notifier.changes_squashed:
if not update_fields:
self.log.debug('Regenerating from %s', list(value))
self._regenerate_block(block, value, update_fields)
done_queue.put(None) # depends on [control=['if'], data=[]]
else:
self._update_block(block, value, update_fields) # depends on [control=['with'], data=[]]
m = self._ctxt.monitor(mri, callback, notify_disconnect=True)
self._monitors.add(m)
done_queue.get(timeout=DEFAULT_TIMEOUT)
|
def get_version_from_tag(tag_name: str) -> Optional[str]:
    """Get git hash from tag

    :param tag_name: Name of the git tag (i.e. 'v1.0.0')
    :return: sha1 hash of the tagged commit, or None when no tag matches
    """
    debug('get_version_from_tag({})'.format(tag_name))
    check_repo()
    # Lazily scan the repository tags for the first name match.
    matches = (tag.commit.hexsha for tag in repo.tags if tag.name == tag_name)
    return next(matches, None)
|
def function[get_version_from_tag, parameter[tag_name]]:
constant[Get git hash from tag
:param tag_name: Name of the git tag (i.e. 'v1.0.0')
:return: sha1 hash of the commit
]
call[name[debug], parameter[call[constant[get_version_from_tag({})].format, parameter[name[tag_name]]]]]
call[name[check_repo], parameter[]]
for taget[name[i]] in starred[name[repo].tags] begin[:]
if compare[name[i].name equal[==] name[tag_name]] begin[:]
return[name[i].commit.hexsha]
return[constant[None]]
|
keyword[def] identifier[get_version_from_tag] ( identifier[tag_name] : identifier[str] )-> identifier[Optional] [ identifier[str] ]:
literal[string]
identifier[debug] ( literal[string] . identifier[format] ( identifier[tag_name] ))
identifier[check_repo] ()
keyword[for] identifier[i] keyword[in] identifier[repo] . identifier[tags] :
keyword[if] identifier[i] . identifier[name] == identifier[tag_name] :
keyword[return] identifier[i] . identifier[commit] . identifier[hexsha]
keyword[return] keyword[None]
|
def get_version_from_tag(tag_name: str) -> Optional[str]:
"""Get git hash from tag
:param tag_name: Name of the git tag (i.e. 'v1.0.0')
:return: sha1 hash of the commit
"""
debug('get_version_from_tag({})'.format(tag_name))
check_repo()
for i in repo.tags:
if i.name == tag_name:
return i.commit.hexsha # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
return None
|
def enable_extended_scan_code_mode(self):
        """
        Extended scan code mode means the Yubikey will output the bytes in
        the 'fixed string' as scan codes, without modhex encoding the data.

        Because of the way this is stored in the config flags, it is not
        possible to disable this option once it is enabled (of course, you
        can abort config update or reprogram the YubiKey again).

        Requires YubiKey 2.x.

        Raises:
            RuntimeError: if this key's capabilities do not include
                extended scan code mode.
        """
        if not self.capabilities.have_extended_scan_code_mode():
            # BUG FIX: a bare `raise` outside an except block aborts with
            # "RuntimeError: No active exception to re-raise". Raise an
            # explicit, descriptive RuntimeError instead (callers still see
            # the same exception type).
            raise RuntimeError('Extended scan code mode not supported by this YubiKey')
        self._require_version(major=2)
        self.config_flag('SHORT_TICKET', True)
        self.config_flag('STATIC_TICKET', False)
|
def function[enable_extended_scan_code_mode, parameter[self]]:
constant[
Extended scan code mode means the Yubikey will output the bytes in
the 'fixed string' as scan codes, without modhex encoding the data.
Because of the way this is stored in the config flags, it is not
possible to disable this option once it is enabled (of course, you
can abort config update or reprogram the YubiKey again).
Requires YubiKey 2.x.
]
if <ast.UnaryOp object at 0x7da1b08b0eb0> begin[:]
<ast.Raise object at 0x7da1b08b15a0>
call[name[self]._require_version, parameter[]]
call[name[self].config_flag, parameter[constant[SHORT_TICKET], constant[True]]]
call[name[self].config_flag, parameter[constant[STATIC_TICKET], constant[False]]]
|
keyword[def] identifier[enable_extended_scan_code_mode] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[capabilities] . identifier[have_extended_scan_code_mode] ():
keyword[raise]
identifier[self] . identifier[_require_version] ( identifier[major] = literal[int] )
identifier[self] . identifier[config_flag] ( literal[string] , keyword[True] )
identifier[self] . identifier[config_flag] ( literal[string] , keyword[False] )
|
def enable_extended_scan_code_mode(self):
"""
Extended scan code mode means the Yubikey will output the bytes in
the 'fixed string' as scan codes, without modhex encoding the data.
Because of the way this is stored in the config flags, it is not
possible to disable this option once it is enabled (of course, you
can abort config update or reprogram the YubiKey again).
Requires YubiKey 2.x.
"""
if not self.capabilities.have_extended_scan_code_mode():
raise # depends on [control=['if'], data=[]]
self._require_version(major=2)
self.config_flag('SHORT_TICKET', True)
self.config_flag('STATIC_TICKET', False)
|
def _transition_steps(self, brightness=None):
"""
Get the maximum number of steps needed for a transition.
:param brightness: The brightness to transition to (0.0-1.0).
:return: The maximum number of steps.
"""
if brightness is not None:
self._assert_is_brightness(brightness)
return self._driver.steps(self.brightness, brightness)
return 0
|
def function[_transition_steps, parameter[self, brightness]]:
constant[
Get the maximum number of steps needed for a transition.
:param brightness: The brightness to transition to (0.0-1.0).
:return: The maximum number of steps.
]
if compare[name[brightness] is_not constant[None]] begin[:]
call[name[self]._assert_is_brightness, parameter[name[brightness]]]
return[call[name[self]._driver.steps, parameter[name[self].brightness, name[brightness]]]]
return[constant[0]]
|
keyword[def] identifier[_transition_steps] ( identifier[self] , identifier[brightness] = keyword[None] ):
literal[string]
keyword[if] identifier[brightness] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[_assert_is_brightness] ( identifier[brightness] )
keyword[return] identifier[self] . identifier[_driver] . identifier[steps] ( identifier[self] . identifier[brightness] , identifier[brightness] )
keyword[return] literal[int]
|
def _transition_steps(self, brightness=None):
"""
Get the maximum number of steps needed for a transition.
:param brightness: The brightness to transition to (0.0-1.0).
:return: The maximum number of steps.
"""
if brightness is not None:
self._assert_is_brightness(brightness)
return self._driver.steps(self.brightness, brightness) # depends on [control=['if'], data=['brightness']]
return 0
|
def list_locked(**kwargs):
'''
Query the package database those packages which are
locked against reinstallation, modification or deletion.
Returns returns a list of package names with version strings
CLI Example:
.. code-block:: bash
salt '*' pkg.list_locked
jail
List locked packages within the specified jail
CLI Example:
.. code-block:: bash
salt '*' pkg.list_locked jail=<jail name or id>
chroot
List locked packages within the specified chroot (ignored if ``jail`` is
specified)
CLI Example:
.. code-block:: bash
salt '*' pkg.list_locked chroot=/path/to/chroot
root
List locked packages within the specified root (ignored if ``jail`` is
specified)
CLI Example:
.. code-block:: bash
salt '*' pkg.list_locked root=/path/to/chroot
'''
return ['{0}-{1}'.format(pkgname, version(pkgname, **kwargs))
for pkgname in _lockcmd('lock', name=None, **kwargs)]
|
def function[list_locked, parameter[]]:
constant[
Query the package database those packages which are
locked against reinstallation, modification or deletion.
Returns returns a list of package names with version strings
CLI Example:
.. code-block:: bash
salt '*' pkg.list_locked
jail
List locked packages within the specified jail
CLI Example:
.. code-block:: bash
salt '*' pkg.list_locked jail=<jail name or id>
chroot
List locked packages within the specified chroot (ignored if ``jail`` is
specified)
CLI Example:
.. code-block:: bash
salt '*' pkg.list_locked chroot=/path/to/chroot
root
List locked packages within the specified root (ignored if ``jail`` is
specified)
CLI Example:
.. code-block:: bash
salt '*' pkg.list_locked root=/path/to/chroot
]
return[<ast.ListComp object at 0x7da1b1c15d20>]
|
keyword[def] identifier[list_locked] (** identifier[kwargs] ):
literal[string]
keyword[return] [ literal[string] . identifier[format] ( identifier[pkgname] , identifier[version] ( identifier[pkgname] ,** identifier[kwargs] ))
keyword[for] identifier[pkgname] keyword[in] identifier[_lockcmd] ( literal[string] , identifier[name] = keyword[None] ,** identifier[kwargs] )]
|
def list_locked(**kwargs):
"""
Query the package database those packages which are
locked against reinstallation, modification or deletion.
Returns returns a list of package names with version strings
CLI Example:
.. code-block:: bash
salt '*' pkg.list_locked
jail
List locked packages within the specified jail
CLI Example:
.. code-block:: bash
salt '*' pkg.list_locked jail=<jail name or id>
chroot
List locked packages within the specified chroot (ignored if ``jail`` is
specified)
CLI Example:
.. code-block:: bash
salt '*' pkg.list_locked chroot=/path/to/chroot
root
List locked packages within the specified root (ignored if ``jail`` is
specified)
CLI Example:
.. code-block:: bash
salt '*' pkg.list_locked root=/path/to/chroot
"""
return ['{0}-{1}'.format(pkgname, version(pkgname, **kwargs)) for pkgname in _lockcmd('lock', name=None, **kwargs)]
|
def _url_search_builder(term, country='US', media='all', entity=None, attribute=None, limit=50):
"""
Builds the URL to perform the search based on the provided data
:param term: String. The URL-encoded text string you want to search for. Example: Steven Wilson.
The method will take care of spaces so you don't have to.
:param country: String. The two-letter country code for the store you want to search.
For a full list of the codes: http://en.wikipedia.org/wiki/%20ISO_3166-1_alpha-2
:param media: String. The media type you want to search for. Example: music
:param entity: String. The type of results you want returned, relative to the specified media type. Example: musicArtist.
Full list: musicArtist, musicTrack, album, musicVideo, mix, song
:param attribute: String. The attribute you want to search for in the stores, relative to the specified media type.
:param limit: Integer. The number of search results you want the iTunes Store to return.
:return: The built URL as a string
"""
built_url = base_search_url + _parse_query(term)
built_url += ampersand + parameters[1] + country
built_url += ampersand + parameters[2] + media
if entity is not None:
built_url += ampersand + parameters[3] + entity
if attribute is not None:
built_url += ampersand + parameters[4] + attribute
built_url += ampersand + parameters[5] + str(limit)
return built_url
|
def function[_url_search_builder, parameter[term, country, media, entity, attribute, limit]]:
constant[
Builds the URL to perform the search based on the provided data
:param term: String. The URL-encoded text string you want to search for. Example: Steven Wilson.
The method will take care of spaces so you don't have to.
:param country: String. The two-letter country code for the store you want to search.
For a full list of the codes: http://en.wikipedia.org/wiki/%20ISO_3166-1_alpha-2
:param media: String. The media type you want to search for. Example: music
:param entity: String. The type of results you want returned, relative to the specified media type. Example: musicArtist.
Full list: musicArtist, musicTrack, album, musicVideo, mix, song
:param attribute: String. The attribute you want to search for in the stores, relative to the specified media type.
:param limit: Integer. The number of search results you want the iTunes Store to return.
:return: The built URL as a string
]
variable[built_url] assign[=] binary_operation[name[base_search_url] + call[name[_parse_query], parameter[name[term]]]]
<ast.AugAssign object at 0x7da1b1904a90>
<ast.AugAssign object at 0x7da1b19047c0>
if compare[name[entity] is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da1b1a79d20>
if compare[name[attribute] is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da1b1a784c0>
<ast.AugAssign object at 0x7da1b1a44970>
return[name[built_url]]
|
keyword[def] identifier[_url_search_builder] ( identifier[term] , identifier[country] = literal[string] , identifier[media] = literal[string] , identifier[entity] = keyword[None] , identifier[attribute] = keyword[None] , identifier[limit] = literal[int] ):
literal[string]
identifier[built_url] = identifier[base_search_url] + identifier[_parse_query] ( identifier[term] )
identifier[built_url] += identifier[ampersand] + identifier[parameters] [ literal[int] ]+ identifier[country]
identifier[built_url] += identifier[ampersand] + identifier[parameters] [ literal[int] ]+ identifier[media]
keyword[if] identifier[entity] keyword[is] keyword[not] keyword[None] :
identifier[built_url] += identifier[ampersand] + identifier[parameters] [ literal[int] ]+ identifier[entity]
keyword[if] identifier[attribute] keyword[is] keyword[not] keyword[None] :
identifier[built_url] += identifier[ampersand] + identifier[parameters] [ literal[int] ]+ identifier[attribute]
identifier[built_url] += identifier[ampersand] + identifier[parameters] [ literal[int] ]+ identifier[str] ( identifier[limit] )
keyword[return] identifier[built_url]
|
def _url_search_builder(term, country='US', media='all', entity=None, attribute=None, limit=50):
"""
Builds the URL to perform the search based on the provided data
:param term: String. The URL-encoded text string you want to search for. Example: Steven Wilson.
The method will take care of spaces so you don't have to.
:param country: String. The two-letter country code for the store you want to search.
For a full list of the codes: http://en.wikipedia.org/wiki/%20ISO_3166-1_alpha-2
:param media: String. The media type you want to search for. Example: music
:param entity: String. The type of results you want returned, relative to the specified media type. Example: musicArtist.
Full list: musicArtist, musicTrack, album, musicVideo, mix, song
:param attribute: String. The attribute you want to search for in the stores, relative to the specified media type.
:param limit: Integer. The number of search results you want the iTunes Store to return.
:return: The built URL as a string
"""
built_url = base_search_url + _parse_query(term)
built_url += ampersand + parameters[1] + country
built_url += ampersand + parameters[2] + media
if entity is not None:
built_url += ampersand + parameters[3] + entity # depends on [control=['if'], data=['entity']]
if attribute is not None:
built_url += ampersand + parameters[4] + attribute # depends on [control=['if'], data=['attribute']]
built_url += ampersand + parameters[5] + str(limit)
return built_url
|
def f_iter_nodes(self, recursive=True, with_links=True, max_depth=None, predicate=None):
"""Iterates recursively (default) over nodes hanging below this group.
:param recursive: Whether to iterate the whole sub tree or only immediate children.
:param with_links: If links should be considered
:param max_depth: Maximum depth in search tree relative to start node (inclusive)
:param predicate:
A predicate function that is applied for each node and only returns the node
if it evaluates to ``True``. If ``False``
and you iterate recursively also the children are spared.
Leave to `None` if you don't want to filter and simply iterate over all nodes.
For example, to iterate only over groups you could use:
>>> traj.f_iter_nodes(recursive=True, predicate=lambda x: x.v_is_group)
To blind out all runs except for a particular set, you can simply pass a tuple
of run indices with -1 referring to the ``run_ALL`` node.
For instance
>>> traj.f_iter_nodes(recursive=True, predicate=(0,3,-1))
Will blind out all nodes hanging below a group named ``run_XXXXXXXXX``
(including the group) except ``run_00000000``, ``run_00000003``, and ``run_ALL``.
:return: Iterator over nodes
"""
return self._nn_interface._iter_nodes(self, recursive=recursive, with_links=with_links,
max_depth=max_depth,
predicate=predicate)
|
def function[f_iter_nodes, parameter[self, recursive, with_links, max_depth, predicate]]:
constant[Iterates recursively (default) over nodes hanging below this group.
:param recursive: Whether to iterate the whole sub tree or only immediate children.
:param with_links: If links should be considered
:param max_depth: Maximum depth in search tree relative to start node (inclusive)
:param predicate:
A predicate function that is applied for each node and only returns the node
if it evaluates to ``True``. If ``False``
and you iterate recursively also the children are spared.
Leave to `None` if you don't want to filter and simply iterate over all nodes.
For example, to iterate only over groups you could use:
>>> traj.f_iter_nodes(recursive=True, predicate=lambda x: x.v_is_group)
To blind out all runs except for a particular set, you can simply pass a tuple
of run indices with -1 referring to the ``run_ALL`` node.
For instance
>>> traj.f_iter_nodes(recursive=True, predicate=(0,3,-1))
Will blind out all nodes hanging below a group named ``run_XXXXXXXXX``
(including the group) except ``run_00000000``, ``run_00000003``, and ``run_ALL``.
:return: Iterator over nodes
]
return[call[name[self]._nn_interface._iter_nodes, parameter[name[self]]]]
|
keyword[def] identifier[f_iter_nodes] ( identifier[self] , identifier[recursive] = keyword[True] , identifier[with_links] = keyword[True] , identifier[max_depth] = keyword[None] , identifier[predicate] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[_nn_interface] . identifier[_iter_nodes] ( identifier[self] , identifier[recursive] = identifier[recursive] , identifier[with_links] = identifier[with_links] ,
identifier[max_depth] = identifier[max_depth] ,
identifier[predicate] = identifier[predicate] )
|
def f_iter_nodes(self, recursive=True, with_links=True, max_depth=None, predicate=None):
"""Iterates recursively (default) over nodes hanging below this group.
:param recursive: Whether to iterate the whole sub tree or only immediate children.
:param with_links: If links should be considered
:param max_depth: Maximum depth in search tree relative to start node (inclusive)
:param predicate:
A predicate function that is applied for each node and only returns the node
if it evaluates to ``True``. If ``False``
and you iterate recursively also the children are spared.
Leave to `None` if you don't want to filter and simply iterate over all nodes.
For example, to iterate only over groups you could use:
>>> traj.f_iter_nodes(recursive=True, predicate=lambda x: x.v_is_group)
To blind out all runs except for a particular set, you can simply pass a tuple
of run indices with -1 referring to the ``run_ALL`` node.
For instance
>>> traj.f_iter_nodes(recursive=True, predicate=(0,3,-1))
Will blind out all nodes hanging below a group named ``run_XXXXXXXXX``
(including the group) except ``run_00000000``, ``run_00000003``, and ``run_ALL``.
:return: Iterator over nodes
"""
return self._nn_interface._iter_nodes(self, recursive=recursive, with_links=with_links, max_depth=max_depth, predicate=predicate)
|
def plot_depth_descent_ascent(depths, dive_mask, des, asc):
'''Plot depth data for whole deployment, descents, and ascents
Args
----
depths: ndarray
Depth values at each sensor sampling
dive_mask: ndarray
Boolean mask slicing dives from the tag data
des: ndarray
boolean mask for slicing descent phases of dives from tag dta
asc: ndarray
boolean mask for slicing asccent phases of dives from tag dta
'''
import numpy
from . import plotutils
# Indices where depths are descents or ascents
des_ind = numpy.where(dive_mask & des)[0]
asc_ind = numpy.where(dive_mask & asc)[0]
fig, ax1 = plt.subplots()
ax1.title.set_text('Dive descents and ascents')
ax1 = plotutils.plot_noncontiguous(ax1, depths, des_ind, _colors[0],
'descents')
ax1 = plotutils.plot_noncontiguous(ax1, depths, asc_ind, _colors[1],
'ascents')
ax1.legend(loc='upper right')
ax1.invert_yaxis()
ax1.yaxis.label.set_text('depth (m)')
ax1.xaxis.label.set_text('samples')
plt.show()
return None
|
def function[plot_depth_descent_ascent, parameter[depths, dive_mask, des, asc]]:
constant[Plot depth data for whole deployment, descents, and ascents
Args
----
depths: ndarray
Depth values at each sensor sampling
dive_mask: ndarray
Boolean mask slicing dives from the tag data
des: ndarray
boolean mask for slicing descent phases of dives from tag dta
asc: ndarray
boolean mask for slicing asccent phases of dives from tag dta
]
import module[numpy]
from relative_module[None] import module[plotutils]
variable[des_ind] assign[=] call[call[name[numpy].where, parameter[binary_operation[name[dive_mask] <ast.BitAnd object at 0x7da2590d6b60> name[des]]]]][constant[0]]
variable[asc_ind] assign[=] call[call[name[numpy].where, parameter[binary_operation[name[dive_mask] <ast.BitAnd object at 0x7da2590d6b60> name[asc]]]]][constant[0]]
<ast.Tuple object at 0x7da2054a7ac0> assign[=] call[name[plt].subplots, parameter[]]
call[name[ax1].title.set_text, parameter[constant[Dive descents and ascents]]]
variable[ax1] assign[=] call[name[plotutils].plot_noncontiguous, parameter[name[ax1], name[depths], name[des_ind], call[name[_colors]][constant[0]], constant[descents]]]
variable[ax1] assign[=] call[name[plotutils].plot_noncontiguous, parameter[name[ax1], name[depths], name[asc_ind], call[name[_colors]][constant[1]], constant[ascents]]]
call[name[ax1].legend, parameter[]]
call[name[ax1].invert_yaxis, parameter[]]
call[name[ax1].yaxis.label.set_text, parameter[constant[depth (m)]]]
call[name[ax1].xaxis.label.set_text, parameter[constant[samples]]]
call[name[plt].show, parameter[]]
return[constant[None]]
|
keyword[def] identifier[plot_depth_descent_ascent] ( identifier[depths] , identifier[dive_mask] , identifier[des] , identifier[asc] ):
literal[string]
keyword[import] identifier[numpy]
keyword[from] . keyword[import] identifier[plotutils]
identifier[des_ind] = identifier[numpy] . identifier[where] ( identifier[dive_mask] & identifier[des] )[ literal[int] ]
identifier[asc_ind] = identifier[numpy] . identifier[where] ( identifier[dive_mask] & identifier[asc] )[ literal[int] ]
identifier[fig] , identifier[ax1] = identifier[plt] . identifier[subplots] ()
identifier[ax1] . identifier[title] . identifier[set_text] ( literal[string] )
identifier[ax1] = identifier[plotutils] . identifier[plot_noncontiguous] ( identifier[ax1] , identifier[depths] , identifier[des_ind] , identifier[_colors] [ literal[int] ],
literal[string] )
identifier[ax1] = identifier[plotutils] . identifier[plot_noncontiguous] ( identifier[ax1] , identifier[depths] , identifier[asc_ind] , identifier[_colors] [ literal[int] ],
literal[string] )
identifier[ax1] . identifier[legend] ( identifier[loc] = literal[string] )
identifier[ax1] . identifier[invert_yaxis] ()
identifier[ax1] . identifier[yaxis] . identifier[label] . identifier[set_text] ( literal[string] )
identifier[ax1] . identifier[xaxis] . identifier[label] . identifier[set_text] ( literal[string] )
identifier[plt] . identifier[show] ()
keyword[return] keyword[None]
|
def plot_depth_descent_ascent(depths, dive_mask, des, asc):
"""Plot depth data for whole deployment, descents, and ascents
Args
----
depths: ndarray
Depth values at each sensor sampling
dive_mask: ndarray
Boolean mask slicing dives from the tag data
des: ndarray
boolean mask for slicing descent phases of dives from tag dta
asc: ndarray
boolean mask for slicing asccent phases of dives from tag dta
"""
import numpy
from . import plotutils
# Indices where depths are descents or ascents
des_ind = numpy.where(dive_mask & des)[0]
asc_ind = numpy.where(dive_mask & asc)[0]
(fig, ax1) = plt.subplots()
ax1.title.set_text('Dive descents and ascents')
ax1 = plotutils.plot_noncontiguous(ax1, depths, des_ind, _colors[0], 'descents')
ax1 = plotutils.plot_noncontiguous(ax1, depths, asc_ind, _colors[1], 'ascents')
ax1.legend(loc='upper right')
ax1.invert_yaxis()
ax1.yaxis.label.set_text('depth (m)')
ax1.xaxis.label.set_text('samples')
plt.show()
return None
|
def compile_less(input_file, output_file):
"""
Compile a LESS source file. Minifies the output in release mode.
"""
from .modules import less
if not isinstance(input_file, str):
raise RuntimeError('LESS compiler takes only a single input file.')
return {
'dependencies_fn': less.less_dependencies,
'compiler_fn': less.less_compile,
'input': input_file,
'output': output_file,
'kwargs': {},
}
|
def function[compile_less, parameter[input_file, output_file]]:
constant[
Compile a LESS source file. Minifies the output in release mode.
]
from relative_module[modules] import module[less]
if <ast.UnaryOp object at 0x7da204344c70> begin[:]
<ast.Raise object at 0x7da204345b70>
return[dictionary[[<ast.Constant object at 0x7da204344b80>, <ast.Constant object at 0x7da204344eb0>, <ast.Constant object at 0x7da2043456c0>, <ast.Constant object at 0x7da204344e20>, <ast.Constant object at 0x7da2043440d0>], [<ast.Attribute object at 0x7da204346860>, <ast.Attribute object at 0x7da204346da0>, <ast.Name object at 0x7da204345630>, <ast.Name object at 0x7da204345180>, <ast.Dict object at 0x7da204347dc0>]]]
|
keyword[def] identifier[compile_less] ( identifier[input_file] , identifier[output_file] ):
literal[string]
keyword[from] . identifier[modules] keyword[import] identifier[less]
keyword[if] keyword[not] identifier[isinstance] ( identifier[input_file] , identifier[str] ):
keyword[raise] identifier[RuntimeError] ( literal[string] )
keyword[return] {
literal[string] : identifier[less] . identifier[less_dependencies] ,
literal[string] : identifier[less] . identifier[less_compile] ,
literal[string] : identifier[input_file] ,
literal[string] : identifier[output_file] ,
literal[string] :{},
}
|
def compile_less(input_file, output_file):
"""
Compile a LESS source file. Minifies the output in release mode.
"""
from .modules import less
if not isinstance(input_file, str):
raise RuntimeError('LESS compiler takes only a single input file.') # depends on [control=['if'], data=[]]
return {'dependencies_fn': less.less_dependencies, 'compiler_fn': less.less_compile, 'input': input_file, 'output': output_file, 'kwargs': {}}
|
def attachments(self):
"""Returns an object with:
type = file content type
file_name = the name of the file
contents = base64 encoded file contents"""
attachments = None
if 'attachment-info' in self.payload:
attachments = self._get_attachments(self.request)
# Check if we have a raw message
raw_email = self.get_raw_email()
if raw_email is not None:
attachments = self._get_attachments_raw(raw_email)
return attachments
|
def function[attachments, parameter[self]]:
constant[Returns an object with:
type = file content type
file_name = the name of the file
contents = base64 encoded file contents]
variable[attachments] assign[=] constant[None]
if compare[constant[attachment-info] in name[self].payload] begin[:]
variable[attachments] assign[=] call[name[self]._get_attachments, parameter[name[self].request]]
variable[raw_email] assign[=] call[name[self].get_raw_email, parameter[]]
if compare[name[raw_email] is_not constant[None]] begin[:]
variable[attachments] assign[=] call[name[self]._get_attachments_raw, parameter[name[raw_email]]]
return[name[attachments]]
|
keyword[def] identifier[attachments] ( identifier[self] ):
literal[string]
identifier[attachments] = keyword[None]
keyword[if] literal[string] keyword[in] identifier[self] . identifier[payload] :
identifier[attachments] = identifier[self] . identifier[_get_attachments] ( identifier[self] . identifier[request] )
identifier[raw_email] = identifier[self] . identifier[get_raw_email] ()
keyword[if] identifier[raw_email] keyword[is] keyword[not] keyword[None] :
identifier[attachments] = identifier[self] . identifier[_get_attachments_raw] ( identifier[raw_email] )
keyword[return] identifier[attachments]
|
def attachments(self):
"""Returns an object with:
type = file content type
file_name = the name of the file
contents = base64 encoded file contents"""
attachments = None
if 'attachment-info' in self.payload:
attachments = self._get_attachments(self.request) # depends on [control=['if'], data=[]]
# Check if we have a raw message
raw_email = self.get_raw_email()
if raw_email is not None:
attachments = self._get_attachments_raw(raw_email) # depends on [control=['if'], data=['raw_email']]
return attachments
|
async def service_observable(self, limit) -> int:
"""
Service the observable's inBox and outBox
:return: the number of messages successfully serviced
"""
if not self.isReady():
return 0
o = self._service_observable_out_box(limit)
i = await self._observable.serviceQueues(limit)
return o + i
|
<ast.AsyncFunctionDef object at 0x7da2044c0b50>
|
keyword[async] keyword[def] identifier[service_observable] ( identifier[self] , identifier[limit] )-> identifier[int] :
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[isReady] ():
keyword[return] literal[int]
identifier[o] = identifier[self] . identifier[_service_observable_out_box] ( identifier[limit] )
identifier[i] = keyword[await] identifier[self] . identifier[_observable] . identifier[serviceQueues] ( identifier[limit] )
keyword[return] identifier[o] + identifier[i]
|
async def service_observable(self, limit) -> int:
"""
Service the observable's inBox and outBox
:return: the number of messages successfully serviced
"""
if not self.isReady():
return 0 # depends on [control=['if'], data=[]]
o = self._service_observable_out_box(limit)
i = await self._observable.serviceQueues(limit)
return o + i
|
def return_action(self, return_action):
"""Sets the return_action of this ReturnSettings.
:param return_action: The return_action of this ReturnSettings.
:type: str
"""
allowed_values = ["refund", "storeCredit"] # noqa: E501
if return_action is not None and return_action not in allowed_values:
raise ValueError(
"Invalid value for `return_action` ({0}), must be one of {1}" # noqa: E501
.format(return_action, allowed_values)
)
self._return_action = return_action
|
def function[return_action, parameter[self, return_action]]:
constant[Sets the return_action of this ReturnSettings.
:param return_action: The return_action of this ReturnSettings.
:type: str
]
variable[allowed_values] assign[=] list[[<ast.Constant object at 0x7da20c993f40>, <ast.Constant object at 0x7da20c993010>]]
if <ast.BoolOp object at 0x7da20c992950> begin[:]
<ast.Raise object at 0x7da20c991330>
name[self]._return_action assign[=] name[return_action]
|
keyword[def] identifier[return_action] ( identifier[self] , identifier[return_action] ):
literal[string]
identifier[allowed_values] =[ literal[string] , literal[string] ]
keyword[if] identifier[return_action] keyword[is] keyword[not] keyword[None] keyword[and] identifier[return_action] keyword[not] keyword[in] identifier[allowed_values] :
keyword[raise] identifier[ValueError] (
literal[string]
. identifier[format] ( identifier[return_action] , identifier[allowed_values] )
)
identifier[self] . identifier[_return_action] = identifier[return_action]
|
def return_action(self, return_action):
"""Sets the return_action of this ReturnSettings.
:param return_action: The return_action of this ReturnSettings.
:type: str
"""
allowed_values = ['refund', 'storeCredit'] # noqa: E501
if return_action is not None and return_action not in allowed_values: # noqa: E501
raise ValueError('Invalid value for `return_action` ({0}), must be one of {1}'.format(return_action, allowed_values)) # depends on [control=['if'], data=[]]
self._return_action = return_action
|
def logout(self):
"""logout func (quit browser)"""
try:
self.browser.quit()
except Exception:
raise exceptions.BrowserException(self.brow_name, "not started")
return False
self.vbro.stop()
logger.info("logged out")
return True
|
def function[logout, parameter[self]]:
constant[logout func (quit browser)]
<ast.Try object at 0x7da1b0e439a0>
call[name[self].vbro.stop, parameter[]]
call[name[logger].info, parameter[constant[logged out]]]
return[constant[True]]
|
keyword[def] identifier[logout] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[self] . identifier[browser] . identifier[quit] ()
keyword[except] identifier[Exception] :
keyword[raise] identifier[exceptions] . identifier[BrowserException] ( identifier[self] . identifier[brow_name] , literal[string] )
keyword[return] keyword[False]
identifier[self] . identifier[vbro] . identifier[stop] ()
identifier[logger] . identifier[info] ( literal[string] )
keyword[return] keyword[True]
|
def logout(self):
"""logout func (quit browser)"""
try:
self.browser.quit() # depends on [control=['try'], data=[]]
except Exception:
raise exceptions.BrowserException(self.brow_name, 'not started')
return False # depends on [control=['except'], data=[]]
self.vbro.stop()
logger.info('logged out')
return True
|
def move(self, path, raise_if_exists=False):
"""
Alias for ``rename()``
"""
self.rename(path, raise_if_exists=raise_if_exists)
|
def function[move, parameter[self, path, raise_if_exists]]:
constant[
Alias for ``rename()``
]
call[name[self].rename, parameter[name[path]]]
|
keyword[def] identifier[move] ( identifier[self] , identifier[path] , identifier[raise_if_exists] = keyword[False] ):
literal[string]
identifier[self] . identifier[rename] ( identifier[path] , identifier[raise_if_exists] = identifier[raise_if_exists] )
|
def move(self, path, raise_if_exists=False):
"""
Alias for ``rename()``
"""
self.rename(path, raise_if_exists=raise_if_exists)
|
def update_courses(self, event, account_id, course_ids):
"""
Update courses.
Update multiple courses in an account. Operates asynchronously; use the {api:ProgressController#show progress endpoint}
to query the status of an operation.
The action to take on each course. Must be one of 'offer', 'conclude', 'delete', or 'undelete'.
* 'offer' makes a course visible to students. This action is also called "publish" on the web site.
* 'conclude' prevents future enrollments and makes a course read-only for all participants. The course still appears
in prior-enrollment lists.
* 'delete' completely removes the course from the web site (including course menus and prior-enrollment lists).
All enrollments are deleted. Course content may be physically deleted at a future date.
* 'undelete' attempts to recover a course that has been deleted. (Recovery is not guaranteed; please conclude
rather than delete a course if there is any possibility the course will be used again.) The recovered course
will be unpublished. Deleted enrollments will not be recovered.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - account_id
"""ID"""
path["account_id"] = account_id
# REQUIRED - course_ids
"""List of ids of courses to update. At most 500 courses may be updated in one call."""
data["course_ids"] = course_ids
# REQUIRED - event
"""no description"""
self._validate_enum(event, ["offer", "conclude", "delete", "undelete"])
data["event"] = event
self.logger.debug("PUT /api/v1/accounts/{account_id}/courses with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("PUT", "/api/v1/accounts/{account_id}/courses".format(**path), data=data, params=params, single_item=True)
|
def function[update_courses, parameter[self, event, account_id, course_ids]]:
constant[
Update courses.
Update multiple courses in an account. Operates asynchronously; use the {api:ProgressController#show progress endpoint}
to query the status of an operation.
The action to take on each course. Must be one of 'offer', 'conclude', 'delete', or 'undelete'.
* 'offer' makes a course visible to students. This action is also called "publish" on the web site.
* 'conclude' prevents future enrollments and makes a course read-only for all participants. The course still appears
in prior-enrollment lists.
* 'delete' completely removes the course from the web site (including course menus and prior-enrollment lists).
All enrollments are deleted. Course content may be physically deleted at a future date.
* 'undelete' attempts to recover a course that has been deleted. (Recovery is not guaranteed; please conclude
rather than delete a course if there is any possibility the course will be used again.) The recovered course
will be unpublished. Deleted enrollments will not be recovered.
]
variable[path] assign[=] dictionary[[], []]
variable[data] assign[=] dictionary[[], []]
variable[params] assign[=] dictionary[[], []]
constant[ID]
call[name[path]][constant[account_id]] assign[=] name[account_id]
constant[List of ids of courses to update. At most 500 courses may be updated in one call.]
call[name[data]][constant[course_ids]] assign[=] name[course_ids]
constant[no description]
call[name[self]._validate_enum, parameter[name[event], list[[<ast.Constant object at 0x7da1b0bf3700>, <ast.Constant object at 0x7da1b0bf2020>, <ast.Constant object at 0x7da1b0bf3430>, <ast.Constant object at 0x7da1b0bf3af0>]]]]
call[name[data]][constant[event]] assign[=] name[event]
call[name[self].logger.debug, parameter[call[constant[PUT /api/v1/accounts/{account_id}/courses with query params: {params} and form data: {data}].format, parameter[]]]]
return[call[name[self].generic_request, parameter[constant[PUT], call[constant[/api/v1/accounts/{account_id}/courses].format, parameter[]]]]]
|
keyword[def] identifier[update_courses] ( identifier[self] , identifier[event] , identifier[account_id] , identifier[course_ids] ):
literal[string]
identifier[path] ={}
identifier[data] ={}
identifier[params] ={}
literal[string]
identifier[path] [ literal[string] ]= identifier[account_id]
literal[string]
identifier[data] [ literal[string] ]= identifier[course_ids]
literal[string]
identifier[self] . identifier[_validate_enum] ( identifier[event] ,[ literal[string] , literal[string] , literal[string] , literal[string] ])
identifier[data] [ literal[string] ]= identifier[event]
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[params] = identifier[params] , identifier[data] = identifier[data] ,** identifier[path] ))
keyword[return] identifier[self] . identifier[generic_request] ( literal[string] , literal[string] . identifier[format] (** identifier[path] ), identifier[data] = identifier[data] , identifier[params] = identifier[params] , identifier[single_item] = keyword[True] )
|
def update_courses(self, event, account_id, course_ids):
    """
    Update courses.

    Update multiple courses in an account. Operates asynchronously; use the
    {api:ProgressController#show progress endpoint} to query the status of an
    operation.

    ``event`` is the action to take on each course and must be one of
    'offer', 'conclude', 'delete', or 'undelete':

    * 'offer' makes a course visible to students. This action is also called
      "publish" on the web site.
    * 'conclude' prevents future enrollments and makes a course read-only for
      all participants. The course still appears in prior-enrollment lists.
    * 'delete' completely removes the course from the web site (including
      course menus and prior-enrollment lists). All enrollments are deleted.
      Course content may be physically deleted at a future date.
    * 'undelete' attempts to recover a course that has been deleted. (Recovery
      is not guaranteed; please conclude rather than delete a course if there
      is any possibility the course will be used again.) The recovered course
      will be unpublished. Deleted enrollments will not be recovered.
    """
    # REQUIRED path parameter: the account whose courses are updated.
    path = {'account_id': account_id}
    # REQUIRED form data: at most 500 course ids may be updated in one call.
    data = {'course_ids': course_ids}
    params = {}
    # REQUIRED form data: the bulk action, restricted to the allowed set.
    self._validate_enum(event, ['offer', 'conclude', 'delete', 'undelete'])
    data['event'] = event
    self.logger.debug('PUT /api/v1/accounts/{account_id}/courses with query params: {params} and form data: {data}'.format(params=params, data=data, **path))
    return self.generic_request('PUT', '/api/v1/accounts/{account_id}/courses'.format(**path), data=data, params=params, single_item=True)
|
def lib(ctx,
        filepath=None,
        extra=False,
        bootstrap=False,
        cache=False,
        reveal=False,
        show=False,
        save=False,
        directory=False):
    """
    LIBRARY: work with a local library of RDF models.
    """
    # CLI state injected by the group command: verbosity flag and start time.
    verbose = ctx.obj['VERBOSE']
    sTime = ctx.obj['STIME']
    print_opts = {
        'labels': verbose,
    }
    # Only actions that do real work set this; it gates the timing footer.
    DONE_ACTION = False
    if bootstrap:
        DONE_ACTION = True
        action_bootstrap(verbose)
        printDebug("Tip: you can now load an ontology by typing `ontospy lib -s`",
                   "important")
        # raise SystemExit(1)
    elif cache:
        # Wipe and rebuild the pickled-ontology cache.
        DONE_ACTION = True
        action_cache_reset()
    elif directory:
        # Change the local-library location; requires a path argument.
        if not filepath:
            printDebug("Please specify a new directory for the local library.",
                       'important')
            sys.exit(0)
        else:
            _location = filepath[0]
            if _location.endswith("/"):
                # dont need the final slash
                _location = _location[:-1]
            output = action_update_library_location(_location)
            if output:
                printDebug(
                    "Note: no files have been moved or deleted (this has to be done manually)",
                    "comment")
                printDebug("----------\n" + "New location: '%s'" % _location,
                           "important")
            else:
                printDebug(
                    "----------\n" + "Please specify an existing folder path.",
                    "important")
            # Exit immediately either way; no timing footer for this action.
            raise SystemExit(1)
    elif reveal:
        # Open the library folder in the OS file manager, then exit.
        action_reveal_library()
        raise SystemExit(1)
    elif save:
        if filepath:
            DONE_ACTION = True
            # Import only the first provided path into the local library.
            action_import(filepath[0], verbose)
        else:
            click.secho(
                "You provided no arguments - please specify what to save..",
                fg='white')
            raise SystemExit(1)
    elif show:
        click.secho("Local library => '%s'" % get_home_location(), fg='white')
        # Interactive picker; returns None when the user selects nothing.
        filename = action_listlocal(all_details=False)
        if filename:
            DONE_ACTION = True
            # Load the cached (pickled) graph, rebuilding the pickle if absent.
            g = get_pickled_ontology(filename)
            if not g:
                g = do_pickle_ontology(filename)
            shellPrintOverview(g, print_opts)
    else:
        # No action flag given: print the command help and bail out.
        click.echo(ctx.get_help())
        return
    if DONE_ACTION:
        eTime = time.time()
        tTime = eTime - sTime
        printDebug("\n-----------\n" + "Time: %0.2fs" % tTime, "comment")
    else:
        printDebug("Goodbye", "comment")
|
def function[lib, parameter[ctx, filepath, extra, bootstrap, cache, reveal, show, save, directory]]:
constant[
LIBRARY: work with a local library of RDF models.
]
variable[verbose] assign[=] call[name[ctx].obj][constant[VERBOSE]]
variable[sTime] assign[=] call[name[ctx].obj][constant[STIME]]
variable[print_opts] assign[=] dictionary[[<ast.Constant object at 0x7da1b11ab490>], [<ast.Name object at 0x7da1b11a96f0>]]
variable[DONE_ACTION] assign[=] constant[False]
if name[bootstrap] begin[:]
variable[DONE_ACTION] assign[=] constant[True]
call[name[action_bootstrap], parameter[name[verbose]]]
call[name[printDebug], parameter[constant[Tip: you can now load an ontology by typing `ontospy lib -s`], constant[important]]]
if name[DONE_ACTION] begin[:]
variable[eTime] assign[=] call[name[time].time, parameter[]]
variable[tTime] assign[=] binary_operation[name[eTime] - name[sTime]]
call[name[printDebug], parameter[binary_operation[constant[
-----------
] + binary_operation[constant[Time: %0.2fs] <ast.Mod object at 0x7da2590d6920> name[tTime]]], constant[comment]]]
|
keyword[def] identifier[lib] ( identifier[ctx] ,
identifier[filepath] = keyword[None] ,
identifier[extra] = keyword[False] ,
identifier[bootstrap] = keyword[False] ,
identifier[cache] = keyword[False] ,
identifier[reveal] = keyword[False] ,
identifier[show] = keyword[False] ,
identifier[save] = keyword[False] ,
identifier[directory] = keyword[False] ):
literal[string]
identifier[verbose] = identifier[ctx] . identifier[obj] [ literal[string] ]
identifier[sTime] = identifier[ctx] . identifier[obj] [ literal[string] ]
identifier[print_opts] ={
literal[string] : identifier[verbose] ,
}
identifier[DONE_ACTION] = keyword[False]
keyword[if] identifier[bootstrap] :
identifier[DONE_ACTION] = keyword[True]
identifier[action_bootstrap] ( identifier[verbose] )
identifier[printDebug] ( literal[string] ,
literal[string] )
keyword[elif] identifier[cache] :
identifier[DONE_ACTION] = keyword[True]
identifier[action_cache_reset] ()
keyword[elif] identifier[directory] :
keyword[if] keyword[not] identifier[filepath] :
identifier[printDebug] ( literal[string] ,
literal[string] )
identifier[sys] . identifier[exit] ( literal[int] )
keyword[else] :
identifier[_location] = identifier[filepath] [ literal[int] ]
keyword[if] identifier[_location] . identifier[endswith] ( literal[string] ):
identifier[_location] = identifier[_location] [:- literal[int] ]
identifier[output] = identifier[action_update_library_location] ( identifier[_location] )
keyword[if] identifier[output] :
identifier[printDebug] (
literal[string] ,
literal[string] )
identifier[printDebug] ( literal[string] + literal[string] % identifier[_location] ,
literal[string] )
keyword[else] :
identifier[printDebug] (
literal[string] + literal[string] ,
literal[string] )
keyword[raise] identifier[SystemExit] ( literal[int] )
keyword[elif] identifier[reveal] :
identifier[action_reveal_library] ()
keyword[raise] identifier[SystemExit] ( literal[int] )
keyword[elif] identifier[save] :
keyword[if] identifier[filepath] :
identifier[DONE_ACTION] = keyword[True]
identifier[action_import] ( identifier[filepath] [ literal[int] ], identifier[verbose] )
keyword[else] :
identifier[click] . identifier[secho] (
literal[string] ,
identifier[fg] = literal[string] )
keyword[raise] identifier[SystemExit] ( literal[int] )
keyword[elif] identifier[show] :
identifier[click] . identifier[secho] ( literal[string] % identifier[get_home_location] (), identifier[fg] = literal[string] )
identifier[filename] = identifier[action_listlocal] ( identifier[all_details] = keyword[False] )
keyword[if] identifier[filename] :
identifier[DONE_ACTION] = keyword[True]
identifier[g] = identifier[get_pickled_ontology] ( identifier[filename] )
keyword[if] keyword[not] identifier[g] :
identifier[g] = identifier[do_pickle_ontology] ( identifier[filename] )
identifier[shellPrintOverview] ( identifier[g] , identifier[print_opts] )
keyword[else] :
identifier[click] . identifier[echo] ( identifier[ctx] . identifier[get_help] ())
keyword[return]
keyword[if] identifier[DONE_ACTION] :
identifier[eTime] = identifier[time] . identifier[time] ()
identifier[tTime] = identifier[eTime] - identifier[sTime]
identifier[printDebug] ( literal[string] + literal[string] % identifier[tTime] , literal[string] )
keyword[else] :
identifier[printDebug] ( literal[string] , literal[string] )
|
def lib(ctx, filepath=None, extra=False, bootstrap=False, cache=False, reveal=False, show=False, save=False, directory=False):
"""
LIBRARY: work with a local library of RDF models.
"""
verbose = ctx.obj['VERBOSE']
sTime = ctx.obj['STIME']
print_opts = {'labels': verbose}
DONE_ACTION = False
if bootstrap:
DONE_ACTION = True
action_bootstrap(verbose)
printDebug('Tip: you can now load an ontology by typing `ontospy lib -s`', 'important') # depends on [control=['if'], data=[]]
# raise SystemExit(1)
elif cache:
DONE_ACTION = True
action_cache_reset() # depends on [control=['if'], data=[]]
elif directory:
if not filepath:
printDebug('Please specify a new directory for the local library.', 'important')
sys.exit(0) # depends on [control=['if'], data=[]]
else:
_location = filepath[0]
if _location.endswith('/'):
# dont need the final slash
_location = _location[:-1] # depends on [control=['if'], data=[]]
output = action_update_library_location(_location)
if output:
printDebug('Note: no files have been moved or deleted (this has to be done manually)', 'comment')
printDebug('----------\n' + "New location: '%s'" % _location, 'important') # depends on [control=['if'], data=[]]
else:
printDebug('----------\n' + 'Please specify an existing folder path.', 'important')
raise SystemExit(1) # depends on [control=['if'], data=[]]
elif reveal:
action_reveal_library()
raise SystemExit(1) # depends on [control=['if'], data=[]]
elif save:
if filepath:
DONE_ACTION = True
action_import(filepath[0], verbose) # depends on [control=['if'], data=[]]
else:
click.secho('You provided no arguments - please specify what to save..', fg='white')
raise SystemExit(1) # depends on [control=['if'], data=[]]
elif show:
click.secho("Local library => '%s'" % get_home_location(), fg='white')
filename = action_listlocal(all_details=False)
if filename:
DONE_ACTION = True
g = get_pickled_ontology(filename)
if not g:
g = do_pickle_ontology(filename) # depends on [control=['if'], data=[]]
shellPrintOverview(g, print_opts) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
click.echo(ctx.get_help())
return
if DONE_ACTION:
eTime = time.time()
tTime = eTime - sTime
printDebug('\n-----------\n' + 'Time:\t %0.2fs' % tTime, 'comment') # depends on [control=['if'], data=[]]
else:
printDebug('Goodbye', 'comment')
|
def unregister(self, collector):
    """Remove a collector from the registry.

    Drops every metric name registered by ``collector`` and then the
    collector's own entry, all under the registry lock. Raises ``KeyError``
    if the collector was never registered.
    """
    with self._lock:
        for registered_name in self._collector_to_names[collector]:
            self._names_to_collectors.pop(registered_name)
        self._collector_to_names.pop(collector)
|
def function[unregister, parameter[self, collector]]:
constant[Remove a collector from the registry.]
with name[self]._lock begin[:]
for taget[name[name]] in starred[call[name[self]._collector_to_names][name[collector]]] begin[:]
<ast.Delete object at 0x7da1b2179120>
<ast.Delete object at 0x7da1b2179240>
|
keyword[def] identifier[unregister] ( identifier[self] , identifier[collector] ):
literal[string]
keyword[with] identifier[self] . identifier[_lock] :
keyword[for] identifier[name] keyword[in] identifier[self] . identifier[_collector_to_names] [ identifier[collector] ]:
keyword[del] identifier[self] . identifier[_names_to_collectors] [ identifier[name] ]
keyword[del] identifier[self] . identifier[_collector_to_names] [ identifier[collector] ]
|
def unregister(self, collector):
"""Remove a collector from the registry."""
with self._lock:
for name in self._collector_to_names[collector]:
del self._names_to_collectors[name] # depends on [control=['for'], data=['name']]
del self._collector_to_names[collector] # depends on [control=['with'], data=[]]
|
def message_info(exchange, routing_key, properties):
    """Return info about a message using the same conditional constructs

    :param str exchange: The exchange the message was published to
    :param str routing_key: The routing key used
    :param properties: The AMQP message properties
    :type properties: pika.spec.Basic.Properties
    :rtype: str
    """
    fragments = []
    if properties.message_id:
        fragments.append(properties.message_id)
    # Each remaining fragment is emitted only when its value is truthy.
    described = (
        ('[correlation_id="{}"]', properties.correlation_id),
        ('published to "{}"', exchange),
        ('using "{}"', routing_key),
    )
    for template, value in described:
        if value:
            fragments.append(template.format(value))
    return ' '.join(fragments)
|
def function[message_info, parameter[exchange, routing_key, properties]]:
constant[Return info about a message using the same conditional constructs
:param str exchange: The exchange the message was published to
:param str routing_key: The routing key used
:param properties: The AMQP message properties
:type properties: pika.spec.Basic.Properties
:rtype: str
]
variable[output] assign[=] list[[]]
if name[properties].message_id begin[:]
call[name[output].append, parameter[name[properties].message_id]]
if name[properties].correlation_id begin[:]
call[name[output].append, parameter[call[constant[[correlation_id="{}"]].format, parameter[name[properties].correlation_id]]]]
if name[exchange] begin[:]
call[name[output].append, parameter[call[constant[published to "{}"].format, parameter[name[exchange]]]]]
if name[routing_key] begin[:]
call[name[output].append, parameter[call[constant[using "{}"].format, parameter[name[routing_key]]]]]
return[call[constant[ ].join, parameter[name[output]]]]
|
keyword[def] identifier[message_info] ( identifier[exchange] , identifier[routing_key] , identifier[properties] ):
literal[string]
identifier[output] =[]
keyword[if] identifier[properties] . identifier[message_id] :
identifier[output] . identifier[append] ( identifier[properties] . identifier[message_id] )
keyword[if] identifier[properties] . identifier[correlation_id] :
identifier[output] . identifier[append] ( literal[string] . identifier[format] (
identifier[properties] . identifier[correlation_id] ))
keyword[if] identifier[exchange] :
identifier[output] . identifier[append] ( literal[string] . identifier[format] ( identifier[exchange] ))
keyword[if] identifier[routing_key] :
identifier[output] . identifier[append] ( literal[string] . identifier[format] ( identifier[routing_key] ))
keyword[return] literal[string] . identifier[join] ( identifier[output] )
|
def message_info(exchange, routing_key, properties):
"""Return info about a message using the same conditional constructs
:param str exchange: The exchange the message was published to
:param str routing_key: The routing key used
:param properties: The AMQP message properties
:type properties: pika.spec.Basic.Properties
:rtype: str
"""
output = []
if properties.message_id:
output.append(properties.message_id) # depends on [control=['if'], data=[]]
if properties.correlation_id:
output.append('[correlation_id="{}"]'.format(properties.correlation_id)) # depends on [control=['if'], data=[]]
if exchange:
output.append('published to "{}"'.format(exchange)) # depends on [control=['if'], data=[]]
if routing_key:
output.append('using "{}"'.format(routing_key)) # depends on [control=['if'], data=[]]
return ' '.join(output)
|
def convert_pint_to_fortran_safe_units(units, inverse=False):
    """
    Convert Pint units to Fortran safe units

    Parameters
    ----------
    units : list_like, str
        Units to convert

    inverse : bool
        If True, convert the other way i.e. convert Fortran safe units to
        Pint units

    Returns
    -------
    ``type(units)``
        Set of converted units
    """
    # Choose the substitution table for the requested direction, then
    # delegate the actual string rewriting to the shared helper.
    mapping = (
        FORTRAN_SAFE_TO_PINT_UNITS_MAPPING
        if inverse
        else PINT_TO_FORTRAN_SAFE_UNITS_MAPPING
    )
    return apply_string_substitutions(units, mapping)
|
def function[convert_pint_to_fortran_safe_units, parameter[units, inverse]]:
constant[
Convert Pint units to Fortran safe units
Parameters
----------
units : list_like, str
Units to convert
inverse : bool
If True, convert the other way i.e. convert Fortran safe units to Pint units
Returns
-------
``type(units)``
Set of converted units
]
if name[inverse] begin[:]
return[call[name[apply_string_substitutions], parameter[name[units], name[FORTRAN_SAFE_TO_PINT_UNITS_MAPPING]]]]
|
keyword[def] identifier[convert_pint_to_fortran_safe_units] ( identifier[units] , identifier[inverse] = keyword[False] ):
literal[string]
keyword[if] identifier[inverse] :
keyword[return] identifier[apply_string_substitutions] ( identifier[units] , identifier[FORTRAN_SAFE_TO_PINT_UNITS_MAPPING] )
keyword[else] :
keyword[return] identifier[apply_string_substitutions] ( identifier[units] , identifier[PINT_TO_FORTRAN_SAFE_UNITS_MAPPING] )
|
def convert_pint_to_fortran_safe_units(units, inverse=False):
"""
Convert Pint units to Fortran safe units
Parameters
----------
units : list_like, str
Units to convert
inverse : bool
If True, convert the other way i.e. convert Fortran safe units to Pint units
Returns
-------
``type(units)``
Set of converted units
"""
if inverse:
return apply_string_substitutions(units, FORTRAN_SAFE_TO_PINT_UNITS_MAPPING) # depends on [control=['if'], data=[]]
else:
return apply_string_substitutions(units, PINT_TO_FORTRAN_SAFE_UNITS_MAPPING)
|
def scale_to_vol(self, vol):
    """Scale ball to encompass a target volume.

    Computes the linear factor ``(vol / current_volume) ** (1/n)`` and
    applies it to the expansion factor and radius, then records ``vol``
    as the new ball volume.
    """
    linear_factor = (vol / self.vol_ball) ** (1.0 / self.n)
    self.expand = self.expand * linear_factor
    self.radius = self.radius * linear_factor
    self.vol_ball = vol
|
def function[scale_to_vol, parameter[self, vol]]:
constant[Scale ball to encompass a target volume.]
variable[f] assign[=] binary_operation[binary_operation[name[vol] / name[self].vol_ball] ** binary_operation[constant[1.0] / name[self].n]]
<ast.AugAssign object at 0x7da1b1ed5cf0>
<ast.AugAssign object at 0x7da1b1ed5d50>
name[self].vol_ball assign[=] name[vol]
|
keyword[def] identifier[scale_to_vol] ( identifier[self] , identifier[vol] ):
literal[string]
identifier[f] =( identifier[vol] / identifier[self] . identifier[vol_ball] )**( literal[int] / identifier[self] . identifier[n] )
identifier[self] . identifier[expand] *= identifier[f]
identifier[self] . identifier[radius] *= identifier[f]
identifier[self] . identifier[vol_ball] = identifier[vol]
|
def scale_to_vol(self, vol):
"""Scale ball to encompass a target volume."""
f = (vol / self.vol_ball) ** (1.0 / self.n) # linear factor
self.expand *= f
self.radius *= f
self.vol_ball = vol
|
def clear_to_reset(self, config_vars):
    """Clear all volatile information across a reset."""
    self._logger.info("Config vars in sensor log reset: %s", config_vars)
    super(SensorLogSubsystem, self).clear_to_reset(config_vars)
    # Drop all stream walkers and the in-progress dump state.
    self.storage.destroy_all_walkers()
    self.dump_walker = None
    # Optionally switch either log from rollover to fill/stop behavior,
    # driven by the corresponding config variable (default: keep rollover).
    fillstop_settings = (
        ('storage_fillstop', 'storage', "Marking storage log fill/stop"),
        ('streaming_fillstop', 'streaming', "Marking streaming log fill/stop"),
    )
    for config_key, log_name, debug_message in fillstop_settings:
        if config_vars.get(config_key, False):
            self._logger.debug(debug_message)
            self.storage.set_rollover(log_name, False)
|
def function[clear_to_reset, parameter[self, config_vars]]:
constant[Clear all volatile information across a reset.]
call[name[self]._logger.info, parameter[constant[Config vars in sensor log reset: %s], name[config_vars]]]
call[call[name[super], parameter[name[SensorLogSubsystem], name[self]]].clear_to_reset, parameter[name[config_vars]]]
call[name[self].storage.destroy_all_walkers, parameter[]]
name[self].dump_walker assign[=] constant[None]
if call[name[config_vars].get, parameter[constant[storage_fillstop], constant[False]]] begin[:]
call[name[self]._logger.debug, parameter[constant[Marking storage log fill/stop]]]
call[name[self].storage.set_rollover, parameter[constant[storage], constant[False]]]
if call[name[config_vars].get, parameter[constant[streaming_fillstop], constant[False]]] begin[:]
call[name[self]._logger.debug, parameter[constant[Marking streaming log fill/stop]]]
call[name[self].storage.set_rollover, parameter[constant[streaming], constant[False]]]
|
keyword[def] identifier[clear_to_reset] ( identifier[self] , identifier[config_vars] ):
literal[string]
identifier[self] . identifier[_logger] . identifier[info] ( literal[string] , identifier[config_vars] )
identifier[super] ( identifier[SensorLogSubsystem] , identifier[self] ). identifier[clear_to_reset] ( identifier[config_vars] )
identifier[self] . identifier[storage] . identifier[destroy_all_walkers] ()
identifier[self] . identifier[dump_walker] = keyword[None]
keyword[if] identifier[config_vars] . identifier[get] ( literal[string] , keyword[False] ):
identifier[self] . identifier[_logger] . identifier[debug] ( literal[string] )
identifier[self] . identifier[storage] . identifier[set_rollover] ( literal[string] , keyword[False] )
keyword[if] identifier[config_vars] . identifier[get] ( literal[string] , keyword[False] ):
identifier[self] . identifier[_logger] . identifier[debug] ( literal[string] )
identifier[self] . identifier[storage] . identifier[set_rollover] ( literal[string] , keyword[False] )
|
def clear_to_reset(self, config_vars):
"""Clear all volatile information across a reset."""
self._logger.info('Config vars in sensor log reset: %s', config_vars)
super(SensorLogSubsystem, self).clear_to_reset(config_vars)
self.storage.destroy_all_walkers()
self.dump_walker = None
if config_vars.get('storage_fillstop', False):
self._logger.debug('Marking storage log fill/stop')
self.storage.set_rollover('storage', False) # depends on [control=['if'], data=[]]
if config_vars.get('streaming_fillstop', False):
self._logger.debug('Marking streaming log fill/stop')
self.storage.set_rollover('streaming', False) # depends on [control=['if'], data=[]]
|
def delay(self, amount, pid, method, *args):
    """Call a method on another process after a specified delay.

    This is equivalent to ``dispatch`` except with an additional amount of
    time to wait prior to invoking the call.

    This function returns immediately.

    :param amount: The amount of time to wait in seconds before making the call.
    :type amount: ``float`` or ``int``
    :param pid: The pid of the process to be called.
    :type pid: :class:`PID`
    :param method: The name of the method to be called.
    :type method: ``str``
    :return: Nothing
    """
    # Both preconditions raise on violation, so nothing gets scheduled
    # unless the node is started and the pid is local.
    self._assert_started()
    self._assert_local_pid(pid)
    dispatch_fn = self._get_dispatch_method(pid, method)
    deadline = self.__loop.time() + amount
    self.__loop.add_timeout(deadline, dispatch_fn, *args)
|
def function[delay, parameter[self, amount, pid, method]]:
constant[Call a method on another process after a specified delay.
This is equivalent to ``dispatch`` except with an additional amount of
time to wait prior to invoking the call.
This function returns immediately.
:param amount: The amount of time to wait in seconds before making the call.
:type amount: ``float`` or ``int``
:param pid: The pid of the process to be called.
:type pid: :class:`PID`
:param method: The name of the method to be called.
:type method: ``str``
:return: Nothing
]
call[name[self]._assert_started, parameter[]]
call[name[self]._assert_local_pid, parameter[name[pid]]]
variable[function] assign[=] call[name[self]._get_dispatch_method, parameter[name[pid], name[method]]]
call[name[self].__loop.add_timeout, parameter[binary_operation[call[name[self].__loop.time, parameter[]] + name[amount]], name[function], <ast.Starred object at 0x7da1b141d690>]]
|
keyword[def] identifier[delay] ( identifier[self] , identifier[amount] , identifier[pid] , identifier[method] ,* identifier[args] ):
literal[string]
identifier[self] . identifier[_assert_started] ()
identifier[self] . identifier[_assert_local_pid] ( identifier[pid] )
identifier[function] = identifier[self] . identifier[_get_dispatch_method] ( identifier[pid] , identifier[method] )
identifier[self] . identifier[__loop] . identifier[add_timeout] ( identifier[self] . identifier[__loop] . identifier[time] ()+ identifier[amount] , identifier[function] ,* identifier[args] )
|
def delay(self, amount, pid, method, *args):
"""Call a method on another process after a specified delay.
This is equivalent to ``dispatch`` except with an additional amount of
time to wait prior to invoking the call.
This function returns immediately.
:param amount: The amount of time to wait in seconds before making the call.
:type amount: ``float`` or ``int``
:param pid: The pid of the process to be called.
:type pid: :class:`PID`
:param method: The name of the method to be called.
:type method: ``str``
:return: Nothing
"""
self._assert_started()
self._assert_local_pid(pid)
function = self._get_dispatch_method(pid, method)
self.__loop.add_timeout(self.__loop.time() + amount, function, *args)
|
def update_terminal_size():
    """Propagate the terminal size to the remote shells accounting for the
    place taken by the longest name"""
    width, height = terminal_size()
    # Reserve room for the longest display name plus separator, but never
    # shrink the usable width below 10 columns (or the real width if smaller).
    width = max(width - display_names.max_display_name_length - 2, min(width, 10))
    # python bug http://python.org/sf/1112949 on amd64: TIOCSWINSZ must be
    # reinterpreted as a signed int for ioctl (workaround from ajaxterm.py).
    ioctl_request = struct.unpack('i', struct.pack('I', termios.TIOCSWINSZ))[0]
    winsize = struct.pack('HHHH', height, width, 0, 0)
    new_size = (width, height)
    for shell in all_instances():
        # Only touch enabled shells whose size actually changed.
        if shell.enabled and shell.term_size != new_size:
            shell.term_size = new_size
            fcntl.ioctl(shell.fd, ioctl_request, winsize)
|
def function[update_terminal_size, parameter[]]:
constant[Propagate the terminal size to the remote shells accounting for the
place taken by the longest name]
<ast.Tuple object at 0x7da204344790> assign[=] call[name[terminal_size], parameter[]]
variable[w] assign[=] call[name[max], parameter[binary_operation[binary_operation[name[w] - name[display_names].max_display_name_length] - constant[2]], call[name[min], parameter[name[w], constant[10]]]]]
variable[bug] assign[=] call[call[name[struct].unpack, parameter[constant[i], call[name[struct].pack, parameter[constant[I], name[termios].TIOCSWINSZ]]]]][constant[0]]
variable[packed_size] assign[=] call[name[struct].pack, parameter[constant[HHHH], name[h], name[w], constant[0], constant[0]]]
variable[term_size] assign[=] tuple[[<ast.Name object at 0x7da204347b50>, <ast.Name object at 0x7da204345960>]]
for taget[name[i]] in starred[call[name[all_instances], parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da204347100> begin[:]
name[i].term_size assign[=] name[term_size]
call[name[fcntl].ioctl, parameter[name[i].fd, name[bug], name[packed_size]]]
|
keyword[def] identifier[update_terminal_size] ():
literal[string]
identifier[w] , identifier[h] = identifier[terminal_size] ()
identifier[w] = identifier[max] ( identifier[w] - identifier[display_names] . identifier[max_display_name_length] - literal[int] , identifier[min] ( identifier[w] , literal[int] ))
identifier[bug] = identifier[struct] . identifier[unpack] ( literal[string] , identifier[struct] . identifier[pack] ( literal[string] , identifier[termios] . identifier[TIOCSWINSZ] ))[ literal[int] ]
identifier[packed_size] = identifier[struct] . identifier[pack] ( literal[string] , identifier[h] , identifier[w] , literal[int] , literal[int] )
identifier[term_size] = identifier[w] , identifier[h]
keyword[for] identifier[i] keyword[in] identifier[all_instances] ():
keyword[if] identifier[i] . identifier[enabled] keyword[and] identifier[i] . identifier[term_size] != identifier[term_size] :
identifier[i] . identifier[term_size] = identifier[term_size]
identifier[fcntl] . identifier[ioctl] ( identifier[i] . identifier[fd] , identifier[bug] , identifier[packed_size] )
|
def update_terminal_size():
"""Propagate the terminal size to the remote shells accounting for the
place taken by the longest name"""
(w, h) = terminal_size()
w = max(w - display_names.max_display_name_length - 2, min(w, 10))
# python bug http://python.org/sf/1112949 on amd64
# from ajaxterm.py
bug = struct.unpack('i', struct.pack('I', termios.TIOCSWINSZ))[0]
packed_size = struct.pack('HHHH', h, w, 0, 0)
term_size = (w, h)
for i in all_instances():
if i.enabled and i.term_size != term_size:
i.term_size = term_size
fcntl.ioctl(i.fd, bug, packed_size) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
|
def tocimxml(self):
    """
    Return the CIM-XML representation of this CIM property,
    as an object of an appropriate subclass of :term:`Element`.
    The returned CIM-XML representation is a `PROPERTY`,
    `PROPERTY.REFERENCE`, or `PROPERTY.ARRAY` element dependent on the
    property type, and consistent with :term:`DSP0201`. Note that
    array properties cannot be of reference type.
    The order of qualifiers in the returned CIM-XML representation is
    preserved from the :class:`~pywbem.CIMProperty` object.
    Returns:
        The CIM-XML representation, as an object of an appropriate subclass
        of :term:`Element`.
    """
    # One CIM-XML element per qualifier, in the order stored on the property.
    qualifiers = [q.tocimxml() for q in self.qualifiers.values()]
    if self.is_array:  # pylint: disable=no-else-return
        # Array case -> PROPERTY.ARRAY. Arrays of references are not
        # representable in CIM-XML, hence the assertion.
        assert self.type != 'reference'
        if self.value is None:
            # NULL array: omit the VALUE.ARRAY element entirely.
            value_xml = None
        else:
            array_xml = []
            for v in self.value:
                if v is None:
                    # NULL array item: either an explicit VALUE.NULL element
                    # or (legacy behavior) an empty VALUE element, selected
                    # by the module-level SEND_VALUE_NULL switch.
                    if SEND_VALUE_NULL:
                        array_xml.append(cim_xml.VALUE_NULL())
                    else:
                        array_xml.append(cim_xml.VALUE(None))
                elif self.embedded_object is not None:
                    # Embedded instances/classes are serialized to their own
                    # CIM-XML and carried as an XML string inside VALUE.
                    assert isinstance(v, (CIMInstance, CIMClass))
                    array_xml.append(cim_xml.VALUE(v.tocimxml().toxml()))
                else:
                    # Plain CIM value: convert to its CIM-XML string form.
                    array_xml.append(cim_xml.VALUE(atomic_to_cim_xml(v)))
            value_xml = cim_xml.VALUE_ARRAY(array_xml)
        return cim_xml.PROPERTY_ARRAY(
            self.name,
            self.type,
            value_xml,
            self.array_size,
            self.class_origin,
            self.propagated,
            embedded_object=self.embedded_object,
            qualifiers=qualifiers)
    elif self.type == 'reference':  # scalar
        # Scalar reference -> PROPERTY.REFERENCE wrapping a VALUE.REFERENCE
        # (omitted when the value is NULL).
        if self.value is None:
            value_xml = None
        else:
            value_xml = cim_xml.VALUE_REFERENCE(self.value.tocimxml())
        return cim_xml.PROPERTY_REFERENCE(
            self.name,
            value_xml,
            reference_class=self.reference_class,
            class_origin=self.class_origin,
            propagated=self.propagated,
            qualifiers=qualifiers)
    else:  # scalar non-reference
        # Scalar non-reference -> PROPERTY; embedded objects are serialized
        # to an XML string, other values via atomic_to_cim_xml.
        if self.value is None:
            value_xml = None
        else:
            if self.embedded_object is not None:
                assert isinstance(self.value, (CIMInstance, CIMClass))
                value_xml = cim_xml.VALUE(self.value.tocimxml().toxml())
            else:
                value_xml = cim_xml.VALUE(atomic_to_cim_xml(self.value))
        return cim_xml.PROPERTY(
            self.name,
            self.type,
            value_xml,
            class_origin=self.class_origin,
            propagated=self.propagated,
            embedded_object=self.embedded_object,
            qualifiers=qualifiers)
|
def function[tocimxml, parameter[self]]:
constant[
Return the CIM-XML representation of this CIM property,
as an object of an appropriate subclass of :term:`Element`.
The returned CIM-XML representation is a `PROPERTY`,
`PROPERTY.REFERENCE`, or `PROPERTY.ARRAY` element dependent on the
property type, and consistent with :term:`DSP0201`. Note that
array properties cannot be of reference type.
The order of qualifiers in the returned CIM-XML representation is
preserved from the :class:`~pywbem.CIMProperty` object.
Returns:
The CIM-XML representation, as an object of an appropriate subclass
of :term:`Element`.
]
variable[qualifiers] assign[=] <ast.ListComp object at 0x7da20e9b0550>
if name[self].is_array begin[:]
assert[compare[name[self].type not_equal[!=] constant[reference]]]
if compare[name[self].value is constant[None]] begin[:]
variable[value_xml] assign[=] constant[None]
return[call[name[cim_xml].PROPERTY_ARRAY, parameter[name[self].name, name[self].type, name[value_xml], name[self].array_size, name[self].class_origin, name[self].propagated]]]
|
keyword[def] identifier[tocimxml] ( identifier[self] ):
literal[string]
identifier[qualifiers] =[ identifier[q] . identifier[tocimxml] () keyword[for] identifier[q] keyword[in] identifier[self] . identifier[qualifiers] . identifier[values] ()]
keyword[if] identifier[self] . identifier[is_array] :
keyword[assert] identifier[self] . identifier[type] != literal[string]
keyword[if] identifier[self] . identifier[value] keyword[is] keyword[None] :
identifier[value_xml] = keyword[None]
keyword[else] :
identifier[array_xml] =[]
keyword[for] identifier[v] keyword[in] identifier[self] . identifier[value] :
keyword[if] identifier[v] keyword[is] keyword[None] :
keyword[if] identifier[SEND_VALUE_NULL] :
identifier[array_xml] . identifier[append] ( identifier[cim_xml] . identifier[VALUE_NULL] ())
keyword[else] :
identifier[array_xml] . identifier[append] ( identifier[cim_xml] . identifier[VALUE] ( keyword[None] ))
keyword[elif] identifier[self] . identifier[embedded_object] keyword[is] keyword[not] keyword[None] :
keyword[assert] identifier[isinstance] ( identifier[v] ,( identifier[CIMInstance] , identifier[CIMClass] ))
identifier[array_xml] . identifier[append] ( identifier[cim_xml] . identifier[VALUE] ( identifier[v] . identifier[tocimxml] (). identifier[toxml] ()))
keyword[else] :
identifier[array_xml] . identifier[append] ( identifier[cim_xml] . identifier[VALUE] ( identifier[atomic_to_cim_xml] ( identifier[v] )))
identifier[value_xml] = identifier[cim_xml] . identifier[VALUE_ARRAY] ( identifier[array_xml] )
keyword[return] identifier[cim_xml] . identifier[PROPERTY_ARRAY] (
identifier[self] . identifier[name] ,
identifier[self] . identifier[type] ,
identifier[value_xml] ,
identifier[self] . identifier[array_size] ,
identifier[self] . identifier[class_origin] ,
identifier[self] . identifier[propagated] ,
identifier[embedded_object] = identifier[self] . identifier[embedded_object] ,
identifier[qualifiers] = identifier[qualifiers] )
keyword[elif] identifier[self] . identifier[type] == literal[string] :
keyword[if] identifier[self] . identifier[value] keyword[is] keyword[None] :
identifier[value_xml] = keyword[None]
keyword[else] :
identifier[value_xml] = identifier[cim_xml] . identifier[VALUE_REFERENCE] ( identifier[self] . identifier[value] . identifier[tocimxml] ())
keyword[return] identifier[cim_xml] . identifier[PROPERTY_REFERENCE] (
identifier[self] . identifier[name] ,
identifier[value_xml] ,
identifier[reference_class] = identifier[self] . identifier[reference_class] ,
identifier[class_origin] = identifier[self] . identifier[class_origin] ,
identifier[propagated] = identifier[self] . identifier[propagated] ,
identifier[qualifiers] = identifier[qualifiers] )
keyword[else] :
keyword[if] identifier[self] . identifier[value] keyword[is] keyword[None] :
identifier[value_xml] = keyword[None]
keyword[else] :
keyword[if] identifier[self] . identifier[embedded_object] keyword[is] keyword[not] keyword[None] :
keyword[assert] identifier[isinstance] ( identifier[self] . identifier[value] ,( identifier[CIMInstance] , identifier[CIMClass] ))
identifier[value_xml] = identifier[cim_xml] . identifier[VALUE] ( identifier[self] . identifier[value] . identifier[tocimxml] (). identifier[toxml] ())
keyword[else] :
identifier[value_xml] = identifier[cim_xml] . identifier[VALUE] ( identifier[atomic_to_cim_xml] ( identifier[self] . identifier[value] ))
keyword[return] identifier[cim_xml] . identifier[PROPERTY] (
identifier[self] . identifier[name] ,
identifier[self] . identifier[type] ,
identifier[value_xml] ,
identifier[class_origin] = identifier[self] . identifier[class_origin] ,
identifier[propagated] = identifier[self] . identifier[propagated] ,
identifier[embedded_object] = identifier[self] . identifier[embedded_object] ,
identifier[qualifiers] = identifier[qualifiers] )
|
def tocimxml(self):
"""
Return the CIM-XML representation of this CIM property,
as an object of an appropriate subclass of :term:`Element`.
The returned CIM-XML representation is a `PROPERTY`,
`PROPERTY.REFERENCE`, or `PROPERTY.ARRAY` element dependent on the
property type, and consistent with :term:`DSP0201`. Note that
array properties cannot be of reference type.
The order of qualifiers in the returned CIM-XML representation is
preserved from the :class:`~pywbem.CIMProperty` object.
Returns:
The CIM-XML representation, as an object of an appropriate subclass
of :term:`Element`.
"""
qualifiers = [q.tocimxml() for q in self.qualifiers.values()]
if self.is_array: # pylint: disable=no-else-return
assert self.type != 'reference'
if self.value is None:
value_xml = None # depends on [control=['if'], data=[]]
else:
array_xml = []
for v in self.value:
if v is None:
if SEND_VALUE_NULL:
array_xml.append(cim_xml.VALUE_NULL()) # depends on [control=['if'], data=[]]
else:
array_xml.append(cim_xml.VALUE(None)) # depends on [control=['if'], data=[]]
elif self.embedded_object is not None:
assert isinstance(v, (CIMInstance, CIMClass))
array_xml.append(cim_xml.VALUE(v.tocimxml().toxml())) # depends on [control=['if'], data=[]]
else:
array_xml.append(cim_xml.VALUE(atomic_to_cim_xml(v))) # depends on [control=['for'], data=['v']]
value_xml = cim_xml.VALUE_ARRAY(array_xml)
return cim_xml.PROPERTY_ARRAY(self.name, self.type, value_xml, self.array_size, self.class_origin, self.propagated, embedded_object=self.embedded_object, qualifiers=qualifiers) # depends on [control=['if'], data=[]]
elif self.type == 'reference': # scalar
if self.value is None:
value_xml = None # depends on [control=['if'], data=[]]
else:
value_xml = cim_xml.VALUE_REFERENCE(self.value.tocimxml())
return cim_xml.PROPERTY_REFERENCE(self.name, value_xml, reference_class=self.reference_class, class_origin=self.class_origin, propagated=self.propagated, qualifiers=qualifiers) # depends on [control=['if'], data=[]]
else: # scalar non-reference
if self.value is None:
value_xml = None # depends on [control=['if'], data=[]]
elif self.embedded_object is not None:
assert isinstance(self.value, (CIMInstance, CIMClass))
value_xml = cim_xml.VALUE(self.value.tocimxml().toxml()) # depends on [control=['if'], data=[]]
else:
value_xml = cim_xml.VALUE(atomic_to_cim_xml(self.value))
return cim_xml.PROPERTY(self.name, self.type, value_xml, class_origin=self.class_origin, propagated=self.propagated, embedded_object=self.embedded_object, qualifiers=qualifiers)
|
def setup_failure(self, scenario=None, **args):
    """Trigger one of the registered failure scenarios.

    @param scenario: string 'delayrequests' or 'crash'
    """
    # Reject a missing or unregistered scenario up front.
    assert scenario is not None and scenario in self.functiondict
    handler = self.functiondict[scenario]
    handler(**args)
|
def function[setup_failure, parameter[self, scenario]]:
constant[Add a given failure scenario
@param scenario: string 'delayrequests' or 'crash'
]
assert[<ast.BoolOp object at 0x7da2047eb520>]
call[call[name[self].functiondict][name[scenario]], parameter[]]
|
keyword[def] identifier[setup_failure] ( identifier[self] , identifier[scenario] = keyword[None] ,** identifier[args] ):
literal[string]
keyword[assert] identifier[scenario] keyword[is] keyword[not] keyword[None] keyword[and] identifier[scenario] keyword[in] identifier[self] . identifier[functiondict]
identifier[self] . identifier[functiondict] [ identifier[scenario] ](** identifier[args] )
|
def setup_failure(self, scenario=None, **args):
"""Add a given failure scenario
@param scenario: string 'delayrequests' or 'crash'
"""
assert scenario is not None and scenario in self.functiondict
self.functiondict[scenario](**args)
|
def sawtooth(ATTITUDE, amplitude=2.0, period=5.0):
    """Sawtooth (triangle) pattern based on uptime.

    ATTITUDE.usec is the uptime in microseconds; the output rises from 0 to
    `amplitude` over `period` minutes, then falls back to 0 over the next
    `period` minutes.
    """
    minutes = (ATTITUDE.usec * 1.0e-6) / 60
    phase = fmod(minutes, period * 2)
    # Rising edge during the first half of the cycle.
    if phase < period:
        return amplitude * (phase / period)
    # Falling edge during the second half.
    return amplitude * (period - (phase - period)) / period
|
def function[sawtooth, parameter[ATTITUDE, amplitude, period]]:
constant[sawtooth pattern based on uptime]
variable[mins] assign[=] binary_operation[binary_operation[name[ATTITUDE].usec * constant[1e-06]] / constant[60]]
variable[p] assign[=] call[name[fmod], parameter[name[mins], binary_operation[name[period] * constant[2]]]]
if compare[name[p] less[<] name[period]] begin[:]
return[binary_operation[name[amplitude] * binary_operation[name[p] / name[period]]]]
return[binary_operation[binary_operation[name[amplitude] * binary_operation[name[period] - binary_operation[name[p] - name[period]]]] / name[period]]]
|
keyword[def] identifier[sawtooth] ( identifier[ATTITUDE] , identifier[amplitude] = literal[int] , identifier[period] = literal[int] ):
literal[string]
identifier[mins] =( identifier[ATTITUDE] . identifier[usec] * literal[int] )/ literal[int]
identifier[p] = identifier[fmod] ( identifier[mins] , identifier[period] * literal[int] )
keyword[if] identifier[p] < identifier[period] :
keyword[return] identifier[amplitude] *( identifier[p] / identifier[period] )
keyword[return] identifier[amplitude] *( identifier[period] -( identifier[p] - identifier[period] ))/ identifier[period]
|
def sawtooth(ATTITUDE, amplitude=2.0, period=5.0):
"""sawtooth pattern based on uptime"""
mins = ATTITUDE.usec * 1e-06 / 60
p = fmod(mins, period * 2)
if p < period:
return amplitude * (p / period) # depends on [control=['if'], data=['p', 'period']]
return amplitude * (period - (p - period)) / period
|
def alleles_to_retrieve(df):
    """Alleles to retrieve from genome fasta

    Get a dict of the genome fasta contig title to a list of blastn results of
    the allele sequences that must be retrieved from the genome contig.

    Only the top blastn hit (first row) for each marker is examined; the
    original implementation looped over all rows but broke unconditionally
    after the first iteration, which obscured that intent.

    Args:
        df (pandas.DataFrame): blastn results dataframe

    Returns:
        {str:[pandas.Series]}: dict of contig title (header name) to list of
            top blastn result records for each marker for which the allele
            sequence must be retrieved from the original sequence.
    """
    contig_blastn_records = defaultdict(list)
    for m in df.marker.unique():
        # First row of the per-marker subset == top hit (original break-after-
        # one-iteration behaviour).
        _, top = next(df[df.marker == m].iterrows())
        if top.coverage < 1.0:
            contig_blastn_records[top.stitle].append(top)
    return contig_blastn_records
|
def function[alleles_to_retrieve, parameter[df]]:
constant[Alleles to retrieve from genome fasta
Get a dict of the genome fasta contig title to a list of blastn results of the allele sequences that must be
retrieved from the genome contig.
Args:
df (pandas.DataFrame): blastn results dataframe
Returns:
{str:[pandas.Series]}: dict of contig title (header name) to list of top blastn result records for each marker
for which the allele sequence must be retrieved from the original sequence.
]
variable[contig_blastn_records] assign[=] call[name[defaultdict], parameter[name[list]]]
variable[markers] assign[=] call[name[df].marker.unique, parameter[]]
for taget[name[m]] in starred[name[markers]] begin[:]
variable[dfsub] assign[=] call[name[df]][compare[name[df].marker equal[==] name[m]]]
for taget[tuple[[<ast.Name object at 0x7da1b1a956f0>, <ast.Name object at 0x7da1b1a97370>]]] in starred[call[name[dfsub].iterrows, parameter[]]] begin[:]
if compare[name[r].coverage less[<] constant[1.0]] begin[:]
call[call[name[contig_blastn_records]][name[r].stitle].append, parameter[name[r]]]
break
return[name[contig_blastn_records]]
|
keyword[def] identifier[alleles_to_retrieve] ( identifier[df] ):
literal[string]
identifier[contig_blastn_records] = identifier[defaultdict] ( identifier[list] )
identifier[markers] = identifier[df] . identifier[marker] . identifier[unique] ()
keyword[for] identifier[m] keyword[in] identifier[markers] :
identifier[dfsub] = identifier[df] [ identifier[df] . identifier[marker] == identifier[m] ]
keyword[for] identifier[i] , identifier[r] keyword[in] identifier[dfsub] . identifier[iterrows] ():
keyword[if] identifier[r] . identifier[coverage] < literal[int] :
identifier[contig_blastn_records] [ identifier[r] . identifier[stitle] ]. identifier[append] ( identifier[r] )
keyword[break]
keyword[return] identifier[contig_blastn_records]
|
def alleles_to_retrieve(df):
"""Alleles to retrieve from genome fasta
Get a dict of the genome fasta contig title to a list of blastn results of the allele sequences that must be
retrieved from the genome contig.
Args:
df (pandas.DataFrame): blastn results dataframe
Returns:
{str:[pandas.Series]}: dict of contig title (header name) to list of top blastn result records for each marker
for which the allele sequence must be retrieved from the original sequence.
"""
contig_blastn_records = defaultdict(list)
markers = df.marker.unique()
for m in markers:
dfsub = df[df.marker == m]
for (i, r) in dfsub.iterrows():
if r.coverage < 1.0:
contig_blastn_records[r.stitle].append(r) # depends on [control=['if'], data=[]]
break # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['m']]
return contig_blastn_records
|
def list_models(self, limit=-1, offset=-1):
    """List models in the database. Takes optional parameters limit and
    offset for pagination.

    Parameters
    ----------
    limit : int
        Limit number of models in the result set
    offset : int
        Set offset in list (order as defined by object store)

    Returns
    -------
    ObjectListing
    """
    # Models are stored as plain objects; delegate pagination to the store.
    pagination = {'limit': limit, 'offset': offset}
    return self.list_objects(**pagination)
|
def function[list_models, parameter[self, limit, offset]]:
constant[List models in the database. Takes optional parameters limit and
offset for pagination.
Parameters
----------
limit : int
Limit number of models in the result set
offset : int
Set offset in list (order as defined by object store)
Returns
-------
ObjectListing
]
return[call[name[self].list_objects, parameter[]]]
|
keyword[def] identifier[list_models] ( identifier[self] , identifier[limit] =- literal[int] , identifier[offset] =- literal[int] ):
literal[string]
keyword[return] identifier[self] . identifier[list_objects] ( identifier[limit] = identifier[limit] , identifier[offset] = identifier[offset] )
|
def list_models(self, limit=-1, offset=-1):
"""List models in the database. Takes optional parameters limit and
offset for pagination.
Parameters
----------
limit : int
Limit number of models in the result set
offset : int
Set offset in list (order as defined by object store)
Returns
-------
ObjectListing
"""
return self.list_objects(limit=limit, offset=offset)
|
def start(self):
    """Start processing streams."""
    def _tick():
        now = time.time()
        log.debug('Step {}'.format(now))
        # Advance every registered stream by one batch step.
        for stream in self._dstreams:
            stream._step(now)

    # batch_duration is in seconds; PeriodicCallback expects milliseconds.
    self._pcb = PeriodicCallback(_tick, self.batch_duration * 1000.0)
    self._pcb.start()
    # Make sure the periodic callback is cancelled when the context stops.
    self._on_stop_cb.append(self._pcb.stop)
    StreamingContext._activeContext = self
|
def function[start, parameter[self]]:
constant[Start processing streams.]
def function[cb, parameter[]]:
variable[time_] assign[=] call[name[time].time, parameter[]]
call[name[log].debug, parameter[call[constant[Step {}].format, parameter[name[time_]]]]]
for taget[name[d]] in starred[name[self]._dstreams] begin[:]
call[name[d]._step, parameter[name[time_]]]
name[self]._pcb assign[=] call[name[PeriodicCallback], parameter[name[cb], binary_operation[name[self].batch_duration * constant[1000.0]]]]
call[name[self]._pcb.start, parameter[]]
call[name[self]._on_stop_cb.append, parameter[name[self]._pcb.stop]]
name[StreamingContext]._activeContext assign[=] name[self]
|
keyword[def] identifier[start] ( identifier[self] ):
literal[string]
keyword[def] identifier[cb] ():
identifier[time_] = identifier[time] . identifier[time] ()
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[time_] ))
keyword[for] identifier[d] keyword[in] identifier[self] . identifier[_dstreams] :
identifier[d] . identifier[_step] ( identifier[time_] )
identifier[self] . identifier[_pcb] = identifier[PeriodicCallback] ( identifier[cb] , identifier[self] . identifier[batch_duration] * literal[int] )
identifier[self] . identifier[_pcb] . identifier[start] ()
identifier[self] . identifier[_on_stop_cb] . identifier[append] ( identifier[self] . identifier[_pcb] . identifier[stop] )
identifier[StreamingContext] . identifier[_activeContext] = identifier[self]
|
def start(self):
"""Start processing streams."""
def cb():
time_ = time.time()
log.debug('Step {}'.format(time_))
# run a step on all streams
for d in self._dstreams:
d._step(time_) # depends on [control=['for'], data=['d']]
self._pcb = PeriodicCallback(cb, self.batch_duration * 1000.0)
self._pcb.start()
self._on_stop_cb.append(self._pcb.stop)
StreamingContext._activeContext = self
|
def install_dir(pkgpath, install_path, register_func, delete_after_install=False):
    """Install plugin from specified directory.

    install_path and register_func are same as :func:`install_plugin`.

    :param delete_after_install: Delete pkgpath after install (used in :func:`install_from_zip`).
    """
    logger.debug("%s is a directory, attempting to validate", pkgpath)
    # register_func validates the package and yields the plugin descriptor.
    plugin = register_func(pkgpath)
    logger.debug("%s looks good, copying to %s", pkgpath, install_path)
    destination = os.path.join(install_path, plugin.name)
    try:
        copy_tree(pkgpath, destination)
        if delete_after_install:
            logger.debug("deleting %s", pkgpath)
            shutil.rmtree(pkgpath)
        pkgpath = destination
    except (OSError, CTError) as exc:
        # TODO: handle package name exists (upgrade? overwrite?)
        logger.debug(str(exc), exc_info=True)
        raise exceptions.PluginAlreadyInstalled(plugin.name)
    return install_deps(pkgpath)
|
def function[install_dir, parameter[pkgpath, install_path, register_func, delete_after_install]]:
constant[Install plugin from specified directory.
install_path and register_func are same as :func:`install_plugin`.
:param delete_after_install: Delete pkgpath after install (used in :func:`install_from_zip`).
]
call[name[logger].debug, parameter[constant[%s is a directory, attempting to validate], name[pkgpath]]]
variable[plugin] assign[=] call[name[register_func], parameter[name[pkgpath]]]
call[name[logger].debug, parameter[constant[%s looks good, copying to %s], name[pkgpath], name[install_path]]]
<ast.Try object at 0x7da1b12f3520>
return[call[name[install_deps], parameter[name[pkgpath]]]]
|
keyword[def] identifier[install_dir] ( identifier[pkgpath] , identifier[install_path] , identifier[register_func] , identifier[delete_after_install] = keyword[False] ):
literal[string]
identifier[logger] . identifier[debug] ( literal[string] , identifier[pkgpath] )
identifier[plugin] = identifier[register_func] ( identifier[pkgpath] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[pkgpath] , identifier[install_path] )
keyword[try] :
identifier[copy_tree] ( identifier[pkgpath] , identifier[os] . identifier[path] . identifier[join] ( identifier[install_path] , identifier[plugin] . identifier[name] ))
keyword[if] identifier[delete_after_install] :
identifier[logger] . identifier[debug] ( literal[string] , identifier[pkgpath] )
identifier[shutil] . identifier[rmtree] ( identifier[pkgpath] )
identifier[pkgpath] = identifier[os] . identifier[path] . identifier[join] ( identifier[install_path] , identifier[plugin] . identifier[name] )
keyword[except] ( identifier[OSError] , identifier[CTError] ) keyword[as] identifier[exc] :
identifier[logger] . identifier[debug] ( identifier[str] ( identifier[exc] ), identifier[exc_info] = keyword[True] )
keyword[raise] identifier[exceptions] . identifier[PluginAlreadyInstalled] ( identifier[plugin] . identifier[name] )
keyword[return] identifier[install_deps] ( identifier[pkgpath] )
|
def install_dir(pkgpath, install_path, register_func, delete_after_install=False):
"""Install plugin from specified directory.
install_path and register_func are same as :func:`install_plugin`.
:param delete_after_install: Delete pkgpath after install (used in :func:`install_from_zip`).
"""
logger.debug('%s is a directory, attempting to validate', pkgpath)
plugin = register_func(pkgpath)
logger.debug('%s looks good, copying to %s', pkgpath, install_path)
try:
copy_tree(pkgpath, os.path.join(install_path, plugin.name))
if delete_after_install:
logger.debug('deleting %s', pkgpath)
shutil.rmtree(pkgpath) # depends on [control=['if'], data=[]]
pkgpath = os.path.join(install_path, plugin.name) # depends on [control=['try'], data=[]]
except (OSError, CTError) as exc:
# TODO: handle package name exists (upgrade? overwrite?)
logger.debug(str(exc), exc_info=True)
raise exceptions.PluginAlreadyInstalled(plugin.name) # depends on [control=['except'], data=['exc']]
return install_deps(pkgpath)
|
def average_last_builds(connection, package, limit=5):
    """
    Find the average duration time for the last couple of builds.

    :param connection: txkoji.Connection
    :param package: package name
    :returns: deferred that when fired returns a datetime.timedelta object, or
              None if there were no previous builds for this package.
    """
    # TODO: take branches (targets, or tags, etc) into account when estimating
    # a package's build time.
    query_opts = {'limit': limit, 'order': '-completion_time'}
    builds = yield connection.listBuilds(
        package, state=build_states.COMPLETE, queryOpts=query_opts)
    if not builds:
        defer.returnValue(None)
    total = sum((build.duration for build in builds), timedelta())
    defer.returnValue(total / len(builds))
|
def function[average_last_builds, parameter[connection, package, limit]]:
constant[
Find the average duration time for the last couple of builds.
:param connection: txkoji.Connection
:param package: package name
:returns: deferred that when fired returns a datetime.timedelta object, or
None if there were no previous builds for this package.
]
variable[state] assign[=] name[build_states].COMPLETE
variable[opts] assign[=] dictionary[[<ast.Constant object at 0x7da1b1f39870>, <ast.Constant object at 0x7da1b1f39060>], [<ast.Name object at 0x7da1b1f392a0>, <ast.Constant object at 0x7da1b1f38e50>]]
variable[builds] assign[=] <ast.Yield object at 0x7da1b1f3ad10>
if <ast.UnaryOp object at 0x7da1b1f397e0> begin[:]
call[name[defer].returnValue, parameter[constant[None]]]
variable[durations] assign[=] <ast.ListComp object at 0x7da1b1f3a950>
variable[average] assign[=] binary_operation[call[name[sum], parameter[name[durations], call[name[timedelta], parameter[]]]] / call[name[len], parameter[name[durations]]]]
call[name[defer].returnValue, parameter[name[average]]]
|
keyword[def] identifier[average_last_builds] ( identifier[connection] , identifier[package] , identifier[limit] = literal[int] ):
literal[string]
identifier[state] = identifier[build_states] . identifier[COMPLETE]
identifier[opts] ={ literal[string] : identifier[limit] , literal[string] : literal[string] }
identifier[builds] = keyword[yield] identifier[connection] . identifier[listBuilds] ( identifier[package] , identifier[state] = identifier[state] , identifier[queryOpts] = identifier[opts] )
keyword[if] keyword[not] identifier[builds] :
identifier[defer] . identifier[returnValue] ( keyword[None] )
identifier[durations] =[ identifier[build] . identifier[duration] keyword[for] identifier[build] keyword[in] identifier[builds] ]
identifier[average] = identifier[sum] ( identifier[durations] , identifier[timedelta] ())/ identifier[len] ( identifier[durations] )
identifier[defer] . identifier[returnValue] ( identifier[average] )
|
def average_last_builds(connection, package, limit=5):
"""
Find the average duration time for the last couple of builds.
:param connection: txkoji.Connection
:param package: package name
:returns: deferred that when fired returns a datetime.timedelta object, or
None if there were no previous builds for this package.
"""
# TODO: take branches (targets, or tags, etc) into account when estimating
# a package's build time.
state = build_states.COMPLETE
opts = {'limit': limit, 'order': '-completion_time'}
builds = (yield connection.listBuilds(package, state=state, queryOpts=opts))
if not builds:
defer.returnValue(None) # depends on [control=['if'], data=[]]
durations = [build.duration for build in builds]
average = sum(durations, timedelta()) / len(durations)
# print('average duration for %s is %s' % (package, average))
defer.returnValue(average)
|
def _repack_pkgs(pkgs, normalize=True):
    '''
    Repack packages specified using "pkgs" argument to pkg states into a single
    dictionary
    '''
    if normalize and 'pkg.normalize_name' in __salt__:
        _normalize_name = __salt__['pkg.normalize_name']
    else:
        # No provider-specific normalization available: identity mapping.
        _normalize_name = lambda pkgname: pkgname
    repacked = {}
    for name, version in six.iteritems(salt.utils.data.repack_dictlist(pkgs)):
        key = _normalize_name(six.text_type(name))
        # Preserve None versions; stringify everything else.
        repacked[key] = six.text_type(version) if version is not None else version
    return repacked
|
def function[_repack_pkgs, parameter[pkgs, normalize]]:
constant[
Repack packages specified using "pkgs" argument to pkg states into a single
dictionary
]
if <ast.BoolOp object at 0x7da1b20467a0> begin[:]
variable[_normalize_name] assign[=] call[name[__salt__]][constant[pkg.normalize_name]]
return[call[name[dict], parameter[<ast.ListComp object at 0x7da1b2044220>]]]
|
keyword[def] identifier[_repack_pkgs] ( identifier[pkgs] , identifier[normalize] = keyword[True] ):
literal[string]
keyword[if] identifier[normalize] keyword[and] literal[string] keyword[in] identifier[__salt__] :
identifier[_normalize_name] = identifier[__salt__] [ literal[string] ]
keyword[else] :
identifier[_normalize_name] = keyword[lambda] identifier[pkgname] : identifier[pkgname]
keyword[return] identifier[dict] (
[
( identifier[_normalize_name] ( identifier[six] . identifier[text_type] ( identifier[x] )), identifier[six] . identifier[text_type] ( identifier[y] ) keyword[if] identifier[y] keyword[is] keyword[not] keyword[None] keyword[else] identifier[y] )
keyword[for] identifier[x] , identifier[y] keyword[in] identifier[six] . identifier[iteritems] ( identifier[salt] . identifier[utils] . identifier[data] . identifier[repack_dictlist] ( identifier[pkgs] ))
]
)
|
def _repack_pkgs(pkgs, normalize=True):
"""
Repack packages specified using "pkgs" argument to pkg states into a single
dictionary
"""
if normalize and 'pkg.normalize_name' in __salt__:
_normalize_name = __salt__['pkg.normalize_name'] # depends on [control=['if'], data=[]]
else:
_normalize_name = lambda pkgname: pkgname
return dict([(_normalize_name(six.text_type(x)), six.text_type(y) if y is not None else y) for (x, y) in six.iteritems(salt.utils.data.repack_dictlist(pkgs))])
|
def indication(self, pdu):
    """Direct this PDU to the appropriate server, create a
    connection if one hasn't already been created."""
    if _debug: TCPClientDirector._debug("indication %r", pdu)

    # Look up the actor for the destination address, creating one lazily.
    addr = pdu.pduDestination
    client = self.clients.get(addr, None)
    if not client:
        client = self.actorClass(self, addr)

    # Forward the message to the per-connection actor.
    client.indication(pdu)
|
def function[indication, parameter[self, pdu]]:
constant[Direct this PDU to the appropriate server, create a
connection if one hasn't already been created.]
if name[_debug] begin[:]
call[name[TCPClientDirector]._debug, parameter[constant[indication %r], name[pdu]]]
variable[addr] assign[=] name[pdu].pduDestination
variable[client] assign[=] call[name[self].clients.get, parameter[name[addr], constant[None]]]
if <ast.UnaryOp object at 0x7da1b084f430> begin[:]
variable[client] assign[=] call[name[self].actorClass, parameter[name[self], name[addr]]]
call[name[client].indication, parameter[name[pdu]]]
|
keyword[def] identifier[indication] ( identifier[self] , identifier[pdu] ):
literal[string]
keyword[if] identifier[_debug] : identifier[TCPClientDirector] . identifier[_debug] ( literal[string] , identifier[pdu] )
identifier[addr] = identifier[pdu] . identifier[pduDestination]
identifier[client] = identifier[self] . identifier[clients] . identifier[get] ( identifier[addr] , keyword[None] )
keyword[if] keyword[not] identifier[client] :
identifier[client] = identifier[self] . identifier[actorClass] ( identifier[self] , identifier[addr] )
identifier[client] . identifier[indication] ( identifier[pdu] )
|
def indication(self, pdu):
"""Direct this PDU to the appropriate server, create a
connection if one hasn't already been created."""
if _debug:
TCPClientDirector._debug('indication %r', pdu) # depends on [control=['if'], data=[]]
# get the destination
addr = pdu.pduDestination
# get the client
client = self.clients.get(addr, None)
if not client:
client = self.actorClass(self, addr) # depends on [control=['if'], data=[]]
# send the message
client.indication(pdu)
|
def conditionOnState(self,state_index):
    '''
    Temporarily assume that a particular Markov state will occur in the
    succeeding period, and condition solver attributes on this assumption.
    Allows the solver to construct the future-state-conditional marginal
    value function (etc) for that future state.

    Parameters
    ----------
    state_index : int
        Index of the future Markov state to condition on.

    Returns
    -------
    none
    '''
    # Set future-state-conditional values as attributes of self, pulled from
    # the per-state lists at the given index.
    self.IncomeDstn = self.IncomeDstn_list[state_index]
    self.Rfree = self.Rfree_list[state_index]
    self.PermGroFac = self.PermGroFac_list[state_index]
    self.vPfuncNext = self.solution_next.vPfunc[state_index]
    self.mNrmMinNow = self.mNrmMin_list[state_index]
    self.BoroCnstNat = self.BoroCnstNatAll[state_index]
    # NOTE(review): setAndUpdateValues presumably recomputes derived solver
    # attributes (and may overwrite vPfuncNext etc.) -- confirm against its
    # definition; the re-assignments below rely on running after it.
    self.setAndUpdateValues(self.solution_next,self.IncomeDstn,self.LivPrb,self.DiscFac)
    self.DiscFacEff = self.DiscFac # survival probability LivPrb represents probability from
                                   # *current* state, so DiscFacEff is just DiscFac for now
    # These lines have to come after setAndUpdateValues to override the definitions there
    self.vPfuncNext = self.solution_next.vPfunc[state_index]
    if self.CubicBool:
        # Marginal-marginal value function, only needed for cubic interpolation.
        self.vPPfuncNext= self.solution_next.vPPfunc[state_index]
    if self.vFuncBool:
        # Level value function, only needed when the value function is computed.
        self.vFuncNext = self.solution_next.vFunc[state_index]
|
def function[conditionOnState, parameter[self, state_index]]:
constant[
Temporarily assume that a particular Markov state will occur in the
succeeding period, and condition solver attributes on this assumption.
Allows the solver to construct the future-state-conditional marginal
value function (etc) for that future state.
Parameters
----------
state_index : int
Index of the future Markov state to condition on.
Returns
-------
none
]
name[self].IncomeDstn assign[=] call[name[self].IncomeDstn_list][name[state_index]]
name[self].Rfree assign[=] call[name[self].Rfree_list][name[state_index]]
name[self].PermGroFac assign[=] call[name[self].PermGroFac_list][name[state_index]]
name[self].vPfuncNext assign[=] call[name[self].solution_next.vPfunc][name[state_index]]
name[self].mNrmMinNow assign[=] call[name[self].mNrmMin_list][name[state_index]]
name[self].BoroCnstNat assign[=] call[name[self].BoroCnstNatAll][name[state_index]]
call[name[self].setAndUpdateValues, parameter[name[self].solution_next, name[self].IncomeDstn, name[self].LivPrb, name[self].DiscFac]]
name[self].DiscFacEff assign[=] name[self].DiscFac
name[self].vPfuncNext assign[=] call[name[self].solution_next.vPfunc][name[state_index]]
if name[self].CubicBool begin[:]
name[self].vPPfuncNext assign[=] call[name[self].solution_next.vPPfunc][name[state_index]]
if name[self].vFuncBool begin[:]
name[self].vFuncNext assign[=] call[name[self].solution_next.vFunc][name[state_index]]
|
keyword[def] identifier[conditionOnState] ( identifier[self] , identifier[state_index] ):
literal[string]
identifier[self] . identifier[IncomeDstn] = identifier[self] . identifier[IncomeDstn_list] [ identifier[state_index] ]
identifier[self] . identifier[Rfree] = identifier[self] . identifier[Rfree_list] [ identifier[state_index] ]
identifier[self] . identifier[PermGroFac] = identifier[self] . identifier[PermGroFac_list] [ identifier[state_index] ]
identifier[self] . identifier[vPfuncNext] = identifier[self] . identifier[solution_next] . identifier[vPfunc] [ identifier[state_index] ]
identifier[self] . identifier[mNrmMinNow] = identifier[self] . identifier[mNrmMin_list] [ identifier[state_index] ]
identifier[self] . identifier[BoroCnstNat] = identifier[self] . identifier[BoroCnstNatAll] [ identifier[state_index] ]
identifier[self] . identifier[setAndUpdateValues] ( identifier[self] . identifier[solution_next] , identifier[self] . identifier[IncomeDstn] , identifier[self] . identifier[LivPrb] , identifier[self] . identifier[DiscFac] )
identifier[self] . identifier[DiscFacEff] = identifier[self] . identifier[DiscFac]
identifier[self] . identifier[vPfuncNext] = identifier[self] . identifier[solution_next] . identifier[vPfunc] [ identifier[state_index] ]
keyword[if] identifier[self] . identifier[CubicBool] :
identifier[self] . identifier[vPPfuncNext] = identifier[self] . identifier[solution_next] . identifier[vPPfunc] [ identifier[state_index] ]
keyword[if] identifier[self] . identifier[vFuncBool] :
identifier[self] . identifier[vFuncNext] = identifier[self] . identifier[solution_next] . identifier[vFunc] [ identifier[state_index] ]
|
def conditionOnState(self, state_index):
    """
    Temporarily assume that a particular Markov state will occur in the
    succeeding period, and condition solver attributes on this assumption.
    Allows the solver to construct the future-state-conditional marginal
    value function (etc) for that future state.
    Parameters
    ----------
    state_index : int
        Index of the future Markov state to condition on.
    Returns
    -------
    none
    """
    # Set future-state-conditional values as attributes of self
    self.IncomeDstn = self.IncomeDstn_list[state_index]
    self.Rfree = self.Rfree_list[state_index]
    self.PermGroFac = self.PermGroFac_list[state_index]
    self.vPfuncNext = self.solution_next.vPfunc[state_index]
    self.mNrmMinNow = self.mNrmMin_list[state_index]
    self.BoroCnstNat = self.BoroCnstNatAll[state_index]
    self.setAndUpdateValues(self.solution_next, self.IncomeDstn, self.LivPrb, self.DiscFac)
    self.DiscFacEff = self.DiscFac # survival probability LivPrb represents probability from
                                   # *current* state, so DiscFacEff is just DiscFac for now
    # These lines have to come after setAndUpdateValues to override the definitions there
    self.vPfuncNext = self.solution_next.vPfunc[state_index]
    if self.CubicBool:
        # Marginal-marginal value function, only needed for cubic interpolation
        self.vPPfuncNext = self.solution_next.vPPfunc[state_index]
    if self.vFuncBool:
        # Level of the value function, only built when explicitly requested
        self.vFuncNext = self.solution_next.vFunc[state_index]
|
def full_width_svg(url, width, height, alt_text=None):
    ''' Helper to render an SVG that will size to fill
    its element while keeping its dimentions.

    :param url: location of the SVG asset
    :param width: intrinsic width of the SVG (numeric or numeric string)
    :param height: intrinsic height of the SVG (numeric or numeric string)
    :param alt_text: optional alternative text for accessibility
    :returns: dict with ``ratio`` (height/width as a whole-percent string,
        used for the CSS padding-bottom aspect-ratio technique), ``url``
        and ``alt_text``
    '''
    # Bug fix: the previous code truncated str(ratio) to its first two
    # characters, which produced "10" for a square (100%) image and "5."
    # for single-digit ratios.  Truncate the number itself instead; for
    # ratios below 100 this matches the old output exactly.
    ratio_percent = int((float(height) / float(width)) * 100)
    return {
        'ratio': str(ratio_percent),
        'url': url,
        'alt_text': alt_text
    }
|
def function[full_width_svg, parameter[url, width, height, alt_text]]:
constant[ Helper to render an SVG that will size to fill
its element while keeping its dimentions.
]
return[dictionary[[<ast.Constant object at 0x7da18f8119c0>, <ast.Constant object at 0x7da18f811f90>, <ast.Constant object at 0x7da18f813d60>], [<ast.Subscript object at 0x7da18f810be0>, <ast.Name object at 0x7da18f813bb0>, <ast.Name object at 0x7da18f810370>]]]
|
keyword[def] identifier[full_width_svg] ( identifier[url] , identifier[width] , identifier[height] , identifier[alt_text] = keyword[None] ):
literal[string]
keyword[return] {
literal[string] : identifier[str] (( identifier[float] ( identifier[height] )/ identifier[float] ( identifier[width] ))* literal[int] )[: literal[int] ],
literal[string] : identifier[url] ,
literal[string] : identifier[alt_text]
}
|
def full_width_svg(url, width, height, alt_text=None):
""" Helper to render an SVG that will size to fill
its element while keeping its dimentions.
"""
return {'ratio': str(float(height) / float(width) * 100)[:2], 'url': url, 'alt_text': alt_text}
|
def run(*extractor_list, **kwargs):
    """Parse arguments provided on the commandline and execute extractors."""
    parsed_args = _get_args(kwargs.get('args'))
    log.info('Going to run list of {} FeatureExtractors'.format(len(extractor_list)))
    # Register every extractor on a fresh collection backed by the cache.
    collection = fex.Collection(cache_path=parsed_args.cache_path)
    for feature_extractor in extractor_list:
        collection.add_feature_extractor(feature_extractor)
    # When deploying, the output path gets the current git hash prefixed.
    out_path = _prefix_git_hash(parsed_args.path) if parsed_args.deploy else parsed_args.path
    collection.run(out_path)
|
def function[run, parameter[]]:
constant[Parse arguments provided on the commandline and execute extractors.]
variable[args] assign[=] call[name[_get_args], parameter[call[name[kwargs].get, parameter[constant[args]]]]]
variable[n_extractors] assign[=] call[name[len], parameter[name[extractor_list]]]
call[name[log].info, parameter[call[constant[Going to run list of {} FeatureExtractors].format, parameter[name[n_extractors]]]]]
variable[collection] assign[=] call[name[fex].Collection, parameter[]]
for taget[name[extractor]] in starred[name[extractor_list]] begin[:]
call[name[collection].add_feature_extractor, parameter[name[extractor]]]
variable[out_path] assign[=] name[args].path
if name[args].deploy begin[:]
variable[out_path] assign[=] call[name[_prefix_git_hash], parameter[name[out_path]]]
call[name[collection].run, parameter[name[out_path]]]
|
keyword[def] identifier[run] (* identifier[extractor_list] ,** identifier[kwargs] ):
literal[string]
identifier[args] = identifier[_get_args] ( identifier[kwargs] . identifier[get] ( literal[string] ))
identifier[n_extractors] = identifier[len] ( identifier[extractor_list] )
identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[n_extractors] ))
identifier[collection] = identifier[fex] . identifier[Collection] ( identifier[cache_path] = identifier[args] . identifier[cache_path] )
keyword[for] identifier[extractor] keyword[in] identifier[extractor_list] :
identifier[collection] . identifier[add_feature_extractor] ( identifier[extractor] )
identifier[out_path] = identifier[args] . identifier[path]
keyword[if] identifier[args] . identifier[deploy] :
identifier[out_path] = identifier[_prefix_git_hash] ( identifier[out_path] )
identifier[collection] . identifier[run] ( identifier[out_path] )
|
def run(*extractor_list, **kwargs):
"""Parse arguments provided on the commandline and execute extractors."""
args = _get_args(kwargs.get('args'))
n_extractors = len(extractor_list)
log.info('Going to run list of {} FeatureExtractors'.format(n_extractors))
collection = fex.Collection(cache_path=args.cache_path)
for extractor in extractor_list:
collection.add_feature_extractor(extractor) # depends on [control=['for'], data=['extractor']]
out_path = args.path
if args.deploy:
out_path = _prefix_git_hash(out_path) # depends on [control=['if'], data=[]]
collection.run(out_path)
|
def createDataFromFile(self, filePath, inputEncoding = None, defaultFps = None):
    """Fetch a given filePath and parse its contents.
    May raise the following exceptions:
    * RuntimeError - generic exception telling that parsing was unsuccessfull
    * IOError - failed to open a file at given filePath
    @return SubtitleData filled with non-empty, default datafields. Client should modify them
    and then perform an add/update operation"""
    subtitleFile = File(filePath)
    # Auto-detect the encoding when the caller did not supply one.
    if inputEncoding is None:
        inputEncoding = subtitleFile.detectEncoding()
    encoding = inputEncoding.lower()
    # An explicit FPS overrides detection from the file.
    if defaultFps is None:
        videoInfo = subtitleFile.detectFps()
    else:
        videoInfo = VideoInfo(defaultFps)
    parsedSubtitles = self._parseFile(subtitleFile, encoding, videoInfo.fps)
    data = SubtitleData()
    data.subtitles = parsedSubtitles
    data.fps = videoInfo.fps
    data.inputEncoding = encoding
    data.outputEncoding = encoding
    data.outputFormat = self._parser.parsedFormat()
    data.videoPath = videoInfo.videoPath
    return data
|
def function[createDataFromFile, parameter[self, filePath, inputEncoding, defaultFps]]:
constant[Fetch a given filePath and parse its contents.
May raise the following exceptions:
* RuntimeError - generic exception telling that parsing was unsuccessfull
* IOError - failed to open a file at given filePath
@return SubtitleData filled with non-empty, default datafields. Client should modify them
and then perform an add/update operation]
variable[file_] assign[=] call[name[File], parameter[name[filePath]]]
if compare[name[inputEncoding] is constant[None]] begin[:]
variable[inputEncoding] assign[=] call[name[file_].detectEncoding, parameter[]]
variable[inputEncoding] assign[=] call[name[inputEncoding].lower, parameter[]]
variable[videoInfo] assign[=] <ast.IfExp object at 0x7da20e956f50>
variable[subtitles] assign[=] call[name[self]._parseFile, parameter[name[file_], name[inputEncoding], name[videoInfo].fps]]
variable[data] assign[=] call[name[SubtitleData], parameter[]]
name[data].subtitles assign[=] name[subtitles]
name[data].fps assign[=] name[videoInfo].fps
name[data].inputEncoding assign[=] name[inputEncoding]
name[data].outputEncoding assign[=] name[inputEncoding]
name[data].outputFormat assign[=] call[name[self]._parser.parsedFormat, parameter[]]
name[data].videoPath assign[=] name[videoInfo].videoPath
return[name[data]]
|
keyword[def] identifier[createDataFromFile] ( identifier[self] , identifier[filePath] , identifier[inputEncoding] = keyword[None] , identifier[defaultFps] = keyword[None] ):
literal[string]
identifier[file_] = identifier[File] ( identifier[filePath] )
keyword[if] identifier[inputEncoding] keyword[is] keyword[None] :
identifier[inputEncoding] = identifier[file_] . identifier[detectEncoding] ()
identifier[inputEncoding] = identifier[inputEncoding] . identifier[lower] ()
identifier[videoInfo] = identifier[VideoInfo] ( identifier[defaultFps] ) keyword[if] identifier[defaultFps] keyword[is] keyword[not] keyword[None] keyword[else] identifier[file_] . identifier[detectFps] ()
identifier[subtitles] = identifier[self] . identifier[_parseFile] ( identifier[file_] , identifier[inputEncoding] , identifier[videoInfo] . identifier[fps] )
identifier[data] = identifier[SubtitleData] ()
identifier[data] . identifier[subtitles] = identifier[subtitles]
identifier[data] . identifier[fps] = identifier[videoInfo] . identifier[fps]
identifier[data] . identifier[inputEncoding] = identifier[inputEncoding]
identifier[data] . identifier[outputEncoding] = identifier[inputEncoding]
identifier[data] . identifier[outputFormat] = identifier[self] . identifier[_parser] . identifier[parsedFormat] ()
identifier[data] . identifier[videoPath] = identifier[videoInfo] . identifier[videoPath]
keyword[return] identifier[data]
|
def createDataFromFile(self, filePath, inputEncoding=None, defaultFps=None):
"""Fetch a given filePath and parse its contents.
May raise the following exceptions:
* RuntimeError - generic exception telling that parsing was unsuccessfull
* IOError - failed to open a file at given filePath
@return SubtitleData filled with non-empty, default datafields. Client should modify them
and then perform an add/update operation"""
file_ = File(filePath)
if inputEncoding is None:
inputEncoding = file_.detectEncoding() # depends on [control=['if'], data=['inputEncoding']]
inputEncoding = inputEncoding.lower()
videoInfo = VideoInfo(defaultFps) if defaultFps is not None else file_.detectFps()
subtitles = self._parseFile(file_, inputEncoding, videoInfo.fps)
data = SubtitleData()
data.subtitles = subtitles
data.fps = videoInfo.fps
data.inputEncoding = inputEncoding
data.outputEncoding = inputEncoding
data.outputFormat = self._parser.parsedFormat()
data.videoPath = videoInfo.videoPath
return data
|
def get_bio(self, section, language=None):
    """
    Return one section of the bio.
    section can be "content", "summary" or
    "published" (for published date)
    """
    params = None
    # A language request needs the standard parameter set plus "lang".
    if language:
        params = self._get_params()
        params["lang"] = language
    return self._extract_cdata_from_request(
        self.ws_prefix + ".getInfo", section, params
    )
|
def function[get_bio, parameter[self, section, language]]:
constant[
Returns a section of the bio.
section can be "content", "summary" or
"published" (for published date)
]
if name[language] begin[:]
variable[params] assign[=] call[name[self]._get_params, parameter[]]
call[name[params]][constant[lang]] assign[=] name[language]
return[call[name[self]._extract_cdata_from_request, parameter[binary_operation[name[self].ws_prefix + constant[.getInfo]], name[section], name[params]]]]
|
keyword[def] identifier[get_bio] ( identifier[self] , identifier[section] , identifier[language] = keyword[None] ):
literal[string]
keyword[if] identifier[language] :
identifier[params] = identifier[self] . identifier[_get_params] ()
identifier[params] [ literal[string] ]= identifier[language]
keyword[else] :
identifier[params] = keyword[None]
keyword[return] identifier[self] . identifier[_extract_cdata_from_request] (
identifier[self] . identifier[ws_prefix] + literal[string] , identifier[section] , identifier[params]
)
|
def get_bio(self, section, language=None):
"""
Returns a section of the bio.
section can be "content", "summary" or
"published" (for published date)
"""
if language:
params = self._get_params()
params['lang'] = language # depends on [control=['if'], data=[]]
else:
params = None
return self._extract_cdata_from_request(self.ws_prefix + '.getInfo', section, params)
|
def serialise(self):
    """Creates standard market book json response,
    will error if EX_MARKET_DEF not incl.
    """
    market_def = self.market_definition
    # Each runner is serialised together with its market-definition entry,
    # keyed by (selection_id, handicap).
    runner_books = [
        runner.serialise(
            self.market_definition_runner_dict[(runner.selection_id, runner.handicap)]
        )
        for runner in self.runners
    ]
    book = {
        'marketId': self.market_id,
        'totalAvailable': None,
        'isMarketDataDelayed': None,
        'lastMatchTime': None,
        'betDelay': market_def.get('betDelay'),
        'version': market_def.get('version'),
        'complete': market_def.get('complete'),
        'runnersVoidable': market_def.get('runnersVoidable'),
        'totalMatched': self.total_matched,
        'status': market_def.get('status'),
        'bspReconciled': market_def.get('bspReconciled'),
        'crossMatching': market_def.get('crossMatching'),
        'inplay': market_def.get('inPlay'),
        'numberOfWinners': market_def.get('numberOfWinners'),
        'numberOfRunners': len(market_def.get('runners')),
        'numberOfActiveRunners': market_def.get('numberOfActiveRunners'),
        'runners': runner_books,
        'publishTime': self.publish_time,
        'priceLadderDefinition': market_def.get('priceLadderDefinition'),
        'keyLineDescription': market_def.get('keyLineDefinition'),
        'marketDefinition': market_def,  # used in lightweight
    }
    return book
|
def function[serialise, parameter[self]]:
constant[Creates standard market book json response,
will error if EX_MARKET_DEF not incl.
]
return[dictionary[[<ast.Constant object at 0x7da1b15f1390>, <ast.Constant object at 0x7da1b15f3280>, <ast.Constant object at 0x7da1b15f06d0>, <ast.Constant object at 0x7da1b15f0d90>, <ast.Constant object at 0x7da1b15f1f90>, <ast.Constant object at 0x7da1b15f1b40>, <ast.Constant object at 0x7da1b15f31c0>, <ast.Constant object at 0x7da1b15f2710>, <ast.Constant object at 0x7da1b15f00d0>, <ast.Constant object at 0x7da1b15f17b0>, <ast.Constant object at 0x7da1b15f1690>, <ast.Constant object at 0x7da1b15f1180>, <ast.Constant object at 0x7da1b15f1f30>, <ast.Constant object at 0x7da1b15f0d60>, <ast.Constant object at 0x7da1b15f2320>, <ast.Constant object at 0x7da1b15f3040>, <ast.Constant object at 0x7da1b15f1cf0>, <ast.Constant object at 0x7da1b15f1750>, <ast.Constant object at 0x7da1b15f0820>, <ast.Constant object at 0x7da1b15f18a0>, <ast.Constant object at 0x7da1b15f1960>], [<ast.Attribute object at 0x7da1b15f0fd0>, <ast.Constant object at 0x7da1b15f2f50>, <ast.Constant object at 0x7da1b15f1330>, <ast.Constant object at 0x7da1b15f0130>, <ast.Call object at 0x7da1b15f2e90>, <ast.Call object at 0x7da1b15f1450>, <ast.Call object at 0x7da1b15f02e0>, <ast.Call object at 0x7da1b15f0340>, <ast.Attribute object at 0x7da1b15f30a0>, <ast.Call object at 0x7da1b15f0ca0>, <ast.Call object at 0x7da1b15f0430>, <ast.Call object at 0x7da1b15f1a20>, <ast.Call object at 0x7da1b15f1480>, <ast.Call object at 0x7da1b15f2da0>, <ast.Call object at 0x7da1b15f1d50>, <ast.Call object at 0x7da1b15f2b90>, <ast.ListComp object at 0x7da1b15f0940>, <ast.Attribute object at 0x7da1b15f0a00>, <ast.Call object at 0x7da1b15f1ed0>, <ast.Call object at 0x7da1b15f0c70>, <ast.Attribute object at 0x7da1b15f1900>]]]
|
keyword[def] identifier[serialise] ( identifier[self] ):
literal[string]
keyword[return] {
literal[string] : identifier[self] . identifier[market_id] ,
literal[string] : keyword[None] ,
literal[string] : keyword[None] ,
literal[string] : keyword[None] ,
literal[string] : identifier[self] . identifier[market_definition] . identifier[get] ( literal[string] ),
literal[string] : identifier[self] . identifier[market_definition] . identifier[get] ( literal[string] ),
literal[string] : identifier[self] . identifier[market_definition] . identifier[get] ( literal[string] ),
literal[string] : identifier[self] . identifier[market_definition] . identifier[get] ( literal[string] ),
literal[string] : identifier[self] . identifier[total_matched] ,
literal[string] : identifier[self] . identifier[market_definition] . identifier[get] ( literal[string] ),
literal[string] : identifier[self] . identifier[market_definition] . identifier[get] ( literal[string] ),
literal[string] : identifier[self] . identifier[market_definition] . identifier[get] ( literal[string] ),
literal[string] : identifier[self] . identifier[market_definition] . identifier[get] ( literal[string] ),
literal[string] : identifier[self] . identifier[market_definition] . identifier[get] ( literal[string] ),
literal[string] : identifier[len] ( identifier[self] . identifier[market_definition] . identifier[get] ( literal[string] )),
literal[string] : identifier[self] . identifier[market_definition] . identifier[get] ( literal[string] ),
literal[string] :[
identifier[runner] . identifier[serialise] (
identifier[self] . identifier[market_definition_runner_dict] [( identifier[runner] . identifier[selection_id] , identifier[runner] . identifier[handicap] )]
) keyword[for] identifier[runner] keyword[in] identifier[self] . identifier[runners]
],
literal[string] : identifier[self] . identifier[publish_time] ,
literal[string] : identifier[self] . identifier[market_definition] . identifier[get] ( literal[string] ),
literal[string] : identifier[self] . identifier[market_definition] . identifier[get] ( literal[string] ),
literal[string] : identifier[self] . identifier[market_definition] ,
}
|
def serialise(self):
"""Creates standard market book json response,
will error if EX_MARKET_DEF not incl.
""" # used in lightweight
return {'marketId': self.market_id, 'totalAvailable': None, 'isMarketDataDelayed': None, 'lastMatchTime': None, 'betDelay': self.market_definition.get('betDelay'), 'version': self.market_definition.get('version'), 'complete': self.market_definition.get('complete'), 'runnersVoidable': self.market_definition.get('runnersVoidable'), 'totalMatched': self.total_matched, 'status': self.market_definition.get('status'), 'bspReconciled': self.market_definition.get('bspReconciled'), 'crossMatching': self.market_definition.get('crossMatching'), 'inplay': self.market_definition.get('inPlay'), 'numberOfWinners': self.market_definition.get('numberOfWinners'), 'numberOfRunners': len(self.market_definition.get('runners')), 'numberOfActiveRunners': self.market_definition.get('numberOfActiveRunners'), 'runners': [runner.serialise(self.market_definition_runner_dict[runner.selection_id, runner.handicap]) for runner in self.runners], 'publishTime': self.publish_time, 'priceLadderDefinition': self.market_definition.get('priceLadderDefinition'), 'keyLineDescription': self.market_definition.get('keyLineDefinition'), 'marketDefinition': self.market_definition}
|
def serialize(obj):
    """Takes a object and produces a dict-like representation
    :param obj: the object to serialize
    """
    # Lists are serialized element-wise; everything else goes through the
    # generic serializer.
    if not isinstance(obj, list):
        return GenericSerializer(ModelProviderImpl()).serialize(obj)
    return [serialize(element) for element in obj]
|
def function[serialize, parameter[obj]]:
constant[Takes a object and produces a dict-like representation
:param obj: the object to serialize
]
if call[name[isinstance], parameter[name[obj], name[list]]] begin[:]
return[<ast.ListComp object at 0x7da18f58e6b0>]
return[call[call[name[GenericSerializer], parameter[call[name[ModelProviderImpl], parameter[]]]].serialize, parameter[name[obj]]]]
|
keyword[def] identifier[serialize] ( identifier[obj] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[list] ):
keyword[return] [ identifier[serialize] ( identifier[o] ) keyword[for] identifier[o] keyword[in] identifier[obj] ]
keyword[return] identifier[GenericSerializer] ( identifier[ModelProviderImpl] ()). identifier[serialize] ( identifier[obj] )
|
def serialize(obj):
"""Takes a object and produces a dict-like representation
:param obj: the object to serialize
"""
if isinstance(obj, list):
return [serialize(o) for o in obj] # depends on [control=['if'], data=[]]
return GenericSerializer(ModelProviderImpl()).serialize(obj)
|
def unassign_asset_from_repository(self, asset_id, repository_id):
    """Removes an ``Asset`` from a ``Repository``.
    arg: asset_id (osid.id.Id): the ``Id`` of the ``Asset``
    arg: repository_id (osid.id.Id): the ``Id`` of the
        ``Repository``
    raise: NotFound - ``asset_id`` or ``repository_id`` not found
        or ``asset_id`` not assigned to ``repository_id``
    raise: NullArgument - ``asset_id`` or ``repository_id`` is
        ``null``
    raise: OperationFailed - unable to complete request
    raise: PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceBinAssignmentSession.unassign_resource_from_bin
    repository_manager = self._get_provider_manager('REPOSITORY', local=True)
    lookup = repository_manager.get_repository_lookup_session(proxy=self._proxy)
    # Raises NotFound when the repository id is unknown.
    lookup.get_repository(repository_id)
    self._unassign_object_from_catalog(asset_id, repository_id)
|
def function[unassign_asset_from_repository, parameter[self, asset_id, repository_id]]:
constant[Removes an ``Asset`` from a ``Repository``.
arg: asset_id (osid.id.Id): the ``Id`` of the ``Asset``
arg: repository_id (osid.id.Id): the ``Id`` of the
``Repository``
raise: NotFound - ``asset_id`` or ``repository_id`` not found
or ``asset_id`` not assigned to ``repository_id``
raise: NullArgument - ``asset_id`` or ``repository_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
]
variable[mgr] assign[=] call[name[self]._get_provider_manager, parameter[constant[REPOSITORY]]]
variable[lookup_session] assign[=] call[name[mgr].get_repository_lookup_session, parameter[]]
call[name[lookup_session].get_repository, parameter[name[repository_id]]]
call[name[self]._unassign_object_from_catalog, parameter[name[asset_id], name[repository_id]]]
|
keyword[def] identifier[unassign_asset_from_repository] ( identifier[self] , identifier[asset_id] , identifier[repository_id] ):
literal[string]
identifier[mgr] = identifier[self] . identifier[_get_provider_manager] ( literal[string] , identifier[local] = keyword[True] )
identifier[lookup_session] = identifier[mgr] . identifier[get_repository_lookup_session] ( identifier[proxy] = identifier[self] . identifier[_proxy] )
identifier[lookup_session] . identifier[get_repository] ( identifier[repository_id] )
identifier[self] . identifier[_unassign_object_from_catalog] ( identifier[asset_id] , identifier[repository_id] )
|
def unassign_asset_from_repository(self, asset_id, repository_id):
"""Removes an ``Asset`` from a ``Repository``.
arg: asset_id (osid.id.Id): the ``Id`` of the ``Asset``
arg: repository_id (osid.id.Id): the ``Id`` of the
``Repository``
raise: NotFound - ``asset_id`` or ``repository_id`` not found
or ``asset_id`` not assigned to ``repository_id``
raise: NullArgument - ``asset_id`` or ``repository_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinAssignmentSession.unassign_resource_from_bin
mgr = self._get_provider_manager('REPOSITORY', local=True)
lookup_session = mgr.get_repository_lookup_session(proxy=self._proxy)
lookup_session.get_repository(repository_id) # to raise NotFound
self._unassign_object_from_catalog(asset_id, repository_id)
|
def vectorized_sunday_to_monday(dtix):
    """A vectorized implementation of
    :func:`pandas.tseries.holiday.sunday_to_monday`.
    Parameters
    ----------
    dtix : pd.DatetimeIndex
        The index to shift sundays to mondays.
    Returns
    -------
    sundays_as_mondays : pd.DatetimeIndex
        ``dtix`` with all sundays moved to the next monday.
    """
    # Work on a copy of the underlying datetime64 array; weekday 6 == Sunday.
    shifted = dtix.values.copy()
    sunday_mask = dtix.weekday == 6
    shifted[sunday_mask] = shifted[sunday_mask] + np.timedelta64(1, 'D')
    return pd.DatetimeIndex(shifted)
|
def function[vectorized_sunday_to_monday, parameter[dtix]]:
constant[A vectorized implementation of
:func:`pandas.tseries.holiday.sunday_to_monday`.
Parameters
----------
dtix : pd.DatetimeIndex
The index to shift sundays to mondays.
Returns
-------
sundays_as_mondays : pd.DatetimeIndex
``dtix`` with all sundays moved to the next monday.
]
variable[values] assign[=] call[name[dtix].values.copy, parameter[]]
<ast.AugAssign object at 0x7da1b16b0d90>
return[call[name[pd].DatetimeIndex, parameter[name[values]]]]
|
keyword[def] identifier[vectorized_sunday_to_monday] ( identifier[dtix] ):
literal[string]
identifier[values] = identifier[dtix] . identifier[values] . identifier[copy] ()
identifier[values] [ identifier[dtix] . identifier[weekday] == literal[int] ]+= identifier[np] . identifier[timedelta64] ( literal[int] , literal[string] )
keyword[return] identifier[pd] . identifier[DatetimeIndex] ( identifier[values] )
|
def vectorized_sunday_to_monday(dtix):
"""A vectorized implementation of
:func:`pandas.tseries.holiday.sunday_to_monday`.
Parameters
----------
dtix : pd.DatetimeIndex
The index to shift sundays to mondays.
Returns
-------
sundays_as_mondays : pd.DatetimeIndex
``dtix`` with all sundays moved to the next monday.
"""
values = dtix.values.copy()
values[dtix.weekday == 6] += np.timedelta64(1, 'D')
return pd.DatetimeIndex(values)
|
def addInput(self, key):
    """Add key to input : key, value or map

    Records ``key`` in ``self.inputs`` (at most once) and appends its
    etree representation under the document's <inputs> element, creating
    that element when it does not exist yet.

    :param key: object exposing an ``etree`` attribute (key, value or map)
    :returns: True
    """
    if key not in self.inputs:
        self.inputs.append(key)
    root = self.etree
    t_inputs = root.find('inputs')
    # Bug fix: an Element with no children is falsy, so ``if not t_inputs``
    # wrongly re-created <inputs> when an empty one already existed.  The
    # ElementTree docs require an explicit ``is None`` test here.
    if t_inputs is None:
        t_inputs = ctree.SubElement(root, 'inputs')
    # NOTE(review): the etree node is appended even when ``key`` was already
    # registered in self.inputs -- confirm whether duplicates are intended.
    t_inputs.append(key.etree)
    return True
|
def function[addInput, parameter[self, key]]:
constant[Add key to input : key, value or map
]
if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[self].inputs] begin[:]
call[name[self].inputs.append, parameter[name[key]]]
variable[root] assign[=] name[self].etree
variable[t_inputs] assign[=] call[name[root].find, parameter[constant[inputs]]]
if <ast.UnaryOp object at 0x7da204565b40> begin[:]
variable[t_inputs] assign[=] call[name[ctree].SubElement, parameter[name[root], constant[inputs]]]
call[name[t_inputs].append, parameter[name[key].etree]]
return[constant[True]]
|
keyword[def] identifier[addInput] ( identifier[self] , identifier[key] ):
literal[string]
keyword[if] identifier[key] keyword[not] keyword[in] identifier[self] . identifier[inputs] :
identifier[self] . identifier[inputs] . identifier[append] ( identifier[key] )
identifier[root] = identifier[self] . identifier[etree]
identifier[t_inputs] = identifier[root] . identifier[find] ( literal[string] )
keyword[if] keyword[not] identifier[t_inputs] :
identifier[t_inputs] = identifier[ctree] . identifier[SubElement] ( identifier[root] , literal[string] )
identifier[t_inputs] . identifier[append] ( identifier[key] . identifier[etree] )
keyword[return] keyword[True]
|
def addInput(self, key):
"""Add key to input : key, value or map
"""
if key not in self.inputs:
self.inputs.append(key) # depends on [control=['if'], data=['key']]
root = self.etree
t_inputs = root.find('inputs')
if not t_inputs:
t_inputs = ctree.SubElement(root, 'inputs') # depends on [control=['if'], data=[]]
t_inputs.append(key.etree)
return True
|
def create(cls, repo, path, ref='HEAD', message=None, force=False, **kwargs):
    """Create a new tag reference.
    :param path:
        The name of the tag, i.e. 1.0 or releases/1.0.
        The prefix refs/tags is implied
    :param ref:
        A reference to the object you want to tag. It can be a commit, tree or
        blob.
    :param message:
        If not None, the message will be used in your tag object. This will also
        create an additional tag object that allows to obtain that information, i.e.::
            tagref.tag.message
    :param force:
        If True, to force creation of a tag even though that tag already exists.
    :param kwargs:
        Additional keyword arguments to be passed to git-tag
    :return: A new TagReference"""
    # Translate the message / force options into git-tag flags.
    if message:
        kwargs['m'] = message
    if force:
        kwargs['f'] = True
    repo.git.tag(path, ref, **kwargs)
    return TagReference(repo, "%s/%s" % (cls._common_path_default, path))
|
def function[create, parameter[cls, repo, path, ref, message, force]]:
constant[Create a new tag reference.
:param path:
The name of the tag, i.e. 1.0 or releases/1.0.
The prefix refs/tags is implied
:param ref:
A reference to the object you want to tag. It can be a commit, tree or
blob.
:param message:
If not None, the message will be used in your tag object. This will also
create an additional tag object that allows to obtain that information, i.e.::
tagref.tag.message
:param force:
If True, to force creation of a tag even though that tag already exists.
:param kwargs:
Additional keyword arguments to be passed to git-tag
:return: A new TagReference]
variable[args] assign[=] tuple[[<ast.Name object at 0x7da1b1d5dd80>, <ast.Name object at 0x7da1b1d5e3e0>]]
if name[message] begin[:]
call[name[kwargs]][constant[m]] assign[=] name[message]
if name[force] begin[:]
call[name[kwargs]][constant[f]] assign[=] constant[True]
call[name[repo].git.tag, parameter[<ast.Starred object at 0x7da1b1d5d210>]]
return[call[name[TagReference], parameter[name[repo], binary_operation[constant[%s/%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b1d5cca0>, <ast.Name object at 0x7da1b1d5f1c0>]]]]]]
|
keyword[def] identifier[create] ( identifier[cls] , identifier[repo] , identifier[path] , identifier[ref] = literal[string] , identifier[message] = keyword[None] , identifier[force] = keyword[False] ,** identifier[kwargs] ):
literal[string]
identifier[args] =( identifier[path] , identifier[ref] )
keyword[if] identifier[message] :
identifier[kwargs] [ literal[string] ]= identifier[message]
keyword[if] identifier[force] :
identifier[kwargs] [ literal[string] ]= keyword[True]
identifier[repo] . identifier[git] . identifier[tag] (* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[TagReference] ( identifier[repo] , literal[string] %( identifier[cls] . identifier[_common_path_default] , identifier[path] ))
|
def create(cls, repo, path, ref='HEAD', message=None, force=False, **kwargs):
"""Create a new tag reference.
:param path:
The name of the tag, i.e. 1.0 or releases/1.0.
The prefix refs/tags is implied
:param ref:
A reference to the object you want to tag. It can be a commit, tree or
blob.
:param message:
If not None, the message will be used in your tag object. This will also
create an additional tag object that allows to obtain that information, i.e.::
tagref.tag.message
:param force:
If True, to force creation of a tag even though that tag already exists.
:param kwargs:
Additional keyword arguments to be passed to git-tag
:return: A new TagReference"""
args = (path, ref)
if message:
kwargs['m'] = message # depends on [control=['if'], data=[]]
if force:
kwargs['f'] = True # depends on [control=['if'], data=[]]
repo.git.tag(*args, **kwargs)
return TagReference(repo, '%s/%s' % (cls._common_path_default, path))
|
def is_valid(isbn_id):
    """
    Check that a given string is a valid ISBN.
    :param isbn_id: the isbn to be checked.
    :returns: boolean indicating whether the isbn is valid or not.
    >>> is_valid("978-3-16-148410-0")
    True
    >>> is_valid("9783161484100")
    True
    >>> is_valid("9783161484100aa")
    False
    >>> is_valid("abcd")
    False
    >>> is_valid("0136091814")
    True
    >>> is_valid("0136091812")
    False
    >>> is_valid("9780136091817")
    False
    >>> is_valid("123456789X")
    True
    """
    # Reject anything isbnlib does not recognise as an ISBN at all.
    if isbnlib.notisbn(isbn_id):
        return False
    # Accept either the canonical form or its standard masked (hyphenated) form.
    canonical = isbnlib.get_canonical_isbn(isbn_id)
    return canonical == isbn_id or isbnlib.mask(canonical) == isbn_id
|
def function[is_valid, parameter[isbn_id]]:
constant[
Check that a given string is a valid ISBN.
:param isbn_id: the isbn to be checked.
:returns: boolean indicating whether the isbn is valid or not.
>>> is_valid("978-3-16-148410-0")
True
>>> is_valid("9783161484100")
True
>>> is_valid("9783161484100aa")
False
>>> is_valid("abcd")
False
>>> is_valid("0136091814")
True
>>> is_valid("0136091812")
False
>>> is_valid("9780136091817")
False
>>> is_valid("123456789X")
True
]
return[<ast.BoolOp object at 0x7da1b244ed70>]
|
keyword[def] identifier[is_valid] ( identifier[isbn_id] ):
literal[string]
keyword[return] (
( keyword[not] identifier[isbnlib] . identifier[notisbn] ( identifier[isbn_id] )) keyword[and] (
identifier[isbnlib] . identifier[get_canonical_isbn] ( identifier[isbn_id] )== identifier[isbn_id] keyword[or]
identifier[isbnlib] . identifier[mask] ( identifier[isbnlib] . identifier[get_canonical_isbn] ( identifier[isbn_id] ))== identifier[isbn_id] )
)
|
def is_valid(isbn_id):
"""
Check that a given string is a valid ISBN.
:param isbn_id: the isbn to be checked.
:returns: boolean indicating whether the isbn is valid or not.
>>> is_valid("978-3-16-148410-0")
True
>>> is_valid("9783161484100")
True
>>> is_valid("9783161484100aa")
False
>>> is_valid("abcd")
False
>>> is_valid("0136091814")
True
>>> is_valid("0136091812")
False
>>> is_valid("9780136091817")
False
>>> is_valid("123456789X")
True
"""
return not isbnlib.notisbn(isbn_id) and (isbnlib.get_canonical_isbn(isbn_id) == isbn_id or isbnlib.mask(isbnlib.get_canonical_isbn(isbn_id)) == isbn_id)
|
def get_collection(self, **kwargs):
    """We need special get collection method to address issue in 11.5.4

    In 11.5.4 collection 'items' were nested under 'policiesReference'
    key. This has caused get_collection() calls to return empty list.
    This fix will update the list if the policiesReference key is found
    and 'items' key do not exists in __dict__.

    :raises: UnregisteredKind
    :returns: list of reference dicts and Python ``Resource`` objects
    """
    def _instantiate(item):
        # Resolve the item's 'kind' against the attribute registry and
        # build the corresponding Resource object; unknown kinds raise.
        kind = item['kind']
        registry = self._meta_data['attribute_registry']
        if kind not in registry:
            raise UnregisteredKind('%r is not registered!' % kind)
        instance = registry[kind](self)
        instance._local_update(item)
        instance._activate_URI(instance.selfLink)
        return instance

    list_of_contents = []
    self.refresh(**kwargs)
    if 'items' in self.__dict__:
        for item in self.items:
            # It's possible to have non-"kind" JSON returned. We just
            # append the corresponding dict. PostProcessing is the
            # caller's responsibility.
            if 'kind' not in item:
                list_of_contents.append(item)
                continue
            list_of_contents.append(_instantiate(item))
    # 11.5.4 workaround: collection items nested under policiesReference.
    if 'policiesReference' in self.__dict__ and 'items' not in \
            self.__dict__:
        for item in self.policiesReference['items']:
            list_of_contents.append(_instantiate(item))
    return list_of_contents
|
def function[get_collection, parameter[self]]:
constant[We need special get collection method to address issue in 11.5.4
In 11.5.4 collection 'items' were nested under 'policiesReference'
key. This has caused get_collection() calls to return empty list.
This fix will update the list if the policiesReference key is found
and 'items' key do not exists in __dict__.
:raises: UnregisteredKind
:returns: list of reference dicts and Python ``Resource`` objects
]
variable[list_of_contents] assign[=] list[[]]
call[name[self].refresh, parameter[]]
if compare[constant[items] in name[self].__dict__] begin[:]
for taget[name[item]] in starred[name[self].items] begin[:]
if compare[constant[kind] <ast.NotIn object at 0x7da2590d7190> name[item]] begin[:]
call[name[list_of_contents].append, parameter[name[item]]]
continue
variable[kind] assign[=] call[name[item]][constant[kind]]
if compare[name[kind] in call[name[self]._meta_data][constant[attribute_registry]]] begin[:]
variable[instance] assign[=] call[call[call[name[self]._meta_data][constant[attribute_registry]]][name[kind]], parameter[name[self]]]
call[name[instance]._local_update, parameter[name[item]]]
call[name[instance]._activate_URI, parameter[name[instance].selfLink]]
call[name[list_of_contents].append, parameter[name[instance]]]
if <ast.BoolOp object at 0x7da2043448e0> begin[:]
for taget[name[item]] in starred[call[name[self].policiesReference][constant[items]]] begin[:]
variable[kind] assign[=] call[name[item]][constant[kind]]
if compare[name[kind] in call[name[self]._meta_data][constant[attribute_registry]]] begin[:]
variable[instance] assign[=] call[call[call[name[self]._meta_data][constant[attribute_registry]]][name[kind]], parameter[name[self]]]
call[name[instance]._local_update, parameter[name[item]]]
call[name[instance]._activate_URI, parameter[name[instance].selfLink]]
call[name[list_of_contents].append, parameter[name[instance]]]
return[name[list_of_contents]]
|
keyword[def] identifier[get_collection] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[list_of_contents] =[]
identifier[self] . identifier[refresh] (** identifier[kwargs] )
keyword[if] literal[string] keyword[in] identifier[self] . identifier[__dict__] :
keyword[for] identifier[item] keyword[in] identifier[self] . identifier[items] :
keyword[if] literal[string] keyword[not] keyword[in] identifier[item] :
identifier[list_of_contents] . identifier[append] ( identifier[item] )
keyword[continue]
identifier[kind] = identifier[item] [ literal[string] ]
keyword[if] identifier[kind] keyword[in] identifier[self] . identifier[_meta_data] [ literal[string] ]:
identifier[instance] = identifier[self] . identifier[_meta_data] [ literal[string] ][ identifier[kind] ]( identifier[self] )
identifier[instance] . identifier[_local_update] ( identifier[item] )
identifier[instance] . identifier[_activate_URI] ( identifier[instance] . identifier[selfLink] )
identifier[list_of_contents] . identifier[append] ( identifier[instance] )
keyword[else] :
identifier[error_message] = literal[string] % identifier[kind]
keyword[raise] identifier[UnregisteredKind] ( identifier[error_message] )
keyword[if] literal[string] keyword[in] identifier[self] . identifier[__dict__] keyword[and] literal[string] keyword[not] keyword[in] identifier[self] . identifier[__dict__] :
keyword[for] identifier[item] keyword[in] identifier[self] . identifier[policiesReference] [ literal[string] ]:
identifier[kind] = identifier[item] [ literal[string] ]
keyword[if] identifier[kind] keyword[in] identifier[self] . identifier[_meta_data] [ literal[string] ]:
identifier[instance] = identifier[self] . identifier[_meta_data] [ literal[string] ][ identifier[kind] ]( identifier[self] )
identifier[instance] . identifier[_local_update] ( identifier[item] )
identifier[instance] . identifier[_activate_URI] ( identifier[instance] . identifier[selfLink] )
identifier[list_of_contents] . identifier[append] ( identifier[instance] )
keyword[else] :
identifier[error_message] = literal[string] % identifier[kind]
keyword[raise] identifier[UnregisteredKind] ( identifier[error_message] )
keyword[return] identifier[list_of_contents]
|
def get_collection(self, **kwargs):
"""We need special get collection method to address issue in 11.5.4
In 11.5.4 collection 'items' were nested under 'policiesReference'
key. This has caused get_collection() calls to return empty list.
This fix will update the list if the policiesReference key is found
and 'items' key do not exists in __dict__.
:raises: UnregisteredKind
:returns: list of reference dicts and Python ``Resource`` objects
"""
list_of_contents = []
self.refresh(**kwargs)
if 'items' in self.__dict__:
for item in self.items:
# It's possible to have non-"kind" JSON returned. We just
# append the corresponding dict. PostProcessing is the caller's
# responsibility.
if 'kind' not in item:
list_of_contents.append(item)
continue # depends on [control=['if'], data=['item']]
kind = item['kind']
if kind in self._meta_data['attribute_registry']:
# If it has a kind, it must be registered.
instance = self._meta_data['attribute_registry'][kind](self)
instance._local_update(item)
instance._activate_URI(instance.selfLink)
list_of_contents.append(instance) # depends on [control=['if'], data=['kind']]
else:
error_message = '%r is not registered!' % kind
raise UnregisteredKind(error_message) # depends on [control=['for'], data=['item']] # depends on [control=['if'], data=[]]
if 'policiesReference' in self.__dict__ and 'items' not in self.__dict__:
for item in self.policiesReference['items']:
kind = item['kind']
if kind in self._meta_data['attribute_registry']:
# If it has a kind, it must be registered.
instance = self._meta_data['attribute_registry'][kind](self)
instance._local_update(item)
instance._activate_URI(instance.selfLink)
list_of_contents.append(instance) # depends on [control=['if'], data=['kind']]
else:
error_message = '%r is not registered!' % kind
raise UnregisteredKind(error_message) # depends on [control=['for'], data=['item']] # depends on [control=['if'], data=[]]
return list_of_contents
|
def will_print(level=1):
    """Returns True if the current global status of messaging would print a
    message using any of the printing functions in this module.
    """
    if level != 1:
        # Higher levels obey the verbosity setting, which may be either
        # an integer threshold or a plain boolean switch.
        return ((isinstance(verbosity, int) and level <= verbosity) or
                (isinstance(verbosity, bool) and verbosity == True))
    # Level-1 messages are suppressed only by an explicit quiet=True.
    return quiet is None or quiet == False
|
def function[will_print, parameter[level]]:
constant[Returns True if the current global status of messaging would print a
message using any of the printing functions in this module.
]
if compare[name[level] equal[==] constant[1]] begin[:]
return[<ast.BoolOp object at 0x7da20cabf280>]
|
keyword[def] identifier[will_print] ( identifier[level] = literal[int] ):
literal[string]
keyword[if] identifier[level] == literal[int] :
keyword[return] identifier[quiet] keyword[is] keyword[None] keyword[or] identifier[quiet] == keyword[False]
keyword[else] :
keyword[return] (( identifier[isinstance] ( identifier[verbosity] , identifier[int] ) keyword[and] identifier[level] <= identifier[verbosity] ) keyword[or]
( identifier[isinstance] ( identifier[verbosity] , identifier[bool] ) keyword[and] identifier[verbosity] == keyword[True] ))
|
def will_print(level=1):
"""Returns True if the current global status of messaging would print a
message using any of the printing functions in this module.
"""
if level == 1:
#We only affect printability using the quiet setting.
return quiet is None or quiet == False # depends on [control=['if'], data=[]]
else:
return isinstance(verbosity, int) and level <= verbosity or (isinstance(verbosity, bool) and verbosity == True)
|
def augknt(knots, order):
    """Augment a knot vector.

    Parameters:
        knots:
            Python list or rank-1 array, the original knot vector (without endpoint repeats)
        order:
            int, >= 0, order of spline

    Returns:
        list_of_knots:
            rank-1 array that has (`order` + 1) copies of ``knots[0]``, then ``knots[1:-1]``, and finally (`order` + 1) copies of ``knots[-1]``.

    Caveats:
        `order` is the spline order `p`, not `p` + 1, and existing knots are never deleted.
        The knot vector always becomes longer by calling this function.
    """
    if isinstance(knots, np.ndarray) and knots.ndim > 1:
        raise ValueError("knots must be a list or a rank-1 array")
    interior = list(knots)  # ensure a plain Python list
    # "interior" already contributes one copy of each endpoint, so only
    # "order" extra repeats are prepended/appended.
    head = [interior[0]] * order
    tail = [interior[-1]] * order
    return np.array(head + interior + tail)
|
def function[augknt, parameter[knots, order]]:
constant[Augment a knot vector.
Parameters:
knots:
Python list or rank-1 array, the original knot vector (without endpoint repeats)
order:
int, >= 0, order of spline
Returns:
list_of_knots:
rank-1 array that has (`order` + 1) copies of ``knots[0]``, then ``knots[1:-1]``, and finally (`order` + 1) copies of ``knots[-1]``.
Caveats:
`order` is the spline order `p`, not `p` + 1, and existing knots are never deleted.
The knot vector always becomes longer by calling this function.
]
if <ast.BoolOp object at 0x7da207f004c0> begin[:]
<ast.Raise object at 0x7da18eb57eb0>
variable[knots] assign[=] call[name[list], parameter[name[knots]]]
return[call[name[np].array, parameter[binary_operation[binary_operation[binary_operation[list[[<ast.Subscript object at 0x7da18eb56110>]] * name[order]] + name[knots]] + binary_operation[list[[<ast.Subscript object at 0x7da18eb54b20>]] * name[order]]]]]]
|
keyword[def] identifier[augknt] ( identifier[knots] , identifier[order] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[knots] , identifier[np] . identifier[ndarray] ) keyword[and] identifier[knots] . identifier[ndim] > literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[knots] = identifier[list] ( identifier[knots] )
keyword[return] identifier[np] . identifier[array] ([ identifier[knots] [ literal[int] ]]* identifier[order] + identifier[knots] +[ identifier[knots] [- literal[int] ]]* identifier[order] )
|
def augknt(knots, order):
"""Augment a knot vector.
Parameters:
knots:
Python list or rank-1 array, the original knot vector (without endpoint repeats)
order:
int, >= 0, order of spline
Returns:
list_of_knots:
rank-1 array that has (`order` + 1) copies of ``knots[0]``, then ``knots[1:-1]``, and finally (`order` + 1) copies of ``knots[-1]``.
Caveats:
`order` is the spline order `p`, not `p` + 1, and existing knots are never deleted.
The knot vector always becomes longer by calling this function.
"""
if isinstance(knots, np.ndarray) and knots.ndim > 1:
raise ValueError('knots must be a list or a rank-1 array') # depends on [control=['if'], data=[]]
knots = list(knots) # ensure Python list
# One copy of knots[0] and knots[-1] will come from "knots" itself,
# so we only need to prepend/append "order" copies.
#
return np.array([knots[0]] * order + knots + [knots[-1]] * order)
|
def set_directory(robject):
    """
    Context manager to temporarily set the directory of a ROOT object
    (if possible)
    """
    movable = (hasattr(robject, 'GetDirectory') and
               hasattr(robject, 'SetDirectory'))
    if not movable:
        log.warning("Cannot set the directory of a `{0}`".format(
            type(robject)))
        # Nothing to restore; just hand control back to the caller.
        yield
        return
    previous = robject.GetDirectory()
    try:
        robject.SetDirectory(ROOT.gDirectory)
        yield
    finally:
        # Restore the original directory even if the body raised.
        robject.SetDirectory(previous)
|
def function[set_directory, parameter[robject]]:
constant[
Context manager to temporarily set the directory of a ROOT object
(if possible)
]
if <ast.BoolOp object at 0x7da1b1128d60> begin[:]
call[name[log].warning, parameter[call[constant[Cannot set the directory of a `{0}`].format, parameter[call[name[type], parameter[name[robject]]]]]]]
<ast.Yield object at 0x7da1b11b5c60>
|
keyword[def] identifier[set_directory] ( identifier[robject] ):
literal[string]
keyword[if] ( keyword[not] identifier[hasattr] ( identifier[robject] , literal[string] ) keyword[or]
keyword[not] identifier[hasattr] ( identifier[robject] , literal[string] )):
identifier[log] . identifier[warning] ( literal[string] . identifier[format] (
identifier[type] ( identifier[robject] )))
keyword[yield]
keyword[else] :
identifier[old_dir] = identifier[robject] . identifier[GetDirectory] ()
keyword[try] :
identifier[robject] . identifier[SetDirectory] ( identifier[ROOT] . identifier[gDirectory] )
keyword[yield]
keyword[finally] :
identifier[robject] . identifier[SetDirectory] ( identifier[old_dir] )
|
def set_directory(robject):
"""
Context manager to temporarily set the directory of a ROOT object
(if possible)
"""
if not hasattr(robject, 'GetDirectory') or not hasattr(robject, 'SetDirectory'):
log.warning('Cannot set the directory of a `{0}`'.format(type(robject)))
# Do nothing
yield # depends on [control=['if'], data=[]]
else:
old_dir = robject.GetDirectory()
try:
robject.SetDirectory(ROOT.gDirectory)
yield # depends on [control=['try'], data=[]]
finally:
robject.SetDirectory(old_dir)
|
def y0(x, context=None):
    """
    Return the value of the second kind Bessel function of order 0 at x.
    """
    # Coerce the argument into a BigFloat before dispatching to MPFR.
    operand = BigFloat._implicit_convert(x)
    return _apply_function_in_current_context(
        BigFloat, mpfr.mpfr_y0, (operand,), context)
|
def function[y0, parameter[x, context]]:
constant[
Return the value of the second kind Bessel function of order 0 at x.
]
return[call[name[_apply_function_in_current_context], parameter[name[BigFloat], name[mpfr].mpfr_y0, tuple[[<ast.Call object at 0x7da20c76d990>]], name[context]]]]
|
keyword[def] identifier[y0] ( identifier[x] , identifier[context] = keyword[None] ):
literal[string]
keyword[return] identifier[_apply_function_in_current_context] (
identifier[BigFloat] ,
identifier[mpfr] . identifier[mpfr_y0] ,
( identifier[BigFloat] . identifier[_implicit_convert] ( identifier[x] ),),
identifier[context] ,
)
|
def y0(x, context=None):
"""
Return the value of the second kind Bessel function of order 0 at x.
"""
return _apply_function_in_current_context(BigFloat, mpfr.mpfr_y0, (BigFloat._implicit_convert(x),), context)
|
def get_description_metadata(self):
    """Gets the metadata for a description.

    return: (osid.Metadata) - metadata for the description
    *compliance: mandatory -- This method must be implemented.*
    """
    # Start from a copy of the template metadata and layer on the
    # currently stored description text.
    merged = dict(self._mdata['description'])
    merged['existing_string_values'] = self._my_map['description']['text']
    return Metadata(**merged)
|
def function[get_description_metadata, parameter[self]]:
constant[Gets the metadata for a description.
return: (osid.Metadata) - metadata for the description
*compliance: mandatory -- This method must be implemented.*
]
variable[metadata] assign[=] call[name[dict], parameter[call[name[self]._mdata][constant[description]]]]
call[name[metadata].update, parameter[dictionary[[<ast.Constant object at 0x7da1b09705b0>], [<ast.Subscript object at 0x7da1b0972350>]]]]
return[call[name[Metadata], parameter[]]]
|
keyword[def] identifier[get_description_metadata] ( identifier[self] ):
literal[string]
identifier[metadata] = identifier[dict] ( identifier[self] . identifier[_mdata] [ literal[string] ])
identifier[metadata] . identifier[update] ({ literal[string] : identifier[self] . identifier[_my_map] [ literal[string] ][ literal[string] ]})
keyword[return] identifier[Metadata] (** identifier[metadata] )
|
def get_description_metadata(self):
"""Gets the metadata for a description.
return: (osid.Metadata) - metadata for the description
*compliance: mandatory -- This method must be implemented.*
"""
metadata = dict(self._mdata['description'])
metadata.update({'existing_string_values': self._my_map['description']['text']})
return Metadata(**metadata)
|
def _link(self, next_worker, next_is_first=False):
"""Link the worker to the given next worker object,
connecting the two workers with communication tubes."""
lock = multiprocessing.Lock()
next_worker._lock_prev_input = lock
self._lock_next_input = lock
lock.acquire()
lock = multiprocessing.Lock()
next_worker._lock_prev_output = lock
self._lock_next_output = lock
lock.acquire()
# If the next worker is the first one, trigger it now.
if next_is_first:
self._lock_next_input.release()
self._lock_next_output.release()
|
def function[_link, parameter[self, next_worker, next_is_first]]:
constant[Link the worker to the given next worker object,
connecting the two workers with communication tubes.]
variable[lock] assign[=] call[name[multiprocessing].Lock, parameter[]]
name[next_worker]._lock_prev_input assign[=] name[lock]
name[self]._lock_next_input assign[=] name[lock]
call[name[lock].acquire, parameter[]]
variable[lock] assign[=] call[name[multiprocessing].Lock, parameter[]]
name[next_worker]._lock_prev_output assign[=] name[lock]
name[self]._lock_next_output assign[=] name[lock]
call[name[lock].acquire, parameter[]]
if name[next_is_first] begin[:]
call[name[self]._lock_next_input.release, parameter[]]
call[name[self]._lock_next_output.release, parameter[]]
|
keyword[def] identifier[_link] ( identifier[self] , identifier[next_worker] , identifier[next_is_first] = keyword[False] ):
literal[string]
identifier[lock] = identifier[multiprocessing] . identifier[Lock] ()
identifier[next_worker] . identifier[_lock_prev_input] = identifier[lock]
identifier[self] . identifier[_lock_next_input] = identifier[lock]
identifier[lock] . identifier[acquire] ()
identifier[lock] = identifier[multiprocessing] . identifier[Lock] ()
identifier[next_worker] . identifier[_lock_prev_output] = identifier[lock]
identifier[self] . identifier[_lock_next_output] = identifier[lock]
identifier[lock] . identifier[acquire] ()
keyword[if] identifier[next_is_first] :
identifier[self] . identifier[_lock_next_input] . identifier[release] ()
identifier[self] . identifier[_lock_next_output] . identifier[release] ()
|
def _link(self, next_worker, next_is_first=False):
"""Link the worker to the given next worker object,
connecting the two workers with communication tubes."""
lock = multiprocessing.Lock()
next_worker._lock_prev_input = lock
self._lock_next_input = lock
lock.acquire()
lock = multiprocessing.Lock()
next_worker._lock_prev_output = lock
self._lock_next_output = lock
lock.acquire()
# If the next worker is the first one, trigger it now.
if next_is_first:
self._lock_next_input.release()
self._lock_next_output.release() # depends on [control=['if'], data=[]]
|
def _crop_data(self):
"""
Crop the ``data`` and ``mask`` to have an integer number of
background meshes of size ``box_size`` in both dimensions. The
data are cropped on the top and/or right edges (this is the best
option for the "zoom" interpolator).
Returns
-------
result : `~numpy.ma.MaskedArray`
The cropped data and mask as a masked array.
"""
ny_crop = self.nyboxes * self.box_size[1]
nx_crop = self.nxboxes * self.box_size[0]
crop_slc = index_exp[0:ny_crop, 0:nx_crop]
if self.mask is not None:
mask = self.mask[crop_slc]
else:
mask = False
return np.ma.masked_array(self.data[crop_slc], mask=mask)
|
def function[_crop_data, parameter[self]]:
constant[
Crop the ``data`` and ``mask`` to have an integer number of
background meshes of size ``box_size`` in both dimensions. The
data are cropped on the top and/or right edges (this is the best
option for the "zoom" interpolator).
Returns
-------
result : `~numpy.ma.MaskedArray`
The cropped data and mask as a masked array.
]
variable[ny_crop] assign[=] binary_operation[name[self].nyboxes * call[name[self].box_size][constant[1]]]
variable[nx_crop] assign[=] binary_operation[name[self].nxboxes * call[name[self].box_size][constant[0]]]
variable[crop_slc] assign[=] call[name[index_exp]][tuple[[<ast.Slice object at 0x7da1b1173760>, <ast.Slice object at 0x7da1b11abb20>]]]
if compare[name[self].mask is_not constant[None]] begin[:]
variable[mask] assign[=] call[name[self].mask][name[crop_slc]]
return[call[name[np].ma.masked_array, parameter[call[name[self].data][name[crop_slc]]]]]
|
keyword[def] identifier[_crop_data] ( identifier[self] ):
literal[string]
identifier[ny_crop] = identifier[self] . identifier[nyboxes] * identifier[self] . identifier[box_size] [ literal[int] ]
identifier[nx_crop] = identifier[self] . identifier[nxboxes] * identifier[self] . identifier[box_size] [ literal[int] ]
identifier[crop_slc] = identifier[index_exp] [ literal[int] : identifier[ny_crop] , literal[int] : identifier[nx_crop] ]
keyword[if] identifier[self] . identifier[mask] keyword[is] keyword[not] keyword[None] :
identifier[mask] = identifier[self] . identifier[mask] [ identifier[crop_slc] ]
keyword[else] :
identifier[mask] = keyword[False]
keyword[return] identifier[np] . identifier[ma] . identifier[masked_array] ( identifier[self] . identifier[data] [ identifier[crop_slc] ], identifier[mask] = identifier[mask] )
|
def _crop_data(self):
"""
Crop the ``data`` and ``mask`` to have an integer number of
background meshes of size ``box_size`` in both dimensions. The
data are cropped on the top and/or right edges (this is the best
option for the "zoom" interpolator).
Returns
-------
result : `~numpy.ma.MaskedArray`
The cropped data and mask as a masked array.
"""
ny_crop = self.nyboxes * self.box_size[1]
nx_crop = self.nxboxes * self.box_size[0]
crop_slc = index_exp[0:ny_crop, 0:nx_crop]
if self.mask is not None:
mask = self.mask[crop_slc] # depends on [control=['if'], data=[]]
else:
mask = False
return np.ma.masked_array(self.data[crop_slc], mask=mask)
|
def set_display_luminosity(self, luminosity):
    """
    Change the intensity of light of the front panel of the box

    :param luminosity: must be between 0 (light off) and 100
    :type luminosity: int
    """
    if not 0 <= luminosity <= 100:
        raise ValueError("Luminosity must be between 0 and 100")
    self.bbox_auth.set_access(BboxConstant.AUTHENTICATION_LEVEL_PRIVATE,
                              BboxConstant.AUTHENTICATION_LEVEL_PRIVATE)
    self.bbox_url.set_api_name(BboxConstant.API_DEVICE, "display")
    api = BboxApiCall(self.bbox_url, BboxConstant.HTTP_METHOD_PUT,
                      {'luminosity': luminosity}, self.bbox_auth)
    api.execute_api_request()
|
def function[set_display_luminosity, parameter[self, luminosity]]:
constant[
Change the intensity of light of the front panel of the box
:param luminosity: must be between 0 (light off) and 100
:type luminosity: int
]
if <ast.BoolOp object at 0x7da2044c3a60> begin[:]
<ast.Raise object at 0x7da2044c3640>
call[name[self].bbox_auth.set_access, parameter[name[BboxConstant].AUTHENTICATION_LEVEL_PRIVATE, name[BboxConstant].AUTHENTICATION_LEVEL_PRIVATE]]
call[name[self].bbox_url.set_api_name, parameter[name[BboxConstant].API_DEVICE, constant[display]]]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da2044c3940>], [<ast.Name object at 0x7da2044c3b20>]]
variable[api] assign[=] call[name[BboxApiCall], parameter[name[self].bbox_url, name[BboxConstant].HTTP_METHOD_PUT, name[data], name[self].bbox_auth]]
call[name[api].execute_api_request, parameter[]]
|
keyword[def] identifier[set_display_luminosity] ( identifier[self] , identifier[luminosity] ):
literal[string]
keyword[if] ( identifier[luminosity] < literal[int] ) keyword[or] ( identifier[luminosity] > literal[int] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[self] . identifier[bbox_auth] . identifier[set_access] ( identifier[BboxConstant] . identifier[AUTHENTICATION_LEVEL_PRIVATE] ,
identifier[BboxConstant] . identifier[AUTHENTICATION_LEVEL_PRIVATE] )
identifier[self] . identifier[bbox_url] . identifier[set_api_name] ( identifier[BboxConstant] . identifier[API_DEVICE] , literal[string] )
identifier[data] ={ literal[string] : identifier[luminosity] }
identifier[api] = identifier[BboxApiCall] ( identifier[self] . identifier[bbox_url] , identifier[BboxConstant] . identifier[HTTP_METHOD_PUT] , identifier[data] ,
identifier[self] . identifier[bbox_auth] )
identifier[api] . identifier[execute_api_request] ()
|
def set_display_luminosity(self, luminosity):
"""
Change the intensity of light of the front panel of the box
:param luminosity: must be between 0 (light off) and 100
:type luminosity: int
"""
if luminosity < 0 or luminosity > 100:
raise ValueError('Luminosity must be between 0 and 100') # depends on [control=['if'], data=[]]
self.bbox_auth.set_access(BboxConstant.AUTHENTICATION_LEVEL_PRIVATE, BboxConstant.AUTHENTICATION_LEVEL_PRIVATE)
self.bbox_url.set_api_name(BboxConstant.API_DEVICE, 'display')
data = {'luminosity': luminosity}
api = BboxApiCall(self.bbox_url, BboxConstant.HTTP_METHOD_PUT, data, self.bbox_auth)
api.execute_api_request()
|
def _request_process_text(response):
"""Handle Signature download.
Return:
(string): The data from the download
(string): The status of the download
"""
status = 'Failure'
# Handle document download
data = response.content
if data:
status = 'Success'
return data, status
|
def function[_request_process_text, parameter[response]]:
constant[Handle Signature download.
Return:
(string): The data from the download
(string): The status of the download
]
variable[status] assign[=] constant[Failure]
variable[data] assign[=] name[response].content
if name[data] begin[:]
variable[status] assign[=] constant[Success]
return[tuple[[<ast.Name object at 0x7da2041db0d0>, <ast.Name object at 0x7da2041d9150>]]]
|
keyword[def] identifier[_request_process_text] ( identifier[response] ):
literal[string]
identifier[status] = literal[string]
identifier[data] = identifier[response] . identifier[content]
keyword[if] identifier[data] :
identifier[status] = literal[string]
keyword[return] identifier[data] , identifier[status]
|
def _request_process_text(response):
"""Handle Signature download.
Return:
(string): The data from the download
(string): The status of the download
"""
status = 'Failure'
# Handle document download
data = response.content
if data:
status = 'Success' # depends on [control=['if'], data=[]]
return (data, status)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.