code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def replace(doc, pointer, value):
"""Replace element from sequence, member from mapping.
:param doc: the document base
:param pointer: the path to search in
:param value: the new value
:return: the new object
.. note::
This operation is functionally identical to a "remove" operation for
a value, followed immediately by an "add" operation at the same
location with the replacement value.
"""
return Target(doc).replace(pointer, value).document | def function[replace, parameter[doc, pointer, value]]:
constant[Replace element from sequence, member from mapping.
:param doc: the document base
:param pointer: the path to search in
:param value: the new value
:return: the new object
.. note::
This operation is functionally identical to a "remove" operation for
a value, followed immediately by an "add" operation at the same
location with the replacement value.
]
return[call[call[name[Target], parameter[name[doc]]].replace, parameter[name[pointer], name[value]]].document] | keyword[def] identifier[replace] ( identifier[doc] , identifier[pointer] , identifier[value] ):
literal[string]
keyword[return] identifier[Target] ( identifier[doc] ). identifier[replace] ( identifier[pointer] , identifier[value] ). identifier[document] | def replace(doc, pointer, value):
"""Replace element from sequence, member from mapping.
:param doc: the document base
:param pointer: the path to search in
:param value: the new value
:return: the new object
.. note::
This operation is functionally identical to a "remove" operation for
a value, followed immediately by an "add" operation at the same
location with the replacement value.
"""
return Target(doc).replace(pointer, value).document |
def getKeyConfig(self, keyID=None):
"""
Get key configuration specified by `keyID`, or current keyID.
:param str keyID: optional keyID to retrieve, or current if not passed
:return: a dict of the request (or current) key config
"""
k = keyID if keyID is not None else self._keyID
if not k or k not in self._data['keys']:
raise ConfigException('request key does not exist: %s' % k)
return self._data['keys'][k] | def function[getKeyConfig, parameter[self, keyID]]:
constant[
Get key configuration specified by `keyID`, or current keyID.
:param str keyID: optional keyID to retrieve, or current if not passed
:return: a dict of the request (or current) key config
]
variable[k] assign[=] <ast.IfExp object at 0x7da1b06894b0>
if <ast.BoolOp object at 0x7da1b0688c40> begin[:]
<ast.Raise object at 0x7da1b0689210>
return[call[call[name[self]._data][constant[keys]]][name[k]]] | keyword[def] identifier[getKeyConfig] ( identifier[self] , identifier[keyID] = keyword[None] ):
literal[string]
identifier[k] = identifier[keyID] keyword[if] identifier[keyID] keyword[is] keyword[not] keyword[None] keyword[else] identifier[self] . identifier[_keyID]
keyword[if] keyword[not] identifier[k] keyword[or] identifier[k] keyword[not] keyword[in] identifier[self] . identifier[_data] [ literal[string] ]:
keyword[raise] identifier[ConfigException] ( literal[string] % identifier[k] )
keyword[return] identifier[self] . identifier[_data] [ literal[string] ][ identifier[k] ] | def getKeyConfig(self, keyID=None):
"""
Get key configuration specified by `keyID`, or current keyID.
:param str keyID: optional keyID to retrieve, or current if not passed
:return: a dict of the request (or current) key config
"""
k = keyID if keyID is not None else self._keyID
if not k or k not in self._data['keys']:
raise ConfigException('request key does not exist: %s' % k) # depends on [control=['if'], data=[]]
return self._data['keys'][k] |
def __find_processes_by_filename(self, filename):
"""
Internally used by L{find_processes_by_filename}.
"""
found = list()
filename = filename.lower()
if PathOperations.path_is_absolute(filename):
for aProcess in self.iter_processes():
imagename = aProcess.get_filename()
if imagename and imagename.lower() == filename:
found.append( (aProcess, imagename) )
else:
for aProcess in self.iter_processes():
imagename = aProcess.get_filename()
if imagename:
imagename = PathOperations.pathname_to_filename(imagename)
if imagename.lower() == filename:
found.append( (aProcess, imagename) )
return found | def function[__find_processes_by_filename, parameter[self, filename]]:
constant[
Internally used by L{find_processes_by_filename}.
]
variable[found] assign[=] call[name[list], parameter[]]
variable[filename] assign[=] call[name[filename].lower, parameter[]]
if call[name[PathOperations].path_is_absolute, parameter[name[filename]]] begin[:]
for taget[name[aProcess]] in starred[call[name[self].iter_processes, parameter[]]] begin[:]
variable[imagename] assign[=] call[name[aProcess].get_filename, parameter[]]
if <ast.BoolOp object at 0x7da1b088dfc0> begin[:]
call[name[found].append, parameter[tuple[[<ast.Name object at 0x7da1b088dc90>, <ast.Name object at 0x7da1b088d810>]]]]
return[name[found]] | keyword[def] identifier[__find_processes_by_filename] ( identifier[self] , identifier[filename] ):
literal[string]
identifier[found] = identifier[list] ()
identifier[filename] = identifier[filename] . identifier[lower] ()
keyword[if] identifier[PathOperations] . identifier[path_is_absolute] ( identifier[filename] ):
keyword[for] identifier[aProcess] keyword[in] identifier[self] . identifier[iter_processes] ():
identifier[imagename] = identifier[aProcess] . identifier[get_filename] ()
keyword[if] identifier[imagename] keyword[and] identifier[imagename] . identifier[lower] ()== identifier[filename] :
identifier[found] . identifier[append] (( identifier[aProcess] , identifier[imagename] ))
keyword[else] :
keyword[for] identifier[aProcess] keyword[in] identifier[self] . identifier[iter_processes] ():
identifier[imagename] = identifier[aProcess] . identifier[get_filename] ()
keyword[if] identifier[imagename] :
identifier[imagename] = identifier[PathOperations] . identifier[pathname_to_filename] ( identifier[imagename] )
keyword[if] identifier[imagename] . identifier[lower] ()== identifier[filename] :
identifier[found] . identifier[append] (( identifier[aProcess] , identifier[imagename] ))
keyword[return] identifier[found] | def __find_processes_by_filename(self, filename):
"""
Internally used by L{find_processes_by_filename}.
"""
found = list()
filename = filename.lower()
if PathOperations.path_is_absolute(filename):
for aProcess in self.iter_processes():
imagename = aProcess.get_filename()
if imagename and imagename.lower() == filename:
found.append((aProcess, imagename)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['aProcess']] # depends on [control=['if'], data=[]]
else:
for aProcess in self.iter_processes():
imagename = aProcess.get_filename()
if imagename:
imagename = PathOperations.pathname_to_filename(imagename)
if imagename.lower() == filename:
found.append((aProcess, imagename)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['aProcess']]
return found |
def read(self, vals):
"""Read values.
Args:
vals (list): list of strings representing values
"""
i = 0
if len(vals[i]) == 0:
self.year = None
else:
self.year = vals[i]
i += 1
if len(vals[i]) == 0:
self.month = None
else:
self.month = vals[i]
i += 1
if len(vals[i]) == 0:
self.day = None
else:
self.day = vals[i]
i += 1
if len(vals[i]) == 0:
self.hour = None
else:
self.hour = vals[i]
i += 1
if len(vals[i]) == 0:
self.minute = None
else:
self.minute = vals[i]
i += 1
if len(vals[i]) == 0:
self.data_source_and_uncertainty_flags = None
else:
self.data_source_and_uncertainty_flags = vals[i]
i += 1
if len(vals[i]) == 0:
self.dry_bulb_temperature = None
else:
self.dry_bulb_temperature = vals[i]
i += 1
if len(vals[i]) == 0:
self.dew_point_temperature = None
else:
self.dew_point_temperature = vals[i]
i += 1
if len(vals[i]) == 0:
self.relative_humidity = None
else:
self.relative_humidity = vals[i]
i += 1
if len(vals[i]) == 0:
self.atmospheric_station_pressure = None
else:
self.atmospheric_station_pressure = vals[i]
i += 1
if len(vals[i]) == 0:
self.extraterrestrial_horizontal_radiation = None
else:
self.extraterrestrial_horizontal_radiation = vals[i]
i += 1
if len(vals[i]) == 0:
self.extraterrestrial_direct_normal_radiation = None
else:
self.extraterrestrial_direct_normal_radiation = vals[i]
i += 1
if len(vals[i]) == 0:
self.horizontal_infrared_radiation_intensity = None
else:
self.horizontal_infrared_radiation_intensity = vals[i]
i += 1
if len(vals[i]) == 0:
self.global_horizontal_radiation = None
else:
self.global_horizontal_radiation = vals[i]
i += 1
if len(vals[i]) == 0:
self.direct_normal_radiation = None
else:
self.direct_normal_radiation = vals[i]
i += 1
if len(vals[i]) == 0:
self.diffuse_horizontal_radiation = None
else:
self.diffuse_horizontal_radiation = vals[i]
i += 1
if len(vals[i]) == 0:
self.global_horizontal_illuminance = None
else:
self.global_horizontal_illuminance = vals[i]
i += 1
if len(vals[i]) == 0:
self.direct_normal_illuminance = None
else:
self.direct_normal_illuminance = vals[i]
i += 1
if len(vals[i]) == 0:
self.diffuse_horizontal_illuminance = None
else:
self.diffuse_horizontal_illuminance = vals[i]
i += 1
if len(vals[i]) == 0:
self.zenith_luminance = None
else:
self.zenith_luminance = vals[i]
i += 1
if len(vals[i]) == 0:
self.wind_direction = None
else:
self.wind_direction = vals[i]
i += 1
if len(vals[i]) == 0:
self.wind_speed = None
else:
self.wind_speed = vals[i]
i += 1
if len(vals[i]) == 0:
self.total_sky_cover = None
else:
self.total_sky_cover = vals[i]
i += 1
if len(vals[i]) == 0:
self.opaque_sky_cover = None
else:
self.opaque_sky_cover = vals[i]
i += 1
if len(vals[i]) == 0:
self.visibility = None
else:
self.visibility = vals[i]
i += 1
if len(vals[i]) == 0:
self.ceiling_height = None
else:
self.ceiling_height = vals[i]
i += 1
if len(vals[i]) == 0:
self.present_weather_observation = None
else:
self.present_weather_observation = vals[i]
i += 1
if len(vals[i]) == 0:
self.present_weather_codes = None
else:
self.present_weather_codes = vals[i]
i += 1
if len(vals[i]) == 0:
self.precipitable_water = None
else:
self.precipitable_water = vals[i]
i += 1
if len(vals[i]) == 0:
self.aerosol_optical_depth = None
else:
self.aerosol_optical_depth = vals[i]
i += 1
if len(vals[i]) == 0:
self.snow_depth = None
else:
self.snow_depth = vals[i]
i += 1
if len(vals[i]) == 0:
self.days_since_last_snowfall = None
else:
self.days_since_last_snowfall = vals[i]
i += 1
if len(vals[i]) == 0:
self.albedo = None
else:
self.albedo = vals[i]
i += 1
if len(vals[i]) == 0:
self.liquid_precipitation_depth = None
else:
self.liquid_precipitation_depth = vals[i]
i += 1
if len(vals[i]) == 0:
self.liquid_precipitation_quantity = None
else:
self.liquid_precipitation_quantity = vals[i]
i += 1 | def function[read, parameter[self, vals]]:
constant[Read values.
Args:
vals (list): list of strings representing values
]
variable[i] assign[=] constant[0]
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].year assign[=] constant[None]
<ast.AugAssign object at 0x7da1b0f2b160>
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].month assign[=] constant[None]
<ast.AugAssign object at 0x7da1b0f2ad70>
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].day assign[=] constant[None]
<ast.AugAssign object at 0x7da1b0f2a980>
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].hour assign[=] constant[None]
<ast.AugAssign object at 0x7da1b0f2a590>
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].minute assign[=] constant[None]
<ast.AugAssign object at 0x7da1b0f2a1a0>
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].data_source_and_uncertainty_flags assign[=] constant[None]
<ast.AugAssign object at 0x7da1b0f29db0>
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].dry_bulb_temperature assign[=] constant[None]
<ast.AugAssign object at 0x7da1b0f299c0>
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].dew_point_temperature assign[=] constant[None]
<ast.AugAssign object at 0x7da1b0f295d0>
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].relative_humidity assign[=] constant[None]
<ast.AugAssign object at 0x7da1b0f291e0>
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].atmospheric_station_pressure assign[=] constant[None]
<ast.AugAssign object at 0x7da1b0f28df0>
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].extraterrestrial_horizontal_radiation assign[=] constant[None]
<ast.AugAssign object at 0x7da1b0f28a00>
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].extraterrestrial_direct_normal_radiation assign[=] constant[None]
<ast.AugAssign object at 0x7da1b0f28610>
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].horizontal_infrared_radiation_intensity assign[=] constant[None]
<ast.AugAssign object at 0x7da1b0f28340>
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].global_horizontal_radiation assign[=] constant[None]
<ast.AugAssign object at 0x7da1b0f9f9a0>
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].direct_normal_radiation assign[=] constant[None]
<ast.AugAssign object at 0x7da1b0f9d2a0>
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].diffuse_horizontal_radiation assign[=] constant[None]
<ast.AugAssign object at 0x7da1b0f9f130>
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].global_horizontal_illuminance assign[=] constant[None]
<ast.AugAssign object at 0x7da1b0f9dd50>
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].direct_normal_illuminance assign[=] constant[None]
<ast.AugAssign object at 0x7da1b0f9cfa0>
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].diffuse_horizontal_illuminance assign[=] constant[None]
<ast.AugAssign object at 0x7da1b0f9cf10>
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].zenith_luminance assign[=] constant[None]
<ast.AugAssign object at 0x7da18eb560e0>
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].wind_direction assign[=] constant[None]
<ast.AugAssign object at 0x7da18eb560b0>
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].wind_speed assign[=] constant[None]
<ast.AugAssign object at 0x7da18eb571f0>
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].total_sky_cover assign[=] constant[None]
<ast.AugAssign object at 0x7da1b0f90940>
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].opaque_sky_cover assign[=] constant[None]
<ast.AugAssign object at 0x7da1b0f933a0>
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].visibility assign[=] constant[None]
<ast.AugAssign object at 0x7da1b0f91090>
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].ceiling_height assign[=] constant[None]
<ast.AugAssign object at 0x7da1b0f90610>
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].present_weather_observation assign[=] constant[None]
<ast.AugAssign object at 0x7da1b0f930d0>
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].present_weather_codes assign[=] constant[None]
<ast.AugAssign object at 0x7da1b0f93460>
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].precipitable_water assign[=] constant[None]
<ast.AugAssign object at 0x7da1b0f93af0>
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].aerosol_optical_depth assign[=] constant[None]
<ast.AugAssign object at 0x7da1b0f910f0>
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].snow_depth assign[=] constant[None]
<ast.AugAssign object at 0x7da1b0f92b60>
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].days_since_last_snowfall assign[=] constant[None]
<ast.AugAssign object at 0x7da1b0f90490>
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].albedo assign[=] constant[None]
<ast.AugAssign object at 0x7da1b0f90eb0>
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].liquid_precipitation_depth assign[=] constant[None]
<ast.AugAssign object at 0x7da1b0f93790>
if compare[call[name[len], parameter[call[name[vals]][name[i]]]] equal[==] constant[0]] begin[:]
name[self].liquid_precipitation_quantity assign[=] constant[None]
<ast.AugAssign object at 0x7da1b0f93970> | keyword[def] identifier[read] ( identifier[self] , identifier[vals] ):
literal[string]
identifier[i] = literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[year] = keyword[None]
keyword[else] :
identifier[self] . identifier[year] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[month] = keyword[None]
keyword[else] :
identifier[self] . identifier[month] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[day] = keyword[None]
keyword[else] :
identifier[self] . identifier[day] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[hour] = keyword[None]
keyword[else] :
identifier[self] . identifier[hour] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[minute] = keyword[None]
keyword[else] :
identifier[self] . identifier[minute] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[data_source_and_uncertainty_flags] = keyword[None]
keyword[else] :
identifier[self] . identifier[data_source_and_uncertainty_flags] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[dry_bulb_temperature] = keyword[None]
keyword[else] :
identifier[self] . identifier[dry_bulb_temperature] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[dew_point_temperature] = keyword[None]
keyword[else] :
identifier[self] . identifier[dew_point_temperature] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[relative_humidity] = keyword[None]
keyword[else] :
identifier[self] . identifier[relative_humidity] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[atmospheric_station_pressure] = keyword[None]
keyword[else] :
identifier[self] . identifier[atmospheric_station_pressure] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[extraterrestrial_horizontal_radiation] = keyword[None]
keyword[else] :
identifier[self] . identifier[extraterrestrial_horizontal_radiation] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[extraterrestrial_direct_normal_radiation] = keyword[None]
keyword[else] :
identifier[self] . identifier[extraterrestrial_direct_normal_radiation] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[horizontal_infrared_radiation_intensity] = keyword[None]
keyword[else] :
identifier[self] . identifier[horizontal_infrared_radiation_intensity] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[global_horizontal_radiation] = keyword[None]
keyword[else] :
identifier[self] . identifier[global_horizontal_radiation] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[direct_normal_radiation] = keyword[None]
keyword[else] :
identifier[self] . identifier[direct_normal_radiation] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[diffuse_horizontal_radiation] = keyword[None]
keyword[else] :
identifier[self] . identifier[diffuse_horizontal_radiation] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[global_horizontal_illuminance] = keyword[None]
keyword[else] :
identifier[self] . identifier[global_horizontal_illuminance] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[direct_normal_illuminance] = keyword[None]
keyword[else] :
identifier[self] . identifier[direct_normal_illuminance] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[diffuse_horizontal_illuminance] = keyword[None]
keyword[else] :
identifier[self] . identifier[diffuse_horizontal_illuminance] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[zenith_luminance] = keyword[None]
keyword[else] :
identifier[self] . identifier[zenith_luminance] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[wind_direction] = keyword[None]
keyword[else] :
identifier[self] . identifier[wind_direction] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[wind_speed] = keyword[None]
keyword[else] :
identifier[self] . identifier[wind_speed] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[total_sky_cover] = keyword[None]
keyword[else] :
identifier[self] . identifier[total_sky_cover] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[opaque_sky_cover] = keyword[None]
keyword[else] :
identifier[self] . identifier[opaque_sky_cover] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[visibility] = keyword[None]
keyword[else] :
identifier[self] . identifier[visibility] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[ceiling_height] = keyword[None]
keyword[else] :
identifier[self] . identifier[ceiling_height] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[present_weather_observation] = keyword[None]
keyword[else] :
identifier[self] . identifier[present_weather_observation] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[present_weather_codes] = keyword[None]
keyword[else] :
identifier[self] . identifier[present_weather_codes] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[precipitable_water] = keyword[None]
keyword[else] :
identifier[self] . identifier[precipitable_water] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[aerosol_optical_depth] = keyword[None]
keyword[else] :
identifier[self] . identifier[aerosol_optical_depth] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[snow_depth] = keyword[None]
keyword[else] :
identifier[self] . identifier[snow_depth] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[days_since_last_snowfall] = keyword[None]
keyword[else] :
identifier[self] . identifier[days_since_last_snowfall] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[albedo] = keyword[None]
keyword[else] :
identifier[self] . identifier[albedo] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[liquid_precipitation_depth] = keyword[None]
keyword[else] :
identifier[self] . identifier[liquid_precipitation_depth] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int]
keyword[if] identifier[len] ( identifier[vals] [ identifier[i] ])== literal[int] :
identifier[self] . identifier[liquid_precipitation_quantity] = keyword[None]
keyword[else] :
identifier[self] . identifier[liquid_precipitation_quantity] = identifier[vals] [ identifier[i] ]
identifier[i] += literal[int] | def read(self, vals):
"""Read values.
Args:
vals (list): list of strings representing values
"""
i = 0
if len(vals[i]) == 0:
self.year = None # depends on [control=['if'], data=[]]
else:
self.year = vals[i]
i += 1
if len(vals[i]) == 0:
self.month = None # depends on [control=['if'], data=[]]
else:
self.month = vals[i]
i += 1
if len(vals[i]) == 0:
self.day = None # depends on [control=['if'], data=[]]
else:
self.day = vals[i]
i += 1
if len(vals[i]) == 0:
self.hour = None # depends on [control=['if'], data=[]]
else:
self.hour = vals[i]
i += 1
if len(vals[i]) == 0:
self.minute = None # depends on [control=['if'], data=[]]
else:
self.minute = vals[i]
i += 1
if len(vals[i]) == 0:
self.data_source_and_uncertainty_flags = None # depends on [control=['if'], data=[]]
else:
self.data_source_and_uncertainty_flags = vals[i]
i += 1
if len(vals[i]) == 0:
self.dry_bulb_temperature = None # depends on [control=['if'], data=[]]
else:
self.dry_bulb_temperature = vals[i]
i += 1
if len(vals[i]) == 0:
self.dew_point_temperature = None # depends on [control=['if'], data=[]]
else:
self.dew_point_temperature = vals[i]
i += 1
if len(vals[i]) == 0:
self.relative_humidity = None # depends on [control=['if'], data=[]]
else:
self.relative_humidity = vals[i]
i += 1
if len(vals[i]) == 0:
self.atmospheric_station_pressure = None # depends on [control=['if'], data=[]]
else:
self.atmospheric_station_pressure = vals[i]
i += 1
if len(vals[i]) == 0:
self.extraterrestrial_horizontal_radiation = None # depends on [control=['if'], data=[]]
else:
self.extraterrestrial_horizontal_radiation = vals[i]
i += 1
if len(vals[i]) == 0:
self.extraterrestrial_direct_normal_radiation = None # depends on [control=['if'], data=[]]
else:
self.extraterrestrial_direct_normal_radiation = vals[i]
i += 1
if len(vals[i]) == 0:
self.horizontal_infrared_radiation_intensity = None # depends on [control=['if'], data=[]]
else:
self.horizontal_infrared_radiation_intensity = vals[i]
i += 1
if len(vals[i]) == 0:
self.global_horizontal_radiation = None # depends on [control=['if'], data=[]]
else:
self.global_horizontal_radiation = vals[i]
i += 1
if len(vals[i]) == 0:
self.direct_normal_radiation = None # depends on [control=['if'], data=[]]
else:
self.direct_normal_radiation = vals[i]
i += 1
if len(vals[i]) == 0:
self.diffuse_horizontal_radiation = None # depends on [control=['if'], data=[]]
else:
self.diffuse_horizontal_radiation = vals[i]
i += 1
if len(vals[i]) == 0:
self.global_horizontal_illuminance = None # depends on [control=['if'], data=[]]
else:
self.global_horizontal_illuminance = vals[i]
i += 1
if len(vals[i]) == 0:
self.direct_normal_illuminance = None # depends on [control=['if'], data=[]]
else:
self.direct_normal_illuminance = vals[i]
i += 1
if len(vals[i]) == 0:
self.diffuse_horizontal_illuminance = None # depends on [control=['if'], data=[]]
else:
self.diffuse_horizontal_illuminance = vals[i]
i += 1
if len(vals[i]) == 0:
self.zenith_luminance = None # depends on [control=['if'], data=[]]
else:
self.zenith_luminance = vals[i]
i += 1
if len(vals[i]) == 0:
self.wind_direction = None # depends on [control=['if'], data=[]]
else:
self.wind_direction = vals[i]
i += 1
if len(vals[i]) == 0:
self.wind_speed = None # depends on [control=['if'], data=[]]
else:
self.wind_speed = vals[i]
i += 1
if len(vals[i]) == 0:
self.total_sky_cover = None # depends on [control=['if'], data=[]]
else:
self.total_sky_cover = vals[i]
i += 1
if len(vals[i]) == 0:
self.opaque_sky_cover = None # depends on [control=['if'], data=[]]
else:
self.opaque_sky_cover = vals[i]
i += 1
if len(vals[i]) == 0:
self.visibility = None # depends on [control=['if'], data=[]]
else:
self.visibility = vals[i]
i += 1
if len(vals[i]) == 0:
self.ceiling_height = None # depends on [control=['if'], data=[]]
else:
self.ceiling_height = vals[i]
i += 1
if len(vals[i]) == 0:
self.present_weather_observation = None # depends on [control=['if'], data=[]]
else:
self.present_weather_observation = vals[i]
i += 1
if len(vals[i]) == 0:
self.present_weather_codes = None # depends on [control=['if'], data=[]]
else:
self.present_weather_codes = vals[i]
i += 1
if len(vals[i]) == 0:
self.precipitable_water = None # depends on [control=['if'], data=[]]
else:
self.precipitable_water = vals[i]
i += 1
if len(vals[i]) == 0:
self.aerosol_optical_depth = None # depends on [control=['if'], data=[]]
else:
self.aerosol_optical_depth = vals[i]
i += 1
if len(vals[i]) == 0:
self.snow_depth = None # depends on [control=['if'], data=[]]
else:
self.snow_depth = vals[i]
i += 1
if len(vals[i]) == 0:
self.days_since_last_snowfall = None # depends on [control=['if'], data=[]]
else:
self.days_since_last_snowfall = vals[i]
i += 1
if len(vals[i]) == 0:
self.albedo = None # depends on [control=['if'], data=[]]
else:
self.albedo = vals[i]
i += 1
if len(vals[i]) == 0:
self.liquid_precipitation_depth = None # depends on [control=['if'], data=[]]
else:
self.liquid_precipitation_depth = vals[i]
i += 1
if len(vals[i]) == 0:
self.liquid_precipitation_quantity = None # depends on [control=['if'], data=[]]
else:
self.liquid_precipitation_quantity = vals[i]
i += 1 |
def dev_null_wrapper(func, *a, **kwargs):
"""
Temporarily swap stdout with /dev/null, and execute given function while stdout goes to /dev/null.
This is useful because netsnmp writes to stdout and disturbes Icinga result in some cases.
"""
os.dup2(dev_null, sys.stdout.fileno())
return_object = func(*a, **kwargs)
sys.stdout.flush()
os.dup2(tmp_stdout, sys.stdout.fileno())
return return_object | def function[dev_null_wrapper, parameter[func]]:
constant[
Temporarily swap stdout with /dev/null, and execute given function while stdout goes to /dev/null.
This is useful because netsnmp writes to stdout and disturbes Icinga result in some cases.
]
call[name[os].dup2, parameter[name[dev_null], call[name[sys].stdout.fileno, parameter[]]]]
variable[return_object] assign[=] call[name[func], parameter[<ast.Starred object at 0x7da1b1b373a0>]]
call[name[sys].stdout.flush, parameter[]]
call[name[os].dup2, parameter[name[tmp_stdout], call[name[sys].stdout.fileno, parameter[]]]]
return[name[return_object]] | keyword[def] identifier[dev_null_wrapper] ( identifier[func] ,* identifier[a] ,** identifier[kwargs] ):
literal[string]
identifier[os] . identifier[dup2] ( identifier[dev_null] , identifier[sys] . identifier[stdout] . identifier[fileno] ())
identifier[return_object] = identifier[func] (* identifier[a] ,** identifier[kwargs] )
identifier[sys] . identifier[stdout] . identifier[flush] ()
identifier[os] . identifier[dup2] ( identifier[tmp_stdout] , identifier[sys] . identifier[stdout] . identifier[fileno] ())
keyword[return] identifier[return_object] | def dev_null_wrapper(func, *a, **kwargs):
"""
Temporarily swap stdout with /dev/null, and execute given function while stdout goes to /dev/null.
This is useful because netsnmp writes to stdout and disturbes Icinga result in some cases.
"""
os.dup2(dev_null, sys.stdout.fileno())
return_object = func(*a, **kwargs)
sys.stdout.flush()
os.dup2(tmp_stdout, sys.stdout.fileno())
return return_object |
def ldap_sync(self, **kwargs):
"""Sync LDAP groups.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabCreateError: If the server cannot perform the request
"""
path = '/groups/%s/ldap_sync' % self.get_id()
self.manager.gitlab.http_post(path, **kwargs) | def function[ldap_sync, parameter[self]]:
constant[Sync LDAP groups.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabCreateError: If the server cannot perform the request
]
variable[path] assign[=] binary_operation[constant[/groups/%s/ldap_sync] <ast.Mod object at 0x7da2590d6920> call[name[self].get_id, parameter[]]]
call[name[self].manager.gitlab.http_post, parameter[name[path]]] | keyword[def] identifier[ldap_sync] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[path] = literal[string] % identifier[self] . identifier[get_id] ()
identifier[self] . identifier[manager] . identifier[gitlab] . identifier[http_post] ( identifier[path] ,** identifier[kwargs] ) | def ldap_sync(self, **kwargs):
"""Sync LDAP groups.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabCreateError: If the server cannot perform the request
"""
path = '/groups/%s/ldap_sync' % self.get_id()
self.manager.gitlab.http_post(path, **kwargs) |
def iter_leaves(self) -> Iterable[BaseEntity]:
"""Return an iterable over all nodes that are leaves.
A node is a leaf if either:
- it doesn't have any predecessors, OR
- all of its predecessors have a score in their data dictionaries
"""
for node in self.graph:
if self.tag in self.graph.nodes[node]:
continue
if not any(self.tag not in self.graph.nodes[p] for p in self.graph.predecessors(node)):
yield node | def function[iter_leaves, parameter[self]]:
constant[Return an iterable over all nodes that are leaves.
A node is a leaf if either:
- it doesn't have any predecessors, OR
- all of its predecessors have a score in their data dictionaries
]
for taget[name[node]] in starred[name[self].graph] begin[:]
if compare[name[self].tag in call[name[self].graph.nodes][name[node]]] begin[:]
continue
if <ast.UnaryOp object at 0x7da20c7942b0> begin[:]
<ast.Yield object at 0x7da20c7968f0> | keyword[def] identifier[iter_leaves] ( identifier[self] )-> identifier[Iterable] [ identifier[BaseEntity] ]:
literal[string]
keyword[for] identifier[node] keyword[in] identifier[self] . identifier[graph] :
keyword[if] identifier[self] . identifier[tag] keyword[in] identifier[self] . identifier[graph] . identifier[nodes] [ identifier[node] ]:
keyword[continue]
keyword[if] keyword[not] identifier[any] ( identifier[self] . identifier[tag] keyword[not] keyword[in] identifier[self] . identifier[graph] . identifier[nodes] [ identifier[p] ] keyword[for] identifier[p] keyword[in] identifier[self] . identifier[graph] . identifier[predecessors] ( identifier[node] )):
keyword[yield] identifier[node] | def iter_leaves(self) -> Iterable[BaseEntity]:
"""Return an iterable over all nodes that are leaves.
A node is a leaf if either:
- it doesn't have any predecessors, OR
- all of its predecessors have a score in their data dictionaries
"""
for node in self.graph:
if self.tag in self.graph.nodes[node]:
continue # depends on [control=['if'], data=[]]
if not any((self.tag not in self.graph.nodes[p] for p in self.graph.predecessors(node))):
yield node # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['node']] |
def extension_supported(request, extension_name):
"""This method will determine if Cinder supports a given extension name."""
for extension in list_extensions(request):
if extension.name == extension_name:
return True
return False | def function[extension_supported, parameter[request, extension_name]]:
constant[This method will determine if Cinder supports a given extension name.]
for taget[name[extension]] in starred[call[name[list_extensions], parameter[name[request]]]] begin[:]
if compare[name[extension].name equal[==] name[extension_name]] begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[extension_supported] ( identifier[request] , identifier[extension_name] ):
literal[string]
keyword[for] identifier[extension] keyword[in] identifier[list_extensions] ( identifier[request] ):
keyword[if] identifier[extension] . identifier[name] == identifier[extension_name] :
keyword[return] keyword[True]
keyword[return] keyword[False] | def extension_supported(request, extension_name):
"""This method will determine if Cinder supports a given extension name."""
for extension in list_extensions(request):
if extension.name == extension_name:
return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['extension']]
return False |
def search_feature_sets(self, dataset_id):
"""
Returns an iterator over the FeatureSets fulfilling the specified
conditions from the specified Dataset.
:param str dataset_id: The ID of the
:class:`ga4gh.protocol.Dataset` of interest.
:return: An iterator over the :class:`ga4gh.protocol.FeatureSet`
objects defined by the query parameters.
"""
request = protocol.SearchFeatureSetsRequest()
request.dataset_id = dataset_id
request.page_size = pb.int(self._page_size)
return self._run_search_request(
request, "featuresets", protocol.SearchFeatureSetsResponse) | def function[search_feature_sets, parameter[self, dataset_id]]:
constant[
Returns an iterator over the FeatureSets fulfilling the specified
conditions from the specified Dataset.
:param str dataset_id: The ID of the
:class:`ga4gh.protocol.Dataset` of interest.
:return: An iterator over the :class:`ga4gh.protocol.FeatureSet`
objects defined by the query parameters.
]
variable[request] assign[=] call[name[protocol].SearchFeatureSetsRequest, parameter[]]
name[request].dataset_id assign[=] name[dataset_id]
name[request].page_size assign[=] call[name[pb].int, parameter[name[self]._page_size]]
return[call[name[self]._run_search_request, parameter[name[request], constant[featuresets], name[protocol].SearchFeatureSetsResponse]]] | keyword[def] identifier[search_feature_sets] ( identifier[self] , identifier[dataset_id] ):
literal[string]
identifier[request] = identifier[protocol] . identifier[SearchFeatureSetsRequest] ()
identifier[request] . identifier[dataset_id] = identifier[dataset_id]
identifier[request] . identifier[page_size] = identifier[pb] . identifier[int] ( identifier[self] . identifier[_page_size] )
keyword[return] identifier[self] . identifier[_run_search_request] (
identifier[request] , literal[string] , identifier[protocol] . identifier[SearchFeatureSetsResponse] ) | def search_feature_sets(self, dataset_id):
"""
Returns an iterator over the FeatureSets fulfilling the specified
conditions from the specified Dataset.
:param str dataset_id: The ID of the
:class:`ga4gh.protocol.Dataset` of interest.
:return: An iterator over the :class:`ga4gh.protocol.FeatureSet`
objects defined by the query parameters.
"""
request = protocol.SearchFeatureSetsRequest()
request.dataset_id = dataset_id
request.page_size = pb.int(self._page_size)
return self._run_search_request(request, 'featuresets', protocol.SearchFeatureSetsResponse) |
def _from_jd_schematic(jd, method):
'''Convert from JD using various leap-year calculation methods'''
if jd < EPOCH:
raise ValueError("Can't convert days before the French Revolution")
# days since Epoch
J = trunc(jd) + 0.5 - EPOCH
y0, y1, y2, y3, y4, y5 = 0, 0, 0, 0, 0, 0
intercal_cycle_days = leap_suppression_days = over_cycle_days = None
# Use the every-four-years method below year 17
if (J <= DAYS_IN_YEAR * 12 + 3 and
method in (100, 'romme')) or (J <= DAYS_IN_YEAR * 17 + 4 and method in (128, 'madler')):
method = 4
# set p and r in Hatcher algorithm
if method in (4, 'continuous'):
# Leap years: 15, 19, 23, ...
# Reorganize so that leap day is last day of cycle
J = J + 365
y5 = - 1
elif method in (100, 'romme'):
# Year 15 is not a leap year
# Year 16 is leap, then multiples of 4, not multiples of 100, yes multiples of 400
y5 = 12
J = J - DAYS_IN_YEAR * 12 - 3
leap_suppression_yrs = 100.
leap_suppression_days = 36524 # LEAP_CYCLE_DAYS * 25 - 1
intercal_cycle_yrs = 400.
intercal_cycle_days = 146097 # leap_suppression_days * 4 + 1
over_cycle_yrs = 4000.
over_cycle_days = 1460969 # intercal_cycle_days * 10 - 1
elif method in (128, 'madler'):
# Year 15 is a leap year, then year 20 and multiples of 4, not multiples of 128
y5 = 16
J = J - DAYS_IN_YEAR * 16 - 4
leap_suppression_yrs = 128
leap_suppression_days = 46751 # 32 * leap_cycle_days - 1
else:
raise ValueError("Unknown leap year method. Try: continuous, romme, madler or equinox")
if over_cycle_days:
y0 = trunc(J / over_cycle_days) * over_cycle_yrs
J = J % over_cycle_days
if intercal_cycle_days:
y1 = trunc(J / intercal_cycle_days) * intercal_cycle_yrs
J = J % intercal_cycle_days
if leap_suppression_days:
y2 = trunc(J / leap_suppression_days) * leap_suppression_yrs
J = J % leap_suppression_days
y3 = trunc(J / LEAP_CYCLE_DAYS) * LEAP_CYCLE_YEARS
if J % LEAP_CYCLE_DAYS == LEAP_CYCLE_DAYS - 1:
J = 1460
else:
J = J % LEAP_CYCLE_DAYS
# 0 <= J <= 1460
# J needs to be 365 here on leap days ONLY
y4 = trunc(J / DAYS_IN_YEAR)
if J == DAYS_IN_YEAR * 4:
y4 = y4 - 1
J = 365.0
else:
J = J % DAYS_IN_YEAR
year = y0 + y1 + y2 + y3 + y4 + y5
month = trunc(J / 30.)
J = J - month * 30
return year + 1, month + 1, trunc(J) + 1 | def function[_from_jd_schematic, parameter[jd, method]]:
constant[Convert from JD using various leap-year calculation methods]
if compare[name[jd] less[<] name[EPOCH]] begin[:]
<ast.Raise object at 0x7da1b0f3b070>
variable[J] assign[=] binary_operation[binary_operation[call[name[trunc], parameter[name[jd]]] + constant[0.5]] - name[EPOCH]]
<ast.Tuple object at 0x7da1b0f3a2f0> assign[=] tuple[[<ast.Constant object at 0x7da1b0f3bc70>, <ast.Constant object at 0x7da1b0f3b130>, <ast.Constant object at 0x7da1b0f3a350>, <ast.Constant object at 0x7da1b0f388b0>, <ast.Constant object at 0x7da1b0f39210>, <ast.Constant object at 0x7da1b0e60130>]]
variable[intercal_cycle_days] assign[=] constant[None]
if <ast.BoolOp object at 0x7da1b0e61960> begin[:]
variable[method] assign[=] constant[4]
if compare[name[method] in tuple[[<ast.Constant object at 0x7da1b0e63a90>, <ast.Constant object at 0x7da1b0e63dc0>]]] begin[:]
variable[J] assign[=] binary_operation[name[J] + constant[365]]
variable[y5] assign[=] <ast.UnaryOp object at 0x7da1b0e60c40>
if name[over_cycle_days] begin[:]
variable[y0] assign[=] binary_operation[call[name[trunc], parameter[binary_operation[name[J] / name[over_cycle_days]]]] * name[over_cycle_yrs]]
variable[J] assign[=] binary_operation[name[J] <ast.Mod object at 0x7da2590d6920> name[over_cycle_days]]
if name[intercal_cycle_days] begin[:]
variable[y1] assign[=] binary_operation[call[name[trunc], parameter[binary_operation[name[J] / name[intercal_cycle_days]]]] * name[intercal_cycle_yrs]]
variable[J] assign[=] binary_operation[name[J] <ast.Mod object at 0x7da2590d6920> name[intercal_cycle_days]]
if name[leap_suppression_days] begin[:]
variable[y2] assign[=] binary_operation[call[name[trunc], parameter[binary_operation[name[J] / name[leap_suppression_days]]]] * name[leap_suppression_yrs]]
variable[J] assign[=] binary_operation[name[J] <ast.Mod object at 0x7da2590d6920> name[leap_suppression_days]]
variable[y3] assign[=] binary_operation[call[name[trunc], parameter[binary_operation[name[J] / name[LEAP_CYCLE_DAYS]]]] * name[LEAP_CYCLE_YEARS]]
if compare[binary_operation[name[J] <ast.Mod object at 0x7da2590d6920> name[LEAP_CYCLE_DAYS]] equal[==] binary_operation[name[LEAP_CYCLE_DAYS] - constant[1]]] begin[:]
variable[J] assign[=] constant[1460]
variable[y4] assign[=] call[name[trunc], parameter[binary_operation[name[J] / name[DAYS_IN_YEAR]]]]
if compare[name[J] equal[==] binary_operation[name[DAYS_IN_YEAR] * constant[4]]] begin[:]
variable[y4] assign[=] binary_operation[name[y4] - constant[1]]
variable[J] assign[=] constant[365.0]
variable[year] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[name[y0] + name[y1]] + name[y2]] + name[y3]] + name[y4]] + name[y5]]
variable[month] assign[=] call[name[trunc], parameter[binary_operation[name[J] / constant[30.0]]]]
variable[J] assign[=] binary_operation[name[J] - binary_operation[name[month] * constant[30]]]
return[tuple[[<ast.BinOp object at 0x7da1b0ef4f40>, <ast.BinOp object at 0x7da1b0ef4490>, <ast.BinOp object at 0x7da1b0ef4a00>]]] | keyword[def] identifier[_from_jd_schematic] ( identifier[jd] , identifier[method] ):
literal[string]
keyword[if] identifier[jd] < identifier[EPOCH] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[J] = identifier[trunc] ( identifier[jd] )+ literal[int] - identifier[EPOCH]
identifier[y0] , identifier[y1] , identifier[y2] , identifier[y3] , identifier[y4] , identifier[y5] = literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int]
identifier[intercal_cycle_days] = identifier[leap_suppression_days] = identifier[over_cycle_days] = keyword[None]
keyword[if] ( identifier[J] <= identifier[DAYS_IN_YEAR] * literal[int] + literal[int] keyword[and]
identifier[method] keyword[in] ( literal[int] , literal[string] )) keyword[or] ( identifier[J] <= identifier[DAYS_IN_YEAR] * literal[int] + literal[int] keyword[and] identifier[method] keyword[in] ( literal[int] , literal[string] )):
identifier[method] = literal[int]
keyword[if] identifier[method] keyword[in] ( literal[int] , literal[string] ):
identifier[J] = identifier[J] + literal[int]
identifier[y5] =- literal[int]
keyword[elif] identifier[method] keyword[in] ( literal[int] , literal[string] ):
identifier[y5] = literal[int]
identifier[J] = identifier[J] - identifier[DAYS_IN_YEAR] * literal[int] - literal[int]
identifier[leap_suppression_yrs] = literal[int]
identifier[leap_suppression_days] = literal[int]
identifier[intercal_cycle_yrs] = literal[int]
identifier[intercal_cycle_days] = literal[int]
identifier[over_cycle_yrs] = literal[int]
identifier[over_cycle_days] = literal[int]
keyword[elif] identifier[method] keyword[in] ( literal[int] , literal[string] ):
identifier[y5] = literal[int]
identifier[J] = identifier[J] - identifier[DAYS_IN_YEAR] * literal[int] - literal[int]
identifier[leap_suppression_yrs] = literal[int]
identifier[leap_suppression_days] = literal[int]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[over_cycle_days] :
identifier[y0] = identifier[trunc] ( identifier[J] / identifier[over_cycle_days] )* identifier[over_cycle_yrs]
identifier[J] = identifier[J] % identifier[over_cycle_days]
keyword[if] identifier[intercal_cycle_days] :
identifier[y1] = identifier[trunc] ( identifier[J] / identifier[intercal_cycle_days] )* identifier[intercal_cycle_yrs]
identifier[J] = identifier[J] % identifier[intercal_cycle_days]
keyword[if] identifier[leap_suppression_days] :
identifier[y2] = identifier[trunc] ( identifier[J] / identifier[leap_suppression_days] )* identifier[leap_suppression_yrs]
identifier[J] = identifier[J] % identifier[leap_suppression_days]
identifier[y3] = identifier[trunc] ( identifier[J] / identifier[LEAP_CYCLE_DAYS] )* identifier[LEAP_CYCLE_YEARS]
keyword[if] identifier[J] % identifier[LEAP_CYCLE_DAYS] == identifier[LEAP_CYCLE_DAYS] - literal[int] :
identifier[J] = literal[int]
keyword[else] :
identifier[J] = identifier[J] % identifier[LEAP_CYCLE_DAYS]
identifier[y4] = identifier[trunc] ( identifier[J] / identifier[DAYS_IN_YEAR] )
keyword[if] identifier[J] == identifier[DAYS_IN_YEAR] * literal[int] :
identifier[y4] = identifier[y4] - literal[int]
identifier[J] = literal[int]
keyword[else] :
identifier[J] = identifier[J] % identifier[DAYS_IN_YEAR]
identifier[year] = identifier[y0] + identifier[y1] + identifier[y2] + identifier[y3] + identifier[y4] + identifier[y5]
identifier[month] = identifier[trunc] ( identifier[J] / literal[int] )
identifier[J] = identifier[J] - identifier[month] * literal[int]
keyword[return] identifier[year] + literal[int] , identifier[month] + literal[int] , identifier[trunc] ( identifier[J] )+ literal[int] | def _from_jd_schematic(jd, method):
"""Convert from JD using various leap-year calculation methods"""
if jd < EPOCH:
raise ValueError("Can't convert days before the French Revolution") # depends on [control=['if'], data=[]]
# days since Epoch
J = trunc(jd) + 0.5 - EPOCH
(y0, y1, y2, y3, y4, y5) = (0, 0, 0, 0, 0, 0)
intercal_cycle_days = leap_suppression_days = over_cycle_days = None
# Use the every-four-years method below year 17
if J <= DAYS_IN_YEAR * 12 + 3 and method in (100, 'romme') or (J <= DAYS_IN_YEAR * 17 + 4 and method in (128, 'madler')):
method = 4 # depends on [control=['if'], data=[]]
# set p and r in Hatcher algorithm
if method in (4, 'continuous'):
# Leap years: 15, 19, 23, ...
# Reorganize so that leap day is last day of cycle
J = J + 365
y5 = -1 # depends on [control=['if'], data=[]]
elif method in (100, 'romme'):
# Year 15 is not a leap year
# Year 16 is leap, then multiples of 4, not multiples of 100, yes multiples of 400
y5 = 12
J = J - DAYS_IN_YEAR * 12 - 3
leap_suppression_yrs = 100.0
leap_suppression_days = 36524 # LEAP_CYCLE_DAYS * 25 - 1
intercal_cycle_yrs = 400.0
intercal_cycle_days = 146097 # leap_suppression_days * 4 + 1
over_cycle_yrs = 4000.0
over_cycle_days = 1460969 # intercal_cycle_days * 10 - 1 # depends on [control=['if'], data=[]]
elif method in (128, 'madler'):
# Year 15 is a leap year, then year 20 and multiples of 4, not multiples of 128
y5 = 16
J = J - DAYS_IN_YEAR * 16 - 4
leap_suppression_yrs = 128
leap_suppression_days = 46751 # 32 * leap_cycle_days - 1 # depends on [control=['if'], data=[]]
else:
raise ValueError('Unknown leap year method. Try: continuous, romme, madler or equinox')
if over_cycle_days:
y0 = trunc(J / over_cycle_days) * over_cycle_yrs
J = J % over_cycle_days # depends on [control=['if'], data=[]]
if intercal_cycle_days:
y1 = trunc(J / intercal_cycle_days) * intercal_cycle_yrs
J = J % intercal_cycle_days # depends on [control=['if'], data=[]]
if leap_suppression_days:
y2 = trunc(J / leap_suppression_days) * leap_suppression_yrs
J = J % leap_suppression_days # depends on [control=['if'], data=[]]
y3 = trunc(J / LEAP_CYCLE_DAYS) * LEAP_CYCLE_YEARS
if J % LEAP_CYCLE_DAYS == LEAP_CYCLE_DAYS - 1:
J = 1460 # depends on [control=['if'], data=[]]
else:
J = J % LEAP_CYCLE_DAYS
# 0 <= J <= 1460
# J needs to be 365 here on leap days ONLY
y4 = trunc(J / DAYS_IN_YEAR)
if J == DAYS_IN_YEAR * 4:
y4 = y4 - 1
J = 365.0 # depends on [control=['if'], data=['J']]
else:
J = J % DAYS_IN_YEAR
year = y0 + y1 + y2 + y3 + y4 + y5
month = trunc(J / 30.0)
J = J - month * 30
return (year + 1, month + 1, trunc(J) + 1) |
async def register(*address_list, cluster=None, loop=None):
"""Start Raft node (server)
Args:
address_list — 127.0.0.1:8000 [, 127.0.0.1:8001 ...]
cluster — [127.0.0.1:8001, 127.0.0.1:8002, ...]
"""
loop = loop or asyncio.get_event_loop()
for address in address_list:
host, port = address.rsplit(':', 1)
node = Node(address=(host, int(port)), loop=loop)
await node.start()
for address in cluster:
host, port = address.rsplit(':', 1)
port = int(port)
if (host, port) != (node.host, node.port):
node.update_cluster((host, port)) | <ast.AsyncFunctionDef object at 0x7da1b1529a20> | keyword[async] keyword[def] identifier[register] (* identifier[address_list] , identifier[cluster] = keyword[None] , identifier[loop] = keyword[None] ):
literal[string]
identifier[loop] = identifier[loop] keyword[or] identifier[asyncio] . identifier[get_event_loop] ()
keyword[for] identifier[address] keyword[in] identifier[address_list] :
identifier[host] , identifier[port] = identifier[address] . identifier[rsplit] ( literal[string] , literal[int] )
identifier[node] = identifier[Node] ( identifier[address] =( identifier[host] , identifier[int] ( identifier[port] )), identifier[loop] = identifier[loop] )
keyword[await] identifier[node] . identifier[start] ()
keyword[for] identifier[address] keyword[in] identifier[cluster] :
identifier[host] , identifier[port] = identifier[address] . identifier[rsplit] ( literal[string] , literal[int] )
identifier[port] = identifier[int] ( identifier[port] )
keyword[if] ( identifier[host] , identifier[port] )!=( identifier[node] . identifier[host] , identifier[node] . identifier[port] ):
identifier[node] . identifier[update_cluster] (( identifier[host] , identifier[port] )) | async def register(*address_list, cluster=None, loop=None):
"""Start Raft node (server)
Args:
address_list — 127.0.0.1:8000 [, 127.0.0.1:8001 ...]
cluster — [127.0.0.1:8001, 127.0.0.1:8002, ...]
"""
loop = loop or asyncio.get_event_loop()
for address in address_list:
(host, port) = address.rsplit(':', 1)
node = Node(address=(host, int(port)), loop=loop)
await node.start()
for address in cluster:
(host, port) = address.rsplit(':', 1)
port = int(port)
if (host, port) != (node.host, node.port):
node.update_cluster((host, port)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['address']] # depends on [control=['for'], data=['address']] |
def to_named_tuple(keys, values):
"""
Convert a sqlalchemy object into a named tuple
"""
values = [dbobject.__dict__[key] for key in dbobject.keys()]
tuple_object = namedtuple('DBObject', dbobject.keys())
tuple_instance = tuple_object._make(values)
return tuple_instance | def function[to_named_tuple, parameter[keys, values]]:
constant[
Convert a sqlalchemy object into a named tuple
]
variable[values] assign[=] <ast.ListComp object at 0x7da18bcc8070>
variable[tuple_object] assign[=] call[name[namedtuple], parameter[constant[DBObject], call[name[dbobject].keys, parameter[]]]]
variable[tuple_instance] assign[=] call[name[tuple_object]._make, parameter[name[values]]]
return[name[tuple_instance]] | keyword[def] identifier[to_named_tuple] ( identifier[keys] , identifier[values] ):
literal[string]
identifier[values] =[ identifier[dbobject] . identifier[__dict__] [ identifier[key] ] keyword[for] identifier[key] keyword[in] identifier[dbobject] . identifier[keys] ()]
identifier[tuple_object] = identifier[namedtuple] ( literal[string] , identifier[dbobject] . identifier[keys] ())
identifier[tuple_instance] = identifier[tuple_object] . identifier[_make] ( identifier[values] )
keyword[return] identifier[tuple_instance] | def to_named_tuple(keys, values):
"""
Convert a sqlalchemy object into a named tuple
"""
values = [dbobject.__dict__[key] for key in dbobject.keys()]
tuple_object = namedtuple('DBObject', dbobject.keys())
tuple_instance = tuple_object._make(values)
return tuple_instance |
def filter_core_keywords(keywords):
"""Only return keywords that are CORE."""
matches = {}
for kw, info in keywords.items():
if kw.core:
matches[kw] = info
return matches | def function[filter_core_keywords, parameter[keywords]]:
constant[Only return keywords that are CORE.]
variable[matches] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da204345f60>, <ast.Name object at 0x7da204346c20>]]] in starred[call[name[keywords].items, parameter[]]] begin[:]
if name[kw].core begin[:]
call[name[matches]][name[kw]] assign[=] name[info]
return[name[matches]] | keyword[def] identifier[filter_core_keywords] ( identifier[keywords] ):
literal[string]
identifier[matches] ={}
keyword[for] identifier[kw] , identifier[info] keyword[in] identifier[keywords] . identifier[items] ():
keyword[if] identifier[kw] . identifier[core] :
identifier[matches] [ identifier[kw] ]= identifier[info]
keyword[return] identifier[matches] | def filter_core_keywords(keywords):
"""Only return keywords that are CORE."""
matches = {}
for (kw, info) in keywords.items():
if kw.core:
matches[kw] = info # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return matches |
def update(self, duration):
"""Add a recorded duration."""
if duration >= 0:
self.histogram.update(duration)
self.meter.mark() | def function[update, parameter[self, duration]]:
constant[Add a recorded duration.]
if compare[name[duration] greater_or_equal[>=] constant[0]] begin[:]
call[name[self].histogram.update, parameter[name[duration]]]
call[name[self].meter.mark, parameter[]] | keyword[def] identifier[update] ( identifier[self] , identifier[duration] ):
literal[string]
keyword[if] identifier[duration] >= literal[int] :
identifier[self] . identifier[histogram] . identifier[update] ( identifier[duration] )
identifier[self] . identifier[meter] . identifier[mark] () | def update(self, duration):
"""Add a recorded duration."""
if duration >= 0:
self.histogram.update(duration)
self.meter.mark() # depends on [control=['if'], data=['duration']] |
def resolve_pname(self, pname: PrefName,
mid: ModuleId) -> Tuple[YangIdentifier, ModuleId]:
"""Return the name and module identifier in which the name is defined.
Args:
pname: Name with an optional prefix.
mid: Identifier of the module in which `pname` appears.
Raises:
ModuleNotRegistered: If `mid` is not registered in the data model.
UnknownPrefix: If the prefix specified in `pname` is not declared.
"""
p, s, loc = pname.partition(":")
try:
mdata = self.modules[mid]
except KeyError:
raise ModuleNotRegistered(*mid) from None
try:
return (loc, mdata.prefix_map[p]) if s else (p, mdata.main_module)
except KeyError:
raise UnknownPrefix(p, mid) from None | def function[resolve_pname, parameter[self, pname, mid]]:
constant[Return the name and module identifier in which the name is defined.
Args:
pname: Name with an optional prefix.
mid: Identifier of the module in which `pname` appears.
Raises:
ModuleNotRegistered: If `mid` is not registered in the data model.
UnknownPrefix: If the prefix specified in `pname` is not declared.
]
<ast.Tuple object at 0x7da1b052a920> assign[=] call[name[pname].partition, parameter[constant[:]]]
<ast.Try object at 0x7da1b0528d90>
<ast.Try object at 0x7da1b05280a0> | keyword[def] identifier[resolve_pname] ( identifier[self] , identifier[pname] : identifier[PrefName] ,
identifier[mid] : identifier[ModuleId] )-> identifier[Tuple] [ identifier[YangIdentifier] , identifier[ModuleId] ]:
literal[string]
identifier[p] , identifier[s] , identifier[loc] = identifier[pname] . identifier[partition] ( literal[string] )
keyword[try] :
identifier[mdata] = identifier[self] . identifier[modules] [ identifier[mid] ]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[ModuleNotRegistered] (* identifier[mid] ) keyword[from] keyword[None]
keyword[try] :
keyword[return] ( identifier[loc] , identifier[mdata] . identifier[prefix_map] [ identifier[p] ]) keyword[if] identifier[s] keyword[else] ( identifier[p] , identifier[mdata] . identifier[main_module] )
keyword[except] identifier[KeyError] :
keyword[raise] identifier[UnknownPrefix] ( identifier[p] , identifier[mid] ) keyword[from] keyword[None] | def resolve_pname(self, pname: PrefName, mid: ModuleId) -> Tuple[YangIdentifier, ModuleId]:
"""Return the name and module identifier in which the name is defined.
Args:
pname: Name with an optional prefix.
mid: Identifier of the module in which `pname` appears.
Raises:
ModuleNotRegistered: If `mid` is not registered in the data model.
UnknownPrefix: If the prefix specified in `pname` is not declared.
"""
(p, s, loc) = pname.partition(':')
try:
mdata = self.modules[mid] # depends on [control=['try'], data=[]]
except KeyError:
raise ModuleNotRegistered(*mid) from None # depends on [control=['except'], data=[]]
try:
return (loc, mdata.prefix_map[p]) if s else (p, mdata.main_module) # depends on [control=['try'], data=[]]
except KeyError:
raise UnknownPrefix(p, mid) from None # depends on [control=['except'], data=[]] |
def plot_CI(
ax,
sampler,
modelidx=0,
sed=True,
confs=[3, 1, 0.5],
e_unit=u.eV,
label=None,
e_range=None,
e_npoints=100,
threads=None,
last_step=False,
):
"""Plot confidence interval.
Parameters
----------
ax : `matplotlib.Axes`
Axes to plot on.
sampler : `emcee.EnsembleSampler`
Sampler
modelidx : int, optional
Model index. Default is 0
sed : bool, optional
Whether to plot SED or differential spectrum. If `None`, the units of
the observed spectrum will be used.
confs : list, optional
List of confidence levels (in sigma) to use for generating the
confidence intervals. Default is `[3,1,0.5]`
e_unit : :class:`~astropy.units.Unit` or str parseable to unit
Unit in which to plot energy axis.
e_npoints : int, optional
How many points to compute for the model samples and ML model if
`e_range` is set.
threads : int, optional
How many parallel processing threads to use when computing the samples.
Defaults to the number of available cores.
last_step : bool, optional
Whether to only use the positions in the final step of the run (True,
default) or the whole chain (False).
"""
confs.sort(reverse=True)
modelx, CI = _calc_CI(
sampler,
modelidx=modelidx,
confs=confs,
e_range=e_range,
e_npoints=e_npoints,
last_step=last_step,
threads=threads,
)
# pick first confidence interval curve for units
f_unit, sedf = sed_conversion(modelx, CI[0][0].unit, sed)
for (ymin, ymax), conf in zip(CI, confs):
color = np.log(conf) / np.log(20) + 0.4
ax.fill_between(
modelx.to(e_unit).value,
(ymax * sedf).to(f_unit).value,
(ymin * sedf).to(f_unit).value,
lw=0.001,
color=(color,) * 3,
alpha=0.6,
zorder=-10,
)
_plot_MLmodel(ax, sampler, modelidx, e_range, e_npoints, e_unit, sed)
if label is not None:
ax.set_ylabel(
"{0} [{1}]".format(label, f_unit.to_string("latex_inline"))
) | def function[plot_CI, parameter[ax, sampler, modelidx, sed, confs, e_unit, label, e_range, e_npoints, threads, last_step]]:
constant[Plot confidence interval.
Parameters
----------
ax : `matplotlib.Axes`
Axes to plot on.
sampler : `emcee.EnsembleSampler`
Sampler
modelidx : int, optional
Model index. Default is 0
sed : bool, optional
Whether to plot SED or differential spectrum. If `None`, the units of
the observed spectrum will be used.
confs : list, optional
List of confidence levels (in sigma) to use for generating the
confidence intervals. Default is `[3,1,0.5]`
e_unit : :class:`~astropy.units.Unit` or str parseable to unit
Unit in which to plot energy axis.
e_npoints : int, optional
How many points to compute for the model samples and ML model if
`e_range` is set.
threads : int, optional
How many parallel processing threads to use when computing the samples.
Defaults to the number of available cores.
last_step : bool, optional
Whether to only use the positions in the final step of the run (True,
default) or the whole chain (False).
]
call[name[confs].sort, parameter[]]
<ast.Tuple object at 0x7da1b0c9c550> assign[=] call[name[_calc_CI], parameter[name[sampler]]]
<ast.Tuple object at 0x7da1b0c9c7c0> assign[=] call[name[sed_conversion], parameter[name[modelx], call[call[name[CI]][constant[0]]][constant[0]].unit, name[sed]]]
for taget[tuple[[<ast.Tuple object at 0x7da1b0cf1180>, <ast.Name object at 0x7da1b0cf1240>]]] in starred[call[name[zip], parameter[name[CI], name[confs]]]] begin[:]
variable[color] assign[=] binary_operation[binary_operation[call[name[np].log, parameter[name[conf]]] / call[name[np].log, parameter[constant[20]]]] + constant[0.4]]
call[name[ax].fill_between, parameter[call[name[modelx].to, parameter[name[e_unit]]].value, call[binary_operation[name[ymax] * name[sedf]].to, parameter[name[f_unit]]].value, call[binary_operation[name[ymin] * name[sedf]].to, parameter[name[f_unit]]].value]]
call[name[_plot_MLmodel], parameter[name[ax], name[sampler], name[modelidx], name[e_range], name[e_npoints], name[e_unit], name[sed]]]
if compare[name[label] is_not constant[None]] begin[:]
call[name[ax].set_ylabel, parameter[call[constant[{0} [{1}]].format, parameter[name[label], call[name[f_unit].to_string, parameter[constant[latex_inline]]]]]]] | keyword[def] identifier[plot_CI] (
identifier[ax] ,
identifier[sampler] ,
identifier[modelidx] = literal[int] ,
identifier[sed] = keyword[True] ,
identifier[confs] =[ literal[int] , literal[int] , literal[int] ],
identifier[e_unit] = identifier[u] . identifier[eV] ,
identifier[label] = keyword[None] ,
identifier[e_range] = keyword[None] ,
identifier[e_npoints] = literal[int] ,
identifier[threads] = keyword[None] ,
identifier[last_step] = keyword[False] ,
):
literal[string]
identifier[confs] . identifier[sort] ( identifier[reverse] = keyword[True] )
identifier[modelx] , identifier[CI] = identifier[_calc_CI] (
identifier[sampler] ,
identifier[modelidx] = identifier[modelidx] ,
identifier[confs] = identifier[confs] ,
identifier[e_range] = identifier[e_range] ,
identifier[e_npoints] = identifier[e_npoints] ,
identifier[last_step] = identifier[last_step] ,
identifier[threads] = identifier[threads] ,
)
identifier[f_unit] , identifier[sedf] = identifier[sed_conversion] ( identifier[modelx] , identifier[CI] [ literal[int] ][ literal[int] ]. identifier[unit] , identifier[sed] )
keyword[for] ( identifier[ymin] , identifier[ymax] ), identifier[conf] keyword[in] identifier[zip] ( identifier[CI] , identifier[confs] ):
identifier[color] = identifier[np] . identifier[log] ( identifier[conf] )/ identifier[np] . identifier[log] ( literal[int] )+ literal[int]
identifier[ax] . identifier[fill_between] (
identifier[modelx] . identifier[to] ( identifier[e_unit] ). identifier[value] ,
( identifier[ymax] * identifier[sedf] ). identifier[to] ( identifier[f_unit] ). identifier[value] ,
( identifier[ymin] * identifier[sedf] ). identifier[to] ( identifier[f_unit] ). identifier[value] ,
identifier[lw] = literal[int] ,
identifier[color] =( identifier[color] ,)* literal[int] ,
identifier[alpha] = literal[int] ,
identifier[zorder] =- literal[int] ,
)
identifier[_plot_MLmodel] ( identifier[ax] , identifier[sampler] , identifier[modelidx] , identifier[e_range] , identifier[e_npoints] , identifier[e_unit] , identifier[sed] )
keyword[if] identifier[label] keyword[is] keyword[not] keyword[None] :
identifier[ax] . identifier[set_ylabel] (
literal[string] . identifier[format] ( identifier[label] , identifier[f_unit] . identifier[to_string] ( literal[string] ))
) | def plot_CI(ax, sampler, modelidx=0, sed=True, confs=[3, 1, 0.5], e_unit=u.eV, label=None, e_range=None, e_npoints=100, threads=None, last_step=False):
"""Plot confidence interval.
Parameters
----------
ax : `matplotlib.Axes`
Axes to plot on.
sampler : `emcee.EnsembleSampler`
Sampler
modelidx : int, optional
Model index. Default is 0
sed : bool, optional
Whether to plot SED or differential spectrum. If `None`, the units of
the observed spectrum will be used.
confs : list, optional
List of confidence levels (in sigma) to use for generating the
confidence intervals. Default is `[3,1,0.5]`
e_unit : :class:`~astropy.units.Unit` or str parseable to unit
Unit in which to plot energy axis.
e_npoints : int, optional
How many points to compute for the model samples and ML model if
`e_range` is set.
threads : int, optional
How many parallel processing threads to use when computing the samples.
Defaults to the number of available cores.
last_step : bool, optional
Whether to only use the positions in the final step of the run (True,
default) or the whole chain (False).
"""
confs.sort(reverse=True)
(modelx, CI) = _calc_CI(sampler, modelidx=modelidx, confs=confs, e_range=e_range, e_npoints=e_npoints, last_step=last_step, threads=threads)
# pick first confidence interval curve for units
(f_unit, sedf) = sed_conversion(modelx, CI[0][0].unit, sed)
for ((ymin, ymax), conf) in zip(CI, confs):
color = np.log(conf) / np.log(20) + 0.4
ax.fill_between(modelx.to(e_unit).value, (ymax * sedf).to(f_unit).value, (ymin * sedf).to(f_unit).value, lw=0.001, color=(color,) * 3, alpha=0.6, zorder=-10) # depends on [control=['for'], data=[]]
_plot_MLmodel(ax, sampler, modelidx, e_range, e_npoints, e_unit, sed)
if label is not None:
ax.set_ylabel('{0} [{1}]'.format(label, f_unit.to_string('latex_inline'))) # depends on [control=['if'], data=['label']] |
def has_preview(self):
"""stub"""
# I had to add the following check because file record types don't seem to be implemented
# correctly for raw edx Question objects
if ('fileIds' not in self.my_osid_object._my_map or
'preview' not in self.my_osid_object._my_map['fileIds'] or
self.my_osid_object._my_map['fileIds']['preview'] is None):
return False
return bool(self.my_osid_object._my_map['fileIds']['preview']) | def function[has_preview, parameter[self]]:
constant[stub]
if <ast.BoolOp object at 0x7da20e9576d0> begin[:]
return[constant[False]]
return[call[name[bool], parameter[call[call[name[self].my_osid_object._my_map][constant[fileIds]]][constant[preview]]]]] | keyword[def] identifier[has_preview] ( identifier[self] ):
literal[string]
keyword[if] ( literal[string] keyword[not] keyword[in] identifier[self] . identifier[my_osid_object] . identifier[_my_map] keyword[or]
literal[string] keyword[not] keyword[in] identifier[self] . identifier[my_osid_object] . identifier[_my_map] [ literal[string] ] keyword[or]
identifier[self] . identifier[my_osid_object] . identifier[_my_map] [ literal[string] ][ literal[string] ] keyword[is] keyword[None] ):
keyword[return] keyword[False]
keyword[return] identifier[bool] ( identifier[self] . identifier[my_osid_object] . identifier[_my_map] [ literal[string] ][ literal[string] ]) | def has_preview(self):
"""stub"""
# I had to add the following check because file record types don't seem to be implemented
# correctly for raw edx Question objects
if 'fileIds' not in self.my_osid_object._my_map or 'preview' not in self.my_osid_object._my_map['fileIds'] or self.my_osid_object._my_map['fileIds']['preview'] is None:
return False # depends on [control=['if'], data=[]]
return bool(self.my_osid_object._my_map['fileIds']['preview']) |
def version(verbose):
"""Prints the current version number"""
print(Fore.BLUE + '-=' * 15)
print(Fore.YELLOW + 'Superset ' + Fore.CYAN + '{version}'.format(
version=config.get('VERSION_STRING')))
print(Fore.BLUE + '-=' * 15)
if verbose:
print('[DB] : ' + '{}'.format(db.engine))
print(Style.RESET_ALL) | def function[version, parameter[verbose]]:
constant[Prints the current version number]
call[name[print], parameter[binary_operation[name[Fore].BLUE + binary_operation[constant[-=] * constant[15]]]]]
call[name[print], parameter[binary_operation[binary_operation[binary_operation[name[Fore].YELLOW + constant[Superset ]] + name[Fore].CYAN] + call[constant[{version}].format, parameter[]]]]]
call[name[print], parameter[binary_operation[name[Fore].BLUE + binary_operation[constant[-=] * constant[15]]]]]
if name[verbose] begin[:]
call[name[print], parameter[binary_operation[constant[[DB] : ] + call[constant[{}].format, parameter[name[db].engine]]]]]
call[name[print], parameter[name[Style].RESET_ALL]] | keyword[def] identifier[version] ( identifier[verbose] ):
literal[string]
identifier[print] ( identifier[Fore] . identifier[BLUE] + literal[string] * literal[int] )
identifier[print] ( identifier[Fore] . identifier[YELLOW] + literal[string] + identifier[Fore] . identifier[CYAN] + literal[string] . identifier[format] (
identifier[version] = identifier[config] . identifier[get] ( literal[string] )))
identifier[print] ( identifier[Fore] . identifier[BLUE] + literal[string] * literal[int] )
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] + literal[string] . identifier[format] ( identifier[db] . identifier[engine] ))
identifier[print] ( identifier[Style] . identifier[RESET_ALL] ) | def version(verbose):
"""Prints the current version number"""
print(Fore.BLUE + '-=' * 15)
print(Fore.YELLOW + 'Superset ' + Fore.CYAN + '{version}'.format(version=config.get('VERSION_STRING')))
print(Fore.BLUE + '-=' * 15)
if verbose:
print('[DB] : ' + '{}'.format(db.engine)) # depends on [control=['if'], data=[]]
print(Style.RESET_ALL) |
def task_path(cls, project, location, queue, task):
"""Return a fully-qualified task string."""
return google.api_core.path_template.expand(
"projects/{project}/locations/{location}/queues/{queue}/tasks/{task}",
project=project,
location=location,
queue=queue,
task=task,
) | def function[task_path, parameter[cls, project, location, queue, task]]:
constant[Return a fully-qualified task string.]
return[call[name[google].api_core.path_template.expand, parameter[constant[projects/{project}/locations/{location}/queues/{queue}/tasks/{task}]]]] | keyword[def] identifier[task_path] ( identifier[cls] , identifier[project] , identifier[location] , identifier[queue] , identifier[task] ):
literal[string]
keyword[return] identifier[google] . identifier[api_core] . identifier[path_template] . identifier[expand] (
literal[string] ,
identifier[project] = identifier[project] ,
identifier[location] = identifier[location] ,
identifier[queue] = identifier[queue] ,
identifier[task] = identifier[task] ,
) | def task_path(cls, project, location, queue, task):
"""Return a fully-qualified task string."""
return google.api_core.path_template.expand('projects/{project}/locations/{location}/queues/{queue}/tasks/{task}', project=project, location=location, queue=queue, task=task) |
def vm_snapshot_delete(vm_name, kwargs=None, call=None):
'''
Deletes a virtual machine snapshot from the provided VM.
.. versionadded:: 2016.3.0
vm_name
The name of the VM from which to delete the snapshot.
snapshot_id
The ID of the snapshot to be deleted.
CLI Example:
.. code-block:: bash
salt-cloud -a vm_snapshot_delete my-vm snapshot_id=8
'''
if call != 'action':
raise SaltCloudSystemExit(
'The vm_snapshot_delete action must be called with -a or --action.'
)
if kwargs is None:
kwargs = {}
snapshot_id = kwargs.get('snapshot_id', None)
if snapshot_id is None:
raise SaltCloudSystemExit(
'The vm_snapshot_delete function requires a \'snapshot_id\' to be provided.'
)
server, user, password = _get_xml_rpc()
auth = ':'.join([user, password])
vm_id = int(get_vm_id(kwargs={'name': vm_name}))
response = server.one.vm.snapshotdelete(auth, vm_id, int(snapshot_id))
data = {
'action': 'vm.snapshotdelete',
'snapshot_deleted': response[0],
'vm_id': response[1],
'error_code': response[2],
}
return data | def function[vm_snapshot_delete, parameter[vm_name, kwargs, call]]:
constant[
Deletes a virtual machine snapshot from the provided VM.
.. versionadded:: 2016.3.0
vm_name
The name of the VM from which to delete the snapshot.
snapshot_id
The ID of the snapshot to be deleted.
CLI Example:
.. code-block:: bash
salt-cloud -a vm_snapshot_delete my-vm snapshot_id=8
]
if compare[name[call] not_equal[!=] constant[action]] begin[:]
<ast.Raise object at 0x7da20c795ed0>
if compare[name[kwargs] is constant[None]] begin[:]
variable[kwargs] assign[=] dictionary[[], []]
variable[snapshot_id] assign[=] call[name[kwargs].get, parameter[constant[snapshot_id], constant[None]]]
if compare[name[snapshot_id] is constant[None]] begin[:]
<ast.Raise object at 0x7da20c795120>
<ast.Tuple object at 0x7da20c794a90> assign[=] call[name[_get_xml_rpc], parameter[]]
variable[auth] assign[=] call[constant[:].join, parameter[list[[<ast.Name object at 0x7da20c795e10>, <ast.Name object at 0x7da20c794550>]]]]
variable[vm_id] assign[=] call[name[int], parameter[call[name[get_vm_id], parameter[]]]]
variable[response] assign[=] call[name[server].one.vm.snapshotdelete, parameter[name[auth], name[vm_id], call[name[int], parameter[name[snapshot_id]]]]]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da20c7951b0>, <ast.Constant object at 0x7da20c7946a0>, <ast.Constant object at 0x7da20c7967d0>, <ast.Constant object at 0x7da20c794e80>], [<ast.Constant object at 0x7da20c796620>, <ast.Subscript object at 0x7da20c7958d0>, <ast.Subscript object at 0x7da20c794b50>, <ast.Subscript object at 0x7da20c7959c0>]]
return[name[data]] | keyword[def] identifier[vm_snapshot_delete] ( identifier[vm_name] , identifier[kwargs] = keyword[None] , identifier[call] = keyword[None] ):
literal[string]
keyword[if] identifier[call] != literal[string] :
keyword[raise] identifier[SaltCloudSystemExit] (
literal[string]
)
keyword[if] identifier[kwargs] keyword[is] keyword[None] :
identifier[kwargs] ={}
identifier[snapshot_id] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[snapshot_id] keyword[is] keyword[None] :
keyword[raise] identifier[SaltCloudSystemExit] (
literal[string]
)
identifier[server] , identifier[user] , identifier[password] = identifier[_get_xml_rpc] ()
identifier[auth] = literal[string] . identifier[join] ([ identifier[user] , identifier[password] ])
identifier[vm_id] = identifier[int] ( identifier[get_vm_id] ( identifier[kwargs] ={ literal[string] : identifier[vm_name] }))
identifier[response] = identifier[server] . identifier[one] . identifier[vm] . identifier[snapshotdelete] ( identifier[auth] , identifier[vm_id] , identifier[int] ( identifier[snapshot_id] ))
identifier[data] ={
literal[string] : literal[string] ,
literal[string] : identifier[response] [ literal[int] ],
literal[string] : identifier[response] [ literal[int] ],
literal[string] : identifier[response] [ literal[int] ],
}
keyword[return] identifier[data] | def vm_snapshot_delete(vm_name, kwargs=None, call=None):
"""
Deletes a virtual machine snapshot from the provided VM.
.. versionadded:: 2016.3.0
vm_name
The name of the VM from which to delete the snapshot.
snapshot_id
The ID of the snapshot to be deleted.
CLI Example:
.. code-block:: bash
salt-cloud -a vm_snapshot_delete my-vm snapshot_id=8
"""
if call != 'action':
raise SaltCloudSystemExit('The vm_snapshot_delete action must be called with -a or --action.') # depends on [control=['if'], data=[]]
if kwargs is None:
kwargs = {} # depends on [control=['if'], data=['kwargs']]
snapshot_id = kwargs.get('snapshot_id', None)
if snapshot_id is None:
raise SaltCloudSystemExit("The vm_snapshot_delete function requires a 'snapshot_id' to be provided.") # depends on [control=['if'], data=[]]
(server, user, password) = _get_xml_rpc()
auth = ':'.join([user, password])
vm_id = int(get_vm_id(kwargs={'name': vm_name}))
response = server.one.vm.snapshotdelete(auth, vm_id, int(snapshot_id))
data = {'action': 'vm.snapshotdelete', 'snapshot_deleted': response[0], 'vm_id': response[1], 'error_code': response[2]}
return data |
def get_zone():
"""make http response to AcraServer api to generate new zone and return tuple
of zone id and public key
"""
response = urlopen('{}/getNewZone'.format(ACRA_CONNECTOR_API_ADDRESS))
json_data = response.read().decode('utf-8')
zone_data = json.loads(json_data)
return zone_data['id'], b64decode(zone_data['public_key']) | def function[get_zone, parameter[]]:
constant[make http response to AcraServer api to generate new zone and return tuple
of zone id and public key
]
variable[response] assign[=] call[name[urlopen], parameter[call[constant[{}/getNewZone].format, parameter[name[ACRA_CONNECTOR_API_ADDRESS]]]]]
variable[json_data] assign[=] call[call[name[response].read, parameter[]].decode, parameter[constant[utf-8]]]
variable[zone_data] assign[=] call[name[json].loads, parameter[name[json_data]]]
return[tuple[[<ast.Subscript object at 0x7da204962920>, <ast.Call object at 0x7da204963ca0>]]] | keyword[def] identifier[get_zone] ():
literal[string]
identifier[response] = identifier[urlopen] ( literal[string] . identifier[format] ( identifier[ACRA_CONNECTOR_API_ADDRESS] ))
identifier[json_data] = identifier[response] . identifier[read] (). identifier[decode] ( literal[string] )
identifier[zone_data] = identifier[json] . identifier[loads] ( identifier[json_data] )
keyword[return] identifier[zone_data] [ literal[string] ], identifier[b64decode] ( identifier[zone_data] [ literal[string] ]) | def get_zone():
"""make http response to AcraServer api to generate new zone and return tuple
of zone id and public key
"""
response = urlopen('{}/getNewZone'.format(ACRA_CONNECTOR_API_ADDRESS))
json_data = response.read().decode('utf-8')
zone_data = json.loads(json_data)
return (zone_data['id'], b64decode(zone_data['public_key'])) |
def respects_language(fun):
"""Decorator for tasks with respect to site's current language.
You can use this decorator on your tasks together with default @task
decorator (remember that the task decorator must be applied last).
See also the with-statement alternative :func:`respect_language`.
**Example**:
.. code-block:: python
@task
@respects_language
def my_task()
# localize something.
The task will then accept a ``language`` argument that will be
used to set the language in the task, and the task can thus be
called like:
.. code-block:: python
from django.utils import translation
from myapp.tasks import my_task
# Pass the current language on to the task
my_task.delay(language=translation.get_language())
# or set the language explicitly
my_task.delay(language='no.no')
"""
@wraps(fun)
def _inner(*args, **kwargs):
with respect_language(kwargs.pop('language', None)):
return fun(*args, **kwargs)
return _inner | def function[respects_language, parameter[fun]]:
constant[Decorator for tasks with respect to site's current language.
You can use this decorator on your tasks together with default @task
decorator (remember that the task decorator must be applied last).
See also the with-statement alternative :func:`respect_language`.
**Example**:
.. code-block:: python
@task
@respects_language
def my_task()
# localize something.
The task will then accept a ``language`` argument that will be
used to set the language in the task, and the task can thus be
called like:
.. code-block:: python
from django.utils import translation
from myapp.tasks import my_task
# Pass the current language on to the task
my_task.delay(language=translation.get_language())
# or set the language explicitly
my_task.delay(language='no.no')
]
def function[_inner, parameter[]]:
with call[name[respect_language], parameter[call[name[kwargs].pop, parameter[constant[language], constant[None]]]]] begin[:]
return[call[name[fun], parameter[<ast.Starred object at 0x7da18f09ee90>]]]
return[name[_inner]] | keyword[def] identifier[respects_language] ( identifier[fun] ):
literal[string]
@ identifier[wraps] ( identifier[fun] )
keyword[def] identifier[_inner] (* identifier[args] ,** identifier[kwargs] ):
keyword[with] identifier[respect_language] ( identifier[kwargs] . identifier[pop] ( literal[string] , keyword[None] )):
keyword[return] identifier[fun] (* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[_inner] | def respects_language(fun):
"""Decorator for tasks with respect to site's current language.
You can use this decorator on your tasks together with default @task
decorator (remember that the task decorator must be applied last).
See also the with-statement alternative :func:`respect_language`.
**Example**:
.. code-block:: python
@task
@respects_language
def my_task()
# localize something.
The task will then accept a ``language`` argument that will be
used to set the language in the task, and the task can thus be
called like:
.. code-block:: python
from django.utils import translation
from myapp.tasks import my_task
# Pass the current language on to the task
my_task.delay(language=translation.get_language())
# or set the language explicitly
my_task.delay(language='no.no')
"""
@wraps(fun)
def _inner(*args, **kwargs):
with respect_language(kwargs.pop('language', None)):
return fun(*args, **kwargs) # depends on [control=['with'], data=[]]
return _inner |
def get_what_follows(strings: Sequence[str],
prefix: str,
onlyatstart: bool = True,
stripwhitespace: bool = True,
precedingline: str = "") -> str:
"""
Find a string in ``strings`` that begins with ``prefix``; return the part
that's after ``prefix``. Optionally, require that the preceding string
(line) is ``precedingline``.
Args:
strings: strings to analyse
prefix: prefix to find
onlyatstart: only accept the prefix if it is right at the start of
``s``
stripwhitespace: remove whitespace from the result
precedingline: if truthy, require that the preceding line be as
specified here
Returns:
the line fragment
"""
if not precedingline:
for s in strings:
(found, result) = get_what_follows_raw(s, prefix, onlyatstart,
stripwhitespace)
if found:
return result
return ""
else:
for i in range(1, len(strings)): # i indexes the second of a pair
if strings[i-1].find(precedingline) == 0:
# ... if found at the start
(found, result) = get_what_follows_raw(strings[i], prefix,
onlyatstart,
stripwhitespace)
if found:
return result
return "" | def function[get_what_follows, parameter[strings, prefix, onlyatstart, stripwhitespace, precedingline]]:
constant[
Find a string in ``strings`` that begins with ``prefix``; return the part
that's after ``prefix``. Optionally, require that the preceding string
(line) is ``precedingline``.
Args:
strings: strings to analyse
prefix: prefix to find
onlyatstart: only accept the prefix if it is right at the start of
``s``
stripwhitespace: remove whitespace from the result
precedingline: if truthy, require that the preceding line be as
specified here
Returns:
the line fragment
]
if <ast.UnaryOp object at 0x7da1b185dea0> begin[:]
for taget[name[s]] in starred[name[strings]] begin[:]
<ast.Tuple object at 0x7da1b185e2f0> assign[=] call[name[get_what_follows_raw], parameter[name[s], name[prefix], name[onlyatstart], name[stripwhitespace]]]
if name[found] begin[:]
return[name[result]]
return[constant[]] | keyword[def] identifier[get_what_follows] ( identifier[strings] : identifier[Sequence] [ identifier[str] ],
identifier[prefix] : identifier[str] ,
identifier[onlyatstart] : identifier[bool] = keyword[True] ,
identifier[stripwhitespace] : identifier[bool] = keyword[True] ,
identifier[precedingline] : identifier[str] = literal[string] )-> identifier[str] :
literal[string]
keyword[if] keyword[not] identifier[precedingline] :
keyword[for] identifier[s] keyword[in] identifier[strings] :
( identifier[found] , identifier[result] )= identifier[get_what_follows_raw] ( identifier[s] , identifier[prefix] , identifier[onlyatstart] ,
identifier[stripwhitespace] )
keyword[if] identifier[found] :
keyword[return] identifier[result]
keyword[return] literal[string]
keyword[else] :
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[strings] )):
keyword[if] identifier[strings] [ identifier[i] - literal[int] ]. identifier[find] ( identifier[precedingline] )== literal[int] :
( identifier[found] , identifier[result] )= identifier[get_what_follows_raw] ( identifier[strings] [ identifier[i] ], identifier[prefix] ,
identifier[onlyatstart] ,
identifier[stripwhitespace] )
keyword[if] identifier[found] :
keyword[return] identifier[result]
keyword[return] literal[string] | def get_what_follows(strings: Sequence[str], prefix: str, onlyatstart: bool=True, stripwhitespace: bool=True, precedingline: str='') -> str:
"""
Find a string in ``strings`` that begins with ``prefix``; return the part
that's after ``prefix``. Optionally, require that the preceding string
(line) is ``precedingline``.
Args:
strings: strings to analyse
prefix: prefix to find
onlyatstart: only accept the prefix if it is right at the start of
``s``
stripwhitespace: remove whitespace from the result
precedingline: if truthy, require that the preceding line be as
specified here
Returns:
the line fragment
"""
if not precedingline:
for s in strings:
(found, result) = get_what_follows_raw(s, prefix, onlyatstart, stripwhitespace)
if found:
return result # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['s']]
return '' # depends on [control=['if'], data=[]]
else:
for i in range(1, len(strings)): # i indexes the second of a pair
if strings[i - 1].find(precedingline) == 0:
# ... if found at the start
(found, result) = get_what_follows_raw(strings[i], prefix, onlyatstart, stripwhitespace)
if found:
return result # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
return '' |
def get_tcntobj(self, go_all, **kws):
"""Get a TermCounts object if the user provides an annotation file, otherwise None."""
# kws: gaf (gene2go taxid)
if 'gaf' in kws or 'gene2go' in kws:
# Get a reduced go2obj set for TermCounts
_gosubdag = GoSubDag(go_all, self.godag, rcntobj=False, prt=None)
return get_tcntobj(_gosubdag.go2obj, **kws) | def function[get_tcntobj, parameter[self, go_all]]:
constant[Get a TermCounts object if the user provides an annotation file, otherwise None.]
if <ast.BoolOp object at 0x7da20c6a8970> begin[:]
variable[_gosubdag] assign[=] call[name[GoSubDag], parameter[name[go_all], name[self].godag]]
return[call[name[get_tcntobj], parameter[name[_gosubdag].go2obj]]] | keyword[def] identifier[get_tcntobj] ( identifier[self] , identifier[go_all] ,** identifier[kws] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[kws] keyword[or] literal[string] keyword[in] identifier[kws] :
identifier[_gosubdag] = identifier[GoSubDag] ( identifier[go_all] , identifier[self] . identifier[godag] , identifier[rcntobj] = keyword[False] , identifier[prt] = keyword[None] )
keyword[return] identifier[get_tcntobj] ( identifier[_gosubdag] . identifier[go2obj] ,** identifier[kws] ) | def get_tcntobj(self, go_all, **kws):
"""Get a TermCounts object if the user provides an annotation file, otherwise None."""
# kws: gaf (gene2go taxid)
if 'gaf' in kws or 'gene2go' in kws:
# Get a reduced go2obj set for TermCounts
_gosubdag = GoSubDag(go_all, self.godag, rcntobj=False, prt=None)
return get_tcntobj(_gosubdag.go2obj, **kws) # depends on [control=['if'], data=[]] |
def boot(app_name) -> Rinzler:
"""
Start Rinzler App
:param app_name: str Application's identifier
:return: dict
"""
app = Rinzler(app_name)
app.log.info("App booted =)")
return app | def function[boot, parameter[app_name]]:
constant[
Start Rinzler App
:param app_name: str Application's identifier
:return: dict
]
variable[app] assign[=] call[name[Rinzler], parameter[name[app_name]]]
call[name[app].log.info, parameter[constant[App booted =)]]]
return[name[app]] | keyword[def] identifier[boot] ( identifier[app_name] )-> identifier[Rinzler] :
literal[string]
identifier[app] = identifier[Rinzler] ( identifier[app_name] )
identifier[app] . identifier[log] . identifier[info] ( literal[string] )
keyword[return] identifier[app] | def boot(app_name) -> Rinzler:
"""
Start Rinzler App
:param app_name: str Application's identifier
:return: dict
"""
app = Rinzler(app_name)
app.log.info('App booted =)')
return app |
def fill_opacity(value):
"""ValueRef : int or float, opacity of the fill (0 to 1)
"""
if value.value:
_assert_is_type('fill_opacity.value', value.value,
(float, int))
if value.value < 0 or value.value > 1:
raise ValueError(
'fill_opacity must be between 0 and 1') | def function[fill_opacity, parameter[value]]:
constant[ValueRef : int or float, opacity of the fill (0 to 1)
]
if name[value].value begin[:]
call[name[_assert_is_type], parameter[constant[fill_opacity.value], name[value].value, tuple[[<ast.Name object at 0x7da18f58cfd0>, <ast.Name object at 0x7da18f58fca0>]]]]
if <ast.BoolOp object at 0x7da18f58d780> begin[:]
<ast.Raise object at 0x7da18f58ed70> | keyword[def] identifier[fill_opacity] ( identifier[value] ):
literal[string]
keyword[if] identifier[value] . identifier[value] :
identifier[_assert_is_type] ( literal[string] , identifier[value] . identifier[value] ,
( identifier[float] , identifier[int] ))
keyword[if] identifier[value] . identifier[value] < literal[int] keyword[or] identifier[value] . identifier[value] > literal[int] :
keyword[raise] identifier[ValueError] (
literal[string] ) | def fill_opacity(value):
"""ValueRef : int or float, opacity of the fill (0 to 1)
"""
if value.value:
_assert_is_type('fill_opacity.value', value.value, (float, int))
if value.value < 0 or value.value > 1:
raise ValueError('fill_opacity must be between 0 and 1') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
def on_connection_unblocked(self, method_frame):
"""When RabbitMQ indicates the connection is unblocked, set the state
appropriately.
:param pika.amqp_object.Method method_frame: Unblocked method frame
"""
LOGGER.debug('Connection unblocked: %r', method_frame)
self.state = self.STATE_READY
if self.on_ready:
self.on_ready(self) | def function[on_connection_unblocked, parameter[self, method_frame]]:
constant[When RabbitMQ indicates the connection is unblocked, set the state
appropriately.
:param pika.amqp_object.Method method_frame: Unblocked method frame
]
call[name[LOGGER].debug, parameter[constant[Connection unblocked: %r], name[method_frame]]]
name[self].state assign[=] name[self].STATE_READY
if name[self].on_ready begin[:]
call[name[self].on_ready, parameter[name[self]]] | keyword[def] identifier[on_connection_unblocked] ( identifier[self] , identifier[method_frame] ):
literal[string]
identifier[LOGGER] . identifier[debug] ( literal[string] , identifier[method_frame] )
identifier[self] . identifier[state] = identifier[self] . identifier[STATE_READY]
keyword[if] identifier[self] . identifier[on_ready] :
identifier[self] . identifier[on_ready] ( identifier[self] ) | def on_connection_unblocked(self, method_frame):
"""When RabbitMQ indicates the connection is unblocked, set the state
appropriately.
:param pika.amqp_object.Method method_frame: Unblocked method frame
"""
LOGGER.debug('Connection unblocked: %r', method_frame)
self.state = self.STATE_READY
if self.on_ready:
self.on_ready(self) # depends on [control=['if'], data=[]] |
def growing_season_length(tas, thresh='5.0 degC', window=6, freq='YS'):
r"""Growing season length.
The number of days between the first occurrence of at least
six consecutive days with mean daily temperature over 5℃ and
the first occurrence of at least six consecutive days with
mean daily temperature below 5℃ after July 1st in the northern
hemisphere and January 1st in the southern hemisphere.
Parameters
---------
tas : xarray.DataArray
Mean daily temperature [℃] or [K]
thresh : str
Threshold temperature on which to base evaluation [℃] or [K]. Default: '5.0 degC'.
window : int
Minimum number of days with temperature above threshold to mark the beginning and end of growing season.
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
Growing season length.
Notes
-----
Let :math:`TG_{ij}` be the mean temperature at day :math:`i` of period :math:`j`. Then counted is
the number of days between the first occurrence of at least 6 consecutive days with:
.. math::
TG_{ij} > 5 ℃
and the first occurrence after 1 July of at least 6 consecutive days with:
.. math::
TG_{ij} < 5 ℃
"""
# i = xr.DataArray(np.arange(tas.time.size), dims='time')
# ind = xr.broadcast(i, tas)[0]
#
# c = ((tas > thresh) * 1).rolling(time=window).sum()
# i1 = ind.where(c == window).resample(time=freq).min(dim='time')
#
# # Resample sets the time to T00:00.
# i11 = i1.reindex_like(c, method='ffill')
#
# # TODO: Adjust for southern hemisphere
#
# #i2 = ind.where(c == 0).where(tas.time.dt.month >= 7)
# # add check to make sure indice of end of growing season is after growing season start
# i2 = ind.where((c==0) & (ind > i11)).where(tas.time.dt.month >= 7)
#
# d = i2 - i11
#
# # take min value (first occurence after july)
# gsl = d.resample(time=freq).min(dim='time')
#
# # turn nan into 0
# gsl = xr.where(np.isnan(gsl), 0, gsl)
# compute growth season length on resampled data
thresh = utils.convert_units_to(thresh, tas)
c = ((tas > thresh) * 1).rolling(time=window).sum().chunk(tas.chunks)
def compute_gsl(c):
nt = c.time.size
i = xr.DataArray(np.arange(nt), dims='time').chunk({'time': 1})
ind = xr.broadcast(i, c)[0].chunk(c.chunks)
i1 = ind.where(c == window).min(dim='time')
i1 = xr.where(np.isnan(i1), nt, i1)
i11 = i1.reindex_like(c, method='ffill')
i2 = ind.where((c == 0) & (ind > i11)).where(c.time.dt.month >= 7)
i2 = xr.where(np.isnan(i2), nt, i2)
d = (i2 - i1).min(dim='time')
return d
gsl = c.resample(time=freq).apply(compute_gsl)
return gsl | def function[growing_season_length, parameter[tas, thresh, window, freq]]:
constant[Growing season length.
The number of days between the first occurrence of at least
six consecutive days with mean daily temperature over 5℃ and
the first occurrence of at least six consecutive days with
mean daily temperature below 5℃ after July 1st in the northern
hemisphere and January 1st in the southern hemisphere.
Parameters
---------
tas : xarray.DataArray
Mean daily temperature [℃] or [K]
thresh : str
Threshold temperature on which to base evaluation [℃] or [K]. Default: '5.0 degC'.
window : int
Minimum number of days with temperature above threshold to mark the beginning and end of growing season.
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
Growing season length.
Notes
-----
Let :math:`TG_{ij}` be the mean temperature at day :math:`i` of period :math:`j`. Then counted is
the number of days between the first occurrence of at least 6 consecutive days with:
.. math::
TG_{ij} > 5 ℃
and the first occurrence after 1 July of at least 6 consecutive days with:
.. math::
TG_{ij} < 5 ℃
]
variable[thresh] assign[=] call[name[utils].convert_units_to, parameter[name[thresh], name[tas]]]
variable[c] assign[=] call[call[call[binary_operation[compare[name[tas] greater[>] name[thresh]] * constant[1]].rolling, parameter[]].sum, parameter[]].chunk, parameter[name[tas].chunks]]
def function[compute_gsl, parameter[c]]:
variable[nt] assign[=] name[c].time.size
variable[i] assign[=] call[call[name[xr].DataArray, parameter[call[name[np].arange, parameter[name[nt]]]]].chunk, parameter[dictionary[[<ast.Constant object at 0x7da18eb55c90>], [<ast.Constant object at 0x7da18eb55ba0>]]]]
variable[ind] assign[=] call[call[call[name[xr].broadcast, parameter[name[i], name[c]]]][constant[0]].chunk, parameter[name[c].chunks]]
variable[i1] assign[=] call[call[name[ind].where, parameter[compare[name[c] equal[==] name[window]]]].min, parameter[]]
variable[i1] assign[=] call[name[xr].where, parameter[call[name[np].isnan, parameter[name[i1]]], name[nt], name[i1]]]
variable[i11] assign[=] call[name[i1].reindex_like, parameter[name[c]]]
variable[i2] assign[=] call[call[name[ind].where, parameter[binary_operation[compare[name[c] equal[==] constant[0]] <ast.BitAnd object at 0x7da2590d6b60> compare[name[ind] greater[>] name[i11]]]]].where, parameter[compare[name[c].time.dt.month greater_or_equal[>=] constant[7]]]]
variable[i2] assign[=] call[name[xr].where, parameter[call[name[np].isnan, parameter[name[i2]]], name[nt], name[i2]]]
variable[d] assign[=] call[binary_operation[name[i2] - name[i1]].min, parameter[]]
return[name[d]]
variable[gsl] assign[=] call[call[name[c].resample, parameter[]].apply, parameter[name[compute_gsl]]]
return[name[gsl]] | keyword[def] identifier[growing_season_length] ( identifier[tas] , identifier[thresh] = literal[string] , identifier[window] = literal[int] , identifier[freq] = literal[string] ):
literal[string]
identifier[thresh] = identifier[utils] . identifier[convert_units_to] ( identifier[thresh] , identifier[tas] )
identifier[c] =(( identifier[tas] > identifier[thresh] )* literal[int] ). identifier[rolling] ( identifier[time] = identifier[window] ). identifier[sum] (). identifier[chunk] ( identifier[tas] . identifier[chunks] )
keyword[def] identifier[compute_gsl] ( identifier[c] ):
identifier[nt] = identifier[c] . identifier[time] . identifier[size]
identifier[i] = identifier[xr] . identifier[DataArray] ( identifier[np] . identifier[arange] ( identifier[nt] ), identifier[dims] = literal[string] ). identifier[chunk] ({ literal[string] : literal[int] })
identifier[ind] = identifier[xr] . identifier[broadcast] ( identifier[i] , identifier[c] )[ literal[int] ]. identifier[chunk] ( identifier[c] . identifier[chunks] )
identifier[i1] = identifier[ind] . identifier[where] ( identifier[c] == identifier[window] ). identifier[min] ( identifier[dim] = literal[string] )
identifier[i1] = identifier[xr] . identifier[where] ( identifier[np] . identifier[isnan] ( identifier[i1] ), identifier[nt] , identifier[i1] )
identifier[i11] = identifier[i1] . identifier[reindex_like] ( identifier[c] , identifier[method] = literal[string] )
identifier[i2] = identifier[ind] . identifier[where] (( identifier[c] == literal[int] )&( identifier[ind] > identifier[i11] )). identifier[where] ( identifier[c] . identifier[time] . identifier[dt] . identifier[month] >= literal[int] )
identifier[i2] = identifier[xr] . identifier[where] ( identifier[np] . identifier[isnan] ( identifier[i2] ), identifier[nt] , identifier[i2] )
identifier[d] =( identifier[i2] - identifier[i1] ). identifier[min] ( identifier[dim] = literal[string] )
keyword[return] identifier[d]
identifier[gsl] = identifier[c] . identifier[resample] ( identifier[time] = identifier[freq] ). identifier[apply] ( identifier[compute_gsl] )
keyword[return] identifier[gsl] | def growing_season_length(tas, thresh='5.0 degC', window=6, freq='YS'):
"""Growing season length.
The number of days between the first occurrence of at least
six consecutive days with mean daily temperature over 5℃ and
the first occurrence of at least six consecutive days with
mean daily temperature below 5℃ after July 1st in the northern
hemisphere and January 1st in the southern hemisphere.
Parameters
---------
tas : xarray.DataArray
Mean daily temperature [℃] or [K]
thresh : str
Threshold temperature on which to base evaluation [℃] or [K]. Default: '5.0 degC'.
window : int
Minimum number of days with temperature above threshold to mark the beginning and end of growing season.
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
Growing season length.
Notes
-----
Let :math:`TG_{ij}` be the mean temperature at day :math:`i` of period :math:`j`. Then counted is
the number of days between the first occurrence of at least 6 consecutive days with:
.. math::
TG_{ij} > 5 ℃
and the first occurrence after 1 July of at least 6 consecutive days with:
.. math::
TG_{ij} < 5 ℃
"""
# i = xr.DataArray(np.arange(tas.time.size), dims='time')
# ind = xr.broadcast(i, tas)[0]
#
# c = ((tas > thresh) * 1).rolling(time=window).sum()
# i1 = ind.where(c == window).resample(time=freq).min(dim='time')
#
# # Resample sets the time to T00:00.
# i11 = i1.reindex_like(c, method='ffill')
#
# # TODO: Adjust for southern hemisphere
#
# #i2 = ind.where(c == 0).where(tas.time.dt.month >= 7)
# # add check to make sure indice of end of growing season is after growing season start
# i2 = ind.where((c==0) & (ind > i11)).where(tas.time.dt.month >= 7)
#
# d = i2 - i11
#
# # take min value (first occurence after july)
# gsl = d.resample(time=freq).min(dim='time')
#
# # turn nan into 0
# gsl = xr.where(np.isnan(gsl), 0, gsl)
# compute growth season length on resampled data
thresh = utils.convert_units_to(thresh, tas)
c = ((tas > thresh) * 1).rolling(time=window).sum().chunk(tas.chunks)
def compute_gsl(c):
nt = c.time.size
i = xr.DataArray(np.arange(nt), dims='time').chunk({'time': 1})
ind = xr.broadcast(i, c)[0].chunk(c.chunks)
i1 = ind.where(c == window).min(dim='time')
i1 = xr.where(np.isnan(i1), nt, i1)
i11 = i1.reindex_like(c, method='ffill')
i2 = ind.where((c == 0) & (ind > i11)).where(c.time.dt.month >= 7)
i2 = xr.where(np.isnan(i2), nt, i2)
d = (i2 - i1).min(dim='time')
return d
gsl = c.resample(time=freq).apply(compute_gsl)
return gsl |
async def wait_slaves(self, timeout, check_ready=False):
"""Wait until all slaves are online (their managers accept connections)
or timeout expires.
:param int timeout:
Timeout (in seconds) after which the method will return even though
all the slaves are not online yet.
:param bool check_ready:
If ``True`` also checks if all slave environment's are ready.
A slave environment is assumed to be ready when its manager's
:meth:`is_ready`-method returns ``True``.
.. seealso::
:meth:`creamas.core.environment.Environment.is_ready`,
:meth:`creamas.mp.EnvManager.is_ready`,
:meth:`creamas.mp.MultiEnvManager.is_ready`
"""
status = 'ready' if check_ready else 'online'
self._log(logging.DEBUG,
"Waiting for slaves to become {}...".format(status))
t = time.monotonic()
online = []
while len(online) < len(self.addrs):
for addr in self.addrs:
if time.monotonic() - t > timeout:
self._log(logging.DEBUG, "Timeout while waiting for the "
"slaves to become {}.".format(status))
return False
if addr not in online:
try:
r_manager = await self.env.connect(addr, timeout)
ready = True
if check_ready:
ready = await r_manager.is_ready()
if ready:
online.append(addr)
self._log(logging.DEBUG, "Slave {}/{} {}: {}"
.format(len(online),
len(self.addrs),
status,
addr))
except:
pass
asyncio.sleep(0.5)
self._log(logging.DEBUG, "All slaves {} in {} seconds!"
.format(status, time.monotonic() - t))
return True | <ast.AsyncFunctionDef object at 0x7da18bcc90c0> | keyword[async] keyword[def] identifier[wait_slaves] ( identifier[self] , identifier[timeout] , identifier[check_ready] = keyword[False] ):
literal[string]
identifier[status] = literal[string] keyword[if] identifier[check_ready] keyword[else] literal[string]
identifier[self] . identifier[_log] ( identifier[logging] . identifier[DEBUG] ,
literal[string] . identifier[format] ( identifier[status] ))
identifier[t] = identifier[time] . identifier[monotonic] ()
identifier[online] =[]
keyword[while] identifier[len] ( identifier[online] )< identifier[len] ( identifier[self] . identifier[addrs] ):
keyword[for] identifier[addr] keyword[in] identifier[self] . identifier[addrs] :
keyword[if] identifier[time] . identifier[monotonic] ()- identifier[t] > identifier[timeout] :
identifier[self] . identifier[_log] ( identifier[logging] . identifier[DEBUG] , literal[string]
literal[string] . identifier[format] ( identifier[status] ))
keyword[return] keyword[False]
keyword[if] identifier[addr] keyword[not] keyword[in] identifier[online] :
keyword[try] :
identifier[r_manager] = keyword[await] identifier[self] . identifier[env] . identifier[connect] ( identifier[addr] , identifier[timeout] )
identifier[ready] = keyword[True]
keyword[if] identifier[check_ready] :
identifier[ready] = keyword[await] identifier[r_manager] . identifier[is_ready] ()
keyword[if] identifier[ready] :
identifier[online] . identifier[append] ( identifier[addr] )
identifier[self] . identifier[_log] ( identifier[logging] . identifier[DEBUG] , literal[string]
. identifier[format] ( identifier[len] ( identifier[online] ),
identifier[len] ( identifier[self] . identifier[addrs] ),
identifier[status] ,
identifier[addr] ))
keyword[except] :
keyword[pass]
identifier[asyncio] . identifier[sleep] ( literal[int] )
identifier[self] . identifier[_log] ( identifier[logging] . identifier[DEBUG] , literal[string]
. identifier[format] ( identifier[status] , identifier[time] . identifier[monotonic] ()- identifier[t] ))
keyword[return] keyword[True] | async def wait_slaves(self, timeout, check_ready=False):
"""Wait until all slaves are online (their managers accept connections)
or timeout expires.
:param int timeout:
Timeout (in seconds) after which the method will return even though
all the slaves are not online yet.
:param bool check_ready:
If ``True`` also checks if all slave environment's are ready.
A slave environment is assumed to be ready when its manager's
:meth:`is_ready`-method returns ``True``.
.. seealso::
:meth:`creamas.core.environment.Environment.is_ready`,
:meth:`creamas.mp.EnvManager.is_ready`,
:meth:`creamas.mp.MultiEnvManager.is_ready`
"""
status = 'ready' if check_ready else 'online'
self._log(logging.DEBUG, 'Waiting for slaves to become {}...'.format(status))
t = time.monotonic()
online = []
while len(online) < len(self.addrs):
for addr in self.addrs:
if time.monotonic() - t > timeout:
self._log(logging.DEBUG, 'Timeout while waiting for the slaves to become {}.'.format(status))
return False # depends on [control=['if'], data=[]]
if addr not in online:
try:
r_manager = await self.env.connect(addr, timeout)
ready = True
if check_ready:
ready = await r_manager.is_ready() # depends on [control=['if'], data=[]]
if ready:
online.append(addr)
self._log(logging.DEBUG, 'Slave {}/{} {}: {}'.format(len(online), len(self.addrs), status, addr)) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['addr', 'online']] # depends on [control=['for'], data=['addr']]
asyncio.sleep(0.5) # depends on [control=['while'], data=[]]
self._log(logging.DEBUG, 'All slaves {} in {} seconds!'.format(status, time.monotonic() - t))
return True |
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
if dmin > dmax:
dmax, dmin = dmin, dmax
# We need to cap at the endpoints of valid datetime
# TODO(wesm): unused?
# delta = relativedelta(dmax, dmin)
# try:
# start = dmin - delta
# except ValueError:
# start = _from_ordinal(1.0)
# try:
# stop = dmax + delta
# except ValueError:
# # The magic number!
# stop = _from_ordinal(3652059.9999999)
dmin, dmax = self.datalim_to_dt()
vmin = dates.date2num(dmin)
vmax = dates.date2num(dmax)
return self.nonsingular(vmin, vmax) | def function[autoscale, parameter[self]]:
constant[
Set the view limits to include the data range.
]
<ast.Tuple object at 0x7da18ede67d0> assign[=] call[name[self].datalim_to_dt, parameter[]]
if compare[name[dmin] greater[>] name[dmax]] begin[:]
<ast.Tuple object at 0x7da18ede7370> assign[=] tuple[[<ast.Name object at 0x7da18ede5ed0>, <ast.Name object at 0x7da18ede74f0>]]
<ast.Tuple object at 0x7da20e9b1570> assign[=] call[name[self].datalim_to_dt, parameter[]]
variable[vmin] assign[=] call[name[dates].date2num, parameter[name[dmin]]]
variable[vmax] assign[=] call[name[dates].date2num, parameter[name[dmax]]]
return[call[name[self].nonsingular, parameter[name[vmin], name[vmax]]]] | keyword[def] identifier[autoscale] ( identifier[self] ):
literal[string]
identifier[dmin] , identifier[dmax] = identifier[self] . identifier[datalim_to_dt] ()
keyword[if] identifier[dmin] > identifier[dmax] :
identifier[dmax] , identifier[dmin] = identifier[dmin] , identifier[dmax]
identifier[dmin] , identifier[dmax] = identifier[self] . identifier[datalim_to_dt] ()
identifier[vmin] = identifier[dates] . identifier[date2num] ( identifier[dmin] )
identifier[vmax] = identifier[dates] . identifier[date2num] ( identifier[dmax] )
keyword[return] identifier[self] . identifier[nonsingular] ( identifier[vmin] , identifier[vmax] ) | def autoscale(self):
"""
Set the view limits to include the data range.
"""
(dmin, dmax) = self.datalim_to_dt()
if dmin > dmax:
(dmax, dmin) = (dmin, dmax) # depends on [control=['if'], data=['dmin', 'dmax']]
# We need to cap at the endpoints of valid datetime
# TODO(wesm): unused?
# delta = relativedelta(dmax, dmin)
# try:
# start = dmin - delta
# except ValueError:
# start = _from_ordinal(1.0)
# try:
# stop = dmax + delta
# except ValueError:
# # The magic number!
# stop = _from_ordinal(3652059.9999999)
(dmin, dmax) = self.datalim_to_dt()
vmin = dates.date2num(dmin)
vmax = dates.date2num(dmax)
return self.nonsingular(vmin, vmax) |
def flush(self):
"""Flush all pending gauges"""
writer = self.writer
if writer is None:
raise GaugedUseAfterFreeError
self.flush_writer_position()
keys = self.translate_keys()
blocks = []
current_block = self.current_block
statistics = self.statistics
driver = self.driver
flags = 0 # for future extensions, e.g. block compression
for namespace, key, block in self.pending_blocks():
length = block.byte_length()
if not length:
continue
key_id = keys[(namespace, key)]
statistics[namespace].byte_count += length
blocks.append((namespace, current_block, key_id, block.buffer(),
flags))
if self.config.overwrite_blocks:
driver.replace_blocks(blocks)
else:
driver.insert_or_append_blocks(blocks)
if not Gauged.writer_flush_maps(writer, True):
raise MemoryError
update_namespace = driver.add_namespace_statistics
for namespace, stats in statistics.iteritems():
update_namespace(namespace, self.current_block,
stats.data_points, stats.byte_count)
statistics.clear()
driver.commit()
self.flush_now = False | def function[flush, parameter[self]]:
constant[Flush all pending gauges]
variable[writer] assign[=] name[self].writer
if compare[name[writer] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b24eba30>
call[name[self].flush_writer_position, parameter[]]
variable[keys] assign[=] call[name[self].translate_keys, parameter[]]
variable[blocks] assign[=] list[[]]
variable[current_block] assign[=] name[self].current_block
variable[statistics] assign[=] name[self].statistics
variable[driver] assign[=] name[self].driver
variable[flags] assign[=] constant[0]
for taget[tuple[[<ast.Name object at 0x7da1b24ea260>, <ast.Name object at 0x7da1b24e9000>, <ast.Name object at 0x7da1b24e9450>]]] in starred[call[name[self].pending_blocks, parameter[]]] begin[:]
variable[length] assign[=] call[name[block].byte_length, parameter[]]
if <ast.UnaryOp object at 0x7da1b24e8220> begin[:]
continue
variable[key_id] assign[=] call[name[keys]][tuple[[<ast.Name object at 0x7da1b24e99c0>, <ast.Name object at 0x7da1b24eb1f0>]]]
<ast.AugAssign object at 0x7da1b24eb8b0>
call[name[blocks].append, parameter[tuple[[<ast.Name object at 0x7da1b24ead40>, <ast.Name object at 0x7da1b24e9ae0>, <ast.Name object at 0x7da1b24e8340>, <ast.Call object at 0x7da1b24e9f00>, <ast.Name object at 0x7da1b24e8790>]]]]
if name[self].config.overwrite_blocks begin[:]
call[name[driver].replace_blocks, parameter[name[blocks]]]
variable[update_namespace] assign[=] name[driver].add_namespace_statistics
for taget[tuple[[<ast.Name object at 0x7da1b24ebf10>, <ast.Name object at 0x7da1b24e9b40>]]] in starred[call[name[statistics].iteritems, parameter[]]] begin[:]
call[name[update_namespace], parameter[name[namespace], name[self].current_block, name[stats].data_points, name[stats].byte_count]]
call[name[statistics].clear, parameter[]]
call[name[driver].commit, parameter[]]
name[self].flush_now assign[=] constant[False] | keyword[def] identifier[flush] ( identifier[self] ):
literal[string]
identifier[writer] = identifier[self] . identifier[writer]
keyword[if] identifier[writer] keyword[is] keyword[None] :
keyword[raise] identifier[GaugedUseAfterFreeError]
identifier[self] . identifier[flush_writer_position] ()
identifier[keys] = identifier[self] . identifier[translate_keys] ()
identifier[blocks] =[]
identifier[current_block] = identifier[self] . identifier[current_block]
identifier[statistics] = identifier[self] . identifier[statistics]
identifier[driver] = identifier[self] . identifier[driver]
identifier[flags] = literal[int]
keyword[for] identifier[namespace] , identifier[key] , identifier[block] keyword[in] identifier[self] . identifier[pending_blocks] ():
identifier[length] = identifier[block] . identifier[byte_length] ()
keyword[if] keyword[not] identifier[length] :
keyword[continue]
identifier[key_id] = identifier[keys] [( identifier[namespace] , identifier[key] )]
identifier[statistics] [ identifier[namespace] ]. identifier[byte_count] += identifier[length]
identifier[blocks] . identifier[append] (( identifier[namespace] , identifier[current_block] , identifier[key_id] , identifier[block] . identifier[buffer] (),
identifier[flags] ))
keyword[if] identifier[self] . identifier[config] . identifier[overwrite_blocks] :
identifier[driver] . identifier[replace_blocks] ( identifier[blocks] )
keyword[else] :
identifier[driver] . identifier[insert_or_append_blocks] ( identifier[blocks] )
keyword[if] keyword[not] identifier[Gauged] . identifier[writer_flush_maps] ( identifier[writer] , keyword[True] ):
keyword[raise] identifier[MemoryError]
identifier[update_namespace] = identifier[driver] . identifier[add_namespace_statistics]
keyword[for] identifier[namespace] , identifier[stats] keyword[in] identifier[statistics] . identifier[iteritems] ():
identifier[update_namespace] ( identifier[namespace] , identifier[self] . identifier[current_block] ,
identifier[stats] . identifier[data_points] , identifier[stats] . identifier[byte_count] )
identifier[statistics] . identifier[clear] ()
identifier[driver] . identifier[commit] ()
identifier[self] . identifier[flush_now] = keyword[False] | def flush(self):
"""Flush all pending gauges"""
writer = self.writer
if writer is None:
raise GaugedUseAfterFreeError # depends on [control=['if'], data=[]]
self.flush_writer_position()
keys = self.translate_keys()
blocks = []
current_block = self.current_block
statistics = self.statistics
driver = self.driver
flags = 0 # for future extensions, e.g. block compression
for (namespace, key, block) in self.pending_blocks():
length = block.byte_length()
if not length:
continue # depends on [control=['if'], data=[]]
key_id = keys[namespace, key]
statistics[namespace].byte_count += length
blocks.append((namespace, current_block, key_id, block.buffer(), flags)) # depends on [control=['for'], data=[]]
if self.config.overwrite_blocks:
driver.replace_blocks(blocks) # depends on [control=['if'], data=[]]
else:
driver.insert_or_append_blocks(blocks)
if not Gauged.writer_flush_maps(writer, True):
raise MemoryError # depends on [control=['if'], data=[]]
update_namespace = driver.add_namespace_statistics
for (namespace, stats) in statistics.iteritems():
update_namespace(namespace, self.current_block, stats.data_points, stats.byte_count) # depends on [control=['for'], data=[]]
statistics.clear()
driver.commit()
self.flush_now = False |
def _pull(self,
file_name,
names,
save=True,
force=False,
uri="docker://",
**kwargs):
'''pull an image from a docker hub. This is a (less than ideal) workaround
that actually does the following:
- creates a sandbox folder
- adds docker layers, metadata folder, and custom metadata to it
- converts to a squashfs image with build
the docker manifests are stored with registry metadata.
Parameters
==========
images: refers to the uri given by the user to pull in the format
<collection>/<namespace>. You should have an API that is able to
retrieve a container based on parsing this uri.
file_name: the user's requested name for the file. It can
optionally be None if the user wants a default.
save: if True, you should save the container to the database
using self.add()
Returns
=======
finished: a single container path, or list of paths
'''
# Use Singularity to build the image, based on user preference
if file_name is None:
file_name = self._get_storage_name(names)
# Determine if the user already has the image
if os.path.exists(file_name) and force is False:
bot.error('Image exists! Remove first, or use --force to overwrite')
sys.exit(1)
digest = names['version'] or names['tag']
# Build from sandbox, prefix with sandbox
sandbox = get_tmpdir(prefix="sregistry-sandbox")
# First effort, get image via Sregistry
layers = self._download_layers(names['url'], digest)
# This is the url where the manifests were obtained
url = self._get_manifest_selfLink(names['url'], digest)
# Add environment to the layers
envtar = self._get_environment_tar()
layers = [envtar] + layers
# Create singularity image from an empty folder
for layer in layers:
bot.info('Exploding %s' %layer)
result = extract_tar(layer, sandbox, handle_whiteout=True)
if result['return_code'] != 0:
bot.error(result['message'])
sys.exit(1)
sudo = kwargs.get('sudo', False)
# Build from a sandbox (recipe) into the image_file (squashfs)
image_file = Singularity.build(image=file_name,
recipe=sandbox,
sudo=sudo)
# Fall back to using Singularity
if image_file is None:
bot.info('Downloading with native Singularity, please wait...')
image = image.replace('docker://', uri)
image_file = Singularity.pull(image, pull_folder=sandbox)
# Save to local storage
if save is True:
# Did we get the manifests?
manifests = {}
if hasattr(self, 'manifests'):
manifests = self.manifests
container = self.add(image_path = image_file,
image_uri = names['uri'],
metadata = manifests,
url = url)
# When the container is created, this is the path to the image
image_file = container.image
if os.path.exists(image_file):
bot.debug('Retrieved image file %s' %image_file)
bot.custom(prefix="Success!", message=image_file)
# Clean up sandbox
shutil.rmtree(sandbox)
return image_file | def function[_pull, parameter[self, file_name, names, save, force, uri]]:
constant[pull an image from a docker hub. This is a (less than ideal) workaround
that actually does the following:
- creates a sandbox folder
- adds docker layers, metadata folder, and custom metadata to it
- converts to a squashfs image with build
the docker manifests are stored with registry metadata.
Parameters
==========
images: refers to the uri given by the user to pull in the format
<collection>/<namespace>. You should have an API that is able to
retrieve a container based on parsing this uri.
file_name: the user's requested name for the file. It can
optionally be None if the user wants a default.
save: if True, you should save the container to the database
using self.add()
Returns
=======
finished: a single container path, or list of paths
]
if compare[name[file_name] is constant[None]] begin[:]
variable[file_name] assign[=] call[name[self]._get_storage_name, parameter[name[names]]]
if <ast.BoolOp object at 0x7da1b03ba710> begin[:]
call[name[bot].error, parameter[constant[Image exists! Remove first, or use --force to overwrite]]]
call[name[sys].exit, parameter[constant[1]]]
variable[digest] assign[=] <ast.BoolOp object at 0x7da1b03b9cc0>
variable[sandbox] assign[=] call[name[get_tmpdir], parameter[]]
variable[layers] assign[=] call[name[self]._download_layers, parameter[call[name[names]][constant[url]], name[digest]]]
variable[url] assign[=] call[name[self]._get_manifest_selfLink, parameter[call[name[names]][constant[url]], name[digest]]]
variable[envtar] assign[=] call[name[self]._get_environment_tar, parameter[]]
variable[layers] assign[=] binary_operation[list[[<ast.Name object at 0x7da1b03ba980>]] + name[layers]]
for taget[name[layer]] in starred[name[layers]] begin[:]
call[name[bot].info, parameter[binary_operation[constant[Exploding %s] <ast.Mod object at 0x7da2590d6920> name[layer]]]]
variable[result] assign[=] call[name[extract_tar], parameter[name[layer], name[sandbox]]]
if compare[call[name[result]][constant[return_code]] not_equal[!=] constant[0]] begin[:]
call[name[bot].error, parameter[call[name[result]][constant[message]]]]
call[name[sys].exit, parameter[constant[1]]]
variable[sudo] assign[=] call[name[kwargs].get, parameter[constant[sudo], constant[False]]]
variable[image_file] assign[=] call[name[Singularity].build, parameter[]]
if compare[name[image_file] is constant[None]] begin[:]
call[name[bot].info, parameter[constant[Downloading with native Singularity, please wait...]]]
variable[image] assign[=] call[name[image].replace, parameter[constant[docker://], name[uri]]]
variable[image_file] assign[=] call[name[Singularity].pull, parameter[name[image]]]
if compare[name[save] is constant[True]] begin[:]
variable[manifests] assign[=] dictionary[[], []]
if call[name[hasattr], parameter[name[self], constant[manifests]]] begin[:]
variable[manifests] assign[=] name[self].manifests
variable[container] assign[=] call[name[self].add, parameter[]]
variable[image_file] assign[=] name[container].image
if call[name[os].path.exists, parameter[name[image_file]]] begin[:]
call[name[bot].debug, parameter[binary_operation[constant[Retrieved image file %s] <ast.Mod object at 0x7da2590d6920> name[image_file]]]]
call[name[bot].custom, parameter[]]
call[name[shutil].rmtree, parameter[name[sandbox]]]
return[name[image_file]] | keyword[def] identifier[_pull] ( identifier[self] ,
identifier[file_name] ,
identifier[names] ,
identifier[save] = keyword[True] ,
identifier[force] = keyword[False] ,
identifier[uri] = literal[string] ,
** identifier[kwargs] ):
literal[string]
keyword[if] identifier[file_name] keyword[is] keyword[None] :
identifier[file_name] = identifier[self] . identifier[_get_storage_name] ( identifier[names] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[file_name] ) keyword[and] identifier[force] keyword[is] keyword[False] :
identifier[bot] . identifier[error] ( literal[string] )
identifier[sys] . identifier[exit] ( literal[int] )
identifier[digest] = identifier[names] [ literal[string] ] keyword[or] identifier[names] [ literal[string] ]
identifier[sandbox] = identifier[get_tmpdir] ( identifier[prefix] = literal[string] )
identifier[layers] = identifier[self] . identifier[_download_layers] ( identifier[names] [ literal[string] ], identifier[digest] )
identifier[url] = identifier[self] . identifier[_get_manifest_selfLink] ( identifier[names] [ literal[string] ], identifier[digest] )
identifier[envtar] = identifier[self] . identifier[_get_environment_tar] ()
identifier[layers] =[ identifier[envtar] ]+ identifier[layers]
keyword[for] identifier[layer] keyword[in] identifier[layers] :
identifier[bot] . identifier[info] ( literal[string] % identifier[layer] )
identifier[result] = identifier[extract_tar] ( identifier[layer] , identifier[sandbox] , identifier[handle_whiteout] = keyword[True] )
keyword[if] identifier[result] [ literal[string] ]!= literal[int] :
identifier[bot] . identifier[error] ( identifier[result] [ literal[string] ])
identifier[sys] . identifier[exit] ( literal[int] )
identifier[sudo] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[False] )
identifier[image_file] = identifier[Singularity] . identifier[build] ( identifier[image] = identifier[file_name] ,
identifier[recipe] = identifier[sandbox] ,
identifier[sudo] = identifier[sudo] )
keyword[if] identifier[image_file] keyword[is] keyword[None] :
identifier[bot] . identifier[info] ( literal[string] )
identifier[image] = identifier[image] . identifier[replace] ( literal[string] , identifier[uri] )
identifier[image_file] = identifier[Singularity] . identifier[pull] ( identifier[image] , identifier[pull_folder] = identifier[sandbox] )
keyword[if] identifier[save] keyword[is] keyword[True] :
identifier[manifests] ={}
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[manifests] = identifier[self] . identifier[manifests]
identifier[container] = identifier[self] . identifier[add] ( identifier[image_path] = identifier[image_file] ,
identifier[image_uri] = identifier[names] [ literal[string] ],
identifier[metadata] = identifier[manifests] ,
identifier[url] = identifier[url] )
identifier[image_file] = identifier[container] . identifier[image]
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[image_file] ):
identifier[bot] . identifier[debug] ( literal[string] % identifier[image_file] )
identifier[bot] . identifier[custom] ( identifier[prefix] = literal[string] , identifier[message] = identifier[image_file] )
identifier[shutil] . identifier[rmtree] ( identifier[sandbox] )
keyword[return] identifier[image_file] | def _pull(self, file_name, names, save=True, force=False, uri='docker://', **kwargs):
"""pull an image from a docker hub. This is a (less than ideal) workaround
that actually does the following:
- creates a sandbox folder
- adds docker layers, metadata folder, and custom metadata to it
- converts to a squashfs image with build
the docker manifests are stored with registry metadata.
Parameters
==========
images: refers to the uri given by the user to pull in the format
<collection>/<namespace>. You should have an API that is able to
retrieve a container based on parsing this uri.
file_name: the user's requested name for the file. It can
optionally be None if the user wants a default.
save: if True, you should save the container to the database
using self.add()
Returns
=======
finished: a single container path, or list of paths
"""
# Use Singularity to build the image, based on user preference
if file_name is None:
file_name = self._get_storage_name(names) # depends on [control=['if'], data=['file_name']]
# Determine if the user already has the image
if os.path.exists(file_name) and force is False:
bot.error('Image exists! Remove first, or use --force to overwrite')
sys.exit(1) # depends on [control=['if'], data=[]]
digest = names['version'] or names['tag']
# Build from sandbox, prefix with sandbox
sandbox = get_tmpdir(prefix='sregistry-sandbox')
# First effort, get image via Sregistry
layers = self._download_layers(names['url'], digest)
# This is the url where the manifests were obtained
url = self._get_manifest_selfLink(names['url'], digest)
# Add environment to the layers
envtar = self._get_environment_tar()
layers = [envtar] + layers
# Create singularity image from an empty folder
for layer in layers:
bot.info('Exploding %s' % layer)
result = extract_tar(layer, sandbox, handle_whiteout=True)
if result['return_code'] != 0:
bot.error(result['message'])
sys.exit(1) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['layer']]
sudo = kwargs.get('sudo', False)
# Build from a sandbox (recipe) into the image_file (squashfs)
image_file = Singularity.build(image=file_name, recipe=sandbox, sudo=sudo)
# Fall back to using Singularity
if image_file is None:
bot.info('Downloading with native Singularity, please wait...')
image = image.replace('docker://', uri)
image_file = Singularity.pull(image, pull_folder=sandbox) # depends on [control=['if'], data=['image_file']]
# Save to local storage
if save is True:
# Did we get the manifests?
manifests = {}
if hasattr(self, 'manifests'):
manifests = self.manifests # depends on [control=['if'], data=[]]
container = self.add(image_path=image_file, image_uri=names['uri'], metadata=manifests, url=url)
# When the container is created, this is the path to the image
image_file = container.image # depends on [control=['if'], data=[]]
if os.path.exists(image_file):
bot.debug('Retrieved image file %s' % image_file)
bot.custom(prefix='Success!', message=image_file) # depends on [control=['if'], data=[]]
# Clean up sandbox
shutil.rmtree(sandbox)
return image_file |
def parse(self, content):
"""Parse raw response content for a list of remote artifact cache URLs.
:API: public
"""
if self.format == 'json_map':
try:
return assert_list(json.loads(content.decode(self.encoding))[self.index])
except (KeyError, UnicodeDecodeError, ValueError) as e:
raise self.ResponseParserError("Error while parsing response content: {0}".format(str(e)))
# Should never get here.
raise ValueError('Unknown content format: "{}"'.format(self.format)) | def function[parse, parameter[self, content]]:
constant[Parse raw response content for a list of remote artifact cache URLs.
:API: public
]
if compare[name[self].format equal[==] constant[json_map]] begin[:]
<ast.Try object at 0x7da1b1e01930>
<ast.Raise object at 0x7da1b1e001f0> | keyword[def] identifier[parse] ( identifier[self] , identifier[content] ):
literal[string]
keyword[if] identifier[self] . identifier[format] == literal[string] :
keyword[try] :
keyword[return] identifier[assert_list] ( identifier[json] . identifier[loads] ( identifier[content] . identifier[decode] ( identifier[self] . identifier[encoding] ))[ identifier[self] . identifier[index] ])
keyword[except] ( identifier[KeyError] , identifier[UnicodeDecodeError] , identifier[ValueError] ) keyword[as] identifier[e] :
keyword[raise] identifier[self] . identifier[ResponseParserError] ( literal[string] . identifier[format] ( identifier[str] ( identifier[e] )))
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[self] . identifier[format] )) | def parse(self, content):
"""Parse raw response content for a list of remote artifact cache URLs.
:API: public
"""
if self.format == 'json_map':
try:
return assert_list(json.loads(content.decode(self.encoding))[self.index]) # depends on [control=['try'], data=[]]
except (KeyError, UnicodeDecodeError, ValueError) as e:
raise self.ResponseParserError('Error while parsing response content: {0}'.format(str(e))) # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]]
# Should never get here.
raise ValueError('Unknown content format: "{}"'.format(self.format)) |
def decimal_default(obj):
"""Properly parse out the Decimal datatypes into proper int/float types."""
if isinstance(obj, decimal.Decimal):
if obj % 1:
return float(obj)
return int(obj)
raise TypeError | def function[decimal_default, parameter[obj]]:
constant[Properly parse out the Decimal datatypes into proper int/float types.]
if call[name[isinstance], parameter[name[obj], name[decimal].Decimal]] begin[:]
if binary_operation[name[obj] <ast.Mod object at 0x7da2590d6920> constant[1]] begin[:]
return[call[name[float], parameter[name[obj]]]]
return[call[name[int], parameter[name[obj]]]]
<ast.Raise object at 0x7da1b1206110> | keyword[def] identifier[decimal_default] ( identifier[obj] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[decimal] . identifier[Decimal] ):
keyword[if] identifier[obj] % literal[int] :
keyword[return] identifier[float] ( identifier[obj] )
keyword[return] identifier[int] ( identifier[obj] )
keyword[raise] identifier[TypeError] | def decimal_default(obj):
"""Properly parse out the Decimal datatypes into proper int/float types."""
if isinstance(obj, decimal.Decimal):
if obj % 1:
return float(obj) # depends on [control=['if'], data=[]]
return int(obj) # depends on [control=['if'], data=[]]
raise TypeError |
def main(nodes, edges):
"""Generate a random graph, submit jobs, then validate that the
dependency order was enforced.
Finally, plot the graph, with time on the x-axis, and
in-degree on the y (just for spread). All arrows must
point at least slightly to the right if the graph is valid.
"""
from matplotlib import pyplot as plt
from matplotlib.dates import date2num
from matplotlib.cm import gist_rainbow
print("building DAG")
G = random_dag(nodes, edges)
jobs = {}
pos = {}
colors = {}
for node in G:
jobs[node] = randomwait
client = parallel.Client()
view = client.load_balanced_view()
print("submitting %i tasks with %i dependencies"%(nodes,edges))
results = submit_jobs(view, G, jobs)
print("waiting for results")
view.wait()
print("done")
for node in G:
md = results[node].metadata
start = date2num(md.started)
runtime = date2num(md.completed) - start
pos[node] = (start, runtime)
colors[node] = md.engine_id
validate_tree(G, results)
nx.draw(G, pos, node_list=colors.keys(), node_color=colors.values(), cmap=gist_rainbow,
with_labels=False)
x,y = zip(*pos.values())
xmin,ymin = map(min, (x,y))
xmax,ymax = map(max, (x,y))
xscale = xmax-xmin
yscale = ymax-ymin
plt.xlim(xmin-xscale*.1,xmax+xscale*.1)
plt.ylim(ymin-yscale*.1,ymax+yscale*.1)
return G,results | def function[main, parameter[nodes, edges]]:
constant[Generate a random graph, submit jobs, then validate that the
dependency order was enforced.
Finally, plot the graph, with time on the x-axis, and
in-degree on the y (just for spread). All arrows must
point at least slightly to the right if the graph is valid.
]
from relative_module[matplotlib] import module[pyplot]
from relative_module[matplotlib.dates] import module[date2num]
from relative_module[matplotlib.cm] import module[gist_rainbow]
call[name[print], parameter[constant[building DAG]]]
variable[G] assign[=] call[name[random_dag], parameter[name[nodes], name[edges]]]
variable[jobs] assign[=] dictionary[[], []]
variable[pos] assign[=] dictionary[[], []]
variable[colors] assign[=] dictionary[[], []]
for taget[name[node]] in starred[name[G]] begin[:]
call[name[jobs]][name[node]] assign[=] name[randomwait]
variable[client] assign[=] call[name[parallel].Client, parameter[]]
variable[view] assign[=] call[name[client].load_balanced_view, parameter[]]
call[name[print], parameter[binary_operation[constant[submitting %i tasks with %i dependencies] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20e960d90>, <ast.Name object at 0x7da20e9621a0>]]]]]
variable[results] assign[=] call[name[submit_jobs], parameter[name[view], name[G], name[jobs]]]
call[name[print], parameter[constant[waiting for results]]]
call[name[view].wait, parameter[]]
call[name[print], parameter[constant[done]]]
for taget[name[node]] in starred[name[G]] begin[:]
variable[md] assign[=] call[name[results]][name[node]].metadata
variable[start] assign[=] call[name[date2num], parameter[name[md].started]]
variable[runtime] assign[=] binary_operation[call[name[date2num], parameter[name[md].completed]] - name[start]]
call[name[pos]][name[node]] assign[=] tuple[[<ast.Name object at 0x7da1b26af8e0>, <ast.Name object at 0x7da1b26ad660>]]
call[name[colors]][name[node]] assign[=] name[md].engine_id
call[name[validate_tree], parameter[name[G], name[results]]]
call[name[nx].draw, parameter[name[G], name[pos]]]
<ast.Tuple object at 0x7da1b26aded0> assign[=] call[name[zip], parameter[<ast.Starred object at 0x7da20e961b70>]]
<ast.Tuple object at 0x7da20e963a90> assign[=] call[name[map], parameter[name[min], tuple[[<ast.Name object at 0x7da20e961cc0>, <ast.Name object at 0x7da20e963280>]]]]
<ast.Tuple object at 0x7da20e963b80> assign[=] call[name[map], parameter[name[max], tuple[[<ast.Name object at 0x7da20e9626b0>, <ast.Name object at 0x7da20e963f70>]]]]
variable[xscale] assign[=] binary_operation[name[xmax] - name[xmin]]
variable[yscale] assign[=] binary_operation[name[ymax] - name[ymin]]
call[name[plt].xlim, parameter[binary_operation[name[xmin] - binary_operation[name[xscale] * constant[0.1]]], binary_operation[name[xmax] + binary_operation[name[xscale] * constant[0.1]]]]]
call[name[plt].ylim, parameter[binary_operation[name[ymin] - binary_operation[name[yscale] * constant[0.1]]], binary_operation[name[ymax] + binary_operation[name[yscale] * constant[0.1]]]]]
return[tuple[[<ast.Name object at 0x7da20c76f7f0>, <ast.Name object at 0x7da20c76f160>]]] | keyword[def] identifier[main] ( identifier[nodes] , identifier[edges] ):
literal[string]
keyword[from] identifier[matplotlib] keyword[import] identifier[pyplot] keyword[as] identifier[plt]
keyword[from] identifier[matplotlib] . identifier[dates] keyword[import] identifier[date2num]
keyword[from] identifier[matplotlib] . identifier[cm] keyword[import] identifier[gist_rainbow]
identifier[print] ( literal[string] )
identifier[G] = identifier[random_dag] ( identifier[nodes] , identifier[edges] )
identifier[jobs] ={}
identifier[pos] ={}
identifier[colors] ={}
keyword[for] identifier[node] keyword[in] identifier[G] :
identifier[jobs] [ identifier[node] ]= identifier[randomwait]
identifier[client] = identifier[parallel] . identifier[Client] ()
identifier[view] = identifier[client] . identifier[load_balanced_view] ()
identifier[print] ( literal[string] %( identifier[nodes] , identifier[edges] ))
identifier[results] = identifier[submit_jobs] ( identifier[view] , identifier[G] , identifier[jobs] )
identifier[print] ( literal[string] )
identifier[view] . identifier[wait] ()
identifier[print] ( literal[string] )
keyword[for] identifier[node] keyword[in] identifier[G] :
identifier[md] = identifier[results] [ identifier[node] ]. identifier[metadata]
identifier[start] = identifier[date2num] ( identifier[md] . identifier[started] )
identifier[runtime] = identifier[date2num] ( identifier[md] . identifier[completed] )- identifier[start]
identifier[pos] [ identifier[node] ]=( identifier[start] , identifier[runtime] )
identifier[colors] [ identifier[node] ]= identifier[md] . identifier[engine_id]
identifier[validate_tree] ( identifier[G] , identifier[results] )
identifier[nx] . identifier[draw] ( identifier[G] , identifier[pos] , identifier[node_list] = identifier[colors] . identifier[keys] (), identifier[node_color] = identifier[colors] . identifier[values] (), identifier[cmap] = identifier[gist_rainbow] ,
identifier[with_labels] = keyword[False] )
identifier[x] , identifier[y] = identifier[zip] (* identifier[pos] . identifier[values] ())
identifier[xmin] , identifier[ymin] = identifier[map] ( identifier[min] ,( identifier[x] , identifier[y] ))
identifier[xmax] , identifier[ymax] = identifier[map] ( identifier[max] ,( identifier[x] , identifier[y] ))
identifier[xscale] = identifier[xmax] - identifier[xmin]
identifier[yscale] = identifier[ymax] - identifier[ymin]
identifier[plt] . identifier[xlim] ( identifier[xmin] - identifier[xscale] * literal[int] , identifier[xmax] + identifier[xscale] * literal[int] )
identifier[plt] . identifier[ylim] ( identifier[ymin] - identifier[yscale] * literal[int] , identifier[ymax] + identifier[yscale] * literal[int] )
keyword[return] identifier[G] , identifier[results] | def main(nodes, edges):
"""Generate a random graph, submit jobs, then validate that the
dependency order was enforced.
Finally, plot the graph, with time on the x-axis, and
in-degree on the y (just for spread). All arrows must
point at least slightly to the right if the graph is valid.
"""
from matplotlib import pyplot as plt
from matplotlib.dates import date2num
from matplotlib.cm import gist_rainbow
print('building DAG')
G = random_dag(nodes, edges)
jobs = {}
pos = {}
colors = {}
for node in G:
jobs[node] = randomwait # depends on [control=['for'], data=['node']]
client = parallel.Client()
view = client.load_balanced_view()
print('submitting %i tasks with %i dependencies' % (nodes, edges))
results = submit_jobs(view, G, jobs)
print('waiting for results')
view.wait()
print('done')
for node in G:
md = results[node].metadata
start = date2num(md.started)
runtime = date2num(md.completed) - start
pos[node] = (start, runtime)
colors[node] = md.engine_id # depends on [control=['for'], data=['node']]
validate_tree(G, results)
nx.draw(G, pos, node_list=colors.keys(), node_color=colors.values(), cmap=gist_rainbow, with_labels=False)
(x, y) = zip(*pos.values())
(xmin, ymin) = map(min, (x, y))
(xmax, ymax) = map(max, (x, y))
xscale = xmax - xmin
yscale = ymax - ymin
plt.xlim(xmin - xscale * 0.1, xmax + xscale * 0.1)
plt.ylim(ymin - yscale * 0.1, ymax + yscale * 0.1)
return (G, results) |
def fee(self, value=None, unit='satoshi'):
"""
Set the miner fee, if unit is not set, assumes value is satoshi.
If using 'optimal', make sure you have already added all outputs.
"""
convert = None
if not value:
# no fee was specified, use $0.02 as default.
convert = get_current_price(self.crypto, "usd")
self.fee_satoshi = int(0.02 / convert * 1e8)
verbose = "Using default fee of:"
elif value == 'optimal':
self.fee_satoshi = get_optimal_fee(
self.crypto, self.estimate_size(), verbose=self.verbose
)
verbose = "Using optimal fee of:"
else:
self.fee_satoshi = self.from_unit_to_satoshi(value, unit)
verbose = "Using manually set fee of:"
if self.verbose:
if not convert:
convert = get_current_price(self.crypto, "usd")
fee_dollar = convert * self.fee_satoshi / 1e8
print(verbose + " %s satoshis ($%.2f)" % (self.fee_satoshi, fee_dollar)) | def function[fee, parameter[self, value, unit]]:
constant[
Set the miner fee, if unit is not set, assumes value is satoshi.
If using 'optimal', make sure you have already added all outputs.
]
variable[convert] assign[=] constant[None]
if <ast.UnaryOp object at 0x7da1b10177c0> begin[:]
variable[convert] assign[=] call[name[get_current_price], parameter[name[self].crypto, constant[usd]]]
name[self].fee_satoshi assign[=] call[name[int], parameter[binary_operation[binary_operation[constant[0.02] / name[convert]] * constant[100000000.0]]]]
variable[verbose] assign[=] constant[Using default fee of:]
if name[self].verbose begin[:]
if <ast.UnaryOp object at 0x7da1b11bfaf0> begin[:]
variable[convert] assign[=] call[name[get_current_price], parameter[name[self].crypto, constant[usd]]]
variable[fee_dollar] assign[=] binary_operation[binary_operation[name[convert] * name[self].fee_satoshi] / constant[100000000.0]]
call[name[print], parameter[binary_operation[name[verbose] + binary_operation[constant[ %s satoshis ($%.2f)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b11bc430>, <ast.Name object at 0x7da1b11bfdc0>]]]]]] | keyword[def] identifier[fee] ( identifier[self] , identifier[value] = keyword[None] , identifier[unit] = literal[string] ):
literal[string]
identifier[convert] = keyword[None]
keyword[if] keyword[not] identifier[value] :
identifier[convert] = identifier[get_current_price] ( identifier[self] . identifier[crypto] , literal[string] )
identifier[self] . identifier[fee_satoshi] = identifier[int] ( literal[int] / identifier[convert] * literal[int] )
identifier[verbose] = literal[string]
keyword[elif] identifier[value] == literal[string] :
identifier[self] . identifier[fee_satoshi] = identifier[get_optimal_fee] (
identifier[self] . identifier[crypto] , identifier[self] . identifier[estimate_size] (), identifier[verbose] = identifier[self] . identifier[verbose]
)
identifier[verbose] = literal[string]
keyword[else] :
identifier[self] . identifier[fee_satoshi] = identifier[self] . identifier[from_unit_to_satoshi] ( identifier[value] , identifier[unit] )
identifier[verbose] = literal[string]
keyword[if] identifier[self] . identifier[verbose] :
keyword[if] keyword[not] identifier[convert] :
identifier[convert] = identifier[get_current_price] ( identifier[self] . identifier[crypto] , literal[string] )
identifier[fee_dollar] = identifier[convert] * identifier[self] . identifier[fee_satoshi] / literal[int]
identifier[print] ( identifier[verbose] + literal[string] %( identifier[self] . identifier[fee_satoshi] , identifier[fee_dollar] )) | def fee(self, value=None, unit='satoshi'):
"""
Set the miner fee, if unit is not set, assumes value is satoshi.
If using 'optimal', make sure you have already added all outputs.
"""
convert = None
if not value:
# no fee was specified, use $0.02 as default.
convert = get_current_price(self.crypto, 'usd')
self.fee_satoshi = int(0.02 / convert * 100000000.0)
verbose = 'Using default fee of:' # depends on [control=['if'], data=[]]
elif value == 'optimal':
self.fee_satoshi = get_optimal_fee(self.crypto, self.estimate_size(), verbose=self.verbose)
verbose = 'Using optimal fee of:' # depends on [control=['if'], data=[]]
else:
self.fee_satoshi = self.from_unit_to_satoshi(value, unit)
verbose = 'Using manually set fee of:'
if self.verbose:
if not convert:
convert = get_current_price(self.crypto, 'usd') # depends on [control=['if'], data=[]]
fee_dollar = convert * self.fee_satoshi / 100000000.0
print(verbose + ' %s satoshis ($%.2f)' % (self.fee_satoshi, fee_dollar)) # depends on [control=['if'], data=[]] |
def pre(self, inputstring, **kwargs):
"""Perform pre-processing."""
out = self.apply_procs(self.preprocs, kwargs, str(inputstring))
logger.log_tag("skips", self.skips)
return out | def function[pre, parameter[self, inputstring]]:
constant[Perform pre-processing.]
variable[out] assign[=] call[name[self].apply_procs, parameter[name[self].preprocs, name[kwargs], call[name[str], parameter[name[inputstring]]]]]
call[name[logger].log_tag, parameter[constant[skips], name[self].skips]]
return[name[out]] | keyword[def] identifier[pre] ( identifier[self] , identifier[inputstring] ,** identifier[kwargs] ):
literal[string]
identifier[out] = identifier[self] . identifier[apply_procs] ( identifier[self] . identifier[preprocs] , identifier[kwargs] , identifier[str] ( identifier[inputstring] ))
identifier[logger] . identifier[log_tag] ( literal[string] , identifier[self] . identifier[skips] )
keyword[return] identifier[out] | def pre(self, inputstring, **kwargs):
"""Perform pre-processing."""
out = self.apply_procs(self.preprocs, kwargs, str(inputstring))
logger.log_tag('skips', self.skips)
return out |
def chart_series(series, market_sym='$SPX', price='actual_close', normalize=True):
"""Display a graph of the price history for the list of ticker symbols provided
Arguments:
series (dataframe, list of str, or list of tuples):
datafram (Timestamp or Datetime for index)
other columns are float y-axis values to be plotted
list of str: 1st 3 comma or slash-separated integers are the year, month, day
others are float y-axis values
list of tuples: 1st 3 integers are year, month, day
others are float y-axis values
market_sym (str): ticker symbol of equity or comodity to plot along side the series
price (str): which market data value ('close', 'actual_close', 'volume', etc) to use
for the market symbol for comparison to the series
normalize (bool): Whether to normalize prices to 1 at the start of the time series.
"""
series = util.make_dataframe(series)
start = util.normalize_date(series.index[0] or datetime.datetime(2008, 1, 1))
end = util.normalize_date(series.index[-1] or datetime.datetime(2009, 12, 28))
timestamps = du.getNYSEdays(start, end, datetime.timedelta(hours=16))
if market_sym:
if isinstance(market_sym, basestring):
market_sym = [market_sym.upper().strip()]
reference_prices = da.get_data(timestamps, market_sym, [price])[0]
reference_dict = dict(zip(market_sym, reference_prices))
for sym, market_data in reference_dict.iteritems():
series[sym] = pd.Series(market_data, index=timestamps)
# na_price = reference_dict[price].values
# if normalize:
# na_price /= na_price[0, :]
series.plot()
# plt.clf()
# plt.plot(timestamps, na_price)
# plt.legend(symbols)
# plt.ylabel(price.title())
# plt.xlabel('Date')
# # plt.savefig('portfolio.chart_series.pdf', format='pdf')
plt.grid(True)
plt.show()
return series | def function[chart_series, parameter[series, market_sym, price, normalize]]:
constant[Display a graph of the price history for the list of ticker symbols provided
Arguments:
series (dataframe, list of str, or list of tuples):
datafram (Timestamp or Datetime for index)
other columns are float y-axis values to be plotted
list of str: 1st 3 comma or slash-separated integers are the year, month, day
others are float y-axis values
list of tuples: 1st 3 integers are year, month, day
others are float y-axis values
market_sym (str): ticker symbol of equity or comodity to plot along side the series
price (str): which market data value ('close', 'actual_close', 'volume', etc) to use
for the market symbol for comparison to the series
normalize (bool): Whether to normalize prices to 1 at the start of the time series.
]
variable[series] assign[=] call[name[util].make_dataframe, parameter[name[series]]]
variable[start] assign[=] call[name[util].normalize_date, parameter[<ast.BoolOp object at 0x7da207f9b3a0>]]
variable[end] assign[=] call[name[util].normalize_date, parameter[<ast.BoolOp object at 0x7da1b1629f30>]]
variable[timestamps] assign[=] call[name[du].getNYSEdays, parameter[name[start], name[end], call[name[datetime].timedelta, parameter[]]]]
if name[market_sym] begin[:]
if call[name[isinstance], parameter[name[market_sym], name[basestring]]] begin[:]
variable[market_sym] assign[=] list[[<ast.Call object at 0x7da1b162b970>]]
variable[reference_prices] assign[=] call[call[name[da].get_data, parameter[name[timestamps], name[market_sym], list[[<ast.Name object at 0x7da1b1629390>]]]]][constant[0]]
variable[reference_dict] assign[=] call[name[dict], parameter[call[name[zip], parameter[name[market_sym], name[reference_prices]]]]]
for taget[tuple[[<ast.Name object at 0x7da1b162bdc0>, <ast.Name object at 0x7da1b162ba90>]]] in starred[call[name[reference_dict].iteritems, parameter[]]] begin[:]
call[name[series]][name[sym]] assign[=] call[name[pd].Series, parameter[name[market_data]]]
call[name[series].plot, parameter[]]
call[name[plt].grid, parameter[constant[True]]]
call[name[plt].show, parameter[]]
return[name[series]] | keyword[def] identifier[chart_series] ( identifier[series] , identifier[market_sym] = literal[string] , identifier[price] = literal[string] , identifier[normalize] = keyword[True] ):
literal[string]
identifier[series] = identifier[util] . identifier[make_dataframe] ( identifier[series] )
identifier[start] = identifier[util] . identifier[normalize_date] ( identifier[series] . identifier[index] [ literal[int] ] keyword[or] identifier[datetime] . identifier[datetime] ( literal[int] , literal[int] , literal[int] ))
identifier[end] = identifier[util] . identifier[normalize_date] ( identifier[series] . identifier[index] [- literal[int] ] keyword[or] identifier[datetime] . identifier[datetime] ( literal[int] , literal[int] , literal[int] ))
identifier[timestamps] = identifier[du] . identifier[getNYSEdays] ( identifier[start] , identifier[end] , identifier[datetime] . identifier[timedelta] ( identifier[hours] = literal[int] ))
keyword[if] identifier[market_sym] :
keyword[if] identifier[isinstance] ( identifier[market_sym] , identifier[basestring] ):
identifier[market_sym] =[ identifier[market_sym] . identifier[upper] (). identifier[strip] ()]
identifier[reference_prices] = identifier[da] . identifier[get_data] ( identifier[timestamps] , identifier[market_sym] ,[ identifier[price] ])[ literal[int] ]
identifier[reference_dict] = identifier[dict] ( identifier[zip] ( identifier[market_sym] , identifier[reference_prices] ))
keyword[for] identifier[sym] , identifier[market_data] keyword[in] identifier[reference_dict] . identifier[iteritems] ():
identifier[series] [ identifier[sym] ]= identifier[pd] . identifier[Series] ( identifier[market_data] , identifier[index] = identifier[timestamps] )
identifier[series] . identifier[plot] ()
identifier[plt] . identifier[grid] ( keyword[True] )
identifier[plt] . identifier[show] ()
keyword[return] identifier[series] | def chart_series(series, market_sym='$SPX', price='actual_close', normalize=True):
"""Display a graph of the price history for the list of ticker symbols provided
Arguments:
series (dataframe, list of str, or list of tuples):
datafram (Timestamp or Datetime for index)
other columns are float y-axis values to be plotted
list of str: 1st 3 comma or slash-separated integers are the year, month, day
others are float y-axis values
list of tuples: 1st 3 integers are year, month, day
others are float y-axis values
market_sym (str): ticker symbol of equity or comodity to plot along side the series
price (str): which market data value ('close', 'actual_close', 'volume', etc) to use
for the market symbol for comparison to the series
normalize (bool): Whether to normalize prices to 1 at the start of the time series.
"""
series = util.make_dataframe(series)
start = util.normalize_date(series.index[0] or datetime.datetime(2008, 1, 1))
end = util.normalize_date(series.index[-1] or datetime.datetime(2009, 12, 28))
timestamps = du.getNYSEdays(start, end, datetime.timedelta(hours=16))
if market_sym:
if isinstance(market_sym, basestring):
market_sym = [market_sym.upper().strip()] # depends on [control=['if'], data=[]]
reference_prices = da.get_data(timestamps, market_sym, [price])[0]
reference_dict = dict(zip(market_sym, reference_prices))
for (sym, market_data) in reference_dict.iteritems():
series[sym] = pd.Series(market_data, index=timestamps) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
# na_price = reference_dict[price].values
# if normalize:
# na_price /= na_price[0, :]
series.plot()
# plt.clf()
# plt.plot(timestamps, na_price)
# plt.legend(symbols)
# plt.ylabel(price.title())
# plt.xlabel('Date')
# # plt.savefig('portfolio.chart_series.pdf', format='pdf')
plt.grid(True)
plt.show()
return series |
def write_pascal_results(self, all_boxes):
"""
write results files in pascal devkit path
Parameters:
----------
all_boxes: list
boxes to be processed [bbox, confidence]
Returns:
----------
None
"""
for cls_ind, cls in enumerate(self.classes):
print('Writing {} VOC results file'.format(cls))
filename = self.get_result_file_template().format(cls)
with open(filename, 'wt') as f:
for im_ind, index in enumerate(self.image_set_index):
dets = all_boxes[im_ind]
if dets.shape[0] < 1:
continue
h, w = self._get_imsize(self.image_path_from_index(im_ind))
# the VOCdevkit expects 1-based indices
for k in range(dets.shape[0]):
if (int(dets[k, 0]) == cls_ind):
f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
format(index, dets[k, 1],
int(dets[k, 2] * w) + 1, int(dets[k, 3] * h) + 1,
int(dets[k, 4] * w) + 1, int(dets[k, 5] * h) + 1)) | def function[write_pascal_results, parameter[self, all_boxes]]:
constant[
write results files in pascal devkit path
Parameters:
----------
all_boxes: list
boxes to be processed [bbox, confidence]
Returns:
----------
None
]
for taget[tuple[[<ast.Name object at 0x7da204963190>, <ast.Name object at 0x7da204963460>]]] in starred[call[name[enumerate], parameter[name[self].classes]]] begin[:]
call[name[print], parameter[call[constant[Writing {} VOC results file].format, parameter[name[cls]]]]]
variable[filename] assign[=] call[call[name[self].get_result_file_template, parameter[]].format, parameter[name[cls]]]
with call[name[open], parameter[name[filename], constant[wt]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da18c4ccb80>, <ast.Name object at 0x7da18c4ccdc0>]]] in starred[call[name[enumerate], parameter[name[self].image_set_index]]] begin[:]
variable[dets] assign[=] call[name[all_boxes]][name[im_ind]]
if compare[call[name[dets].shape][constant[0]] less[<] constant[1]] begin[:]
continue
<ast.Tuple object at 0x7da1b201d7e0> assign[=] call[name[self]._get_imsize, parameter[call[name[self].image_path_from_index, parameter[name[im_ind]]]]]
for taget[name[k]] in starred[call[name[range], parameter[call[name[dets].shape][constant[0]]]]] begin[:]
if compare[call[name[int], parameter[call[name[dets]][tuple[[<ast.Name object at 0x7da1b201f6a0>, <ast.Constant object at 0x7da1b201d120>]]]]] equal[==] name[cls_ind]] begin[:]
call[name[f].write, parameter[call[constant[{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}
].format, parameter[name[index], call[name[dets]][tuple[[<ast.Name object at 0x7da2049639a0>, <ast.Constant object at 0x7da204960610>]]], binary_operation[call[name[int], parameter[binary_operation[call[name[dets]][tuple[[<ast.Name object at 0x7da204960070>, <ast.Constant object at 0x7da204963940>]]] * name[w]]]] + constant[1]], binary_operation[call[name[int], parameter[binary_operation[call[name[dets]][tuple[[<ast.Name object at 0x7da2049628f0>, <ast.Constant object at 0x7da204960460>]]] * name[h]]]] + constant[1]], binary_operation[call[name[int], parameter[binary_operation[call[name[dets]][tuple[[<ast.Name object at 0x7da20c76ec80>, <ast.Constant object at 0x7da20c76d630>]]] * name[w]]]] + constant[1]], binary_operation[call[name[int], parameter[binary_operation[call[name[dets]][tuple[[<ast.Name object at 0x7da20c76c580>, <ast.Constant object at 0x7da20c76c370>]]] * name[h]]]] + constant[1]]]]]] | keyword[def] identifier[write_pascal_results] ( identifier[self] , identifier[all_boxes] ):
literal[string]
keyword[for] identifier[cls_ind] , identifier[cls] keyword[in] identifier[enumerate] ( identifier[self] . identifier[classes] ):
identifier[print] ( literal[string] . identifier[format] ( identifier[cls] ))
identifier[filename] = identifier[self] . identifier[get_result_file_template] (). identifier[format] ( identifier[cls] )
keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[f] :
keyword[for] identifier[im_ind] , identifier[index] keyword[in] identifier[enumerate] ( identifier[self] . identifier[image_set_index] ):
identifier[dets] = identifier[all_boxes] [ identifier[im_ind] ]
keyword[if] identifier[dets] . identifier[shape] [ literal[int] ]< literal[int] :
keyword[continue]
identifier[h] , identifier[w] = identifier[self] . identifier[_get_imsize] ( identifier[self] . identifier[image_path_from_index] ( identifier[im_ind] ))
keyword[for] identifier[k] keyword[in] identifier[range] ( identifier[dets] . identifier[shape] [ literal[int] ]):
keyword[if] ( identifier[int] ( identifier[dets] [ identifier[k] , literal[int] ])== identifier[cls_ind] ):
identifier[f] . identifier[write] ( literal[string] .
identifier[format] ( identifier[index] , identifier[dets] [ identifier[k] , literal[int] ],
identifier[int] ( identifier[dets] [ identifier[k] , literal[int] ]* identifier[w] )+ literal[int] , identifier[int] ( identifier[dets] [ identifier[k] , literal[int] ]* identifier[h] )+ literal[int] ,
identifier[int] ( identifier[dets] [ identifier[k] , literal[int] ]* identifier[w] )+ literal[int] , identifier[int] ( identifier[dets] [ identifier[k] , literal[int] ]* identifier[h] )+ literal[int] )) | def write_pascal_results(self, all_boxes):
"""
write results files in pascal devkit path
Parameters:
----------
all_boxes: list
boxes to be processed [bbox, confidence]
Returns:
----------
None
"""
for (cls_ind, cls) in enumerate(self.classes):
print('Writing {} VOC results file'.format(cls))
filename = self.get_result_file_template().format(cls)
with open(filename, 'wt') as f:
for (im_ind, index) in enumerate(self.image_set_index):
dets = all_boxes[im_ind]
if dets.shape[0] < 1:
continue # depends on [control=['if'], data=[]]
(h, w) = self._get_imsize(self.image_path_from_index(im_ind))
# the VOCdevkit expects 1-based indices
for k in range(dets.shape[0]):
if int(dets[k, 0]) == cls_ind:
f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.format(index, dets[k, 1], int(dets[k, 2] * w) + 1, int(dets[k, 3] * h) + 1, int(dets[k, 4] * w) + 1, int(dets[k, 5] * h) + 1)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['k']] # depends on [control=['for'], data=[]] # depends on [control=['with'], data=['f']] # depends on [control=['for'], data=[]] |
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
frame = FigureFrameWxAgg(num, figure)
figmgr = frame.get_figure_manager()
if matplotlib.is_interactive():
figmgr.frame.Show()
return figmgr | def function[new_figure_manager_given_figure, parameter[num, figure]]:
constant[
Create a new figure manager instance for the given figure.
]
variable[frame] assign[=] call[name[FigureFrameWxAgg], parameter[name[num], name[figure]]]
variable[figmgr] assign[=] call[name[frame].get_figure_manager, parameter[]]
if call[name[matplotlib].is_interactive, parameter[]] begin[:]
call[name[figmgr].frame.Show, parameter[]]
return[name[figmgr]] | keyword[def] identifier[new_figure_manager_given_figure] ( identifier[num] , identifier[figure] ):
literal[string]
identifier[frame] = identifier[FigureFrameWxAgg] ( identifier[num] , identifier[figure] )
identifier[figmgr] = identifier[frame] . identifier[get_figure_manager] ()
keyword[if] identifier[matplotlib] . identifier[is_interactive] ():
identifier[figmgr] . identifier[frame] . identifier[Show] ()
keyword[return] identifier[figmgr] | def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
frame = FigureFrameWxAgg(num, figure)
figmgr = frame.get_figure_manager()
if matplotlib.is_interactive():
figmgr.frame.Show() # depends on [control=['if'], data=[]]
return figmgr |
def write(*args):
"""Like print(), but recognizes tensors and arrays and show
more details about them.
Example:
hl.write("My Tensor", my_tensor)
Prints:
My Tensor float32 (10, 3, 224, 224) min: 0.0 max: 1.0
"""
s = ""
for a in args:
# Convert tensors to Numpy arrays
a = to_data(a)
if isinstance(a, np.ndarray):
# Numpy Array
s += ("\t" if s else "") + "Tensor {} {} min: {:.3f} max: {:.3f}".format(
a.dtype, a.shape, a.min(), a.max())
print(s)
s = ""
elif isinstance(a, list):
s += ("\t" if s else "") + "list len: {} {}".format(len(a), a[:10])
else:
s += (" " if s else "") + str(a)
if s:
print(s) | def function[write, parameter[]]:
constant[Like print(), but recognizes tensors and arrays and show
more details about them.
Example:
hl.write("My Tensor", my_tensor)
Prints:
My Tensor float32 (10, 3, 224, 224) min: 0.0 max: 1.0
]
variable[s] assign[=] constant[]
for taget[name[a]] in starred[name[args]] begin[:]
variable[a] assign[=] call[name[to_data], parameter[name[a]]]
if call[name[isinstance], parameter[name[a], name[np].ndarray]] begin[:]
<ast.AugAssign object at 0x7da18eb55990>
call[name[print], parameter[name[s]]]
variable[s] assign[=] constant[]
if name[s] begin[:]
call[name[print], parameter[name[s]]] | keyword[def] identifier[write] (* identifier[args] ):
literal[string]
identifier[s] = literal[string]
keyword[for] identifier[a] keyword[in] identifier[args] :
identifier[a] = identifier[to_data] ( identifier[a] )
keyword[if] identifier[isinstance] ( identifier[a] , identifier[np] . identifier[ndarray] ):
identifier[s] +=( literal[string] keyword[if] identifier[s] keyword[else] literal[string] )+ literal[string] . identifier[format] (
identifier[a] . identifier[dtype] , identifier[a] . identifier[shape] , identifier[a] . identifier[min] (), identifier[a] . identifier[max] ())
identifier[print] ( identifier[s] )
identifier[s] = literal[string]
keyword[elif] identifier[isinstance] ( identifier[a] , identifier[list] ):
identifier[s] +=( literal[string] keyword[if] identifier[s] keyword[else] literal[string] )+ literal[string] . identifier[format] ( identifier[len] ( identifier[a] ), identifier[a] [: literal[int] ])
keyword[else] :
identifier[s] +=( literal[string] keyword[if] identifier[s] keyword[else] literal[string] )+ identifier[str] ( identifier[a] )
keyword[if] identifier[s] :
identifier[print] ( identifier[s] ) | def write(*args):
"""Like print(), but recognizes tensors and arrays and show
more details about them.
Example:
hl.write("My Tensor", my_tensor)
Prints:
My Tensor float32 (10, 3, 224, 224) min: 0.0 max: 1.0
"""
s = ''
for a in args:
# Convert tensors to Numpy arrays
a = to_data(a)
if isinstance(a, np.ndarray):
# Numpy Array
s += ('\t' if s else '') + 'Tensor {} {} min: {:.3f} max: {:.3f}'.format(a.dtype, a.shape, a.min(), a.max())
print(s)
s = '' # depends on [control=['if'], data=[]]
elif isinstance(a, list):
s += ('\t' if s else '') + 'list len: {} {}'.format(len(a), a[:10]) # depends on [control=['if'], data=[]]
else:
s += (' ' if s else '') + str(a) # depends on [control=['for'], data=['a']]
if s:
print(s) # depends on [control=['if'], data=[]] |
def return_single_path_base(dbpath, set_object, object_id):
"""
Generic function which returns a path (path is relative to the path_to_set stored in the database) of an object
specified by the object_id
Parameters
----------
dbpath : string, path to SQLite database file
set_object : object (either TestSet or TrainSet) which is stored in the database
object_id : int, id of object in database
Returns
-------
path : string
"""
engine = create_engine('sqlite:////' + dbpath)
session_cl = sessionmaker(bind=engine)
session = session_cl()
tmp_object = session.query(set_object).get(object_id)
session.close()
return tmp_object.path | def function[return_single_path_base, parameter[dbpath, set_object, object_id]]:
constant[
Generic function which returns a path (path is relative to the path_to_set stored in the database) of an object
specified by the object_id
Parameters
----------
dbpath : string, path to SQLite database file
set_object : object (either TestSet or TrainSet) which is stored in the database
object_id : int, id of object in database
Returns
-------
path : string
]
variable[engine] assign[=] call[name[create_engine], parameter[binary_operation[constant[sqlite:////] + name[dbpath]]]]
variable[session_cl] assign[=] call[name[sessionmaker], parameter[]]
variable[session] assign[=] call[name[session_cl], parameter[]]
variable[tmp_object] assign[=] call[call[name[session].query, parameter[name[set_object]]].get, parameter[name[object_id]]]
call[name[session].close, parameter[]]
return[name[tmp_object].path] | keyword[def] identifier[return_single_path_base] ( identifier[dbpath] , identifier[set_object] , identifier[object_id] ):
literal[string]
identifier[engine] = identifier[create_engine] ( literal[string] + identifier[dbpath] )
identifier[session_cl] = identifier[sessionmaker] ( identifier[bind] = identifier[engine] )
identifier[session] = identifier[session_cl] ()
identifier[tmp_object] = identifier[session] . identifier[query] ( identifier[set_object] ). identifier[get] ( identifier[object_id] )
identifier[session] . identifier[close] ()
keyword[return] identifier[tmp_object] . identifier[path] | def return_single_path_base(dbpath, set_object, object_id):
"""
Generic function which returns a path (path is relative to the path_to_set stored in the database) of an object
specified by the object_id
Parameters
----------
dbpath : string, path to SQLite database file
set_object : object (either TestSet or TrainSet) which is stored in the database
object_id : int, id of object in database
Returns
-------
path : string
"""
engine = create_engine('sqlite:////' + dbpath)
session_cl = sessionmaker(bind=engine)
session = session_cl()
tmp_object = session.query(set_object).get(object_id)
session.close()
return tmp_object.path |
def gdate(self):
"""Return the Gregorian date for the given Hebrew date object."""
if self._last_updated == "gdate":
return self._gdate
return conv.jdn_to_gdate(self._jdn) | def function[gdate, parameter[self]]:
constant[Return the Gregorian date for the given Hebrew date object.]
if compare[name[self]._last_updated equal[==] constant[gdate]] begin[:]
return[name[self]._gdate]
return[call[name[conv].jdn_to_gdate, parameter[name[self]._jdn]]] | keyword[def] identifier[gdate] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_last_updated] == literal[string] :
keyword[return] identifier[self] . identifier[_gdate]
keyword[return] identifier[conv] . identifier[jdn_to_gdate] ( identifier[self] . identifier[_jdn] ) | def gdate(self):
"""Return the Gregorian date for the given Hebrew date object."""
if self._last_updated == 'gdate':
return self._gdate # depends on [control=['if'], data=[]]
return conv.jdn_to_gdate(self._jdn) |
def basic_auth(user, password, realm=None):
""" Generate a basic auth token for a given user and password.
:param user: user name
:param password: current password
:param realm: specifies the authentication provider
:return: auth token for use with :meth:`GraphDatabase.driver`
"""
from neobolt.security import AuthToken
return AuthToken("basic", user, password, realm) | def function[basic_auth, parameter[user, password, realm]]:
constant[ Generate a basic auth token for a given user and password.
:param user: user name
:param password: current password
:param realm: specifies the authentication provider
:return: auth token for use with :meth:`GraphDatabase.driver`
]
from relative_module[neobolt.security] import module[AuthToken]
return[call[name[AuthToken], parameter[constant[basic], name[user], name[password], name[realm]]]] | keyword[def] identifier[basic_auth] ( identifier[user] , identifier[password] , identifier[realm] = keyword[None] ):
literal[string]
keyword[from] identifier[neobolt] . identifier[security] keyword[import] identifier[AuthToken]
keyword[return] identifier[AuthToken] ( literal[string] , identifier[user] , identifier[password] , identifier[realm] ) | def basic_auth(user, password, realm=None):
""" Generate a basic auth token for a given user and password.
:param user: user name
:param password: current password
:param realm: specifies the authentication provider
:return: auth token for use with :meth:`GraphDatabase.driver`
"""
from neobolt.security import AuthToken
return AuthToken('basic', user, password, realm) |
def channel_list(self):
"""
Main screen for channel management.
Channels listed and operations can be chosen on the screen.
If there is an error message like non-choice,
it is shown here.
"""
if self.current.task_data.get('msg', False):
if self.current.task_data.get('target_channel_key', False):
self.current.output['msgbox'] = {'type': 'info',
"title": _(u"Successful Operation"),
"msg": self.current.task_data['msg']}
del self.current.task_data['msg']
else:
self.show_warning_messages()
self.current.task_data['new_channel'] = False
_form = ChannelListForm(title=_(u'Public Channel List'), help_text=CHANNEL_CHOICE_TEXT)
for channel in Channel.objects.filter(typ=15):
owner_name = channel.owner.username
_form.ChannelList(choice=False, name=channel.name, owner=owner_name, key=channel.key)
_form.new_channel = fields.Button(_(u"Merge At New Channel"), cmd="create_new_channel")
_form.existing_channel = fields.Button(_(u"Merge With An Existing Channel"),
cmd="choose_existing_channel")
_form.find_chosen_channel = fields.Button(_(u"Split Channel"), cmd="find_chosen_channel")
self.form_out(_form) | def function[channel_list, parameter[self]]:
constant[
Main screen for channel management.
Channels listed and operations can be chosen on the screen.
If there is an error message like non-choice,
it is shown here.
]
if call[name[self].current.task_data.get, parameter[constant[msg], constant[False]]] begin[:]
if call[name[self].current.task_data.get, parameter[constant[target_channel_key], constant[False]]] begin[:]
call[name[self].current.output][constant[msgbox]] assign[=] dictionary[[<ast.Constant object at 0x7da20c7cbd90>, <ast.Constant object at 0x7da20c7ca0b0>, <ast.Constant object at 0x7da20c7c91b0>], [<ast.Constant object at 0x7da20c7c9690>, <ast.Call object at 0x7da20c7cbbb0>, <ast.Subscript object at 0x7da20c7cb730>]]
<ast.Delete object at 0x7da20c7cbc70>
call[name[self].current.task_data][constant[new_channel]] assign[=] constant[False]
variable[_form] assign[=] call[name[ChannelListForm], parameter[]]
for taget[name[channel]] in starred[call[name[Channel].objects.filter, parameter[]]] begin[:]
variable[owner_name] assign[=] name[channel].owner.username
call[name[_form].ChannelList, parameter[]]
name[_form].new_channel assign[=] call[name[fields].Button, parameter[call[name[_], parameter[constant[Merge At New Channel]]]]]
name[_form].existing_channel assign[=] call[name[fields].Button, parameter[call[name[_], parameter[constant[Merge With An Existing Channel]]]]]
name[_form].find_chosen_channel assign[=] call[name[fields].Button, parameter[call[name[_], parameter[constant[Split Channel]]]]]
call[name[self].form_out, parameter[name[_form]]] | keyword[def] identifier[channel_list] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[current] . identifier[task_data] . identifier[get] ( literal[string] , keyword[False] ):
keyword[if] identifier[self] . identifier[current] . identifier[task_data] . identifier[get] ( literal[string] , keyword[False] ):
identifier[self] . identifier[current] . identifier[output] [ literal[string] ]={ literal[string] : literal[string] ,
literal[string] : identifier[_] ( literal[string] ),
literal[string] : identifier[self] . identifier[current] . identifier[task_data] [ literal[string] ]}
keyword[del] identifier[self] . identifier[current] . identifier[task_data] [ literal[string] ]
keyword[else] :
identifier[self] . identifier[show_warning_messages] ()
identifier[self] . identifier[current] . identifier[task_data] [ literal[string] ]= keyword[False]
identifier[_form] = identifier[ChannelListForm] ( identifier[title] = identifier[_] ( literal[string] ), identifier[help_text] = identifier[CHANNEL_CHOICE_TEXT] )
keyword[for] identifier[channel] keyword[in] identifier[Channel] . identifier[objects] . identifier[filter] ( identifier[typ] = literal[int] ):
identifier[owner_name] = identifier[channel] . identifier[owner] . identifier[username]
identifier[_form] . identifier[ChannelList] ( identifier[choice] = keyword[False] , identifier[name] = identifier[channel] . identifier[name] , identifier[owner] = identifier[owner_name] , identifier[key] = identifier[channel] . identifier[key] )
identifier[_form] . identifier[new_channel] = identifier[fields] . identifier[Button] ( identifier[_] ( literal[string] ), identifier[cmd] = literal[string] )
identifier[_form] . identifier[existing_channel] = identifier[fields] . identifier[Button] ( identifier[_] ( literal[string] ),
identifier[cmd] = literal[string] )
identifier[_form] . identifier[find_chosen_channel] = identifier[fields] . identifier[Button] ( identifier[_] ( literal[string] ), identifier[cmd] = literal[string] )
identifier[self] . identifier[form_out] ( identifier[_form] ) | def channel_list(self):
"""
Main screen for channel management.
Channels listed and operations can be chosen on the screen.
If there is an error message like non-choice,
it is shown here.
"""
if self.current.task_data.get('msg', False):
if self.current.task_data.get('target_channel_key', False):
self.current.output['msgbox'] = {'type': 'info', 'title': _(u'Successful Operation'), 'msg': self.current.task_data['msg']}
del self.current.task_data['msg'] # depends on [control=['if'], data=[]]
else:
self.show_warning_messages() # depends on [control=['if'], data=[]]
self.current.task_data['new_channel'] = False
_form = ChannelListForm(title=_(u'Public Channel List'), help_text=CHANNEL_CHOICE_TEXT)
for channel in Channel.objects.filter(typ=15):
owner_name = channel.owner.username
_form.ChannelList(choice=False, name=channel.name, owner=owner_name, key=channel.key) # depends on [control=['for'], data=['channel']]
_form.new_channel = fields.Button(_(u'Merge At New Channel'), cmd='create_new_channel')
_form.existing_channel = fields.Button(_(u'Merge With An Existing Channel'), cmd='choose_existing_channel')
_form.find_chosen_channel = fields.Button(_(u'Split Channel'), cmd='find_chosen_channel')
self.form_out(_form) |
def _PromptUserForEncryptedVolumeCredential(
self, scan_context, locked_scan_node, output_writer):
"""Prompts the user to provide a credential for an encrypted volume.
Args:
scan_context (SourceScannerContext): the source scanner context.
locked_scan_node (SourceScanNode): the locked scan node.
output_writer (StdoutWriter): the output writer.
"""
credentials = credentials_manager.CredentialsManager.GetCredentials(
locked_scan_node.path_spec)
# TODO: print volume description.
if locked_scan_node.type_indicator == (
definitions.TYPE_INDICATOR_APFS_CONTAINER):
line = 'Found an APFS encrypted volume.'
elif locked_scan_node.type_indicator == definitions.TYPE_INDICATOR_BDE:
line = 'Found a BitLocker encrypted volume.'
elif locked_scan_node.type_indicator == definitions.TYPE_INDICATOR_FVDE:
line = 'Found a CoreStorage (FVDE) encrypted volume.'
else:
line = 'Found an encrypted volume.'
output_writer.WriteLine(line)
credentials_list = list(credentials.CREDENTIALS)
credentials_list.append('skip')
# TODO: check which credentials are available.
output_writer.WriteLine('Supported credentials:')
output_writer.WriteLine('')
for index, name in enumerate(credentials_list):
output_writer.WriteLine(' {0:d}. {1:s}'.format(index + 1, name))
output_writer.WriteLine('')
result = False
while not result:
output_writer.WriteString(
'Select a credential to unlock the volume: ')
# TODO: add an input reader.
input_line = sys.stdin.readline()
input_line = input_line.strip()
if input_line in credentials_list:
credential_identifier = input_line
else:
try:
credential_identifier = int(input_line, 10)
credential_identifier = credentials_list[credential_identifier - 1]
except (IndexError, ValueError):
output_writer.WriteLine(
'Unsupported credential: {0:s}'.format(input_line))
continue
if credential_identifier == 'skip':
break
getpass_string = 'Enter credential data: '
if sys.platform.startswith('win') and sys.version_info[0] < 3:
# For Python 2 on Windows getpass (win_getpass) requires an encoded
# byte string. For Python 3 we need it to be a Unicode string.
getpass_string = self._EncodeString(getpass_string)
credential_data = getpass.getpass(getpass_string)
output_writer.WriteLine('')
result = self._source_scanner.Unlock(
scan_context, locked_scan_node.path_spec, credential_identifier,
credential_data)
if not result:
output_writer.WriteLine('Unable to unlock volume.')
output_writer.WriteLine('') | def function[_PromptUserForEncryptedVolumeCredential, parameter[self, scan_context, locked_scan_node, output_writer]]:
constant[Prompts the user to provide a credential for an encrypted volume.
Args:
scan_context (SourceScannerContext): the source scanner context.
locked_scan_node (SourceScanNode): the locked scan node.
output_writer (StdoutWriter): the output writer.
]
variable[credentials] assign[=] call[name[credentials_manager].CredentialsManager.GetCredentials, parameter[name[locked_scan_node].path_spec]]
if compare[name[locked_scan_node].type_indicator equal[==] name[definitions].TYPE_INDICATOR_APFS_CONTAINER] begin[:]
variable[line] assign[=] constant[Found an APFS encrypted volume.]
call[name[output_writer].WriteLine, parameter[name[line]]]
variable[credentials_list] assign[=] call[name[list], parameter[name[credentials].CREDENTIALS]]
call[name[credentials_list].append, parameter[constant[skip]]]
call[name[output_writer].WriteLine, parameter[constant[Supported credentials:]]]
call[name[output_writer].WriteLine, parameter[constant[]]]
for taget[tuple[[<ast.Name object at 0x7da1b07a1540>, <ast.Name object at 0x7da1b07a17b0>]]] in starred[call[name[enumerate], parameter[name[credentials_list]]]] begin[:]
call[name[output_writer].WriteLine, parameter[call[constant[ {0:d}. {1:s}].format, parameter[binary_operation[name[index] + constant[1]], name[name]]]]]
call[name[output_writer].WriteLine, parameter[constant[]]]
variable[result] assign[=] constant[False]
while <ast.UnaryOp object at 0x7da1b07a06a0> begin[:]
call[name[output_writer].WriteString, parameter[constant[Select a credential to unlock the volume: ]]]
variable[input_line] assign[=] call[name[sys].stdin.readline, parameter[]]
variable[input_line] assign[=] call[name[input_line].strip, parameter[]]
if compare[name[input_line] in name[credentials_list]] begin[:]
variable[credential_identifier] assign[=] name[input_line]
if compare[name[credential_identifier] equal[==] constant[skip]] begin[:]
break
variable[getpass_string] assign[=] constant[Enter credential data: ]
if <ast.BoolOp object at 0x7da1b07ceb60> begin[:]
variable[getpass_string] assign[=] call[name[self]._EncodeString, parameter[name[getpass_string]]]
variable[credential_data] assign[=] call[name[getpass].getpass, parameter[name[getpass_string]]]
call[name[output_writer].WriteLine, parameter[constant[]]]
variable[result] assign[=] call[name[self]._source_scanner.Unlock, parameter[name[scan_context], name[locked_scan_node].path_spec, name[credential_identifier], name[credential_data]]]
if <ast.UnaryOp object at 0x7da1b0678880> begin[:]
call[name[output_writer].WriteLine, parameter[constant[Unable to unlock volume.]]]
call[name[output_writer].WriteLine, parameter[constant[]]] | keyword[def] identifier[_PromptUserForEncryptedVolumeCredential] (
identifier[self] , identifier[scan_context] , identifier[locked_scan_node] , identifier[output_writer] ):
literal[string]
identifier[credentials] = identifier[credentials_manager] . identifier[CredentialsManager] . identifier[GetCredentials] (
identifier[locked_scan_node] . identifier[path_spec] )
keyword[if] identifier[locked_scan_node] . identifier[type_indicator] ==(
identifier[definitions] . identifier[TYPE_INDICATOR_APFS_CONTAINER] ):
identifier[line] = literal[string]
keyword[elif] identifier[locked_scan_node] . identifier[type_indicator] == identifier[definitions] . identifier[TYPE_INDICATOR_BDE] :
identifier[line] = literal[string]
keyword[elif] identifier[locked_scan_node] . identifier[type_indicator] == identifier[definitions] . identifier[TYPE_INDICATOR_FVDE] :
identifier[line] = literal[string]
keyword[else] :
identifier[line] = literal[string]
identifier[output_writer] . identifier[WriteLine] ( identifier[line] )
identifier[credentials_list] = identifier[list] ( identifier[credentials] . identifier[CREDENTIALS] )
identifier[credentials_list] . identifier[append] ( literal[string] )
identifier[output_writer] . identifier[WriteLine] ( literal[string] )
identifier[output_writer] . identifier[WriteLine] ( literal[string] )
keyword[for] identifier[index] , identifier[name] keyword[in] identifier[enumerate] ( identifier[credentials_list] ):
identifier[output_writer] . identifier[WriteLine] ( literal[string] . identifier[format] ( identifier[index] + literal[int] , identifier[name] ))
identifier[output_writer] . identifier[WriteLine] ( literal[string] )
identifier[result] = keyword[False]
keyword[while] keyword[not] identifier[result] :
identifier[output_writer] . identifier[WriteString] (
literal[string] )
identifier[input_line] = identifier[sys] . identifier[stdin] . identifier[readline] ()
identifier[input_line] = identifier[input_line] . identifier[strip] ()
keyword[if] identifier[input_line] keyword[in] identifier[credentials_list] :
identifier[credential_identifier] = identifier[input_line]
keyword[else] :
keyword[try] :
identifier[credential_identifier] = identifier[int] ( identifier[input_line] , literal[int] )
identifier[credential_identifier] = identifier[credentials_list] [ identifier[credential_identifier] - literal[int] ]
keyword[except] ( identifier[IndexError] , identifier[ValueError] ):
identifier[output_writer] . identifier[WriteLine] (
literal[string] . identifier[format] ( identifier[input_line] ))
keyword[continue]
keyword[if] identifier[credential_identifier] == literal[string] :
keyword[break]
identifier[getpass_string] = literal[string]
keyword[if] identifier[sys] . identifier[platform] . identifier[startswith] ( literal[string] ) keyword[and] identifier[sys] . identifier[version_info] [ literal[int] ]< literal[int] :
identifier[getpass_string] = identifier[self] . identifier[_EncodeString] ( identifier[getpass_string] )
identifier[credential_data] = identifier[getpass] . identifier[getpass] ( identifier[getpass_string] )
identifier[output_writer] . identifier[WriteLine] ( literal[string] )
identifier[result] = identifier[self] . identifier[_source_scanner] . identifier[Unlock] (
identifier[scan_context] , identifier[locked_scan_node] . identifier[path_spec] , identifier[credential_identifier] ,
identifier[credential_data] )
keyword[if] keyword[not] identifier[result] :
identifier[output_writer] . identifier[WriteLine] ( literal[string] )
identifier[output_writer] . identifier[WriteLine] ( literal[string] ) | def _PromptUserForEncryptedVolumeCredential(self, scan_context, locked_scan_node, output_writer):
"""Prompts the user to provide a credential for an encrypted volume.
Args:
scan_context (SourceScannerContext): the source scanner context.
locked_scan_node (SourceScanNode): the locked scan node.
output_writer (StdoutWriter): the output writer.
"""
credentials = credentials_manager.CredentialsManager.GetCredentials(locked_scan_node.path_spec)
# TODO: print volume description.
if locked_scan_node.type_indicator == definitions.TYPE_INDICATOR_APFS_CONTAINER:
line = 'Found an APFS encrypted volume.' # depends on [control=['if'], data=[]]
elif locked_scan_node.type_indicator == definitions.TYPE_INDICATOR_BDE:
line = 'Found a BitLocker encrypted volume.' # depends on [control=['if'], data=[]]
elif locked_scan_node.type_indicator == definitions.TYPE_INDICATOR_FVDE:
line = 'Found a CoreStorage (FVDE) encrypted volume.' # depends on [control=['if'], data=[]]
else:
line = 'Found an encrypted volume.'
output_writer.WriteLine(line)
credentials_list = list(credentials.CREDENTIALS)
credentials_list.append('skip')
# TODO: check which credentials are available.
output_writer.WriteLine('Supported credentials:')
output_writer.WriteLine('')
for (index, name) in enumerate(credentials_list):
output_writer.WriteLine(' {0:d}. {1:s}'.format(index + 1, name)) # depends on [control=['for'], data=[]]
output_writer.WriteLine('')
result = False
while not result:
output_writer.WriteString('Select a credential to unlock the volume: ')
# TODO: add an input reader.
input_line = sys.stdin.readline()
input_line = input_line.strip()
if input_line in credentials_list:
credential_identifier = input_line # depends on [control=['if'], data=['input_line']]
else:
try:
credential_identifier = int(input_line, 10)
credential_identifier = credentials_list[credential_identifier - 1] # depends on [control=['try'], data=[]]
except (IndexError, ValueError):
output_writer.WriteLine('Unsupported credential: {0:s}'.format(input_line))
continue # depends on [control=['except'], data=[]]
if credential_identifier == 'skip':
break # depends on [control=['if'], data=[]]
getpass_string = 'Enter credential data: '
if sys.platform.startswith('win') and sys.version_info[0] < 3:
# For Python 2 on Windows getpass (win_getpass) requires an encoded
# byte string. For Python 3 we need it to be a Unicode string.
getpass_string = self._EncodeString(getpass_string) # depends on [control=['if'], data=[]]
credential_data = getpass.getpass(getpass_string)
output_writer.WriteLine('')
result = self._source_scanner.Unlock(scan_context, locked_scan_node.path_spec, credential_identifier, credential_data)
if not result:
output_writer.WriteLine('Unable to unlock volume.')
output_writer.WriteLine('') # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] |
def remove_vertex(self, vertex):
"""
Remove vertex from G
"""
try:
self.vertices.pop(vertex)
self.succ.pop(vertex)
except KeyError:
raise GraphInsertError("Vertex %s doesn't exist." % (vertex,))
if vertex in self.nodes:
self.nodes.pop(vertex)
for element in self.vertices:
if vertex in self.vertices[element]:
self.vertices[element].remove(vertex)
edges = [] # List for edges that include vertex
for element in self.edges:
if vertex in element:
edges.append(element)
for element in edges:
del self.edges[element]
for element in self.pred:
if vertex in self.pred[element]:
self.pred[element].remove(vertex)
for element in self.succ:
if vertex in self.succ[element]:
self.succ[element].remove(vertex) | def function[remove_vertex, parameter[self, vertex]]:
constant[
Remove vertex from G
]
<ast.Try object at 0x7da1b1578ca0>
if compare[name[vertex] in name[self].nodes] begin[:]
call[name[self].nodes.pop, parameter[name[vertex]]]
for taget[name[element]] in starred[name[self].vertices] begin[:]
if compare[name[vertex] in call[name[self].vertices][name[element]]] begin[:]
call[call[name[self].vertices][name[element]].remove, parameter[name[vertex]]]
variable[edges] assign[=] list[[]]
for taget[name[element]] in starred[name[self].edges] begin[:]
if compare[name[vertex] in name[element]] begin[:]
call[name[edges].append, parameter[name[element]]]
for taget[name[element]] in starred[name[edges]] begin[:]
<ast.Delete object at 0x7da20c7cafe0>
for taget[name[element]] in starred[name[self].pred] begin[:]
if compare[name[vertex] in call[name[self].pred][name[element]]] begin[:]
call[call[name[self].pred][name[element]].remove, parameter[name[vertex]]]
for taget[name[element]] in starred[name[self].succ] begin[:]
if compare[name[vertex] in call[name[self].succ][name[element]]] begin[:]
call[call[name[self].succ][name[element]].remove, parameter[name[vertex]]] | keyword[def] identifier[remove_vertex] ( identifier[self] , identifier[vertex] ):
literal[string]
keyword[try] :
identifier[self] . identifier[vertices] . identifier[pop] ( identifier[vertex] )
identifier[self] . identifier[succ] . identifier[pop] ( identifier[vertex] )
keyword[except] identifier[KeyError] :
keyword[raise] identifier[GraphInsertError] ( literal[string] %( identifier[vertex] ,))
keyword[if] identifier[vertex] keyword[in] identifier[self] . identifier[nodes] :
identifier[self] . identifier[nodes] . identifier[pop] ( identifier[vertex] )
keyword[for] identifier[element] keyword[in] identifier[self] . identifier[vertices] :
keyword[if] identifier[vertex] keyword[in] identifier[self] . identifier[vertices] [ identifier[element] ]:
identifier[self] . identifier[vertices] [ identifier[element] ]. identifier[remove] ( identifier[vertex] )
identifier[edges] =[]
keyword[for] identifier[element] keyword[in] identifier[self] . identifier[edges] :
keyword[if] identifier[vertex] keyword[in] identifier[element] :
identifier[edges] . identifier[append] ( identifier[element] )
keyword[for] identifier[element] keyword[in] identifier[edges] :
keyword[del] identifier[self] . identifier[edges] [ identifier[element] ]
keyword[for] identifier[element] keyword[in] identifier[self] . identifier[pred] :
keyword[if] identifier[vertex] keyword[in] identifier[self] . identifier[pred] [ identifier[element] ]:
identifier[self] . identifier[pred] [ identifier[element] ]. identifier[remove] ( identifier[vertex] )
keyword[for] identifier[element] keyword[in] identifier[self] . identifier[succ] :
keyword[if] identifier[vertex] keyword[in] identifier[self] . identifier[succ] [ identifier[element] ]:
identifier[self] . identifier[succ] [ identifier[element] ]. identifier[remove] ( identifier[vertex] ) | def remove_vertex(self, vertex):
"""
Remove vertex from G
"""
try:
self.vertices.pop(vertex)
self.succ.pop(vertex) # depends on [control=['try'], data=[]]
except KeyError:
raise GraphInsertError("Vertex %s doesn't exist." % (vertex,)) # depends on [control=['except'], data=[]]
if vertex in self.nodes:
self.nodes.pop(vertex) # depends on [control=['if'], data=['vertex']]
for element in self.vertices:
if vertex in self.vertices[element]:
self.vertices[element].remove(vertex) # depends on [control=['if'], data=['vertex']] # depends on [control=['for'], data=['element']]
edges = [] # List for edges that include vertex
for element in self.edges:
if vertex in element:
edges.append(element) # depends on [control=['if'], data=['element']] # depends on [control=['for'], data=['element']]
for element in edges:
del self.edges[element] # depends on [control=['for'], data=['element']]
for element in self.pred:
if vertex in self.pred[element]:
self.pred[element].remove(vertex) # depends on [control=['if'], data=['vertex']] # depends on [control=['for'], data=['element']]
for element in self.succ:
if vertex in self.succ[element]:
self.succ[element].remove(vertex) # depends on [control=['if'], data=['vertex']] # depends on [control=['for'], data=['element']] |
def fetch_items(self, category, **kwargs):
"""Fetch the messages
:param category: the category of items to fetch
:param kwargs: backend arguments
:returns: a generator of items
"""
from_date = kwargs['from_date']
logger.info("Fetching messages of '%s' from %s",
self.uri, str(from_date))
nmessages = 0
archives = self.__retrieve_archives(from_date)
for archive in archives:
logger.debug("Parsing supybot archive %s", archive)
for message in self.parse_supybot_log(archive):
dt = str_to_datetime(message['timestamp'])
if dt < from_date:
logger.debug("Message %s sent before %s; skipped",
str(dt), str(from_date))
continue
yield message
nmessages += 1
logger.info("Fetch process completed: %s messages fetched",
nmessages) | def function[fetch_items, parameter[self, category]]:
constant[Fetch the messages
:param category: the category of items to fetch
:param kwargs: backend arguments
:returns: a generator of items
]
variable[from_date] assign[=] call[name[kwargs]][constant[from_date]]
call[name[logger].info, parameter[constant[Fetching messages of '%s' from %s], name[self].uri, call[name[str], parameter[name[from_date]]]]]
variable[nmessages] assign[=] constant[0]
variable[archives] assign[=] call[name[self].__retrieve_archives, parameter[name[from_date]]]
for taget[name[archive]] in starred[name[archives]] begin[:]
call[name[logger].debug, parameter[constant[Parsing supybot archive %s], name[archive]]]
for taget[name[message]] in starred[call[name[self].parse_supybot_log, parameter[name[archive]]]] begin[:]
variable[dt] assign[=] call[name[str_to_datetime], parameter[call[name[message]][constant[timestamp]]]]
if compare[name[dt] less[<] name[from_date]] begin[:]
call[name[logger].debug, parameter[constant[Message %s sent before %s; skipped], call[name[str], parameter[name[dt]]], call[name[str], parameter[name[from_date]]]]]
continue
<ast.Yield object at 0x7da1b03832e0>
<ast.AugAssign object at 0x7da1b0383820>
call[name[logger].info, parameter[constant[Fetch process completed: %s messages fetched], name[nmessages]]] | keyword[def] identifier[fetch_items] ( identifier[self] , identifier[category] ,** identifier[kwargs] ):
literal[string]
identifier[from_date] = identifier[kwargs] [ literal[string] ]
identifier[logger] . identifier[info] ( literal[string] ,
identifier[self] . identifier[uri] , identifier[str] ( identifier[from_date] ))
identifier[nmessages] = literal[int]
identifier[archives] = identifier[self] . identifier[__retrieve_archives] ( identifier[from_date] )
keyword[for] identifier[archive] keyword[in] identifier[archives] :
identifier[logger] . identifier[debug] ( literal[string] , identifier[archive] )
keyword[for] identifier[message] keyword[in] identifier[self] . identifier[parse_supybot_log] ( identifier[archive] ):
identifier[dt] = identifier[str_to_datetime] ( identifier[message] [ literal[string] ])
keyword[if] identifier[dt] < identifier[from_date] :
identifier[logger] . identifier[debug] ( literal[string] ,
identifier[str] ( identifier[dt] ), identifier[str] ( identifier[from_date] ))
keyword[continue]
keyword[yield] identifier[message]
identifier[nmessages] += literal[int]
identifier[logger] . identifier[info] ( literal[string] ,
identifier[nmessages] ) | def fetch_items(self, category, **kwargs):
"""Fetch the messages
:param category: the category of items to fetch
:param kwargs: backend arguments
:returns: a generator of items
"""
from_date = kwargs['from_date']
logger.info("Fetching messages of '%s' from %s", self.uri, str(from_date))
nmessages = 0
archives = self.__retrieve_archives(from_date)
for archive in archives:
logger.debug('Parsing supybot archive %s', archive)
for message in self.parse_supybot_log(archive):
dt = str_to_datetime(message['timestamp'])
if dt < from_date:
logger.debug('Message %s sent before %s; skipped', str(dt), str(from_date))
continue # depends on [control=['if'], data=['dt', 'from_date']]
yield message
nmessages += 1 # depends on [control=['for'], data=['message']] # depends on [control=['for'], data=['archive']]
logger.info('Fetch process completed: %s messages fetched', nmessages) |
def run(self):
"""The actual event loop.
Calls the ``owner``'s :py:meth:`~Component.start_event` method,
then calls its :py:meth:`~Component.new_frame_event` and
:py:meth:`~Component.new_config_event` methods as required until
:py:meth:`~Component.stop` is called. Finally the ``owner``'s
:py:meth:`~Component.stop_event` method is called before the
thread terminates.
"""
try:
self.owner.start_event()
while True:
while not self.incoming:
time.sleep(0.01)
while self.incoming:
command = self.incoming.popleft()
if command is None:
raise StopIteration()
command()
except StopIteration:
pass
self.owner.stop_event() | def function[run, parameter[self]]:
constant[The actual event loop.
Calls the ``owner``'s :py:meth:`~Component.start_event` method,
then calls its :py:meth:`~Component.new_frame_event` and
:py:meth:`~Component.new_config_event` methods as required until
:py:meth:`~Component.stop` is called. Finally the ``owner``'s
:py:meth:`~Component.stop_event` method is called before the
thread terminates.
]
<ast.Try object at 0x7da1b259d390>
call[name[self].owner.stop_event, parameter[]] | keyword[def] identifier[run] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[self] . identifier[owner] . identifier[start_event] ()
keyword[while] keyword[True] :
keyword[while] keyword[not] identifier[self] . identifier[incoming] :
identifier[time] . identifier[sleep] ( literal[int] )
keyword[while] identifier[self] . identifier[incoming] :
identifier[command] = identifier[self] . identifier[incoming] . identifier[popleft] ()
keyword[if] identifier[command] keyword[is] keyword[None] :
keyword[raise] identifier[StopIteration] ()
identifier[command] ()
keyword[except] identifier[StopIteration] :
keyword[pass]
identifier[self] . identifier[owner] . identifier[stop_event] () | def run(self):
"""The actual event loop.
Calls the ``owner``'s :py:meth:`~Component.start_event` method,
then calls its :py:meth:`~Component.new_frame_event` and
:py:meth:`~Component.new_config_event` methods as required until
:py:meth:`~Component.stop` is called. Finally the ``owner``'s
:py:meth:`~Component.stop_event` method is called before the
thread terminates.
"""
try:
self.owner.start_event()
while True:
while not self.incoming:
time.sleep(0.01) # depends on [control=['while'], data=[]]
while self.incoming:
command = self.incoming.popleft()
if command is None:
raise StopIteration() # depends on [control=['if'], data=[]]
command() # depends on [control=['while'], data=[]] # depends on [control=['while'], data=[]] # depends on [control=['try'], data=[]]
except StopIteration:
pass # depends on [control=['except'], data=[]]
self.owner.stop_event() |
def std(arg, where=None, how='sample'):
"""
Compute standard deviation of numeric array
Parameters
----------
how : {'sample', 'pop'}, default 'sample'
Returns
-------
stdev : double scalar
"""
expr = ops.StandardDev(arg, how, where).to_expr()
expr = expr.name('std')
return expr | def function[std, parameter[arg, where, how]]:
constant[
Compute standard deviation of numeric array
Parameters
----------
how : {'sample', 'pop'}, default 'sample'
Returns
-------
stdev : double scalar
]
variable[expr] assign[=] call[call[name[ops].StandardDev, parameter[name[arg], name[how], name[where]]].to_expr, parameter[]]
variable[expr] assign[=] call[name[expr].name, parameter[constant[std]]]
return[name[expr]] | keyword[def] identifier[std] ( identifier[arg] , identifier[where] = keyword[None] , identifier[how] = literal[string] ):
literal[string]
identifier[expr] = identifier[ops] . identifier[StandardDev] ( identifier[arg] , identifier[how] , identifier[where] ). identifier[to_expr] ()
identifier[expr] = identifier[expr] . identifier[name] ( literal[string] )
keyword[return] identifier[expr] | def std(arg, where=None, how='sample'):
"""
Compute standard deviation of numeric array
Parameters
----------
how : {'sample', 'pop'}, default 'sample'
Returns
-------
stdev : double scalar
"""
expr = ops.StandardDev(arg, how, where).to_expr()
expr = expr.name('std')
return expr |
def search(self, **kwargs):
"""
Method to search ipv4's based on extends search.
:param search: Dict containing QuerySets to find ipv4's.
:param include: Array containing fields to include on response.
:param exclude: Array containing fields to exclude on response.
:param fields: Array containing fields to override default fields.
:param kind: Determine if result will be detailed ('detail') or basic ('basic').
:return: Dict containing ipv4's
"""
return super(ApiV4IPv4, self).get(self.prepare_url('api/v4/ipv4/',
kwargs)) | def function[search, parameter[self]]:
constant[
Method to search ipv4's based on extends search.
:param search: Dict containing QuerySets to find ipv4's.
:param include: Array containing fields to include on response.
:param exclude: Array containing fields to exclude on response.
:param fields: Array containing fields to override default fields.
:param kind: Determine if result will be detailed ('detail') or basic ('basic').
:return: Dict containing ipv4's
]
return[call[call[name[super], parameter[name[ApiV4IPv4], name[self]]].get, parameter[call[name[self].prepare_url, parameter[constant[api/v4/ipv4/], name[kwargs]]]]]] | keyword[def] identifier[search] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[super] ( identifier[ApiV4IPv4] , identifier[self] ). identifier[get] ( identifier[self] . identifier[prepare_url] ( literal[string] ,
identifier[kwargs] )) | def search(self, **kwargs):
"""
Method to search ipv4's based on extends search.
:param search: Dict containing QuerySets to find ipv4's.
:param include: Array containing fields to include on response.
:param exclude: Array containing fields to exclude on response.
:param fields: Array containing fields to override default fields.
:param kind: Determine if result will be detailed ('detail') or basic ('basic').
:return: Dict containing ipv4's
"""
return super(ApiV4IPv4, self).get(self.prepare_url('api/v4/ipv4/', kwargs)) |
def create_hooks(config: dict, model: AbstractModel,
dataset: AbstractDataset, output_dir: str) -> Iterable[AbstractHook]:
"""
Create hooks specified in ``config['hooks']`` list.
Hook config entries may be one of the following types:
.. code-block:: yaml
:caption: A hook with default args specified only by its name as a string; e.g.
hooks:
- LogVariables
- cxflow_tensorflow.WriteTensorBoard
.. code-block:: yaml
:caption: A hook with custom args as a dict name -> args; e.g.
hooks:
- StopAfter:
n_epochs: 10
:param config: config dict
:param model: model object to be passed to the hooks
:param dataset: dataset object to be passed to hooks
:param output_dir: training output dir available to the hooks
:return: list of hook objects
"""
logging.info('Creating hooks')
hooks = []
if 'hooks' in config:
for hook_config in config['hooks']:
if isinstance(hook_config, str):
hook_config = {hook_config: {}}
assert len(hook_config) == 1, 'Hook configuration must have exactly one key (fully qualified name).'
hook_path, hook_params = next(iter(hook_config.items()))
if hook_params is None:
logging.warning('\t\t Empty config of `%s` hook', hook_path)
hook_params = {}
# workaround for ruamel.yaml expansion bug; see #222
hook_params = dict(hook_params.items())
hook_module, hook_class = parse_fully_qualified_name(hook_path)
# find the hook module if not specified
if hook_module is None:
hook_module = get_class_module(CXF_HOOKS_MODULE, hook_class)
logging.debug('\tFound hook module `%s` for class `%s`', hook_module, hook_class)
if hook_module is None:
raise ValueError('Can`t find hook module for hook class `{}`. '
'Make sure it is defined under `{}` sub-modules.'
.format(hook_class, CXF_HOOKS_MODULE))
# create hook kwargs
hook_kwargs = {'dataset': dataset, 'model': model, 'output_dir': output_dir, **hook_params}
# create new hook
try:
hook = create_object(hook_module, hook_class, kwargs=hook_kwargs)
hooks.append(hook)
logging.info('\t%s created', type(hooks[-1]).__name__)
except (ValueError, KeyError, TypeError, NameError, AttributeError, AssertionError, ImportError) as ex:
logging.error('\tFailed to create a hook from config `%s`', hook_config)
raise ex
return hooks | def function[create_hooks, parameter[config, model, dataset, output_dir]]:
constant[
Create hooks specified in ``config['hooks']`` list.
Hook config entries may be one of the following types:
.. code-block:: yaml
:caption: A hook with default args specified only by its name as a string; e.g.
hooks:
- LogVariables
- cxflow_tensorflow.WriteTensorBoard
.. code-block:: yaml
:caption: A hook with custom args as a dict name -> args; e.g.
hooks:
- StopAfter:
n_epochs: 10
:param config: config dict
:param model: model object to be passed to the hooks
:param dataset: dataset object to be passed to hooks
:param output_dir: training output dir available to the hooks
:return: list of hook objects
]
call[name[logging].info, parameter[constant[Creating hooks]]]
variable[hooks] assign[=] list[[]]
if compare[constant[hooks] in name[config]] begin[:]
for taget[name[hook_config]] in starred[call[name[config]][constant[hooks]]] begin[:]
if call[name[isinstance], parameter[name[hook_config], name[str]]] begin[:]
variable[hook_config] assign[=] dictionary[[<ast.Name object at 0x7da18dc9a1d0>], [<ast.Dict object at 0x7da18dc9ab60>]]
assert[compare[call[name[len], parameter[name[hook_config]]] equal[==] constant[1]]]
<ast.Tuple object at 0x7da18dc9a470> assign[=] call[name[next], parameter[call[name[iter], parameter[call[name[hook_config].items, parameter[]]]]]]
if compare[name[hook_params] is constant[None]] begin[:]
call[name[logging].warning, parameter[constant[ Empty config of `%s` hook], name[hook_path]]]
variable[hook_params] assign[=] dictionary[[], []]
variable[hook_params] assign[=] call[name[dict], parameter[call[name[hook_params].items, parameter[]]]]
<ast.Tuple object at 0x7da204564a90> assign[=] call[name[parse_fully_qualified_name], parameter[name[hook_path]]]
if compare[name[hook_module] is constant[None]] begin[:]
variable[hook_module] assign[=] call[name[get_class_module], parameter[name[CXF_HOOKS_MODULE], name[hook_class]]]
call[name[logging].debug, parameter[constant[ Found hook module `%s` for class `%s`], name[hook_module], name[hook_class]]]
if compare[name[hook_module] is constant[None]] begin[:]
<ast.Raise object at 0x7da18dc999c0>
variable[hook_kwargs] assign[=] dictionary[[<ast.Constant object at 0x7da18dc9b550>, <ast.Constant object at 0x7da18dc98c10>, <ast.Constant object at 0x7da18dc98c40>, None], [<ast.Name object at 0x7da18dc99c90>, <ast.Name object at 0x7da18dc9bb50>, <ast.Name object at 0x7da18dc98190>, <ast.Name object at 0x7da18dc99420>]]
<ast.Try object at 0x7da18dc9a380>
return[name[hooks]] | keyword[def] identifier[create_hooks] ( identifier[config] : identifier[dict] , identifier[model] : identifier[AbstractModel] ,
identifier[dataset] : identifier[AbstractDataset] , identifier[output_dir] : identifier[str] )-> identifier[Iterable] [ identifier[AbstractHook] ]:
literal[string]
identifier[logging] . identifier[info] ( literal[string] )
identifier[hooks] =[]
keyword[if] literal[string] keyword[in] identifier[config] :
keyword[for] identifier[hook_config] keyword[in] identifier[config] [ literal[string] ]:
keyword[if] identifier[isinstance] ( identifier[hook_config] , identifier[str] ):
identifier[hook_config] ={ identifier[hook_config] :{}}
keyword[assert] identifier[len] ( identifier[hook_config] )== literal[int] , literal[string]
identifier[hook_path] , identifier[hook_params] = identifier[next] ( identifier[iter] ( identifier[hook_config] . identifier[items] ()))
keyword[if] identifier[hook_params] keyword[is] keyword[None] :
identifier[logging] . identifier[warning] ( literal[string] , identifier[hook_path] )
identifier[hook_params] ={}
identifier[hook_params] = identifier[dict] ( identifier[hook_params] . identifier[items] ())
identifier[hook_module] , identifier[hook_class] = identifier[parse_fully_qualified_name] ( identifier[hook_path] )
keyword[if] identifier[hook_module] keyword[is] keyword[None] :
identifier[hook_module] = identifier[get_class_module] ( identifier[CXF_HOOKS_MODULE] , identifier[hook_class] )
identifier[logging] . identifier[debug] ( literal[string] , identifier[hook_module] , identifier[hook_class] )
keyword[if] identifier[hook_module] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string]
. identifier[format] ( identifier[hook_class] , identifier[CXF_HOOKS_MODULE] ))
identifier[hook_kwargs] ={ literal[string] : identifier[dataset] , literal[string] : identifier[model] , literal[string] : identifier[output_dir] ,** identifier[hook_params] }
keyword[try] :
identifier[hook] = identifier[create_object] ( identifier[hook_module] , identifier[hook_class] , identifier[kwargs] = identifier[hook_kwargs] )
identifier[hooks] . identifier[append] ( identifier[hook] )
identifier[logging] . identifier[info] ( literal[string] , identifier[type] ( identifier[hooks] [- literal[int] ]). identifier[__name__] )
keyword[except] ( identifier[ValueError] , identifier[KeyError] , identifier[TypeError] , identifier[NameError] , identifier[AttributeError] , identifier[AssertionError] , identifier[ImportError] ) keyword[as] identifier[ex] :
identifier[logging] . identifier[error] ( literal[string] , identifier[hook_config] )
keyword[raise] identifier[ex]
keyword[return] identifier[hooks] | def create_hooks(config: dict, model: AbstractModel, dataset: AbstractDataset, output_dir: str) -> Iterable[AbstractHook]:
"""
Create hooks specified in ``config['hooks']`` list.
Hook config entries may be one of the following types:
.. code-block:: yaml
:caption: A hook with default args specified only by its name as a string; e.g.
hooks:
- LogVariables
- cxflow_tensorflow.WriteTensorBoard
.. code-block:: yaml
:caption: A hook with custom args as a dict name -> args; e.g.
hooks:
- StopAfter:
n_epochs: 10
:param config: config dict
:param model: model object to be passed to the hooks
:param dataset: dataset object to be passed to hooks
:param output_dir: training output dir available to the hooks
:return: list of hook objects
"""
logging.info('Creating hooks')
hooks = []
if 'hooks' in config:
for hook_config in config['hooks']:
if isinstance(hook_config, str):
hook_config = {hook_config: {}} # depends on [control=['if'], data=[]]
assert len(hook_config) == 1, 'Hook configuration must have exactly one key (fully qualified name).'
(hook_path, hook_params) = next(iter(hook_config.items()))
if hook_params is None:
logging.warning('\t\t Empty config of `%s` hook', hook_path)
hook_params = {} # depends on [control=['if'], data=['hook_params']]
# workaround for ruamel.yaml expansion bug; see #222
hook_params = dict(hook_params.items())
(hook_module, hook_class) = parse_fully_qualified_name(hook_path)
# find the hook module if not specified
if hook_module is None:
hook_module = get_class_module(CXF_HOOKS_MODULE, hook_class)
logging.debug('\tFound hook module `%s` for class `%s`', hook_module, hook_class)
if hook_module is None:
raise ValueError('Can`t find hook module for hook class `{}`. Make sure it is defined under `{}` sub-modules.'.format(hook_class, CXF_HOOKS_MODULE)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['hook_module']]
# create hook kwargs
hook_kwargs = {'dataset': dataset, 'model': model, 'output_dir': output_dir, **hook_params}
# create new hook
try:
hook = create_object(hook_module, hook_class, kwargs=hook_kwargs)
hooks.append(hook)
logging.info('\t%s created', type(hooks[-1]).__name__) # depends on [control=['try'], data=[]]
except (ValueError, KeyError, TypeError, NameError, AttributeError, AssertionError, ImportError) as ex:
logging.error('\tFailed to create a hook from config `%s`', hook_config)
raise ex # depends on [control=['except'], data=['ex']] # depends on [control=['for'], data=['hook_config']] # depends on [control=['if'], data=['config']]
return hooks |
def imsave(path, img, channel_first=False, as_uint16=False, auto_scale=True, **kwargs):
"""
Save ``img`` to the file specified by ``path``.
As default, the shape of ``img`` has to be (height, width, channel).
Args:
path (str): Output path.
img (numpy.ndarray):
Input image.
All pixel values must be positive and in the range [0, 255] of int for uint8, [0, 65535] of int for uint16 or [0, 1] for float.
When you pass float image, you must set `auto_scale` as True (If not, exception will be raised).
If img with negative values is passed as input, exception will be raised.
channel_first (bool):
If True, you can input the image whose shape is (channel, height, width). Default is False.
as_uint16 (bool):
If True, cast image to uint16 before save. Default is False.
auto_scale (bool):
Whether the range of pixel values are scaled up or not.
The range of upscaled pixel values depends on output dtype, which is [0, 255] as uint8 and [0, 65535] as uint16.
"""
backend_manager.module.imsave(
path, img, channel_first=channel_first, as_uint16=as_uint16, auto_scale=auto_scale, **kwargs) | def function[imsave, parameter[path, img, channel_first, as_uint16, auto_scale]]:
constant[
Save ``img`` to the file specified by ``path``.
As default, the shape of ``img`` has to be (height, width, channel).
Args:
path (str): Output path.
img (numpy.ndarray):
Input image.
All pixel values must be positive and in the range [0, 255] of int for uint8, [0, 65535] of int for uint16 or [0, 1] for float.
When you pass float image, you must set `auto_scale` as True (If not, exception will be raised).
If img with negative values is passed as input, exception will be raised.
channel_first (bool):
If True, you can input the image whose shape is (channel, height, width). Default is False.
as_uint16 (bool):
If True, cast image to uint16 before save. Default is False.
auto_scale (bool):
Whether the range of pixel values are scaled up or not.
The range of upscaled pixel values depends on output dtype, which is [0, 255] as uint8 and [0, 65535] as uint16.
]
call[name[backend_manager].module.imsave, parameter[name[path], name[img]]] | keyword[def] identifier[imsave] ( identifier[path] , identifier[img] , identifier[channel_first] = keyword[False] , identifier[as_uint16] = keyword[False] , identifier[auto_scale] = keyword[True] ,** identifier[kwargs] ):
literal[string]
identifier[backend_manager] . identifier[module] . identifier[imsave] (
identifier[path] , identifier[img] , identifier[channel_first] = identifier[channel_first] , identifier[as_uint16] = identifier[as_uint16] , identifier[auto_scale] = identifier[auto_scale] ,** identifier[kwargs] ) | def imsave(path, img, channel_first=False, as_uint16=False, auto_scale=True, **kwargs):
"""
Save ``img`` to the file specified by ``path``.
As default, the shape of ``img`` has to be (height, width, channel).
Args:
path (str): Output path.
img (numpy.ndarray):
Input image.
All pixel values must be positive and in the range [0, 255] of int for uint8, [0, 65535] of int for uint16 or [0, 1] for float.
When you pass float image, you must set `auto_scale` as True (If not, exception will be raised).
If img with negative values is passed as input, exception will be raised.
channel_first (bool):
If True, you can input the image whose shape is (channel, height, width). Default is False.
as_uint16 (bool):
If True, cast image to uint16 before save. Default is False.
auto_scale (bool):
Whether the range of pixel values are scaled up or not.
The range of upscaled pixel values depends on output dtype, which is [0, 255] as uint8 and [0, 65535] as uint16.
"""
backend_manager.module.imsave(path, img, channel_first=channel_first, as_uint16=as_uint16, auto_scale=auto_scale, **kwargs) |
def init_pid(self, kp, ki, kd):
"""Sets the PID constants for the PID modes. Arguments are all
floating point numbers."""
p, i, d = map(lambda x: int(x * (2 ** 16)), (kp, ki, kd))
self._set_as_int(Addr.PConstant, p, 4)
self._set_as_int(Addr.IConstant, i, 4)
self._set_as_int(Addr.DConstant, d, 4) | def function[init_pid, parameter[self, kp, ki, kd]]:
constant[Sets the PID constants for the PID modes. Arguments are all
floating point numbers.]
<ast.Tuple object at 0x7da18eb56680> assign[=] call[name[map], parameter[<ast.Lambda object at 0x7da18eb55b70>, tuple[[<ast.Name object at 0x7da18eb54280>, <ast.Name object at 0x7da18eb54c70>, <ast.Name object at 0x7da18eb57ca0>]]]]
call[name[self]._set_as_int, parameter[name[Addr].PConstant, name[p], constant[4]]]
call[name[self]._set_as_int, parameter[name[Addr].IConstant, name[i], constant[4]]]
call[name[self]._set_as_int, parameter[name[Addr].DConstant, name[d], constant[4]]] | keyword[def] identifier[init_pid] ( identifier[self] , identifier[kp] , identifier[ki] , identifier[kd] ):
literal[string]
identifier[p] , identifier[i] , identifier[d] = identifier[map] ( keyword[lambda] identifier[x] : identifier[int] ( identifier[x] *( literal[int] ** literal[int] )),( identifier[kp] , identifier[ki] , identifier[kd] ))
identifier[self] . identifier[_set_as_int] ( identifier[Addr] . identifier[PConstant] , identifier[p] , literal[int] )
identifier[self] . identifier[_set_as_int] ( identifier[Addr] . identifier[IConstant] , identifier[i] , literal[int] )
identifier[self] . identifier[_set_as_int] ( identifier[Addr] . identifier[DConstant] , identifier[d] , literal[int] ) | def init_pid(self, kp, ki, kd):
"""Sets the PID constants for the PID modes. Arguments are all
floating point numbers."""
(p, i, d) = map(lambda x: int(x * 2 ** 16), (kp, ki, kd))
self._set_as_int(Addr.PConstant, p, 4)
self._set_as_int(Addr.IConstant, i, 4)
self._set_as_int(Addr.DConstant, d, 4) |
def read(self, entity=None, attrs=None, ignore=None, params=None):
"""Provide a default value for ``entity``.
By default, ``nailgun.entity_mixins.EntityReadMixin.read`` provides a
default value for ``entity`` like so::
entity = type(self)()
However, :class:`SSHKey` requires that an ``user`` be
provided, so this technique will not work. Do this instead::
entity = type(self)(user=self.user.id)
"""
# read() should not change the state of the object it's called on, but
# super() alters the attributes of any entity passed in. Creating a new
# object and passing it to super() lets this one avoid changing state.
if entity is None:
entity = type(self)(
self._server_config,
user=self.user, # pylint:disable=no-member
)
if ignore is None:
ignore = set()
ignore.add('user')
return super(SSHKey, self).read(entity, attrs, ignore, params) | def function[read, parameter[self, entity, attrs, ignore, params]]:
constant[Provide a default value for ``entity``.
By default, ``nailgun.entity_mixins.EntityReadMixin.read`` provides a
default value for ``entity`` like so::
entity = type(self)()
However, :class:`SSHKey` requires that an ``user`` be
provided, so this technique will not work. Do this instead::
entity = type(self)(user=self.user.id)
]
if compare[name[entity] is constant[None]] begin[:]
variable[entity] assign[=] call[call[name[type], parameter[name[self]]], parameter[name[self]._server_config]]
if compare[name[ignore] is constant[None]] begin[:]
variable[ignore] assign[=] call[name[set], parameter[]]
call[name[ignore].add, parameter[constant[user]]]
return[call[call[name[super], parameter[name[SSHKey], name[self]]].read, parameter[name[entity], name[attrs], name[ignore], name[params]]]] | keyword[def] identifier[read] ( identifier[self] , identifier[entity] = keyword[None] , identifier[attrs] = keyword[None] , identifier[ignore] = keyword[None] , identifier[params] = keyword[None] ):
literal[string]
keyword[if] identifier[entity] keyword[is] keyword[None] :
identifier[entity] = identifier[type] ( identifier[self] )(
identifier[self] . identifier[_server_config] ,
identifier[user] = identifier[self] . identifier[user] ,
)
keyword[if] identifier[ignore] keyword[is] keyword[None] :
identifier[ignore] = identifier[set] ()
identifier[ignore] . identifier[add] ( literal[string] )
keyword[return] identifier[super] ( identifier[SSHKey] , identifier[self] ). identifier[read] ( identifier[entity] , identifier[attrs] , identifier[ignore] , identifier[params] ) | def read(self, entity=None, attrs=None, ignore=None, params=None):
"""Provide a default value for ``entity``.
By default, ``nailgun.entity_mixins.EntityReadMixin.read`` provides a
default value for ``entity`` like so::
entity = type(self)()
However, :class:`SSHKey` requires that an ``user`` be
provided, so this technique will not work. Do this instead::
entity = type(self)(user=self.user.id)
"""
# read() should not change the state of the object it's called on, but
# super() alters the attributes of any entity passed in. Creating a new
# object and passing it to super() lets this one avoid changing state.
if entity is None: # pylint:disable=no-member
entity = type(self)(self._server_config, user=self.user) # depends on [control=['if'], data=['entity']]
if ignore is None:
ignore = set() # depends on [control=['if'], data=['ignore']]
ignore.add('user')
return super(SSHKey, self).read(entity, attrs, ignore, params) |
def _Scroll(self, lines=None):
"""Set attributes to scroll the buffer correctly.
Args:
lines: An int, number of lines to scroll. If None, scrolls
by the terminal length.
"""
if lines is None:
lines = self._cli_lines
if lines < 0:
self._displayed -= self._cli_lines
self._displayed += lines
if self._displayed < 0:
self._displayed = 0
self._lines_to_show = self._cli_lines
else:
self._lines_to_show = lines
self._lastscroll = lines | def function[_Scroll, parameter[self, lines]]:
constant[Set attributes to scroll the buffer correctly.
Args:
lines: An int, number of lines to scroll. If None, scrolls
by the terminal length.
]
if compare[name[lines] is constant[None]] begin[:]
variable[lines] assign[=] name[self]._cli_lines
if compare[name[lines] less[<] constant[0]] begin[:]
<ast.AugAssign object at 0x7da1b17fb640>
<ast.AugAssign object at 0x7da1b17fb460>
if compare[name[self]._displayed less[<] constant[0]] begin[:]
name[self]._displayed assign[=] constant[0]
name[self]._lines_to_show assign[=] name[self]._cli_lines
name[self]._lastscroll assign[=] name[lines] | keyword[def] identifier[_Scroll] ( identifier[self] , identifier[lines] = keyword[None] ):
literal[string]
keyword[if] identifier[lines] keyword[is] keyword[None] :
identifier[lines] = identifier[self] . identifier[_cli_lines]
keyword[if] identifier[lines] < literal[int] :
identifier[self] . identifier[_displayed] -= identifier[self] . identifier[_cli_lines]
identifier[self] . identifier[_displayed] += identifier[lines]
keyword[if] identifier[self] . identifier[_displayed] < literal[int] :
identifier[self] . identifier[_displayed] = literal[int]
identifier[self] . identifier[_lines_to_show] = identifier[self] . identifier[_cli_lines]
keyword[else] :
identifier[self] . identifier[_lines_to_show] = identifier[lines]
identifier[self] . identifier[_lastscroll] = identifier[lines] | def _Scroll(self, lines=None):
"""Set attributes to scroll the buffer correctly.
Args:
lines: An int, number of lines to scroll. If None, scrolls
by the terminal length.
"""
if lines is None:
lines = self._cli_lines # depends on [control=['if'], data=['lines']]
if lines < 0:
self._displayed -= self._cli_lines
self._displayed += lines
if self._displayed < 0:
self._displayed = 0 # depends on [control=['if'], data=[]]
self._lines_to_show = self._cli_lines # depends on [control=['if'], data=['lines']]
else:
self._lines_to_show = lines
self._lastscroll = lines |
def list_objects(self):
''' list the entire of objects with their (id, serialized_form, actual_value) '''
for i in self._execute('select * from objects'):
_id, code = i
yield _id, code, self.deserialize(code) | def function[list_objects, parameter[self]]:
constant[ list the entire of objects with their (id, serialized_form, actual_value) ]
for taget[name[i]] in starred[call[name[self]._execute, parameter[constant[select * from objects]]]] begin[:]
<ast.Tuple object at 0x7da20c76eb60> assign[=] name[i]
<ast.Yield object at 0x7da20c76e1a0> | keyword[def] identifier[list_objects] ( identifier[self] ):
literal[string]
keyword[for] identifier[i] keyword[in] identifier[self] . identifier[_execute] ( literal[string] ):
identifier[_id] , identifier[code] = identifier[i]
keyword[yield] identifier[_id] , identifier[code] , identifier[self] . identifier[deserialize] ( identifier[code] ) | def list_objects(self):
""" list the entire of objects with their (id, serialized_form, actual_value) """
for i in self._execute('select * from objects'):
(_id, code) = i
yield (_id, code, self.deserialize(code)) # depends on [control=['for'], data=['i']] |
def path_ok(match_tuple: MatchTuple) -> bool:
"""Check if a file in this repository exists."""
relative_path = match_tuple.link.split("#")[0]
full_path = os.path.join(os.path.dirname(str(match_tuple.source)), relative_path)
return os.path.exists(full_path) | def function[path_ok, parameter[match_tuple]]:
constant[Check if a file in this repository exists.]
variable[relative_path] assign[=] call[call[name[match_tuple].link.split, parameter[constant[#]]]][constant[0]]
variable[full_path] assign[=] call[name[os].path.join, parameter[call[name[os].path.dirname, parameter[call[name[str], parameter[name[match_tuple].source]]]], name[relative_path]]]
return[call[name[os].path.exists, parameter[name[full_path]]]] | keyword[def] identifier[path_ok] ( identifier[match_tuple] : identifier[MatchTuple] )-> identifier[bool] :
literal[string]
identifier[relative_path] = identifier[match_tuple] . identifier[link] . identifier[split] ( literal[string] )[ literal[int] ]
identifier[full_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[str] ( identifier[match_tuple] . identifier[source] )), identifier[relative_path] )
keyword[return] identifier[os] . identifier[path] . identifier[exists] ( identifier[full_path] ) | def path_ok(match_tuple: MatchTuple) -> bool:
"""Check if a file in this repository exists."""
relative_path = match_tuple.link.split('#')[0]
full_path = os.path.join(os.path.dirname(str(match_tuple.source)), relative_path)
return os.path.exists(full_path) |
def pink(N, depth=80):
"""
N-length vector with (approximate) pink noise
pink noise has 1/f PSD
"""
a = []
s = iterpink(depth)
for n in range(N):
a.append(next(s))
return a | def function[pink, parameter[N, depth]]:
constant[
N-length vector with (approximate) pink noise
pink noise has 1/f PSD
]
variable[a] assign[=] list[[]]
variable[s] assign[=] call[name[iterpink], parameter[name[depth]]]
for taget[name[n]] in starred[call[name[range], parameter[name[N]]]] begin[:]
call[name[a].append, parameter[call[name[next], parameter[name[s]]]]]
return[name[a]] | keyword[def] identifier[pink] ( identifier[N] , identifier[depth] = literal[int] ):
literal[string]
identifier[a] =[]
identifier[s] = identifier[iterpink] ( identifier[depth] )
keyword[for] identifier[n] keyword[in] identifier[range] ( identifier[N] ):
identifier[a] . identifier[append] ( identifier[next] ( identifier[s] ))
keyword[return] identifier[a] | def pink(N, depth=80):
"""
N-length vector with (approximate) pink noise
pink noise has 1/f PSD
"""
a = []
s = iterpink(depth)
for n in range(N):
a.append(next(s)) # depends on [control=['for'], data=[]]
return a |
def slice_t(start, until):
"""
Transformation for Sequence.slice
:param start: start index
:param until: until index (does not include element at until)
:return: transformation
"""
return Transformation(
'slice({0}, {1})'.format(start, until),
lambda sequence: islice(sequence, start, until),
None
) | def function[slice_t, parameter[start, until]]:
constant[
Transformation for Sequence.slice
:param start: start index
:param until: until index (does not include element at until)
:return: transformation
]
return[call[name[Transformation], parameter[call[constant[slice({0}, {1})].format, parameter[name[start], name[until]]], <ast.Lambda object at 0x7da204960ee0>, constant[None]]]] | keyword[def] identifier[slice_t] ( identifier[start] , identifier[until] ):
literal[string]
keyword[return] identifier[Transformation] (
literal[string] . identifier[format] ( identifier[start] , identifier[until] ),
keyword[lambda] identifier[sequence] : identifier[islice] ( identifier[sequence] , identifier[start] , identifier[until] ),
keyword[None]
) | def slice_t(start, until):
"""
Transformation for Sequence.slice
:param start: start index
:param until: until index (does not include element at until)
:return: transformation
"""
return Transformation('slice({0}, {1})'.format(start, until), lambda sequence: islice(sequence, start, until), None) |
def refer(self, text):
"""Refers current message and replys a new message
Args:
text(str): message content
Returns:
RTMMessage
"""
data = self.reply(text)
data['refer_key'] = self['key']
return data | def function[refer, parameter[self, text]]:
constant[Refers current message and replys a new message
Args:
text(str): message content
Returns:
RTMMessage
]
variable[data] assign[=] call[name[self].reply, parameter[name[text]]]
call[name[data]][constant[refer_key]] assign[=] call[name[self]][constant[key]]
return[name[data]] | keyword[def] identifier[refer] ( identifier[self] , identifier[text] ):
literal[string]
identifier[data] = identifier[self] . identifier[reply] ( identifier[text] )
identifier[data] [ literal[string] ]= identifier[self] [ literal[string] ]
keyword[return] identifier[data] | def refer(self, text):
"""Refers current message and replys a new message
Args:
text(str): message content
Returns:
RTMMessage
"""
data = self.reply(text)
data['refer_key'] = self['key']
return data |
def show_idle_pc_prop(self):
"""
Dumps the idle PC proposals (previously generated).
:returns: list of idle PC proposal
"""
is_running = yield from self.is_running()
if not is_running:
# router is not running
raise DynamipsError('Router "{name}" is not running'.format(name=self._name))
proposals = yield from self._hypervisor.send('vm show_idle_pc_prop "{}" 0'.format(self._name))
return proposals | def function[show_idle_pc_prop, parameter[self]]:
constant[
Dumps the idle PC proposals (previously generated).
:returns: list of idle PC proposal
]
variable[is_running] assign[=] <ast.YieldFrom object at 0x7da18eb546d0>
if <ast.UnaryOp object at 0x7da20c990b20> begin[:]
<ast.Raise object at 0x7da20c990940>
variable[proposals] assign[=] <ast.YieldFrom object at 0x7da20c990550>
return[name[proposals]] | keyword[def] identifier[show_idle_pc_prop] ( identifier[self] ):
literal[string]
identifier[is_running] = keyword[yield] keyword[from] identifier[self] . identifier[is_running] ()
keyword[if] keyword[not] identifier[is_running] :
keyword[raise] identifier[DynamipsError] ( literal[string] . identifier[format] ( identifier[name] = identifier[self] . identifier[_name] ))
identifier[proposals] = keyword[yield] keyword[from] identifier[self] . identifier[_hypervisor] . identifier[send] ( literal[string] . identifier[format] ( identifier[self] . identifier[_name] ))
keyword[return] identifier[proposals] | def show_idle_pc_prop(self):
"""
Dumps the idle PC proposals (previously generated).
:returns: list of idle PC proposal
"""
is_running = (yield from self.is_running())
if not is_running:
# router is not running
raise DynamipsError('Router "{name}" is not running'.format(name=self._name)) # depends on [control=['if'], data=[]]
proposals = (yield from self._hypervisor.send('vm show_idle_pc_prop "{}" 0'.format(self._name)))
return proposals |
def draw_if_interactive():
"""
Is called after every pylab drawing command
"""
# signal that the current active figure should be sent at the end of
# execution. Also sets the _draw_called flag, signaling that there will be
# something to send. At the end of the code execution, a separate call to
# flush_figures() will act upon these values
fig = Gcf.get_active().canvas.figure
# Hack: matplotlib FigureManager objects in interacive backends (at least
# in some of them) monkeypatch the figure object and add a .show() method
# to it. This applies the same monkeypatch in order to support user code
# that might expect `.show()` to be part of the official API of figure
# objects.
# For further reference:
# https://github.com/ipython/ipython/issues/1612
# https://github.com/matplotlib/matplotlib/issues/835
if not hasattr(fig, 'show'):
# Queue up `fig` for display
fig.show = lambda *a: send_figure(fig)
# If matplotlib was manually set to non-interactive mode, this function
# should be a no-op (otherwise we'll generate duplicate plots, since a user
# who set ioff() manually expects to make separate draw/show calls).
if not matplotlib.is_interactive():
return
# ensure current figure will be drawn, and each subsequent call
# of draw_if_interactive() moves the active figure to ensure it is
# drawn last
try:
show._to_draw.remove(fig)
except ValueError:
# ensure it only appears in the draw list once
pass
# Queue up the figure for drawing in next show() call
show._to_draw.append(fig)
show._draw_called = True | def function[draw_if_interactive, parameter[]]:
constant[
Is called after every pylab drawing command
]
variable[fig] assign[=] call[name[Gcf].get_active, parameter[]].canvas.figure
if <ast.UnaryOp object at 0x7da1b021e2c0> begin[:]
name[fig].show assign[=] <ast.Lambda object at 0x7da1b021d930>
if <ast.UnaryOp object at 0x7da1b021d480> begin[:]
return[None]
<ast.Try object at 0x7da1b021d540>
call[name[show]._to_draw.append, parameter[name[fig]]]
name[show]._draw_called assign[=] constant[True] | keyword[def] identifier[draw_if_interactive] ():
literal[string]
identifier[fig] = identifier[Gcf] . identifier[get_active] (). identifier[canvas] . identifier[figure]
keyword[if] keyword[not] identifier[hasattr] ( identifier[fig] , literal[string] ):
identifier[fig] . identifier[show] = keyword[lambda] * identifier[a] : identifier[send_figure] ( identifier[fig] )
keyword[if] keyword[not] identifier[matplotlib] . identifier[is_interactive] ():
keyword[return]
keyword[try] :
identifier[show] . identifier[_to_draw] . identifier[remove] ( identifier[fig] )
keyword[except] identifier[ValueError] :
keyword[pass]
identifier[show] . identifier[_to_draw] . identifier[append] ( identifier[fig] )
identifier[show] . identifier[_draw_called] = keyword[True] | def draw_if_interactive():
"""
Is called after every pylab drawing command
"""
# signal that the current active figure should be sent at the end of
# execution. Also sets the _draw_called flag, signaling that there will be
# something to send. At the end of the code execution, a separate call to
# flush_figures() will act upon these values
fig = Gcf.get_active().canvas.figure
# Hack: matplotlib FigureManager objects in interacive backends (at least
# in some of them) monkeypatch the figure object and add a .show() method
# to it. This applies the same monkeypatch in order to support user code
# that might expect `.show()` to be part of the official API of figure
# objects.
# For further reference:
# https://github.com/ipython/ipython/issues/1612
# https://github.com/matplotlib/matplotlib/issues/835
if not hasattr(fig, 'show'):
# Queue up `fig` for display
fig.show = lambda *a: send_figure(fig) # depends on [control=['if'], data=[]]
# If matplotlib was manually set to non-interactive mode, this function
# should be a no-op (otherwise we'll generate duplicate plots, since a user
# who set ioff() manually expects to make separate draw/show calls).
if not matplotlib.is_interactive():
return # depends on [control=['if'], data=[]]
# ensure current figure will be drawn, and each subsequent call
# of draw_if_interactive() moves the active figure to ensure it is
# drawn last
try:
show._to_draw.remove(fig) # depends on [control=['try'], data=[]]
except ValueError:
# ensure it only appears in the draw list once
pass # depends on [control=['except'], data=[]]
# Queue up the figure for drawing in next show() call
show._to_draw.append(fig)
show._draw_called = True |
def anisotropy(self):
"""
Returns:
(float) Coefficient of Variation from weighted surface energy
The ideal sphere is 0.
"""
square_diff_energy = 0
weighted_energy = self.weighted_surface_energy
area_frac_dict = self.area_fraction_dict
miller_energy_dict = self.miller_energy_dict
for hkl in miller_energy_dict.keys():
square_diff_energy += (miller_energy_dict[hkl] - weighted_energy)\
** 2 * area_frac_dict[hkl]
return np.sqrt(square_diff_energy) / weighted_energy | def function[anisotropy, parameter[self]]:
constant[
Returns:
(float) Coefficient of Variation from weighted surface energy
The ideal sphere is 0.
]
variable[square_diff_energy] assign[=] constant[0]
variable[weighted_energy] assign[=] name[self].weighted_surface_energy
variable[area_frac_dict] assign[=] name[self].area_fraction_dict
variable[miller_energy_dict] assign[=] name[self].miller_energy_dict
for taget[name[hkl]] in starred[call[name[miller_energy_dict].keys, parameter[]]] begin[:]
<ast.AugAssign object at 0x7da1b1cd7760>
return[binary_operation[call[name[np].sqrt, parameter[name[square_diff_energy]]] / name[weighted_energy]]] | keyword[def] identifier[anisotropy] ( identifier[self] ):
literal[string]
identifier[square_diff_energy] = literal[int]
identifier[weighted_energy] = identifier[self] . identifier[weighted_surface_energy]
identifier[area_frac_dict] = identifier[self] . identifier[area_fraction_dict]
identifier[miller_energy_dict] = identifier[self] . identifier[miller_energy_dict]
keyword[for] identifier[hkl] keyword[in] identifier[miller_energy_dict] . identifier[keys] ():
identifier[square_diff_energy] +=( identifier[miller_energy_dict] [ identifier[hkl] ]- identifier[weighted_energy] )** literal[int] * identifier[area_frac_dict] [ identifier[hkl] ]
keyword[return] identifier[np] . identifier[sqrt] ( identifier[square_diff_energy] )/ identifier[weighted_energy] | def anisotropy(self):
"""
Returns:
(float) Coefficient of Variation from weighted surface energy
The ideal sphere is 0.
"""
square_diff_energy = 0
weighted_energy = self.weighted_surface_energy
area_frac_dict = self.area_fraction_dict
miller_energy_dict = self.miller_energy_dict
for hkl in miller_energy_dict.keys():
square_diff_energy += (miller_energy_dict[hkl] - weighted_energy) ** 2 * area_frac_dict[hkl] # depends on [control=['for'], data=['hkl']]
return np.sqrt(square_diff_energy) / weighted_energy |
def cee_map_priority_table_map_cos0_pgid(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
cee_map = ET.SubElement(config, "cee-map", xmlns="urn:brocade.com:mgmt:brocade-cee-map")
name_key = ET.SubElement(cee_map, "name")
name_key.text = kwargs.pop('name')
priority_table = ET.SubElement(cee_map, "priority-table")
map_cos0_pgid = ET.SubElement(priority_table, "map-cos0-pgid")
map_cos0_pgid.text = kwargs.pop('map_cos0_pgid')
callback = kwargs.pop('callback', self._callback)
return callback(config) | def function[cee_map_priority_table_map_cos0_pgid, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[cee_map] assign[=] call[name[ET].SubElement, parameter[name[config], constant[cee-map]]]
variable[name_key] assign[=] call[name[ET].SubElement, parameter[name[cee_map], constant[name]]]
name[name_key].text assign[=] call[name[kwargs].pop, parameter[constant[name]]]
variable[priority_table] assign[=] call[name[ET].SubElement, parameter[name[cee_map], constant[priority-table]]]
variable[map_cos0_pgid] assign[=] call[name[ET].SubElement, parameter[name[priority_table], constant[map-cos0-pgid]]]
name[map_cos0_pgid].text assign[=] call[name[kwargs].pop, parameter[constant[map_cos0_pgid]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[cee_map_priority_table_map_cos0_pgid] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[cee_map] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] )
identifier[name_key] = identifier[ET] . identifier[SubElement] ( identifier[cee_map] , literal[string] )
identifier[name_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[priority_table] = identifier[ET] . identifier[SubElement] ( identifier[cee_map] , literal[string] )
identifier[map_cos0_pgid] = identifier[ET] . identifier[SubElement] ( identifier[priority_table] , literal[string] )
identifier[map_cos0_pgid] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def cee_map_priority_table_map_cos0_pgid(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
cee_map = ET.SubElement(config, 'cee-map', xmlns='urn:brocade.com:mgmt:brocade-cee-map')
name_key = ET.SubElement(cee_map, 'name')
name_key.text = kwargs.pop('name')
priority_table = ET.SubElement(cee_map, 'priority-table')
map_cos0_pgid = ET.SubElement(priority_table, 'map-cos0-pgid')
map_cos0_pgid.text = kwargs.pop('map_cos0_pgid')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def to_google(self, type, label, issuer, counter=None):
"""Generate the otpauth protocal string for Google Authenticator.
.. deprecated:: 0.2.0
Use :func:`to_uri` instead.
"""
warnings.warn('deprecated, use to_uri instead', DeprecationWarning)
return self.to_uri(type, label, issuer, counter) | def function[to_google, parameter[self, type, label, issuer, counter]]:
constant[Generate the otpauth protocal string for Google Authenticator.
.. deprecated:: 0.2.0
Use :func:`to_uri` instead.
]
call[name[warnings].warn, parameter[constant[deprecated, use to_uri instead], name[DeprecationWarning]]]
return[call[name[self].to_uri, parameter[name[type], name[label], name[issuer], name[counter]]]] | keyword[def] identifier[to_google] ( identifier[self] , identifier[type] , identifier[label] , identifier[issuer] , identifier[counter] = keyword[None] ):
literal[string]
identifier[warnings] . identifier[warn] ( literal[string] , identifier[DeprecationWarning] )
keyword[return] identifier[self] . identifier[to_uri] ( identifier[type] , identifier[label] , identifier[issuer] , identifier[counter] ) | def to_google(self, type, label, issuer, counter=None):
"""Generate the otpauth protocal string for Google Authenticator.
.. deprecated:: 0.2.0
Use :func:`to_uri` instead.
"""
warnings.warn('deprecated, use to_uri instead', DeprecationWarning)
return self.to_uri(type, label, issuer, counter) |
def _put_input(self, input_source: str) -> None:
"""Put input string to ffmpeg command."""
input_cmd = shlex.split(str(input_source))
if len(input_cmd) > 1:
self._argv.extend(input_cmd)
else:
self._argv.extend(["-i", input_source]) | def function[_put_input, parameter[self, input_source]]:
constant[Put input string to ffmpeg command.]
variable[input_cmd] assign[=] call[name[shlex].split, parameter[call[name[str], parameter[name[input_source]]]]]
if compare[call[name[len], parameter[name[input_cmd]]] greater[>] constant[1]] begin[:]
call[name[self]._argv.extend, parameter[name[input_cmd]]] | keyword[def] identifier[_put_input] ( identifier[self] , identifier[input_source] : identifier[str] )-> keyword[None] :
literal[string]
identifier[input_cmd] = identifier[shlex] . identifier[split] ( identifier[str] ( identifier[input_source] ))
keyword[if] identifier[len] ( identifier[input_cmd] )> literal[int] :
identifier[self] . identifier[_argv] . identifier[extend] ( identifier[input_cmd] )
keyword[else] :
identifier[self] . identifier[_argv] . identifier[extend] ([ literal[string] , identifier[input_source] ]) | def _put_input(self, input_source: str) -> None:
"""Put input string to ffmpeg command."""
input_cmd = shlex.split(str(input_source))
if len(input_cmd) > 1:
self._argv.extend(input_cmd) # depends on [control=['if'], data=[]]
else:
self._argv.extend(['-i', input_source]) |
def get_applications(self):
"""Return the list of supported applications."""
if ('applications' not in self.data or
'gecko' not in self.data['applications']):
return []
app = self.data['applications']['gecko']
min_version = app.get('strict_min_version', u'42.0')
max_version = app.get('strict_max_version', u'*')
return [{u'guid': FIREFOX_GUID,
u'min_version': min_version,
u'max_version': max_version}] | def function[get_applications, parameter[self]]:
constant[Return the list of supported applications.]
if <ast.BoolOp object at 0x7da1b1b87eb0> begin[:]
return[list[[]]]
variable[app] assign[=] call[call[name[self].data][constant[applications]]][constant[gecko]]
variable[min_version] assign[=] call[name[app].get, parameter[constant[strict_min_version], constant[42.0]]]
variable[max_version] assign[=] call[name[app].get, parameter[constant[strict_max_version], constant[*]]]
return[list[[<ast.Dict object at 0x7da20e956ad0>]]] | keyword[def] identifier[get_applications] ( identifier[self] ):
literal[string]
keyword[if] ( literal[string] keyword[not] keyword[in] identifier[self] . identifier[data] keyword[or]
literal[string] keyword[not] keyword[in] identifier[self] . identifier[data] [ literal[string] ]):
keyword[return] []
identifier[app] = identifier[self] . identifier[data] [ literal[string] ][ literal[string] ]
identifier[min_version] = identifier[app] . identifier[get] ( literal[string] , literal[string] )
identifier[max_version] = identifier[app] . identifier[get] ( literal[string] , literal[string] )
keyword[return] [{ literal[string] : identifier[FIREFOX_GUID] ,
literal[string] : identifier[min_version] ,
literal[string] : identifier[max_version] }] | def get_applications(self):
"""Return the list of supported applications."""
if 'applications' not in self.data or 'gecko' not in self.data['applications']:
return [] # depends on [control=['if'], data=[]]
app = self.data['applications']['gecko']
min_version = app.get('strict_min_version', u'42.0')
max_version = app.get('strict_max_version', u'*')
return [{u'guid': FIREFOX_GUID, u'min_version': min_version, u'max_version': max_version}] |
def get_deps(self):
"""Grap package requirements from repositories
"""
if self.repo == "rlw":
dependencies = {}
rlw_deps = Utils().read_file(_meta_.conf_path + "rlworkman.deps")
for line in rlw_deps.splitlines():
if line and not line.startswith("#"):
pkgs = line.split(":")
dependencies[pkgs[0]] = pkgs[1]
if self.name in dependencies.keys():
return dependencies[self.name].split()
else:
return ""
else:
PACKAGES_TXT = Utils().read_file("{0}{1}_repo/PACKAGES.TXT".format(
_meta_.lib_path, self.repo))
for line in PACKAGES_TXT.splitlines():
if line.startswith("PACKAGE NAME:"):
pkg_name = split_package(line[14:].strip())[0]
if line.startswith("PACKAGE REQUIRED:"):
if pkg_name == self.name:
if line[18:].strip():
return self._req_fix(line) | def function[get_deps, parameter[self]]:
constant[Grap package requirements from repositories
]
if compare[name[self].repo equal[==] constant[rlw]] begin[:]
variable[dependencies] assign[=] dictionary[[], []]
variable[rlw_deps] assign[=] call[call[name[Utils], parameter[]].read_file, parameter[binary_operation[name[_meta_].conf_path + constant[rlworkman.deps]]]]
for taget[name[line]] in starred[call[name[rlw_deps].splitlines, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da1b282b520> begin[:]
variable[pkgs] assign[=] call[name[line].split, parameter[constant[:]]]
call[name[dependencies]][call[name[pkgs]][constant[0]]] assign[=] call[name[pkgs]][constant[1]]
if compare[name[self].name in call[name[dependencies].keys, parameter[]]] begin[:]
return[call[call[name[dependencies]][name[self].name].split, parameter[]]] | keyword[def] identifier[get_deps] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[repo] == literal[string] :
identifier[dependencies] ={}
identifier[rlw_deps] = identifier[Utils] (). identifier[read_file] ( identifier[_meta_] . identifier[conf_path] + literal[string] )
keyword[for] identifier[line] keyword[in] identifier[rlw_deps] . identifier[splitlines] ():
keyword[if] identifier[line] keyword[and] keyword[not] identifier[line] . identifier[startswith] ( literal[string] ):
identifier[pkgs] = identifier[line] . identifier[split] ( literal[string] )
identifier[dependencies] [ identifier[pkgs] [ literal[int] ]]= identifier[pkgs] [ literal[int] ]
keyword[if] identifier[self] . identifier[name] keyword[in] identifier[dependencies] . identifier[keys] ():
keyword[return] identifier[dependencies] [ identifier[self] . identifier[name] ]. identifier[split] ()
keyword[else] :
keyword[return] literal[string]
keyword[else] :
identifier[PACKAGES_TXT] = identifier[Utils] (). identifier[read_file] ( literal[string] . identifier[format] (
identifier[_meta_] . identifier[lib_path] , identifier[self] . identifier[repo] ))
keyword[for] identifier[line] keyword[in] identifier[PACKAGES_TXT] . identifier[splitlines] ():
keyword[if] identifier[line] . identifier[startswith] ( literal[string] ):
identifier[pkg_name] = identifier[split_package] ( identifier[line] [ literal[int] :]. identifier[strip] ())[ literal[int] ]
keyword[if] identifier[line] . identifier[startswith] ( literal[string] ):
keyword[if] identifier[pkg_name] == identifier[self] . identifier[name] :
keyword[if] identifier[line] [ literal[int] :]. identifier[strip] ():
keyword[return] identifier[self] . identifier[_req_fix] ( identifier[line] ) | def get_deps(self):
"""Grap package requirements from repositories
"""
if self.repo == 'rlw':
dependencies = {}
rlw_deps = Utils().read_file(_meta_.conf_path + 'rlworkman.deps')
for line in rlw_deps.splitlines():
if line and (not line.startswith('#')):
pkgs = line.split(':')
dependencies[pkgs[0]] = pkgs[1] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']]
if self.name in dependencies.keys():
return dependencies[self.name].split() # depends on [control=['if'], data=[]]
else:
return '' # depends on [control=['if'], data=[]]
else:
PACKAGES_TXT = Utils().read_file('{0}{1}_repo/PACKAGES.TXT'.format(_meta_.lib_path, self.repo))
for line in PACKAGES_TXT.splitlines():
if line.startswith('PACKAGE NAME:'):
pkg_name = split_package(line[14:].strip())[0] # depends on [control=['if'], data=[]]
if line.startswith('PACKAGE REQUIRED:'):
if pkg_name == self.name:
if line[18:].strip():
return self._req_fix(line) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] |
def process_terminals(self, word):
"""
Deal with terminal Es and Ls and
convert any uppercase Ys back to lowercase.
"""
length = len(word)
if word[length - 1] == 'e':
if self.r2 <= (length - 1):
word = word[:-1]
elif self.r1 <= (length - 1):
if not self.is_short(word[:-1]):
word = word[:-1]
elif word[length - 1] == 'l':
if self.r2 <= (length - 1) and word[length - 2] == 'l':
word = word[:-1]
char_list = [x if x != 'Y' else 'y' for x in word]
word = ''.join(char_list)
return word | def function[process_terminals, parameter[self, word]]:
constant[
Deal with terminal Es and Ls and
convert any uppercase Ys back to lowercase.
]
variable[length] assign[=] call[name[len], parameter[name[word]]]
if compare[call[name[word]][binary_operation[name[length] - constant[1]]] equal[==] constant[e]] begin[:]
if compare[name[self].r2 less_or_equal[<=] binary_operation[name[length] - constant[1]]] begin[:]
variable[word] assign[=] call[name[word]][<ast.Slice object at 0x7da1b0a61690>]
variable[char_list] assign[=] <ast.ListComp object at 0x7da1b0a61d50>
variable[word] assign[=] call[constant[].join, parameter[name[char_list]]]
return[name[word]] | keyword[def] identifier[process_terminals] ( identifier[self] , identifier[word] ):
literal[string]
identifier[length] = identifier[len] ( identifier[word] )
keyword[if] identifier[word] [ identifier[length] - literal[int] ]== literal[string] :
keyword[if] identifier[self] . identifier[r2] <=( identifier[length] - literal[int] ):
identifier[word] = identifier[word] [:- literal[int] ]
keyword[elif] identifier[self] . identifier[r1] <=( identifier[length] - literal[int] ):
keyword[if] keyword[not] identifier[self] . identifier[is_short] ( identifier[word] [:- literal[int] ]):
identifier[word] = identifier[word] [:- literal[int] ]
keyword[elif] identifier[word] [ identifier[length] - literal[int] ]== literal[string] :
keyword[if] identifier[self] . identifier[r2] <=( identifier[length] - literal[int] ) keyword[and] identifier[word] [ identifier[length] - literal[int] ]== literal[string] :
identifier[word] = identifier[word] [:- literal[int] ]
identifier[char_list] =[ identifier[x] keyword[if] identifier[x] != literal[string] keyword[else] literal[string] keyword[for] identifier[x] keyword[in] identifier[word] ]
identifier[word] = literal[string] . identifier[join] ( identifier[char_list] )
keyword[return] identifier[word] | def process_terminals(self, word):
"""
Deal with terminal Es and Ls and
convert any uppercase Ys back to lowercase.
"""
length = len(word)
if word[length - 1] == 'e':
if self.r2 <= length - 1:
word = word[:-1] # depends on [control=['if'], data=[]]
elif self.r1 <= length - 1:
if not self.is_short(word[:-1]):
word = word[:-1] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif word[length - 1] == 'l':
if self.r2 <= length - 1 and word[length - 2] == 'l':
word = word[:-1] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
char_list = [x if x != 'Y' else 'y' for x in word]
word = ''.join(char_list)
return word |
def visit_Subscript(self, node: ast.Subscript) -> Any:
"""Visit the ``slice`` and a ``value`` and get the element."""
value = self.visit(node=node.value)
a_slice = self.visit(node=node.slice)
result = value[a_slice]
self.recomputed_values[node] = result
return result | def function[visit_Subscript, parameter[self, node]]:
constant[Visit the ``slice`` and a ``value`` and get the element.]
variable[value] assign[=] call[name[self].visit, parameter[]]
variable[a_slice] assign[=] call[name[self].visit, parameter[]]
variable[result] assign[=] call[name[value]][name[a_slice]]
call[name[self].recomputed_values][name[node]] assign[=] name[result]
return[name[result]] | keyword[def] identifier[visit_Subscript] ( identifier[self] , identifier[node] : identifier[ast] . identifier[Subscript] )-> identifier[Any] :
literal[string]
identifier[value] = identifier[self] . identifier[visit] ( identifier[node] = identifier[node] . identifier[value] )
identifier[a_slice] = identifier[self] . identifier[visit] ( identifier[node] = identifier[node] . identifier[slice] )
identifier[result] = identifier[value] [ identifier[a_slice] ]
identifier[self] . identifier[recomputed_values] [ identifier[node] ]= identifier[result]
keyword[return] identifier[result] | def visit_Subscript(self, node: ast.Subscript) -> Any:
"""Visit the ``slice`` and a ``value`` and get the element."""
value = self.visit(node=node.value)
a_slice = self.visit(node=node.slice)
result = value[a_slice]
self.recomputed_values[node] = result
return result |
def create_handler(cls, message_handler, buffer_size, logger):
"""
Class variables used here since the framework creates an instance for each connection
:param message_handler: the MessageHandler used to process each message.
:param buffer_size: the TCP buffer size.
:param logger: the global logger.
:return: this class.
"""
cls.BUFFER_SIZE = buffer_size
cls.message_handler = message_handler
cls.logger = logger
cls.message_handler.logger = logging.getLogger(message_handler.__class__.__name__)
cls.message_handler.logger.setLevel(logger.level)
return cls | def function[create_handler, parameter[cls, message_handler, buffer_size, logger]]:
constant[
Class variables used here since the framework creates an instance for each connection
:param message_handler: the MessageHandler used to process each message.
:param buffer_size: the TCP buffer size.
:param logger: the global logger.
:return: this class.
]
name[cls].BUFFER_SIZE assign[=] name[buffer_size]
name[cls].message_handler assign[=] name[message_handler]
name[cls].logger assign[=] name[logger]
name[cls].message_handler.logger assign[=] call[name[logging].getLogger, parameter[name[message_handler].__class__.__name__]]
call[name[cls].message_handler.logger.setLevel, parameter[name[logger].level]]
return[name[cls]] | keyword[def] identifier[create_handler] ( identifier[cls] , identifier[message_handler] , identifier[buffer_size] , identifier[logger] ):
literal[string]
identifier[cls] . identifier[BUFFER_SIZE] = identifier[buffer_size]
identifier[cls] . identifier[message_handler] = identifier[message_handler]
identifier[cls] . identifier[logger] = identifier[logger]
identifier[cls] . identifier[message_handler] . identifier[logger] = identifier[logging] . identifier[getLogger] ( identifier[message_handler] . identifier[__class__] . identifier[__name__] )
identifier[cls] . identifier[message_handler] . identifier[logger] . identifier[setLevel] ( identifier[logger] . identifier[level] )
keyword[return] identifier[cls] | def create_handler(cls, message_handler, buffer_size, logger):
"""
Class variables used here since the framework creates an instance for each connection
:param message_handler: the MessageHandler used to process each message.
:param buffer_size: the TCP buffer size.
:param logger: the global logger.
:return: this class.
"""
cls.BUFFER_SIZE = buffer_size
cls.message_handler = message_handler
cls.logger = logger
cls.message_handler.logger = logging.getLogger(message_handler.__class__.__name__)
cls.message_handler.logger.setLevel(logger.level)
return cls |
def __check_valid(self, post_data):
'''
To check if the user is succesfully created.
Return the status code dict.
'''
user_create_status = {'success': False, 'code': '00'}
if not tools.check_username_valid(post_data['user_name']):
user_create_status['code'] = '11'
return user_create_status
elif not tools.check_email_valid(post_data['user_email']):
user_create_status['code'] = '21'
return user_create_status
elif MUser.get_by_name(post_data['user_name']):
user_create_status['code'] = '12'
return user_create_status
elif MUser.get_by_email(post_data['user_email']):
user_create_status['code'] = '22'
return user_create_status
user_create_status['success'] = True
return user_create_status | def function[__check_valid, parameter[self, post_data]]:
constant[
To check if the user is succesfully created.
Return the status code dict.
]
variable[user_create_status] assign[=] dictionary[[<ast.Constant object at 0x7da1b04d04c0>, <ast.Constant object at 0x7da1b04d0910>], [<ast.Constant object at 0x7da1b04d1fc0>, <ast.Constant object at 0x7da1b04d2f80>]]
if <ast.UnaryOp object at 0x7da1b04d16f0> begin[:]
call[name[user_create_status]][constant[code]] assign[=] constant[11]
return[name[user_create_status]]
call[name[user_create_status]][constant[success]] assign[=] constant[True]
return[name[user_create_status]] | keyword[def] identifier[__check_valid] ( identifier[self] , identifier[post_data] ):
literal[string]
identifier[user_create_status] ={ literal[string] : keyword[False] , literal[string] : literal[string] }
keyword[if] keyword[not] identifier[tools] . identifier[check_username_valid] ( identifier[post_data] [ literal[string] ]):
identifier[user_create_status] [ literal[string] ]= literal[string]
keyword[return] identifier[user_create_status]
keyword[elif] keyword[not] identifier[tools] . identifier[check_email_valid] ( identifier[post_data] [ literal[string] ]):
identifier[user_create_status] [ literal[string] ]= literal[string]
keyword[return] identifier[user_create_status]
keyword[elif] identifier[MUser] . identifier[get_by_name] ( identifier[post_data] [ literal[string] ]):
identifier[user_create_status] [ literal[string] ]= literal[string]
keyword[return] identifier[user_create_status]
keyword[elif] identifier[MUser] . identifier[get_by_email] ( identifier[post_data] [ literal[string] ]):
identifier[user_create_status] [ literal[string] ]= literal[string]
keyword[return] identifier[user_create_status]
identifier[user_create_status] [ literal[string] ]= keyword[True]
keyword[return] identifier[user_create_status] | def __check_valid(self, post_data):
"""
To check if the user is succesfully created.
Return the status code dict.
"""
user_create_status = {'success': False, 'code': '00'}
if not tools.check_username_valid(post_data['user_name']):
user_create_status['code'] = '11'
return user_create_status # depends on [control=['if'], data=[]]
elif not tools.check_email_valid(post_data['user_email']):
user_create_status['code'] = '21'
return user_create_status # depends on [control=['if'], data=[]]
elif MUser.get_by_name(post_data['user_name']):
user_create_status['code'] = '12'
return user_create_status # depends on [control=['if'], data=[]]
elif MUser.get_by_email(post_data['user_email']):
user_create_status['code'] = '22'
return user_create_status # depends on [control=['if'], data=[]]
user_create_status['success'] = True
return user_create_status |
def etree_to_text(tree,
guess_punct_space=True,
guess_layout=True,
newline_tags=NEWLINE_TAGS,
double_newline_tags=DOUBLE_NEWLINE_TAGS):
"""
Convert a html tree to text. Tree should be cleaned with
``html_text.html_text.cleaner.clean_html`` before passing to this
function.
See html_text.extract_text docstring for description of the
approach and options.
"""
chunks = []
_NEWLINE = object()
_DOUBLE_NEWLINE = object()
class Context:
""" workaround for missing `nonlocal` in Python 2 """
# _NEWLINE, _DOUBLE_NEWLINE or content of the previous chunk (str)
prev = _DOUBLE_NEWLINE
def should_add_space(text, prev):
""" Return True if extra whitespace should be added before text """
if prev in {_NEWLINE, _DOUBLE_NEWLINE}:
return False
if not _has_trailing_whitespace(prev):
if _has_punct_after(text) or _has_open_bracket_before(prev):
return False
return True
def get_space_between(text, prev):
if not text or not guess_punct_space:
return ' '
return ' ' if should_add_space(text, prev) else ''
def add_newlines(tag, context):
if not guess_layout:
return
prev = context.prev
if prev is _DOUBLE_NEWLINE: # don't output more than 1 blank line
return
if tag in double_newline_tags:
context.prev = _DOUBLE_NEWLINE
chunks.append('\n' if prev is _NEWLINE else '\n\n')
elif tag in newline_tags:
context.prev = _NEWLINE
if prev is not _NEWLINE:
chunks.append('\n')
def add_text(text_content, context):
text = _normalize_whitespace(text_content) if text_content else ''
if not text:
return
space = get_space_between(text, context.prev)
chunks.extend([space, text])
context.prev = text_content
def traverse_text_fragments(tree, context, handle_tail=True):
""" Extract text from the ``tree``: fill ``chunks`` variable """
add_newlines(tree.tag, context)
add_text(tree.text, context)
for child in tree:
traverse_text_fragments(child, context)
add_newlines(tree.tag, context)
if handle_tail:
add_text(tree.tail, context)
traverse_text_fragments(tree, context=Context(), handle_tail=False)
return ''.join(chunks).strip() | def function[etree_to_text, parameter[tree, guess_punct_space, guess_layout, newline_tags, double_newline_tags]]:
constant[
Convert a html tree to text. Tree should be cleaned with
``html_text.html_text.cleaner.clean_html`` before passing to this
function.
See html_text.extract_text docstring for description of the
approach and options.
]
variable[chunks] assign[=] list[[]]
variable[_NEWLINE] assign[=] call[name[object], parameter[]]
variable[_DOUBLE_NEWLINE] assign[=] call[name[object], parameter[]]
class class[Context, parameter[]] begin[:]
constant[ workaround for missing `nonlocal` in Python 2 ]
variable[prev] assign[=] name[_DOUBLE_NEWLINE]
def function[should_add_space, parameter[text, prev]]:
constant[ Return True if extra whitespace should be added before text ]
if compare[name[prev] in <ast.Set object at 0x7da20c6a8130>] begin[:]
return[constant[False]]
if <ast.UnaryOp object at 0x7da20c6a8430> begin[:]
if <ast.BoolOp object at 0x7da20c6a83d0> begin[:]
return[constant[False]]
return[constant[True]]
def function[get_space_between, parameter[text, prev]]:
if <ast.BoolOp object at 0x7da20c6a9210> begin[:]
return[constant[ ]]
return[<ast.IfExp object at 0x7da20c6a8dc0>]
def function[add_newlines, parameter[tag, context]]:
if <ast.UnaryOp object at 0x7da20c6ab970> begin[:]
return[None]
variable[prev] assign[=] name[context].prev
if compare[name[prev] is name[_DOUBLE_NEWLINE]] begin[:]
return[None]
if compare[name[tag] in name[double_newline_tags]] begin[:]
name[context].prev assign[=] name[_DOUBLE_NEWLINE]
call[name[chunks].append, parameter[<ast.IfExp object at 0x7da20c6a9450>]]
def function[add_text, parameter[text_content, context]]:
variable[text] assign[=] <ast.IfExp object at 0x7da2041d9e10>
if <ast.UnaryOp object at 0x7da2041da6e0> begin[:]
return[None]
variable[space] assign[=] call[name[get_space_between], parameter[name[text], name[context].prev]]
call[name[chunks].extend, parameter[list[[<ast.Name object at 0x7da2041d95d0>, <ast.Name object at 0x7da2041d9d50>]]]]
name[context].prev assign[=] name[text_content]
def function[traverse_text_fragments, parameter[tree, context, handle_tail]]:
constant[ Extract text from the ``tree``: fill ``chunks`` variable ]
call[name[add_newlines], parameter[name[tree].tag, name[context]]]
call[name[add_text], parameter[name[tree].text, name[context]]]
for taget[name[child]] in starred[name[tree]] begin[:]
call[name[traverse_text_fragments], parameter[name[child], name[context]]]
call[name[add_newlines], parameter[name[tree].tag, name[context]]]
if name[handle_tail] begin[:]
call[name[add_text], parameter[name[tree].tail, name[context]]]
call[name[traverse_text_fragments], parameter[name[tree]]]
return[call[call[constant[].join, parameter[name[chunks]]].strip, parameter[]]] | keyword[def] identifier[etree_to_text] ( identifier[tree] ,
identifier[guess_punct_space] = keyword[True] ,
identifier[guess_layout] = keyword[True] ,
identifier[newline_tags] = identifier[NEWLINE_TAGS] ,
identifier[double_newline_tags] = identifier[DOUBLE_NEWLINE_TAGS] ):
literal[string]
identifier[chunks] =[]
identifier[_NEWLINE] = identifier[object] ()
identifier[_DOUBLE_NEWLINE] = identifier[object] ()
keyword[class] identifier[Context] :
literal[string]
identifier[prev] = identifier[_DOUBLE_NEWLINE]
keyword[def] identifier[should_add_space] ( identifier[text] , identifier[prev] ):
literal[string]
keyword[if] identifier[prev] keyword[in] { identifier[_NEWLINE] , identifier[_DOUBLE_NEWLINE] }:
keyword[return] keyword[False]
keyword[if] keyword[not] identifier[_has_trailing_whitespace] ( identifier[prev] ):
keyword[if] identifier[_has_punct_after] ( identifier[text] ) keyword[or] identifier[_has_open_bracket_before] ( identifier[prev] ):
keyword[return] keyword[False]
keyword[return] keyword[True]
keyword[def] identifier[get_space_between] ( identifier[text] , identifier[prev] ):
keyword[if] keyword[not] identifier[text] keyword[or] keyword[not] identifier[guess_punct_space] :
keyword[return] literal[string]
keyword[return] literal[string] keyword[if] identifier[should_add_space] ( identifier[text] , identifier[prev] ) keyword[else] literal[string]
keyword[def] identifier[add_newlines] ( identifier[tag] , identifier[context] ):
keyword[if] keyword[not] identifier[guess_layout] :
keyword[return]
identifier[prev] = identifier[context] . identifier[prev]
keyword[if] identifier[prev] keyword[is] identifier[_DOUBLE_NEWLINE] :
keyword[return]
keyword[if] identifier[tag] keyword[in] identifier[double_newline_tags] :
identifier[context] . identifier[prev] = identifier[_DOUBLE_NEWLINE]
identifier[chunks] . identifier[append] ( literal[string] keyword[if] identifier[prev] keyword[is] identifier[_NEWLINE] keyword[else] literal[string] )
keyword[elif] identifier[tag] keyword[in] identifier[newline_tags] :
identifier[context] . identifier[prev] = identifier[_NEWLINE]
keyword[if] identifier[prev] keyword[is] keyword[not] identifier[_NEWLINE] :
identifier[chunks] . identifier[append] ( literal[string] )
keyword[def] identifier[add_text] ( identifier[text_content] , identifier[context] ):
identifier[text] = identifier[_normalize_whitespace] ( identifier[text_content] ) keyword[if] identifier[text_content] keyword[else] literal[string]
keyword[if] keyword[not] identifier[text] :
keyword[return]
identifier[space] = identifier[get_space_between] ( identifier[text] , identifier[context] . identifier[prev] )
identifier[chunks] . identifier[extend] ([ identifier[space] , identifier[text] ])
identifier[context] . identifier[prev] = identifier[text_content]
keyword[def] identifier[traverse_text_fragments] ( identifier[tree] , identifier[context] , identifier[handle_tail] = keyword[True] ):
literal[string]
identifier[add_newlines] ( identifier[tree] . identifier[tag] , identifier[context] )
identifier[add_text] ( identifier[tree] . identifier[text] , identifier[context] )
keyword[for] identifier[child] keyword[in] identifier[tree] :
identifier[traverse_text_fragments] ( identifier[child] , identifier[context] )
identifier[add_newlines] ( identifier[tree] . identifier[tag] , identifier[context] )
keyword[if] identifier[handle_tail] :
identifier[add_text] ( identifier[tree] . identifier[tail] , identifier[context] )
identifier[traverse_text_fragments] ( identifier[tree] , identifier[context] = identifier[Context] (), identifier[handle_tail] = keyword[False] )
keyword[return] literal[string] . identifier[join] ( identifier[chunks] ). identifier[strip] () | def etree_to_text(tree, guess_punct_space=True, guess_layout=True, newline_tags=NEWLINE_TAGS, double_newline_tags=DOUBLE_NEWLINE_TAGS):
"""
Convert a html tree to text. Tree should be cleaned with
``html_text.html_text.cleaner.clean_html`` before passing to this
function.
See html_text.extract_text docstring for description of the
approach and options.
"""
chunks = []
_NEWLINE = object()
_DOUBLE_NEWLINE = object()
class Context:
""" workaround for missing `nonlocal` in Python 2 """
# _NEWLINE, _DOUBLE_NEWLINE or content of the previous chunk (str)
prev = _DOUBLE_NEWLINE
def should_add_space(text, prev):
""" Return True if extra whitespace should be added before text """
if prev in {_NEWLINE, _DOUBLE_NEWLINE}:
return False # depends on [control=['if'], data=[]]
if not _has_trailing_whitespace(prev):
if _has_punct_after(text) or _has_open_bracket_before(prev):
return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return True
def get_space_between(text, prev):
if not text or not guess_punct_space:
return ' ' # depends on [control=['if'], data=[]]
return ' ' if should_add_space(text, prev) else ''
def add_newlines(tag, context):
if not guess_layout:
return # depends on [control=['if'], data=[]]
prev = context.prev
if prev is _DOUBLE_NEWLINE: # don't output more than 1 blank line
return # depends on [control=['if'], data=[]]
if tag in double_newline_tags:
context.prev = _DOUBLE_NEWLINE
chunks.append('\n' if prev is _NEWLINE else '\n\n') # depends on [control=['if'], data=[]]
elif tag in newline_tags:
context.prev = _NEWLINE
if prev is not _NEWLINE:
chunks.append('\n') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
def add_text(text_content, context):
text = _normalize_whitespace(text_content) if text_content else ''
if not text:
return # depends on [control=['if'], data=[]]
space = get_space_between(text, context.prev)
chunks.extend([space, text])
context.prev = text_content
def traverse_text_fragments(tree, context, handle_tail=True):
""" Extract text from the ``tree``: fill ``chunks`` variable """
add_newlines(tree.tag, context)
add_text(tree.text, context)
for child in tree:
traverse_text_fragments(child, context) # depends on [control=['for'], data=['child']]
add_newlines(tree.tag, context)
if handle_tail:
add_text(tree.tail, context) # depends on [control=['if'], data=[]]
traverse_text_fragments(tree, context=Context(), handle_tail=False)
return ''.join(chunks).strip() |
def match_level(self, overlay):
"""
Given an overlay, return the match level and applicable slice
of the overall overlay. The level an integer if there is a
match or None if there is no match.
The level integer is the number of matching components. Higher
values indicate a stronger match.
"""
slice_width = len(self._pattern_spec)
if slice_width > len(overlay): return None
# Check all the possible slices and return the best matching one
best_lvl, match_slice = (0, None)
for i in range(len(overlay)-slice_width+1):
overlay_slice = overlay.values()[i:i+slice_width]
lvl = self._slice_match_level(overlay_slice)
if lvl is None: continue
if lvl > best_lvl:
best_lvl = lvl
match_slice = (i, i+slice_width)
return (best_lvl, match_slice) if best_lvl != 0 else None | def function[match_level, parameter[self, overlay]]:
constant[
Given an overlay, return the match level and applicable slice
of the overall overlay. The level an integer if there is a
match or None if there is no match.
The level integer is the number of matching components. Higher
values indicate a stronger match.
]
variable[slice_width] assign[=] call[name[len], parameter[name[self]._pattern_spec]]
if compare[name[slice_width] greater[>] call[name[len], parameter[name[overlay]]]] begin[:]
return[constant[None]]
<ast.Tuple object at 0x7da2054a4340> assign[=] tuple[[<ast.Constant object at 0x7da2054a7a60>, <ast.Constant object at 0x7da2054a4a90>]]
for taget[name[i]] in starred[call[name[range], parameter[binary_operation[binary_operation[call[name[len], parameter[name[overlay]]] - name[slice_width]] + constant[1]]]]] begin[:]
variable[overlay_slice] assign[=] call[call[name[overlay].values, parameter[]]][<ast.Slice object at 0x7da2054a5510>]
variable[lvl] assign[=] call[name[self]._slice_match_level, parameter[name[overlay_slice]]]
if compare[name[lvl] is constant[None]] begin[:]
continue
if compare[name[lvl] greater[>] name[best_lvl]] begin[:]
variable[best_lvl] assign[=] name[lvl]
variable[match_slice] assign[=] tuple[[<ast.Name object at 0x7da2054a4a30>, <ast.BinOp object at 0x7da2054a6440>]]
return[<ast.IfExp object at 0x7da2054a5930>] | keyword[def] identifier[match_level] ( identifier[self] , identifier[overlay] ):
literal[string]
identifier[slice_width] = identifier[len] ( identifier[self] . identifier[_pattern_spec] )
keyword[if] identifier[slice_width] > identifier[len] ( identifier[overlay] ): keyword[return] keyword[None]
identifier[best_lvl] , identifier[match_slice] =( literal[int] , keyword[None] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[overlay] )- identifier[slice_width] + literal[int] ):
identifier[overlay_slice] = identifier[overlay] . identifier[values] ()[ identifier[i] : identifier[i] + identifier[slice_width] ]
identifier[lvl] = identifier[self] . identifier[_slice_match_level] ( identifier[overlay_slice] )
keyword[if] identifier[lvl] keyword[is] keyword[None] : keyword[continue]
keyword[if] identifier[lvl] > identifier[best_lvl] :
identifier[best_lvl] = identifier[lvl]
identifier[match_slice] =( identifier[i] , identifier[i] + identifier[slice_width] )
keyword[return] ( identifier[best_lvl] , identifier[match_slice] ) keyword[if] identifier[best_lvl] != literal[int] keyword[else] keyword[None] | def match_level(self, overlay):
"""
Given an overlay, return the match level and applicable slice
of the overall overlay. The level an integer if there is a
match or None if there is no match.
The level integer is the number of matching components. Higher
values indicate a stronger match.
"""
slice_width = len(self._pattern_spec)
if slice_width > len(overlay):
return None # depends on [control=['if'], data=[]]
# Check all the possible slices and return the best matching one
(best_lvl, match_slice) = (0, None)
for i in range(len(overlay) - slice_width + 1):
overlay_slice = overlay.values()[i:i + slice_width]
lvl = self._slice_match_level(overlay_slice)
if lvl is None:
continue # depends on [control=['if'], data=[]]
if lvl > best_lvl:
best_lvl = lvl
match_slice = (i, i + slice_width) # depends on [control=['if'], data=['lvl', 'best_lvl']] # depends on [control=['for'], data=['i']]
return (best_lvl, match_slice) if best_lvl != 0 else None |
def _require_homogeneous_roots(self, accept_predicate, reject_predicate):
"""Ensures that there is no ambiguity in the context according to the given predicates.
If any targets in the context satisfy the accept_predicate, and no targets satisfy the
reject_predicate, returns the accepted targets.
If no targets satisfy the accept_predicate, returns None.
Otherwise throws TaskError.
"""
if len(self.context.target_roots) == 0:
raise self.NoActivationsError('No target specified.')
def resolve(targets):
# Recursively resolve target aliases.
for t in targets:
if type(t) == Target:
for r in resolve(t.dependencies):
yield r
else:
yield t
expanded_roots = list(resolve(self.context.target_roots))
accepted = list(filter(accept_predicate, expanded_roots))
rejected = list(filter(reject_predicate, expanded_roots))
if len(accepted) == 0:
# no targets were accepted, regardless of rejects
return None
elif len(rejected) == 0:
# we have at least one accepted target, and no rejected targets
return accepted
else:
# both accepted and rejected targets
# TODO: once https://github.com/pantsbuild/pants/issues/425 lands, we should add
# language-specific flags that would resolve the ambiguity here
def render_target(target):
return '{} (a {})'.format(target.address.reference(), target.type_alias)
raise self.IncompatibleActivationsError('Mutually incompatible targets specified: {} vs {} '
'(and {} others)'
.format(render_target(accepted[0]),
render_target(rejected[0]),
len(accepted) + len(rejected) - 2)) | def function[_require_homogeneous_roots, parameter[self, accept_predicate, reject_predicate]]:
constant[Ensures that there is no ambiguity in the context according to the given predicates.
If any targets in the context satisfy the accept_predicate, and no targets satisfy the
reject_predicate, returns the accepted targets.
If no targets satisfy the accept_predicate, returns None.
Otherwise throws TaskError.
]
if compare[call[name[len], parameter[name[self].context.target_roots]] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da1b22bafe0>
def function[resolve, parameter[targets]]:
for taget[name[t]] in starred[name[targets]] begin[:]
if compare[call[name[type], parameter[name[t]]] equal[==] name[Target]] begin[:]
for taget[name[r]] in starred[call[name[resolve], parameter[name[t].dependencies]]] begin[:]
<ast.Yield object at 0x7da1b22bb9a0>
variable[expanded_roots] assign[=] call[name[list], parameter[call[name[resolve], parameter[name[self].context.target_roots]]]]
variable[accepted] assign[=] call[name[list], parameter[call[name[filter], parameter[name[accept_predicate], name[expanded_roots]]]]]
variable[rejected] assign[=] call[name[list], parameter[call[name[filter], parameter[name[reject_predicate], name[expanded_roots]]]]]
if compare[call[name[len], parameter[name[accepted]]] equal[==] constant[0]] begin[:]
return[constant[None]] | keyword[def] identifier[_require_homogeneous_roots] ( identifier[self] , identifier[accept_predicate] , identifier[reject_predicate] ):
literal[string]
keyword[if] identifier[len] ( identifier[self] . identifier[context] . identifier[target_roots] )== literal[int] :
keyword[raise] identifier[self] . identifier[NoActivationsError] ( literal[string] )
keyword[def] identifier[resolve] ( identifier[targets] ):
keyword[for] identifier[t] keyword[in] identifier[targets] :
keyword[if] identifier[type] ( identifier[t] )== identifier[Target] :
keyword[for] identifier[r] keyword[in] identifier[resolve] ( identifier[t] . identifier[dependencies] ):
keyword[yield] identifier[r]
keyword[else] :
keyword[yield] identifier[t]
identifier[expanded_roots] = identifier[list] ( identifier[resolve] ( identifier[self] . identifier[context] . identifier[target_roots] ))
identifier[accepted] = identifier[list] ( identifier[filter] ( identifier[accept_predicate] , identifier[expanded_roots] ))
identifier[rejected] = identifier[list] ( identifier[filter] ( identifier[reject_predicate] , identifier[expanded_roots] ))
keyword[if] identifier[len] ( identifier[accepted] )== literal[int] :
keyword[return] keyword[None]
keyword[elif] identifier[len] ( identifier[rejected] )== literal[int] :
keyword[return] identifier[accepted]
keyword[else] :
keyword[def] identifier[render_target] ( identifier[target] ):
keyword[return] literal[string] . identifier[format] ( identifier[target] . identifier[address] . identifier[reference] (), identifier[target] . identifier[type_alias] )
keyword[raise] identifier[self] . identifier[IncompatibleActivationsError] ( literal[string]
literal[string]
. identifier[format] ( identifier[render_target] ( identifier[accepted] [ literal[int] ]),
identifier[render_target] ( identifier[rejected] [ literal[int] ]),
identifier[len] ( identifier[accepted] )+ identifier[len] ( identifier[rejected] )- literal[int] )) | def _require_homogeneous_roots(self, accept_predicate, reject_predicate):
"""Ensures that there is no ambiguity in the context according to the given predicates.
If any targets in the context satisfy the accept_predicate, and no targets satisfy the
reject_predicate, returns the accepted targets.
If no targets satisfy the accept_predicate, returns None.
Otherwise throws TaskError.
"""
if len(self.context.target_roots) == 0:
raise self.NoActivationsError('No target specified.') # depends on [control=['if'], data=[]]
def resolve(targets):
# Recursively resolve target aliases.
for t in targets:
if type(t) == Target:
for r in resolve(t.dependencies):
yield r # depends on [control=['for'], data=['r']] # depends on [control=['if'], data=[]]
else:
yield t # depends on [control=['for'], data=['t']]
expanded_roots = list(resolve(self.context.target_roots))
accepted = list(filter(accept_predicate, expanded_roots))
rejected = list(filter(reject_predicate, expanded_roots))
if len(accepted) == 0:
# no targets were accepted, regardless of rejects
return None # depends on [control=['if'], data=[]]
elif len(rejected) == 0:
# we have at least one accepted target, and no rejected targets
return accepted # depends on [control=['if'], data=[]]
else:
# both accepted and rejected targets
# TODO: once https://github.com/pantsbuild/pants/issues/425 lands, we should add
# language-specific flags that would resolve the ambiguity here
def render_target(target):
return '{} (a {})'.format(target.address.reference(), target.type_alias)
raise self.IncompatibleActivationsError('Mutually incompatible targets specified: {} vs {} (and {} others)'.format(render_target(accepted[0]), render_target(rejected[0]), len(accepted) + len(rejected) - 2)) |
def defverb(self, s1, p1, s2, p2, s3, p3):
"""
Set the verb plurals for s1, s2 and s3 to p1, p2 and p3 respectively.
Where 1, 2 and 3 represent the 1st, 2nd and 3rd person forms of the verb.
"""
self.checkpat(s1)
self.checkpat(s2)
self.checkpat(s3)
self.checkpatplural(p1)
self.checkpatplural(p2)
self.checkpatplural(p3)
self.pl_v_user_defined.extend((s1, p1, s2, p2, s3, p3))
return 1 | def function[defverb, parameter[self, s1, p1, s2, p2, s3, p3]]:
constant[
Set the verb plurals for s1, s2 and s3 to p1, p2 and p3 respectively.
Where 1, 2 and 3 represent the 1st, 2nd and 3rd person forms of the verb.
]
call[name[self].checkpat, parameter[name[s1]]]
call[name[self].checkpat, parameter[name[s2]]]
call[name[self].checkpat, parameter[name[s3]]]
call[name[self].checkpatplural, parameter[name[p1]]]
call[name[self].checkpatplural, parameter[name[p2]]]
call[name[self].checkpatplural, parameter[name[p3]]]
call[name[self].pl_v_user_defined.extend, parameter[tuple[[<ast.Name object at 0x7da20c7942e0>, <ast.Name object at 0x7da20c796b30>, <ast.Name object at 0x7da20c7951e0>, <ast.Name object at 0x7da20c7964a0>, <ast.Name object at 0x7da20c7966b0>, <ast.Name object at 0x7da20c794310>]]]]
return[constant[1]] | keyword[def] identifier[defverb] ( identifier[self] , identifier[s1] , identifier[p1] , identifier[s2] , identifier[p2] , identifier[s3] , identifier[p3] ):
literal[string]
identifier[self] . identifier[checkpat] ( identifier[s1] )
identifier[self] . identifier[checkpat] ( identifier[s2] )
identifier[self] . identifier[checkpat] ( identifier[s3] )
identifier[self] . identifier[checkpatplural] ( identifier[p1] )
identifier[self] . identifier[checkpatplural] ( identifier[p2] )
identifier[self] . identifier[checkpatplural] ( identifier[p3] )
identifier[self] . identifier[pl_v_user_defined] . identifier[extend] (( identifier[s1] , identifier[p1] , identifier[s2] , identifier[p2] , identifier[s3] , identifier[p3] ))
keyword[return] literal[int] | def defverb(self, s1, p1, s2, p2, s3, p3):
"""
Set the verb plurals for s1, s2 and s3 to p1, p2 and p3 respectively.
Where 1, 2 and 3 represent the 1st, 2nd and 3rd person forms of the verb.
"""
self.checkpat(s1)
self.checkpat(s2)
self.checkpat(s3)
self.checkpatplural(p1)
self.checkpatplural(p2)
self.checkpatplural(p3)
self.pl_v_user_defined.extend((s1, p1, s2, p2, s3, p3))
return 1 |
def rowcol_from_template(target_tab, template_tab=0):
"""Adjusts row heights and column widths to match the template
Parameters
----------
target_tab: Integer
\tTable to be adjusted
template_tab: Integer, defaults to 0
\tTemplate table
"""
for row, tab in S.row_heights.keys():
# Delete all row heights in target table
if tab == target_tab:
S.row_heights.pop((row, tab))
if tab == template_tab:
S.row_heights[(row, target_tab)] = \
S.row_heights[(row, tab)]
for col, tab in S.col_widths.keys():
# Delete all column widths in target table
if tab == target_tab:
S.col_widths.pop((col, tab))
if tab == template_tab:
S.col_widths[(col, target_tab)] = \
S.col_widths[(col, tab)]
return "Table {tab} adjusted.".format(tab=target_tab) | def function[rowcol_from_template, parameter[target_tab, template_tab]]:
constant[Adjusts row heights and column widths to match the template
Parameters
----------
target_tab: Integer
Table to be adjusted
template_tab: Integer, defaults to 0
Template table
]
for taget[tuple[[<ast.Name object at 0x7da204620e20>, <ast.Name object at 0x7da204620ca0>]]] in starred[call[name[S].row_heights.keys, parameter[]]] begin[:]
if compare[name[tab] equal[==] name[target_tab]] begin[:]
call[name[S].row_heights.pop, parameter[tuple[[<ast.Name object at 0x7da204620f10>, <ast.Name object at 0x7da204622aa0>]]]]
if compare[name[tab] equal[==] name[template_tab]] begin[:]
call[name[S].row_heights][tuple[[<ast.Name object at 0x7da204623010>, <ast.Name object at 0x7da204623ee0>]]] assign[=] call[name[S].row_heights][tuple[[<ast.Name object at 0x7da2046233a0>, <ast.Name object at 0x7da204623e80>]]]
for taget[tuple[[<ast.Name object at 0x7da204623370>, <ast.Name object at 0x7da204620bb0>]]] in starred[call[name[S].col_widths.keys, parameter[]]] begin[:]
if compare[name[tab] equal[==] name[target_tab]] begin[:]
call[name[S].col_widths.pop, parameter[tuple[[<ast.Name object at 0x7da204621210>, <ast.Name object at 0x7da204623640>]]]]
if compare[name[tab] equal[==] name[template_tab]] begin[:]
call[name[S].col_widths][tuple[[<ast.Name object at 0x7da204623e20>, <ast.Name object at 0x7da204622da0>]]] assign[=] call[name[S].col_widths][tuple[[<ast.Name object at 0x7da204623040>, <ast.Name object at 0x7da1b16166b0>]]]
return[call[constant[Table {tab} adjusted.].format, parameter[]]] | keyword[def] identifier[rowcol_from_template] ( identifier[target_tab] , identifier[template_tab] = literal[int] ):
literal[string]
keyword[for] identifier[row] , identifier[tab] keyword[in] identifier[S] . identifier[row_heights] . identifier[keys] ():
keyword[if] identifier[tab] == identifier[target_tab] :
identifier[S] . identifier[row_heights] . identifier[pop] (( identifier[row] , identifier[tab] ))
keyword[if] identifier[tab] == identifier[template_tab] :
identifier[S] . identifier[row_heights] [( identifier[row] , identifier[target_tab] )]= identifier[S] . identifier[row_heights] [( identifier[row] , identifier[tab] )]
keyword[for] identifier[col] , identifier[tab] keyword[in] identifier[S] . identifier[col_widths] . identifier[keys] ():
keyword[if] identifier[tab] == identifier[target_tab] :
identifier[S] . identifier[col_widths] . identifier[pop] (( identifier[col] , identifier[tab] ))
keyword[if] identifier[tab] == identifier[template_tab] :
identifier[S] . identifier[col_widths] [( identifier[col] , identifier[target_tab] )]= identifier[S] . identifier[col_widths] [( identifier[col] , identifier[tab] )]
keyword[return] literal[string] . identifier[format] ( identifier[tab] = identifier[target_tab] ) | def rowcol_from_template(target_tab, template_tab=0):
"""Adjusts row heights and column widths to match the template
Parameters
----------
target_tab: Integer
Table to be adjusted
template_tab: Integer, defaults to 0
Template table
"""
for (row, tab) in S.row_heights.keys(): # Delete all row heights in target table
if tab == target_tab:
S.row_heights.pop((row, tab)) # depends on [control=['if'], data=['tab']]
if tab == template_tab:
S.row_heights[row, target_tab] = S.row_heights[row, tab] # depends on [control=['if'], data=['tab']] # depends on [control=['for'], data=[]]
for (col, tab) in S.col_widths.keys(): # Delete all column widths in target table
if tab == target_tab:
S.col_widths.pop((col, tab)) # depends on [control=['if'], data=['tab']]
if tab == template_tab:
S.col_widths[col, target_tab] = S.col_widths[col, tab] # depends on [control=['if'], data=['tab']] # depends on [control=['for'], data=[]]
return 'Table {tab} adjusted.'.format(tab=target_tab) |
def _set_cookies(self, cookies):
"""设置雪球 cookies,代码来自于
https://github.com/shidenggui/easytrader/issues/269
:param cookies: 雪球 cookies
:type cookies: str
"""
cookie_dict = helpers.parse_cookies_str(cookies)
self.s.cookies.update(cookie_dict) | def function[_set_cookies, parameter[self, cookies]]:
constant[设置雪球 cookies,代码来自于
https://github.com/shidenggui/easytrader/issues/269
:param cookies: 雪球 cookies
:type cookies: str
]
variable[cookie_dict] assign[=] call[name[helpers].parse_cookies_str, parameter[name[cookies]]]
call[name[self].s.cookies.update, parameter[name[cookie_dict]]] | keyword[def] identifier[_set_cookies] ( identifier[self] , identifier[cookies] ):
literal[string]
identifier[cookie_dict] = identifier[helpers] . identifier[parse_cookies_str] ( identifier[cookies] )
identifier[self] . identifier[s] . identifier[cookies] . identifier[update] ( identifier[cookie_dict] ) | def _set_cookies(self, cookies):
"""设置雪球 cookies,代码来自于
https://github.com/shidenggui/easytrader/issues/269
:param cookies: 雪球 cookies
:type cookies: str
"""
cookie_dict = helpers.parse_cookies_str(cookies)
self.s.cookies.update(cookie_dict) |
def _send(self, sender, to, subject, message, cc=None, bcc=None, attach=None, replyto=None):
"""
Send a Letter (MESSAGE) from SENDER to TO, with the subject SUBJECT
Arguments:
- `sender`: unicode
- `to`: unicode
- `subject`: unicode
- `message`: unicode
- `cc`: str or [str]
- `bcc`: str or [str]
` `replyto`: str
Return: None
Exceptions: None
"""
self.mailer.send(sender, to, subject, plain=message, cc=cc, bcc=bcc, attach=attach, replyto=replyto)
return | def function[_send, parameter[self, sender, to, subject, message, cc, bcc, attach, replyto]]:
constant[
Send a Letter (MESSAGE) from SENDER to TO, with the subject SUBJECT
Arguments:
- `sender`: unicode
- `to`: unicode
- `subject`: unicode
- `message`: unicode
- `cc`: str or [str]
- `bcc`: str or [str]
` `replyto`: str
Return: None
Exceptions: None
]
call[name[self].mailer.send, parameter[name[sender], name[to], name[subject]]]
return[None] | keyword[def] identifier[_send] ( identifier[self] , identifier[sender] , identifier[to] , identifier[subject] , identifier[message] , identifier[cc] = keyword[None] , identifier[bcc] = keyword[None] , identifier[attach] = keyword[None] , identifier[replyto] = keyword[None] ):
literal[string]
identifier[self] . identifier[mailer] . identifier[send] ( identifier[sender] , identifier[to] , identifier[subject] , identifier[plain] = identifier[message] , identifier[cc] = identifier[cc] , identifier[bcc] = identifier[bcc] , identifier[attach] = identifier[attach] , identifier[replyto] = identifier[replyto] )
keyword[return] | def _send(self, sender, to, subject, message, cc=None, bcc=None, attach=None, replyto=None):
"""
Send a Letter (MESSAGE) from SENDER to TO, with the subject SUBJECT
Arguments:
- `sender`: unicode
- `to`: unicode
- `subject`: unicode
- `message`: unicode
- `cc`: str or [str]
- `bcc`: str or [str]
` `replyto`: str
Return: None
Exceptions: None
"""
self.mailer.send(sender, to, subject, plain=message, cc=cc, bcc=bcc, attach=attach, replyto=replyto)
return |
def filterAcceptsRow(self, row_num, parent):
"""Qt override.
Reimplemented from base class to allow the use of custom filtering.
"""
model = self.sourceModel()
# The source model should have a method called row()
# which returns the table row as a python list.
tests = [func(model.row(row_num), self._filter_string,
self._filter_status) for func in
self._filter_functions.values()]
return False not in tests | def function[filterAcceptsRow, parameter[self, row_num, parent]]:
constant[Qt override.
Reimplemented from base class to allow the use of custom filtering.
]
variable[model] assign[=] call[name[self].sourceModel, parameter[]]
variable[tests] assign[=] <ast.ListComp object at 0x7da1b27a6cb0>
return[compare[constant[False] <ast.NotIn object at 0x7da2590d7190> name[tests]]] | keyword[def] identifier[filterAcceptsRow] ( identifier[self] , identifier[row_num] , identifier[parent] ):
literal[string]
identifier[model] = identifier[self] . identifier[sourceModel] ()
identifier[tests] =[ identifier[func] ( identifier[model] . identifier[row] ( identifier[row_num] ), identifier[self] . identifier[_filter_string] ,
identifier[self] . identifier[_filter_status] ) keyword[for] identifier[func] keyword[in]
identifier[self] . identifier[_filter_functions] . identifier[values] ()]
keyword[return] keyword[False] keyword[not] keyword[in] identifier[tests] | def filterAcceptsRow(self, row_num, parent):
"""Qt override.
Reimplemented from base class to allow the use of custom filtering.
"""
model = self.sourceModel()
# The source model should have a method called row()
# which returns the table row as a python list.
tests = [func(model.row(row_num), self._filter_string, self._filter_status) for func in self._filter_functions.values()]
return False not in tests |
def timed_call(self, ms, callback, *args, **kwargs):
""" We have to wake up the reactor after every call because
it may calculate a long delay where it can sleep which causes events
that happen during this period to seem really slow as they do not get
processed until after the reactor "wakes up"
"""
loop = self.loop
r = loop.callLater(ms/1000.0, callback, *args, **kwargs)
loop.wakeUp()
return r | def function[timed_call, parameter[self, ms, callback]]:
constant[ We have to wake up the reactor after every call because
it may calculate a long delay where it can sleep which causes events
that happen during this period to seem really slow as they do not get
processed until after the reactor "wakes up"
]
variable[loop] assign[=] name[self].loop
variable[r] assign[=] call[name[loop].callLater, parameter[binary_operation[name[ms] / constant[1000.0]], name[callback], <ast.Starred object at 0x7da1b1b168c0>]]
call[name[loop].wakeUp, parameter[]]
return[name[r]] | keyword[def] identifier[timed_call] ( identifier[self] , identifier[ms] , identifier[callback] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[loop] = identifier[self] . identifier[loop]
identifier[r] = identifier[loop] . identifier[callLater] ( identifier[ms] / literal[int] , identifier[callback] ,* identifier[args] ,** identifier[kwargs] )
identifier[loop] . identifier[wakeUp] ()
keyword[return] identifier[r] | def timed_call(self, ms, callback, *args, **kwargs):
""" We have to wake up the reactor after every call because
it may calculate a long delay where it can sleep which causes events
that happen during this period to seem really slow as they do not get
processed until after the reactor "wakes up"
"""
loop = self.loop
r = loop.callLater(ms / 1000.0, callback, *args, **kwargs)
loop.wakeUp()
return r |
def show_deployment(name, namespace='default', **kwargs):
'''
Return the kubernetes deployment defined by name and namespace
CLI Examples::
salt '*' kubernetes.show_deployment my-nginx default
salt '*' kubernetes.show_deployment name=my-nginx namespace=default
'''
cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.ExtensionsV1beta1Api()
api_response = api_instance.read_namespaced_deployment(name, namespace)
return api_response.to_dict()
except (ApiException, HTTPError) as exc:
if isinstance(exc, ApiException) and exc.status == 404:
return None
else:
log.exception(
'Exception when calling '
'ExtensionsV1beta1Api->read_namespaced_deployment'
)
raise CommandExecutionError(exc)
finally:
_cleanup(**cfg) | def function[show_deployment, parameter[name, namespace]]:
constant[
Return the kubernetes deployment defined by name and namespace
CLI Examples::
salt '*' kubernetes.show_deployment my-nginx default
salt '*' kubernetes.show_deployment name=my-nginx namespace=default
]
variable[cfg] assign[=] call[name[_setup_conn], parameter[]]
<ast.Try object at 0x7da20c6a9090> | keyword[def] identifier[show_deployment] ( identifier[name] , identifier[namespace] = literal[string] ,** identifier[kwargs] ):
literal[string]
identifier[cfg] = identifier[_setup_conn] (** identifier[kwargs] )
keyword[try] :
identifier[api_instance] = identifier[kubernetes] . identifier[client] . identifier[ExtensionsV1beta1Api] ()
identifier[api_response] = identifier[api_instance] . identifier[read_namespaced_deployment] ( identifier[name] , identifier[namespace] )
keyword[return] identifier[api_response] . identifier[to_dict] ()
keyword[except] ( identifier[ApiException] , identifier[HTTPError] ) keyword[as] identifier[exc] :
keyword[if] identifier[isinstance] ( identifier[exc] , identifier[ApiException] ) keyword[and] identifier[exc] . identifier[status] == literal[int] :
keyword[return] keyword[None]
keyword[else] :
identifier[log] . identifier[exception] (
literal[string]
literal[string]
)
keyword[raise] identifier[CommandExecutionError] ( identifier[exc] )
keyword[finally] :
identifier[_cleanup] (** identifier[cfg] ) | def show_deployment(name, namespace='default', **kwargs):
"""
Return the kubernetes deployment defined by name and namespace
CLI Examples::
salt '*' kubernetes.show_deployment my-nginx default
salt '*' kubernetes.show_deployment name=my-nginx namespace=default
"""
cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.ExtensionsV1beta1Api()
api_response = api_instance.read_namespaced_deployment(name, namespace)
return api_response.to_dict() # depends on [control=['try'], data=[]]
except (ApiException, HTTPError) as exc:
if isinstance(exc, ApiException) and exc.status == 404:
return None # depends on [control=['if'], data=[]]
else:
log.exception('Exception when calling ExtensionsV1beta1Api->read_namespaced_deployment')
raise CommandExecutionError(exc) # depends on [control=['except'], data=['exc']]
finally:
_cleanup(**cfg) |
def set_params(self, **params):
"""Set parameters on this object
Safe setter method - attributes should not be modified directly as some
changes are not valid.
Valid parameters:
Invalid parameters: (these would require modifying the kernel matrix)
- kernel_symm
- theta
Parameters
----------
params : key-value pairs of parameter name and new values
Returns
-------
self
"""
if 'theta' in params and params['theta'] != self.theta:
raise ValueError("Cannot update theta. Please create a new graph")
if 'anisotropy' in params and params['anisotropy'] != self.anisotropy:
raise ValueError(
"Cannot update anisotropy. Please create a new graph")
if 'kernel_symm' in params and \
params['kernel_symm'] != self.kernel_symm:
raise ValueError(
"Cannot update kernel_symm. Please create a new graph")
super().set_params(**params)
return self | def function[set_params, parameter[self]]:
constant[Set parameters on this object
Safe setter method - attributes should not be modified directly as some
changes are not valid.
Valid parameters:
Invalid parameters: (these would require modifying the kernel matrix)
- kernel_symm
- theta
Parameters
----------
params : key-value pairs of parameter name and new values
Returns
-------
self
]
if <ast.BoolOp object at 0x7da1b0b04a00> begin[:]
<ast.Raise object at 0x7da1b0b05810>
if <ast.BoolOp object at 0x7da1b0b05ba0> begin[:]
<ast.Raise object at 0x7da1b0b07190>
if <ast.BoolOp object at 0x7da1b0b04880> begin[:]
<ast.Raise object at 0x7da1b0b05b10>
call[call[name[super], parameter[]].set_params, parameter[]]
return[name[self]] | keyword[def] identifier[set_params] ( identifier[self] ,** identifier[params] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[params] keyword[and] identifier[params] [ literal[string] ]!= identifier[self] . identifier[theta] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[params] keyword[and] identifier[params] [ literal[string] ]!= identifier[self] . identifier[anisotropy] :
keyword[raise] identifier[ValueError] (
literal[string] )
keyword[if] literal[string] keyword[in] identifier[params] keyword[and] identifier[params] [ literal[string] ]!= identifier[self] . identifier[kernel_symm] :
keyword[raise] identifier[ValueError] (
literal[string] )
identifier[super] (). identifier[set_params] (** identifier[params] )
keyword[return] identifier[self] | def set_params(self, **params):
"""Set parameters on this object
Safe setter method - attributes should not be modified directly as some
changes are not valid.
Valid parameters:
Invalid parameters: (these would require modifying the kernel matrix)
- kernel_symm
- theta
Parameters
----------
params : key-value pairs of parameter name and new values
Returns
-------
self
"""
if 'theta' in params and params['theta'] != self.theta:
raise ValueError('Cannot update theta. Please create a new graph') # depends on [control=['if'], data=[]]
if 'anisotropy' in params and params['anisotropy'] != self.anisotropy:
raise ValueError('Cannot update anisotropy. Please create a new graph') # depends on [control=['if'], data=[]]
if 'kernel_symm' in params and params['kernel_symm'] != self.kernel_symm:
raise ValueError('Cannot update kernel_symm. Please create a new graph') # depends on [control=['if'], data=[]]
super().set_params(**params)
return self |
def update_browse_tabs_menu(self):
"""Update browse tabs menu"""
self.browse_tabs_menu.clear()
names = []
dirnames = []
for index in range(self.count()):
if self.menu_use_tooltips:
text = to_text_string(self.tabToolTip(index))
else:
text = to_text_string(self.tabText(index))
names.append(text)
if osp.isfile(text):
# Testing if tab names are filenames
dirnames.append(osp.dirname(text))
offset = None
# If tab names are all filenames, removing common path:
if len(names) == len(dirnames):
common = get_common_path(dirnames)
if common is None:
offset = None
else:
offset = len(common)+1
if offset <= 3:
# Common path is not a path but a drive letter...
offset = None
for index, text in enumerate(names):
tab_action = create_action(self, text[offset:],
icon=self.tabIcon(index),
toggled=lambda state, index=index:
self.setCurrentIndex(index),
tip=self.tabToolTip(index))
tab_action.setChecked(index == self.currentIndex())
self.browse_tabs_menu.addAction(tab_action) | def function[update_browse_tabs_menu, parameter[self]]:
constant[Update browse tabs menu]
call[name[self].browse_tabs_menu.clear, parameter[]]
variable[names] assign[=] list[[]]
variable[dirnames] assign[=] list[[]]
for taget[name[index]] in starred[call[name[range], parameter[call[name[self].count, parameter[]]]]] begin[:]
if name[self].menu_use_tooltips begin[:]
variable[text] assign[=] call[name[to_text_string], parameter[call[name[self].tabToolTip, parameter[name[index]]]]]
call[name[names].append, parameter[name[text]]]
if call[name[osp].isfile, parameter[name[text]]] begin[:]
call[name[dirnames].append, parameter[call[name[osp].dirname, parameter[name[text]]]]]
variable[offset] assign[=] constant[None]
if compare[call[name[len], parameter[name[names]]] equal[==] call[name[len], parameter[name[dirnames]]]] begin[:]
variable[common] assign[=] call[name[get_common_path], parameter[name[dirnames]]]
if compare[name[common] is constant[None]] begin[:]
variable[offset] assign[=] constant[None]
for taget[tuple[[<ast.Name object at 0x7da18f720220>, <ast.Name object at 0x7da18f721000>]]] in starred[call[name[enumerate], parameter[name[names]]]] begin[:]
variable[tab_action] assign[=] call[name[create_action], parameter[name[self], call[name[text]][<ast.Slice object at 0x7da18f7215d0>]]]
call[name[tab_action].setChecked, parameter[compare[name[index] equal[==] call[name[self].currentIndex, parameter[]]]]]
call[name[self].browse_tabs_menu.addAction, parameter[name[tab_action]]] | keyword[def] identifier[update_browse_tabs_menu] ( identifier[self] ):
literal[string]
identifier[self] . identifier[browse_tabs_menu] . identifier[clear] ()
identifier[names] =[]
identifier[dirnames] =[]
keyword[for] identifier[index] keyword[in] identifier[range] ( identifier[self] . identifier[count] ()):
keyword[if] identifier[self] . identifier[menu_use_tooltips] :
identifier[text] = identifier[to_text_string] ( identifier[self] . identifier[tabToolTip] ( identifier[index] ))
keyword[else] :
identifier[text] = identifier[to_text_string] ( identifier[self] . identifier[tabText] ( identifier[index] ))
identifier[names] . identifier[append] ( identifier[text] )
keyword[if] identifier[osp] . identifier[isfile] ( identifier[text] ):
identifier[dirnames] . identifier[append] ( identifier[osp] . identifier[dirname] ( identifier[text] ))
identifier[offset] = keyword[None]
keyword[if] identifier[len] ( identifier[names] )== identifier[len] ( identifier[dirnames] ):
identifier[common] = identifier[get_common_path] ( identifier[dirnames] )
keyword[if] identifier[common] keyword[is] keyword[None] :
identifier[offset] = keyword[None]
keyword[else] :
identifier[offset] = identifier[len] ( identifier[common] )+ literal[int]
keyword[if] identifier[offset] <= literal[int] :
identifier[offset] = keyword[None]
keyword[for] identifier[index] , identifier[text] keyword[in] identifier[enumerate] ( identifier[names] ):
identifier[tab_action] = identifier[create_action] ( identifier[self] , identifier[text] [ identifier[offset] :],
identifier[icon] = identifier[self] . identifier[tabIcon] ( identifier[index] ),
identifier[toggled] = keyword[lambda] identifier[state] , identifier[index] = identifier[index] :
identifier[self] . identifier[setCurrentIndex] ( identifier[index] ),
identifier[tip] = identifier[self] . identifier[tabToolTip] ( identifier[index] ))
identifier[tab_action] . identifier[setChecked] ( identifier[index] == identifier[self] . identifier[currentIndex] ())
identifier[self] . identifier[browse_tabs_menu] . identifier[addAction] ( identifier[tab_action] ) | def update_browse_tabs_menu(self):
"""Update browse tabs menu"""
self.browse_tabs_menu.clear()
names = []
dirnames = []
for index in range(self.count()):
if self.menu_use_tooltips:
text = to_text_string(self.tabToolTip(index)) # depends on [control=['if'], data=[]]
else:
text = to_text_string(self.tabText(index))
names.append(text)
if osp.isfile(text): # Testing if tab names are filenames
dirnames.append(osp.dirname(text)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['index']]
offset = None # If tab names are all filenames, removing common path:
if len(names) == len(dirnames):
common = get_common_path(dirnames)
if common is None:
offset = None # depends on [control=['if'], data=[]]
else:
offset = len(common) + 1
if offset <= 3: # Common path is not a path but a drive letter...
offset = None # depends on [control=['if'], data=['offset']] # depends on [control=['if'], data=[]]
for (index, text) in enumerate(names):
tab_action = create_action(self, text[offset:], icon=self.tabIcon(index), toggled=lambda state, index=index: self.setCurrentIndex(index), tip=self.tabToolTip(index))
tab_action.setChecked(index == self.currentIndex())
self.browse_tabs_menu.addAction(tab_action) # depends on [control=['for'], data=[]] |
def make_strain_from_inj_object(self, inj, delta_t, detector_name,
f_lower=None, distance_scale=1):
"""Make a h(t) strain time-series from an injection object as read from
a sim_inspiral table, for example.
Parameters
-----------
inj : injection object
The injection object to turn into a strain h(t).
delta_t : float
Sample rate to make injection at.
detector_name : string
Name of the detector used for projecting injections.
f_lower : {None, float}, optional
Low-frequency cutoff for injected signals. If None, use value
provided by each injection.
distance_scale: {1, float}, optional
Factor to scale the distance of an injection with. The default is
no scaling.
Returns
--------
signal : float
h(t) corresponding to the injection.
"""
detector = Detector(detector_name)
if f_lower is None:
f_l = inj.f_lower
else:
f_l = f_lower
name, phase_order = legacy_approximant_name(inj.waveform)
# compute the waveform time series
hp, hc = get_td_waveform(
inj, approximant=name, delta_t=delta_t,
phase_order=phase_order,
f_lower=f_l, distance=inj.distance,
**self.extra_args)
hp /= distance_scale
hc /= distance_scale
hp._epoch += inj.get_time_geocent()
hc._epoch += inj.get_time_geocent()
# taper the polarizations
hp_tapered = wfutils.taper_timeseries(hp, inj.taper)
hc_tapered = wfutils.taper_timeseries(hc, inj.taper)
# compute the detector response and add it to the strain
signal = detector.project_wave(hp_tapered, hc_tapered,
inj.longitude, inj.latitude, inj.polarization)
return signal | def function[make_strain_from_inj_object, parameter[self, inj, delta_t, detector_name, f_lower, distance_scale]]:
constant[Make a h(t) strain time-series from an injection object as read from
a sim_inspiral table, for example.
Parameters
-----------
inj : injection object
The injection object to turn into a strain h(t).
delta_t : float
Sample rate to make injection at.
detector_name : string
Name of the detector used for projecting injections.
f_lower : {None, float}, optional
Low-frequency cutoff for injected signals. If None, use value
provided by each injection.
distance_scale: {1, float}, optional
Factor to scale the distance of an injection with. The default is
no scaling.
Returns
--------
signal : float
h(t) corresponding to the injection.
]
variable[detector] assign[=] call[name[Detector], parameter[name[detector_name]]]
if compare[name[f_lower] is constant[None]] begin[:]
variable[f_l] assign[=] name[inj].f_lower
<ast.Tuple object at 0x7da20e9b30a0> assign[=] call[name[legacy_approximant_name], parameter[name[inj].waveform]]
<ast.Tuple object at 0x7da20e9b0e80> assign[=] call[name[get_td_waveform], parameter[name[inj]]]
<ast.AugAssign object at 0x7da20e9b3550>
<ast.AugAssign object at 0x7da20e9b2c50>
<ast.AugAssign object at 0x7da20e9b1cf0>
<ast.AugAssign object at 0x7da20e9b1bd0>
variable[hp_tapered] assign[=] call[name[wfutils].taper_timeseries, parameter[name[hp], name[inj].taper]]
variable[hc_tapered] assign[=] call[name[wfutils].taper_timeseries, parameter[name[hc], name[inj].taper]]
variable[signal] assign[=] call[name[detector].project_wave, parameter[name[hp_tapered], name[hc_tapered], name[inj].longitude, name[inj].latitude, name[inj].polarization]]
return[name[signal]] | keyword[def] identifier[make_strain_from_inj_object] ( identifier[self] , identifier[inj] , identifier[delta_t] , identifier[detector_name] ,
identifier[f_lower] = keyword[None] , identifier[distance_scale] = literal[int] ):
literal[string]
identifier[detector] = identifier[Detector] ( identifier[detector_name] )
keyword[if] identifier[f_lower] keyword[is] keyword[None] :
identifier[f_l] = identifier[inj] . identifier[f_lower]
keyword[else] :
identifier[f_l] = identifier[f_lower]
identifier[name] , identifier[phase_order] = identifier[legacy_approximant_name] ( identifier[inj] . identifier[waveform] )
identifier[hp] , identifier[hc] = identifier[get_td_waveform] (
identifier[inj] , identifier[approximant] = identifier[name] , identifier[delta_t] = identifier[delta_t] ,
identifier[phase_order] = identifier[phase_order] ,
identifier[f_lower] = identifier[f_l] , identifier[distance] = identifier[inj] . identifier[distance] ,
** identifier[self] . identifier[extra_args] )
identifier[hp] /= identifier[distance_scale]
identifier[hc] /= identifier[distance_scale]
identifier[hp] . identifier[_epoch] += identifier[inj] . identifier[get_time_geocent] ()
identifier[hc] . identifier[_epoch] += identifier[inj] . identifier[get_time_geocent] ()
identifier[hp_tapered] = identifier[wfutils] . identifier[taper_timeseries] ( identifier[hp] , identifier[inj] . identifier[taper] )
identifier[hc_tapered] = identifier[wfutils] . identifier[taper_timeseries] ( identifier[hc] , identifier[inj] . identifier[taper] )
identifier[signal] = identifier[detector] . identifier[project_wave] ( identifier[hp_tapered] , identifier[hc_tapered] ,
identifier[inj] . identifier[longitude] , identifier[inj] . identifier[latitude] , identifier[inj] . identifier[polarization] )
keyword[return] identifier[signal] | def make_strain_from_inj_object(self, inj, delta_t, detector_name, f_lower=None, distance_scale=1):
"""Make a h(t) strain time-series from an injection object as read from
a sim_inspiral table, for example.
Parameters
-----------
inj : injection object
The injection object to turn into a strain h(t).
delta_t : float
Sample rate to make injection at.
detector_name : string
Name of the detector used for projecting injections.
f_lower : {None, float}, optional
Low-frequency cutoff for injected signals. If None, use value
provided by each injection.
distance_scale: {1, float}, optional
Factor to scale the distance of an injection with. The default is
no scaling.
Returns
--------
signal : float
h(t) corresponding to the injection.
"""
detector = Detector(detector_name)
if f_lower is None:
f_l = inj.f_lower # depends on [control=['if'], data=[]]
else:
f_l = f_lower
(name, phase_order) = legacy_approximant_name(inj.waveform)
# compute the waveform time series
(hp, hc) = get_td_waveform(inj, approximant=name, delta_t=delta_t, phase_order=phase_order, f_lower=f_l, distance=inj.distance, **self.extra_args)
hp /= distance_scale
hc /= distance_scale
hp._epoch += inj.get_time_geocent()
hc._epoch += inj.get_time_geocent()
# taper the polarizations
hp_tapered = wfutils.taper_timeseries(hp, inj.taper)
hc_tapered = wfutils.taper_timeseries(hc, inj.taper)
# compute the detector response and add it to the strain
signal = detector.project_wave(hp_tapered, hc_tapered, inj.longitude, inj.latitude, inj.polarization)
return signal |
def force(self,c):
'''control force plus the external forces (drag, etc)'''
# bring instance variables into local scope
x = self.x
v = self.v
b = self.b
k = self.k
x0 = self.x0
F = self.control_force(c)
return F - b*v - k*(x - x0) | def function[force, parameter[self, c]]:
constant[control force plus the external forces (drag, etc)]
variable[x] assign[=] name[self].x
variable[v] assign[=] name[self].v
variable[b] assign[=] name[self].b
variable[k] assign[=] name[self].k
variable[x0] assign[=] name[self].x0
variable[F] assign[=] call[name[self].control_force, parameter[name[c]]]
return[binary_operation[binary_operation[name[F] - binary_operation[name[b] * name[v]]] - binary_operation[name[k] * binary_operation[name[x] - name[x0]]]]] | keyword[def] identifier[force] ( identifier[self] , identifier[c] ):
literal[string]
identifier[x] = identifier[self] . identifier[x]
identifier[v] = identifier[self] . identifier[v]
identifier[b] = identifier[self] . identifier[b]
identifier[k] = identifier[self] . identifier[k]
identifier[x0] = identifier[self] . identifier[x0]
identifier[F] = identifier[self] . identifier[control_force] ( identifier[c] )
keyword[return] identifier[F] - identifier[b] * identifier[v] - identifier[k] *( identifier[x] - identifier[x0] ) | def force(self, c):
"""control force plus the external forces (drag, etc)"""
# bring instance variables into local scope
x = self.x
v = self.v
b = self.b
k = self.k
x0 = self.x0
F = self.control_force(c)
return F - b * v - k * (x - x0) |
def k_material(ID, T=298.15):
r'''Returns thermal conductivity of a building, insulating, or refractory
material from tables in [1]_, [2]_, and [3]_. Thermal conductivity may or
may not be dependent on temperature depending on the source used. Function
must be provided with either a key to one of the dictionaries
`refractories`, `ASHRAE`, or `building_materials` - or a search term which
will pick the closest match based on a fuzzy search. To determine which
source the fuzzy search will pick, use the function `nearest_material`.
Fuzzy searches are slow; it is preferable to call this function with a
material key directly.
Parameters
----------
ID : str
String as described above
T : float, optional
Temperature of the material, [K]
Returns
-------
k : float
Thermal conductivity of the material, [W/m/K]
Examples
--------
>>> k_material('Mineral fiber')
0.036
References
----------
.. [1] ASHRAE Handbook: Fundamentals. American Society of Heating,
Refrigerating and Air-Conditioning Engineers, Incorporated, 2013.
.. [2] DIN EN 12524 (2000-07) Building Materials and Products
Hygrothermal Properties - Tabulated Design Values; English Version of
DIN EN 12524.
.. [3] Gesellschaft, V. D. I., ed. VDI Heat Atlas. 2nd edition.
Berlin; New York:: Springer, 2010.
'''
if ID not in materials_dict:
ID = nearest_material(ID)
if ID in refractories:
return refractory_VDI_k(ID, T)
elif ID in ASHRAE:
return ASHRAE_k(ID)
else:
return float(building_materials[ID][1]) | def function[k_material, parameter[ID, T]]:
constant[Returns thermal conductivity of a building, insulating, or refractory
material from tables in [1]_, [2]_, and [3]_. Thermal conductivity may or
may not be dependent on temperature depending on the source used. Function
must be provided with either a key to one of the dictionaries
`refractories`, `ASHRAE`, or `building_materials` - or a search term which
will pick the closest match based on a fuzzy search. To determine which
source the fuzzy search will pick, use the function `nearest_material`.
Fuzzy searches are slow; it is preferable to call this function with a
material key directly.
Parameters
----------
ID : str
String as described above
T : float, optional
Temperature of the material, [K]
Returns
-------
k : float
Thermal conductivity of the material, [W/m/K]
Examples
--------
>>> k_material('Mineral fiber')
0.036
References
----------
.. [1] ASHRAE Handbook: Fundamentals. American Society of Heating,
Refrigerating and Air-Conditioning Engineers, Incorporated, 2013.
.. [2] DIN EN 12524 (2000-07) Building Materials and Products
Hygrothermal Properties - Tabulated Design Values; English Version of
DIN EN 12524.
.. [3] Gesellschaft, V. D. I., ed. VDI Heat Atlas. 2nd edition.
Berlin; New York:: Springer, 2010.
]
if compare[name[ID] <ast.NotIn object at 0x7da2590d7190> name[materials_dict]] begin[:]
variable[ID] assign[=] call[name[nearest_material], parameter[name[ID]]]
if compare[name[ID] in name[refractories]] begin[:]
return[call[name[refractory_VDI_k], parameter[name[ID], name[T]]]] | keyword[def] identifier[k_material] ( identifier[ID] , identifier[T] = literal[int] ):
literal[string]
keyword[if] identifier[ID] keyword[not] keyword[in] identifier[materials_dict] :
identifier[ID] = identifier[nearest_material] ( identifier[ID] )
keyword[if] identifier[ID] keyword[in] identifier[refractories] :
keyword[return] identifier[refractory_VDI_k] ( identifier[ID] , identifier[T] )
keyword[elif] identifier[ID] keyword[in] identifier[ASHRAE] :
keyword[return] identifier[ASHRAE_k] ( identifier[ID] )
keyword[else] :
keyword[return] identifier[float] ( identifier[building_materials] [ identifier[ID] ][ literal[int] ]) | def k_material(ID, T=298.15):
"""Returns thermal conductivity of a building, insulating, or refractory
material from tables in [1]_, [2]_, and [3]_. Thermal conductivity may or
may not be dependent on temperature depending on the source used. Function
must be provided with either a key to one of the dictionaries
`refractories`, `ASHRAE`, or `building_materials` - or a search term which
will pick the closest match based on a fuzzy search. To determine which
source the fuzzy search will pick, use the function `nearest_material`.
Fuzzy searches are slow; it is preferable to call this function with a
material key directly.
Parameters
----------
ID : str
String as described above
T : float, optional
Temperature of the material, [K]
Returns
-------
k : float
Thermal conductivity of the material, [W/m/K]
Examples
--------
>>> k_material('Mineral fiber')
0.036
References
----------
.. [1] ASHRAE Handbook: Fundamentals. American Society of Heating,
Refrigerating and Air-Conditioning Engineers, Incorporated, 2013.
.. [2] DIN EN 12524 (2000-07) Building Materials and Products
Hygrothermal Properties - Tabulated Design Values; English Version of
DIN EN 12524.
.. [3] Gesellschaft, V. D. I., ed. VDI Heat Atlas. 2nd edition.
Berlin; New York:: Springer, 2010.
"""
if ID not in materials_dict:
ID = nearest_material(ID) # depends on [control=['if'], data=['ID']]
if ID in refractories:
return refractory_VDI_k(ID, T) # depends on [control=['if'], data=['ID']]
elif ID in ASHRAE:
return ASHRAE_k(ID) # depends on [control=['if'], data=['ID']]
else:
return float(building_materials[ID][1]) |
def delete(cls, schedule_id, schedule_instance_id,
note_attachment_schedule_instance_id, monetary_account_id=None,
custom_headers=None):
"""
:type user_id: int
:type monetary_account_id: int
:type schedule_id: int
:type schedule_instance_id: int
:type note_attachment_schedule_instance_id: int
:type custom_headers: dict[str, str]|None
:rtype: BunqResponseNone
"""
if custom_headers is None:
custom_headers = {}
api_client = client.ApiClient(cls._get_api_context())
endpoint_url = cls._ENDPOINT_URL_DELETE.format(cls._determine_user_id(),
cls._determine_monetary_account_id(
monetary_account_id),
schedule_id,
schedule_instance_id,
note_attachment_schedule_instance_id)
response_raw = api_client.delete(endpoint_url, custom_headers)
return BunqResponseNone.cast_from_bunq_response(
client.BunqResponse(None, response_raw.headers)
) | def function[delete, parameter[cls, schedule_id, schedule_instance_id, note_attachment_schedule_instance_id, monetary_account_id, custom_headers]]:
constant[
:type user_id: int
:type monetary_account_id: int
:type schedule_id: int
:type schedule_instance_id: int
:type note_attachment_schedule_instance_id: int
:type custom_headers: dict[str, str]|None
:rtype: BunqResponseNone
]
if compare[name[custom_headers] is constant[None]] begin[:]
variable[custom_headers] assign[=] dictionary[[], []]
variable[api_client] assign[=] call[name[client].ApiClient, parameter[call[name[cls]._get_api_context, parameter[]]]]
variable[endpoint_url] assign[=] call[name[cls]._ENDPOINT_URL_DELETE.format, parameter[call[name[cls]._determine_user_id, parameter[]], call[name[cls]._determine_monetary_account_id, parameter[name[monetary_account_id]]], name[schedule_id], name[schedule_instance_id], name[note_attachment_schedule_instance_id]]]
variable[response_raw] assign[=] call[name[api_client].delete, parameter[name[endpoint_url], name[custom_headers]]]
return[call[name[BunqResponseNone].cast_from_bunq_response, parameter[call[name[client].BunqResponse, parameter[constant[None], name[response_raw].headers]]]]] | keyword[def] identifier[delete] ( identifier[cls] , identifier[schedule_id] , identifier[schedule_instance_id] ,
identifier[note_attachment_schedule_instance_id] , identifier[monetary_account_id] = keyword[None] ,
identifier[custom_headers] = keyword[None] ):
literal[string]
keyword[if] identifier[custom_headers] keyword[is] keyword[None] :
identifier[custom_headers] ={}
identifier[api_client] = identifier[client] . identifier[ApiClient] ( identifier[cls] . identifier[_get_api_context] ())
identifier[endpoint_url] = identifier[cls] . identifier[_ENDPOINT_URL_DELETE] . identifier[format] ( identifier[cls] . identifier[_determine_user_id] (),
identifier[cls] . identifier[_determine_monetary_account_id] (
identifier[monetary_account_id] ),
identifier[schedule_id] ,
identifier[schedule_instance_id] ,
identifier[note_attachment_schedule_instance_id] )
identifier[response_raw] = identifier[api_client] . identifier[delete] ( identifier[endpoint_url] , identifier[custom_headers] )
keyword[return] identifier[BunqResponseNone] . identifier[cast_from_bunq_response] (
identifier[client] . identifier[BunqResponse] ( keyword[None] , identifier[response_raw] . identifier[headers] )
) | def delete(cls, schedule_id, schedule_instance_id, note_attachment_schedule_instance_id, monetary_account_id=None, custom_headers=None):
"""
:type user_id: int
:type monetary_account_id: int
:type schedule_id: int
:type schedule_instance_id: int
:type note_attachment_schedule_instance_id: int
:type custom_headers: dict[str, str]|None
:rtype: BunqResponseNone
"""
if custom_headers is None:
custom_headers = {} # depends on [control=['if'], data=['custom_headers']]
api_client = client.ApiClient(cls._get_api_context())
endpoint_url = cls._ENDPOINT_URL_DELETE.format(cls._determine_user_id(), cls._determine_monetary_account_id(monetary_account_id), schedule_id, schedule_instance_id, note_attachment_schedule_instance_id)
response_raw = api_client.delete(endpoint_url, custom_headers)
return BunqResponseNone.cast_from_bunq_response(client.BunqResponse(None, response_raw.headers)) |
def run(self, args):
"""**next**[**+**|**-**] [*count*]
Step one statement ignoring steps into function calls at this level.
With an integer argument, perform `next` that many times. However if
an exception occurs at this level, or we *return*, *yield* or the
thread changes, we stop regardless of count.
A suffix of `+` on the command or an alias to the command forces to
move to another line, while a suffix of `-` does the opposite and
disables the requiring a move to a new line. If no suffix is given,
the debugger setting 'different-line' determines this behavior.
See also:
---------
`step`, `skip`, `jump` (there's no `hop` yet), `continue`, and
`finish` for other ways to progress execution.
"""
if len(args) <= 1:
step_ignore = 0
else:
step_ignore = self.proc.get_int(args[1], default=1,
cmdname='next')
if step_ignore is None: return False
# 0 means stop now or step 1, so we subtract 1.
step_ignore -= 1
pass
self.core.different_line = \
Mcmdfns.want_different_line(args[0],
self.debugger.settings['different'])
self.core.set_next(self.proc.frame, step_ignore)
self.proc.continue_running = True # Break out of command read loop
return True | def function[run, parameter[self, args]]:
constant[**next**[**+**|**-**] [*count*]
Step one statement ignoring steps into function calls at this level.
With an integer argument, perform `next` that many times. However if
an exception occurs at this level, or we *return*, *yield* or the
thread changes, we stop regardless of count.
A suffix of `+` on the command or an alias to the command forces to
move to another line, while a suffix of `-` does the opposite and
disables the requiring a move to a new line. If no suffix is given,
the debugger setting 'different-line' determines this behavior.
See also:
---------
`step`, `skip`, `jump` (there's no `hop` yet), `continue`, and
`finish` for other ways to progress execution.
]
if compare[call[name[len], parameter[name[args]]] less_or_equal[<=] constant[1]] begin[:]
variable[step_ignore] assign[=] constant[0]
name[self].core.different_line assign[=] call[name[Mcmdfns].want_different_line, parameter[call[name[args]][constant[0]], call[name[self].debugger.settings][constant[different]]]]
call[name[self].core.set_next, parameter[name[self].proc.frame, name[step_ignore]]]
name[self].proc.continue_running assign[=] constant[True]
return[constant[True]] | keyword[def] identifier[run] ( identifier[self] , identifier[args] ):
literal[string]
keyword[if] identifier[len] ( identifier[args] )<= literal[int] :
identifier[step_ignore] = literal[int]
keyword[else] :
identifier[step_ignore] = identifier[self] . identifier[proc] . identifier[get_int] ( identifier[args] [ literal[int] ], identifier[default] = literal[int] ,
identifier[cmdname] = literal[string] )
keyword[if] identifier[step_ignore] keyword[is] keyword[None] : keyword[return] keyword[False]
identifier[step_ignore] -= literal[int]
keyword[pass]
identifier[self] . identifier[core] . identifier[different_line] = identifier[Mcmdfns] . identifier[want_different_line] ( identifier[args] [ literal[int] ],
identifier[self] . identifier[debugger] . identifier[settings] [ literal[string] ])
identifier[self] . identifier[core] . identifier[set_next] ( identifier[self] . identifier[proc] . identifier[frame] , identifier[step_ignore] )
identifier[self] . identifier[proc] . identifier[continue_running] = keyword[True]
keyword[return] keyword[True] | def run(self, args):
"""**next**[**+**|**-**] [*count*]
Step one statement ignoring steps into function calls at this level.
With an integer argument, perform `next` that many times. However if
an exception occurs at this level, or we *return*, *yield* or the
thread changes, we stop regardless of count.
A suffix of `+` on the command or an alias to the command forces to
move to another line, while a suffix of `-` does the opposite and
disables the requiring a move to a new line. If no suffix is given,
the debugger setting 'different-line' determines this behavior.
See also:
---------
`step`, `skip`, `jump` (there's no `hop` yet), `continue`, and
`finish` for other ways to progress execution.
"""
if len(args) <= 1:
step_ignore = 0 # depends on [control=['if'], data=[]]
else:
step_ignore = self.proc.get_int(args[1], default=1, cmdname='next')
if step_ignore is None:
return False # depends on [control=['if'], data=[]]
# 0 means stop now or step 1, so we subtract 1.
step_ignore -= 1
pass
self.core.different_line = Mcmdfns.want_different_line(args[0], self.debugger.settings['different'])
self.core.set_next(self.proc.frame, step_ignore)
self.proc.continue_running = True # Break out of command read loop
return True |
def create_log_inform(self, level_name, msg, name, timestamp=None):
"""Create a katcp logging inform message.
Usually this will be called from inside a DeviceLogger object,
but it is also used by the methods in this class when errors
need to be reported to the client.
"""
if timestamp is None:
timestamp = time.time()
katcp_version = self.PROTOCOL_INFO.major
timestamp_msg = ('%.6f' % timestamp
if katcp_version >= SEC_TS_KATCP_MAJOR
else str(int(timestamp*1000)))
return Message.inform("log", level_name, timestamp_msg, name, msg) | def function[create_log_inform, parameter[self, level_name, msg, name, timestamp]]:
constant[Create a katcp logging inform message.
Usually this will be called from inside a DeviceLogger object,
but it is also used by the methods in this class when errors
need to be reported to the client.
]
if compare[name[timestamp] is constant[None]] begin[:]
variable[timestamp] assign[=] call[name[time].time, parameter[]]
variable[katcp_version] assign[=] name[self].PROTOCOL_INFO.major
variable[timestamp_msg] assign[=] <ast.IfExp object at 0x7da1b05364d0>
return[call[name[Message].inform, parameter[constant[log], name[level_name], name[timestamp_msg], name[name], name[msg]]]] | keyword[def] identifier[create_log_inform] ( identifier[self] , identifier[level_name] , identifier[msg] , identifier[name] , identifier[timestamp] = keyword[None] ):
literal[string]
keyword[if] identifier[timestamp] keyword[is] keyword[None] :
identifier[timestamp] = identifier[time] . identifier[time] ()
identifier[katcp_version] = identifier[self] . identifier[PROTOCOL_INFO] . identifier[major]
identifier[timestamp_msg] =( literal[string] % identifier[timestamp]
keyword[if] identifier[katcp_version] >= identifier[SEC_TS_KATCP_MAJOR]
keyword[else] identifier[str] ( identifier[int] ( identifier[timestamp] * literal[int] )))
keyword[return] identifier[Message] . identifier[inform] ( literal[string] , identifier[level_name] , identifier[timestamp_msg] , identifier[name] , identifier[msg] ) | def create_log_inform(self, level_name, msg, name, timestamp=None):
"""Create a katcp logging inform message.
Usually this will be called from inside a DeviceLogger object,
but it is also used by the methods in this class when errors
need to be reported to the client.
"""
if timestamp is None:
timestamp = time.time() # depends on [control=['if'], data=['timestamp']]
katcp_version = self.PROTOCOL_INFO.major
timestamp_msg = '%.6f' % timestamp if katcp_version >= SEC_TS_KATCP_MAJOR else str(int(timestamp * 1000))
return Message.inform('log', level_name, timestamp_msg, name, msg) |
def verify(
self, headers, serialized_request_env, deserialized_request_env):
# type: (Dict[str, Any], str, RequestEnvelope) -> None
"""Verify if the input request timestamp is in tolerated limits.
The verify method retrieves the request timestamp and check if
it falls in the limit set by the tolerance, by checking with
the current timestamp in UTC.
:param headers: headers of the input POST request
:type headers: Dict[str, Any]
:param serialized_request_env: raw request envelope in the
input POST request
:type serialized_request_env: str
:param deserialized_request_env: deserialized request envelope
instance of the input POST request
:type deserialized_request_env:
:py:class:`ask_sdk_model.request_envelope.RequestEnvelope`
:raises: :py:class:`VerificationException` if difference between
local timestamp and input request timestamp is more than
specific tolerance limit
"""
local_now = datetime.now(tz.tzutc())
request_timestamp = deserialized_request_env.request.timestamp
if (abs((local_now - request_timestamp).seconds) >
(self._tolerance_in_millis / 1000)):
raise VerificationException("Timestamp verification failed") | def function[verify, parameter[self, headers, serialized_request_env, deserialized_request_env]]:
constant[Verify if the input request timestamp is in tolerated limits.
The verify method retrieves the request timestamp and check if
it falls in the limit set by the tolerance, by checking with
the current timestamp in UTC.
:param headers: headers of the input POST request
:type headers: Dict[str, Any]
:param serialized_request_env: raw request envelope in the
input POST request
:type serialized_request_env: str
:param deserialized_request_env: deserialized request envelope
instance of the input POST request
:type deserialized_request_env:
:py:class:`ask_sdk_model.request_envelope.RequestEnvelope`
:raises: :py:class:`VerificationException` if difference between
local timestamp and input request timestamp is more than
specific tolerance limit
]
variable[local_now] assign[=] call[name[datetime].now, parameter[call[name[tz].tzutc, parameter[]]]]
variable[request_timestamp] assign[=] name[deserialized_request_env].request.timestamp
if compare[call[name[abs], parameter[binary_operation[name[local_now] - name[request_timestamp]].seconds]] greater[>] binary_operation[name[self]._tolerance_in_millis / constant[1000]]] begin[:]
<ast.Raise object at 0x7da1b19efd30> | keyword[def] identifier[verify] (
identifier[self] , identifier[headers] , identifier[serialized_request_env] , identifier[deserialized_request_env] ):
literal[string]
identifier[local_now] = identifier[datetime] . identifier[now] ( identifier[tz] . identifier[tzutc] ())
identifier[request_timestamp] = identifier[deserialized_request_env] . identifier[request] . identifier[timestamp]
keyword[if] ( identifier[abs] (( identifier[local_now] - identifier[request_timestamp] ). identifier[seconds] )>
( identifier[self] . identifier[_tolerance_in_millis] / literal[int] )):
keyword[raise] identifier[VerificationException] ( literal[string] ) | def verify(self, headers, serialized_request_env, deserialized_request_env):
# type: (Dict[str, Any], str, RequestEnvelope) -> None
'Verify if the input request timestamp is in tolerated limits.\n\n The verify method retrieves the request timestamp and check if\n it falls in the limit set by the tolerance, by checking with\n the current timestamp in UTC.\n\n :param headers: headers of the input POST request\n :type headers: Dict[str, Any]\n :param serialized_request_env: raw request envelope in the\n input POST request\n :type serialized_request_env: str\n :param deserialized_request_env: deserialized request envelope\n instance of the input POST request\n :type deserialized_request_env:\n :py:class:`ask_sdk_model.request_envelope.RequestEnvelope`\n :raises: :py:class:`VerificationException` if difference between\n local timestamp and input request timestamp is more than\n specific tolerance limit\n '
local_now = datetime.now(tz.tzutc())
request_timestamp = deserialized_request_env.request.timestamp
if abs((local_now - request_timestamp).seconds) > self._tolerance_in_millis / 1000:
raise VerificationException('Timestamp verification failed') # depends on [control=['if'], data=[]] |
def _get_keywords(self, location, keywords):
"""Format GET request's parameters from keywords."""
if 'xml' in keywords:
keywords.pop('xml')
self.xml = True
else:
keywords['file_type'] = 'json'
if 'id' in keywords:
if location != 'series':
location = location.rstrip('s')
key = '%s_id' % location
value = keywords.pop('id')
keywords[key] = value
if 'start' in keywords:
time = keywords.pop('start')
keywords['realtime_start'] = time
if 'end' in keywords:
time = keywords.pop('end')
keywords['realtime_end'] = time
if 'sort' in keywords:
order = keywords.pop('sort')
keywords['sort_order'] = order
keywords['api_key'] = self.api_key
return keywords | def function[_get_keywords, parameter[self, location, keywords]]:
constant[Format GET request's parameters from keywords.]
if compare[constant[xml] in name[keywords]] begin[:]
call[name[keywords].pop, parameter[constant[xml]]]
name[self].xml assign[=] constant[True]
if compare[constant[id] in name[keywords]] begin[:]
if compare[name[location] not_equal[!=] constant[series]] begin[:]
variable[location] assign[=] call[name[location].rstrip, parameter[constant[s]]]
variable[key] assign[=] binary_operation[constant[%s_id] <ast.Mod object at 0x7da2590d6920> name[location]]
variable[value] assign[=] call[name[keywords].pop, parameter[constant[id]]]
call[name[keywords]][name[key]] assign[=] name[value]
if compare[constant[start] in name[keywords]] begin[:]
variable[time] assign[=] call[name[keywords].pop, parameter[constant[start]]]
call[name[keywords]][constant[realtime_start]] assign[=] name[time]
if compare[constant[end] in name[keywords]] begin[:]
variable[time] assign[=] call[name[keywords].pop, parameter[constant[end]]]
call[name[keywords]][constant[realtime_end]] assign[=] name[time]
if compare[constant[sort] in name[keywords]] begin[:]
variable[order] assign[=] call[name[keywords].pop, parameter[constant[sort]]]
call[name[keywords]][constant[sort_order]] assign[=] name[order]
call[name[keywords]][constant[api_key]] assign[=] name[self].api_key
return[name[keywords]] | keyword[def] identifier[_get_keywords] ( identifier[self] , identifier[location] , identifier[keywords] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[keywords] :
identifier[keywords] . identifier[pop] ( literal[string] )
identifier[self] . identifier[xml] = keyword[True]
keyword[else] :
identifier[keywords] [ literal[string] ]= literal[string]
keyword[if] literal[string] keyword[in] identifier[keywords] :
keyword[if] identifier[location] != literal[string] :
identifier[location] = identifier[location] . identifier[rstrip] ( literal[string] )
identifier[key] = literal[string] % identifier[location]
identifier[value] = identifier[keywords] . identifier[pop] ( literal[string] )
identifier[keywords] [ identifier[key] ]= identifier[value]
keyword[if] literal[string] keyword[in] identifier[keywords] :
identifier[time] = identifier[keywords] . identifier[pop] ( literal[string] )
identifier[keywords] [ literal[string] ]= identifier[time]
keyword[if] literal[string] keyword[in] identifier[keywords] :
identifier[time] = identifier[keywords] . identifier[pop] ( literal[string] )
identifier[keywords] [ literal[string] ]= identifier[time]
keyword[if] literal[string] keyword[in] identifier[keywords] :
identifier[order] = identifier[keywords] . identifier[pop] ( literal[string] )
identifier[keywords] [ literal[string] ]= identifier[order]
identifier[keywords] [ literal[string] ]= identifier[self] . identifier[api_key]
keyword[return] identifier[keywords] | def _get_keywords(self, location, keywords):
"""Format GET request's parameters from keywords."""
if 'xml' in keywords:
keywords.pop('xml')
self.xml = True # depends on [control=['if'], data=['keywords']]
else:
keywords['file_type'] = 'json'
if 'id' in keywords:
if location != 'series':
location = location.rstrip('s') # depends on [control=['if'], data=['location']]
key = '%s_id' % location
value = keywords.pop('id')
keywords[key] = value # depends on [control=['if'], data=['keywords']]
if 'start' in keywords:
time = keywords.pop('start')
keywords['realtime_start'] = time # depends on [control=['if'], data=['keywords']]
if 'end' in keywords:
time = keywords.pop('end')
keywords['realtime_end'] = time # depends on [control=['if'], data=['keywords']]
if 'sort' in keywords:
order = keywords.pop('sort')
keywords['sort_order'] = order # depends on [control=['if'], data=['keywords']]
keywords['api_key'] = self.api_key
return keywords |
def data_chunk(data, chunk, with_overlap=False):
"""Get a data chunk."""
assert isinstance(chunk, tuple)
if len(chunk) == 2:
i, j = chunk
elif len(chunk) == 4:
if with_overlap:
i, j = chunk[:2]
else:
i, j = chunk[2:]
else:
raise ValueError("'chunk' should have 2 or 4 elements, "
"not {0:d}".format(len(chunk)))
return data[i:j, ...] | def function[data_chunk, parameter[data, chunk, with_overlap]]:
constant[Get a data chunk.]
assert[call[name[isinstance], parameter[name[chunk], name[tuple]]]]
if compare[call[name[len], parameter[name[chunk]]] equal[==] constant[2]] begin[:]
<ast.Tuple object at 0x7da1b12f2a40> assign[=] name[chunk]
return[call[name[data]][tuple[[<ast.Slice object at 0x7da1b12bbf70>, <ast.Constant object at 0x7da1b12b97b0>]]]] | keyword[def] identifier[data_chunk] ( identifier[data] , identifier[chunk] , identifier[with_overlap] = keyword[False] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[chunk] , identifier[tuple] )
keyword[if] identifier[len] ( identifier[chunk] )== literal[int] :
identifier[i] , identifier[j] = identifier[chunk]
keyword[elif] identifier[len] ( identifier[chunk] )== literal[int] :
keyword[if] identifier[with_overlap] :
identifier[i] , identifier[j] = identifier[chunk] [: literal[int] ]
keyword[else] :
identifier[i] , identifier[j] = identifier[chunk] [ literal[int] :]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] . identifier[format] ( identifier[len] ( identifier[chunk] )))
keyword[return] identifier[data] [ identifier[i] : identifier[j] ,...] | def data_chunk(data, chunk, with_overlap=False):
"""Get a data chunk."""
assert isinstance(chunk, tuple)
if len(chunk) == 2:
(i, j) = chunk # depends on [control=['if'], data=[]]
elif len(chunk) == 4:
if with_overlap:
(i, j) = chunk[:2] # depends on [control=['if'], data=[]]
else:
(i, j) = chunk[2:] # depends on [control=['if'], data=[]]
else:
raise ValueError("'chunk' should have 2 or 4 elements, not {0:d}".format(len(chunk)))
return data[i:j, ...] |
def IterQueryInstances(self, FilterQueryLanguage, FilterQuery,
namespace=None, ReturnQueryResultClass=None,
OperationTimeout=None, ContinueOnError=None,
MaxObjectCount=DEFAULT_ITER_MAXOBJECTCOUNT,
**extra):
# pylint: disable=line-too-long
"""
Execute a query in a namespace, using the Python :term:`py:generator`
idiom to return the result.
*New in pywbem 0.10 as experimental and finalized in 0.12.*
This method uses the corresponding pull operations if supported by the
WBEM server or otherwise the corresponding traditional operation.
This method is an alternative to using the pull operations directly,
that frees the user of having to know whether the WBEM server supports
pull operations.
Other than the other Iter...() methods, this method does not return
a generator object directly, but as a property of the returned object.
The reason for this design is the additionally returned query
result class. The generator property in the returned object is a
generator object that returns the instances in the query result one by
one (using :keyword:`yield`) when the caller iterates through the
generator object. This design causes the entire query result to be
materialized, even if pull operations are used.
By default, this method attempts to perform the corresponding pull
operations
(:meth:`~pywbem.WBEMConnection.OpenQueryInstances` and
:meth:`~pywbem.WBEMConnection.PullInstances`).
If these pull operations are not supported by the WBEM server, this
method falls back to using the corresponding traditional operation
(:meth:`~pywbem.WBEMConnection.ExecQuery`).
Whether the WBEM server supports these pull operations is remembered
in the :class:`~pywbem.WBEMConnection` object (by operation type), and
avoids unnecessary attempts to try these pull operations on that
connection in the future.
The `use_pull_operations` init parameter of
:class:`~pywbem.WBEMConnection` can be used to control the preference
for always using pull operations, always using traditional operations,
or using pull operations if supported by the WBEM server (the default).
This method provides all of the controls of the corresponding pull
operations except for the ability to set different response sizes on
each request; the response size (defined by the `MaxObjectCount`
parameter) is the same for all pull operations in the enumeration
session.
In addition, some functionality is only available if the corresponding
pull operations are used by this method:
* Setting the `ContinueOnError` parameter to `True` will be rejected if
the corresponding traditional operation is used by this method.
The enumeration session that is opened with the WBEM server when using
pull operations is closed automatically when the returned generator
object is exhausted, or when the generator object is closed using its
:meth:`~py:generator.close` method (which may also be called before the
generator is exhausted).
Parameters:
QueryLanguage (:term:`string`):
Name of the query language used in the `Query` parameter, e.g.
"DMTF:CQL" for CIM Query Language, and "WQL" for WBEM Query
Language. Because this is not a filter query, "DMTF:FQL" is not a
valid query language for this request.
Query (:term:`string`):
Query string in the query language specified in the `QueryLanguage`
parameter.
namespace (:term:`string`):
Name of the CIM namespace to be used (case independent).
Leading and trailing slash characters will be stripped. The lexical
case will be preserved.
If `None`, the default namespace of the connection object will be
used.
ReturnQueryResultClass (:class:`py:bool`):
Controls whether a class definition describing the returned
instances will be returned.
`None` will cause the server to use its default of `False`.
`None` will cause the server to use its default of `False`.
OperationTimeout (:class:`~pywbem.Uint32`):
Minimum time in seconds the WBEM Server shall maintain an open
enumeration session after a previous Open or Pull request is
sent to the client. Once this timeout time has expired, the
WBEM server may close the enumeration session.
* If not `None`, this parameter is sent to the WBEM server as the
proposed timeout for the enumeration session. A value of 0
indicates that the server is expected to never time out. The
server may reject the proposed value, causing a
:class:`~pywbem.CIMError` to be raised with status code
:attr:`~pywbem.CIM_ERR_INVALID_OPERATION_TIMEOUT`.
* If `None`, this parameter is not passed to the WBEM server, and
causes the server-implemented default timeout to be used.
ContinueOnError (:class:`py:bool`):
Indicates to the WBEM server to continue sending responses
after an error response has been sent.
* If `True`, the server is to continue sending responses after
sending an error response. Not all servers support continuation
on error; a server that does not support it must send an error
response if `True` was specified, causing
:class:`~pywbem.CIMError` to be raised with status code
:attr:`~pywbem.CIM_ERR_CONTINUATION_ON_ERROR_NOT_SUPPORTED`.
If the corresponding traditional operation is used by this
method, :exc:`~py:exceptions.ValueError` will be raised.
* If `False`, the server is requested to close the enumeration after
sending an error response.
* If `None`, this parameter is not passed to the WBEM server, and
causes the server-implemented default behaviour to be used.
:term:`DSP0200` defines that the server-implemented default is
`False`.
MaxObjectCount (:class:`~pywbem.Uint32`)
Maximum number of instances the WBEM server may return for each of
the open and pull requests issued during the iterations over the
returned generator object.
* If positive, the WBEM server is to return no more than the
specified number of instances.
* Zero is not allowed; it would mean that zero instances
are to be returned for open and all pull requests issued to the
server.
* The default is defined as a system config variable.
* `None` is not allowed.
**extra :
Additional keyword arguments are passed as additional operation
parameters to the WBEM server.
Note that :term:`DSP0200` does not define any additional parameters
for this operation.
Returns:
:class:`~pywbem.IterQueryInstancesReturn`: An object with the
following properties:
* **query_result_class** (:class:`~pywbem.CIMClass`):
The query result class, if requested via the
`ReturnQueryResultClass` parameter.
`None`, if a query result class was not requested.
* **generator** (:term:`py:generator` iterating :class:`~pywbem.CIMInstance`):
A generator object that iterates the CIM instances representing the
query result. These instances do not have an instance path set.
Raises:
Exceptions described in :class:`~pywbem.WBEMConnection`.
Example::
result = conn.IterQueryInstances(
'DMTF:CQL',
'SELECT FROM * where pl > 2')
for inst in result.generator:
print('instance {0}'.format(inst.tomof()))
""" # noqa: E501
# pylint: enable=line-too-long
class IterQueryInstancesReturn(object):
"""
The return data for
:meth:`~pywbem.WBEMConnection.IterQueryInstances`.
"""
def __init__(self, instances, query_result_class=None):
"""Save any query_result_class and instances returned"""
self._query_result_class = query_result_class
self.instances = instances
@property
def query_result_class(self):
"""
:class:`~pywbem.CIMClass`: The query result class, if requested
via the `ReturnQueryResultClass` parameter of
:meth:`~pywbem.WBEMConnection.IterQueryInstances`.
`None`, if a query result class was not requested.
"""
return self._query_result_class
@property
def generator(self):
"""
:term:`py:generator` iterating :class:`~pywbem.CIMInstance`:
A generator object that iterates the CIM instances representing
the query result. These instances do not have an instance path
set.
"""
for inst in self.instances:
yield inst
_validateIterCommonParams(MaxObjectCount, OperationTimeout)
# Common variable for pull result tuple used by pulls and finally:
pull_result = None
try: # try / finally block to allow iter.close()
_instances = []
if (self._use_query_pull_operations is None or
self._use_query_pull_operations):
try: # operation try block
pull_result = self.OpenQueryInstances(
FilterQueryLanguage,
FilterQuery,
namespace=namespace,
ReturnQueryResultClass=ReturnQueryResultClass,
OperationTimeout=OperationTimeout,
ContinueOnError=ContinueOnError,
MaxObjectCount=MaxObjectCount, **extra)
# Open operation succeeded; set has_pull flag
self._use_query_pull_operations = True
_instances = pull_result.instances
# get QueryResultClass from if returned with open
# request.
qrc = pull_result.query_result_class if \
ReturnQueryResultClass else None
if not pull_result.eos:
while not pull_result.eos:
pull_result = self.PullInstances(
pull_result.context,
MaxObjectCount=MaxObjectCount)
_instances.extend(pull_result.instances)
rtn = IterQueryInstancesReturn(_instances,
query_result_class=qrc)
pull_result = None # clear the pull_result
return rtn
# If NOT_SUPPORTED and first request, set flag and try
# alternative request operation.
# If _use_query_pull_operations is True, always raise
# the exception
except CIMError as ce:
if self._use_query_pull_operations is None and \
ce.status_code == CIM_ERR_NOT_SUPPORTED:
self._use_query_pull_operations = False
else:
raise
# Alternate request if Pull not implemented. This does not allow
# the ContinueOnError or ReturnQueryResultClass
assert self._use_query_pull_operations is False
if ReturnQueryResultClass is not None:
raise ValueError('ExecQuery does not support'
' ReturnQueryResultClass.')
if ContinueOnError is not None:
raise ValueError('ExecQuery does not support '
'ContinueOnError.')
# The parameters are QueryLanguage and Query for ExecQuery
_instances = self.ExecQuery(FilterQueryLanguage,
FilterQuery,
namespace=namespace, **extra)
rtn = IterQueryInstancesReturn(_instances)
return rtn
# Cleanup if caller closes the iterator before exhausting it
finally:
# Cleanup only required if the pull context is open and not complete
if pull_result is not None and not pull_result.eos:
self.CloseEnumeration(pull_result.context)
pull_result = None | def function[IterQueryInstances, parameter[self, FilterQueryLanguage, FilterQuery, namespace, ReturnQueryResultClass, OperationTimeout, ContinueOnError, MaxObjectCount]]:
constant[
Execute a query in a namespace, using the Python :term:`py:generator`
idiom to return the result.
*New in pywbem 0.10 as experimental and finalized in 0.12.*
This method uses the corresponding pull operations if supported by the
WBEM server or otherwise the corresponding traditional operation.
This method is an alternative to using the pull operations directly,
that frees the user of having to know whether the WBEM server supports
pull operations.
Other than the other Iter...() methods, this method does not return
a generator object directly, but as a property of the returned object.
The reason for this design is the additionally returned query
result class. The generator property in the returned object is a
generator object that returns the instances in the query result one by
one (using :keyword:`yield`) when the caller iterates through the
generator object. This design causes the entire query result to be
materialized, even if pull operations are used.
By default, this method attempts to perform the corresponding pull
operations
(:meth:`~pywbem.WBEMConnection.OpenQueryInstances` and
:meth:`~pywbem.WBEMConnection.PullInstances`).
If these pull operations are not supported by the WBEM server, this
method falls back to using the corresponding traditional operation
(:meth:`~pywbem.WBEMConnection.ExecQuery`).
Whether the WBEM server supports these pull operations is remembered
in the :class:`~pywbem.WBEMConnection` object (by operation type), and
avoids unnecessary attempts to try these pull operations on that
connection in the future.
The `use_pull_operations` init parameter of
:class:`~pywbem.WBEMConnection` can be used to control the preference
for always using pull operations, always using traditional operations,
or using pull operations if supported by the WBEM server (the default).
This method provides all of the controls of the corresponding pull
operations except for the ability to set different response sizes on
each request; the response size (defined by the `MaxObjectCount`
parameter) is the same for all pull operations in the enumeration
session.
In addition, some functionality is only available if the corresponding
pull operations are used by this method:
* Setting the `ContinueOnError` parameter to `True` will be rejected if
the corresponding traditional operation is used by this method.
The enumeration session that is opened with the WBEM server when using
pull operations is closed automatically when the returned generator
object is exhausted, or when the generator object is closed using its
:meth:`~py:generator.close` method (which may also be called before the
generator is exhausted).
Parameters:
QueryLanguage (:term:`string`):
Name of the query language used in the `Query` parameter, e.g.
"DMTF:CQL" for CIM Query Language, and "WQL" for WBEM Query
Language. Because this is not a filter query, "DMTF:FQL" is not a
valid query language for this request.
Query (:term:`string`):
Query string in the query language specified in the `QueryLanguage`
parameter.
namespace (:term:`string`):
Name of the CIM namespace to be used (case independent).
Leading and trailing slash characters will be stripped. The lexical
case will be preserved.
If `None`, the default namespace of the connection object will be
used.
ReturnQueryResultClass (:class:`py:bool`):
Controls whether a class definition describing the returned
instances will be returned.
`None` will cause the server to use its default of `False`.
`None` will cause the server to use its default of `False`.
OperationTimeout (:class:`~pywbem.Uint32`):
Minimum time in seconds the WBEM Server shall maintain an open
enumeration session after a previous Open or Pull request is
sent to the client. Once this timeout time has expired, the
WBEM server may close the enumeration session.
* If not `None`, this parameter is sent to the WBEM server as the
proposed timeout for the enumeration session. A value of 0
indicates that the server is expected to never time out. The
server may reject the proposed value, causing a
:class:`~pywbem.CIMError` to be raised with status code
:attr:`~pywbem.CIM_ERR_INVALID_OPERATION_TIMEOUT`.
* If `None`, this parameter is not passed to the WBEM server, and
causes the server-implemented default timeout to be used.
ContinueOnError (:class:`py:bool`):
Indicates to the WBEM server to continue sending responses
after an error response has been sent.
* If `True`, the server is to continue sending responses after
sending an error response. Not all servers support continuation
on error; a server that does not support it must send an error
response if `True` was specified, causing
:class:`~pywbem.CIMError` to be raised with status code
:attr:`~pywbem.CIM_ERR_CONTINUATION_ON_ERROR_NOT_SUPPORTED`.
If the corresponding traditional operation is used by this
method, :exc:`~py:exceptions.ValueError` will be raised.
* If `False`, the server is requested to close the enumeration after
sending an error response.
* If `None`, this parameter is not passed to the WBEM server, and
causes the server-implemented default behaviour to be used.
:term:`DSP0200` defines that the server-implemented default is
`False`.
MaxObjectCount (:class:`~pywbem.Uint32`)
Maximum number of instances the WBEM server may return for each of
the open and pull requests issued during the iterations over the
returned generator object.
* If positive, the WBEM server is to return no more than the
specified number of instances.
* Zero is not allowed; it would mean that zero instances
are to be returned for open and all pull requests issued to the
server.
* The default is defined as a system config variable.
* `None` is not allowed.
**extra :
Additional keyword arguments are passed as additional operation
parameters to the WBEM server.
Note that :term:`DSP0200` does not define any additional parameters
for this operation.
Returns:
:class:`~pywbem.IterQueryInstancesReturn`: An object with the
following properties:
* **query_result_class** (:class:`~pywbem.CIMClass`):
The query result class, if requested via the
`ReturnQueryResultClass` parameter.
`None`, if a query result class was not requested.
* **generator** (:term:`py:generator` iterating :class:`~pywbem.CIMInstance`):
A generator object that iterates the CIM instances representing the
query result. These instances do not have an instance path set.
Raises:
Exceptions described in :class:`~pywbem.WBEMConnection`.
Example::
result = conn.IterQueryInstances(
'DMTF:CQL',
'SELECT FROM * where pl > 2')
for inst in result.generator:
print('instance {0}'.format(inst.tomof()))
]
class class[IterQueryInstancesReturn, parameter[]] begin[:]
constant[
The return data for
:meth:`~pywbem.WBEMConnection.IterQueryInstances`.
]
def function[__init__, parameter[self, instances, query_result_class]]:
constant[Save any query_result_class and instances returned]
name[self]._query_result_class assign[=] name[query_result_class]
name[self].instances assign[=] name[instances]
def function[query_result_class, parameter[self]]:
constant[
:class:`~pywbem.CIMClass`: The query result class, if requested
via the `ReturnQueryResultClass` parameter of
:meth:`~pywbem.WBEMConnection.IterQueryInstances`.
`None`, if a query result class was not requested.
]
return[name[self]._query_result_class]
def function[generator, parameter[self]]:
constant[
:term:`py:generator` iterating :class:`~pywbem.CIMInstance`:
A generator object that iterates the CIM instances representing
the query result. These instances do not have an instance path
set.
]
for taget[name[inst]] in starred[name[self].instances] begin[:]
<ast.Yield object at 0x7da20e963640>
call[name[_validateIterCommonParams], parameter[name[MaxObjectCount], name[OperationTimeout]]]
variable[pull_result] assign[=] constant[None]
<ast.Try object at 0x7da20e960100> | keyword[def] identifier[IterQueryInstances] ( identifier[self] , identifier[FilterQueryLanguage] , identifier[FilterQuery] ,
identifier[namespace] = keyword[None] , identifier[ReturnQueryResultClass] = keyword[None] ,
identifier[OperationTimeout] = keyword[None] , identifier[ContinueOnError] = keyword[None] ,
identifier[MaxObjectCount] = identifier[DEFAULT_ITER_MAXOBJECTCOUNT] ,
** identifier[extra] ):
literal[string]
keyword[class] identifier[IterQueryInstancesReturn] ( identifier[object] ):
literal[string]
keyword[def] identifier[__init__] ( identifier[self] , identifier[instances] , identifier[query_result_class] = keyword[None] ):
literal[string]
identifier[self] . identifier[_query_result_class] = identifier[query_result_class]
identifier[self] . identifier[instances] = identifier[instances]
@ identifier[property]
keyword[def] identifier[query_result_class] ( identifier[self] ):
literal[string]
keyword[return] identifier[self] . identifier[_query_result_class]
@ identifier[property]
keyword[def] identifier[generator] ( identifier[self] ):
literal[string]
keyword[for] identifier[inst] keyword[in] identifier[self] . identifier[instances] :
keyword[yield] identifier[inst]
identifier[_validateIterCommonParams] ( identifier[MaxObjectCount] , identifier[OperationTimeout] )
identifier[pull_result] = keyword[None]
keyword[try] :
identifier[_instances] =[]
keyword[if] ( identifier[self] . identifier[_use_query_pull_operations] keyword[is] keyword[None] keyword[or]
identifier[self] . identifier[_use_query_pull_operations] ):
keyword[try] :
identifier[pull_result] = identifier[self] . identifier[OpenQueryInstances] (
identifier[FilterQueryLanguage] ,
identifier[FilterQuery] ,
identifier[namespace] = identifier[namespace] ,
identifier[ReturnQueryResultClass] = identifier[ReturnQueryResultClass] ,
identifier[OperationTimeout] = identifier[OperationTimeout] ,
identifier[ContinueOnError] = identifier[ContinueOnError] ,
identifier[MaxObjectCount] = identifier[MaxObjectCount] ,** identifier[extra] )
identifier[self] . identifier[_use_query_pull_operations] = keyword[True]
identifier[_instances] = identifier[pull_result] . identifier[instances]
identifier[qrc] = identifier[pull_result] . identifier[query_result_class] keyword[if] identifier[ReturnQueryResultClass] keyword[else] keyword[None]
keyword[if] keyword[not] identifier[pull_result] . identifier[eos] :
keyword[while] keyword[not] identifier[pull_result] . identifier[eos] :
identifier[pull_result] = identifier[self] . identifier[PullInstances] (
identifier[pull_result] . identifier[context] ,
identifier[MaxObjectCount] = identifier[MaxObjectCount] )
identifier[_instances] . identifier[extend] ( identifier[pull_result] . identifier[instances] )
identifier[rtn] = identifier[IterQueryInstancesReturn] ( identifier[_instances] ,
identifier[query_result_class] = identifier[qrc] )
identifier[pull_result] = keyword[None]
keyword[return] identifier[rtn]
keyword[except] identifier[CIMError] keyword[as] identifier[ce] :
keyword[if] identifier[self] . identifier[_use_query_pull_operations] keyword[is] keyword[None] keyword[and] identifier[ce] . identifier[status_code] == identifier[CIM_ERR_NOT_SUPPORTED] :
identifier[self] . identifier[_use_query_pull_operations] = keyword[False]
keyword[else] :
keyword[raise]
keyword[assert] identifier[self] . identifier[_use_query_pull_operations] keyword[is] keyword[False]
keyword[if] identifier[ReturnQueryResultClass] keyword[is] keyword[not] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
keyword[if] identifier[ContinueOnError] keyword[is] keyword[not] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
identifier[_instances] = identifier[self] . identifier[ExecQuery] ( identifier[FilterQueryLanguage] ,
identifier[FilterQuery] ,
identifier[namespace] = identifier[namespace] ,** identifier[extra] )
identifier[rtn] = identifier[IterQueryInstancesReturn] ( identifier[_instances] )
keyword[return] identifier[rtn]
keyword[finally] :
keyword[if] identifier[pull_result] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[pull_result] . identifier[eos] :
identifier[self] . identifier[CloseEnumeration] ( identifier[pull_result] . identifier[context] )
identifier[pull_result] = keyword[None] | def IterQueryInstances(self, FilterQueryLanguage, FilterQuery, namespace=None, ReturnQueryResultClass=None, OperationTimeout=None, ContinueOnError=None, MaxObjectCount=DEFAULT_ITER_MAXOBJECTCOUNT, **extra):
# pylint: disable=line-too-long
'\n Execute a query in a namespace, using the Python :term:`py:generator`\n idiom to return the result.\n\n *New in pywbem 0.10 as experimental and finalized in 0.12.*\n\n This method uses the corresponding pull operations if supported by the\n WBEM server or otherwise the corresponding traditional operation.\n This method is an alternative to using the pull operations directly,\n that frees the user of having to know whether the WBEM server supports\n pull operations.\n\n Other than the other Iter...() methods, this method does not return\n a generator object directly, but as a property of the returned object.\n The reason for this design is the additionally returned query\n result class. The generator property in the returned object is a\n generator object that returns the instances in the query result one by\n one (using :keyword:`yield`) when the caller iterates through the\n generator object. This design causes the entire query result to be\n materialized, even if pull operations are used.\n\n By default, this method attempts to perform the corresponding pull\n operations\n (:meth:`~pywbem.WBEMConnection.OpenQueryInstances` and\n :meth:`~pywbem.WBEMConnection.PullInstances`).\n If these pull operations are not supported by the WBEM server, this\n method falls back to using the corresponding traditional operation\n (:meth:`~pywbem.WBEMConnection.ExecQuery`).\n Whether the WBEM server supports these pull operations is remembered\n in the :class:`~pywbem.WBEMConnection` object (by operation type), and\n avoids unnecessary attempts to try these pull operations on that\n connection in the future.\n The `use_pull_operations` init parameter of\n :class:`~pywbem.WBEMConnection` can be used to control the preference\n for always using pull operations, always using traditional operations,\n or using pull operations if supported by the WBEM server (the default).\n\n This method provides all of the controls of the corresponding pull\n operations except for the ability to set different response sizes on\n each request; the response size (defined by the `MaxObjectCount`\n parameter) is the same for all pull operations in the enumeration\n session.\n\n In addition, some functionality is only available if the corresponding\n pull operations are used by this method:\n\n * Setting the `ContinueOnError` parameter to `True` will be rejected if\n the corresponding traditional operation is used by this method.\n\n The enumeration session that is opened with the WBEM server when using\n pull operations is closed automatically when the returned generator\n object is exhausted, or when the generator object is closed using its\n :meth:`~py:generator.close` method (which may also be called before the\n generator is exhausted).\n\n Parameters:\n\n QueryLanguage (:term:`string`):\n Name of the query language used in the `Query` parameter, e.g.\n "DMTF:CQL" for CIM Query Language, and "WQL" for WBEM Query\n Language. Because this is not a filter query, "DMTF:FQL" is not a\n valid query language for this request.\n\n Query (:term:`string`):\n Query string in the query language specified in the `QueryLanguage`\n parameter.\n\n namespace (:term:`string`):\n Name of the CIM namespace to be used (case independent).\n\n Leading and trailing slash characters will be stripped. The lexical\n case will be preserved.\n\n If `None`, the default namespace of the connection object will be\n used.\n\n ReturnQueryResultClass (:class:`py:bool`):\n Controls whether a class definition describing the returned\n instances will be returned.\n\n `None` will cause the server to use its default of `False`.\n\n `None` will cause the server to use its default of `False`.\n\n OperationTimeout (:class:`~pywbem.Uint32`):\n Minimum time in seconds the WBEM Server shall maintain an open\n enumeration session after a previous Open or Pull request is\n sent to the client. Once this timeout time has expired, the\n WBEM server may close the enumeration session.\n\n * If not `None`, this parameter is sent to the WBEM server as the\n proposed timeout for the enumeration session. A value of 0\n indicates that the server is expected to never time out. The\n server may reject the proposed value, causing a\n :class:`~pywbem.CIMError` to be raised with status code\n :attr:`~pywbem.CIM_ERR_INVALID_OPERATION_TIMEOUT`.\n * If `None`, this parameter is not passed to the WBEM server, and\n causes the server-implemented default timeout to be used.\n\n ContinueOnError (:class:`py:bool`):\n Indicates to the WBEM server to continue sending responses\n after an error response has been sent.\n\n * If `True`, the server is to continue sending responses after\n sending an error response. Not all servers support continuation\n on error; a server that does not support it must send an error\n response if `True` was specified, causing\n :class:`~pywbem.CIMError` to be raised with status code\n :attr:`~pywbem.CIM_ERR_CONTINUATION_ON_ERROR_NOT_SUPPORTED`.\n If the corresponding traditional operation is used by this\n method, :exc:`~py:exceptions.ValueError` will be raised.\n * If `False`, the server is requested to close the enumeration after\n sending an error response.\n * If `None`, this parameter is not passed to the WBEM server, and\n causes the server-implemented default behaviour to be used.\n :term:`DSP0200` defines that the server-implemented default is\n `False`.\n\n MaxObjectCount (:class:`~pywbem.Uint32`)\n Maximum number of instances the WBEM server may return for each of\n the open and pull requests issued during the iterations over the\n returned generator object.\n\n * If positive, the WBEM server is to return no more than the\n specified number of instances.\n * Zero is not allowed; it would mean that zero instances\n are to be returned for open and all pull requests issued to the\n server.\n * The default is defined as a system config variable.\n * `None` is not allowed.\n\n **extra :\n Additional keyword arguments are passed as additional operation\n parameters to the WBEM server.\n Note that :term:`DSP0200` does not define any additional parameters\n for this operation.\n\n Returns:\n\n :class:`~pywbem.IterQueryInstancesReturn`: An object with the\n following properties:\n\n * **query_result_class** (:class:`~pywbem.CIMClass`):\n\n The query result class, if requested via the\n `ReturnQueryResultClass` parameter.\n\n `None`, if a query result class was not requested.\n\n * **generator** (:term:`py:generator` iterating :class:`~pywbem.CIMInstance`):\n\n A generator object that iterates the CIM instances representing the\n query result. These instances do not have an instance path set.\n\n Raises:\n\n Exceptions described in :class:`~pywbem.WBEMConnection`.\n\n Example::\n\n result = conn.IterQueryInstances(\n \'DMTF:CQL\',\n \'SELECT FROM * where pl > 2\')\n for inst in result.generator:\n print(\'instance {0}\'.format(inst.tomof()))\n ' # noqa: E501
# pylint: enable=line-too-long
class IterQueryInstancesReturn(object):
"""
The return data for
:meth:`~pywbem.WBEMConnection.IterQueryInstances`.
"""
def __init__(self, instances, query_result_class=None):
"""Save any query_result_class and instances returned"""
self._query_result_class = query_result_class
self.instances = instances
@property
def query_result_class(self):
"""
:class:`~pywbem.CIMClass`: The query result class, if requested
via the `ReturnQueryResultClass` parameter of
:meth:`~pywbem.WBEMConnection.IterQueryInstances`.
`None`, if a query result class was not requested.
"""
return self._query_result_class
@property
def generator(self):
"""
:term:`py:generator` iterating :class:`~pywbem.CIMInstance`:
A generator object that iterates the CIM instances representing
the query result. These instances do not have an instance path
set.
"""
for inst in self.instances:
yield inst # depends on [control=['for'], data=['inst']]
_validateIterCommonParams(MaxObjectCount, OperationTimeout)
# Common variable for pull result tuple used by pulls and finally:
pull_result = None
try: # try / finally block to allow iter.close()
_instances = []
if self._use_query_pull_operations is None or self._use_query_pull_operations:
try: # operation try block
pull_result = self.OpenQueryInstances(FilterQueryLanguage, FilterQuery, namespace=namespace, ReturnQueryResultClass=ReturnQueryResultClass, OperationTimeout=OperationTimeout, ContinueOnError=ContinueOnError, MaxObjectCount=MaxObjectCount, **extra)
# Open operation succeeded; set has_pull flag
self._use_query_pull_operations = True
_instances = pull_result.instances
# get QueryResultClass from if returned with open
# request.
qrc = pull_result.query_result_class if ReturnQueryResultClass else None
if not pull_result.eos:
while not pull_result.eos:
pull_result = self.PullInstances(pull_result.context, MaxObjectCount=MaxObjectCount)
_instances.extend(pull_result.instances) # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]]
rtn = IterQueryInstancesReturn(_instances, query_result_class=qrc)
pull_result = None # clear the pull_result
return rtn # depends on [control=['try'], data=[]]
# If NOT_SUPPORTED and first request, set flag and try
# alternative request operation.
# If _use_query_pull_operations is True, always raise
# the exception
except CIMError as ce:
if self._use_query_pull_operations is None and ce.status_code == CIM_ERR_NOT_SUPPORTED:
self._use_query_pull_operations = False # depends on [control=['if'], data=[]]
else:
raise # depends on [control=['except'], data=['ce']] # depends on [control=['if'], data=[]]
# Alternate request if Pull not implemented. This does not allow
# the ContinueOnError or ReturnQueryResultClass
assert self._use_query_pull_operations is False
if ReturnQueryResultClass is not None:
raise ValueError('ExecQuery does not support ReturnQueryResultClass.') # depends on [control=['if'], data=[]]
if ContinueOnError is not None:
raise ValueError('ExecQuery does not support ContinueOnError.') # depends on [control=['if'], data=[]]
# The parameters are QueryLanguage and Query for ExecQuery
_instances = self.ExecQuery(FilterQueryLanguage, FilterQuery, namespace=namespace, **extra)
rtn = IterQueryInstancesReturn(_instances)
return rtn # depends on [control=['try'], data=[]]
finally:
# Cleanup if caller closes the iterator before exhausting it
# Cleanup only required if the pull context is open and not complete
if pull_result is not None and (not pull_result.eos):
self.CloseEnumeration(pull_result.context)
pull_result = None # depends on [control=['if'], data=[]] |
def _decode_transfer_data(self, data):
"""
Take a byte array and extract the data from it
Decode the response returned by a DAP_Transfer CMSIS-DAP command
and return it as an array of bytes.
"""
assert self.get_empty() is False
if data[0] != Command.DAP_TRANSFER:
raise ValueError('DAP_TRANSFER response error')
if data[2] != DAP_TRANSFER_OK:
if data[2] == DAP_TRANSFER_FAULT:
raise DAPAccessIntf.TransferFaultError()
elif data[2] == DAP_TRANSFER_WAIT:
raise DAPAccessIntf.TransferTimeoutError()
raise DAPAccessIntf.TransferError()
# Check for count mismatch after checking for DAP_TRANSFER_FAULT
# This allows TransferFaultError or TransferTimeoutError to get
# thrown instead of TransferFaultError
if data[1] != self._read_count + self._write_count:
raise DAPAccessIntf.TransferError()
return data[3:3 + 4 * self._read_count] | def function[_decode_transfer_data, parameter[self, data]]:
constant[
Take a byte array and extract the data from it
Decode the response returned by a DAP_Transfer CMSIS-DAP command
and return it as an array of bytes.
]
assert[compare[call[name[self].get_empty, parameter[]] is constant[False]]]
if compare[call[name[data]][constant[0]] not_equal[!=] name[Command].DAP_TRANSFER] begin[:]
<ast.Raise object at 0x7da1b18be9e0>
if compare[call[name[data]][constant[2]] not_equal[!=] name[DAP_TRANSFER_OK]] begin[:]
if compare[call[name[data]][constant[2]] equal[==] name[DAP_TRANSFER_FAULT]] begin[:]
<ast.Raise object at 0x7da1b18bd300>
<ast.Raise object at 0x7da1b18bee00>
if compare[call[name[data]][constant[1]] not_equal[!=] binary_operation[name[self]._read_count + name[self]._write_count]] begin[:]
<ast.Raise object at 0x7da18ede5cf0>
return[call[name[data]][<ast.Slice object at 0x7da18ede52d0>]] | keyword[def] identifier[_decode_transfer_data] ( identifier[self] , identifier[data] ):
literal[string]
keyword[assert] identifier[self] . identifier[get_empty] () keyword[is] keyword[False]
keyword[if] identifier[data] [ literal[int] ]!= identifier[Command] . identifier[DAP_TRANSFER] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[data] [ literal[int] ]!= identifier[DAP_TRANSFER_OK] :
keyword[if] identifier[data] [ literal[int] ]== identifier[DAP_TRANSFER_FAULT] :
keyword[raise] identifier[DAPAccessIntf] . identifier[TransferFaultError] ()
keyword[elif] identifier[data] [ literal[int] ]== identifier[DAP_TRANSFER_WAIT] :
keyword[raise] identifier[DAPAccessIntf] . identifier[TransferTimeoutError] ()
keyword[raise] identifier[DAPAccessIntf] . identifier[TransferError] ()
keyword[if] identifier[data] [ literal[int] ]!= identifier[self] . identifier[_read_count] + identifier[self] . identifier[_write_count] :
keyword[raise] identifier[DAPAccessIntf] . identifier[TransferError] ()
keyword[return] identifier[data] [ literal[int] : literal[int] + literal[int] * identifier[self] . identifier[_read_count] ] | def _decode_transfer_data(self, data):
"""
Take a byte array and extract the data from it
Decode the response returned by a DAP_Transfer CMSIS-DAP command
and return it as an array of bytes.
"""
assert self.get_empty() is False
if data[0] != Command.DAP_TRANSFER:
raise ValueError('DAP_TRANSFER response error') # depends on [control=['if'], data=[]]
if data[2] != DAP_TRANSFER_OK:
if data[2] == DAP_TRANSFER_FAULT:
raise DAPAccessIntf.TransferFaultError() # depends on [control=['if'], data=[]]
elif data[2] == DAP_TRANSFER_WAIT:
raise DAPAccessIntf.TransferTimeoutError() # depends on [control=['if'], data=[]]
raise DAPAccessIntf.TransferError() # depends on [control=['if'], data=[]]
# Check for count mismatch after checking for DAP_TRANSFER_FAULT
# This allows TransferFaultError or TransferTimeoutError to get
# thrown instead of TransferFaultError
if data[1] != self._read_count + self._write_count:
raise DAPAccessIntf.TransferError() # depends on [control=['if'], data=[]]
return data[3:3 + 4 * self._read_count] |
def delete_store_credit_payment_by_id(cls, store_credit_payment_id, **kwargs):
"""Delete StoreCreditPayment
Delete an instance of StoreCreditPayment by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_store_credit_payment_by_id(store_credit_payment_id, async=True)
>>> result = thread.get()
:param async bool
:param str store_credit_payment_id: ID of storeCreditPayment to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._delete_store_credit_payment_by_id_with_http_info(store_credit_payment_id, **kwargs)
else:
(data) = cls._delete_store_credit_payment_by_id_with_http_info(store_credit_payment_id, **kwargs)
return data | def function[delete_store_credit_payment_by_id, parameter[cls, store_credit_payment_id]]:
constant[Delete StoreCreditPayment
Delete an instance of StoreCreditPayment by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_store_credit_payment_by_id(store_credit_payment_id, async=True)
>>> result = thread.get()
:param async bool
:param str store_credit_payment_id: ID of storeCreditPayment to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async]]] begin[:]
return[call[name[cls]._delete_store_credit_payment_by_id_with_http_info, parameter[name[store_credit_payment_id]]]] | keyword[def] identifier[delete_store_credit_payment_by_id] ( identifier[cls] , identifier[store_credit_payment_id] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[cls] . identifier[_delete_store_credit_payment_by_id_with_http_info] ( identifier[store_credit_payment_id] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[cls] . identifier[_delete_store_credit_payment_by_id_with_http_info] ( identifier[store_credit_payment_id] ,** identifier[kwargs] )
keyword[return] identifier[data] | def delete_store_credit_payment_by_id(cls, store_credit_payment_id, **kwargs):
"""Delete StoreCreditPayment
Delete an instance of StoreCreditPayment by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_store_credit_payment_by_id(store_credit_payment_id, async=True)
>>> result = thread.get()
:param async bool
:param str store_credit_payment_id: ID of storeCreditPayment to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._delete_store_credit_payment_by_id_with_http_info(store_credit_payment_id, **kwargs) # depends on [control=['if'], data=[]]
else:
data = cls._delete_store_credit_payment_by_id_with_http_info(store_credit_payment_id, **kwargs)
return data |
def _process_dbxref(self):
"""
We bring in the dbxref identifiers and store them in a hashmap for
lookup in other functions.
Note that some dbxrefs aren't mapped to identifiers.
For example, 5004018 is mapped to a string,
"endosome & imaginal disc epithelial cell | somatic clone..."
In those cases, there just isn't a dbxref that's used
when referencing with a cvterm; it'll just use the internal key.
:return:
"""
raw = '/'.join((self.rawdir, 'dbxref'))
LOG.info("processing dbxrefs")
line_counter = 0
with open(raw, 'r') as f:
filereader = csv.reader(f, delimiter='\t', quotechar='\"')
f.readline() # read the header row; skip
for line in filereader:
(dbxref_id, db_id, accession, version, description, url) = line
# dbxref_id db_id accession version description url
# 1 2 SO:0000000 ""
accession = accession.strip()
db_id = db_id.strip()
if accession != '' and db_id in self.localtt:
# scrub some identifiers here
mch = re.match(
r'(doi|SO|GO|FBcv|FBbt_root|FBdv|FBgn|FBdv_root|FlyBase|FBbt):',
accession)
if mch:
accession = re.sub(mch.group(1)+r'\:', '', accession)
elif re.match(
r'(FlyBase miscellaneous CV|cell_lineprop|relationship type|FBgn$)',
accession):
continue
elif re.match(r'\:', accession): # starts with a colon
accession = re.sub(r'\:', '', accession)
elif re.search(r'\s', accession):
# skip anything with a space
# LOG.debug(
# 'dbxref %s accession has a space: %s', dbxref_id, accession)
continue
if re.match(r'http', accession):
did = accession
else:
prefix = self.localtt[db_id]
did = ':'.join((prefix, accession))
if re.search(r'\:', accession) and prefix != 'DOI':
LOG.warning('id %s may be malformed; skipping', did)
self.dbxrefs[dbxref_id] = {db_id: did}
elif url != '':
self.dbxrefs[dbxref_id] = {db_id: url.strip()}
else:
continue
# the following are some special cases that we scrub
if int(db_id) == 2 and accession.strip() == 'transgenic_transposon':
# transgenic_transposable_element
self.dbxrefs[dbxref_id] = {
db_id: self.globaltt['transgenic_transposable_element']}
line_counter += 1
return | def function[_process_dbxref, parameter[self]]:
constant[
We bring in the dbxref identifiers and store them in a hashmap for
lookup in other functions.
Note that some dbxrefs aren't mapped to identifiers.
For example, 5004018 is mapped to a string,
"endosome & imaginal disc epithelial cell | somatic clone..."
In those cases, there just isn't a dbxref that's used
when referencing with a cvterm; it'll just use the internal key.
:return:
]
variable[raw] assign[=] call[constant[/].join, parameter[tuple[[<ast.Attribute object at 0x7da2054a5330>, <ast.Constant object at 0x7da2054a62f0>]]]]
call[name[LOG].info, parameter[constant[processing dbxrefs]]]
variable[line_counter] assign[=] constant[0]
with call[name[open], parameter[name[raw], constant[r]]] begin[:]
variable[filereader] assign[=] call[name[csv].reader, parameter[name[f]]]
call[name[f].readline, parameter[]]
for taget[name[line]] in starred[name[filereader]] begin[:]
<ast.Tuple object at 0x7da2047e9db0> assign[=] name[line]
variable[accession] assign[=] call[name[accession].strip, parameter[]]
variable[db_id] assign[=] call[name[db_id].strip, parameter[]]
if <ast.BoolOp object at 0x7da2047eb0a0> begin[:]
variable[mch] assign[=] call[name[re].match, parameter[constant[(doi|SO|GO|FBcv|FBbt_root|FBdv|FBgn|FBdv_root|FlyBase|FBbt):], name[accession]]]
if name[mch] begin[:]
variable[accession] assign[=] call[name[re].sub, parameter[binary_operation[call[name[mch].group, parameter[constant[1]]] + constant[\:]], constant[], name[accession]]]
if call[name[re].match, parameter[constant[http], name[accession]]] begin[:]
variable[did] assign[=] name[accession]
call[name[self].dbxrefs][name[dbxref_id]] assign[=] dictionary[[<ast.Name object at 0x7da2047e81f0>], [<ast.Name object at 0x7da2047eb850>]]
if <ast.BoolOp object at 0x7da2047e9c90> begin[:]
call[name[self].dbxrefs][name[dbxref_id]] assign[=] dictionary[[<ast.Name object at 0x7da2047e9ba0>], [<ast.Subscript object at 0x7da2047e8df0>]]
<ast.AugAssign object at 0x7da2047eb640>
return[None] | keyword[def] identifier[_process_dbxref] ( identifier[self] ):
literal[string]
identifier[raw] = literal[string] . identifier[join] (( identifier[self] . identifier[rawdir] , literal[string] ))
identifier[LOG] . identifier[info] ( literal[string] )
identifier[line_counter] = literal[int]
keyword[with] identifier[open] ( identifier[raw] , literal[string] ) keyword[as] identifier[f] :
identifier[filereader] = identifier[csv] . identifier[reader] ( identifier[f] , identifier[delimiter] = literal[string] , identifier[quotechar] = literal[string] )
identifier[f] . identifier[readline] ()
keyword[for] identifier[line] keyword[in] identifier[filereader] :
( identifier[dbxref_id] , identifier[db_id] , identifier[accession] , identifier[version] , identifier[description] , identifier[url] )= identifier[line]
identifier[accession] = identifier[accession] . identifier[strip] ()
identifier[db_id] = identifier[db_id] . identifier[strip] ()
keyword[if] identifier[accession] != literal[string] keyword[and] identifier[db_id] keyword[in] identifier[self] . identifier[localtt] :
identifier[mch] = identifier[re] . identifier[match] (
literal[string] ,
identifier[accession] )
keyword[if] identifier[mch] :
identifier[accession] = identifier[re] . identifier[sub] ( identifier[mch] . identifier[group] ( literal[int] )+ literal[string] , literal[string] , identifier[accession] )
keyword[elif] identifier[re] . identifier[match] (
literal[string] ,
identifier[accession] ):
keyword[continue]
keyword[elif] identifier[re] . identifier[match] ( literal[string] , identifier[accession] ):
identifier[accession] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[accession] )
keyword[elif] identifier[re] . identifier[search] ( literal[string] , identifier[accession] ):
keyword[continue]
keyword[if] identifier[re] . identifier[match] ( literal[string] , identifier[accession] ):
identifier[did] = identifier[accession]
keyword[else] :
identifier[prefix] = identifier[self] . identifier[localtt] [ identifier[db_id] ]
identifier[did] = literal[string] . identifier[join] (( identifier[prefix] , identifier[accession] ))
keyword[if] identifier[re] . identifier[search] ( literal[string] , identifier[accession] ) keyword[and] identifier[prefix] != literal[string] :
identifier[LOG] . identifier[warning] ( literal[string] , identifier[did] )
identifier[self] . identifier[dbxrefs] [ identifier[dbxref_id] ]={ identifier[db_id] : identifier[did] }
keyword[elif] identifier[url] != literal[string] :
identifier[self] . identifier[dbxrefs] [ identifier[dbxref_id] ]={ identifier[db_id] : identifier[url] . identifier[strip] ()}
keyword[else] :
keyword[continue]
keyword[if] identifier[int] ( identifier[db_id] )== literal[int] keyword[and] identifier[accession] . identifier[strip] ()== literal[string] :
identifier[self] . identifier[dbxrefs] [ identifier[dbxref_id] ]={
identifier[db_id] : identifier[self] . identifier[globaltt] [ literal[string] ]}
identifier[line_counter] += literal[int]
keyword[return] | def _process_dbxref(self):
"""
We bring in the dbxref identifiers and store them in a hashmap for
lookup in other functions.
Note that some dbxrefs aren't mapped to identifiers.
For example, 5004018 is mapped to a string,
"endosome & imaginal disc epithelial cell | somatic clone..."
In those cases, there just isn't a dbxref that's used
when referencing with a cvterm; it'll just use the internal key.
:return:
"""
raw = '/'.join((self.rawdir, 'dbxref'))
LOG.info('processing dbxrefs')
line_counter = 0
with open(raw, 'r') as f:
filereader = csv.reader(f, delimiter='\t', quotechar='"')
f.readline() # read the header row; skip
for line in filereader:
(dbxref_id, db_id, accession, version, description, url) = line
# dbxref_id db_id accession version description url
# 1 2 SO:0000000 ""
accession = accession.strip()
db_id = db_id.strip()
if accession != '' and db_id in self.localtt:
# scrub some identifiers here
mch = re.match('(doi|SO|GO|FBcv|FBbt_root|FBdv|FBgn|FBdv_root|FlyBase|FBbt):', accession)
if mch:
accession = re.sub(mch.group(1) + '\\:', '', accession) # depends on [control=['if'], data=[]]
elif re.match('(FlyBase miscellaneous CV|cell_lineprop|relationship type|FBgn$)', accession):
continue # depends on [control=['if'], data=[]]
elif re.match('\\:', accession): # starts with a colon
accession = re.sub('\\:', '', accession) # depends on [control=['if'], data=[]]
elif re.search('\\s', accession):
# skip anything with a space
# LOG.debug(
# 'dbxref %s accession has a space: %s', dbxref_id, accession)
continue # depends on [control=['if'], data=[]]
if re.match('http', accession):
did = accession # depends on [control=['if'], data=[]]
else:
prefix = self.localtt[db_id]
did = ':'.join((prefix, accession))
if re.search('\\:', accession) and prefix != 'DOI':
LOG.warning('id %s may be malformed; skipping', did) # depends on [control=['if'], data=[]]
self.dbxrefs[dbxref_id] = {db_id: did} # depends on [control=['if'], data=[]]
elif url != '':
self.dbxrefs[dbxref_id] = {db_id: url.strip()} # depends on [control=['if'], data=['url']]
else:
continue
# the following are some special cases that we scrub
if int(db_id) == 2 and accession.strip() == 'transgenic_transposon':
# transgenic_transposable_element
self.dbxrefs[dbxref_id] = {db_id: self.globaltt['transgenic_transposable_element']} # depends on [control=['if'], data=[]]
line_counter += 1 # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['f']]
return |
def transmit(self, payload, **kwargs):
"""
Transmit content metadata items to the integrated channel.
"""
items_to_create, items_to_update, items_to_delete, transmission_map = self._partition_items(payload)
self._prepare_items_for_delete(items_to_delete)
prepared_items = {}
prepared_items.update(items_to_create)
prepared_items.update(items_to_update)
prepared_items.update(items_to_delete)
skip_metadata_transmission = False
for chunk in chunks(prepared_items, self.enterprise_configuration.transmission_chunk_size):
chunked_items = list(chunk.values())
if skip_metadata_transmission:
# Remove the failed items from the create/update/delete dictionaries,
# so ContentMetadataItemTransmission objects are not synchronized for
# these items below.
self._remove_failed_items(chunked_items, items_to_create, items_to_update, items_to_delete)
else:
try:
self.client.update_content_metadata(self._serialize_items(chunked_items))
except ClientError as exc:
LOGGER.error(
'Failed to update [%s] content metadata items for integrated channel [%s] [%s]',
len(chunked_items),
self.enterprise_configuration.enterprise_customer.name,
self.enterprise_configuration.channel_code,
)
LOGGER.error(exc)
# Remove the failed items from the create/update/delete dictionaries,
# so ContentMetadataItemTransmission objects are not synchronized for
# these items below.
self._remove_failed_items(chunked_items, items_to_create, items_to_update, items_to_delete)
# SAP servers throttle incoming traffic, If a request fails than the subsequent would fail too,
# So, no need to keep trying and failing. We should stop here and retry later.
skip_metadata_transmission = True
self._create_transmissions(items_to_create)
self._update_transmissions(items_to_update, transmission_map)
self._delete_transmissions(items_to_delete.keys()) | def function[transmit, parameter[self, payload]]:
constant[
Transmit content metadata items to the integrated channel.
]
<ast.Tuple object at 0x7da1b0127eb0> assign[=] call[name[self]._partition_items, parameter[name[payload]]]
call[name[self]._prepare_items_for_delete, parameter[name[items_to_delete]]]
variable[prepared_items] assign[=] dictionary[[], []]
call[name[prepared_items].update, parameter[name[items_to_create]]]
call[name[prepared_items].update, parameter[name[items_to_update]]]
call[name[prepared_items].update, parameter[name[items_to_delete]]]
variable[skip_metadata_transmission] assign[=] constant[False]
for taget[name[chunk]] in starred[call[name[chunks], parameter[name[prepared_items], name[self].enterprise_configuration.transmission_chunk_size]]] begin[:]
variable[chunked_items] assign[=] call[name[list], parameter[call[name[chunk].values, parameter[]]]]
if name[skip_metadata_transmission] begin[:]
call[name[self]._remove_failed_items, parameter[name[chunked_items], name[items_to_create], name[items_to_update], name[items_to_delete]]]
call[name[self]._create_transmissions, parameter[name[items_to_create]]]
call[name[self]._update_transmissions, parameter[name[items_to_update], name[transmission_map]]]
call[name[self]._delete_transmissions, parameter[call[name[items_to_delete].keys, parameter[]]]] | keyword[def] identifier[transmit] ( identifier[self] , identifier[payload] ,** identifier[kwargs] ):
literal[string]
identifier[items_to_create] , identifier[items_to_update] , identifier[items_to_delete] , identifier[transmission_map] = identifier[self] . identifier[_partition_items] ( identifier[payload] )
identifier[self] . identifier[_prepare_items_for_delete] ( identifier[items_to_delete] )
identifier[prepared_items] ={}
identifier[prepared_items] . identifier[update] ( identifier[items_to_create] )
identifier[prepared_items] . identifier[update] ( identifier[items_to_update] )
identifier[prepared_items] . identifier[update] ( identifier[items_to_delete] )
identifier[skip_metadata_transmission] = keyword[False]
keyword[for] identifier[chunk] keyword[in] identifier[chunks] ( identifier[prepared_items] , identifier[self] . identifier[enterprise_configuration] . identifier[transmission_chunk_size] ):
identifier[chunked_items] = identifier[list] ( identifier[chunk] . identifier[values] ())
keyword[if] identifier[skip_metadata_transmission] :
identifier[self] . identifier[_remove_failed_items] ( identifier[chunked_items] , identifier[items_to_create] , identifier[items_to_update] , identifier[items_to_delete] )
keyword[else] :
keyword[try] :
identifier[self] . identifier[client] . identifier[update_content_metadata] ( identifier[self] . identifier[_serialize_items] ( identifier[chunked_items] ))
keyword[except] identifier[ClientError] keyword[as] identifier[exc] :
identifier[LOGGER] . identifier[error] (
literal[string] ,
identifier[len] ( identifier[chunked_items] ),
identifier[self] . identifier[enterprise_configuration] . identifier[enterprise_customer] . identifier[name] ,
identifier[self] . identifier[enterprise_configuration] . identifier[channel_code] ,
)
identifier[LOGGER] . identifier[error] ( identifier[exc] )
identifier[self] . identifier[_remove_failed_items] ( identifier[chunked_items] , identifier[items_to_create] , identifier[items_to_update] , identifier[items_to_delete] )
identifier[skip_metadata_transmission] = keyword[True]
identifier[self] . identifier[_create_transmissions] ( identifier[items_to_create] )
identifier[self] . identifier[_update_transmissions] ( identifier[items_to_update] , identifier[transmission_map] )
identifier[self] . identifier[_delete_transmissions] ( identifier[items_to_delete] . identifier[keys] ()) | def transmit(self, payload, **kwargs):
"""
Transmit content metadata items to the integrated channel.
"""
(items_to_create, items_to_update, items_to_delete, transmission_map) = self._partition_items(payload)
self._prepare_items_for_delete(items_to_delete)
prepared_items = {}
prepared_items.update(items_to_create)
prepared_items.update(items_to_update)
prepared_items.update(items_to_delete)
skip_metadata_transmission = False
for chunk in chunks(prepared_items, self.enterprise_configuration.transmission_chunk_size):
chunked_items = list(chunk.values())
if skip_metadata_transmission:
# Remove the failed items from the create/update/delete dictionaries,
# so ContentMetadataItemTransmission objects are not synchronized for
# these items below.
self._remove_failed_items(chunked_items, items_to_create, items_to_update, items_to_delete) # depends on [control=['if'], data=[]]
else:
try:
self.client.update_content_metadata(self._serialize_items(chunked_items)) # depends on [control=['try'], data=[]]
except ClientError as exc:
LOGGER.error('Failed to update [%s] content metadata items for integrated channel [%s] [%s]', len(chunked_items), self.enterprise_configuration.enterprise_customer.name, self.enterprise_configuration.channel_code)
LOGGER.error(exc)
# Remove the failed items from the create/update/delete dictionaries,
# so ContentMetadataItemTransmission objects are not synchronized for
# these items below.
self._remove_failed_items(chunked_items, items_to_create, items_to_update, items_to_delete)
# SAP servers throttle incoming traffic, If a request fails than the subsequent would fail too,
# So, no need to keep trying and failing. We should stop here and retry later.
skip_metadata_transmission = True # depends on [control=['except'], data=['exc']] # depends on [control=['for'], data=['chunk']]
self._create_transmissions(items_to_create)
self._update_transmissions(items_to_update, transmission_map)
self._delete_transmissions(items_to_delete.keys()) |
def __fun_check(self, valid, fun, args=None, kwargs=None):
'''
Check the given function name (fun) and its arguments (args) against the list of conditions.
'''
if not isinstance(valid, list):
valid = [valid]
for cond in valid:
# Function name match
if isinstance(cond, six.string_types):
if self.match_check(cond, fun):
return True
# Function and args match
elif isinstance(cond, dict):
if len(cond) != 1:
# Invalid argument
continue
fname_cond = next(six.iterkeys(cond))
if self.match_check(fname_cond, fun): # check key that is function name match
if self.__args_check(cond[fname_cond], args, kwargs):
return True
return False | def function[__fun_check, parameter[self, valid, fun, args, kwargs]]:
constant[
Check the given function name (fun) and its arguments (args) against the list of conditions.
]
if <ast.UnaryOp object at 0x7da1b21638e0> begin[:]
variable[valid] assign[=] list[[<ast.Name object at 0x7da1b2163340>]]
for taget[name[cond]] in starred[name[valid]] begin[:]
if call[name[isinstance], parameter[name[cond], name[six].string_types]] begin[:]
if call[name[self].match_check, parameter[name[cond], name[fun]]] begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[__fun_check] ( identifier[self] , identifier[valid] , identifier[fun] , identifier[args] = keyword[None] , identifier[kwargs] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[valid] , identifier[list] ):
identifier[valid] =[ identifier[valid] ]
keyword[for] identifier[cond] keyword[in] identifier[valid] :
keyword[if] identifier[isinstance] ( identifier[cond] , identifier[six] . identifier[string_types] ):
keyword[if] identifier[self] . identifier[match_check] ( identifier[cond] , identifier[fun] ):
keyword[return] keyword[True]
keyword[elif] identifier[isinstance] ( identifier[cond] , identifier[dict] ):
keyword[if] identifier[len] ( identifier[cond] )!= literal[int] :
keyword[continue]
identifier[fname_cond] = identifier[next] ( identifier[six] . identifier[iterkeys] ( identifier[cond] ))
keyword[if] identifier[self] . identifier[match_check] ( identifier[fname_cond] , identifier[fun] ):
keyword[if] identifier[self] . identifier[__args_check] ( identifier[cond] [ identifier[fname_cond] ], identifier[args] , identifier[kwargs] ):
keyword[return] keyword[True]
keyword[return] keyword[False] | def __fun_check(self, valid, fun, args=None, kwargs=None):
"""
Check the given function name (fun) and its arguments (args) against the list of conditions.
"""
if not isinstance(valid, list):
valid = [valid] # depends on [control=['if'], data=[]]
for cond in valid:
# Function name match
if isinstance(cond, six.string_types):
if self.match_check(cond, fun):
return True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Function and args match
elif isinstance(cond, dict):
if len(cond) != 1:
# Invalid argument
continue # depends on [control=['if'], data=[]]
fname_cond = next(six.iterkeys(cond))
if self.match_check(fname_cond, fun): # check key that is function name match
if self.__args_check(cond[fname_cond], args, kwargs):
return True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['cond']]
return False |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.