code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def _remove_strings(code) :
""" Remove strings in code
"""
removed_string = ""
is_string_now = None
for i in range(0, len(code)-1) :
append_this_turn = False
if code[i] == "'" and (i == 0 or code[i-1] != '\\') :
if is_string_now == "'" :
is_string_now = None
elif is_string_now == None :
is_string_now = "'"
append_this_turn = True
elif code[i] == '"' and (i == 0 or code[i-1] != '\\') :
if is_string_now == '"' :
is_string_now = None
elif is_string_now == None :
is_string_now = '"'
append_this_turn = True
if is_string_now == None or append_this_turn == True :
removed_string += code[i]
return removed_string | Remove strings in code | Below is the the instruction that describes the task:
### Input:
Remove strings in code
### Response:
def _remove_strings(code) :
""" Remove strings in code
"""
removed_string = ""
is_string_now = None
for i in range(0, len(code)-1) :
append_this_turn = False
if code[i] == "'" and (i == 0 or code[i-1] != '\\') :
if is_string_now == "'" :
is_string_now = None
elif is_string_now == None :
is_string_now = "'"
append_this_turn = True
elif code[i] == '"' and (i == 0 or code[i-1] != '\\') :
if is_string_now == '"' :
is_string_now = None
elif is_string_now == None :
is_string_now = '"'
append_this_turn = True
if is_string_now == None or append_this_turn == True :
removed_string += code[i]
return removed_string |
def angular_crossmatch_against_catalogue(
        self,
        objectList,
        searchPara={},
        search_name="",
        brightnessFilter=False,
        physicalSearch=False,
        classificationType=False
):
    """*perform an angular separation crossmatch against a given catalogue in the database and annotate the crossmatch with some value added parameters (distances, physical separations, sub-type of transient etc)*

    **Key Arguments:**
        - ``objectList`` -- the list of transient locations to match against the crossmatch catalogue
        - ``searchPara`` -- the search parameters for this individual search as lifted from the search algorithm in the sherlock settings file
        - ``search_name`` -- the name of the search as given in the sherlock settings file
        - ``brightnessFilter`` -- is this search to be constrained by magnitude of the catalogue sources? Default *False*. [bright|faint|general]
        - ``physicalSearch`` -- is this angular search a sub-part of a physical separation search
        - ``classificationType`` -- synonym, association or annotation. Default *False*

    **Return:**
        - matchedObjects -- any sources matched against the object

    **Usage:**

        Take a list of transients from somewhere

        .. code-block:: python

            transients = [
                {'ps1_designation': u'PS1-14aef',
                 'name': u'4L3Piiq',
                 'detection_list_id': 2,
                 'local_comments': u'',
                 'ra': 0.02548233704918263,
                 'followup_id': 2065412L,
                 'dec': -4.284933417540423,
                 'id': 1000006110041705700L,
                 'object_classification': 0L
                 },
                {'ps1_designation': u'PS1-13dcr',
                 'name': u'3I3Phzx',
                 'detection_list_id': 2,
                 'local_comments': u'',
                 'ra': 4.754236999477372,
                 'followup_id': 1140386L,
                 'dec': 28.276703631398625,
                 'id': 1001901011281636100L,
                 'object_classification': 0L
                 },
                {'ps1_designation': u'PS1-13dhc',
                 'name': u'3I3Pixd',
                 'detection_list_id': 2,
                 'local_comments': u'',
                 'ra': 1.3324973428505413,
                 'followup_id': 1202386L,
                 'dec': 32.98869220595689,
                 'id': 1000519791325919200L,
                 'object_classification': 0L
                 }
            ]

        Then run the ``angular_crossmatch_against_catalogue`` method to crossmatch against the catalogues and return results:

        .. code-block:: python

            # ANGULAR CONESEARCH ON CATALOGUE
            search_name = "ned_d spec sn"
            searchPara = self.settings["search algorithm"][search_name]
            matchedObjects = xmatcher.angular_crossmatch_against_catalogue(
                objectList=transients,
                searchPara=searchPara,
                search_name=search_name
            )

    .. todo ::

        - update key arguments values and definitions with defaults
        - update return values and definitions
        - update usage examples and text
        - update docstring text
        - check sublime snippet exists
        - clip any useful text to docs mindmap
        - regenerate the docs and check rendering of this docstring
    """
    self.log.debug(
        'starting the ``angular_crossmatch_against_catalogue`` method')
    self.log.info("STARTING %s SEARCH" %
                  (search_name,))
    start_time = time.time()
    # DEFAULTS
    # print search_name, classificationType
    magnitudeLimitFilter = None
    upperMagnitudeLimit = False
    lowerMagnitudeLimit = False
    catalogueName = searchPara["database table"]
    # NOTE(review): ``searchPara`` has a mutable default ({}) and is
    # mutated in place here -- the caller's dict gains a "mag column"
    # key as a side effect. Confirm callers tolerate this.
    if not "mag column" in searchPara:
        searchPara["mag column"] = None
    if brightnessFilter:
        if "mag column" in searchPara and searchPara["mag column"]:
            magnitudeLimitFilter = self.colMaps[
                catalogueName][searchPara["mag column"] + "ColName"]
        # brightness-constrained searches keep their parameters in a
        # per-filter sub-dictionary keyed [bright|faint|general]
        theseSearchPara = searchPara[brightnessFilter]
    else:
        theseSearchPara = searchPara
    # EXTRACT PARAMETERS FROM ARGUMENTS & SETTINGS FILE
    # NOTE(review): ``radius`` and ``matchedType`` are only bound for the
    # three classification types below; any other value would raise a
    # NameError at the conesearch call -- presumably callers always pass
    # one of synonym/association/annotation. Confirm.
    if classificationType == "synonym":
        radius = self.settings["synonym radius arcsec"]
        matchedType = theseSearchPara["synonym"]
    elif classificationType == "association":
        radius = theseSearchPara["angular radius arcsec"]
        matchedType = theseSearchPara["association"]
    elif classificationType == "annotation":
        radius = theseSearchPara["angular radius arcsec"]
        matchedType = theseSearchPara["annotation"]
    # NOTE(review): "faint" feeds the *upper* limit and "bright" the
    # *lower*, but the "general" branch maps them the other way round
    # (faint -> lower, bright -> upper). Verify this inversion is
    # intentional before relying on it.
    if brightnessFilter == "faint":
        upperMagnitudeLimit = theseSearchPara["mag limit"]
    elif brightnessFilter == "bright":
        lowerMagnitudeLimit = theseSearchPara["mag limit"]
    elif brightnessFilter == "general":
        if "faint" in searchPara:
            lowerMagnitudeLimit = searchPara["faint"]["mag limit"]
        if "bright" in searchPara:
            upperMagnitudeLimit = searchPara["bright"]["mag limit"]
    # VARIABLES
    matchedObjects = []
    matchSubset = []
    transRAs = []
    transRAs[:] = [t['ra'] for t in objectList]
    transDecs = []
    transDecs[:] = [t['dec'] for t in objectList]
    # nothing to match against -- short-circuit before hitting the database
    if len(transRAs) == 0:
        return []
    cs = catalogue_conesearch(
        log=self.log,
        ra=transRAs,
        dec=transDecs,
        radiusArcsec=radius,
        colMaps=self.colMaps,
        tableName=catalogueName,
        dbConn=self.dbConn,
        nearestOnly=False,
        physicalSearch=physicalSearch,
        upperMagnitudeLimit=upperMagnitudeLimit,
        lowerMagnitudeLimit=lowerMagnitudeLimit,
        magnitudeLimitFilter=magnitudeLimitFilter
    )
    # catalogueMatches ARE ORDERED BY ANGULAR SEPARATION
    indices, catalogueMatches = cs.search()
    # NOTE(review): ``count`` is never used below
    count = 1
    annotatedcatalogueMatches = []
    for i, xm in zip(indices, catalogueMatches):
        # CALCULATE PHYSICAL PARAMETERS ... IF WE CAN
        if "cmSepArcsec" in xm:
            xm["separationArcsec"] = xm["cmSepArcsec"]
            # CALCULATE SEPARATION IN ARCSEC
            calculator = separations(
                log=self.log,
                ra1=objectList[i]["ra"],
                dec1=objectList[i]["dec"],
                ra2=xm["ra"],
                dec2=xm["dec"]
            )
            angularSeparation, north, east = calculator.get()
            xm["northSeparationArcsec"] = north
            xm["eastSeparationArcsec"] = east
            del xm["cmSepArcsec"]
        xm["association_type"] = matchedType
        xm["catalogue_view_name"] = catalogueName
        xm["transient_object_id"] = objectList[i]["id"]
        xm["catalogue_table_name"] = self.colMaps[
            catalogueName]["description"]
        xm["catalogue_table_id"] = self.colMaps[
            catalogueName]["table_id"]
        xm["catalogue_view_id"] = self.colMaps[
            catalogueName]["id"]
        # reliability ranking: 1 = synonym, 2 = association, 3 = annotation
        if classificationType == "synonym":
            xm["classificationReliability"] = 1
        elif classificationType == "association":
            xm["classificationReliability"] = 2
        elif classificationType == "annotation":
            xm["classificationReliability"] = 3
        xm = self._annotate_crossmatch_with_value_added_parameters(
            crossmatchDict=xm,
            catalogueName=catalogueName,
            searchPara=theseSearchPara,
            search_name=search_name
        )
        annotatedcatalogueMatches.append(xm)
    catalogueMatches = annotatedcatalogueMatches
    # IF BRIGHT STAR SEARCH
    if brightnessFilter == "bright" and "star" in search_name:
        catalogueMatches = self._bright_star_match(
            matchedObjects=catalogueMatches,
            catalogueName=catalogueName,
            lowerMagnitudeLimit=lowerMagnitudeLimit,
            magnitudeLimitFilter=searchPara["mag column"]
        )
    if brightnessFilter == "general" and "galaxy" in search_name and "galaxy-like" not in search_name and "physical radius kpc" not in theseSearchPara:
        catalogueMatches = self._galaxy_association_cuts(
            matchedObjects=catalogueMatches,
            catalogueName=catalogueName,
            lowerMagnitudeLimit=lowerMagnitudeLimit,
            upperMagnitudeLimit=upperMagnitudeLimit,
            magnitudeLimitFilter=searchPara["mag column"]
        )
    if "match nearest source only" in theseSearchPara and theseSearchPara["match nearest source only"] == True and len(catalogueMatches):
        # matches are ordered by angular separation, so keeping only the
        # first match seen per transient keeps the nearest one
        nearestMatches = []
        transList = []
        for c in catalogueMatches:
            if c["transient_object_id"] not in transList:
                transList.append(c["transient_object_id"])
                nearestMatches.append(c)
        catalogueMatches = nearestMatches
    self.log.debug(
        'completed the ``angular_crossmatch_against_catalogue`` method')
    self.log.debug("FINISHED %s SEARCH IN %0.5f s" %
                   (search_name, time.time() - start_time,))
    return catalogueMatches
**Key Arguments:**
- ``objectList`` -- the list of transient locations to match against the crossmatch catalogue
- ``searchPara`` -- the search parameters for this individual search as lifted from the search algorithm in the sherlock settings file
- ``search_name`` -- the name of the search as given in the sherlock settings file
- ``brightnessFilter`` -- is this search to be constrained by magnitude of the catalogue sources? Default *False*. [bright|faint|general]
- ``physicalSearch`` -- is this angular search a sub-part of a physical separation search
- ``classificationType`` -- synonym, association or annotation. Default *False*
**Return:**
- matchedObjects -- any sources matched against the object
**Usage:**
Take a list of transients from somewhere
.. code-block:: python
transients = [
{'ps1_designation': u'PS1-14aef',
'name': u'4L3Piiq',
'detection_list_id': 2,
'local_comments': u'',
'ra': 0.02548233704918263,
'followup_id': 2065412L,
'dec': -4.284933417540423,
'id': 1000006110041705700L,
'object_classification': 0L
},
{'ps1_designation': u'PS1-13dcr',
'name': u'3I3Phzx',
'detection_list_id': 2,
'local_comments': u'',
'ra': 4.754236999477372,
'followup_id': 1140386L,
'dec': 28.276703631398625,
'id': 1001901011281636100L,
'object_classification': 0L
},
{'ps1_designation': u'PS1-13dhc',
'name': u'3I3Pixd',
'detection_list_id': 2,
'local_comments': u'',
'ra': 1.3324973428505413,
'followup_id': 1202386L,
'dec': 32.98869220595689,
'id': 1000519791325919200L,
'object_classification': 0L
}
]
Then run the ``angular_crossmatch_against_catalogue`` method to crossmatch against the catalogues and return results:
.. code-block:: python
# ANGULAR CONESEARCH ON CATALOGUE
search_name = "ned_d spec sn"
searchPara = self.settings["search algorithm"][search_name]
matchedObjects = xmatcher.angular_crossmatch_against_catalogue(
objectList=transients,
searchPara=searchPara,
search_name=search_name
)
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring | Below is the instruction that describes the task:
### Input:
*perform an angular separation crossmatch against a given catalogue in the database and annotate the crossmatch with some value added parameters (distances, physical separations, sub-type of transient etc)*
**Key Arguments:**
- ``objectList`` -- the list of transient locations to match against the crossmatch catalogue
- ``searchPara`` -- the search parameters for this individual search as lifted from the search algorithm in the sherlock settings file
- ``search_name`` -- the name of the search as given in the sherlock settings file
- ``brightnessFilter`` -- is this search to be constrained by magnitude of the catalogue sources? Default *False*. [bright|faint|general]
- ``physicalSearch`` -- is this angular search a sub-part of a physical separation search
- ``classificationType`` -- synonym, association or annotation. Default *False*
**Return:**
- matchedObjects -- any sources matched against the object
**Usage:**
Take a list of transients from somewhere
.. code-block:: python
transients = [
{'ps1_designation': u'PS1-14aef',
'name': u'4L3Piiq',
'detection_list_id': 2,
'local_comments': u'',
'ra': 0.02548233704918263,
'followup_id': 2065412L,
'dec': -4.284933417540423,
'id': 1000006110041705700L,
'object_classification': 0L
},
{'ps1_designation': u'PS1-13dcr',
'name': u'3I3Phzx',
'detection_list_id': 2,
'local_comments': u'',
'ra': 4.754236999477372,
'followup_id': 1140386L,
'dec': 28.276703631398625,
'id': 1001901011281636100L,
'object_classification': 0L
},
{'ps1_designation': u'PS1-13dhc',
'name': u'3I3Pixd',
'detection_list_id': 2,
'local_comments': u'',
'ra': 1.3324973428505413,
'followup_id': 1202386L,
'dec': 32.98869220595689,
'id': 1000519791325919200L,
'object_classification': 0L
}
]
Then run the ``angular_crossmatch_against_catalogue`` method to crossmatch against the catalogues and return results:
.. code-block:: python
# ANGULAR CONESEARCH ON CATALOGUE
search_name = "ned_d spec sn"
searchPara = self.settings["search algorithm"][search_name]
matchedObjects = xmatcher.angular_crossmatch_against_catalogue(
objectList=transients,
searchPara=searchPara,
search_name=search_name
)
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check rendering of this docstring
### Response:
def angular_crossmatch_against_catalogue(
        self,
        objectList,
        searchPara={},
        search_name="",
        brightnessFilter=False,
        physicalSearch=False,
        classificationType=False
):
    """*perform an angular separation crossmatch against a given catalogue in the database and annotate the crossmatch with some value added parameters (distances, physical separations, sub-type of transient etc)*

    **Key Arguments:**
        - ``objectList`` -- the list of transient locations to match against the crossmatch catalogue
        - ``searchPara`` -- the search parameters for this individual search as lifted from the search algorithm in the sherlock settings file
        - ``search_name`` -- the name of the search as given in the sherlock settings file
        - ``brightnessFilter`` -- is this search to be constrained by magnitude of the catalogue sources? Default *False*. [bright|faint|general]
        - ``physicalSearch`` -- is this angular search a sub-part of a physical separation search
        - ``classificationType`` -- synonym, association or annotation. Default *False*

    **Return:**
        - matchedObjects -- any sources matched against the object

    **Usage:**

        Take a list of transients from somewhere

        .. code-block:: python

            transients = [
                {'ps1_designation': u'PS1-14aef',
                 'name': u'4L3Piiq',
                 'detection_list_id': 2,
                 'local_comments': u'',
                 'ra': 0.02548233704918263,
                 'followup_id': 2065412L,
                 'dec': -4.284933417540423,
                 'id': 1000006110041705700L,
                 'object_classification': 0L
                 },
                {'ps1_designation': u'PS1-13dcr',
                 'name': u'3I3Phzx',
                 'detection_list_id': 2,
                 'local_comments': u'',
                 'ra': 4.754236999477372,
                 'followup_id': 1140386L,
                 'dec': 28.276703631398625,
                 'id': 1001901011281636100L,
                 'object_classification': 0L
                 },
                {'ps1_designation': u'PS1-13dhc',
                 'name': u'3I3Pixd',
                 'detection_list_id': 2,
                 'local_comments': u'',
                 'ra': 1.3324973428505413,
                 'followup_id': 1202386L,
                 'dec': 32.98869220595689,
                 'id': 1000519791325919200L,
                 'object_classification': 0L
                 }
            ]

        Then run the ``angular_crossmatch_against_catalogue`` method to crossmatch against the catalogues and return results:

        .. code-block:: python

            # ANGULAR CONESEARCH ON CATALOGUE
            search_name = "ned_d spec sn"
            searchPara = self.settings["search algorithm"][search_name]
            matchedObjects = xmatcher.angular_crossmatch_against_catalogue(
                objectList=transients,
                searchPara=searchPara,
                search_name=search_name
            )

    .. todo ::

        - update key arguments values and definitions with defaults
        - update return values and definitions
        - update usage examples and text
        - update docstring text
        - check sublime snippet exists
        - clip any useful text to docs mindmap
        - regenerate the docs and check rendering of this docstring
    """
    self.log.debug(
        'starting the ``angular_crossmatch_against_catalogue`` method')
    self.log.info("STARTING %s SEARCH" %
                  (search_name,))
    start_time = time.time()
    # DEFAULTS
    # print search_name, classificationType
    magnitudeLimitFilter = None
    upperMagnitudeLimit = False
    lowerMagnitudeLimit = False
    catalogueName = searchPara["database table"]
    # NOTE(review): ``searchPara`` has a mutable default ({}) and is
    # mutated in place here -- the caller's dict gains a "mag column"
    # key as a side effect. Confirm callers tolerate this.
    if not "mag column" in searchPara:
        searchPara["mag column"] = None
    if brightnessFilter:
        if "mag column" in searchPara and searchPara["mag column"]:
            magnitudeLimitFilter = self.colMaps[
                catalogueName][searchPara["mag column"] + "ColName"]
        # brightness-constrained searches keep their parameters in a
        # per-filter sub-dictionary keyed [bright|faint|general]
        theseSearchPara = searchPara[brightnessFilter]
    else:
        theseSearchPara = searchPara
    # EXTRACT PARAMETERS FROM ARGUMENTS & SETTINGS FILE
    # NOTE(review): ``radius`` and ``matchedType`` are only bound for the
    # three classification types below; any other value would raise a
    # NameError at the conesearch call -- presumably callers always pass
    # one of synonym/association/annotation. Confirm.
    if classificationType == "synonym":
        radius = self.settings["synonym radius arcsec"]
        matchedType = theseSearchPara["synonym"]
    elif classificationType == "association":
        radius = theseSearchPara["angular radius arcsec"]
        matchedType = theseSearchPara["association"]
    elif classificationType == "annotation":
        radius = theseSearchPara["angular radius arcsec"]
        matchedType = theseSearchPara["annotation"]
    # NOTE(review): "faint" feeds the *upper* limit and "bright" the
    # *lower*, but the "general" branch maps them the other way round
    # (faint -> lower, bright -> upper). Verify this inversion is
    # intentional before relying on it.
    if brightnessFilter == "faint":
        upperMagnitudeLimit = theseSearchPara["mag limit"]
    elif brightnessFilter == "bright":
        lowerMagnitudeLimit = theseSearchPara["mag limit"]
    elif brightnessFilter == "general":
        if "faint" in searchPara:
            lowerMagnitudeLimit = searchPara["faint"]["mag limit"]
        if "bright" in searchPara:
            upperMagnitudeLimit = searchPara["bright"]["mag limit"]
    # VARIABLES
    matchedObjects = []
    matchSubset = []
    transRAs = []
    transRAs[:] = [t['ra'] for t in objectList]
    transDecs = []
    transDecs[:] = [t['dec'] for t in objectList]
    # nothing to match against -- short-circuit before hitting the database
    if len(transRAs) == 0:
        return []
    cs = catalogue_conesearch(
        log=self.log,
        ra=transRAs,
        dec=transDecs,
        radiusArcsec=radius,
        colMaps=self.colMaps,
        tableName=catalogueName,
        dbConn=self.dbConn,
        nearestOnly=False,
        physicalSearch=physicalSearch,
        upperMagnitudeLimit=upperMagnitudeLimit,
        lowerMagnitudeLimit=lowerMagnitudeLimit,
        magnitudeLimitFilter=magnitudeLimitFilter
    )
    # catalogueMatches ARE ORDERED BY ANGULAR SEPARATION
    indices, catalogueMatches = cs.search()
    # NOTE(review): ``count`` is never used below
    count = 1
    annotatedcatalogueMatches = []
    for i, xm in zip(indices, catalogueMatches):
        # CALCULATE PHYSICAL PARAMETERS ... IF WE CAN
        if "cmSepArcsec" in xm:
            xm["separationArcsec"] = xm["cmSepArcsec"]
            # CALCULATE SEPARATION IN ARCSEC
            calculator = separations(
                log=self.log,
                ra1=objectList[i]["ra"],
                dec1=objectList[i]["dec"],
                ra2=xm["ra"],
                dec2=xm["dec"]
            )
            angularSeparation, north, east = calculator.get()
            xm["northSeparationArcsec"] = north
            xm["eastSeparationArcsec"] = east
            del xm["cmSepArcsec"]
        xm["association_type"] = matchedType
        xm["catalogue_view_name"] = catalogueName
        xm["transient_object_id"] = objectList[i]["id"]
        xm["catalogue_table_name"] = self.colMaps[
            catalogueName]["description"]
        xm["catalogue_table_id"] = self.colMaps[
            catalogueName]["table_id"]
        xm["catalogue_view_id"] = self.colMaps[
            catalogueName]["id"]
        # reliability ranking: 1 = synonym, 2 = association, 3 = annotation
        if classificationType == "synonym":
            xm["classificationReliability"] = 1
        elif classificationType == "association":
            xm["classificationReliability"] = 2
        elif classificationType == "annotation":
            xm["classificationReliability"] = 3
        xm = self._annotate_crossmatch_with_value_added_parameters(
            crossmatchDict=xm,
            catalogueName=catalogueName,
            searchPara=theseSearchPara,
            search_name=search_name
        )
        annotatedcatalogueMatches.append(xm)
    catalogueMatches = annotatedcatalogueMatches
    # IF BRIGHT STAR SEARCH
    if brightnessFilter == "bright" and "star" in search_name:
        catalogueMatches = self._bright_star_match(
            matchedObjects=catalogueMatches,
            catalogueName=catalogueName,
            lowerMagnitudeLimit=lowerMagnitudeLimit,
            magnitudeLimitFilter=searchPara["mag column"]
        )
    if brightnessFilter == "general" and "galaxy" in search_name and "galaxy-like" not in search_name and "physical radius kpc" not in theseSearchPara:
        catalogueMatches = self._galaxy_association_cuts(
            matchedObjects=catalogueMatches,
            catalogueName=catalogueName,
            lowerMagnitudeLimit=lowerMagnitudeLimit,
            upperMagnitudeLimit=upperMagnitudeLimit,
            magnitudeLimitFilter=searchPara["mag column"]
        )
    if "match nearest source only" in theseSearchPara and theseSearchPara["match nearest source only"] == True and len(catalogueMatches):
        # matches are ordered by angular separation, so keeping only the
        # first match seen per transient keeps the nearest one
        nearestMatches = []
        transList = []
        for c in catalogueMatches:
            if c["transient_object_id"] not in transList:
                transList.append(c["transient_object_id"])
                nearestMatches.append(c)
        catalogueMatches = nearestMatches
    self.log.debug(
        'completed the ``angular_crossmatch_against_catalogue`` method')
    self.log.debug("FINISHED %s SEARCH IN %0.5f s" %
                   (search_name, time.time() - start_time,))
    return catalogueMatches
def _software_params_to_argparse(parameters):
    """
    Converts a SoftwareParameterCollection into an ArgumentParser object.

    Parameters
    ----------
    parameters: SoftwareParameterCollection
        The software parameters

    Returns
    -------
    argparse: ArgumentParser
        An initialized argument parser
    """
    # Build one argument per software parameter.
    parser = ArgumentParser()
    bool_defaults = {}
    for param in parameters:
        names = _cytomine_parameter_name_synonyms(param.name)
        options = {"dest": param.name, "required": param.required, "help": ""}  # TODO add help
        if param.type == "Boolean":
            default = _to_bool(param.defaultParamValue)
            # Presence of the flag stores the opposite of the default;
            # the real default is registered via set_defaults below.
            options["action"] = "store_false" if default else "store_true"
            bool_defaults[param.name] = default
        else:
            converter = _convert_type(param.type)
            options["type"] = converter
            if param.defaultParamValue is None:
                options["default"] = None
            else:
                options["default"] = converter(param.defaultParamValue)
        parser.add_argument(*names, **options)
    parser.set_defaults(**bool_defaults)
    return parser
Parameters
----------
parameters: SoftwareParameterCollection
The software parameters
Returns
-------
argparse: ArgumentParser
An initialized argument parser | Below is the instruction that describes the task:
### Input:
Converts a SoftwareParameterCollection into an ArgumentParser object.
Parameters
----------
parameters: SoftwareParameterCollection
The software parameters
Returns
-------
argparse: ArgumentParser
An initialized argument parser
### Response:
def _software_params_to_argparse(parameters):
    """
    Converts a SoftwareParameterCollection into an ArgumentParser object.

    Parameters
    ----------
    parameters: SoftwareParameterCollection
        The software parameters

    Returns
    -------
    argparse: ArgumentParser
        An initialized argument parser
    """
    # Build one argument per software parameter.
    parser = ArgumentParser()
    bool_defaults = {}
    for param in parameters:
        names = _cytomine_parameter_name_synonyms(param.name)
        options = {"dest": param.name, "required": param.required, "help": ""}  # TODO add help
        if param.type == "Boolean":
            default = _to_bool(param.defaultParamValue)
            # Presence of the flag stores the opposite of the default;
            # the real default is registered via set_defaults below.
            options["action"] = "store_false" if default else "store_true"
            bool_defaults[param.name] = default
        else:
            converter = _convert_type(param.type)
            options["type"] = converter
            if param.defaultParamValue is None:
                options["default"] = None
            else:
                options["default"] = converter(param.defaultParamValue)
        parser.add_argument(*names, **options)
    parser.set_defaults(**bool_defaults)
    return parser
def add_transition_view_for_model(self, transition_m, parent_state_m):
    """Creates a `TransitionView` and adds it to the canvas

    The method creates a `TransitionView` from the given `TransitionModel` `transition_m` and adds it to the canvas.

    :param TransitionModel transition_m: The transition for which a view is to be created
    :param ContainerStateModel parent_state_m: The parental `StateModel` of the transition
    """
    parent_view = self.canvas.get_view_for_model(parent_state_m)
    view = TransitionView(transition_m, parent_view.hierarchy_level)
    # index=None appends the view last, so the transition is drawn above
    # all other elements of the parent state
    self.canvas.add(view, parent_view, index=None)
    self._connect_transition_to_ports(transition_m, view, parent_state_m, parent_view)
    return view
The method creates a`TransitionView` from the given `TransitionModel `transition_m` and adds it to the canvas.
:param TransitionModel transition_m: The transition for which a view is to be created
:param ContainerStateModel parent_state_m: The parental `StateModel` of the transition | Below is the instruction that describes the task:
### Input:
Creates a `TransitionView` and adds it to the canvas
The method creates a`TransitionView` from the given `TransitionModel `transition_m` and adds it to the canvas.
:param TransitionModel transition_m: The transition for which a view is to be created
:param ContainerStateModel parent_state_m: The parental `StateModel` of the transition
### Response:
def add_transition_view_for_model(self, transition_m, parent_state_m):
    """Creates a `TransitionView` and adds it to the canvas

    The method creates a `TransitionView` from the given `TransitionModel` `transition_m` and adds it to the canvas.

    :param TransitionModel transition_m: The transition for which a view is to be created
    :param ContainerStateModel parent_state_m: The parental `StateModel` of the transition
    """
    parent_view = self.canvas.get_view_for_model(parent_state_m)
    view = TransitionView(transition_m, parent_view.hierarchy_level)
    # index=None appends the view last, so the transition is drawn above
    # all other elements of the parent state
    self.canvas.add(view, parent_view, index=None)
    self._connect_transition_to_ports(transition_m, view, parent_state_m, parent_view)
    return view
def asDict(self, recursive=False):
    """
    Return as a dict

    :param recursive: turns the nested Row as dict (default: False).

    >>> Row(name="Alice", age=11).asDict() == {'name': 'Alice', 'age': 11}
    True
    >>> row = Row(key=1, value=Row(name='a', age=2))
    >>> row.asDict() == {'key': 1, 'value': Row(age=2, name='a')}
    True
    >>> row.asDict(True) == {'key': 1, 'value': {'name': 'a', 'age': 2}}
    True
    """
    if not hasattr(self, "__fields__"):
        raise TypeError("Cannot convert a Row class into dict")

    if not recursive:
        return dict(zip(self.__fields__, self))

    def _convert(value):
        # Recursively turn nested Rows -- possibly inside lists and
        # dicts -- into plain dicts as well.
        if isinstance(value, Row):
            return value.asDict(True)
        if isinstance(value, list):
            return [_convert(item) for item in value]
        if isinstance(value, dict):
            return {k: _convert(v) for k, v in value.items()}
        return value

    return dict(zip(self.__fields__, map(_convert, self)))
:param recursive: turns the nested Row as dict (default: False).
>>> Row(name="Alice", age=11).asDict() == {'name': 'Alice', 'age': 11}
True
>>> row = Row(key=1, value=Row(name='a', age=2))
>>> row.asDict() == {'key': 1, 'value': Row(age=2, name='a')}
True
>>> row.asDict(True) == {'key': 1, 'value': {'name': 'a', 'age': 2}}
True | Below is the instruction that describes the task:
### Input:
Return as an dict
:param recursive: turns the nested Row as dict (default: False).
>>> Row(name="Alice", age=11).asDict() == {'name': 'Alice', 'age': 11}
True
>>> row = Row(key=1, value=Row(name='a', age=2))
>>> row.asDict() == {'key': 1, 'value': Row(age=2, name='a')}
True
>>> row.asDict(True) == {'key': 1, 'value': {'name': 'a', 'age': 2}}
True
### Response:
def asDict(self, recursive=False):
    """
    Return as a dict

    :param recursive: turns the nested Row as dict (default: False).

    >>> Row(name="Alice", age=11).asDict() == {'name': 'Alice', 'age': 11}
    True
    >>> row = Row(key=1, value=Row(name='a', age=2))
    >>> row.asDict() == {'key': 1, 'value': Row(age=2, name='a')}
    True
    >>> row.asDict(True) == {'key': 1, 'value': {'name': 'a', 'age': 2}}
    True
    """
    if not hasattr(self, "__fields__"):
        raise TypeError("Cannot convert a Row class into dict")

    if not recursive:
        return dict(zip(self.__fields__, self))

    def _convert(value):
        # Recursively turn nested Rows -- possibly inside lists and
        # dicts -- into plain dicts as well.
        if isinstance(value, Row):
            return value.asDict(True)
        if isinstance(value, list):
            return [_convert(item) for item in value]
        if isinstance(value, dict):
            return {k: _convert(v) for k, v in value.items()}
        return value

    return dict(zip(self.__fields__, map(_convert, self)))
def is_valid_mac(addr):
    """Check the syntax of a given mac address.

    The acceptable format is xx:xx:xx:xx:xx:xx -- six colon-separated
    groups of one or two hexadecimal digits.

    :param addr: the address string to validate
    :return: True if *addr* is a syntactically valid mac address
    """
    hex_digits = set("0123456789abcdefABCDEF")
    groups = addr.split(':')
    if len(groups) != 6:
        return False
    for group in groups:
        # Validate the characters explicitly: int(group, 16) alone would
        # also accept signs ("+1", "-1"), an "0x" prefix, surrounding
        # whitespace and over-long groups such as "00ff".
        if not 1 <= len(group) <= 2 or any(c not in hex_digits for c in group):
            return False
    return True
The acceptable format is xx:xx:xx:xx:xx:xx | Below is the instruction that describes the task:
### Input:
Check the syntax of a given mac address.
The acceptable format is xx:xx:xx:xx:xx:xx
### Response:
def is_valid_mac(addr):
    """Check the syntax of a given mac address.

    The acceptable format is xx:xx:xx:xx:xx:xx -- six colon-separated
    groups of one or two hexadecimal digits.

    :param addr: the address string to validate
    :return: True if *addr* is a syntactically valid mac address
    """
    hex_digits = set("0123456789abcdefABCDEF")
    groups = addr.split(':')
    if len(groups) != 6:
        return False
    for group in groups:
        # Validate the characters explicitly: int(group, 16) alone would
        # also accept signs ("+1", "-1"), an "0x" prefix, surrounding
        # whitespace and over-long groups such as "00ff".
        if not 1 <= len(group) <= 2 or any(c not in hex_digits for c in group):
            return False
    return True
def get_metric(self, reset: bool) -> Union[float, Tuple[float, ...], Dict[str, float], Dict[str, List[float]]]:
    """
    Compute and return the metric. Optionally also call :func:`self.reset`.

    Parameters
    ----------
    reset : bool
        If True, the metric's internal accumulated state should be
        cleared after the value is computed.

    Returns
    -------
    The metric value; concrete implementations may return a single
    float, a tuple of floats, or a mapping from names to values.
    """
    # Abstract: concrete subclasses must override this method.
    raise NotImplementedError
### Input:
Compute and return the metric. Optionally also call :func:`self.reset`.
### Response:
def get_metric(self, reset: bool) -> Union[float, Tuple[float, ...], Dict[str, float], Dict[str, List[float]]]:
    """
    Compute and return the metric. Optionally also call :func:`self.reset`.

    Parameters
    ----------
    reset : bool
        If True, the metric's internal accumulated state should be
        cleared after the value is computed.

    Returns
    -------
    The metric value; concrete implementations may return a single
    float, a tuple of floats, or a mapping from names to values.
    """
    # Abstract: concrete subclasses must override this method.
    raise NotImplementedError
def set_value(self, *args, **kwargs):
    """
    Quickly set single value at (item, major, minor) location.

    .. deprecated:: 0.21.0
        Please use .at[] or .iat[] accessors.

    Parameters
    ----------
    item : item label (panel item)
    major : major axis label (panel item row)
    minor : minor axis label (panel item column)
    value : scalar
    takeable : interpret the passed labels as indexers, default False

    Returns
    -------
    panel : Panel
        If label combo is contained, will be reference to calling Panel,
        otherwise a new object.
    """
    deprecation_msg = ("set_value is deprecated and will be removed "
                       "in a future release. Please use "
                       ".at[] or .iat[] accessors instead")
    # stacklevel=2 points the warning at the caller of set_value
    warnings.warn(deprecation_msg, FutureWarning, stacklevel=2)
    return self._set_value(*args, **kwargs)
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
item : item label (panel item)
major : major axis label (panel item row)
minor : minor axis label (panel item column)
value : scalar
takeable : interpret the passed labels as indexers, default False
Returns
-------
panel : Panel
If label combo is contained, will be reference to calling Panel,
otherwise a new object. | Below is the instruction that describes the task:
### Input:
Quickly set single value at (item, major, minor) location.
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
item : item label (panel item)
major : major axis label (panel item row)
minor : minor axis label (panel item column)
value : scalar
takeable : interpret the passed labels as indexers, default False
Returns
-------
panel : Panel
If label combo is contained, will be reference to calling Panel,
otherwise a new object.
### Response:
def set_value(self, *args, **kwargs):
    """
    Quickly set single value at (item, major, minor) location.

    .. deprecated:: 0.21.0
        Please use .at[] or .iat[] accessors.

    Parameters
    ----------
    item : item label (panel item)
    major : major axis label (panel item row)
    minor : minor axis label (panel item column)
    value : scalar
    takeable : interpret the passed labels as indexers, default False

    Returns
    -------
    panel : Panel
        If label combo is contained, will be reference to calling Panel,
        otherwise a new object.
    """
    deprecation_msg = ("set_value is deprecated and will be removed "
                       "in a future release. Please use "
                       ".at[] or .iat[] accessors instead")
    # stacklevel=2 points the warning at the caller of set_value
    warnings.warn(deprecation_msg, FutureWarning, stacklevel=2)
    return self._set_value(*args, **kwargs)
def relative_time_to_text(l10n=locales.get(_default), **kwargs):
"""
Return an aproximate textual representation of the provioded duration of
time.
Examples:
relative_time_to_text(hours=6, minutes=34) -> "six and a half hours"
relative_time_to_text(years=5, months=8, days=5) -> "less than six years"
Keyword arguments:
l10n -- The locale of the language for the result. Default is en_US.
seconds
minutes
hours
days
weeks
months
years
"""
kwargs = _normalize(**kwargs)
cor = _Chain()
cor.add(_LessThan1M(l10n, **kwargs))
cor.add(_LessThan1H(l10n, **kwargs))
cor.add(_LessThan23H(l10n, **kwargs))
cor.add(_LessThan6D1H(l10n, **kwargs))
cor.add(_LessThan25D10H(l10n, **kwargs))
cor.add(_LessThan11MM(l10n, **kwargs))
cor.add(_LessThan10Y(l10n, **kwargs))
cor.add(_MoreThan10Y(l10n, **kwargs))
return cor.run() | Return an aproximate textual representation of the provioded duration of
time.
Examples:
relative_time_to_text(hours=6, minutes=34) -> "six and a half hours"
relative_time_to_text(years=5, months=8, days=5) -> "less than six years"
Keyword arguments:
l10n -- The locale of the language for the result. Default is en_US.
seconds
minutes
hours
days
weeks
months
years | Below is the the instruction that describes the task:
### Input:
Return an aproximate textual representation of the provioded duration of
time.
Examples:
relative_time_to_text(hours=6, minutes=34) -> "six and a half hours"
relative_time_to_text(years=5, months=8, days=5) -> "less than six years"
Keyword arguments:
l10n -- The locale of the language for the result. Default is en_US.
seconds
minutes
hours
days
weeks
months
years
### Response:
def relative_time_to_text(l10n=locales.get(_default), **kwargs):
"""
Return an aproximate textual representation of the provioded duration of
time.
Examples:
relative_time_to_text(hours=6, minutes=34) -> "six and a half hours"
relative_time_to_text(years=5, months=8, days=5) -> "less than six years"
Keyword arguments:
l10n -- The locale of the language for the result. Default is en_US.
seconds
minutes
hours
days
weeks
months
years
"""
kwargs = _normalize(**kwargs)
cor = _Chain()
cor.add(_LessThan1M(l10n, **kwargs))
cor.add(_LessThan1H(l10n, **kwargs))
cor.add(_LessThan23H(l10n, **kwargs))
cor.add(_LessThan6D1H(l10n, **kwargs))
cor.add(_LessThan25D10H(l10n, **kwargs))
cor.add(_LessThan11MM(l10n, **kwargs))
cor.add(_LessThan10Y(l10n, **kwargs))
cor.add(_MoreThan10Y(l10n, **kwargs))
return cor.run() |
def set_inteface_down(devid, ifindex):
"""
function takest devid and ifindex of specific device and interface and issues a RESTFUL call to " shut" the specifie
d interface on the target device.
:param devid: int or str value of the target device
:param ifindex: int or str value of the target interface
:return: HTTP status code 204 with no values.
"""
if auth is None or url is None: # checks to see if the imc credentials are already available
set_imc_creds()
set_int_down_url = "/imcrs/plat/res/device/" + str(devid) + "/interface/" + str(ifindex) + "/down"
f_url = url + set_int_down_url
payload = None
r = requests.put(f_url, auth=auth,
headers=headers) # creates the URL using the payload variable as the contents
print(r.status_code)
if r.status_code == 204:
return r.status_code
else:
print("An Error has occured") | function takest devid and ifindex of specific device and interface and issues a RESTFUL call to " shut" the specifie
d interface on the target device.
:param devid: int or str value of the target device
:param ifindex: int or str value of the target interface
:return: HTTP status code 204 with no values. | Below is the the instruction that describes the task:
### Input:
function takest devid and ifindex of specific device and interface and issues a RESTFUL call to " shut" the specifie
d interface on the target device.
:param devid: int or str value of the target device
:param ifindex: int or str value of the target interface
:return: HTTP status code 204 with no values.
### Response:
def set_inteface_down(devid, ifindex):
"""
function takest devid and ifindex of specific device and interface and issues a RESTFUL call to " shut" the specifie
d interface on the target device.
:param devid: int or str value of the target device
:param ifindex: int or str value of the target interface
:return: HTTP status code 204 with no values.
"""
if auth is None or url is None: # checks to see if the imc credentials are already available
set_imc_creds()
set_int_down_url = "/imcrs/plat/res/device/" + str(devid) + "/interface/" + str(ifindex) + "/down"
f_url = url + set_int_down_url
payload = None
r = requests.put(f_url, auth=auth,
headers=headers) # creates the URL using the payload variable as the contents
print(r.status_code)
if r.status_code == 204:
return r.status_code
else:
print("An Error has occured") |
def _async_open(self, session_id, proto_version):
''' Perform the specific steps needed to open a connection to a Bokeh session
Specifically, this method coordinates:
* Getting a session for a session ID (creating a new one if needed)
* Creating a protocol receiver and hander
* Opening a new ServerConnection and sending it an ACK
Args:
session_id (str) :
A session ID to for a session to connect to
If no session exists with the given ID, a new session is made
proto_version (str):
The protocol version requested by the connecting client.
Returns:
None
'''
try:
yield self.application_context.create_session_if_needed(session_id, self.request)
session = self.application_context.get_session(session_id)
protocol = Protocol(proto_version)
self.receiver = Receiver(protocol)
log.debug("Receiver created for %r", protocol)
self.handler = ProtocolHandler()
log.debug("ProtocolHandler created for %r", protocol)
self.connection = self.application.new_connection(protocol, self, self.application_context, session)
log.info("ServerConnection created")
except ProtocolError as e:
log.error("Could not create new server session, reason: %s", e)
self.close()
raise e
msg = self.connection.protocol.create('ACK')
yield self.send_message(msg)
raise gen.Return(None) | Perform the specific steps needed to open a connection to a Bokeh session
Specifically, this method coordinates:
* Getting a session for a session ID (creating a new one if needed)
* Creating a protocol receiver and hander
* Opening a new ServerConnection and sending it an ACK
Args:
session_id (str) :
A session ID to for a session to connect to
If no session exists with the given ID, a new session is made
proto_version (str):
The protocol version requested by the connecting client.
Returns:
None | Below is the the instruction that describes the task:
### Input:
Perform the specific steps needed to open a connection to a Bokeh session
Specifically, this method coordinates:
* Getting a session for a session ID (creating a new one if needed)
* Creating a protocol receiver and hander
* Opening a new ServerConnection and sending it an ACK
Args:
session_id (str) :
A session ID to for a session to connect to
If no session exists with the given ID, a new session is made
proto_version (str):
The protocol version requested by the connecting client.
Returns:
None
### Response:
def _async_open(self, session_id, proto_version):
''' Perform the specific steps needed to open a connection to a Bokeh session
Specifically, this method coordinates:
* Getting a session for a session ID (creating a new one if needed)
* Creating a protocol receiver and hander
* Opening a new ServerConnection and sending it an ACK
Args:
session_id (str) :
A session ID to for a session to connect to
If no session exists with the given ID, a new session is made
proto_version (str):
The protocol version requested by the connecting client.
Returns:
None
'''
try:
yield self.application_context.create_session_if_needed(session_id, self.request)
session = self.application_context.get_session(session_id)
protocol = Protocol(proto_version)
self.receiver = Receiver(protocol)
log.debug("Receiver created for %r", protocol)
self.handler = ProtocolHandler()
log.debug("ProtocolHandler created for %r", protocol)
self.connection = self.application.new_connection(protocol, self, self.application_context, session)
log.info("ServerConnection created")
except ProtocolError as e:
log.error("Could not create new server session, reason: %s", e)
self.close()
raise e
msg = self.connection.protocol.create('ACK')
yield self.send_message(msg)
raise gen.Return(None) |
def hook_focus_events(self):
""" Install the hooks for focus events.
This method may be overridden by subclasses as needed.
"""
widget = self.widget
widget.focusInEvent = self.focusInEvent
widget.focusOutEvent = self.focusOutEvent | Install the hooks for focus events.
This method may be overridden by subclasses as needed. | Below is the the instruction that describes the task:
### Input:
Install the hooks for focus events.
This method may be overridden by subclasses as needed.
### Response:
def hook_focus_events(self):
""" Install the hooks for focus events.
This method may be overridden by subclasses as needed.
"""
widget = self.widget
widget.focusInEvent = self.focusInEvent
widget.focusOutEvent = self.focusOutEvent |
def __expr_str(cls, expr, level):
'''Returns string representing the expression.'''
ident = ' ' * level * 4
if isinstance(expr, tuple):
return '{}{}'.format(ident, str(expr))
if expr.etype[0] in ['pvar', 'constant']:
return '{}Expression(etype={}, args={})'.format(ident, expr.etype, expr.args)
if not isinstance(expr, Expression):
return '{}{}'.format(ident, str(expr))
args = list(cls.__expr_str(arg, level + 1) for arg in expr.args)
args = '\n'.join(args)
return '{}Expression(etype={}, args=\n{})'.format(ident, expr.etype, args) | Returns string representing the expression. | Below is the the instruction that describes the task:
### Input:
Returns string representing the expression.
### Response:
def __expr_str(cls, expr, level):
'''Returns string representing the expression.'''
ident = ' ' * level * 4
if isinstance(expr, tuple):
return '{}{}'.format(ident, str(expr))
if expr.etype[0] in ['pvar', 'constant']:
return '{}Expression(etype={}, args={})'.format(ident, expr.etype, expr.args)
if not isinstance(expr, Expression):
return '{}{}'.format(ident, str(expr))
args = list(cls.__expr_str(arg, level + 1) for arg in expr.args)
args = '\n'.join(args)
return '{}Expression(etype={}, args=\n{})'.format(ident, expr.etype, args) |
def _list_selection_changed(self):
"""Selection has changed in the list."""
items = self.list_layers_in_map_report.selectedItems()
self.remove_layer.setEnabled(len(items) >= 1)
if len(items) == 1 and self.list_layers_in_map_report.count() >= 2:
index = self.list_layers_in_map_report.indexFromItem(items[0])
index = index.row()
if index == 0:
self.move_up.setEnabled(False)
self.move_down.setEnabled(True)
elif index == self.list_layers_in_map_report.count() - 1:
self.move_up.setEnabled(True)
self.move_down.setEnabled(False)
else:
self.move_up.setEnabled(True)
self.move_down.setEnabled(True)
else:
self.move_up.setEnabled(False)
self.move_down.setEnabled(False) | Selection has changed in the list. | Below is the the instruction that describes the task:
### Input:
Selection has changed in the list.
### Response:
def _list_selection_changed(self):
"""Selection has changed in the list."""
items = self.list_layers_in_map_report.selectedItems()
self.remove_layer.setEnabled(len(items) >= 1)
if len(items) == 1 and self.list_layers_in_map_report.count() >= 2:
index = self.list_layers_in_map_report.indexFromItem(items[0])
index = index.row()
if index == 0:
self.move_up.setEnabled(False)
self.move_down.setEnabled(True)
elif index == self.list_layers_in_map_report.count() - 1:
self.move_up.setEnabled(True)
self.move_down.setEnabled(False)
else:
self.move_up.setEnabled(True)
self.move_down.setEnabled(True)
else:
self.move_up.setEnabled(False)
self.move_down.setEnabled(False) |
def _apply_prefix(prefix, model):
"""
Prefix all path entries in model with the given prefix.
"""
if not isinstance(model, dict):
raise TypeError("Expected dict for model, got %s" % type(model))
# We get unwanted leading/trailing slashes if prefix or model['path'] are
# '', both of which are legal values.
model['path'] = '/'.join((prefix, model['path'])).strip('/')
if model['type'] in ('notebook', 'file'):
return model
if model['type'] != 'directory':
raise ValueError("Unknown model type %s." % type(model))
content = model.get('content', None)
if content is not None:
for sub_model in content:
_apply_prefix(prefix, sub_model)
return model | Prefix all path entries in model with the given prefix. | Below is the the instruction that describes the task:
### Input:
Prefix all path entries in model with the given prefix.
### Response:
def _apply_prefix(prefix, model):
"""
Prefix all path entries in model with the given prefix.
"""
if not isinstance(model, dict):
raise TypeError("Expected dict for model, got %s" % type(model))
# We get unwanted leading/trailing slashes if prefix or model['path'] are
# '', both of which are legal values.
model['path'] = '/'.join((prefix, model['path'])).strip('/')
if model['type'] in ('notebook', 'file'):
return model
if model['type'] != 'directory':
raise ValueError("Unknown model type %s." % type(model))
content = model.get('content', None)
if content is not None:
for sub_model in content:
_apply_prefix(prefix, sub_model)
return model |
def structure_dir(self):
"""str: Directory where structure related files are stored"""
if self.root_dir:
return op.join(self.protein_dir, 'structures')
else:
log.debug('Root directory not set')
return None | str: Directory where structure related files are stored | Below is the the instruction that describes the task:
### Input:
str: Directory where structure related files are stored
### Response:
def structure_dir(self):
"""str: Directory where structure related files are stored"""
if self.root_dir:
return op.join(self.protein_dir, 'structures')
else:
log.debug('Root directory not set')
return None |
def calculate_signature(key, data, timestamp=None):
"""
Calculates the signature for the given request data.
"""
# Create a timestamp if one was not given
if timestamp is None:
timestamp = int(time.time())
# Construct the message from the timestamp and the data in the request
message = str(timestamp) + ''.join("%s%s" % (k,v) for k,v in sorted(data.items()))
# Calculate the signature (HMAC SHA256) according to RFC 2104
signature = hmac.HMAC(str(key), message, hashlib.sha256).hexdigest()
return signature | Calculates the signature for the given request data. | Below is the the instruction that describes the task:
### Input:
Calculates the signature for the given request data.
### Response:
def calculate_signature(key, data, timestamp=None):
"""
Calculates the signature for the given request data.
"""
# Create a timestamp if one was not given
if timestamp is None:
timestamp = int(time.time())
# Construct the message from the timestamp and the data in the request
message = str(timestamp) + ''.join("%s%s" % (k,v) for k,v in sorted(data.items()))
# Calculate the signature (HMAC SHA256) according to RFC 2104
signature = hmac.HMAC(str(key), message, hashlib.sha256).hexdigest()
return signature |
def _kill_process(self, pid, sig=signal.SIGKILL):
"""Try to send signal to given process."""
try:
os.kill(pid, sig)
except OSError as e:
if e.errno == errno.ESRCH: # process itself returned and exited before killing
logging.debug("Failure %s while killing process %s with signal %s: %s",
e.errno, pid, sig, e.strerror)
else:
logging.warning("Failure %s while killing process %s with signal %s: %s",
e.errno, pid, sig, e.strerror) | Try to send signal to given process. | Below is the the instruction that describes the task:
### Input:
Try to send signal to given process.
### Response:
def _kill_process(self, pid, sig=signal.SIGKILL):
"""Try to send signal to given process."""
try:
os.kill(pid, sig)
except OSError as e:
if e.errno == errno.ESRCH: # process itself returned and exited before killing
logging.debug("Failure %s while killing process %s with signal %s: %s",
e.errno, pid, sig, e.strerror)
else:
logging.warning("Failure %s while killing process %s with signal %s: %s",
e.errno, pid, sig, e.strerror) |
def _is_request_in_include_path(self, request):
"""Check if the request path is in the `_include_paths` list.
If no specific include paths are given then we assume that
authentication is required for all paths.
"""
if self._include_paths:
for path in self._include_paths:
if request.path.startswith(path):
return True
return False
else:
return True | Check if the request path is in the `_include_paths` list.
If no specific include paths are given then we assume that
authentication is required for all paths. | Below is the the instruction that describes the task:
### Input:
Check if the request path is in the `_include_paths` list.
If no specific include paths are given then we assume that
authentication is required for all paths.
### Response:
def _is_request_in_include_path(self, request):
"""Check if the request path is in the `_include_paths` list.
If no specific include paths are given then we assume that
authentication is required for all paths.
"""
if self._include_paths:
for path in self._include_paths:
if request.path.startswith(path):
return True
return False
else:
return True |
def L2S(lunarD, lunarM, lunarY, lunarLeap, tZ=7):
'''def L2S(lunarD, lunarM, lunarY, lunarLeap, tZ = 7): Convert a lunar date
to the corresponding solar date.'''
if (lunarM < 11):
a11 = getLunarMonth11(lunarY - 1, tZ)
b11 = getLunarMonth11(lunarY, tZ)
else:
a11 = getLunarMonth11(lunarY, tZ)
b11 = getLunarMonth11(lunarY + 1, tZ)
k = int(0.5 +
(a11 - 2415021.076998695) / 29.530588853)
off = lunarM - 11
if (off < 0):
off += 12
if (b11 - a11 > 365):
leapOff = getLeapMonthOffset(a11, tZ)
leapM = leapOff - 2
if (leapM < 0):
leapM += 12
if (lunarLeap != 0 and lunarM != leapM):
return [0, 0, 0]
elif (lunarLeap != 0 or off >= leapOff):
off += 1
monthStart = getNewMoonDay(k + off, tZ)
return jdToDate(monthStart + lunarD - 1) | def L2S(lunarD, lunarM, lunarY, lunarLeap, tZ = 7): Convert a lunar date
to the corresponding solar date. | Below is the the instruction that describes the task:
### Input:
def L2S(lunarD, lunarM, lunarY, lunarLeap, tZ = 7): Convert a lunar date
to the corresponding solar date.
### Response:
def L2S(lunarD, lunarM, lunarY, lunarLeap, tZ=7):
'''def L2S(lunarD, lunarM, lunarY, lunarLeap, tZ = 7): Convert a lunar date
to the corresponding solar date.'''
if (lunarM < 11):
a11 = getLunarMonth11(lunarY - 1, tZ)
b11 = getLunarMonth11(lunarY, tZ)
else:
a11 = getLunarMonth11(lunarY, tZ)
b11 = getLunarMonth11(lunarY + 1, tZ)
k = int(0.5 +
(a11 - 2415021.076998695) / 29.530588853)
off = lunarM - 11
if (off < 0):
off += 12
if (b11 - a11 > 365):
leapOff = getLeapMonthOffset(a11, tZ)
leapM = leapOff - 2
if (leapM < 0):
leapM += 12
if (lunarLeap != 0 and lunarM != leapM):
return [0, 0, 0]
elif (lunarLeap != 0 or off >= leapOff):
off += 1
monthStart = getNewMoonDay(k + off, tZ)
return jdToDate(monthStart + lunarD - 1) |
def appname(path=None):
"""
Return a useful application name based on the program argument.
A special case maps 'mod_wsgi' to a more appropriate name so
web applications show up as our own.
"""
if path is None:
path = sys.argv[0]
name = os.path.basename(os.path.splitext(path)[0])
if name == 'mod_wsgi':
name = 'nvn_web' # pragma: no cover
return name | Return a useful application name based on the program argument.
A special case maps 'mod_wsgi' to a more appropriate name so
web applications show up as our own. | Below is the the instruction that describes the task:
### Input:
Return a useful application name based on the program argument.
A special case maps 'mod_wsgi' to a more appropriate name so
web applications show up as our own.
### Response:
def appname(path=None):
"""
Return a useful application name based on the program argument.
A special case maps 'mod_wsgi' to a more appropriate name so
web applications show up as our own.
"""
if path is None:
path = sys.argv[0]
name = os.path.basename(os.path.splitext(path)[0])
if name == 'mod_wsgi':
name = 'nvn_web' # pragma: no cover
return name |
def _get_connected_subgraphs(vertices, vertices_neighbours):
"""Break a graph containing unconnected subgraphs into a list of connected
subgraphs.
Returns
-------
[set([vertex, ...]), ...]
"""
remaining_vertices = set(vertices)
subgraphs = []
while remaining_vertices:
subgraph = set(_dfs(remaining_vertices.pop(), vertices_neighbours))
remaining_vertices.difference_update(subgraph)
subgraphs.append(subgraph)
return subgraphs | Break a graph containing unconnected subgraphs into a list of connected
subgraphs.
Returns
-------
[set([vertex, ...]), ...] | Below is the the instruction that describes the task:
### Input:
Break a graph containing unconnected subgraphs into a list of connected
subgraphs.
Returns
-------
[set([vertex, ...]), ...]
### Response:
def _get_connected_subgraphs(vertices, vertices_neighbours):
"""Break a graph containing unconnected subgraphs into a list of connected
subgraphs.
Returns
-------
[set([vertex, ...]), ...]
"""
remaining_vertices = set(vertices)
subgraphs = []
while remaining_vertices:
subgraph = set(_dfs(remaining_vertices.pop(), vertices_neighbours))
remaining_vertices.difference_update(subgraph)
subgraphs.append(subgraph)
return subgraphs |
def get_queryset(self):
"""
Retrieve the category by his path and
build a queryset of her published entries.
"""
self.category = get_category_or_404(self.kwargs['path'])
return self.category.entries_published() | Retrieve the category by his path and
build a queryset of her published entries. | Below is the the instruction that describes the task:
### Input:
Retrieve the category by his path and
build a queryset of her published entries.
### Response:
def get_queryset(self):
"""
Retrieve the category by his path and
build a queryset of her published entries.
"""
self.category = get_category_or_404(self.kwargs['path'])
return self.category.entries_published() |
def _get_type(self, values):
""":return: the type of a knitting pattern set."""
if TYPE not in values:
self._error("No pattern type given but should be "
"\"{}\"".format(KNITTING_PATTERN_TYPE))
type_ = values[TYPE]
if type_ != KNITTING_PATTERN_TYPE:
self._error("Wrong pattern type. Type is \"{}\" "
"but should be \"{}\""
"".format(type_, KNITTING_PATTERN_TYPE))
return type_ | :return: the type of a knitting pattern set. | Below is the the instruction that describes the task:
### Input:
:return: the type of a knitting pattern set.
### Response:
def _get_type(self, values):
""":return: the type of a knitting pattern set."""
if TYPE not in values:
self._error("No pattern type given but should be "
"\"{}\"".format(KNITTING_PATTERN_TYPE))
type_ = values[TYPE]
if type_ != KNITTING_PATTERN_TYPE:
self._error("Wrong pattern type. Type is \"{}\" "
"but should be \"{}\""
"".format(type_, KNITTING_PATTERN_TYPE))
return type_ |
def verification_check(self, phone_number, country_code, verification_code):
"""
:param phone_number:
:param country_code:
:param verification_code:
:return:
"""
options = {
'phone_number': phone_number,
'country_code': country_code,
'verification_code': verification_code
}
resp = self.get("/protected/json/phones/verification/check", options)
return Phone(self, resp) | :param phone_number:
:param country_code:
:param verification_code:
:return: | Below is the the instruction that describes the task:
### Input:
:param phone_number:
:param country_code:
:param verification_code:
:return:
### Response:
def verification_check(self, phone_number, country_code, verification_code):
"""
:param phone_number:
:param country_code:
:param verification_code:
:return:
"""
options = {
'phone_number': phone_number,
'country_code': country_code,
'verification_code': verification_code
}
resp = self.get("/protected/json/phones/verification/check", options)
return Phone(self, resp) |
def set_goid2color_pval(self, goid2color):
"""Fill missing colors based on p-value of an enriched GO term."""
alpha2col = self.alpha2col
if self.pval_name is not None:
pval_name = self.pval_name
for goid, res in self.go2res.items():
pval = getattr(res, pval_name, None)
if pval is not None:
for alpha, color in alpha2col.items():
if pval <= alpha and res.study_count != 0:
if goid not in goid2color:
goid2color[goid] = color | Fill missing colors based on p-value of an enriched GO term. | Below is the the instruction that describes the task:
### Input:
Fill missing colors based on p-value of an enriched GO term.
### Response:
def set_goid2color_pval(self, goid2color):
"""Fill missing colors based on p-value of an enriched GO term."""
alpha2col = self.alpha2col
if self.pval_name is not None:
pval_name = self.pval_name
for goid, res in self.go2res.items():
pval = getattr(res, pval_name, None)
if pval is not None:
for alpha, color in alpha2col.items():
if pval <= alpha and res.study_count != 0:
if goid not in goid2color:
goid2color[goid] = color |
def tuplesorted(items, *keys):
"""Sort by tuples with a different key for each item.
Args:
items: An iterable series of sequences (typically tuples)
*keys: Key objects which transform individual elements of
each tuple into sort keys. The zeroth object
transforms the zeroth element of each tuple, the first
key object transforms the first element of each tuple,
and so on.
Returns:
A list of items sorted according to keys.
"""
# Transform the keys so each works on one item of the tuple
tuple_keys = [
Key(func=lambda t, i=index, k=key: k.func(t[i]), reverse=key.reverse)
for index, key in enumerate(keys)
]
return multisorted(items, *tuple_keys) | Sort by tuples with a different key for each item.
Args:
items: An iterable series of sequences (typically tuples)
*keys: Key objects which transform individual elements of
each tuple into sort keys. The zeroth object
transforms the zeroth element of each tuple, the first
key object transforms the first element of each tuple,
and so on.
Returns:
A list of items sorted according to keys. | Below is the the instruction that describes the task:
### Input:
Sort by tuples with a different key for each item.
Args:
items: An iterable series of sequences (typically tuples)
*keys: Key objects which transform individual elements of
each tuple into sort keys. The zeroth object
transforms the zeroth element of each tuple, the first
key object transforms the first element of each tuple,
and so on.
Returns:
A list of items sorted according to keys.
### Response:
def tuplesorted(items, *keys):
"""Sort by tuples with a different key for each item.
Args:
items: An iterable series of sequences (typically tuples)
*keys: Key objects which transform individual elements of
each tuple into sort keys. The zeroth object
transforms the zeroth element of each tuple, the first
key object transforms the first element of each tuple,
and so on.
Returns:
A list of items sorted according to keys.
"""
# Transform the keys so each works on one item of the tuple
tuple_keys = [
Key(func=lambda t, i=index, k=key: k.func(t[i]), reverse=key.reverse)
for index, key in enumerate(keys)
]
return multisorted(items, *tuple_keys) |
def get_valid_times_for_job_legacy(self, num_job):
""" Get the times for which the job num_job will be valid, using the method
use in inspiral hipe.
"""
# All of this should be integers, so no rounding factors needed.
shift_dur = self.curr_seg[0] + int(self.job_time_shift * num_job)
job_valid_seg = self.valid_chunk.shift(shift_dur)
# If this is the last job, push the end back
if num_job == (self.num_jobs - 1):
dataPushBack = self.data_length - self.valid_chunk[1]
job_valid_seg = segments.segment(job_valid_seg[0],
self.curr_seg[1] - dataPushBack)
return job_valid_seg | Get the times for which the job num_job will be valid, using the method
use in inspiral hipe. | Below is the the instruction that describes the task:
### Input:
Get the times for which the job num_job will be valid, using the method
use in inspiral hipe.
### Response:
def get_valid_times_for_job_legacy(self, num_job):
""" Get the times for which the job num_job will be valid, using the method
use in inspiral hipe.
"""
# All of this should be integers, so no rounding factors needed.
shift_dur = self.curr_seg[0] + int(self.job_time_shift * num_job)
job_valid_seg = self.valid_chunk.shift(shift_dur)
# If this is the last job, push the end back
if num_job == (self.num_jobs - 1):
dataPushBack = self.data_length - self.valid_chunk[1]
job_valid_seg = segments.segment(job_valid_seg[0],
self.curr_seg[1] - dataPushBack)
return job_valid_seg |
def keep_negative_impute(X, y, model_generator, method_name, num_fcounts=11):
""" Keep Negative (impute)
xlabel = "Max fraction of features kept"
ylabel = "Negative mean model output"
transform = "negate"
sort_order = 17
"""
return __run_measure(measures.keep_impute, X, y, model_generator, method_name, -1, num_fcounts, __mean_pred) | Keep Negative (impute)
xlabel = "Max fraction of features kept"
ylabel = "Negative mean model output"
transform = "negate"
sort_order = 17 | Below is the the instruction that describes the task:
### Input:
Keep Negative (impute)
xlabel = "Max fraction of features kept"
ylabel = "Negative mean model output"
transform = "negate"
sort_order = 17
### Response:
def keep_negative_impute(X, y, model_generator, method_name, num_fcounts=11):
""" Keep Negative (impute)
xlabel = "Max fraction of features kept"
ylabel = "Negative mean model output"
transform = "negate"
sort_order = 17
"""
return __run_measure(measures.keep_impute, X, y, model_generator, method_name, -1, num_fcounts, __mean_pred) |
def _init(self, formula):
"""
SAT oracle initialization. The method creates a new SAT oracle and
feeds it with the formula's hard clauses. Afterwards, all soft
clauses of the formula are augmented with selector literals and
also added to the solver. The list of all introduced selectors is
stored in variable ``self.sels``.
:param formula: input MaxSAT formula
:type formula: :class:`WCNF`
"""
self.oracle = Solver(name=self.solver, bootstrap_with=formula.hard,
incr=True, use_timer=True)
for i, cl in enumerate(formula.soft):
# TODO: if clause is unit, use its literal as selector
# (ITotalizer must be extended to support PB constraints first)
self.topv += 1
selv = self.topv
cl.append(self.topv)
self.oracle.add_clause(cl)
self.sels.append(selv)
if self.verbose > 1:
print('c formula: {0} vars, {1} hard, {2} soft'.format(formula.nv, len(formula.hard), len(formula.soft))) | SAT oracle initialization. The method creates a new SAT oracle and
feeds it with the formula's hard clauses. Afterwards, all soft
clauses of the formula are augmented with selector literals and
also added to the solver. The list of all introduced selectors is
stored in variable ``self.sels``.
:param formula: input MaxSAT formula
:type formula: :class:`WCNF` | Below is the the instruction that describes the task:
### Input:
SAT oracle initialization. The method creates a new SAT oracle and
feeds it with the formula's hard clauses. Afterwards, all soft
clauses of the formula are augmented with selector literals and
also added to the solver. The list of all introduced selectors is
stored in variable ``self.sels``.
:param formula: input MaxSAT formula
:type formula: :class:`WCNF`
### Response:
def _init(self, formula):
"""
SAT oracle initialization. The method creates a new SAT oracle and
feeds it with the formula's hard clauses. Afterwards, all soft
clauses of the formula are augmented with selector literals and
also added to the solver. The list of all introduced selectors is
stored in variable ``self.sels``.
:param formula: input MaxSAT formula
:type formula: :class:`WCNF`
"""
self.oracle = Solver(name=self.solver, bootstrap_with=formula.hard,
incr=True, use_timer=True)
for i, cl in enumerate(formula.soft):
# TODO: if clause is unit, use its literal as selector
# (ITotalizer must be extended to support PB constraints first)
self.topv += 1
selv = self.topv
cl.append(self.topv)
self.oracle.add_clause(cl)
self.sels.append(selv)
if self.verbose > 1:
print('c formula: {0} vars, {1} hard, {2} soft'.format(formula.nv, len(formula.hard), len(formula.soft))) |
def _call_connection_lost(self, error):
"""Finalize closing."""
try:
self._protocol.connection_lost(error)
finally:
os.close(self._fileno)
self._fileno = None
self._protocol = None
self._loop = None | Finalize closing. | Below is the instruction that describes the task:
### Input:
Finalize closing.
### Response:
def _call_connection_lost(self, error):
"""Finalize closing."""
try:
self._protocol.connection_lost(error)
finally:
os.close(self._fileno)
self._fileno = None
self._protocol = None
self._loop = None |
def build_homogeneisation_vehicules(temporary_store = None, year = None):
assert temporary_store is not None
"""Compute vehicule numbers by type"""
assert year is not None
# Load data
bdf_survey_collection = SurveyCollection.load(
collection = 'budget_des_familles', config_files_directory = config_files_directory)
survey = bdf_survey_collection.get_survey('budget_des_familles_{}'.format(year))
if year == 1995:
vehicule = None
# L'enquête BdF 1995 ne contient pas d'information sur le type de carburant utilisé par les véhicules.
if year == 2000:
vehicule = survey.get_values(table = "depmen")
kept_variables = ['ident', 'carbu01', 'carbu02']
vehicule = vehicule[kept_variables]
vehicule.rename(columns = {'ident': 'ident_men'}, inplace = True)
vehicule.rename(columns = {'carbu01': 'carbu1'}, inplace = True)
vehicule.rename(columns = {'carbu02': 'carbu2'}, inplace = True)
vehicule["veh_tot"] = 1
vehicule["veh_essence"] = 1 * (vehicule['carbu1'] == 1) + 1 * (vehicule['carbu2'] == 1)
vehicule["veh_diesel"] = 1 * (vehicule['carbu1'] == 2) + 1 * (vehicule['carbu2'] == 2)
vehicule.index = vehicule.index.astype(ident_men_dtype)
if year == 2005:
vehicule = survey.get_values(table = "automobile")
kept_variables = ['ident_men', 'carbu']
vehicule = vehicule[kept_variables]
vehicule["veh_tot"] = 1
vehicule["veh_essence"] = (vehicule['carbu'] == 1)
vehicule["veh_diesel"] = (vehicule['carbu'] == 2)
if year == 2011:
try:
vehicule = survey.get_values(table = "AUTOMOBILE")
except:
vehicule = survey.get_values(table = "automobile")
kept_variables = ['ident_me', 'carbu']
vehicule = vehicule[kept_variables]
vehicule.rename(columns = {'ident_me': 'ident_men'}, inplace = True)
vehicule["veh_tot"] = 1
vehicule["veh_essence"] = (vehicule['carbu'] == 1)
vehicule["veh_diesel"] = (vehicule['carbu'] == 2)
# Compute the number of cars by category and save
if year != 1995:
vehicule = vehicule.groupby(by = 'ident_men')["veh_tot", "veh_essence", "veh_diesel"].sum()
vehicule["pourcentage_vehicule_essence"] = 0
vehicule.pourcentage_vehicule_essence.loc[vehicule.veh_tot != 0] = vehicule.veh_essence / vehicule.veh_tot
# Save in temporary store
temporary_store['automobile_{}'.format(year)] = vehicule | Compute vehicule numbers by type | Below is the instruction that describes the task:
### Input:
Compute vehicule numbers by type
### Response:
def build_homogeneisation_vehicules(temporary_store = None, year = None):
    assert temporary_store is not None
    # NOTE(review): the triple-quoted string below is not a real docstring --
    # it follows a statement, so it is evaluated and discarded; kept as-is.
    """Compute vehicule numbers by type"""
    assert year is not None
    # Load data
    bdf_survey_collection = SurveyCollection.load(
        collection = 'budget_des_familles', config_files_directory = config_files_directory)
    survey = bdf_survey_collection.get_survey('budget_des_familles_{}'.format(year))
    if year == 1995:
        vehicule = None
        # The 1995 BdF survey contains no information on the fuel type used
        # by the vehicles, so nothing can be computed for that year.
    if year == 2000:
        # Vehicle data lives in the household table; carbu01/carbu02 hold
        # the fuel codes of up to two vehicles per household.
        vehicule = survey.get_values(table = "depmen")
        kept_variables = ['ident', 'carbu01', 'carbu02']
        vehicule = vehicule[kept_variables]
        vehicule.rename(columns = {'ident': 'ident_men'}, inplace = True)
        vehicule.rename(columns = {'carbu01': 'carbu1'}, inplace = True)
        vehicule.rename(columns = {'carbu02': 'carbu2'}, inplace = True)
        vehicule["veh_tot"] = 1
        # fuel code 1 -> petrol (essence), 2 -> diesel; sum over both vehicles
        vehicule["veh_essence"] = 1 * (vehicule['carbu1'] == 1) + 1 * (vehicule['carbu2'] == 1)
        vehicule["veh_diesel"] = 1 * (vehicule['carbu1'] == 2) + 1 * (vehicule['carbu2'] == 2)
        vehicule.index = vehicule.index.astype(ident_men_dtype)
    if year == 2005:
        # From 2005 on there is one row per vehicle in a dedicated table.
        vehicule = survey.get_values(table = "automobile")
        kept_variables = ['ident_men', 'carbu']
        vehicule = vehicule[kept_variables]
        vehicule["veh_tot"] = 1
        vehicule["veh_essence"] = (vehicule['carbu'] == 1)
        vehicule["veh_diesel"] = (vehicule['carbu'] == 2)
    if year == 2011:
        # NOTE(review): the bare except silently falls back to the lowercase
        # table name when the uppercase one is absent; consider narrowing it.
        try:
            vehicule = survey.get_values(table = "AUTOMOBILE")
        except:
            vehicule = survey.get_values(table = "automobile")
        kept_variables = ['ident_me', 'carbu']
        vehicule = vehicule[kept_variables]
        vehicule.rename(columns = {'ident_me': 'ident_men'}, inplace = True)
        vehicule["veh_tot"] = 1
        vehicule["veh_essence"] = (vehicule['carbu'] == 1)
        vehicule["veh_diesel"] = (vehicule['carbu'] == 2)
    # Compute the number of cars by category and save
    if year != 1995:
        vehicule = vehicule.groupby(by = 'ident_men')["veh_tot", "veh_essence", "veh_diesel"].sum()
        vehicule["pourcentage_vehicule_essence"] = 0
        # share of petrol vehicles, only for households owning at least one car
        vehicule.pourcentage_vehicule_essence.loc[vehicule.veh_tot != 0] = vehicule.veh_essence / vehicule.veh_tot
    # Save in temporary store
    temporary_store['automobile_{}'.format(year)] = vehicule
def _set_mep_id(self, v, load=False):
"""
Setter method for mep_id, mapped from YANG variable /protocol/cfm/domain_name/ma_name/cfm_ma_sub_commands/mep/mep_id (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_mep_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mep_id() directly.
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..8191']}), is_leaf=True, yang_name="mep-id", rest_name="mep-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'DECIMAL :: <1-8191>', u'cli-run-template': u'$(.?:)', u'cli-incomplete-command': None, u'cli-hide-in-submode': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag', defining_module='brocade-dot1ag', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """mep_id must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..8191']}), is_leaf=True, yang_name="mep-id", rest_name="mep-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'DECIMAL :: <1-8191>', u'cli-run-template': u'$(.?:)', u'cli-incomplete-command': None, u'cli-hide-in-submode': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag', defining_module='brocade-dot1ag', yang_type='uint32', is_config=True)""",
})
self.__mep_id = t
if hasattr(self, '_set'):
self._set() | Setter method for mep_id, mapped from YANG variable /protocol/cfm/domain_name/ma_name/cfm_ma_sub_commands/mep/mep_id (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_mep_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mep_id() directly. | Below is the instruction that describes the task:
### Input:
Setter method for mep_id, mapped from YANG variable /protocol/cfm/domain_name/ma_name/cfm_ma_sub_commands/mep/mep_id (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_mep_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mep_id() directly.
### Response:
def _set_mep_id(self, v, load=False):
    """
    Setter method for mep_id, mapped from YANG variable /protocol/cfm/domain_name/ma_name/cfm_ma_sub_commands/mep/mep_id (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_mep_id is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_mep_id() directly.
    """
    # NOTE: appears to be an auto-generated (pyangbind-style) setter --
    # keep in sync with the YANG model rather than editing by hand.
    # mep-id is a list key: once the parent list entry exists the key must
    # not be changed directly (bypassed when load=True, i.e. deserializing).
    parent = getattr(self, "_parent", None)
    if parent is not None and load is False:
        raise AttributeError("Cannot set keys directly when" +
            " within an instantiated list")

    if hasattr(v, "_utype"):
        # unwrap an already-typed value back to its native representation
        v = v._utype(v)
    try:
        # coerce the value into the restricted uint32 subrange 1..8191
        t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..8191']}), is_leaf=True, yang_name="mep-id", rest_name="mep-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'DECIMAL :: <1-8191>', u'cli-run-template': u'$(.?:)', u'cli-incomplete-command': None, u'cli-hide-in-submode': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag', defining_module='brocade-dot1ag', yang_type='uint32', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """mep_id must be of a type compatible with uint32""",
            'defined-type': "uint32",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..8191']}), is_leaf=True, yang_name="mep-id", rest_name="mep-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'DECIMAL :: <1-8191>', u'cli-run-template': u'$(.?:)', u'cli-incomplete-command': None, u'cli-hide-in-submode': None}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag', defining_module='brocade-dot1ag', yang_type='uint32', is_config=True)""",
        })

    self.__mep_id = t
    if hasattr(self, '_set'):
        self._set()
def nodes_touching_tile(tile_id):
"""
Get a list of node coordinates touching the given tile.
:param tile_id: tile identifier, Tile.tile_id
:return: list of node coordinates touching the given tile, list(int)
"""
coord = tile_id_to_coord(tile_id)
nodes = []
for offset in _tile_node_offsets.keys():
nodes.append(coord + offset)
# logging.debug('tile_id={}, nodes touching={}'.format(tile_id, nodes))
return nodes | Get a list of node coordinates touching the given tile.
:param tile_id: tile identifier, Tile.tile_id
:return: list of node coordinates touching the given tile, list(int) | Below is the instruction that describes the task:
### Input:
Get a list of node coordinates touching the given tile.
:param tile_id: tile identifier, Tile.tile_id
:return: list of node coordinates touching the given tile, list(int)
### Response:
def nodes_touching_tile(tile_id):
    """
    Get a list of node coordinates touching the given tile.

    :param tile_id: tile identifier, Tile.tile_id
    :return: list of node coordinates touching the given tile, list(int)
    """
    coord = tile_id_to_coord(tile_id)
    # each surrounding node sits at a fixed offset from the tile's coord;
    # iterating the dict directly yields its keys (`.keys()` was redundant)
    return [coord + offset for offset in _tile_node_offsets]
def draw_polygon(
self,
*pts,
close_path=True,
stroke=None,
stroke_width=1,
stroke_dash=None,
fill=None
) -> None:
"""Draws the given polygon."""
c = self.c
c.save()
c.new_path()
for x,y in zip(*[iter(pts)]*2):
c.line_to(x, y)
if close_path:
c.close_path()
self._fill_and_stroke(stroke, stroke_width, stroke_dash, fill)
c.restore() | Draws the given polygon. | Below is the instruction that describes the task:
### Input:
Draws the given polygon.
### Response:
def draw_polygon(
    self,
    *pts,
    close_path=True,
    stroke=None,
    stroke_width=1,
    stroke_dash=None,
    fill=None
) -> None:
    """Draw a polygon through the flat list of x, y coordinates in *pts*."""
    ctx = self.c
    ctx.save()
    ctx.new_path()
    coords = iter(pts)
    # zipping one iterator with itself consumes pts pairwise: (x0, y0), (x1, y1), ...
    for px, py in zip(coords, coords):
        ctx.line_to(px, py)
    if close_path:
        ctx.close_path()
    self._fill_and_stroke(stroke, stroke_width, stroke_dash, fill)
    ctx.restore()
def get_queries(self, project, expand=None, depth=None, include_deleted=None):
"""GetQueries.
[Preview API] Gets the root queries and their children
:param str project: Project ID or project name
:param str expand: Include the query string (wiql), clauses, query result columns, and sort options in the results.
:param int depth: In the folder of queries, return child queries and folders to this depth.
:param bool include_deleted: Include deleted queries and folders
:rtype: [QueryHierarchyItem]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
if depth is not None:
query_parameters['$depth'] = self._serialize.query('depth', depth, 'int')
if include_deleted is not None:
query_parameters['$includeDeleted'] = self._serialize.query('include_deleted', include_deleted, 'bool')
response = self._send(http_method='GET',
location_id='a67d190c-c41f-424b-814d-0e906f659301',
version='5.1-preview.2',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[QueryHierarchyItem]', self._unwrap_collection(response)) | GetQueries.
[Preview API] Gets the root queries and their children
:param str project: Project ID or project name
:param str expand: Include the query string (wiql), clauses, query result columns, and sort options in the results.
:param int depth: In the folder of queries, return child queries and folders to this depth.
:param bool include_deleted: Include deleted queries and folders
:rtype: [QueryHierarchyItem] | Below is the instruction that describes the task:
### Input:
GetQueries.
[Preview API] Gets the root queries and their children
:param str project: Project ID or project name
:param str expand: Include the query string (wiql), clauses, query result columns, and sort options in the results.
:param int depth: In the folder of queries, return child queries and folders to this depth.
:param bool include_deleted: Include deleted queries and folders
:rtype: [QueryHierarchyItem]
### Response:
def get_queries(self, project, expand=None, depth=None, include_deleted=None):
    """GetQueries.
    [Preview API] Gets the root queries and their children
    :param str project: Project ID or project name
    :param str expand: Include the query string (wiql), clauses, query result columns, and sort options in the results.
    :param int depth: In the folder of queries, return child queries and folders to this depth.
    :param bool include_deleted: Include deleted queries and folders
    :rtype: [QueryHierarchyItem]
    """
    # URL path ("route") values: only the project segment is templated.
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    # Optional query-string parameters; each is omitted entirely when None.
    query_parameters = {}
    if expand is not None:
        query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
    if depth is not None:
        query_parameters['$depth'] = self._serialize.query('depth', depth, 'int')
    if include_deleted is not None:
        query_parameters['$includeDeleted'] = self._serialize.query('include_deleted', include_deleted, 'bool')
    # location_id identifies the Queries REST endpoint; version pins the preview API.
    response = self._send(http_method='GET',
                          location_id='a67d190c-c41f-424b-814d-0e906f659301',
                          version='5.1-preview.2',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('[QueryHierarchyItem]', self._unwrap_collection(response))
def find_time_base(self, gps, first_ms_stamp):
'''work out time basis for the log - new style'''
t = self._gpsTimeToTime(gps.Week, gps.TimeMS)
self.set_timebase(t - gps.T*0.001)
self.timestamp = self.timebase + first_ms_stamp*0.001 | work out time basis for the log - new style | Below is the instruction that describes the task:
### Input:
work out time basis for the log - new style
### Response:
def find_time_base(self, gps, first_ms_stamp):
    '''work out time basis for the log - new style'''
    # absolute GPS time of the reference message
    gps_abs = self._gpsTimeToTime(gps.Week, gps.TimeMS)
    # anchor the timebase so gps.T (milliseconds since boot) lines up with it
    self.set_timebase(gps_abs - 0.001 * gps.T)
    self.timestamp = self.timebase + 0.001 * first_ms_stamp
def make_processor(self, name, mappings, processor_type, **kwargs):
"""
Instantiates a RmlProcessor and registers it in the manager
Args:
-----
name: the name to register the processor
mappings: the list RML mapping definitions to use
processor_type: the name of the RML processor to use
"""
from .processor import Processor
if self.processors.get(name):
raise LookupError("processor has already been created")
if isinstance(mappings, list):
mappings = [self.get_rml(item) for item in mappings]
else:
mappings = [self.get_rml(mappings)]
self.processors[name] = Processor[processor_type](mappings, **kwargs)
self.processors[name].name = name
return self.processors[name] | Instantiates a RmlProcessor and registers it in the manager
Args:
-----
name: the name to register the processor
mappings: the list RML mapping definitions to use
processor_type: the name of the RML processor to use | Below is the instruction that describes the task:
### Input:
Instantiates a RmlProcessor and registers it in the manager
Args:
-----
name: the name to register the processor
mappings: the list RML mapping definitions to use
processor_type: the name of the RML processor to use
### Response:
def make_processor(self, name, mappings, processor_type, **kwargs):
    """
    Instantiates a RmlProcessor and registers it in the manager

    Args:
    -----
        name: the name to register the processor
        mappings: the list RML mapping definitions to use
        processor_type: the name of the RML processor to use
        **kwargs: passed through unchanged to the processor constructor

    Returns:
    --------
        the newly created and registered processor instance

    Raises:
    -------
        LookupError: if a processor is already registered under ``name``
    """
    # local import -- presumably to avoid a circular import at module load
    # time; TODO confirm against the package layout
    from .processor import Processor
    if self.processors.get(name):
        raise LookupError("processor has already been created")
    # accept either a list of mapping names or a single one
    if isinstance(mappings, list):
        mappings = [self.get_rml(item) for item in mappings]
    else:
        mappings = [self.get_rml(mappings)]
    # Processor is subscripted by processor_type to select the concrete class
    self.processors[name] = Processor[processor_type](mappings, **kwargs)
    self.processors[name].name = name
    return self.processors[name]
def Email(v):
"""Verify that the value is an Email or not.
>>> s = Schema(Email())
>>> with raises(MultipleInvalid, 'expected an Email'):
... s("a.com")
>>> with raises(MultipleInvalid, 'expected an Email'):
... s("a@.com")
>>> with raises(MultipleInvalid, 'expected an Email'):
... s("a@.com")
>>> s('t@x.com')
't@x.com'
"""
try:
if not v or "@" not in v:
raise EmailInvalid("Invalid Email")
user_part, domain_part = v.rsplit('@', 1)
if not (USER_REGEX.match(user_part) and DOMAIN_REGEX.match(domain_part)):
raise EmailInvalid("Invalid Email")
return v
except:
raise ValueError | Verify that the value is an Email or not.
>>> s = Schema(Email())
>>> with raises(MultipleInvalid, 'expected an Email'):
... s("a.com")
>>> with raises(MultipleInvalid, 'expected an Email'):
... s("a@.com")
>>> with raises(MultipleInvalid, 'expected an Email'):
... s("a@.com")
>>> s('t@x.com')
't@x.com' | Below is the instruction that describes the task:
### Input:
Verify that the value is an Email or not.
>>> s = Schema(Email())
>>> with raises(MultipleInvalid, 'expected an Email'):
... s("a.com")
>>> with raises(MultipleInvalid, 'expected an Email'):
... s("a@.com")
>>> with raises(MultipleInvalid, 'expected an Email'):
... s("a@.com")
>>> s('t@x.com')
't@x.com'
### Response:
def Email(v):
    """Verify that the value is an Email or not.

    >>> s = Schema(Email())
    >>> with raises(MultipleInvalid, 'expected an Email'):
    ...   s("a.com")
    >>> with raises(MultipleInvalid, 'expected an Email'):
    ...   s("a@.com")
    >>> with raises(MultipleInvalid, 'expected an Email'):
    ...   s("a@.com")
    >>> s('t@x.com')
    't@x.com'
    """
    try:
        if not v or "@" not in v:
            raise EmailInvalid("Invalid Email")
        user_part, domain_part = v.rsplit('@', 1)
        if not (USER_REGEX.match(user_part) and DOMAIN_REGEX.match(domain_part)):
            raise EmailInvalid("Invalid Email")
        return v
    # `except Exception` instead of the original bare `except:` so that
    # SystemExit/KeyboardInterrupt are no longer swallowed; any validation
    # failure is still surfaced to the schema machinery as ValueError.
    except Exception:
        raise ValueError
def smoothing_cross_entropy_factored_grad(op, dy):
"""Gradient function for smoothing_cross_entropy_factored."""
a = op.inputs[0]
b = op.inputs[1]
labels = op.inputs[2]
confidence = op.inputs[3]
num_splits = 16
vocab_size = shape_list(b)[0]
labels = approximate_split(labels, num_splits)
a = approximate_split(a, num_splits)
dy = approximate_split(dy, num_splits)
b_grad = None
a_grad_parts = []
deps = []
for part in range(num_splits):
with tf.control_dependencies(deps):
logits = tf.matmul(a[part], b, transpose_b=True)
output_part = smoothing_cross_entropy(logits, labels[part], vocab_size,
confidence)
a_grad_part, b_grad_part = tf.gradients(
ys=[output_part], xs=[a[part], b], grad_ys=[dy[part]])
a_grad_parts.append(a_grad_part)
if part > 0:
b_grad += b_grad_part
else:
b_grad = b_grad_part
deps = [b_grad, a_grad_part]
a_grad = tf.concat(a_grad_parts, 0)
return a_grad, b_grad, None, None | Gradient function for smoothing_cross_entropy_factored. | Below is the instruction that describes the task:
### Input:
Gradient function for smoothing_cross_entropy_factored.
### Response:
def smoothing_cross_entropy_factored_grad(op, dy):
    """Gradient function for smoothing_cross_entropy_factored."""
    # op.inputs: activations a, factored weight matrix b, labels, and the
    # label-smoothing confidence used in the forward pass
    a = op.inputs[0]
    b = op.inputs[1]
    labels = op.inputs[2]
    confidence = op.inputs[3]
    # process the batch in 16 chunks -- presumably to bound the peak memory
    # of the (chunk x vocab) logits tensor; TODO confirm
    num_splits = 16
    vocab_size = shape_list(b)[0]
    labels = approximate_split(labels, num_splits)
    a = approximate_split(a, num_splits)
    dy = approximate_split(dy, num_splits)
    b_grad = None
    a_grad_parts = []
    deps = []
    for part in range(num_splits):
        # chaining each chunk on the previous chunk's outputs forces the
        # chunks to execute sequentially rather than all at once
        with tf.control_dependencies(deps):
            logits = tf.matmul(a[part], b, transpose_b=True)
            output_part = smoothing_cross_entropy(logits, labels[part], vocab_size,
                                                  confidence)
            a_grad_part, b_grad_part = tf.gradients(
                ys=[output_part], xs=[a[part], b], grad_ys=[dy[part]])
            a_grad_parts.append(a_grad_part)
            # the weight gradient is shared across chunks, so accumulate it
            if part > 0:
                b_grad += b_grad_part
            else:
                b_grad = b_grad_part
            deps = [b_grad, a_grad_part]
    a_grad = tf.concat(a_grad_parts, 0)
    # no gradients flow to the labels or the confidence scalar
    return a_grad, b_grad, None, None
def get_unused_list_annotation_values(graph) -> Mapping[str, Set[str]]:
"""Get all of the unused values for list annotations.
:param pybel.BELGraph graph: A BEL graph
:return: A dictionary of {str annotation: set of str values that aren't used}
"""
result = {}
for annotation, values in graph.annotation_list.items():
used_values = get_annotation_values(graph, annotation)
if len(used_values) == len(values): # all values have been used
continue
result[annotation] = set(values) - used_values
return result | Get all of the unused values for list annotations.
:param pybel.BELGraph graph: A BEL graph
:return: A dictionary of {str annotation: set of str values that aren't used} | Below is the instruction that describes the task:
### Input:
Get all of the unused values for list annotations.
:param pybel.BELGraph graph: A BEL graph
:return: A dictionary of {str annotation: set of str values that aren't used}
### Response:
def get_unused_list_annotation_values(graph) -> Mapping[str, Set[str]]:
    """Get all of the unused values for list annotations.

    :param pybel.BELGraph graph: A BEL graph
    :return: A dictionary of {str annotation: set of str values that aren't used}
    """
    unused = {}
    for annotation, allowed in graph.annotation_list.items():
        seen = get_annotation_values(graph, annotation)
        # skip annotations whose every declared value appears in the graph
        if len(seen) != len(allowed):
            unused[annotation] = set(allowed) - seen
    return unused
def _getMostActiveCells(self):
"""
Gets the most active cells in the Union SDR having at least non-zero
activation in sorted order.
@return: a list of cell indices
"""
poolingActivation = self._poolingActivation
nonZeroCells = numpy.argwhere(poolingActivation > 0)[:,0]
# include a tie-breaker before sorting
poolingActivationSubset = poolingActivation[nonZeroCells] + \
self._poolingActivation_tieBreaker[nonZeroCells]
potentialUnionSDR = nonZeroCells[numpy.argsort(poolingActivationSubset)[::-1]]
topCells = potentialUnionSDR[0: self._maxUnionCells]
if max(self._poolingTimer) > self._minHistory:
self._unionSDR = numpy.sort(topCells).astype(UINT_DTYPE)
else:
self._unionSDR = []
return self._unionSDR | Gets the most active cells in the Union SDR having at least non-zero
activation in sorted order.
@return: a list of cell indices | Below is the instruction that describes the task:
### Input:
Gets the most active cells in the Union SDR having at least non-zero
activation in sorted order.
@return: a list of cell indices
### Response:
def _getMostActiveCells(self):
"""
Gets the most active cells in the Union SDR having at least non-zero
activation in sorted order.
@return: a list of cell indices
"""
poolingActivation = self._poolingActivation
nonZeroCells = numpy.argwhere(poolingActivation > 0)[:,0]
# include a tie-breaker before sorting
poolingActivationSubset = poolingActivation[nonZeroCells] + \
self._poolingActivation_tieBreaker[nonZeroCells]
potentialUnionSDR = nonZeroCells[numpy.argsort(poolingActivationSubset)[::-1]]
topCells = potentialUnionSDR[0: self._maxUnionCells]
if max(self._poolingTimer) > self._minHistory:
self._unionSDR = numpy.sort(topCells).astype(UINT_DTYPE)
else:
self._unionSDR = []
return self._unionSDR |
def _hangul_char_to_jamo(syllable):
"""Return a 3-tuple of lead, vowel, and tail jamo characters.
Note: Non-Hangul characters are echoed back.
"""
if is_hangul_char(syllable):
rem = ord(syllable) - _JAMO_OFFSET
tail = rem % 28
vowel = 1 + ((rem - tail) % 588) // 28
lead = 1 + rem // 588
if tail:
return (chr(lead + _JAMO_LEAD_OFFSET),
chr(vowel + _JAMO_VOWEL_OFFSET),
chr(tail + _JAMO_TAIL_OFFSET))
else:
return (chr(lead + _JAMO_LEAD_OFFSET),
chr(vowel + _JAMO_VOWEL_OFFSET))
else:
return syllable | Return a 3-tuple of lead, vowel, and tail jamo characters.
Note: Non-Hangul characters are echoed back. | Below is the instruction that describes the task:
### Input:
Return a 3-tuple of lead, vowel, and tail jamo characters.
Note: Non-Hangul characters are echoed back.
### Response:
def _hangul_char_to_jamo(syllable):
    """Return a 3-tuple of lead, vowel, and tail jamo characters.

    Note: Non-Hangul characters are echoed back.
    """
    if not is_hangul_char(syllable):
        return syllable

    # decompose the precomposed syllable code point
    offset = ord(syllable) - _JAMO_OFFSET
    tail = offset % 28
    vowel = 1 + ((offset - tail) % 588) // 28
    lead = 1 + offset // 588

    lead_ch = chr(lead + _JAMO_LEAD_OFFSET)
    vowel_ch = chr(vowel + _JAMO_VOWEL_OFFSET)
    if tail:
        return (lead_ch, vowel_ch, chr(tail + _JAMO_TAIL_OFFSET))
    # tail == 0 means no final consonant
    return (lead_ch, vowel_ch)
def _isInt(x, precision = 0.0001):
"""
Return (isInt, intValue) for a given floating point number.
Parameters:
----------------------------------------------------------------------
x: floating point number to evaluate
precision: desired precision
retval: (isInt, intValue)
isInt: True if x is close enough to an integer value
intValue: x as an integer
"""
xInt = int(round(x))
return (abs(x - xInt) < precision * x, xInt) | Return (isInt, intValue) for a given floating point number.
Parameters:
----------------------------------------------------------------------
x: floating point number to evaluate
precision: desired precision
retval: (isInt, intValue)
isInt: True if x is close enough to an integer value
intValue: x as an integer | Below is the instruction that describes the task:
### Input:
Return (isInt, intValue) for a given floating point number.
Parameters:
----------------------------------------------------------------------
x: floating point number to evaluate
precision: desired precision
retval: (isInt, intValue)
isInt: True if x is close enough to an integer value
intValue: x as an integer
### Response:
def _isInt(x, precision = 0.0001):
"""
Return (isInt, intValue) for a given floating point number.
Parameters:
----------------------------------------------------------------------
x: floating point number to evaluate
precision: desired precision
retval: (isInt, intValue)
isInt: True if x is close enough to an integer value
intValue: x as an integer
"""
xInt = int(round(x))
return (abs(x - xInt) < precision * x, xInt) |
def ReadGRRUser(self, username, cursor=None):
"""Reads a user object corresponding to a given name."""
cursor.execute(
"SELECT username, password, ui_mode, canary_mode, user_type "
"FROM grr_users WHERE username_hash = %s", [mysql_utils.Hash(username)])
row = cursor.fetchone()
if row is None:
raise db.UnknownGRRUserError(username)
return self._RowToGRRUser(row) | Reads a user object corresponding to a given name. | Below is the instruction that describes the task:
### Input:
Reads a user object corresponding to a given name.
### Response:
def ReadGRRUser(self, username, cursor=None):
    """Reads a user object corresponding to a given name.

    Args:
      username: name of the user to look up.
      cursor: MySQL cursor; presumably injected by a connection decorator
        elsewhere in this class -- TODO confirm.

    Returns:
      The user object built from the matching row.

    Raises:
      db.UnknownGRRUserError: if no user with that name exists.
    """
    # rows are keyed by a hash of the username, not the raw string
    cursor.execute(
        "SELECT username, password, ui_mode, canary_mode, user_type "
        "FROM grr_users WHERE username_hash = %s", [mysql_utils.Hash(username)])
    row = cursor.fetchone()
    if row is None:
        raise db.UnknownGRRUserError(username)
    return self._RowToGRRUser(row)
def post(self, data):
"""
Posts message with payload formatted in accordance with
this documentation https://api.slack.com/incoming-webhooks
"""
if not self.url:
raise Error('URL for incoming webhook is undefined')
return requests.post(self.url, data=json.dumps(data),
timeout=self.timeout, proxies=self.proxies) | Posts message with payload formatted in accordance with
this documentation https://api.slack.com/incoming-webhooks | Below is the instruction that describes the task:
### Input:
Posts message with payload formatted in accordance with
this documentation https://api.slack.com/incoming-webhooks
### Response:
def post(self, data):
    """Post *data* as a JSON payload to the configured Slack incoming
    webhook (payload format per https://api.slack.com/incoming-webhooks).
    """
    if not self.url:
        raise Error('URL for incoming webhook is undefined')
    payload = json.dumps(data)
    return requests.post(
        self.url,
        data=payload,
        timeout=self.timeout,
        proxies=self.proxies,
    )
def Approval(self, username, approval_id):
"""Returns a reference to an approval."""
return ClientApprovalRef(
client_id=self.client_id,
username=username,
approval_id=approval_id,
context=self._context) | Returns a reference to an approval. | Below is the instruction that describes the task:
### Input:
Returns a reference to an approval.
### Response:
def Approval(self, username, approval_id):
    """Return a reference object for the given client approval."""
    ref = ClientApprovalRef(
        client_id=self.client_id,
        approval_id=approval_id,
        username=username,
        context=self._context)
    return ref
def main():
"""Main method for running upsidedown.py from the command line."""
import sys
output = []
line = sys.stdin.readline()
while line:
line = line.strip("\n")
output.append(transform(line))
line = sys.stdin.readline()
output.reverse()
print("\n".join(output)) | Main method for running upsidedown.py from the command line. | Below is the instruction that describes the task:
### Input:
Main method for running upsidedown.py from the command line.
### Response:
def main():
    """Main method for running upsidedown.py from the command line."""
    import sys
    # flip every stdin line, then emit them in reverse order so the
    # whole text reads upside down
    flipped = [transform(raw.strip("\n")) for raw in sys.stdin]
    print("\n".join(reversed(flipped)))
def get_event(self, timeout=None, block=True):
"""
Fetch the next available :class:`Event` from any source, or raise
:class:`mitogen.core.TimeoutError` if no value is available within
`timeout` seconds.
On success, the message's :attr:`receiver
<mitogen.core.Message.receiver>` attribute is set to the receiver.
:param float timeout:
Timeout in seconds.
:param bool block:
If :data:`False`, immediately raise
:class:`mitogen.core.TimeoutError` if the select is empty.
:return:
:class:`Event`.
:raises mitogen.core.TimeoutError:
Timeout was reached.
:raises mitogen.core.LatchError:
:meth:`close` has been called, and the underlying latch is no
longer valid.
"""
if not self._receivers:
raise Error(self.empty_msg)
event = Event()
while True:
recv = self._latch.get(timeout=timeout, block=block)
try:
if isinstance(recv, Select):
event = recv.get_event(block=False)
else:
event.source = recv
event.data = recv.get(block=False)
if self._oneshot:
self.remove(recv)
if isinstance(recv, mitogen.core.Receiver):
# Remove in 0.3.x.
event.data.receiver = recv
return event
except mitogen.core.TimeoutError:
# A receiver may have been queued with no result if another
# thread drained it before we woke up, or because another
# thread drained it between add() calling recv.empty() and
# self._put(). In this case just sleep again.
continue | Fetch the next available :class:`Event` from any source, or raise
:class:`mitogen.core.TimeoutError` if no value is available within
`timeout` seconds.
On success, the message's :attr:`receiver
<mitogen.core.Message.receiver>` attribute is set to the receiver.
:param float timeout:
Timeout in seconds.
:param bool block:
If :data:`False`, immediately raise
:class:`mitogen.core.TimeoutError` if the select is empty.
:return:
:class:`Event`.
:raises mitogen.core.TimeoutError:
Timeout was reached.
:raises mitogen.core.LatchError:
:meth:`close` has been called, and the underlying latch is no
longer valid. | Below is the the instruction that describes the task:
### Input:
Fetch the next available :class:`Event` from any source, or raise
:class:`mitogen.core.TimeoutError` if no value is available within
`timeout` seconds.
On success, the message's :attr:`receiver
<mitogen.core.Message.receiver>` attribute is set to the receiver.
:param float timeout:
Timeout in seconds.
:param bool block:
If :data:`False`, immediately raise
:class:`mitogen.core.TimeoutError` if the select is empty.
:return:
:class:`Event`.
:raises mitogen.core.TimeoutError:
Timeout was reached.
:raises mitogen.core.LatchError:
:meth:`close` has been called, and the underlying latch is no
longer valid.
### Response:
def get_event(self, timeout=None, block=True):
"""
Fetch the next available :class:`Event` from any source, or raise
:class:`mitogen.core.TimeoutError` if no value is available within
`timeout` seconds.
On success, the message's :attr:`receiver
<mitogen.core.Message.receiver>` attribute is set to the receiver.
:param float timeout:
Timeout in seconds.
:param bool block:
If :data:`False`, immediately raise
:class:`mitogen.core.TimeoutError` if the select is empty.
:return:
:class:`Event`.
:raises mitogen.core.TimeoutError:
Timeout was reached.
:raises mitogen.core.LatchError:
:meth:`close` has been called, and the underlying latch is no
longer valid.
"""
if not self._receivers:
raise Error(self.empty_msg)
event = Event()
while True:
recv = self._latch.get(timeout=timeout, block=block)
try:
if isinstance(recv, Select):
event = recv.get_event(block=False)
else:
event.source = recv
event.data = recv.get(block=False)
if self._oneshot:
self.remove(recv)
if isinstance(recv, mitogen.core.Receiver):
# Remove in 0.3.x.
event.data.receiver = recv
return event
except mitogen.core.TimeoutError:
# A receiver may have been queued with no result if another
# thread drained it before we woke up, or because another
# thread drained it between add() calling recv.empty() and
# self._put(). In this case just sleep again.
continue |
def elem_find(self, field, value):
"""
Return the indices of elements whose field first satisfies the given values
``value`` should be unique in self.field.
This function does not check the uniqueness.
:param field: name of the supplied field
:param value: value of field of the elemtn to find
:return: idx of the elements
:rtype: list, int, float, str
"""
if isinstance(value, (int, float, str)):
value = [value]
f = list(self.__dict__[field])
uid = np.vectorize(f.index)(value)
return self.get_idx(uid) | Return the indices of elements whose field first satisfies the given values
``value`` should be unique in self.field.
This function does not check the uniqueness.
:param field: name of the supplied field
:param value: value of field of the elemtn to find
:return: idx of the elements
:rtype: list, int, float, str | Below is the the instruction that describes the task:
### Input:
Return the indices of elements whose field first satisfies the given values
``value`` should be unique in self.field.
This function does not check the uniqueness.
:param field: name of the supplied field
:param value: value of field of the elemtn to find
:return: idx of the elements
:rtype: list, int, float, str
### Response:
def elem_find(self, field, value):
"""
Return the indices of elements whose field first satisfies the given values
``value`` should be unique in self.field.
This function does not check the uniqueness.
:param field: name of the supplied field
:param value: value of field of the elemtn to find
:return: idx of the elements
:rtype: list, int, float, str
"""
if isinstance(value, (int, float, str)):
value = [value]
f = list(self.__dict__[field])
uid = np.vectorize(f.index)(value)
return self.get_idx(uid) |
def get_balance(self, asset_hash, id=None, endpoint=None):
"""
Get balance by asset hash
Args:
asset_hash: (str) asset to lookup, example would be 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b'
id: (int, optional) id to use for response tracking
endpoint: (RPCEndpoint, optional) endpoint to specify to use
Returns:
json object of the result or the error encountered in the RPC call
"""
return self._call_endpoint(GET_BALANCE, params=[asset_hash], id=id, endpoint=endpoint) | Get balance by asset hash
Args:
asset_hash: (str) asset to lookup, example would be 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b'
id: (int, optional) id to use for response tracking
endpoint: (RPCEndpoint, optional) endpoint to specify to use
Returns:
json object of the result or the error encountered in the RPC call | Below is the the instruction that describes the task:
### Input:
Get balance by asset hash
Args:
asset_hash: (str) asset to lookup, example would be 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b'
id: (int, optional) id to use for response tracking
endpoint: (RPCEndpoint, optional) endpoint to specify to use
Returns:
json object of the result or the error encountered in the RPC call
### Response:
def get_balance(self, asset_hash, id=None, endpoint=None):
"""
Get balance by asset hash
Args:
asset_hash: (str) asset to lookup, example would be 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b'
id: (int, optional) id to use for response tracking
endpoint: (RPCEndpoint, optional) endpoint to specify to use
Returns:
json object of the result or the error encountered in the RPC call
"""
return self._call_endpoint(GET_BALANCE, params=[asset_hash], id=id, endpoint=endpoint) |
def write(self, *text, sep=' '):
"""
Write text to response
:param text:
:param sep:
:return:
"""
self.text += markdown.text(*text, sep)
return self | Write text to response
:param text:
:param sep:
:return: | Below is the the instruction that describes the task:
### Input:
Write text to response
:param text:
:param sep:
:return:
### Response:
def write(self, *text, sep=' '):
"""
Write text to response
:param text:
:param sep:
:return:
"""
self.text += markdown.text(*text, sep)
return self |
def _prepare_dict_inputs(inputs, tensor_info_map):
"""Converts inputs to a dict of inputs and checks extra/missing args.
Args:
inputs: inputs fed to Module.__call__().
tensor_info_map: A map from string to `tensor_info.ParsedTensorInfo`
describing the signature inputs.
Returns:
A dict of values with the same keys as tensor_info_map.
Raises:
TypeError: If it fails to convert the input values into a dict of tensors
to feed to the signature instantiation.
"""
if inputs is None:
dict_inputs = {}
elif isinstance(inputs, dict):
dict_inputs = inputs
elif len(tensor_info_map) == 1:
dict_inputs = {list(tensor_info_map.keys())[0]: inputs}
elif not tensor_info_map:
raise TypeError("Signature expects no inputs.")
else:
raise TypeError("Signature expects multiple inputs. Use a dict.")
dict_inputs_keys = set(dict_inputs.keys())
tensor_info_map_keys = set(tensor_info_map.keys())
if dict_inputs_keys != tensor_info_map_keys:
raise TypeError("Cannot convert dict_inputs: missing %r, extra given %r" %
(sorted(list(tensor_info_map_keys - dict_inputs_keys)),
sorted(list(dict_inputs_keys - tensor_info_map_keys))))
return dict_inputs | Converts inputs to a dict of inputs and checks extra/missing args.
Args:
inputs: inputs fed to Module.__call__().
tensor_info_map: A map from string to `tensor_info.ParsedTensorInfo`
describing the signature inputs.
Returns:
A dict of values with the same keys as tensor_info_map.
Raises:
TypeError: If it fails to convert the input values into a dict of tensors
to feed to the signature instantiation. | Below is the the instruction that describes the task:
### Input:
Converts inputs to a dict of inputs and checks extra/missing args.
Args:
inputs: inputs fed to Module.__call__().
tensor_info_map: A map from string to `tensor_info.ParsedTensorInfo`
describing the signature inputs.
Returns:
A dict of values with the same keys as tensor_info_map.
Raises:
TypeError: If it fails to convert the input values into a dict of tensors
to feed to the signature instantiation.
### Response:
def _prepare_dict_inputs(inputs, tensor_info_map):
"""Converts inputs to a dict of inputs and checks extra/missing args.
Args:
inputs: inputs fed to Module.__call__().
tensor_info_map: A map from string to `tensor_info.ParsedTensorInfo`
describing the signature inputs.
Returns:
A dict of values with the same keys as tensor_info_map.
Raises:
TypeError: If it fails to convert the input values into a dict of tensors
to feed to the signature instantiation.
"""
if inputs is None:
dict_inputs = {}
elif isinstance(inputs, dict):
dict_inputs = inputs
elif len(tensor_info_map) == 1:
dict_inputs = {list(tensor_info_map.keys())[0]: inputs}
elif not tensor_info_map:
raise TypeError("Signature expects no inputs.")
else:
raise TypeError("Signature expects multiple inputs. Use a dict.")
dict_inputs_keys = set(dict_inputs.keys())
tensor_info_map_keys = set(tensor_info_map.keys())
if dict_inputs_keys != tensor_info_map_keys:
raise TypeError("Cannot convert dict_inputs: missing %r, extra given %r" %
(sorted(list(tensor_info_map_keys - dict_inputs_keys)),
sorted(list(dict_inputs_keys - tensor_info_map_keys))))
return dict_inputs |
def _page_gen(self):
"""
Generates The String for pages
"""
track = ""
for page in self.__pages__:
track += "/{page}".format(page=page)
return track | Generates The String for pages | Below is the the instruction that describes the task:
### Input:
Generates The String for pages
### Response:
def _page_gen(self):
"""
Generates The String for pages
"""
track = ""
for page in self.__pages__:
track += "/{page}".format(page=page)
return track |
def set_xattr(self, path, xattr_name, xattr_value, flag, **kwargs):
"""Set an xattr of a file or directory.
:param xattr_name: The name must be prefixed with the namespace followed by ``.``. For
example, ``user.attr``.
:param flag: ``CREATE`` or ``REPLACE``
"""
kwargs['xattr.name'] = xattr_name
kwargs['xattr.value'] = xattr_value
response = self._put(path, 'SETXATTR', flag=flag, **kwargs)
assert not response.content | Set an xattr of a file or directory.
:param xattr_name: The name must be prefixed with the namespace followed by ``.``. For
example, ``user.attr``.
:param flag: ``CREATE`` or ``REPLACE`` | Below is the the instruction that describes the task:
### Input:
Set an xattr of a file or directory.
:param xattr_name: The name must be prefixed with the namespace followed by ``.``. For
example, ``user.attr``.
:param flag: ``CREATE`` or ``REPLACE``
### Response:
def set_xattr(self, path, xattr_name, xattr_value, flag, **kwargs):
"""Set an xattr of a file or directory.
:param xattr_name: The name must be prefixed with the namespace followed by ``.``. For
example, ``user.attr``.
:param flag: ``CREATE`` or ``REPLACE``
"""
kwargs['xattr.name'] = xattr_name
kwargs['xattr.value'] = xattr_value
response = self._put(path, 'SETXATTR', flag=flag, **kwargs)
assert not response.content |
def filterResults(allResults, reportKeys, optimizeKey=None):
""" Given the complete set of results generated by an experiment (passed in
'results'), filter out and return only the ones the caller wants, as
specified through 'reportKeys' and 'optimizeKey'.
A report key is a string of key names separated by colons, each key being one
level deeper into the experiment results dict. For example, 'key1:key2'.
Parameters:
-------------------------------------------------------------------------
results: dict of all results generated by an experiment
reportKeys: list of items from the results dict to include in
the report. These can be regular expressions.
optimizeKey: Which report item, if any, we will be optimizing for. This can
also be a regular expression, but is an error if it matches
more than one key from the experiment's results.
retval: (reportDict, optimizeDict)
reportDict: a dictionary of the metrics named by desiredReportKeys
optimizeDict: A dictionary containing 1 item: the full name and
value of the metric identified by the optimizeKey
"""
# Init return values
optimizeDict = dict()
# Get all available report key names for this experiment
allReportKeys = set()
_appendReportKeys(keys=allReportKeys, prefix='', results=allResults)
#----------------------------------------------------------------------------
# Extract the report items that match the regular expressions passed in reportKeys
matchingKeys = _matchReportKeys(reportKeys, allReportKeys)
# Extract the values of the desired items
reportDict = dict()
for keyName in matchingKeys:
value = _getReportItem(keyName, allResults)
reportDict[keyName] = value
# -------------------------------------------------------------------------
# Extract the report item that matches the regular expression passed in
# optimizeKey
if optimizeKey is not None:
matchingKeys = _matchReportKeys([optimizeKey], allReportKeys)
if len(matchingKeys) == 0:
raise _BadKeyError(optimizeKey)
elif len(matchingKeys) > 1:
raise _BadOptimizeKeyError(optimizeKey, matchingKeys)
optimizeKeyFullName = matchingKeys[0]
# Get the value of the optimize metric
value = _getReportItem(optimizeKeyFullName, allResults)
optimizeDict[optimizeKeyFullName] = value
reportDict[optimizeKeyFullName] = value
# Return info
return(reportDict, optimizeDict) | Given the complete set of results generated by an experiment (passed in
'results'), filter out and return only the ones the caller wants, as
specified through 'reportKeys' and 'optimizeKey'.
A report key is a string of key names separated by colons, each key being one
level deeper into the experiment results dict. For example, 'key1:key2'.
Parameters:
-------------------------------------------------------------------------
results: dict of all results generated by an experiment
reportKeys: list of items from the results dict to include in
the report. These can be regular expressions.
optimizeKey: Which report item, if any, we will be optimizing for. This can
also be a regular expression, but is an error if it matches
more than one key from the experiment's results.
retval: (reportDict, optimizeDict)
reportDict: a dictionary of the metrics named by desiredReportKeys
optimizeDict: A dictionary containing 1 item: the full name and
value of the metric identified by the optimizeKey | Below is the the instruction that describes the task:
### Input:
Given the complete set of results generated by an experiment (passed in
'results'), filter out and return only the ones the caller wants, as
specified through 'reportKeys' and 'optimizeKey'.
A report key is a string of key names separated by colons, each key being one
level deeper into the experiment results dict. For example, 'key1:key2'.
Parameters:
-------------------------------------------------------------------------
results: dict of all results generated by an experiment
reportKeys: list of items from the results dict to include in
the report. These can be regular expressions.
optimizeKey: Which report item, if any, we will be optimizing for. This can
also be a regular expression, but is an error if it matches
more than one key from the experiment's results.
retval: (reportDict, optimizeDict)
reportDict: a dictionary of the metrics named by desiredReportKeys
optimizeDict: A dictionary containing 1 item: the full name and
value of the metric identified by the optimizeKey
### Response:
def filterResults(allResults, reportKeys, optimizeKey=None):
""" Given the complete set of results generated by an experiment (passed in
'results'), filter out and return only the ones the caller wants, as
specified through 'reportKeys' and 'optimizeKey'.
A report key is a string of key names separated by colons, each key being one
level deeper into the experiment results dict. For example, 'key1:key2'.
Parameters:
-------------------------------------------------------------------------
results: dict of all results generated by an experiment
reportKeys: list of items from the results dict to include in
the report. These can be regular expressions.
optimizeKey: Which report item, if any, we will be optimizing for. This can
also be a regular expression, but is an error if it matches
more than one key from the experiment's results.
retval: (reportDict, optimizeDict)
reportDict: a dictionary of the metrics named by desiredReportKeys
optimizeDict: A dictionary containing 1 item: the full name and
value of the metric identified by the optimizeKey
"""
# Init return values
optimizeDict = dict()
# Get all available report key names for this experiment
allReportKeys = set()
_appendReportKeys(keys=allReportKeys, prefix='', results=allResults)
#----------------------------------------------------------------------------
# Extract the report items that match the regular expressions passed in reportKeys
matchingKeys = _matchReportKeys(reportKeys, allReportKeys)
# Extract the values of the desired items
reportDict = dict()
for keyName in matchingKeys:
value = _getReportItem(keyName, allResults)
reportDict[keyName] = value
# -------------------------------------------------------------------------
# Extract the report item that matches the regular expression passed in
# optimizeKey
if optimizeKey is not None:
matchingKeys = _matchReportKeys([optimizeKey], allReportKeys)
if len(matchingKeys) == 0:
raise _BadKeyError(optimizeKey)
elif len(matchingKeys) > 1:
raise _BadOptimizeKeyError(optimizeKey, matchingKeys)
optimizeKeyFullName = matchingKeys[0]
# Get the value of the optimize metric
value = _getReportItem(optimizeKeyFullName, allResults)
optimizeDict[optimizeKeyFullName] = value
reportDict[optimizeKeyFullName] = value
# Return info
return(reportDict, optimizeDict) |
def _echo_method(self, method):
"""Given a method, return a method that runs the internal
method and echos the result.
"""
@functools.wraps(method)
def func(*args, **kwargs):
# Echo warning if this method is deprecated.
if getattr(method, 'deprecated', False):
debug.log('This method is deprecated in Tower 3.0.', header='warning')
result = method(*args, **kwargs)
# If this was a request that could result in a modification
# of data, print it in Ansible coloring.
color_info = {}
if isinstance(result, dict) and 'changed' in result:
if result['changed']:
color_info['fg'] = 'yellow'
else:
color_info['fg'] = 'green'
# Piece together the result into the proper format.
format = getattr(self, '_format_%s' % (getattr(method, 'format_freezer', None) or settings.format))
output = format(result)
# Perform the echo.
secho(output, **color_info)
return func | Given a method, return a method that runs the internal
method and echos the result. | Below is the the instruction that describes the task:
### Input:
Given a method, return a method that runs the internal
method and echos the result.
### Response:
def _echo_method(self, method):
"""Given a method, return a method that runs the internal
method and echos the result.
"""
@functools.wraps(method)
def func(*args, **kwargs):
# Echo warning if this method is deprecated.
if getattr(method, 'deprecated', False):
debug.log('This method is deprecated in Tower 3.0.', header='warning')
result = method(*args, **kwargs)
# If this was a request that could result in a modification
# of data, print it in Ansible coloring.
color_info = {}
if isinstance(result, dict) and 'changed' in result:
if result['changed']:
color_info['fg'] = 'yellow'
else:
color_info['fg'] = 'green'
# Piece together the result into the proper format.
format = getattr(self, '_format_%s' % (getattr(method, 'format_freezer', None) or settings.format))
output = format(result)
# Perform the echo.
secho(output, **color_info)
return func |
def run(self, sensor_graph, model):
"""Run this optimization pass on the sensor graph
If necessary, information on the device model being targeted
can be found in the associated model argument.
Args:
sensor_graph (SensorGraph): The sensor graph to optimize
model (DeviceModel): The device model we're using
"""
# This check can be done if there is 1 input and it is count == 1
# and the stream type is input or unbuffered
for node, inputs, outputs in sensor_graph.iterate_bfs():
if node.num_inputs != 1:
continue
input_a, trigger_a = node.inputs[0]
if input_a.selector.match_type not in [DataStream.InputType, DataStream.UnbufferedType]:
continue
if not isinstance(trigger_a, InputTrigger):
continue
if trigger_a.comp_string != u'==':
continue
if not trigger_a.use_count:
continue
if trigger_a.reference != 1:
continue
# here we're looking at count input | unbuffered X == 1
node.inputs[0] = (input_a, TrueTrigger()) | Run this optimization pass on the sensor graph
If necessary, information on the device model being targeted
can be found in the associated model argument.
Args:
sensor_graph (SensorGraph): The sensor graph to optimize
model (DeviceModel): The device model we're using | Below is the the instruction that describes the task:
### Input:
Run this optimization pass on the sensor graph
If necessary, information on the device model being targeted
can be found in the associated model argument.
Args:
sensor_graph (SensorGraph): The sensor graph to optimize
model (DeviceModel): The device model we're using
### Response:
def run(self, sensor_graph, model):
"""Run this optimization pass on the sensor graph
If necessary, information on the device model being targeted
can be found in the associated model argument.
Args:
sensor_graph (SensorGraph): The sensor graph to optimize
model (DeviceModel): The device model we're using
"""
# This check can be done if there is 1 input and it is count == 1
# and the stream type is input or unbuffered
for node, inputs, outputs in sensor_graph.iterate_bfs():
if node.num_inputs != 1:
continue
input_a, trigger_a = node.inputs[0]
if input_a.selector.match_type not in [DataStream.InputType, DataStream.UnbufferedType]:
continue
if not isinstance(trigger_a, InputTrigger):
continue
if trigger_a.comp_string != u'==':
continue
if not trigger_a.use_count:
continue
if trigger_a.reference != 1:
continue
# here we're looking at count input | unbuffered X == 1
node.inputs[0] = (input_a, TrueTrigger()) |
def get_localhost():
'''
Should return 127.0.0.1 in ipv4 and ::1 in ipv6
localhost is not used because on windows vista/windows 7, there can be issues where the resolving doesn't work
properly and takes a lot of time (had this issue on the pyunit server).
Using the IP directly solves the problem.
'''
# TODO: Needs better investigation!
global _cache
if _cache is None:
try:
for addr_info in socket.getaddrinfo("localhost", 80, 0, 0, socket.SOL_TCP):
config = addr_info[4]
if config[0] == '127.0.0.1':
_cache = '127.0.0.1'
return _cache
except:
# Ok, some versions of Python don't have getaddrinfo or SOL_TCP... Just consider it 127.0.0.1 in this case.
_cache = '127.0.0.1'
else:
_cache = 'localhost'
return _cache | Should return 127.0.0.1 in ipv4 and ::1 in ipv6
localhost is not used because on windows vista/windows 7, there can be issues where the resolving doesn't work
properly and takes a lot of time (had this issue on the pyunit server).
Using the IP directly solves the problem. | Below is the the instruction that describes the task:
### Input:
Should return 127.0.0.1 in ipv4 and ::1 in ipv6
localhost is not used because on windows vista/windows 7, there can be issues where the resolving doesn't work
properly and takes a lot of time (had this issue on the pyunit server).
Using the IP directly solves the problem.
### Response:
def get_localhost():
'''
Should return 127.0.0.1 in ipv4 and ::1 in ipv6
localhost is not used because on windows vista/windows 7, there can be issues where the resolving doesn't work
properly and takes a lot of time (had this issue on the pyunit server).
Using the IP directly solves the problem.
'''
# TODO: Needs better investigation!
global _cache
if _cache is None:
try:
for addr_info in socket.getaddrinfo("localhost", 80, 0, 0, socket.SOL_TCP):
config = addr_info[4]
if config[0] == '127.0.0.1':
_cache = '127.0.0.1'
return _cache
except:
# Ok, some versions of Python don't have getaddrinfo or SOL_TCP... Just consider it 127.0.0.1 in this case.
_cache = '127.0.0.1'
else:
_cache = 'localhost'
return _cache |
def get_variables(self):
"""
Returns a list of variables.
Each variable is represented by an index of list.
For example if the no of variables are 4 then the list will be
[var_0, var_1, var_2, var_3]
Returns
-------
list: list of variables
Example
-------
>>> reader = UAIReader('TestUAI.uai')
>>> reader.get_variables()
['var_0', 'var_1', 'var_2']
"""
variables = []
for var in range(0, self.no_variables):
var_name = "var_" + str(var)
variables.append(var_name)
return variables | Returns a list of variables.
Each variable is represented by an index of list.
For example if the no of variables are 4 then the list will be
[var_0, var_1, var_2, var_3]
Returns
-------
list: list of variables
Example
-------
>>> reader = UAIReader('TestUAI.uai')
>>> reader.get_variables()
['var_0', 'var_1', 'var_2'] | Below is the the instruction that describes the task:
### Input:
Returns a list of variables.
Each variable is represented by an index of list.
For example if the no of variables are 4 then the list will be
[var_0, var_1, var_2, var_3]
Returns
-------
list: list of variables
Example
-------
>>> reader = UAIReader('TestUAI.uai')
>>> reader.get_variables()
['var_0', 'var_1', 'var_2']
### Response:
def get_variables(self):
"""
Returns a list of variables.
Each variable is represented by an index of list.
For example if the no of variables are 4 then the list will be
[var_0, var_1, var_2, var_3]
Returns
-------
list: list of variables
Example
-------
>>> reader = UAIReader('TestUAI.uai')
>>> reader.get_variables()
['var_0', 'var_1', 'var_2']
"""
variables = []
for var in range(0, self.no_variables):
var_name = "var_" + str(var)
variables.append(var_name)
return variables |
def checkUser(self, user):
"""
Query a username or email address to see if a corresponding Microsoft account exists.
Args:
user (str): username or email address of an account
Returns:
bool: whether the account exists
"""
return not self.conn("POST", "{0}/GetCredentialType.srf".format(SkypeConnection.API_MSACC),
json={"username": user}).json().get("IfExistsResult") | Query a username or email address to see if a corresponding Microsoft account exists.
Args:
user (str): username or email address of an account
Returns:
bool: whether the account exists | Below is the the instruction that describes the task:
### Input:
Query a username or email address to see if a corresponding Microsoft account exists.
Args:
user (str): username or email address of an account
Returns:
bool: whether the account exists
### Response:
def checkUser(self, user):
"""
Query a username or email address to see if a corresponding Microsoft account exists.
Args:
user (str): username or email address of an account
Returns:
bool: whether the account exists
"""
return not self.conn("POST", "{0}/GetCredentialType.srf".format(SkypeConnection.API_MSACC),
json={"username": user}).json().get("IfExistsResult") |
def get(self, host, files_count, path="/", ssl=False, external=None):
"""
Send get request to a url and wrap the results
:param host (str): the host name of the url
:param path (str): the path of the url (start with "/")
:return (dict): the result of the test url
"""
theme = "https" if ssl else "http"
url = host + path
http_url = theme + "://" + url
result = {}
try:
capture_path = os.getcwd() + '/'
har_file_path = capture_path + "har/"
# fc.load_page(self.driver, http_url)
fc.switch_tab(self.driver)
self.load_page(http_url)
print "driver get: " + http_url
time.sleep(2)
# if url[-1] == "/":
# f_name = url.split('/')[-2]
# else:
# f_name = url.split('/')[-1]
# fc.save_html(self.driver, f_name, os.path.join(capture_path, "htmls/"))
# fc.save_screenshot(self.driver, f_name, os.path.join(capture_path, "screenshots/"))
result = self.wrap_results(url=http_url, files_count=files_count, fd=har_file_path)
if external is not None:
external[http_url] = result
except Exception as e:
result['error'] = e.message
print e
return result | Send get request to a url and wrap the results
:param host (str): the host name of the url
:param path (str): the path of the url (start with "/")
:return (dict): the result of the test url | Below is the the instruction that describes the task:
### Input:
Send get request to a url and wrap the results
:param host (str): the host name of the url
:param path (str): the path of the url (start with "/")
:return (dict): the result of the test url
### Response:
def get(self, host, files_count, path="/", ssl=False, external=None):
"""
Send get request to a url and wrap the results
:param host (str): the host name of the url
:param path (str): the path of the url (start with "/")
:return (dict): the result of the test url
"""
theme = "https" if ssl else "http"
url = host + path
http_url = theme + "://" + url
result = {}
try:
capture_path = os.getcwd() + '/'
har_file_path = capture_path + "har/"
# fc.load_page(self.driver, http_url)
fc.switch_tab(self.driver)
self.load_page(http_url)
print "driver get: " + http_url
time.sleep(2)
# if url[-1] == "/":
# f_name = url.split('/')[-2]
# else:
# f_name = url.split('/')[-1]
# fc.save_html(self.driver, f_name, os.path.join(capture_path, "htmls/"))
# fc.save_screenshot(self.driver, f_name, os.path.join(capture_path, "screenshots/"))
result = self.wrap_results(url=http_url, files_count=files_count, fd=har_file_path)
if external is not None:
external[http_url] = result
except Exception as e:
result['error'] = e.message
print e
return result |
def list_datasets(
self,
project=None,
include_all=False,
filter=None,
max_results=None,
page_token=None,
retry=DEFAULT_RETRY,
):
"""List datasets for the project associated with this client.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list
Args:
project (str):
Optional. Project ID to use for retreiving datasets. Defaults
to the client's project.
include_all (bool):
Optional. True if results include hidden datasets. Defaults
to False.
filter (str):
Optional. An expression for filtering the results by label.
For syntax, see
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list#filter.
max_results (int):
Optional. Maximum number of datasets to return.
page_token (str):
Optional. Token representing a cursor into the datasets. If
not passed, the API will return the first page of datasets.
The token marks the beginning of the iterator to be returned
and the value of the ``page_token`` can be accessed at
``next_page_token`` of the
:class:`~google.api_core.page_iterator.HTTPIterator`.
retry (google.api_core.retry.Retry):
Optional. How to retry the RPC.
Returns:
google.api_core.page_iterator.Iterator:
Iterator of
:class:`~google.cloud.bigquery.dataset.DatasetListItem`.
associated with the project.
"""
extra_params = {}
if project is None:
project = self.project
if include_all:
extra_params["all"] = True
if filter:
# TODO: consider supporting a dict of label -> value for filter,
# and converting it into a string here.
extra_params["filter"] = filter
path = "/projects/%s/datasets" % (project,)
return page_iterator.HTTPIterator(
client=self,
api_request=functools.partial(self._call_api, retry),
path=path,
item_to_value=_item_to_dataset,
items_key="datasets",
page_token=page_token,
max_results=max_results,
extra_params=extra_params,
) | List datasets for the project associated with this client.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list
Args:
project (str):
Optional. Project ID to use for retreiving datasets. Defaults
to the client's project.
include_all (bool):
Optional. True if results include hidden datasets. Defaults
to False.
filter (str):
Optional. An expression for filtering the results by label.
For syntax, see
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list#filter.
max_results (int):
Optional. Maximum number of datasets to return.
page_token (str):
Optional. Token representing a cursor into the datasets. If
not passed, the API will return the first page of datasets.
The token marks the beginning of the iterator to be returned
and the value of the ``page_token`` can be accessed at
``next_page_token`` of the
:class:`~google.api_core.page_iterator.HTTPIterator`.
retry (google.api_core.retry.Retry):
Optional. How to retry the RPC.
Returns:
google.api_core.page_iterator.Iterator:
Iterator of
:class:`~google.cloud.bigquery.dataset.DatasetListItem`.
        associated with the project. | Below is the instruction that describes the task:
### Input:
List datasets for the project associated with this client.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list
Args:
project (str):
Optional. Project ID to use for retreiving datasets. Defaults
to the client's project.
include_all (bool):
Optional. True if results include hidden datasets. Defaults
to False.
filter (str):
Optional. An expression for filtering the results by label.
For syntax, see
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list#filter.
max_results (int):
Optional. Maximum number of datasets to return.
page_token (str):
Optional. Token representing a cursor into the datasets. If
not passed, the API will return the first page of datasets.
The token marks the beginning of the iterator to be returned
and the value of the ``page_token`` can be accessed at
``next_page_token`` of the
:class:`~google.api_core.page_iterator.HTTPIterator`.
retry (google.api_core.retry.Retry):
Optional. How to retry the RPC.
Returns:
google.api_core.page_iterator.Iterator:
Iterator of
:class:`~google.cloud.bigquery.dataset.DatasetListItem`.
associated with the project.
### Response:
def list_datasets(
self,
project=None,
include_all=False,
filter=None,
max_results=None,
page_token=None,
retry=DEFAULT_RETRY,
):
"""List datasets for the project associated with this client.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list
Args:
project (str):
Optional. Project ID to use for retreiving datasets. Defaults
to the client's project.
include_all (bool):
Optional. True if results include hidden datasets. Defaults
to False.
filter (str):
Optional. An expression for filtering the results by label.
For syntax, see
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list#filter.
max_results (int):
Optional. Maximum number of datasets to return.
page_token (str):
Optional. Token representing a cursor into the datasets. If
not passed, the API will return the first page of datasets.
The token marks the beginning of the iterator to be returned
and the value of the ``page_token`` can be accessed at
``next_page_token`` of the
:class:`~google.api_core.page_iterator.HTTPIterator`.
retry (google.api_core.retry.Retry):
Optional. How to retry the RPC.
Returns:
google.api_core.page_iterator.Iterator:
Iterator of
:class:`~google.cloud.bigquery.dataset.DatasetListItem`.
associated with the project.
"""
extra_params = {}
if project is None:
project = self.project
if include_all:
extra_params["all"] = True
if filter:
# TODO: consider supporting a dict of label -> value for filter,
# and converting it into a string here.
extra_params["filter"] = filter
path = "/projects/%s/datasets" % (project,)
return page_iterator.HTTPIterator(
client=self,
api_request=functools.partial(self._call_api, retry),
path=path,
item_to_value=_item_to_dataset,
items_key="datasets",
page_token=page_token,
max_results=max_results,
extra_params=extra_params,
) |
def parse(self):
"""
Parse the vmstat file
:return: status of the metric parse
"""
file_status = True
for input_file in self.infile_list:
file_status = file_status and naarad.utils.is_valid_file(input_file)
if not file_status:
return False
status = True
cur_zone = None
cur_submetric = None
cur_value = None
data = {} # stores the data of each column
for input_file in self.infile_list:
logger.info('Processing : %s', input_file)
timestamp_format = None
with open(input_file) as fh:
for line in fh:
words = line.replace(',', ' ').split() # [0] is day; [1] is seconds; [2...] is field names:;
if len(words) < 3:
continue
ts = words[0] + " " + words[1]
if not timestamp_format or timestamp_format == 'unknown':
timestamp_format = naarad.utils.detect_timestamp_format(ts)
if timestamp_format == 'unknown':
continue
ts = naarad.utils.get_standardized_timestamp(ts, timestamp_format)
if self.ts_out_of_range(ts):
continue
if words[2] == 'Node': # Node 0 zone DMA
cols = words[2:]
cur_zone = '.'.join(cols)
continue
elif words[2] == 'pages': # pages free 3936
cur_submetric = words[2] + '.' + words[3] # pages.free
cur_value = words[4]
elif words[2] in self.processed_sub_metrics:
cur_submetric = 'pages' + '.' + words[2] # pages.min
cur_value = words[3]
elif words[2] in self.skipped_sub_metrics:
continue
else: # other useful submetrics
cur_submetric = words[2]
cur_value = words[3]
col = cur_zone + '.' + cur_submetric # prefix with 'Node.0.zone.DMA.
# only process zones specified in config
if cur_zone and self.zones and cur_zone not in self.zones:
continue
self.sub_metric_unit[col] = 'pages' # The unit of the sub metric. For /proc/zoneinfo, they are all in pages
# only process sub_metrics specified in config.
if self.sub_metrics and cur_submetric and cur_submetric not in self.sub_metrics:
continue
if col in self.column_csv_map:
out_csv = self.column_csv_map[col]
else:
out_csv = self.get_csv(col) # column_csv_map[] is assigned in get_csv()
data[out_csv] = []
data[out_csv].append(ts + "," + cur_value)
# post processing, putting data in csv files;
for csv in data.keys():
self.csv_files.append(csv)
with open(csv, 'w') as fh:
fh.write('\n'.join(sorted(data[csv])))
return status | Parse the vmstat file
:return: status of the metric parse | Below is the instruction that describes the task:
### Input:
Parse the vmstat file
:return: status of the metric parse
### Response:
def parse(self):
"""
Parse the vmstat file
:return: status of the metric parse
"""
file_status = True
for input_file in self.infile_list:
file_status = file_status and naarad.utils.is_valid_file(input_file)
if not file_status:
return False
status = True
cur_zone = None
cur_submetric = None
cur_value = None
data = {} # stores the data of each column
for input_file in self.infile_list:
logger.info('Processing : %s', input_file)
timestamp_format = None
with open(input_file) as fh:
for line in fh:
words = line.replace(',', ' ').split() # [0] is day; [1] is seconds; [2...] is field names:;
if len(words) < 3:
continue
ts = words[0] + " " + words[1]
if not timestamp_format or timestamp_format == 'unknown':
timestamp_format = naarad.utils.detect_timestamp_format(ts)
if timestamp_format == 'unknown':
continue
ts = naarad.utils.get_standardized_timestamp(ts, timestamp_format)
if self.ts_out_of_range(ts):
continue
if words[2] == 'Node': # Node 0 zone DMA
cols = words[2:]
cur_zone = '.'.join(cols)
continue
elif words[2] == 'pages': # pages free 3936
cur_submetric = words[2] + '.' + words[3] # pages.free
cur_value = words[4]
elif words[2] in self.processed_sub_metrics:
cur_submetric = 'pages' + '.' + words[2] # pages.min
cur_value = words[3]
elif words[2] in self.skipped_sub_metrics:
continue
else: # other useful submetrics
cur_submetric = words[2]
cur_value = words[3]
col = cur_zone + '.' + cur_submetric # prefix with 'Node.0.zone.DMA.
# only process zones specified in config
if cur_zone and self.zones and cur_zone not in self.zones:
continue
self.sub_metric_unit[col] = 'pages' # The unit of the sub metric. For /proc/zoneinfo, they are all in pages
# only process sub_metrics specified in config.
if self.sub_metrics and cur_submetric and cur_submetric not in self.sub_metrics:
continue
if col in self.column_csv_map:
out_csv = self.column_csv_map[col]
else:
out_csv = self.get_csv(col) # column_csv_map[] is assigned in get_csv()
data[out_csv] = []
data[out_csv].append(ts + "," + cur_value)
# post processing, putting data in csv files;
for csv in data.keys():
self.csv_files.append(csv)
with open(csv, 'w') as fh:
fh.write('\n'.join(sorted(data[csv])))
return status |
def shorten(string, length=140, ellipsis=None):
'''
Shorten a string to 140 characters without breaking words.
Optionally add an ellipsis character: '…' if ellipsis=True, or a given string
e.g. ellipsis=' (cut)'
'''
string = string.strip()
if len(string) > length:
if ellipsis is True:
ellipsis = '…'
else:
ellipsis = ellipsis or ''
L = length - len(ellipsis)
return ' '.join(string[:L].split(' ')[:-1]).strip(',;:.') + ellipsis
else:
return string | Shorten a string to 140 characters without breaking words.
Optionally add an ellipsis character: '…' if ellipsis=True, or a given string
e.g. ellipsis=' (cut)' | Below is the instruction that describes the task:
### Input:
Shorten a string to 140 characters without breaking words.
Optionally add an ellipsis character: '…' if ellipsis=True, or a given string
e.g. ellipsis=' (cut)'
### Response:
def shorten(string, length=140, ellipsis=None):
'''
Shorten a string to 140 characters without breaking words.
Optionally add an ellipsis character: '…' if ellipsis=True, or a given string
e.g. ellipsis=' (cut)'
'''
string = string.strip()
if len(string) > length:
if ellipsis is True:
ellipsis = '…'
else:
ellipsis = ellipsis or ''
L = length - len(ellipsis)
return ' '.join(string[:L].split(' ')[:-1]).strip(',;:.') + ellipsis
else:
return string |
def cross_v3(vec_a, vec_b):
"""Return the crossproduct between vec_a and vec_b."""
return Vec3(vec_a.y * vec_b.z - vec_a.z * vec_b.y,
vec_a.z * vec_b.x - vec_a.x * vec_b.z,
                vec_a.x * vec_b.y - vec_a.y * vec_b.x) | Return the crossproduct between vec_a and vec_b. | Below is the instruction that describes the task:
### Input:
Return the crossproduct between vec_a and vec_b.
### Response:
def cross_v3(vec_a, vec_b):
"""Return the crossproduct between vec_a and vec_b."""
return Vec3(vec_a.y * vec_b.z - vec_a.z * vec_b.y,
vec_a.z * vec_b.x - vec_a.x * vec_b.z,
vec_a.x * vec_b.y - vec_a.y * vec_b.x) |
def namedspace(typename, required_fields=(), optional_fields=(), mutable_fields=(),
default_values=frozendict(), default_value_factories=frozendict(),
return_none=False):
"""Builds a new class that encapsulates a namespace and provides
various ways to access it.
The typename argument is required and is the name of the
namedspace class that will be generated.
The required_fields and optional_fields arguments can be a string
or sequence of strings and together specify the fields that
instances of the namedspace class have.
Values for the required fields must be provided somehow when the
instance is created. Values for optional fields may be provided
later, or maybe not at all.
If an optional field is queried before its value has been set,
an AttributeError will be raised. This behavior can be altered
to cause None to be returned instead by setting the return_none
keyword argument to True.
The mutable_fields argument specifies which fields will be mutable,
if any. By default, all fields are immutable and all instances are
hashable and can be used as dictionary keys. If any fields are set
as mutable, all instances are not hashable and cannot be used as
dictionary keys.
The default_values mapping provides simple default values for the
fields.
The default_value_factories mapping provides a more flexible, but
more complex, mechanism for providing default values. The value of
each item is a callable that takes a single argument, the
namedspace instance, and returns the default value for the field.
The default_values_factories mapping is only consulted if there
is no default value for the field in the default_values mapping.
Here is a simple example, using only the required fields argument:
>>> SimpleNS = namedspace("SimpleNS", ("id", "name", "description"))
>>> SimpleNS
<class 'namedspace.SimpleNS'>
There are built-in properties to access collections and iterators
associated with the namespace class.
>>> SimpleNS._field_names
('id', 'name', 'description')
>>> tuple(SimpleNS._field_names_iter)
('id', 'name', 'description')
Once the class has been created, it can be instantiated like any
other class. However, a value for all of the required fields must
be provided.
>>> simple_ns = SimpleNS(id=1, description="Simple Description")
Traceback (most recent call last):
<snip/>
ValueError: A value for field 'name' is required.
>>> simple_ns = SimpleNS(id=1, name="Simple Name", description="Simple Description")
>>> simple_ns
SimpleNS(id=1, name='Simple Name', description='Simple Description')
An instance of a namedspace class provides standard attribute
access to its fields.
>>> simple_ns.id
1
>>> simple_ns.name
'Simple Name'
>>> simple_ns.description
'Simple Description'
In addition to standard attribute access, instances of a namedspace
class implement a MutableMapping interface.
>>> 'id' in simple_ns
True
>>> for field_name in simple_ns:
... print field_name
id
name
description
>>> len(simple_ns)
3
>>> simple_ns["id"]
1
>>> simple_ns["name"]
'Simple Name'
>>> simple_ns["description"]
'Simple Description'
There are built-in properties to access collections and iterators
associated with the namespace.
The namespace encapsulated by a namedspace class is stored in an
OrderedDict, so order of the collections is the same as the order
that the fields were specified.
All of these properties use the standard "non-public" naming
convention in order to not pollute the public namespace.
>>> simple_ns._field_names
('id', 'name', 'description')
>>> tuple(simple_ns._field_names_iter)
('id', 'name', 'description')
>>> simple_ns._field_values
(1, 'Simple Name', 'Simple Description')
>>> tuple(simple_ns._field_values_iter)
(1, 'Simple Name', 'Simple Description')
>>> simple_ns._field_items
[('id', 1), ('name', 'Simple Name'), ('description', 'Simple Description')]
>>> list(simple_ns._field_items_iter)
[('id', 1), ('name', 'Simple Name'), ('description', 'Simple Description')]
>>> simple_ns._as_dict
OrderedDict([('id', 1), ('name', 'Simple Name'), ('description', 'Simple Description')])
Here is a more complex example, using most of the other arguments:
>>> from itertools import count
>>> ComplexNS = namedspace("ComplexNS", "id", optional_fields=("name", "description", "extra"),
... mutable_fields=("description", "extra"), default_values={"description": "None available"},
... default_value_factories={"id": lambda self, counter=count(start=1): counter.next(),
... "name": lambda self: "Name for id={id}".format(id=self.id)})
>>> complex_ns1 = ComplexNS()
>>> complex_ns1.id
1
The value of 1 was automatically assigned by the
default_value_factory for the 'id' field, in this case a lambda
closure that hooks up an instance of itertools.count.
>>> complex_ns1.name
'Name for id=1'
This value was also generated by a default value factory. In this
case, the factory for the 'name' attribute uses the value of the
'id' attribute to compute the default value.
>>> complex_ns1.description
'None available'
This value came from the default_values mapping.
The description field was set as a mutable field, which allows
it to be modified.
>>> complex_ns1.description = "Some fancy description"
>>> complex_ns1.description
'Some fancy description'
Its value can also be deleted.
>>> del complex_ns1.description
>>> complex_ns1.description
'None available'
Since its modified value was deleted, and it has a default value,
it has reverted to its default value.
The extra field is a valid field in this namedspace, but it has not
yet been assigned a value and does not have a default.
>>> complex_ns1.extra
Traceback (most recent call last):
<snip/>
AttributeError: "Field 'extra' does not yet exist in this ComplexNS namedspace instance."
Sometimes, having an exception raised if an optional field is
missing, and being forced to handle it, is annoying. A namedspace
class can be configured at creation time to return None instead of
raising exceptions for optional fields by setting the `return_none`
parameter to `True`. Here is a trivial example:
>>> QuietNS = namedspace("QuietNS", optional_fields=("might_be_none",), return_none=True)
>>> quiet_ns1 = QuietNS(might_be_none="Nope, not this time")
>>> quiet_ns1.might_be_none
'Nope, not this time'
>>> quiet_ns2 = QuietNS()
>>> quiet_ns2.might_be_none
>>>
Having the namedspace quietly return `None` makes sense in some
situations. But be careful. Understand the full implications of
this alternate behavior on the code that uses it. Subtle data-
dependent bugs can be introduced by this behavior, which is why it
is not enabled by default.
Now, back to our "complex" example.
Since the 'extra' field is one of the mutable fields, we can give it a value.
>>> complex_ns1.extra = "Lasts a long, long time"
>>> complex_ns1.extra
'Lasts a long, long time'
Only fields that have been declared as either required or optional
are allowed.
>>> complex_ns1.some_other_field = "some other value"
Traceback (most recent call last):
<snip/>
FieldNameError: "Field 'some_other_field' does not exist in ComplexNS namedspace."
Finally, to illustrate that our counter is working as it should, if
we instantiate another instance, our id field will get the next
counter value.
>>> complex_ns2 = ComplexNS()
>>> complex_ns2.id
2
A common use case for a namedspace class is as a base class for
another custom class that has additional members such as properties
and methods. This way, the custom class gets all of the namedspace
behavior through declarative configuration, instead of having
to re-define that behavior imperatively.
The following is an example where one of the required fields is
generated at instantiation time, and the values for the two
optional fields are calculated values provided by properties in
the subclass.
>>> from collections import Counter
>>> class Widget(namedspace("_Widget", ("mfg_code", "model_code", "serial_number"), optional_fields=("sku", "pk"),
... return_none=True)):
... _sn_map = Counter()
... def __init__(self, *args, **kwargs):
... sn_key = (kwargs["mfg_code"], kwargs["model_code"])
... self._sn_map[sn_key] += 1
... kwargs["serial_number"] = "{:010}".format(self._sn_map[sn_key])
... super(Widget, self).__init__(*args, **kwargs)
... @property
... def sku(self):
... return "{}_{}".format(self.mfg_code, self.model_code)
... @property
... def pk(self):
... return "{}_{}".format(self.sku, self.serial_number)
>>> widget1 = Widget(mfg_code="ACME", model_code="X-500")
>>> widget1
Widget(mfg_code='ACME', model_code='X-500', serial_number='0000000001', sku='ACME_X-500', pk='ACME_X-500_0000000001')
>>> widget1._as_dict
OrderedDict([('mfg_code', 'ACME'), ('model_code', 'X-500'), ('serial_number', '0000000001'), ('sku', 'ACME_X-500'), ('pk', 'ACME_X-500_0000000001')])
>>> widget2 = Widget(mfg_code="ACME", model_code="X-500")
>>> widget2
Widget(mfg_code='ACME', model_code='X-500', serial_number='0000000002', sku='ACME_X-500', pk='ACME_X-500_0000000002')
>>> widget2._as_dict
OrderedDict([('mfg_code', 'ACME'), ('model_code', 'X-500'), ('serial_number', '0000000002'), ('sku', 'ACME_X-500'), ('pk', 'ACME_X-500_0000000002')])
"""
# Initialize the list of arguments that will get put into the
# doc string of the generated class
arg_list_items = []
#
# Validate parameters
#
for arg_name in ("required_fields", "optional_fields", "mutable_fields"):
arg_value = locals()[arg_name]
if isinstance(arg_value, basestring):
arg_value = (arg_value,)
exec "{arg_name} = arg_value".format(arg_name=arg_name)
elif not isinstance(arg_value, Container):
raise ValueError("Value for argument '{arg_name}' must be a string or container of strings.".format(
arg_name=arg_name))
for field_name in arg_value:
if not isinstance(field_name, basestring):
raise ValueError("Items of container argument '{arg_name}' must be strings.".format(arg_name=arg_name))
if len(arg_value) != len(frozenset(arg_value)):
raise ValueError("Value for argument '{arg_name}' contains duplicate fields.".format(
arg_name=arg_name))
arg_list_items.append("{arg_name}={arg_value!r}".format(arg_name=arg_name, arg_value=tuple(arg_value)))
exec "{arg_name}_set = frozenset(arg_value)".format(arg_name=arg_name)
all_fields = tuple(required_fields + optional_fields)
if not all_fields:
raise ValueError("At least one required or optional field must be provided.")
all_fields_set = frozenset(all_fields)
for field_name in mutable_fields:
if field_name not in all_fields_set:
raise ValueError("Mutable field '{field_name}' is not a required or optional field.".format(
field_name=field_name))
for arg_name in ("default_values", "default_value_factories"):
arg_value = locals()[arg_name]
if not isinstance(arg_value, Mapping):
raise ValueError("Value for argument '{arg_name}' must be a mapping.".format(arg_name=arg_name))
default_field_names = frozenset(arg_value.iterkeys())
if not default_field_names.issubset(all_fields_set):
bad_default_field_names = default_field_names - all_fields_set
raise ValueError("Value for argument '{arg_name}' contains invalid field(s) '{field_names}'.".format(
arg_name=arg_name, field_names=", ".join(bad_default_field_names)))
arg_list_items.append("{arg_name}={arg_value!r}".format(arg_name=arg_name, arg_value=dict(arg_value)))
exec "{arg_name} = frozendict(arg_value)".format(arg_name=arg_name)
for field_name, factory in default_value_factories.iteritems():
if not callable(factory):
raise ValueError("Default value factory for '{field_name}' is not callable.".format(field_name=field_name))
# Fill-in the class template
class_definition = _class_template.format(
typename=typename,
arg_list=", ".join(arg_list_items),
)
# Execute the template string in a temporary namespace and support
# tracing utilities by setting a value for frame.f_globals['__name__']
namespace = dict(
__name__='namedspace_{typename}'.format(typename=typename),
all_fields=all_fields,
all_fields_set=all_fields_set,
required_fields_set=locals()["required_fields_set"],
mutable_fields_set=locals()["mutable_fields_set"],
default_values=default_values,
default_value_factories=default_value_factories,
Hashable=Hashable,
MutableMapping=MutableMapping,
OrderedDict=OrderedDict,
return_none=return_none,
NamedspaceMeta=NamedspaceMeta,
)
#
# Code from here down copied verbatim from namedtuple
#
try:
exec class_definition in namespace
except SyntaxError as e:
raise SyntaxError(e.message + ':\n' + class_definition)
result = namespace[typename]
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named tuple is created. Bypass this step in enviroments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython).
try:
result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
return result | Builds a new class that encapsulates a namespace and provides
various ways to access it.
The typename argument is required and is the name of the
namedspace class that will be generated.
The required_fields and optional_fields arguments can be a string
or sequence of strings and together specify the fields that
instances of the namedspace class have.
Values for the required fields must be provided somehow when the
instance is created. Values for optional fields may be provided
later, or maybe not at all.
If an optional field is queried before its value has been set,
an AttributeError will be raised. This behavior can be altered
to cause None to be returned instead by setting the return_none
keyword argument to True.
The mutable_fields argument specifies which fields will be mutable,
if any. By default, all fields are immutable and all instances are
hashable and can be used as dictionary keys. If any fields are set
as mutable, all instances are not hashable and cannot be used as
dictionary keys.
The default_values mapping provides simple default values for the
fields.
The default_value_factories mapping provides a more flexible, but
more complex, mechanism for providing default values. The value of
each item is a callable that takes a single argument, the
namedspace instance, and returns the default value for the field.
The default_values_factories mapping is only consulted if there
is no default value for the field in the default_values mapping.
Here is a simple example, using only the required fields argument:
>>> SimpleNS = namedspace("SimpleNS", ("id", "name", "description"))
>>> SimpleNS
<class 'namedspace.SimpleNS'>
There are built-in properties to access collections and iterators
associated with the namespace class.
>>> SimpleNS._field_names
('id', 'name', 'description')
>>> tuple(SimpleNS._field_names_iter)
('id', 'name', 'description')
Once the class has been created, it can be instantiated like any
other class. However, a value for all of the required fields must
be provided.
>>> simple_ns = SimpleNS(id=1, description="Simple Description")
Traceback (most recent call last):
<snip/>
ValueError: A value for field 'name' is required.
>>> simple_ns = SimpleNS(id=1, name="Simple Name", description="Simple Description")
>>> simple_ns
SimpleNS(id=1, name='Simple Name', description='Simple Description')
An instance of a namedspace class provides standard attribute
access to its fields.
>>> simple_ns.id
1
>>> simple_ns.name
'Simple Name'
>>> simple_ns.description
'Simple Description'
In addition to standard attribute access, instances of a namedspace
class implement a MutableMapping interface.
>>> 'id' in simple_ns
True
>>> for field_name in simple_ns:
... print field_name
id
name
description
>>> len(simple_ns)
3
>>> simple_ns["id"]
1
>>> simple_ns["name"]
'Simple Name'
>>> simple_ns["description"]
'Simple Description'
There are built-in properties to access collections and iterators
associated with the namespace.
The namespace encapsulated by a namedspace class is stored in an
OrderedDict, so order of the collections is the same as the order
that the fields were specified.
All of these properties use the standard "non-public" naming
convention in order to not pollute the public namespace.
>>> simple_ns._field_names
('id', 'name', 'description')
>>> tuple(simple_ns._field_names_iter)
('id', 'name', 'description')
>>> simple_ns._field_values
(1, 'Simple Name', 'Simple Description')
>>> tuple(simple_ns._field_values_iter)
(1, 'Simple Name', 'Simple Description')
>>> simple_ns._field_items
[('id', 1), ('name', 'Simple Name'), ('description', 'Simple Description')]
>>> list(simple_ns._field_items_iter)
[('id', 1), ('name', 'Simple Name'), ('description', 'Simple Description')]
>>> simple_ns._as_dict
OrderedDict([('id', 1), ('name', 'Simple Name'), ('description', 'Simple Description')])
Here is a more complex example, using most of the other arguments:
>>> from itertools import count
>>> ComplexNS = namedspace("ComplexNS", "id", optional_fields=("name", "description", "extra"),
... mutable_fields=("description", "extra"), default_values={"description": "None available"},
... default_value_factories={"id": lambda self, counter=count(start=1): counter.next(),
... "name": lambda self: "Name for id={id}".format(id=self.id)})
>>> complex_ns1 = ComplexNS()
>>> complex_ns1.id
1
The value of 1 was automatically assigned by the
default_value_factory for the 'id' field, in this case a lambda
closure that hooks up an instance of itertools.count.
>>> complex_ns1.name
'Name for id=1'
This value was also generated by a default value factory. In this
case, the factory for the 'name' attribute uses the value of the
'id' attribute to compute the default value.
>>> complex_ns1.description
'None available'
This value came from the default_values mapping.
The description field was set as a mutable field, which allows
it to be modified.
>>> complex_ns1.description = "Some fancy description"
>>> complex_ns1.description
'Some fancy description'
Its value can also be deleted.
>>> del complex_ns1.description
>>> complex_ns1.description
'None available'
Since its modified value was deleted, and it has a default value,
it has reverted to its default value.
The extra field is a valid field in this namedspace, but it has not
yet been assigned a value and does not have a default.
>>> complex_ns1.extra
Traceback (most recent call last):
<snip/>
AttributeError: "Field 'extra' does not yet exist in this ComplexNS namedspace instance."
Sometimes, having an exception raised if an optional field is
missing, and being forced to handle it, is annoying. A namedspace
class can be configured at creation time to return None instead of
raising exceptions for optional fields by setting the `return_none`
parameter to `True`. Here is a trivial example:
>>> QuietNS = namedspace("QuietNS", optional_fields=("might_be_none",), return_none=True)
>>> quiet_ns1 = QuietNS(might_be_none="Nope, not this time")
>>> quiet_ns1.might_be_none
'Nope, not this time'
>>> quiet_ns2 = QuietNS()
>>> quiet_ns2.might_be_none
>>>
Having the namedspace quietly return `None` makes sense in some
situations. But be careful. Understand the full implications of
this alternate behavior on the code that uses it. Subtle data-
dependent bugs can be introduced by this behavior, which is why it
is not enabled by default.
Now, back to our "complex" example.
Since the 'extra' field is one of the mutable fields, we can give it a value.
>>> complex_ns1.extra = "Lasts a long, long time"
>>> complex_ns1.extra
'Lasts a long, long time'
Only fields that have been declared as either required or optional
are allowed.
>>> complex_ns1.some_other_field = "some other value"
Traceback (most recent call last):
<snip/>
FieldNameError: "Field 'some_other_field' does not exist in ComplexNS namedspace."
Finally, to illustrate that our counter is working as it should, if
we instantiate another instance, our id field will get the next
counter value.
>>> complex_ns2 = ComplexNS()
>>> complex_ns2.id
2
A common use case for a namedspace class is as a base class for
another custom class that has additional members such as properties
and methods. This way, the custom class gets all of the namedspace
behavior through declarative configuration, instead of having
to re-define that behavior imperatively.
The following is an example where one of the required fields is
generated at instantiation time, and the values for the two
optional fields are calculated values provided by properties in
the subclass.
>>> from collections import Counter
>>> class Widget(namedspace("_Widget", ("mfg_code", "model_code", "serial_number"), optional_fields=("sku", "pk"),
... return_none=True)):
... _sn_map = Counter()
... def __init__(self, *args, **kwargs):
... sn_key = (kwargs["mfg_code"], kwargs["model_code"])
... self._sn_map[sn_key] += 1
... kwargs["serial_number"] = "{:010}".format(self._sn_map[sn_key])
... super(Widget, self).__init__(*args, **kwargs)
... @property
... def sku(self):
... return "{}_{}".format(self.mfg_code, self.model_code)
... @property
... def pk(self):
... return "{}_{}".format(self.sku, self.serial_number)
>>> widget1 = Widget(mfg_code="ACME", model_code="X-500")
>>> widget1
Widget(mfg_code='ACME', model_code='X-500', serial_number='0000000001', sku='ACME_X-500', pk='ACME_X-500_0000000001')
>>> widget1._as_dict
OrderedDict([('mfg_code', 'ACME'), ('model_code', 'X-500'), ('serial_number', '0000000001'), ('sku', 'ACME_X-500'), ('pk', 'ACME_X-500_0000000001')])
>>> widget2 = Widget(mfg_code="ACME", model_code="X-500")
>>> widget2
Widget(mfg_code='ACME', model_code='X-500', serial_number='0000000002', sku='ACME_X-500', pk='ACME_X-500_0000000002')
>>> widget2._as_dict
OrderedDict([('mfg_code', 'ACME'), ('model_code', 'X-500'), ('serial_number', '0000000002'), ('sku', 'ACME_X-500'), ('pk', 'ACME_X-500_0000000002')]) | Below is the the instruction that describes the task:
### Input:
Builds a new class that encapsulates a namespace and provides
various ways to access it.
The typename argument is required and is the name of the
namedspace class that will be generated.
The required_fields and optional_fields arguments can be a string
or sequence of strings and together specify the fields that
instances of the namedspace class have.
Values for the required fields must be provided somehow when the
instance is created. Values for optional fields may be provided
later, or maybe not at all.
If an optional field is queried before its value has been set,
an AttributeError will be raised. This behavior can be altered
to cause None to be returned instead by setting the return_none
keyword argument to True.
The mutable_fields argument specifies which fields will be mutable,
if any. By default, all fields are immutable and all instances are
hashable and can be used as dictionary keys. If any fields are set
as mutable, all instances are not hashable and cannot be used as
dictionary keys.
The default_values mapping provides simple default values for the
fields.
The default_value_factories mapping provides a more flexible, but
more complex, mechanism for providing default values. The value of
each item is a callable that takes a single argument, the
namedspace instance, and returns the default value for the field.
The default_values_factories mapping is only consulted if there
is no default value for the field in the default_values mapping.
Here is a simple example, using only the required fields argument:
>>> SimpleNS = namedspace("SimpleNS", ("id", "name", "description"))
>>> SimpleNS
<class 'namedspace.SimpleNS'>
There are built-in properties to access collections and iterators
associated with the namespace class.
>>> SimpleNS._field_names
('id', 'name', 'description')
>>> tuple(SimpleNS._field_names_iter)
('id', 'name', 'description')
Once the class has been created, it can be instantiated like any
other class. However, a value for all of the required fields must
be provided.
>>> simple_ns = SimpleNS(id=1, description="Simple Description")
Traceback (most recent call last):
<snip/>
ValueError: A value for field 'name' is required.
>>> simple_ns = SimpleNS(id=1, name="Simple Name", description="Simple Description")
>>> simple_ns
SimpleNS(id=1, name='Simple Name', description='Simple Description')
An instance of a namedspace class provides standard attribute
access to its fields.
>>> simple_ns.id
1
>>> simple_ns.name
'Simple Name'
>>> simple_ns.description
'Simple Description'
In addition to standard attribute access, instances of a namedspace
class implement a MutableMapping interface.
>>> 'id' in simple_ns
True
>>> for field_name in simple_ns:
... print field_name
id
name
description
>>> len(simple_ns)
3
>>> simple_ns["id"]
1
>>> simple_ns["name"]
'Simple Name'
>>> simple_ns["description"]
'Simple Description'
There are built-in properties to access collections and iterators
associated with the namespace.
The namespace encapsulated by a namedspace class is stored in an
OrderedDict, so order of the collections is the same as the order
that the fields were specified.
All of these properties use the standard "non-public" naming
convention in order to not pollute the public namespace.
>>> simple_ns._field_names
('id', 'name', 'description')
>>> tuple(simple_ns._field_names_iter)
('id', 'name', 'description')
>>> simple_ns._field_values
(1, 'Simple Name', 'Simple Description')
>>> tuple(simple_ns._field_values_iter)
(1, 'Simple Name', 'Simple Description')
>>> simple_ns._field_items
[('id', 1), ('name', 'Simple Name'), ('description', 'Simple Description')]
>>> list(simple_ns._field_items_iter)
[('id', 1), ('name', 'Simple Name'), ('description', 'Simple Description')]
>>> simple_ns._as_dict
OrderedDict([('id', 1), ('name', 'Simple Name'), ('description', 'Simple Description')])
Here is a more complex example, using most of the other arguments:
>>> from itertools import count
>>> ComplexNS = namedspace("ComplexNS", "id", optional_fields=("name", "description", "extra"),
... mutable_fields=("description", "extra"), default_values={"description": "None available"},
... default_value_factories={"id": lambda self, counter=count(start=1): counter.next(),
... "name": lambda self: "Name for id={id}".format(id=self.id)})
>>> complex_ns1 = ComplexNS()
>>> complex_ns1.id
1
The value of 1 was automatically assigned by the
default_value_factory for the 'id' field, in this case a lambda
closure that hooks up an instance of itertools.count.
>>> complex_ns1.name
'Name for id=1'
This value was also generated by a default value factory. In this
case, the factory for the 'name' attribute uses the value of the
'id' attribute to compute the default value.
>>> complex_ns1.description
'None available'
This value came from the default_values mapping.
The description field was set as a mutable field, which allows
it to be modified.
>>> complex_ns1.description = "Some fancy description"
>>> complex_ns1.description
'Some fancy description'
Its value can also be deleted.
>>> del complex_ns1.description
>>> complex_ns1.description
'None available'
Since its modified value was deleted, and it has a default value,
it has reverted to its default value.
The extra field is a valid field in this namedspace, but it has not
yet been assigned a value and does not have a default.
>>> complex_ns1.extra
Traceback (most recent call last):
<snip/>
AttributeError: "Field 'extra' does not yet exist in this ComplexNS namedspace instance."
Sometimes, having an exception raised if an optional field is
missing, and being forced to handle it, is annoying. A namedspace
class can be configured at creation time to return None instead of
raising exceptions for optional fields by setting the `return_none`
parameter to `True`. Here is a trivial example:
>>> QuietNS = namedspace("QuietNS", optional_fields=("might_be_none",), return_none=True)
>>> quiet_ns1 = QuietNS(might_be_none="Nope, not this time")
>>> quiet_ns1.might_be_none
'Nope, not this time'
>>> quiet_ns2 = QuietNS()
>>> quiet_ns2.might_be_none
>>>
Having the namedspace quietly return `None` makes sense in some
situations. But be careful. Understand the full implications of
this alternate behavior on the code that uses it. Subtle data-
dependent bugs can be introduced by this behavior, which is why it
is not enabled by default.
Now, back to our "complex" example.
Since the 'extra' field is one of the mutable fields, we can give it a value.
>>> complex_ns1.extra = "Lasts a long, long time"
>>> complex_ns1.extra
'Lasts a long, long time'
Only fields that have been declared as either required or optional
are allowed.
>>> complex_ns1.some_other_field = "some other value"
Traceback (most recent call last):
<snip/>
FieldNameError: "Field 'some_other_field' does not exist in ComplexNS namedspace."
Finally, to illustrate that our counter is working as it should, if
we instantiate another instance, our id field will get the next
counter value.
>>> complex_ns2 = ComplexNS()
>>> complex_ns2.id
2
A common use case for a namedspace class is as a base class for
another custom class that has additional members such as properties
and methods. This way, the custom class gets all of the namedspace
behavior through declarative configuration, instead of having
to re-define that behavior imperatively.
The following is an example where one of the required fields is
generated at instantiation time, and the values for the two
optional fields are calculated values provided by properties in
the subclass.
>>> from collections import Counter
>>> class Widget(namedspace("_Widget", ("mfg_code", "model_code", "serial_number"), optional_fields=("sku", "pk"),
... return_none=True)):
... _sn_map = Counter()
... def __init__(self, *args, **kwargs):
... sn_key = (kwargs["mfg_code"], kwargs["model_code"])
... self._sn_map[sn_key] += 1
... kwargs["serial_number"] = "{:010}".format(self._sn_map[sn_key])
... super(Widget, self).__init__(*args, **kwargs)
... @property
... def sku(self):
... return "{}_{}".format(self.mfg_code, self.model_code)
... @property
... def pk(self):
... return "{}_{}".format(self.sku, self.serial_number)
>>> widget1 = Widget(mfg_code="ACME", model_code="X-500")
>>> widget1
Widget(mfg_code='ACME', model_code='X-500', serial_number='0000000001', sku='ACME_X-500', pk='ACME_X-500_0000000001')
>>> widget1._as_dict
OrderedDict([('mfg_code', 'ACME'), ('model_code', 'X-500'), ('serial_number', '0000000001'), ('sku', 'ACME_X-500'), ('pk', 'ACME_X-500_0000000001')])
>>> widget2 = Widget(mfg_code="ACME", model_code="X-500")
>>> widget2
Widget(mfg_code='ACME', model_code='X-500', serial_number='0000000002', sku='ACME_X-500', pk='ACME_X-500_0000000002')
>>> widget2._as_dict
OrderedDict([('mfg_code', 'ACME'), ('model_code', 'X-500'), ('serial_number', '0000000002'), ('sku', 'ACME_X-500'), ('pk', 'ACME_X-500_0000000002')])
### Response:
def namedspace(typename, required_fields=(), optional_fields=(), mutable_fields=(),
default_values=frozendict(), default_value_factories=frozendict(),
return_none=False):
"""Builds a new class that encapsulates a namespace and provides
various ways to access it.
The typename argument is required and is the name of the
namedspace class that will be generated.
The required_fields and optional_fields arguments can be a string
or sequence of strings and together specify the fields that
instances of the namedspace class have.
Values for the required fields must be provided somehow when the
instance is created. Values for optional fields may be provided
later, or maybe not at all.
If an optional field is queried before its value has been set,
an AttributeError will be raised. This behavior can be altered
to cause None to be returned instead by setting the return_none
keyword argument to True.
The mutable_fields argument specifies which fields will be mutable,
if any. By default, all fields are immutable and all instances are
hashable and can be used as dictionary keys. If any fields are set
as mutable, all instances are not hashable and cannot be used as
dictionary keys.
The default_values mapping provides simple default values for the
fields.
The default_value_factories mapping provides a more flexible, but
more complex, mechanism for providing default values. The value of
each item is a callable that takes a single argument, the
namedspace instance, and returns the default value for the field.
The default_values_factories mapping is only consulted if there
is no default value for the field in the default_values mapping.
Here is a simple example, using only the required fields argument:
>>> SimpleNS = namedspace("SimpleNS", ("id", "name", "description"))
>>> SimpleNS
<class 'namedspace.SimpleNS'>
There are built-in properties to access collections and iterators
associated with the namespace class.
>>> SimpleNS._field_names
('id', 'name', 'description')
>>> tuple(SimpleNS._field_names_iter)
('id', 'name', 'description')
Once the class has been created, it can be instantiated like any
other class. However, a value for all of the required fields must
be provided.
>>> simple_ns = SimpleNS(id=1, description="Simple Description")
Traceback (most recent call last):
<snip/>
ValueError: A value for field 'name' is required.
>>> simple_ns = SimpleNS(id=1, name="Simple Name", description="Simple Description")
>>> simple_ns
SimpleNS(id=1, name='Simple Name', description='Simple Description')
An instance of a namedspace class provides standard attribute
access to its fields.
>>> simple_ns.id
1
>>> simple_ns.name
'Simple Name'
>>> simple_ns.description
'Simple Description'
In addition to standard attribute access, instances of a namedspace
class implement a MutableMapping interface.
>>> 'id' in simple_ns
True
>>> for field_name in simple_ns:
... print field_name
id
name
description
>>> len(simple_ns)
3
>>> simple_ns["id"]
1
>>> simple_ns["name"]
'Simple Name'
>>> simple_ns["description"]
'Simple Description'
There are built-in properties to access collections and iterators
associated with the namespace.
The namespace encapsulated by a namedspace class is stored in an
OrderedDict, so order of the collections is the same as the order
that the fields were specified.
All of these properties use the standard "non-public" naming
convention in order to not pollute the public namespace.
>>> simple_ns._field_names
('id', 'name', 'description')
>>> tuple(simple_ns._field_names_iter)
('id', 'name', 'description')
>>> simple_ns._field_values
(1, 'Simple Name', 'Simple Description')
>>> tuple(simple_ns._field_values_iter)
(1, 'Simple Name', 'Simple Description')
>>> simple_ns._field_items
[('id', 1), ('name', 'Simple Name'), ('description', 'Simple Description')]
>>> list(simple_ns._field_items_iter)
[('id', 1), ('name', 'Simple Name'), ('description', 'Simple Description')]
>>> simple_ns._as_dict
OrderedDict([('id', 1), ('name', 'Simple Name'), ('description', 'Simple Description')])
Here is a more complex example, using most of the other arguments:
>>> from itertools import count
>>> ComplexNS = namedspace("ComplexNS", "id", optional_fields=("name", "description", "extra"),
... mutable_fields=("description", "extra"), default_values={"description": "None available"},
... default_value_factories={"id": lambda self, counter=count(start=1): counter.next(),
... "name": lambda self: "Name for id={id}".format(id=self.id)})
>>> complex_ns1 = ComplexNS()
>>> complex_ns1.id
1
The value of 1 was automatically assigned by the
default_value_factory for the 'id' field, in this case a lambda
closure that hooks up an instance of itertools.count.
>>> complex_ns1.name
'Name for id=1'
This value was also generated by a default value factory. In this
case, the factory for the 'name' attribute uses the value of the
'id' attribute to compute the default value.
>>> complex_ns1.description
'None available'
This value came from the default_values mapping.
The description field was set as a mutable field, which allows
it to be modified.
>>> complex_ns1.description = "Some fancy description"
>>> complex_ns1.description
'Some fancy description'
Its value can also be deleted.
>>> del complex_ns1.description
>>> complex_ns1.description
'None available'
Since its modified value was deleted, and it has a default value,
it has reverted to its default value.
The extra field is a valid field in this namedspace, but it has not
yet been assigned a value and does not have a default.
>>> complex_ns1.extra
Traceback (most recent call last):
<snip/>
AttributeError: "Field 'extra' does not yet exist in this ComplexNS namedspace instance."
Sometimes, having an exception raised if an optional field is
missing, and being forced to handle it, is annoying. A namedspace
class can be configured at creation time to return None instead of
raising exceptions for optional fields by setting the `return_none`
parameter to `True`. Here is a trivial example:
>>> QuietNS = namedspace("QuietNS", optional_fields=("might_be_none",), return_none=True)
>>> quiet_ns1 = QuietNS(might_be_none="Nope, not this time")
>>> quiet_ns1.might_be_none
'Nope, not this time'
>>> quiet_ns2 = QuietNS()
>>> quiet_ns2.might_be_none
>>>
Having the namedspace quietly return `None` makes sense in some
situations. But be careful. Understand the full implications of
this alternate behavior on the code that uses it. Subtle data-
dependent bugs can be introduced by this behavior, which is why it
is not enabled by default.
Now, back to our "complex" example.
Since the 'extra' field is one of the mutable fields, we can give it a value.
>>> complex_ns1.extra = "Lasts a long, long time"
>>> complex_ns1.extra
'Lasts a long, long time'
Only fields that have been declared as either required or optional
are allowed.
>>> complex_ns1.some_other_field = "some other value"
Traceback (most recent call last):
<snip/>
FieldNameError: "Field 'some_other_field' does not exist in ComplexNS namedspace."
Finally, to illustrate that our counter is working as it should, if
we instantiate another instance, our id field will get the next
counter value.
>>> complex_ns2 = ComplexNS()
>>> complex_ns2.id
2
A common use case for a namedspace class is as a base class for
another custom class that has additional members such as properties
and methods. This way, the custom class gets all of the namedspace
behavior through declarative configuration, instead of having
to re-define that behavior imperatively.
The following is an example where one of the required fields is
generated at instantiation time, and the values for the two
optional fields are calculated values provided by properties in
the subclass.
>>> from collections import Counter
>>> class Widget(namedspace("_Widget", ("mfg_code", "model_code", "serial_number"), optional_fields=("sku", "pk"),
... return_none=True)):
... _sn_map = Counter()
... def __init__(self, *args, **kwargs):
... sn_key = (kwargs["mfg_code"], kwargs["model_code"])
... self._sn_map[sn_key] += 1
... kwargs["serial_number"] = "{:010}".format(self._sn_map[sn_key])
... super(Widget, self).__init__(*args, **kwargs)
... @property
... def sku(self):
... return "{}_{}".format(self.mfg_code, self.model_code)
... @property
... def pk(self):
... return "{}_{}".format(self.sku, self.serial_number)
>>> widget1 = Widget(mfg_code="ACME", model_code="X-500")
>>> widget1
Widget(mfg_code='ACME', model_code='X-500', serial_number='0000000001', sku='ACME_X-500', pk='ACME_X-500_0000000001')
>>> widget1._as_dict
OrderedDict([('mfg_code', 'ACME'), ('model_code', 'X-500'), ('serial_number', '0000000001'), ('sku', 'ACME_X-500'), ('pk', 'ACME_X-500_0000000001')])
>>> widget2 = Widget(mfg_code="ACME", model_code="X-500")
>>> widget2
Widget(mfg_code='ACME', model_code='X-500', serial_number='0000000002', sku='ACME_X-500', pk='ACME_X-500_0000000002')
>>> widget2._as_dict
OrderedDict([('mfg_code', 'ACME'), ('model_code', 'X-500'), ('serial_number', '0000000002'), ('sku', 'ACME_X-500'), ('pk', 'ACME_X-500_0000000002')])
"""
# Initialize the list of arguments that will get put into the
# doc string of the generated class
arg_list_items = []
#
# Validate parameters
#
for arg_name in ("required_fields", "optional_fields", "mutable_fields"):
arg_value = locals()[arg_name]
if isinstance(arg_value, basestring):
arg_value = (arg_value,)
exec "{arg_name} = arg_value".format(arg_name=arg_name)
elif not isinstance(arg_value, Container):
raise ValueError("Value for argument '{arg_name}' must be a string or container of strings.".format(
arg_name=arg_name))
for field_name in arg_value:
if not isinstance(field_name, basestring):
raise ValueError("Items of container argument '{arg_name}' must be strings.".format(arg_name=arg_name))
if len(arg_value) != len(frozenset(arg_value)):
raise ValueError("Value for argument '{arg_name}' contains duplicate fields.".format(
arg_name=arg_name))
arg_list_items.append("{arg_name}={arg_value!r}".format(arg_name=arg_name, arg_value=tuple(arg_value)))
exec "{arg_name}_set = frozenset(arg_value)".format(arg_name=arg_name)
all_fields = tuple(required_fields + optional_fields)
if not all_fields:
raise ValueError("At least one required or optional field must be provided.")
all_fields_set = frozenset(all_fields)
for field_name in mutable_fields:
if field_name not in all_fields_set:
raise ValueError("Mutable field '{field_name}' is not a required or optional field.".format(
field_name=field_name))
for arg_name in ("default_values", "default_value_factories"):
arg_value = locals()[arg_name]
if not isinstance(arg_value, Mapping):
raise ValueError("Value for argument '{arg_name}' must be a mapping.".format(arg_name=arg_name))
default_field_names = frozenset(arg_value.iterkeys())
if not default_field_names.issubset(all_fields_set):
bad_default_field_names = default_field_names - all_fields_set
raise ValueError("Value for argument '{arg_name}' contains invalid field(s) '{field_names}'.".format(
arg_name=arg_name, field_names=", ".join(bad_default_field_names)))
arg_list_items.append("{arg_name}={arg_value!r}".format(arg_name=arg_name, arg_value=dict(arg_value)))
exec "{arg_name} = frozendict(arg_value)".format(arg_name=arg_name)
for field_name, factory in default_value_factories.iteritems():
if not callable(factory):
raise ValueError("Default value factory for '{field_name}' is not callable.".format(field_name=field_name))
# Fill-in the class template
class_definition = _class_template.format(
typename=typename,
arg_list=", ".join(arg_list_items),
)
# Execute the template string in a temporary namespace and support
# tracing utilities by setting a value for frame.f_globals['__name__']
namespace = dict(
__name__='namedspace_{typename}'.format(typename=typename),
all_fields=all_fields,
all_fields_set=all_fields_set,
required_fields_set=locals()["required_fields_set"],
mutable_fields_set=locals()["mutable_fields_set"],
default_values=default_values,
default_value_factories=default_value_factories,
Hashable=Hashable,
MutableMapping=MutableMapping,
OrderedDict=OrderedDict,
return_none=return_none,
NamedspaceMeta=NamedspaceMeta,
)
#
# Code from here down copied verbatim from namedtuple
#
try:
exec class_definition in namespace
except SyntaxError as e:
raise SyntaxError(e.message + ':\n' + class_definition)
result = namespace[typename]
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named tuple is created. Bypass this step in enviroments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython).
try:
result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
return result |
def visit_ifexp(self, node, parent):
"""visit a IfExp node by returning a fresh instance of it"""
newnode = nodes.IfExp(node.lineno, node.col_offset, parent)
newnode.postinit(
self.visit(node.test, newnode),
self.visit(node.body, newnode),
self.visit(node.orelse, newnode),
)
return newnode | visit a IfExp node by returning a fresh instance of it | Below is the the instruction that describes the task:
### Input:
visit a IfExp node by returning a fresh instance of it
### Response:
def visit_ifexp(self, node, parent):
"""visit a IfExp node by returning a fresh instance of it"""
newnode = nodes.IfExp(node.lineno, node.col_offset, parent)
newnode.postinit(
self.visit(node.test, newnode),
self.visit(node.body, newnode),
self.visit(node.orelse, newnode),
)
return newnode |
def send(self, message, json=False, callback=None):
"""Use send to send a simple string message.
If ``json`` is True, the message will be encoded as a JSON object
on the wire, and decoded on the other side.
This is mostly for backwards compatibility. ``emit()`` is more fun.
:param callback: This is a callback function that will be
called automatically by the client upon
reception. It does not verify that the
listener over there was completed with
success. It just tells you that the browser
got a hold of the packet.
:type callback: callable
"""
pkt = dict(type="message", data=message, endpoint=self.ns_name)
if json:
pkt['type'] = "json"
if callback:
# By passing ack=True, we use the old behavior of being returned
# an 'ack' packet, automatically triggered by the client-side
# with no user-code being run. The emit() version of the
# callback is more useful I think :) So migrate your code.
pkt['ack'] = True
pkt['id'] = msgid = self.socket._get_next_msgid()
self.socket._save_ack_callback(msgid, callback)
self.socket.send_packet(pkt) | Use send to send a simple string message.
If ``json`` is True, the message will be encoded as a JSON object
on the wire, and decoded on the other side.
This is mostly for backwards compatibility. ``emit()`` is more fun.
:param callback: This is a callback function that will be
called automatically by the client upon
reception. It does not verify that the
listener over there was completed with
success. It just tells you that the browser
got a hold of the packet.
:type callback: callable | Below is the the instruction that describes the task:
### Input:
Use send to send a simple string message.
If ``json`` is True, the message will be encoded as a JSON object
on the wire, and decoded on the other side.
This is mostly for backwards compatibility. ``emit()`` is more fun.
:param callback: This is a callback function that will be
called automatically by the client upon
reception. It does not verify that the
listener over there was completed with
success. It just tells you that the browser
got a hold of the packet.
:type callback: callable
### Response:
def send(self, message, json=False, callback=None):
"""Use send to send a simple string message.
If ``json`` is True, the message will be encoded as a JSON object
on the wire, and decoded on the other side.
This is mostly for backwards compatibility. ``emit()`` is more fun.
:param callback: This is a callback function that will be
called automatically by the client upon
reception. It does not verify that the
listener over there was completed with
success. It just tells you that the browser
got a hold of the packet.
:type callback: callable
"""
pkt = dict(type="message", data=message, endpoint=self.ns_name)
if json:
pkt['type'] = "json"
if callback:
# By passing ack=True, we use the old behavior of being returned
# an 'ack' packet, automatically triggered by the client-side
# with no user-code being run. The emit() version of the
# callback is more useful I think :) So migrate your code.
pkt['ack'] = True
pkt['id'] = msgid = self.socket._get_next_msgid()
self.socket._save_ack_callback(msgid, callback)
self.socket.send_packet(pkt) |
def absorb(self, other):
"""
For attributes of others that value is not None, assign it to self.
**中文文档**
将另一个文档中的数据更新到本条文档。当且仅当数据值不为None时。
"""
if not isinstance(other, self.__class__):
raise TypeError("`other` has to be a instance of %s!" %
self.__class__)
for attr, value in other.items():
if value is not None:
setattr(self, attr, deepcopy(value)) | For attributes of others that value is not None, assign it to self.
**中文文档**
将另一个文档中的数据更新到本条文档。当且仅当数据值不为None时。 | Below is the the instruction that describes the task:
### Input:
For attributes of others that value is not None, assign it to self.
**中文文档**
将另一个文档中的数据更新到本条文档。当且仅当数据值不为None时。
### Response:
def absorb(self, other):
"""
For attributes of others that value is not None, assign it to self.
**中文文档**
将另一个文档中的数据更新到本条文档。当且仅当数据值不为None时。
"""
if not isinstance(other, self.__class__):
raise TypeError("`other` has to be a instance of %s!" %
self.__class__)
for attr, value in other.items():
if value is not None:
setattr(self, attr, deepcopy(value)) |
def set_project_path(self, path):
"""
Sets the project path and disables the project search in the combobox
if the value of path is None.
"""
if path is None:
self.project_path = None
self.model().item(PROJECT, 0).setEnabled(False)
if self.currentIndex() == PROJECT:
self.setCurrentIndex(CWD)
else:
path = osp.abspath(path)
self.project_path = path
self.model().item(PROJECT, 0).setEnabled(True) | Sets the project path and disables the project search in the combobox
if the value of path is None. | Below is the the instruction that describes the task:
### Input:
Sets the project path and disables the project search in the combobox
if the value of path is None.
### Response:
def set_project_path(self, path):
"""
Sets the project path and disables the project search in the combobox
if the value of path is None.
"""
if path is None:
self.project_path = None
self.model().item(PROJECT, 0).setEnabled(False)
if self.currentIndex() == PROJECT:
self.setCurrentIndex(CWD)
else:
path = osp.abspath(path)
self.project_path = path
self.model().item(PROJECT, 0).setEnabled(True) |
def model_resource(self, resource_name):
""" Details of a specific model resource. """
resource = first(
[resource for resource in self.api._registry.values()
if resource.get_api_name() == resource_name])
data = {
'apiVersion': '0.1',
'swaggerVersion': '1.1',
'basePath': '%s%s' % (self.base_uri(), self.api.url_prefix),
'resourcePath': '/meta/%s' % resource.get_api_name(),
'apis': self.get_model_apis(resource),
'models': self.get_model(resource)
}
response = jsonify(data)
response.headers.add('Cache-Control', 'max-age=0')
return response | Details of a specific model resource. | Below is the the instruction that describes the task:
### Input:
Details of a specific model resource.
### Response:
def model_resource(self, resource_name):
""" Details of a specific model resource. """
resource = first(
[resource for resource in self.api._registry.values()
if resource.get_api_name() == resource_name])
data = {
'apiVersion': '0.1',
'swaggerVersion': '1.1',
'basePath': '%s%s' % (self.base_uri(), self.api.url_prefix),
'resourcePath': '/meta/%s' % resource.get_api_name(),
'apis': self.get_model_apis(resource),
'models': self.get_model(resource)
}
response = jsonify(data)
response.headers.add('Cache-Control', 'max-age=0')
return response |
def get_all_kernels(self, kernel_ids=None, owners=None):
"""
Retrieve all the EC2 kernels available on your account.
Constructs a filter to allow the processing to happen server side.
:type kernel_ids: list
:param kernel_ids: A list of strings with the image IDs wanted
:type owners: list
:param owners: A list of owner IDs
:rtype: list
:return: A list of :class:`boto.ec2.image.Image`
"""
params = {}
if kernel_ids:
self.build_list_params(params, kernel_ids, 'ImageId')
if owners:
self.build_list_params(params, owners, 'Owner')
filter = {'image-type' : 'kernel'}
self.build_filter_params(params, filter)
return self.get_list('DescribeImages', params,
[('item', Image)], verb='POST') | Retrieve all the EC2 kernels available on your account.
Constructs a filter to allow the processing to happen server side.
:type kernel_ids: list
:param kernel_ids: A list of strings with the image IDs wanted
:type owners: list
:param owners: A list of owner IDs
:rtype: list
:return: A list of :class:`boto.ec2.image.Image` | Below is the the instruction that describes the task:
### Input:
Retrieve all the EC2 kernels available on your account.
Constructs a filter to allow the processing to happen server side.
:type kernel_ids: list
:param kernel_ids: A list of strings with the image IDs wanted
:type owners: list
:param owners: A list of owner IDs
:rtype: list
:return: A list of :class:`boto.ec2.image.Image`
### Response:
def get_all_kernels(self, kernel_ids=None, owners=None):
"""
Retrieve all the EC2 kernels available on your account.
Constructs a filter to allow the processing to happen server side.
:type kernel_ids: list
:param kernel_ids: A list of strings with the image IDs wanted
:type owners: list
:param owners: A list of owner IDs
:rtype: list
:return: A list of :class:`boto.ec2.image.Image`
"""
params = {}
if kernel_ids:
self.build_list_params(params, kernel_ids, 'ImageId')
if owners:
self.build_list_params(params, owners, 'Owner')
filter = {'image-type' : 'kernel'}
self.build_filter_params(params, filter)
return self.get_list('DescribeImages', params,
[('item', Image)], verb='POST') |
def get_items(self, project=None, scope_path=None, recursion_level=None, include_links=None, version_descriptor=None):
"""GetItems.
Get a list of Tfvc items
:param str project: Project ID or project name
:param str scope_path: Version control path of a folder to return multiple items.
:param str recursion_level: None (just the item), or OneLevel (contents of a folder).
:param bool include_links: True to include links.
:param :class:`<TfvcVersionDescriptor> <azure.devops.v5_0.tfvc.models.TfvcVersionDescriptor>` version_descriptor:
:rtype: [TfvcItem]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if scope_path is not None:
query_parameters['scopePath'] = self._serialize.query('scope_path', scope_path, 'str')
if recursion_level is not None:
query_parameters['recursionLevel'] = self._serialize.query('recursion_level', recursion_level, 'str')
if include_links is not None:
query_parameters['includeLinks'] = self._serialize.query('include_links', include_links, 'bool')
if version_descriptor is not None:
if version_descriptor.version_option is not None:
query_parameters['versionDescriptor.versionOption'] = version_descriptor.version_option
if version_descriptor.version_type is not None:
query_parameters['versionDescriptor.versionType'] = version_descriptor.version_type
if version_descriptor.version is not None:
query_parameters['versionDescriptor.version'] = version_descriptor.version
response = self._send(http_method='GET',
location_id='ba9fc436-9a38-4578-89d6-e4f3241f5040',
version='5.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[TfvcItem]', self._unwrap_collection(response)) | GetItems.
Get a list of Tfvc items
:param str project: Project ID or project name
:param str scope_path: Version control path of a folder to return multiple items.
:param str recursion_level: None (just the item), or OneLevel (contents of a folder).
:param bool include_links: True to include links.
:param :class:`<TfvcVersionDescriptor> <azure.devops.v5_0.tfvc.models.TfvcVersionDescriptor>` version_descriptor:
:rtype: [TfvcItem] | Below is the the instruction that describes the task:
### Input:
GetItems.
Get a list of Tfvc items
:param str project: Project ID or project name
:param str scope_path: Version control path of a folder to return multiple items.
:param str recursion_level: None (just the item), or OneLevel (contents of a folder).
:param bool include_links: True to include links.
:param :class:`<TfvcVersionDescriptor> <azure.devops.v5_0.tfvc.models.TfvcVersionDescriptor>` version_descriptor:
:rtype: [TfvcItem]
### Response:
def get_items(self, project=None, scope_path=None, recursion_level=None, include_links=None, version_descriptor=None):
"""GetItems.
Get a list of Tfvc items
:param str project: Project ID or project name
:param str scope_path: Version control path of a folder to return multiple items.
:param str recursion_level: None (just the item), or OneLevel (contents of a folder).
:param bool include_links: True to include links.
:param :class:`<TfvcVersionDescriptor> <azure.devops.v5_0.tfvc.models.TfvcVersionDescriptor>` version_descriptor:
:rtype: [TfvcItem]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if scope_path is not None:
query_parameters['scopePath'] = self._serialize.query('scope_path', scope_path, 'str')
if recursion_level is not None:
query_parameters['recursionLevel'] = self._serialize.query('recursion_level', recursion_level, 'str')
if include_links is not None:
query_parameters['includeLinks'] = self._serialize.query('include_links', include_links, 'bool')
if version_descriptor is not None:
if version_descriptor.version_option is not None:
query_parameters['versionDescriptor.versionOption'] = version_descriptor.version_option
if version_descriptor.version_type is not None:
query_parameters['versionDescriptor.versionType'] = version_descriptor.version_type
if version_descriptor.version is not None:
query_parameters['versionDescriptor.version'] = version_descriptor.version
response = self._send(http_method='GET',
location_id='ba9fc436-9a38-4578-89d6-e4f3241f5040',
version='5.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[TfvcItem]', self._unwrap_collection(response)) |
def smtp_connection(c):
"""Create an SMTP connection from a Config object"""
if c.smtp_ssl:
klass = smtplib.SMTP_SSL
else:
klass = smtplib.SMTP
conn = klass(c.smtp_host, c.smtp_port, timeout=c.smtp_timeout)
if not c.smtp_ssl:
conn.ehlo()
conn.starttls()
conn.ehlo()
if c.smtp_username:
conn.login(c.smtp_username, c.smtp_password)
return conn | Create an SMTP connection from a Config object | Below is the the instruction that describes the task:
### Input:
Create an SMTP connection from a Config object
### Response:
def smtp_connection(c):
"""Create an SMTP connection from a Config object"""
if c.smtp_ssl:
klass = smtplib.SMTP_SSL
else:
klass = smtplib.SMTP
conn = klass(c.smtp_host, c.smtp_port, timeout=c.smtp_timeout)
if not c.smtp_ssl:
conn.ehlo()
conn.starttls()
conn.ehlo()
if c.smtp_username:
conn.login(c.smtp_username, c.smtp_password)
return conn |
def is_owner():
"""A :func:`.check` that checks if the person invoking this command is the
owner of the bot.
This is powered by :meth:`.Bot.is_owner`.
This check raises a special exception, :exc:`.NotOwner` that is derived
from :exc:`.CheckFailure`.
"""
async def predicate(ctx):
if not await ctx.bot.is_owner(ctx.author):
raise NotOwner('You do not own this bot.')
return True
return check(predicate) | A :func:`.check` that checks if the person invoking this command is the
owner of the bot.
This is powered by :meth:`.Bot.is_owner`.
This check raises a special exception, :exc:`.NotOwner` that is derived
from :exc:`.CheckFailure`. | Below is the the instruction that describes the task:
### Input:
A :func:`.check` that checks if the person invoking this command is the
owner of the bot.
This is powered by :meth:`.Bot.is_owner`.
This check raises a special exception, :exc:`.NotOwner` that is derived
from :exc:`.CheckFailure`.
### Response:
def is_owner():
"""A :func:`.check` that checks if the person invoking this command is the
owner of the bot.
This is powered by :meth:`.Bot.is_owner`.
This check raises a special exception, :exc:`.NotOwner` that is derived
from :exc:`.CheckFailure`.
"""
async def predicate(ctx):
if not await ctx.bot.is_owner(ctx.author):
raise NotOwner('You do not own this bot.')
return True
return check(predicate) |
def _has_role(self, role_name_or_list):
"""
Whether the user has this role name
"""
if not isinstance(role_name_or_list, list):
role_name_or_list = [role_name_or_list]
return any(
[r.name in role_name_or_list for r in self.get_user_roles()]) | Whether the user has this role name | Below is the the instruction that describes the task:
### Input:
Whether the user has this role name
### Response:
def _has_role(self, role_name_or_list):
"""
Whether the user has this role name
"""
if not isinstance(role_name_or_list, list):
role_name_or_list = [role_name_or_list]
return any(
[r.name in role_name_or_list for r in self.get_user_roles()]) |
def abspath(myPath):
import sys, os
""" Get absolute path to resource, works for dev and for PyInstaller """
try:
# PyInstaller creates a temp folder and stores path in _MEIPASS
base_path = sys._MEIPASS
return os.path.join(base_path, os.path.basename(myPath))
except Exception:
base_path = os.path.abspath(os.path.dirname(__file__))
return os.path.join(base_path, myPath) | Get absolute path to resource, works for dev and for PyInstaller | Below is the the instruction that describes the task:
### Input:
Get absolute path to resource, works for dev and for PyInstaller
### Response:
def abspath(myPath):
import sys, os
""" Get absolute path to resource, works for dev and for PyInstaller """
try:
# PyInstaller creates a temp folder and stores path in _MEIPASS
base_path = sys._MEIPASS
return os.path.join(base_path, os.path.basename(myPath))
except Exception:
base_path = os.path.abspath(os.path.dirname(__file__))
return os.path.join(base_path, myPath) |
def decorator(caller, _func=None):
"""decorator(caller) converts a caller function into a decorator"""
if _func is not None: # return a decorated function
# this is obsolete behavior; you should use decorate instead
return decorate(_func, caller)
# else return a decorator function
if inspect.isclass(caller):
name = caller.__name__.lower()
doc = 'decorator(%s) converts functions/generators into ' \
'factories of %s objects' % (caller.__name__, caller.__name__)
elif inspect.isfunction(caller):
if caller.__name__ == '<lambda>':
name = '_lambda_'
else:
name = caller.__name__
doc = caller.__doc__
else: # assume caller is an object with a __call__ method
name = caller.__class__.__name__.lower()
doc = caller.__call__.__doc__
evaldict = dict(_call_=caller, _decorate_=decorate)
return FunctionMaker.create(
'%s(func)' % name, 'return _decorate_(func, _call_)',
evaldict, doc=doc, module=caller.__module__,
__wrapped__=caller) | decorator(caller) converts a caller function into a decorator | Below is the the instruction that describes the task:
### Input:
decorator(caller) converts a caller function into a decorator
### Response:
def decorator(caller, _func=None):
"""decorator(caller) converts a caller function into a decorator"""
if _func is not None: # return a decorated function
# this is obsolete behavior; you should use decorate instead
return decorate(_func, caller)
# else return a decorator function
if inspect.isclass(caller):
name = caller.__name__.lower()
doc = 'decorator(%s) converts functions/generators into ' \
'factories of %s objects' % (caller.__name__, caller.__name__)
elif inspect.isfunction(caller):
if caller.__name__ == '<lambda>':
name = '_lambda_'
else:
name = caller.__name__
doc = caller.__doc__
else: # assume caller is an object with a __call__ method
name = caller.__class__.__name__.lower()
doc = caller.__call__.__doc__
evaldict = dict(_call_=caller, _decorate_=decorate)
return FunctionMaker.create(
'%s(func)' % name, 'return _decorate_(func, _call_)',
evaldict, doc=doc, module=caller.__module__,
__wrapped__=caller) |
def matches_key_in_query(self, key, query_key, query):
"""
增加查询条件,限制查询结果对象指定字段的值,与另外一个查询对象的返回结果指定的值相同。
:param key: 查询条件字段名
:param query_key: 查询对象返回结果的字段名
:param query: 查询对象
:type query: Query
:rtype: Query
"""
dumped = query.dump()
dumped['className'] = query._query_class._class_name
self._add_condition(key, '$select', {'key': query_key, 'query': dumped})
return self | 增加查询条件,限制查询结果对象指定字段的值,与另外一个查询对象的返回结果指定的值相同。
:param key: 查询条件字段名
:param query_key: 查询对象返回结果的字段名
:param query: 查询对象
:type query: Query
:rtype: Query | Below is the the instruction that describes the task:
### Input:
增加查询条件,限制查询结果对象指定字段的值,与另外一个查询对象的返回结果指定的值相同。
:param key: 查询条件字段名
:param query_key: 查询对象返回结果的字段名
:param query: 查询对象
:type query: Query
:rtype: Query
### Response:
def matches_key_in_query(self, key, query_key, query):
"""
增加查询条件,限制查询结果对象指定字段的值,与另外一个查询对象的返回结果指定的值相同。
:param key: 查询条件字段名
:param query_key: 查询对象返回结果的字段名
:param query: 查询对象
:type query: Query
:rtype: Query
"""
dumped = query.dump()
dumped['className'] = query._query_class._class_name
self._add_condition(key, '$select', {'key': query_key, 'query': dumped})
return self |
def _fake_referencenames(self, namespace, **params):
"""
Implements a mock WBEM server responder for
:meth:`~pywbem.WBEMConnection.ReferenceNames`
"""
assert params['ResultClass'] is None or \
isinstance(params['ResultClass'], CIMClassName)
rc = None if params['ResultClass'] is None else \
params['ResultClass'].classname
role = params['Role']
obj_name = params['ObjectName']
classname = obj_name.classname
if isinstance(obj_name, CIMClassName):
ref_classnames = self._get_reference_classnames(classname,
namespace,
rc, role)
ref_result = [CIMClassName(classname=cn, host=self.host,
namespace=namespace)
for cn in ref_classnames]
return self._return_assoc_tuple(ref_result)
assert isinstance(obj_name, CIMInstanceName)
ref_paths = self._get_reference_instnames(obj_name, namespace,
rc, role)
rtn_names = [deepcopy(r) for r in ref_paths]
for iname in rtn_names:
if iname.host is None:
iname.host = self.host
return self._return_assoc_tuple(rtn_names) | Implements a mock WBEM server responder for
:meth:`~pywbem.WBEMConnection.ReferenceNames` | Below is the the instruction that describes the task:
### Input:
Implements a mock WBEM server responder for
:meth:`~pywbem.WBEMConnection.ReferenceNames`
### Response:
def _fake_referencenames(self, namespace, **params):
"""
Implements a mock WBEM server responder for
:meth:`~pywbem.WBEMConnection.ReferenceNames`
"""
assert params['ResultClass'] is None or \
isinstance(params['ResultClass'], CIMClassName)
rc = None if params['ResultClass'] is None else \
params['ResultClass'].classname
role = params['Role']
obj_name = params['ObjectName']
classname = obj_name.classname
if isinstance(obj_name, CIMClassName):
ref_classnames = self._get_reference_classnames(classname,
namespace,
rc, role)
ref_result = [CIMClassName(classname=cn, host=self.host,
namespace=namespace)
for cn in ref_classnames]
return self._return_assoc_tuple(ref_result)
assert isinstance(obj_name, CIMInstanceName)
ref_paths = self._get_reference_instnames(obj_name, namespace,
rc, role)
rtn_names = [deepcopy(r) for r in ref_paths]
for iname in rtn_names:
if iname.host is None:
iname.host = self.host
return self._return_assoc_tuple(rtn_names) |
def _doCommandCompletion(self):
""" Command-completion method """
prefix = ''.join(self.inputBuffer).strip().upper()
matches = self.completion.keys(prefix)
matchLen = len(matches)
if matchLen == 0 and prefix[-1] == '=':
try:
command = prefix[:-1]
except KeyError:
pass
else:
self.__printCommandSyntax(command)
elif matchLen > 0:
if matchLen == 1:
if matches[0] == prefix:
# User has already entered command - show command syntax
self.__printCommandSyntax(prefix)
else:
# Complete only possible command
self.inputBuffer = list(matches[0])
self.cursorPos = len(self.inputBuffer)
self._refreshInputPrompt(len(self.inputBuffer))
return
else:
commonPrefix = self.completion.longestCommonPrefix(''.join(self.inputBuffer))
self.inputBuffer = list(commonPrefix)
self.cursorPos = len(self.inputBuffer)
if matchLen > 20:
matches = matches[:20]
matches.append('... ({0} more)'.format(matchLen - 20))
sys.stdout.write('\n')
for match in matches:
sys.stdout.write(' {0} '.format(match))
sys.stdout.write('\n')
sys.stdout.flush()
self._refreshInputPrompt(len(self.inputBuffer)) | Command-completion method | Below is the the instruction that describes the task:
### Input:
Command-completion method
### Response:
def _doCommandCompletion(self):
""" Command-completion method """
prefix = ''.join(self.inputBuffer).strip().upper()
matches = self.completion.keys(prefix)
matchLen = len(matches)
if matchLen == 0 and prefix[-1] == '=':
try:
command = prefix[:-1]
except KeyError:
pass
else:
self.__printCommandSyntax(command)
elif matchLen > 0:
if matchLen == 1:
if matches[0] == prefix:
# User has already entered command - show command syntax
self.__printCommandSyntax(prefix)
else:
# Complete only possible command
self.inputBuffer = list(matches[0])
self.cursorPos = len(self.inputBuffer)
self._refreshInputPrompt(len(self.inputBuffer))
return
else:
commonPrefix = self.completion.longestCommonPrefix(''.join(self.inputBuffer))
self.inputBuffer = list(commonPrefix)
self.cursorPos = len(self.inputBuffer)
if matchLen > 20:
matches = matches[:20]
matches.append('... ({0} more)'.format(matchLen - 20))
sys.stdout.write('\n')
for match in matches:
sys.stdout.write(' {0} '.format(match))
sys.stdout.write('\n')
sys.stdout.flush()
self._refreshInputPrompt(len(self.inputBuffer)) |
def parse_args(*args, **kwargs):
"""
Parse the args for the command.
It should be possible for one to specify '--ns', '-x', and '--rename'
multiple times:
>>> args = parse_args(['--ns', 'foo', 'bar', '--ns', 'baz'])
>>> args.ns
['foo', 'bar', 'baz']
>>> parse_args(['-x', '--exclude']).exclude
[]
>>> renames = parse_args(['--rename', 'a=b', '--rename', 'b=c']).rename
>>> len(renames)
2
>>> type(renames)
<class 'jaraco.mongodb.oplog.Renamer'>
"""
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument(
"--help",
help="show usage information",
action="help",
)
parser.add_argument(
"--source", metavar="host[:port]",
help="""Hostname of the mongod server from which oplog
operations are going to be pulled. Called "--from"
in mongooplog.""",
)
parser.add_argument(
'--oplogns', default='local.oplog.rs',
help="Source namespace for oplog",
)
parser.add_argument(
"--dest", metavar="host[:port]",
help="""
Hostname of the mongod server (or replica set as
<set name>/s1,s2) to which oplog operations
are going to be applied. Default is "localhost".
Called "--host" in mongooplog.
""",
)
parser.add_argument(
"-w", "--window",
dest="start_ts",
metavar="WINDOW",
type=compose(
Timestamp.for_window,
delta_from_seconds,
pytimeparse.parse,
),
help="""Time window to query, like "3 days" or "24:00"
(24 hours, 0 minutes).""",
)
parser.add_argument(
"-f", "--follow", action="store_true",
help="""Wait for new data in oplog. Makes the utility
polling oplog forever (until interrupted). New data
is going to be applied immediately with at most one
second delay.""",
)
parser.add_argument(
"--ns", nargs="*", default=[],
action=Extend,
help="""Process only these namespaces, ignoring all others.
Space separated list of strings in form of ``dname``
or ``dbname.collection``. May be specified multiple times.
""",
)
parser.add_argument(
"-x", "--exclude", nargs="*", default=[],
action=Extend,
help="""List of space separated namespaces which should be
ignored. Can be in form of ``dname`` or ``dbname.collection``.
May be specified multiple times.
""",
)
parser.add_argument(
"--rename", nargs="*", default=[],
metavar="ns_old=ns_new",
type=RenameSpec.from_spec,
action=Extend,
help="""
Rename database(s) and/or collection(s). Operations on
namespace ``ns_old`` from the source server will be
applied to namespace ``ns_new`` on the destination server.
May be specified multiple times.
""",
)
parser.add_argument(
"--dry-run", default=False,
action="store_true",
help="Suppress application of ops.",
)
parser.add_argument(
"--resume-file",
metavar="FILENAME",
type=ResumeFile,
default=NullResumeFile(),
help="""Read from and write to this file the last processed
timestamp.""",
)
jaraco.logging.add_arguments(parser)
args = parser.parse_args(*args, **kwargs)
args.rename = Renamer(args.rename)
args.start_ts = args.start_ts or args.resume_file.read()
return args | Parse the args for the command.
It should be possible for one to specify '--ns', '-x', and '--rename'
multiple times:
>>> args = parse_args(['--ns', 'foo', 'bar', '--ns', 'baz'])
>>> args.ns
['foo', 'bar', 'baz']
>>> parse_args(['-x', '--exclude']).exclude
[]
>>> renames = parse_args(['--rename', 'a=b', '--rename', 'b=c']).rename
>>> len(renames)
2
>>> type(renames)
<class 'jaraco.mongodb.oplog.Renamer'> | Below is the the instruction that describes the task:
### Input:
Parse the args for the command.
It should be possible for one to specify '--ns', '-x', and '--rename'
multiple times:
>>> args = parse_args(['--ns', 'foo', 'bar', '--ns', 'baz'])
>>> args.ns
['foo', 'bar', 'baz']
>>> parse_args(['-x', '--exclude']).exclude
[]
>>> renames = parse_args(['--rename', 'a=b', '--rename', 'b=c']).rename
>>> len(renames)
2
>>> type(renames)
<class 'jaraco.mongodb.oplog.Renamer'>
### Response:
def parse_args(*args, **kwargs):
"""
Parse the args for the command.
It should be possible for one to specify '--ns', '-x', and '--rename'
multiple times:
>>> args = parse_args(['--ns', 'foo', 'bar', '--ns', 'baz'])
>>> args.ns
['foo', 'bar', 'baz']
>>> parse_args(['-x', '--exclude']).exclude
[]
>>> renames = parse_args(['--rename', 'a=b', '--rename', 'b=c']).rename
>>> len(renames)
2
>>> type(renames)
<class 'jaraco.mongodb.oplog.Renamer'>
"""
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument(
"--help",
help="show usage information",
action="help",
)
parser.add_argument(
"--source", metavar="host[:port]",
help="""Hostname of the mongod server from which oplog
operations are going to be pulled. Called "--from"
in mongooplog.""",
)
parser.add_argument(
'--oplogns', default='local.oplog.rs',
help="Source namespace for oplog",
)
parser.add_argument(
"--dest", metavar="host[:port]",
help="""
Hostname of the mongod server (or replica set as
<set name>/s1,s2) to which oplog operations
are going to be applied. Default is "localhost".
Called "--host" in mongooplog.
""",
)
parser.add_argument(
"-w", "--window",
dest="start_ts",
metavar="WINDOW",
type=compose(
Timestamp.for_window,
delta_from_seconds,
pytimeparse.parse,
),
help="""Time window to query, like "3 days" or "24:00"
(24 hours, 0 minutes).""",
)
parser.add_argument(
"-f", "--follow", action="store_true",
help="""Wait for new data in oplog. Makes the utility
polling oplog forever (until interrupted). New data
is going to be applied immediately with at most one
second delay.""",
)
parser.add_argument(
"--ns", nargs="*", default=[],
action=Extend,
help="""Process only these namespaces, ignoring all others.
Space separated list of strings in form of ``dname``
or ``dbname.collection``. May be specified multiple times.
""",
)
parser.add_argument(
"-x", "--exclude", nargs="*", default=[],
action=Extend,
help="""List of space separated namespaces which should be
ignored. Can be in form of ``dname`` or ``dbname.collection``.
May be specified multiple times.
""",
)
parser.add_argument(
"--rename", nargs="*", default=[],
metavar="ns_old=ns_new",
type=RenameSpec.from_spec,
action=Extend,
help="""
Rename database(s) and/or collection(s). Operations on
namespace ``ns_old`` from the source server will be
applied to namespace ``ns_new`` on the destination server.
May be specified multiple times.
""",
)
parser.add_argument(
"--dry-run", default=False,
action="store_true",
help="Suppress application of ops.",
)
parser.add_argument(
"--resume-file",
metavar="FILENAME",
type=ResumeFile,
default=NullResumeFile(),
help="""Read from and write to this file the last processed
timestamp.""",
)
jaraco.logging.add_arguments(parser)
args = parser.parse_args(*args, **kwargs)
args.rename = Renamer(args.rename)
args.start_ts = args.start_ts or args.resume_file.read()
return args |
def get_digests(self):
'''return a list of layers from a manifest.
The function is intended to work with both version
1 and 2 of the schema. All layers (including redundant)
are returned. By default, we try version 2 first,
then fall back to version 1.
For version 1 manifests: extraction is reversed
Parameters
==========
manifest: the manifest to read_layers from
'''
if not hasattr(self, 'manifests'):
bot.error('Please retrieve manifests for an image first.')
sys.exit(1)
digests = []
reverseLayers = False
schemaVersions = list(self.manifests.keys())
schemaVersions.reverse()
# Select the manifest to use
for schemaVersion in schemaVersions:
manifest = self.manifests[schemaVersion]
if manifest['schemaVersion'] == 1:
reverseLayers = True
# version 2 indices used by default
layer_key = 'layers'
digest_key = 'digest'
# Docker manifest-v2-2.md#image-manifest
if 'layers' in manifest:
bot.debug('Image manifest version 2.2 found.')
break
# Docker manifest-v2-1.md#example-manifest # noqa
elif 'fsLayers' in manifest:
layer_key = 'fsLayers'
digest_key = 'blobSum'
bot.debug('Image manifest version 2.1 found.')
break
else:
msg = "Improperly formed manifest, "
msg += "layers, manifests, or fsLayers must be present"
bot.error(msg)
sys.exit(1)
for layer in manifest[layer_key]:
if digest_key in layer:
bot.debug("Adding digest %s" % layer[digest_key])
digests.append(layer[digest_key])
# Reverse layer order for manifest version 1.0
if reverseLayers is True:
message = 'v%s manifest, reversing layers' % schemaVersion
bot.debug(message)
digests.reverse()
return digests | return a list of layers from a manifest.
The function is intended to work with both version
1 and 2 of the schema. All layers (including redundant)
are returned. By default, we try version 2 first,
then fall back to version 1.
For version 1 manifests: extraction is reversed
Parameters
==========
manifest: the manifest to read_layers from | Below is the the instruction that describes the task:
### Input:
return a list of layers from a manifest.
The function is intended to work with both version
1 and 2 of the schema. All layers (including redundant)
are returned. By default, we try version 2 first,
then fall back to version 1.
For version 1 manifests: extraction is reversed
Parameters
==========
manifest: the manifest to read_layers from
### Response:
def get_digests(self):
'''return a list of layers from a manifest.
The function is intended to work with both version
1 and 2 of the schema. All layers (including redundant)
are returned. By default, we try version 2 first,
then fall back to version 1.
For version 1 manifests: extraction is reversed
Parameters
==========
manifest: the manifest to read_layers from
'''
if not hasattr(self, 'manifests'):
bot.error('Please retrieve manifests for an image first.')
sys.exit(1)
digests = []
reverseLayers = False
schemaVersions = list(self.manifests.keys())
schemaVersions.reverse()
# Select the manifest to use
for schemaVersion in schemaVersions:
manifest = self.manifests[schemaVersion]
if manifest['schemaVersion'] == 1:
reverseLayers = True
# version 2 indices used by default
layer_key = 'layers'
digest_key = 'digest'
# Docker manifest-v2-2.md#image-manifest
if 'layers' in manifest:
bot.debug('Image manifest version 2.2 found.')
break
# Docker manifest-v2-1.md#example-manifest # noqa
elif 'fsLayers' in manifest:
layer_key = 'fsLayers'
digest_key = 'blobSum'
bot.debug('Image manifest version 2.1 found.')
break
else:
msg = "Improperly formed manifest, "
msg += "layers, manifests, or fsLayers must be present"
bot.error(msg)
sys.exit(1)
for layer in manifest[layer_key]:
if digest_key in layer:
bot.debug("Adding digest %s" % layer[digest_key])
digests.append(layer[digest_key])
# Reverse layer order for manifest version 1.0
if reverseLayers is True:
message = 'v%s manifest, reversing layers' % schemaVersion
bot.debug(message)
digests.reverse()
return digests |
def is_log(value):
"""
This function checks whether file path
that is specified at "log_file" option exists,
whether write permission to the file path.
Return the following value:
case1: exists path and write permission
is_log('/tmp')
'/tmp/hogehoge.log'
case2: non-exists path and write permission
is_log('/tmp/hogehoge')
'/tmp/hogehoge'
In this case, 'hogehoge' doesn't exist.
but 'hogehoge' is considered as a file.
Thus, create log file named 'hogehoge'.
case3: complete non-exists path
is_log('/tmp/hogehoge/fugafuga')
IOError: [Error 2] No such file or directory.
The last part of given path is only considered log_file's name.
In this case, "fugafuga" is considered log_file's name.
case4: syslog case
is_log('syslog')
'syslog'
In any case, check whether given path exists before checking permission.
Notes: Even if given relative path, works fine.
But, if don't use as much as possible if good.
Recommended giving he full path including the file name.
:param str value:
log file path
You can specify not only absolute path but also relational path.
:rtype: str
:return: Converted absolute log file path.
"""
if value.lower() == 'syslog':
return 'syslog'
value = os.path.expanduser(value)
value = os.path.expandvars(value)
value = os.path.abspath(value)
log_file = 'blackbird.log'
if os.path.exists(value):
if os.path.isdir(value):
if os.access(value, os.W_OK):
return os.path.join(value, log_file)
else:
err_message = ('{path}: Permission denied.'
''.format(path=value)
)
raise validate.VdtValueError(err_message)
else:
if os.access(value, os.W_OK):
return value
else:
err_message = ('{path}: Permission denied.'
''.format(path=value)
)
raise validate.VdtValueError(err_message)
else:
directory = os.path.split(value)[0]
if os.path.isdir(directory):
if os.access(directory, os.W_OK):
return value
else:
err_message = ('{directory}: Permission denied.'
''.format(directory=directory)
)
raise validate.VdtValueError(err_message)
else:
if os.path.exists(directory):
err_message = ('{directory} is file.'
''.format(directory=directory)
)
raise validate.VdtTypeError(err_message)
else:
err_message = ('{directory}: No such file or directory.'
''.format(directory=directory)
)
raise validate.VdtValueError(err_message) | This function checks whether file path
that is specified at "log_file" option exists,
whether write permission to the file path.
Return the following value:
case1: exists path and write permission
is_log('/tmp')
'/tmp/hogehoge.log'
case2: non-exists path and write permission
is_log('/tmp/hogehoge')
'/tmp/hogehoge'
In this case, 'hogehoge' doesn't exist.
but 'hogehoge' is considered as a file.
Thus, create log file named 'hogehoge'.
case3: complete non-exists path
is_log('/tmp/hogehoge/fugafuga')
IOError: [Error 2] No such file or directory.
The last part of given path is only considered log_file's name.
In this case, "fugafuga" is considered log_file's name.
case4: syslog case
is_log('syslog')
'syslog'
In any case, check whether given path exists before checking permission.
Notes: Even if given relative path, works fine.
But, if don't use as much as possible if good.
Recommended giving he full path including the file name.
:param str value:
log file path
You can specify not only absolute path but also relational path.
:rtype: str
:return: Converted absolute log file path. | Below is the the instruction that describes the task:
### Input:
This function checks whether file path
that is specified at "log_file" option exists,
whether write permission to the file path.
Return the following value:
case1: exists path and write permission
is_log('/tmp')
'/tmp/hogehoge.log'
case2: non-exists path and write permission
is_log('/tmp/hogehoge')
'/tmp/hogehoge'
In this case, 'hogehoge' doesn't exist.
but 'hogehoge' is considered as a file.
Thus, create log file named 'hogehoge'.
case3: complete non-exists path
is_log('/tmp/hogehoge/fugafuga')
IOError: [Error 2] No such file or directory.
The last part of given path is only considered log_file's name.
In this case, "fugafuga" is considered log_file's name.
case4: syslog case
is_log('syslog')
'syslog'
In any case, check whether given path exists before checking permission.
Notes: Even if given relative path, works fine.
But, if don't use as much as possible if good.
Recommended giving he full path including the file name.
:param str value:
log file path
You can specify not only absolute path but also relational path.
:rtype: str
:return: Converted absolute log file path.
### Response:
def is_log(value):
    """
    Validate the ``log_file`` option value and return a usable log path.

    ``'syslog'`` (any case) is passed through unchanged. Otherwise the
    value is expanded (user home, environment variables) and made
    absolute, then resolved as follows:

    * existing writable directory  -> ``<dir>/blackbird.log``
    * existing writable file       -> the file itself
    * missing name whose parent is an existing writable directory ->
      the given path (the last component is treated as the file name)

    A path whose parent is missing, is a file, or is not writable
    raises a ``validate`` error. Relative paths work, but a full path
    including the file name is recommended.

    :param str value: log file path (absolute or relative)
    :rtype: str
    :return: absolute log file path, or ``'syslog'``
    """
    if value.lower() == 'syslog':
        return 'syslog'
    # Normalise: ~user, $VARS, then make absolute.
    path = os.path.abspath(os.path.expandvars(os.path.expanduser(value)))
    default_name = 'blackbird.log'
    if os.path.exists(path):
        if not os.access(path, os.W_OK):
            err_message = ('{path}: Permission denied.'
                           ''.format(path=path)
                           )
            raise validate.VdtValueError(err_message)
        # Writable directory gets the default file name appended;
        # a writable file is returned as-is.
        if os.path.isdir(path):
            return os.path.join(path, default_name)
        return path
    # Path does not exist: the final component is the log file's name,
    # so only the parent directory needs to exist and be writable.
    parent = os.path.split(path)[0]
    if os.path.isdir(parent):
        if os.access(parent, os.W_OK):
            return path
        err_message = ('{directory}: Permission denied.'
                       ''.format(directory=parent)
                       )
        raise validate.VdtValueError(err_message)
    if os.path.exists(parent):
        # Parent exists but is not a directory (it is a file).
        err_message = ('{directory} is file.'
                       ''.format(directory=parent)
                       )
        raise validate.VdtTypeError(err_message)
    err_message = ('{directory}: No such file or directory.'
                   ''.format(directory=parent)
                   )
    raise validate.VdtValueError(err_message)
def fake_shell(self, func, stdout=False):
"""
Execute a function and decorate its return value in the style of
_low_level_execute_command(). This produces a return value that looks
like some shell command was run, when really func() was implemented
entirely in Python.
If the function raises :py:class:`mitogen.core.CallError`, this will be
translated into a failed shell command with a non-zero exit status.
:param func:
Function invoked as `func()`.
:returns:
See :py:attr:`COMMAND_RESULT`.
"""
dct = self.COMMAND_RESULT.copy()
try:
rc = func()
if stdout:
dct['stdout'] = repr(rc)
except mitogen.core.CallError:
LOG.exception('While emulating a shell command')
dct['rc'] = 1
dct['stderr'] = traceback.format_exc()
return dct | Execute a function and decorate its return value in the style of
_low_level_execute_command(). This produces a return value that looks
like some shell command was run, when really func() was implemented
entirely in Python.
If the function raises :py:class:`mitogen.core.CallError`, this will be
translated into a failed shell command with a non-zero exit status.
:param func:
Function invoked as `func()`.
:returns:
See :py:attr:`COMMAND_RESULT`. | Below is the the instruction that describes the task:
### Input:
Execute a function and decorate its return value in the style of
_low_level_execute_command(). This produces a return value that looks
like some shell command was run, when really func() was implemented
entirely in Python.
If the function raises :py:class:`mitogen.core.CallError`, this will be
translated into a failed shell command with a non-zero exit status.
:param func:
Function invoked as `func()`.
:returns:
See :py:attr:`COMMAND_RESULT`.
### Response:
def fake_shell(self, func, stdout=False):
    """
    Run *func* and wrap its result so it looks like the output of
    _low_level_execute_command(), i.e. as though an actual shell
    command had been executed, when really everything happened in
    Python.

    A :py:class:`mitogen.core.CallError` raised by *func* is reported
    as a failed command: exit status 1 with the traceback on stderr.

    :param func:
        Callable invoked with no arguments.
    :param stdout:
        When True, place ``repr()`` of the return value on stdout.
    :returns:
        See :py:attr:`COMMAND_RESULT`.
    """
    result = self.COMMAND_RESULT.copy()
    try:
        value = func()
        if stdout:
            result['stdout'] = repr(value)
    except mitogen.core.CallError:
        LOG.exception('While emulating a shell command')
        result['rc'] = 1
        result['stderr'] = traceback.format_exc()
    return result
def write_zip(self, resources=None, dumpfile=None):
"""Write a ZIP format dump file.
Writes a ZIP file containing the resources in the iterable resources along with
a manifest file manifest.xml (written first). No checks on the size of files
or total size are performed, this is expected to have been done beforehand.
"""
compression = (ZIP_DEFLATED if self.compress else ZIP_STORED)
zf = ZipFile(
dumpfile,
mode="w",
compression=compression,
allowZip64=True)
# Write resources first
rdm = ResourceDumpManifest(resources=resources)
real_path = {}
for resource in resources:
archive_path = self.archive_path(resource.path)
real_path[archive_path] = resource.path
resource.path = archive_path
zf.writestr('manifest.xml', rdm.as_xml())
# Add all files in the resources
for resource in resources:
zf.write(real_path[resource.path], arcname=resource.path)
zf.close()
zipsize = os.path.getsize(dumpfile)
self.logger.info(
"Wrote ZIP file dump %s with size %d bytes" %
(dumpfile, zipsize)) | Write a ZIP format dump file.
Writes a ZIP file containing the resources in the iterable resources along with
a manifest file manifest.xml (written first). No checks on the size of files
or total size are performed, this is expected to have been done beforehand. | Below is the the instruction that describes the task:
### Input:
Write a ZIP format dump file.
Writes a ZIP file containing the resources in the iterable resources along with
a manifest file manifest.xml (written first). No checks on the size of files
or total size are performed, this is expected to have been done beforehand.
### Response:
def write_zip(self, resources=None, dumpfile=None):
    """Write a ZIP format dump file.

    Writes a ZIP file containing the resources in the iterable resources along with
    a manifest file manifest.xml (written first). No checks on the size of files
    or total size are performed, this is expected to have been done beforehand.

    :param resources: iterable of resource objects exposing a ``path``
        attribute. It is iterated twice, so it must be a re-iterable
        sequence (not a one-shot generator). Each resource's ``path``
        is rewritten in place to its archive path so the manifest
        records archive paths rather than filesystem paths.
    :param dumpfile: filesystem path of the ZIP file to create.
    """
    compression = (ZIP_DEFLATED if self.compress else ZIP_STORED)
    zf = ZipFile(
        dumpfile,
        mode="w",
        compression=compression,
        allowZip64=True)
    # Write resources first
    rdm = ResourceDumpManifest(resources=resources)
    # Map archive path -> original filesystem path; mutating
    # resource.path here also updates the manifest entries, since rdm
    # holds references to the same resource objects.
    real_path = {}
    for resource in resources:
        archive_path = self.archive_path(resource.path)
        real_path[archive_path] = resource.path
        resource.path = archive_path
    zf.writestr('manifest.xml', rdm.as_xml())
    # Add all files in the resources
    for resource in resources:
        zf.write(real_path[resource.path], arcname=resource.path)
    zf.close()
    zipsize = os.path.getsize(dumpfile)
    self.logger.info(
        "Wrote ZIP file dump %s with size %d bytes" %
        (dumpfile, zipsize))
def Reverse(self, y):
"""Looks up y and returns the corresponding value of x."""
    return self._Bisect(y, self.ys, self.xs) | Looks up y and returns the corresponding value of x. | Below is the instruction that describes the task:
### Input:
Looks up y and returns the corresponding value of x.
### Response:
def Reverse(self, y):
    """Looks up y and returns the corresponding value of x.

    Inverse of the forward lookup: bisects the stored ``ys`` sequence
    and interpolates into ``xs`` (assumes ``self.ys`` is sorted in
    ascending order, as bisection requires -- TODO confirm).
    """
    return self._Bisect(y, self.ys, self.xs)
def get_messages(user):
"""
Fetch messages for given user. Returns None if no such message exists.
:param user: User instance
"""
key = _user_key(user)
result = cache.get(key)
if result:
cache.delete(key)
return result
return None | Fetch messages for given user. Returns None if no such message exists.
:param user: User instance | Below is the the instruction that describes the task:
### Input:
Fetch messages for given user. Returns None if no such message exists.
:param user: User instance
### Response:
def get_messages(user):
    """
    Return and clear any cached messages for the given user.

    The cache entry is deleted as soon as it is read, so each batch of
    messages is delivered at most once. Returns None when nothing (or a
    falsy value) is stored.

    :param user: User instance
    """
    cache_key = _user_key(user)
    messages = cache.get(cache_key)
    if not messages:
        return None
    cache.delete(cache_key)
    return messages
def clean_up_auth_url(auth_url):
"""Clean up the auth url to extract the exact Keystone URL"""
# NOTE(mnaser): This drops the query and fragment because we're only
# trying to extract the Keystone URL.
scheme, netloc, path, query, fragment = urlparse.urlsplit(auth_url)
return urlparse.urlunsplit((
scheme, netloc, re.sub(r'/auth.*', '', path), '', '')) | Clean up the auth url to extract the exact Keystone URL | Below is the the instruction that describes the task:
### Input:
Clean up the auth url to extract the exact Keystone URL
### Response:
def clean_up_auth_url(auth_url):
    """Clean up the auth url to extract the exact Keystone URL"""
    # Strip everything from '/auth' onwards in the path and rebuild
    # the URL without query string or fragment -- only the bare
    # Keystone endpoint is of interest here.
    parts = urlparse.urlsplit(auth_url)
    trimmed_path = re.sub(r'/auth.*', '', parts.path)
    return urlparse.urlunsplit(
        (parts.scheme, parts.netloc, trimmed_path, '', ''))
def gevent_run(app, monkey_patch=True, start=True, debug=False,
**kwargs): # pragma: no cover
"""Run your app in gevent.spawn, run simple loop if start == True
:param app: queues.Microservice instance
:param monkey_patch: boolean, use gevent.monkey.patch_all() for patching standard modules, default: True
:param start: boolean, if True, server will be start (simple loop)
:param kwargs: other params for WSGIServer(**kwargs)
:return: server
"""
if monkey_patch:
from gevent import monkey
monkey.patch_all()
import gevent
gevent.spawn(app.run, debug=debug, **kwargs)
if start:
while not app.stopped:
gevent.sleep(0.1) | Run your app in gevent.spawn, run simple loop if start == True
:param app: queues.Microservice instance
:param monkey_patch: boolean, use gevent.monkey.patch_all() for patching standard modules, default: True
:param start: boolean, if True, server will be start (simple loop)
:param kwargs: other params for WSGIServer(**kwargs)
:return: server | Below is the the instruction that describes the task:
### Input:
Run your app in gevent.spawn, run simple loop if start == True
:param app: queues.Microservice instance
:param monkey_patch: boolean, use gevent.monkey.patch_all() for patching standard modules, default: True
:param start: boolean, if True, server will be start (simple loop)
:param kwargs: other params for WSGIServer(**kwargs)
:return: server
### Response:
def gevent_run(app, monkey_patch=True, start=True, debug=False,
               **kwargs):  # pragma: no cover
    """Spawn the app's run() under gevent, optionally blocking until it stops.

    :param app: queues.Microservice instance
    :param monkey_patch: patch the standard library with
        gevent.monkey.patch_all() first (default: True)
    :param start: when True, block in a simple polling loop until
        ``app.stopped`` becomes truthy
    :param debug: forwarded to ``app.run``
    :param kwargs: additional keyword arguments for ``app.run``
    """
    # Monkey-patching must happen before gevent is put to work.
    if monkey_patch:
        from gevent import monkey
        monkey.patch_all()
    import gevent
    gevent.spawn(app.run, debug=debug, **kwargs)
    if not start:
        return
    while not app.stopped:
        gevent.sleep(0.1)
def count_frames(frame, count_start=0):
"Return a count of the number of frames"
count = -count_start
while frame:
count += 1
frame = frame.f_back
return count | Return a count of the number of frames | Below is the the instruction that describes the task:
### Input:
Return a count of the number of frames
### Response:
def count_frames(frame, count_start=0):
    "Return a count of the number of frames"
    # Walk the f_back chain to its root, offsetting by count_start.
    total = -count_start
    current = frame
    while current:
        total += 1
        current = current.f_back
    return total
def sample_stats_prior_to_xarray(self):
"""Extract sample_stats from fit."""
dtypes = {"divergent__": bool, "n_leapfrog__": np.int64, "treedepth__": np.int64}
# copy dims and coords
dims = deepcopy(self.dims) if self.dims is not None else {}
coords = deepcopy(self.coords) if self.coords is not None else {}
sampler_params = self.sample_stats_prior
for j, s_params in enumerate(sampler_params):
rename_dict = {}
for key in s_params:
key_, *end = key.split(".")
name = re.sub("__$", "", key_)
name = "diverging" if name == "divergent" else name
rename_dict[key] = ".".join((name, *end))
sampler_params[j][key] = s_params[key].astype(dtypes.get(key))
sampler_params[j] = sampler_params[j].rename(columns=rename_dict)
data = _unpack_dataframes(sampler_params)
return dict_to_dataset(data, coords=coords, dims=dims) | Extract sample_stats from fit. | Below is the the instruction that describes the task:
### Input:
Extract sample_stats from fit.
### Response:
def sample_stats_prior_to_xarray(self):
    """Extract sample_stats from fit.

    Renames Stan sampler diagnostics to ArviZ conventions (trailing
    "__" stripped, "divergent" -> "diverging"), coerces selected stats
    to non-float dtypes, and packs everything into an xarray dataset.
    """
    # Sampler diagnostics that must not be stored as floats.
    dtypes = {"divergent__": bool, "n_leapfrog__": np.int64, "treedepth__": np.int64}
    # copy dims and coords
    dims = deepcopy(self.dims) if self.dims is not None else {}
    coords = deepcopy(self.coords) if self.coords is not None else {}
    sampler_params = self.sample_stats_prior
    for j, s_params in enumerate(sampler_params):
        rename_dict = {}
        for key in s_params:
            # Keys may carry a ".<index>" suffix; only the base name is
            # cleaned, the suffix is re-attached afterwards.
            key_, *end = key.split(".")
            name = re.sub("__$", "", key_)
            name = "diverging" if name == "divergent" else name
            rename_dict[key] = ".".join((name, *end))
            # NOTE(review): dtypes.get(key) is None for keys not in the
            # map above; astype(None) then falls back to a default dtype
            # -- confirm this is the intended behaviour.
            sampler_params[j][key] = s_params[key].astype(dtypes.get(key))
        sampler_params[j] = sampler_params[j].rename(columns=rename_dict)
    data = _unpack_dataframes(sampler_params)
    return dict_to_dataset(data, coords=coords, dims=dims)
def match(self, *args, **kwargs):
"""
Check the if these args match this expectation.
"""
return self._any_args or \
self._arguments_rule.validate(*args, **kwargs) | Check the if these args match this expectation. | Below is the the instruction that describes the task:
### Input:
Check the if these args match this expectation.
### Response:
def match(self, *args, **kwargs):
    """
    Return whether the given call arguments satisfy this expectation.

    Any arguments match when the expectation accepts arbitrary args;
    otherwise they are checked against the configured arguments rule.
    """
    if self._any_args:
        return self._any_args
    return self._arguments_rule.validate(*args, **kwargs)
def decode_rfc2231(s):
"""Decode string according to RFC 2231"""
parts = s.split(TICK, 2)
if len(parts) <= 2:
return None, None, s
return parts | Decode string according to RFC 2231 | Below is the the instruction that describes the task:
### Input:
Decode string according to RFC 2231
### Response:
def decode_rfc2231(s):
    """Split an RFC 2231 extended value into (charset, language, text).

    An extended value has the form ``charset'language'text``. When the
    string holds fewer than two TICK separators it is not an extended
    value, and ``(None, None, s)`` is returned unchanged.
    """
    pieces = s.split(TICK, 2)
    if len(pieces) > 2:
        return pieces
    return None, None, s
def eventdata(payload):
"""
Parse a Supervisor event.
"""
headerinfo, data = payload.split('\n', 1)
headers = get_headers(headerinfo)
return headers, data | Parse a Supervisor event. | Below is the the instruction that describes the task:
### Input:
Parse a Supervisor event.
### Response:
def eventdata(payload):
    """
    Split a Supervisor event payload into (parsed headers, raw body).
    """
    # The header line is everything up to the first newline; the rest
    # is the event data, passed through untouched.
    headerinfo, body = payload.split('\n', 1)
    return get_headers(headerinfo), body
def html_state(self):
"""Display state in HTML format for the admin form."""
ret = ""
state = json.loads(self.state)
for (app, appstate) in state.items():
for (model, modelstate) in appstate.items():
ret += "<p>%s.models.%s</p>" % (app, model,)
ret += "<ul>"
for field in modelstate["fields"] + ["uid"]:
ret += "<li>%s</li>" % field
for fk in modelstate["foreignkeys"]:
ret += "<li>%s (foreign key)</li>" % fk
ret += "</ul>"
return ret | Display state in HTML format for the admin form. | Below is the the instruction that describes the task:
### Input:
Display state in HTML format for the admin form.
### Response:
def html_state(self):
    """Display state in HTML format for the admin form."""
    state = json.loads(self.state)
    # Collect fragments and join once instead of repeated string
    # concatenation; output is identical.
    chunks = []
    for app, appstate in state.items():
        for model, modelstate in appstate.items():
            chunks.append("<p>%s.models.%s</p>" % (app, model,))
            chunks.append("<ul>")
            for field in modelstate["fields"] + ["uid"]:
                chunks.append("<li>%s</li>" % field)
            for fk in modelstate["foreignkeys"]:
                chunks.append("<li>%s (foreign key)</li>" % fk)
            chunks.append("</ul>")
    return "".join(chunks)
def url_for(self, *args: str, **kwargs: str) -> URL:
"""Construct url for route with additional params."""
return self._resource.url_for(*args, **kwargs) | Construct url for route with additional params. | Below is the the instruction that describes the task:
### Input:
Construct url for route with additional params.
### Response:
def url_for(self, *args: str, **kwargs: str) -> URL:
    """Construct url for route with additional params.

    Thin delegation: forwards all positional and keyword arguments
    unchanged to the underlying resource's ``url_for()``.
    """
    return self._resource.url_for(*args, **kwargs)
def iteration(self, node_status=True):
"""
Execute a single model iteration
:return: Iteration_id, Incremental node status (dictionary node->status)
"""
self.clean_initial_status(self.available_statuses.values())
actual_status = {node: nstatus for node, nstatus in future.utils.iteritems(self.status)}
if self.actual_iteration == 0:
if min(actual_status.values()) == 0:
number_node_blocked = int(float(self.graph.number_of_nodes()) *
float(self.params['model']['percentage_blocked']))
i = 0
while i < number_node_blocked:
# select a random node
node = list(self.graph.nodes())[np.random.randint(0, self.graph.number_of_nodes())]
# node not infected
if actual_status[node] == 0:
# node blocked
actual_status[node] = -1
self.status[node] = -1
i += 1
self.actual_iteration += 1
delta, node_count, status_delta = self.status_delta(actual_status)
if node_status:
return {"iteration": 0, "status": actual_status.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": 0, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
for node in self.graph.nodes():
if self.status[node] == 0:
if self.params['model']['adopter_rate'] > 0:
xk = (0, 1)
pk = (1-self.params['model']['adopter_rate'], self.params['model']['adopter_rate'])
probability = stats.rv_discrete(name='probability', values=(xk, pk))
number_probability = probability.rvs()
if number_probability == 1:
actual_status[node] = 1
continue
neighbors = list(self.graph.neighbors(node))
if len(neighbors) == 0:
continue
if isinstance(self.graph, nx.DiGraph):
neighbors = self.graph.predecessors(node)
infected = 0
for v in neighbors:
if self.status[v] != -1:
infected += self.status[v]
infected_ratio = float(infected)/len(neighbors)
if infected_ratio >= self.params['nodes']['threshold'][node]:
actual_status[node] = 1
delta, node_count, status_delta = self.status_delta(actual_status)
self.status = actual_status
self.actual_iteration += 1
if node_status:
return {"iteration": self.actual_iteration - 1, "status": delta.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": self.actual_iteration - 1, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()} | Execute a single model iteration
:return: Iteration_id, Incremental node status (dictionary node->status) | Below is the the instruction that describes the task:
### Input:
Execute a single model iteration
:return: Iteration_id, Incremental node status (dictionary node->status)
### Response:
def iteration(self, node_status=True):
    """
    Execute a single model iteration
    :return: Iteration_id, Incremental node status (dictionary node->status)
    """
    self.clean_initial_status(self.available_statuses.values())
    # Work on a snapshot so every node update sees the previous
    # iteration's statuses, not the ones computed this round.
    actual_status = {node: nstatus for node, nstatus in future.utils.iteritems(self.status)}
    if self.actual_iteration == 0:
        # First iteration: before any spreading, block a fixed
        # percentage of susceptible (status 0) nodes by setting their
        # status to -1. Only runs if at least one susceptible exists.
        if min(actual_status.values()) == 0:
            number_node_blocked = int(float(self.graph.number_of_nodes()) *
                                      float(self.params['model']['percentage_blocked']))
            i = 0
            while i < number_node_blocked:
                # select a random node
                node = list(self.graph.nodes())[np.random.randint(0, self.graph.number_of_nodes())]
                # node not infected
                if actual_status[node] == 0:
                    # node blocked
                    actual_status[node] = -1
                    self.status[node] = -1
                    i += 1
        self.actual_iteration += 1
        delta, node_count, status_delta = self.status_delta(actual_status)
        if node_status:
            return {"iteration": 0, "status": actual_status.copy(),
                    "node_count": node_count.copy(), "status_delta": status_delta.copy()}
        else:
            return {"iteration": 0, "status": {},
                    "node_count": node_count.copy(), "status_delta": status_delta.copy()}
    for node in self.graph.nodes():
        if self.status[node] == 0:
            # Spontaneous adoption: with probability adopter_rate the
            # node adopts regardless of its neighbourhood.
            if self.params['model']['adopter_rate'] > 0:
                xk = (0, 1)
                pk = (1-self.params['model']['adopter_rate'], self.params['model']['adopter_rate'])
                probability = stats.rv_discrete(name='probability', values=(xk, pk))
                number_probability = probability.rvs()
                if number_probability == 1:
                    actual_status[node] = 1
                    continue
            neighbors = list(self.graph.neighbors(node))
            if len(neighbors) == 0:
                continue
            # NOTE(review): for directed graphs this replaces the
            # successor list with predecessors(), which is an iterator
            # in networkx >= 2.x, yet len(neighbors) is used below --
            # confirm this works with the installed networkx version.
            if isinstance(self.graph, nx.DiGraph):
                neighbors = self.graph.predecessors(node)
            infected = 0
            for v in neighbors:
                # Blocked nodes (status -1) do not count towards the
                # infected-neighbour tally.
                if self.status[v] != -1:
                    infected += self.status[v]
            infected_ratio = float(infected)/len(neighbors)
            # Threshold rule: adopt once the infected fraction reaches
            # this node's individual threshold.
            if infected_ratio >= self.params['nodes']['threshold'][node]:
                actual_status[node] = 1
    delta, node_count, status_delta = self.status_delta(actual_status)
    self.status = actual_status
    self.actual_iteration += 1
    if node_status:
        return {"iteration": self.actual_iteration - 1, "status": delta.copy(),
                "node_count": node_count.copy(), "status_delta": status_delta.copy()}
    else:
        return {"iteration": self.actual_iteration - 1, "status": {},
                "node_count": node_count.copy(), "status_delta": status_delta.copy()}
def eigen(matrix):
""" Calculates the eigenvalues and eigenvectors of the input matrix.
Returns a tuple of (eigenvalues, eigenvectors, cumulative percentage of
variance explained). Eigenvalues and eigenvectors are sorted in order of
eigenvalue magnitude, high to low """
(vals, vecs) = np.linalg.eigh(matrix)
ind = vals.argsort()[::-1]
vals = vals[ind]
vecs = vecs[:, ind]
vals_ = vals.copy()
vals_[vals_ < 0] = 0.
cum_var_exp = np.cumsum(vals_ / vals_.sum())
return Decomp(matrix.copy(), vals, vecs, cum_var_exp) | Calculates the eigenvalues and eigenvectors of the input matrix.
Returns a tuple of (eigenvalues, eigenvectors, cumulative percentage of
variance explained). Eigenvalues and eigenvectors are sorted in order of
eigenvalue magnitude, high to low | Below is the the instruction that describes the task:
### Input:
Calculates the eigenvalues and eigenvectors of the input matrix.
Returns a tuple of (eigenvalues, eigenvectors, cumulative percentage of
variance explained). Eigenvalues and eigenvectors are sorted in order of
eigenvalue magnitude, high to low
### Response:
def eigen(matrix):
    """Eigendecomposition of a symmetric/Hermitian matrix.

    Returns a Decomp of (copy of the input matrix, eigenvalues,
    eigenvectors, cumulative proportion of variance explained), with
    eigenvalues and eigenvectors sorted by decreasing eigenvalue.
    Negative eigenvalues are clipped to zero only for the
    variance-explained calculation.
    """
    eigvals, eigvecs = np.linalg.eigh(matrix)
    order = eigvals.argsort()[::-1]
    eigvals = eigvals[order]
    eigvecs = eigvecs[:, order]
    clipped = eigvals.copy()
    clipped[clipped < 0] = 0.
    cum_var_exp = np.cumsum(clipped / clipped.sum())
    return Decomp(matrix.copy(), eigvals, eigvecs, cum_var_exp)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.