code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def ok_button_status(self):
"""Function to enable or disable OK button."""
if not self.layer.currentLayer():
self.button_box.button(
QtWidgets.QDialogButtonBox.Ok).setEnabled(False)
elif (self.hazard_class_form.count() > 0
and self.layer.currentLayer().name()
and len(self.output_form.text()) >= 0):
self.button_box.button(
QtWidgets.QDialogButtonBox.Ok).setEnabled(True)
else:
self.button_box.button(
            QtWidgets.QDialogButtonBox.Ok).setEnabled(False) | Function to enable or disable OK button. | Below is the instruction that describes the task:
### Input:
Function to enable or disable OK button.
### Response:
def ok_button_status(self):
    """Enable or disable the dialog's OK button from the current inputs.

    The button is enabled only when a layer is selected, at least one
    hazard class row exists, the layer has a name, and the output field
    is non-empty; otherwise it is disabled.
    """
    ok_button = self.button_box.button(QtWidgets.QDialogButtonBox.Ok)
    if not self.layer.currentLayer():
        ok_button.setEnabled(False)
    elif (self.hazard_class_form.count() > 0
            and self.layer.currentLayer().name()
            # BUG FIX: the original tested `len(...) >= 0`, which is
            # always true and made the output-field check a no-op.
            # Require a non-empty output path instead.
            and len(self.output_form.text()) > 0):
        ok_button.setEnabled(True)
    else:
        ok_button.setEnabled(False)
def shutil_rmtree_onerror(func: Callable[[str], None],
path: str,
exc_info: EXC_INFO_TYPE) -> None:
"""
Error handler for ``shutil.rmtree``.
If the error is due to an access error (read only file)
it attempts to add write permission and then retries.
If the error is for another reason it re-raises the error.
Usage: ``shutil.rmtree(path, onerror=shutil_rmtree_onerror)``
See
https://stackoverflow.com/questions/2656322/shutil-rmtree-fails-on-windows-with-access-is-denied
""" # noqa
if not os.access(path, os.W_OK):
# Is the error an access error ?
os.chmod(path, stat.S_IWUSR)
func(path)
else:
exc = exc_info[1]
raise exc | Error handler for ``shutil.rmtree``.
If the error is due to an access error (read only file)
it attempts to add write permission and then retries.
If the error is for another reason it re-raises the error.
Usage: ``shutil.rmtree(path, onerror=shutil_rmtree_onerror)``
See
https://stackoverflow.com/questions/2656322/shutil-rmtree-fails-on-windows-with-access-is-denied | Below is the the instruction that describes the task:
### Input:
Error handler for ``shutil.rmtree``.
If the error is due to an access error (read only file)
it attempts to add write permission and then retries.
If the error is for another reason it re-raises the error.
Usage: ``shutil.rmtree(path, onerror=shutil_rmtree_onerror)``
See
https://stackoverflow.com/questions/2656322/shutil-rmtree-fails-on-windows-with-access-is-denied
### Response:
def shutil_rmtree_onerror(func: Callable[[str], None],
                          path: str,
                          exc_info: EXC_INFO_TYPE) -> None:
    """
    Error handler for ``shutil.rmtree``.

    When the failure looks like a permission problem (the path is not
    writable, e.g. a read-only file on Windows), grant the owner write
    permission and retry the failed operation. Any other failure is
    re-raised unchanged.

    Usage: ``shutil.rmtree(path, onerror=shutil_rmtree_onerror)``

    See
    https://stackoverflow.com/questions/2656322/shutil-rmtree-fails-on-windows-with-access-is-denied
    """  # noqa
    if os.access(path, os.W_OK):
        # Not an access problem; propagate the original exception.
        raise exc_info[1]
    # Read-only entry: make it owner-writable, then retry the operation.
    os.chmod(path, stat.S_IWUSR)
    func(path)
def state_preorder(collision_checker):
"""
Decorator for the check() method on a state-preordering operation.
Make sure that there are no duplicate preorders anywhere--either in this
block, or in any previous blocks.
"""
def wrap( check ):
def wrapped_check( state_engine, nameop, block_id, checked_ops ):
rc = check( state_engine, nameop, block_id, checked_ops )
if rc:
# verify no duplicates
history_id_key = "preorder_hash"
rc = state_check_collisions( state_engine, nameop, history_id_key, block_id, checked_ops, collision_checker )
if rc:
log.debug("COLLISION on %s '%s'" % (history_id_key, nameop[history_id_key]))
rc = False
else:
# no collision
rc = True
# sanity check---we need to have the appropriate metadata for this operation
invariant_tags = state_preorder_invariant_tags()
for tag in invariant_tags:
assert tag in nameop, "BUG: missing invariant tag '%s'" % tag
# sanity check---all required consensus fields must be present
for required_field in CONSENSUS_FIELDS_REQUIRED:
assert required_field in nameop, 'BUG: missing required consensus field {}'.format(required_field)
return rc
return wrapped_check
return wrap | Decorator for the check() method on a state-preordering operation.
Make sure that there are no duplicate preorders anywhere--either in this
block, or in any previous blocks. | Below is the the instruction that describes the task:
### Input:
Decorator for the check() method on a state-preordering operation.
Make sure that there are no duplicate preorders anywhere--either in this
block, or in any previous blocks.
### Response:
def state_preorder(collision_checker):
    """
    Decorator for the check() method on a state-preordering operation.
    Make sure that there are no duplicate preorders anywhere--either in this
    block, or in any previous blocks.

    :param collision_checker: callable handed to state_check_collisions to
        decide whether two operations with the same preorder hash collide.
    :return: a decorator that wraps a check() method.
    """
    def wrap( check ):
        # The wrapper runs the original check first, then layers
        # duplicate-preorder detection and invariant assertions on top.
        def wrapped_check( state_engine, nameop, block_id, checked_ops ):
            rc = check( state_engine, nameop, block_id, checked_ops )
            if rc:
                # verify no duplicates
                history_id_key = "preorder_hash"
                rc = state_check_collisions( state_engine, nameop, history_id_key, block_id, checked_ops, collision_checker )
                if rc:
                    # a collision means this preorder must be rejected
                    log.debug("COLLISION on %s '%s'" % (history_id_key, nameop[history_id_key]))
                    rc = False
                else:
                    # no collision
                    rc = True
            # sanity check---we need to have the appropriate metadata for this operation
            invariant_tags = state_preorder_invariant_tags()
            for tag in invariant_tags:
                assert tag in nameop, "BUG: missing invariant tag '%s'" % tag
            # sanity check---all required consensus fields must be present
            for required_field in CONSENSUS_FIELDS_REQUIRED:
                assert required_field in nameop, 'BUG: missing required consensus field {}'.format(required_field)
            return rc
        return wrapped_check
return wrap |
def reorder_sonos_playlist(self, sonos_playlist, tracks, new_pos,
update_id=0):
"""Reorder and/or Remove tracks in a Sonos playlist.
The underlying call is quite complex as it can both move a track
within the list or delete a track from the playlist. All of this
depends on what tracks and new_pos specify.
If a list is specified for tracks, then a list must be used for
new_pos. Each list element is a discrete modification and the next
list operation must anticipate the new state of the playlist.
If a comma formatted string to tracks is specified, then use
a similar string to specify new_pos. Those operations should be
ordered from the end of the list to the beginning
See the helper methods
:py:meth:`clear_sonos_playlist`, :py:meth:`move_in_sonos_playlist`,
:py:meth:`remove_from_sonos_playlist` for simplified usage.
update_id - If you have a series of operations, tracking the update_id
and setting it, will save a lookup operation.
Examples:
To reorder the first two tracks::
# sonos_playlist specified by the DidlPlaylistContainer object
sonos_playlist = device.get_sonos_playlists()[0]
device.reorder_sonos_playlist(sonos_playlist,
tracks=[0, ], new_pos=[1, ])
# OR specified by the item_id
device.reorder_sonos_playlist('SQ:0', tracks=[0, ], new_pos=[1, ])
To delete the second track::
# tracks/new_pos are a list of int
device.reorder_sonos_playlist(sonos_playlist,
tracks=[1, ], new_pos=[None, ])
# OR tracks/new_pos are a list of int-like
device.reorder_sonos_playlist(sonos_playlist,
tracks=['1', ], new_pos=['', ])
# OR tracks/new_pos are strings - no transform is done
device.reorder_sonos_playlist(sonos_playlist, tracks='1',
new_pos='')
To reverse the order of a playlist with 4 items::
device.reorder_sonos_playlist(sonos_playlist, tracks='3,2,1,0',
new_pos='0,1,2,3')
Args:
sonos_playlist
(:py:class:`~.soco.data_structures.DidlPlaylistContainer`): The
Sonos playlist object or the item_id (str) of the Sonos
playlist.
tracks: (list): list of track indices(int) to reorder. May also be
a list of int like things. i.e. ``['0', '1',]`` OR it may be a
str of comma separated int like things. ``"0,1"``. Tracks are
**0**-based. Meaning the first track is track 0, just like
indexing into a Python list.
new_pos (list): list of new positions (int|None)
corresponding to track_list. MUST be the same type as
``tracks``. **0**-based, see tracks above. ``None`` is the
indicator to remove the track. If using a list of strings,
then a remove is indicated by an empty string.
update_id (int): operation id (default: 0) If set to 0, a lookup
is done to find the correct value.
Returns:
dict: Which contains 3 elements: change, length and update_id.
Change in size between original playlist and the resulting
playlist, the length of resulting playlist, and the new
update_id.
Raises:
SoCoUPnPException: If playlist does not exist or if your tracks
and/or new_pos arguments are invalid.
"""
# allow either a string 'SQ:10' or an object with item_id attribute.
object_id = getattr(sonos_playlist, 'item_id', sonos_playlist)
if isinstance(tracks, UnicodeType):
track_list = [tracks, ]
position_list = [new_pos, ]
elif isinstance(tracks, int):
track_list = [tracks, ]
if new_pos is None:
new_pos = ''
position_list = [new_pos, ]
else:
track_list = [str(x) for x in tracks]
position_list = [str(x) if x is not None else '' for x in new_pos]
# track_list = ','.join(track_list)
# position_list = ','.join(position_list)
if update_id == 0: # retrieve the update id for the object
response, _ = self.music_library._music_lib_search(object_id, 0, 1)
update_id = response['UpdateID']
change = 0
for track, position in zip(track_list, position_list):
if track == position: # there is no move, a no-op
continue
response = self.avTransport.ReorderTracksInSavedQueue([
("InstanceID", 0),
("ObjectID", object_id),
("UpdateID", update_id),
("TrackList", track),
("NewPositionList", position),
])
change += int(response['QueueLengthChange'])
update_id = int(response['NewUpdateID'])
length = int(response['NewQueueLength'])
response = {'change': change,
'update_id': update_id,
'length': length}
return response | Reorder and/or Remove tracks in a Sonos playlist.
The underlying call is quite complex as it can both move a track
within the list or delete a track from the playlist. All of this
depends on what tracks and new_pos specify.
If a list is specified for tracks, then a list must be used for
new_pos. Each list element is a discrete modification and the next
list operation must anticipate the new state of the playlist.
If a comma formatted string to tracks is specified, then use
a similiar string to specify new_pos. Those operations should be
ordered from the end of the list to the beginning
See the helper methods
:py:meth:`clear_sonos_playlist`, :py:meth:`move_in_sonos_playlist`,
:py:meth:`remove_from_sonos_playlist` for simplified usage.
update_id - If you have a series of operations, tracking the update_id
and setting it, will save a lookup operation.
Examples:
To reorder the first two tracks::
# sonos_playlist specified by the DidlPlaylistContainer object
sonos_playlist = device.get_sonos_playlists()[0]
device.reorder_sonos_playlist(sonos_playlist,
tracks=[0, ], new_pos=[1, ])
# OR specified by the item_id
device.reorder_sonos_playlist('SQ:0', tracks=[0, ], new_pos=[1, ])
To delete the second track::
# tracks/new_pos are a list of int
device.reorder_sonos_playlist(sonos_playlist,
tracks=[1, ], new_pos=[None, ])
# OR tracks/new_pos are a list of int-like
device.reorder_sonos_playlist(sonos_playlist,
tracks=['1', ], new_pos=['', ])
# OR tracks/new_pos are strings - no transform is done
device.reorder_sonos_playlist(sonos_playlist, tracks='1',
new_pos='')
To reverse the order of a playlist with 4 items::
device.reorder_sonos_playlist(sonos_playlist, tracks='3,2,1,0',
new_pos='0,1,2,3')
Args:
sonos_playlist
(:py:class:`~.soco.data_structures.DidlPlaylistContainer`): The
Sonos playlist object or the item_id (str) of the Sonos
playlist.
tracks: (list): list of track indices(int) to reorder. May also be
a list of int like things. i.e. ``['0', '1',]`` OR it may be a
str of comma separated int like things. ``"0,1"``. Tracks are
**0**-based. Meaning the first track is track 0, just like
indexing into a Python list.
new_pos (list): list of new positions (int|None)
corresponding to track_list. MUST be the same type as
``tracks``. **0**-based, see tracks above. ``None`` is the
indicator to remove the track. If using a list of strings,
then a remove is indicated by an empty string.
update_id (int): operation id (default: 0) If set to 0, a lookup
is done to find the correct value.
Returns:
dict: Which contains 3 elements: change, length and update_id.
Change in size between original playlist and the resulting
playlist, the length of resulting playlist, and the new
update_id.
Raises:
SoCoUPnPException: If playlist does not exist or if your tracks
and/or new_pos arguments are invalid. | Below is the the instruction that describes the task:
### Input:
Reorder and/or Remove tracks in a Sonos playlist.
The underlying call is quite complex as it can both move a track
within the list or delete a track from the playlist. All of this
depends on what tracks and new_pos specify.
If a list is specified for tracks, then a list must be used for
new_pos. Each list element is a discrete modification and the next
list operation must anticipate the new state of the playlist.
If a comma formatted string to tracks is specified, then use
a similiar string to specify new_pos. Those operations should be
ordered from the end of the list to the beginning
See the helper methods
:py:meth:`clear_sonos_playlist`, :py:meth:`move_in_sonos_playlist`,
:py:meth:`remove_from_sonos_playlist` for simplified usage.
update_id - If you have a series of operations, tracking the update_id
and setting it, will save a lookup operation.
Examples:
To reorder the first two tracks::
# sonos_playlist specified by the DidlPlaylistContainer object
sonos_playlist = device.get_sonos_playlists()[0]
device.reorder_sonos_playlist(sonos_playlist,
tracks=[0, ], new_pos=[1, ])
# OR specified by the item_id
device.reorder_sonos_playlist('SQ:0', tracks=[0, ], new_pos=[1, ])
To delete the second track::
# tracks/new_pos are a list of int
device.reorder_sonos_playlist(sonos_playlist,
tracks=[1, ], new_pos=[None, ])
# OR tracks/new_pos are a list of int-like
device.reorder_sonos_playlist(sonos_playlist,
tracks=['1', ], new_pos=['', ])
# OR tracks/new_pos are strings - no transform is done
device.reorder_sonos_playlist(sonos_playlist, tracks='1',
new_pos='')
To reverse the order of a playlist with 4 items::
device.reorder_sonos_playlist(sonos_playlist, tracks='3,2,1,0',
new_pos='0,1,2,3')
Args:
sonos_playlist
(:py:class:`~.soco.data_structures.DidlPlaylistContainer`): The
Sonos playlist object or the item_id (str) of the Sonos
playlist.
tracks: (list): list of track indices(int) to reorder. May also be
a list of int like things. i.e. ``['0', '1',]`` OR it may be a
str of comma separated int like things. ``"0,1"``. Tracks are
**0**-based. Meaning the first track is track 0, just like
indexing into a Python list.
new_pos (list): list of new positions (int|None)
corresponding to track_list. MUST be the same type as
``tracks``. **0**-based, see tracks above. ``None`` is the
indicator to remove the track. If using a list of strings,
then a remove is indicated by an empty string.
update_id (int): operation id (default: 0) If set to 0, a lookup
is done to find the correct value.
Returns:
dict: Which contains 3 elements: change, length and update_id.
Change in size between original playlist and the resulting
playlist, the length of resulting playlist, and the new
update_id.
Raises:
SoCoUPnPException: If playlist does not exist or if your tracks
and/or new_pos arguments are invalid.
### Response:
def reorder_sonos_playlist(self, sonos_playlist, tracks, new_pos,
                           update_id=0):
    """Reorder and/or Remove tracks in a Sonos playlist.

    The underlying call is quite complex as it can both move a track
    within the list or delete a track from the playlist. All of this
    depends on what tracks and new_pos specify.

    If a list is specified for tracks, then a list must be used for
    new_pos. Each list element is a discrete modification and the next
    list operation must anticipate the new state of the playlist.

    If a comma formatted string to tracks is specified, then use
    a similar string to specify new_pos. Those operations should be
    ordered from the end of the list to the beginning.

    See the helper methods
    :py:meth:`clear_sonos_playlist`, :py:meth:`move_in_sonos_playlist`,
    :py:meth:`remove_from_sonos_playlist` for simplified usage.

    update_id - If you have a series of operations, tracking the update_id
    and setting it, will save a lookup operation.

    Examples:
        To reorder the first two tracks::

            # sonos_playlist specified by the DidlPlaylistContainer object
            sonos_playlist = device.get_sonos_playlists()[0]
            device.reorder_sonos_playlist(sonos_playlist,
                                          tracks=[0, ], new_pos=[1, ])
            # OR specified by the item_id
            device.reorder_sonos_playlist('SQ:0', tracks=[0, ], new_pos=[1, ])

        To delete the second track::

            # tracks/new_pos are a list of int
            device.reorder_sonos_playlist(sonos_playlist,
                                          tracks=[1, ], new_pos=[None, ])
            # OR tracks/new_pos are a list of int-like
            device.reorder_sonos_playlist(sonos_playlist,
                                          tracks=['1', ], new_pos=['', ])
            # OR tracks/new_pos are strings - no transform is done
            device.reorder_sonos_playlist(sonos_playlist, tracks='1',
                                          new_pos='')

        To reverse the order of a playlist with 4 items::

            device.reorder_sonos_playlist(sonos_playlist, tracks='3,2,1,0',
                                          new_pos='0,1,2,3')

    Args:
        sonos_playlist
            (:py:class:`~.soco.data_structures.DidlPlaylistContainer`): The
            Sonos playlist object or the item_id (str) of the Sonos
            playlist.
        tracks: (list): list of track indices(int) to reorder. May also be
            a list of int like things. i.e. ``['0', '1',]`` OR it may be a
            str of comma separated int like things. ``"0,1"``. Tracks are
            **0**-based. Meaning the first track is track 0, just like
            indexing into a Python list.
        new_pos (list): list of new positions (int|None)
            corresponding to track_list. MUST be the same type as
            ``tracks``. **0**-based, see tracks above. ``None`` is the
            indicator to remove the track. If using a list of strings,
            then a remove is indicated by an empty string.
        update_id (int): operation id (default: 0) If set to 0, a lookup
            is done to find the correct value.

    Returns:
        dict: Which contains 3 elements: change, length and update_id.
            Change in size between original playlist and the resulting
            playlist, the length of resulting playlist, and the new
            update_id.

    Raises:
        SoCoUPnPException: If playlist does not exist or if your tracks
            and/or new_pos arguments are invalid.
    """
    # allow either a string 'SQ:10' or an object with item_id attribute.
    object_id = getattr(sonos_playlist, 'item_id', sonos_playlist)
    # Normalize tracks/new_pos into parallel lists of comma-string specs,
    # one entry per discrete reorder/remove operation.
    if isinstance(tracks, UnicodeType):
        # a single pre-formatted comma string: pass through untouched
        track_list = [tracks, ]
        position_list = [new_pos, ]
    elif isinstance(tracks, int):
        # a single index; None means "remove" and becomes ''
        track_list = [tracks, ]
        if new_pos is None:
            new_pos = ''
        position_list = [new_pos, ]
    else:
        # lists of int-like things; '' marks a removal
        track_list = [str(x) for x in tracks]
        position_list = [str(x) if x is not None else '' for x in new_pos]
        # track_list = ','.join(track_list)
        # position_list = ','.join(position_list)
    if update_id == 0:  # retrieve the update id for the object
        response, _ = self.music_library._music_lib_search(object_id, 0, 1)
        update_id = response['UpdateID']
    change = 0
    # Apply each modification in sequence; every call returns the new
    # update_id that the next call must present.
    for track, position in zip(track_list, position_list):
        if track == position:  # there is no move, a no-op
            continue
        response = self.avTransport.ReorderTracksInSavedQueue([
            ("InstanceID", 0),
            ("ObjectID", object_id),
            ("UpdateID", update_id),
            ("TrackList", track),
            ("NewPositionList", position),
        ])
        change += int(response['QueueLengthChange'])
        update_id = int(response['NewUpdateID'])
        length = int(response['NewQueueLength'])
    # NOTE(review): if every operation is a no-op, `length` is never
    # assigned and this raises UnboundLocalError -- confirm intended.
    response = {'change': change,
                'update_id': update_id,
                'length': length}
return response |
def read_config(self, path):
"""Read configuration file."""
self.pyvlx.logger.info('Reading config file: ', path)
try:
with open(path, 'r') as filehandle:
doc = yaml.load(filehandle)
if 'config' not in doc:
raise PyVLXException('no element config found in: {0}'.format(path))
if 'host' not in doc['config']:
raise PyVLXException('no element host found in: {0}'.format(path))
if 'password' not in doc['config']:
raise PyVLXException('no element password found in: {0}'.format(path))
self.host = doc['config']['host']
self.password = doc['config']['password']
except FileNotFoundError as ex:
raise PyVLXException('file does not exist: {0}'.format(ex)) | Read configuration file. | Below is the the instruction that describes the task:
### Input:
Read configuration file.
### Response:
def read_config(self, path):
    """Read the YAML configuration file at *path*.

    Populates ``self.host`` and ``self.password`` from the ``config``
    section of the file.

    :param path: filesystem path of the YAML configuration file.
    :raises PyVLXException: if the file does not exist or a required
        element (``config``, ``host`` or ``password``) is missing.
    """
    # BUG FIX: the original passed ``path`` as a positional logging
    # argument with no '%s' placeholder in the message, so the path was
    # never rendered (and triggered a logging formatting error).
    self.pyvlx.logger.info('Reading config file: %s', path)
    try:
        with open(path, 'r') as filehandle:
            # safe_load avoids arbitrary Python object construction;
            # yaml.load without an explicit Loader is unsafe/deprecated.
            doc = yaml.safe_load(filehandle)
            if 'config' not in doc:
                raise PyVLXException('no element config found in: {0}'.format(path))
            if 'host' not in doc['config']:
                raise PyVLXException('no element host found in: {0}'.format(path))
            if 'password' not in doc['config']:
                raise PyVLXException('no element password found in: {0}'.format(path))
            self.host = doc['config']['host']
            self.password = doc['config']['password']
    except FileNotFoundError as ex:
        raise PyVLXException('file does not exist: {0}'.format(ex))
def serialize(self, node: SchemaNode,
value: Any) -> Union[str, ColanderNullType]:
"""
Serializes Python object to string representation.
"""
if value is None:
retval = ''
else:
# noinspection PyUnresolvedReferences
retval = self.type_.serialize(node, value)
# log.debug("AllowNoneType.serialize: {!r} -> {!r}", value, retval)
return retval | Serializes Python object to string representation. | Below is the the instruction that describes the task:
### Input:
Serializes Python object to string representation.
### Response:
def serialize(self, node: SchemaNode,
              value: Any) -> Union[str, ColanderNullType]:
    """
    Serializes Python object to string representation.

    ``None`` maps to the empty string; any other value is delegated to
    the wrapped type's own serializer.
    """
    if value is None:
        return ''
    # noinspection PyUnresolvedReferences
    return self.type_.serialize(node, value)
def _compact_class_repr(obj):
""" A compact version of __repr__ for each of the steps.
"""
dict_str_list = []
post_repr_string = ""
# If features are present, then shorten it.
init_func = obj.__init__
if _sys.version_info.major == 2:
init_func = init_func.__func__
fields = _inspect.getargspec(init_func).args
fields = fields[1:] # remove self
if 'features' in fields:
fields.remove('features')
features = obj.get("features")
if features is not None:
post_repr_string = ' on %s feature(s)' % len(features)
if 'excluded_features' in fields:
fields.remove('excluded_features')
# GLC transformers.
if issubclass(obj.__class__, _Transformer):
for attr in fields:
dict_str_list.append("%s=%s" % (attr, obj.get(attr).__repr__()))
# Chains
elif obj.__class__ == TransformerChain:
_step_classes = list(map(lambda x: x.__class__.__name__, obj.get('steps')))
_steps = _internal_utils.pretty_print_list(
_step_classes, 'steps', False)
dict_str_list.append(_steps)
# For user defined transformers.
else:
for attr in fields:
dict_str_list.append("%s=%s" % (attr, obj.__dict__[attr]))
return "%s(%s)%s" % (obj.__class__.__name__, ", ".join(dict_str_list),
post_repr_string) | A compact version of __repr__ for each of the steps. | Below is the the instruction that describes the task:
### Input:
A compact version of __repr__ for each of the steps.
### Response:
def _compact_class_repr(obj):
    """ A compact version of __repr__ for each of the steps.

    Renders ``ClassName(field=value, ...)`` from the constructor's
    argument list, collapsing the (potentially long) ``features``
    argument to a count suffix such as ``' on 12 feature(s)'``.
    """
    dict_str_list = []
    post_repr_string = ""
    # If features are present, then shorten it.
    init_func = obj.__init__
    if _sys.version_info.major == 2:
        # Python 2: unwrap the bound method to introspect the function.
        init_func = init_func.__func__
    # NOTE(review): inspect.getargspec is deprecated and removed in
    # Python 3.11 -- consider getfullargspec; confirm supported versions.
    fields = _inspect.getargspec(init_func).args
    fields = fields[1:]  # remove self
    if 'features' in fields:
        fields.remove('features')
        features = obj.get("features")
        if features is not None:
            post_repr_string = ' on %s feature(s)' % len(features)
    if 'excluded_features' in fields:
        fields.remove('excluded_features')
    # GLC transformers expose their arguments via obj.get(...).
    if issubclass(obj.__class__, _Transformer):
        for attr in fields:
            dict_str_list.append("%s=%s" % (attr, obj.get(attr).__repr__()))
    # Chains: summarize the step class names instead of full reprs.
    elif obj.__class__ == TransformerChain:
        _step_classes = list(map(lambda x: x.__class__.__name__, obj.get('steps')))
        _steps = _internal_utils.pretty_print_list(
            _step_classes, 'steps', False)
        dict_str_list.append(_steps)
    # For user defined transformers, read attributes straight off __dict__.
    else:
        for attr in fields:
            dict_str_list.append("%s=%s" % (attr, obj.__dict__[attr]))
    return "%s(%s)%s" % (obj.__class__.__name__, ", ".join(dict_str_list),
post_repr_string) |
def convert_lat(Recs):
"""
uses lat, for age<5Ma, model_lat if present, else tries to use average_inc to estimate plat.
"""
New = []
for rec in Recs:
if 'model_lat' in list(rec.keys()) and rec['model_lat'] != "":
New.append(rec)
elif 'average_age' in list(rec.keys()) and rec['average_age'] != "" and float(rec['average_age']) <= 5.:
if 'site_lat' in list(rec.keys()) and rec['site_lat'] != "":
rec['model_lat'] = rec['site_lat']
New.append(rec)
elif 'average_inc' in list(rec.keys()) and rec['average_inc'] != "":
rec['model_lat'] = '%7.1f' % (plat(float(rec['average_inc'])))
New.append(rec)
return New | uses lat, for age<5Ma, model_lat if present, else tries to use average_inc to estimate plat. | Below is the the instruction that describes the task:
### Input:
uses lat, for age<5Ma, model_lat if present, else tries to use average_inc to estimate plat.
### Response:
def convert_lat(Recs):
    """
    uses lat, for age<5Ma, model_lat if present, else tries to use
    average_inc to estimate plat.

    :param Recs: list of record dictionaries (string-valued fields).
    :return: new list containing only the records for which a
        ``model_lat`` was already set or could be derived; records
        younger than 5 Ma fall back to ``site_lat``, then to a
        paleolatitude computed from ``average_inc`` via ``plat``.
    """
    New = []
    for rec in Recs:
        # `rec.get(key, "") != ""` replaces the original
        # `key in list(rec.keys()) and rec[key] != ""`, which built a
        # throwaway key list on every membership test.
        if rec.get('model_lat', "") != "":
            New.append(rec)
        elif rec.get('average_age', "") != "" and float(rec['average_age']) <= 5.:
            if rec.get('site_lat', "") != "":
                rec['model_lat'] = rec['site_lat']
                New.append(rec)
            elif rec.get('average_inc', "") != "":
                rec['model_lat'] = '%7.1f' % (plat(float(rec['average_inc'])))
                New.append(rec)
    return New
def human_readable_bytes(nb_bytes, suffix='B'):
"""
Convert a byte number into a human readable format.
Parameters
----------
nb_bytes : number
suffix : str, optional (default: "B")
Returns
-------
size_str : str
Examples
--------
>>> human_readable_bytes(123)
'123.0 B'
>>> human_readable_bytes(1025)
'1.0 KiB'
>>> human_readable_bytes(9671406556917033397649423)
'8.0 YiB'
"""
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(nb_bytes) < 1024.0:
return '%3.1f %s%s' % (nb_bytes, unit, suffix)
nb_bytes /= 1024.0
return '%.1f %s%s' % (nb_bytes, 'Yi', suffix) | Convert a byte number into a human readable format.
Parameters
----------
nb_bytes : number
suffix : str, optional (default: "B")
Returns
-------
size_str : str
Examples
--------
>>> human_readable_bytes(123)
'123.0 B'
>>> human_readable_bytes(1025)
'1.0 KiB'
>>> human_readable_bytes(9671406556917033397649423)
'8.0 YiB' | Below is the the instruction that describes the task:
### Input:
Convert a byte number into a human readable format.
Parameters
----------
nb_bytes : number
suffix : str, optional (default: "B")
Returns
-------
size_str : str
Examples
--------
>>> human_readable_bytes(123)
'123.0 B'
>>> human_readable_bytes(1025)
'1.0 KiB'
>>> human_readable_bytes(9671406556917033397649423)
'8.0 YiB'
### Response:
def human_readable_bytes(nb_bytes, suffix='B'):
    """
    Convert a byte number into a human readable format.

    Parameters
    ----------
    nb_bytes : number
    suffix : str, optional (default: "B")

    Returns
    -------
    size_str : str

    Examples
    --------
    >>> human_readable_bytes(123)
    '123.0 B'
    >>> human_readable_bytes(1025)
    '1.0 KiB'
    >>> human_readable_bytes(9671406556917033397649423)
    '8.0 YiB'
    """
    size = nb_bytes
    for prefix in ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi'):
        if abs(size) < 1024.0:
            return '%3.1f %s%s' % (size, prefix, suffix)
        size /= 1024.0
    # Larger than zebi: report in yobi units.
    return '%.1f %s%s' % (size, 'Yi', suffix)
def webob_to_django_response(webob_response):
"""Returns a django response to the `webob_response`"""
from django.http import HttpResponse
django_response = HttpResponse(
webob_response.app_iter,
content_type=webob_response.content_type,
status=webob_response.status_code,
)
for name, value in webob_response.headerlist:
django_response[name] = value
return django_response | Returns a django response to the `webob_response` | Below is the the instruction that describes the task:
### Input:
Returns a django response to the `webob_response`
### Response:
def webob_to_django_response(webob_response):
    """Returns a django response to the `webob_response`"""
    from django.http import HttpResponse
    converted = HttpResponse(
        webob_response.app_iter,
        content_type=webob_response.content_type,
        status=webob_response.status_code,
    )
    # Copy every WebOb header onto the Django response.
    for header_name, header_value in webob_response.headerlist:
        converted[header_name] = header_value
    return converted
def tokenize_sign(self, word):
"""This is for tokenizing cuneiform signs."""
if self.language == 'akkadian':
sign_tokens = tokenize_akkadian_signs(word)
else:
sign_tokens = 'Language must be written using cuneiform.'
return sign_tokens | This is for tokenizing cuneiform signs. | Below is the the instruction that describes the task:
### Input:
This is for tokenizing cuneiform signs.
### Response:
def tokenize_sign(self, word):
    """This is for tokenizing cuneiform signs.

    For Akkadian input the word is split into sign tokens; for any
    other language an explanatory message string is returned instead.
    """
    if self.language != 'akkadian':
        return 'Language must be written using cuneiform.'
    return tokenize_akkadian_signs(word)
def get_sequence_rules_by_search(self, sequence_rule_query, sequence_rule_search):
"""Pass through to provider SequenceRuleSearchSession.get_sequence_rules_by_search"""
# Implemented from azosid template for -
# osid.resource.ResourceSearchSession.get_resources_by_search_template
if not self._can('search'):
raise PermissionDenied()
return self._provider_session.get_sequence_rules_by_search(sequence_rule_query, sequence_rule_search) | Pass through to provider SequenceRuleSearchSession.get_sequence_rules_by_search | Below is the the instruction that describes the task:
### Input:
Pass through to provider SequenceRuleSearchSession.get_sequence_rules_by_search
### Response:
def get_sequence_rules_by_search(self, sequence_rule_query, sequence_rule_search):
    """Pass through to provider SequenceRuleSearchSession.get_sequence_rules_by_search

    :param sequence_rule_query: query constraining which sequence rules match.
    :param sequence_rule_search: search options (ordering, paging, etc.).
    :raises PermissionDenied: if this session lacks 'search' authorization.
    """
    # Implemented from azosid template for -
    # osid.resource.ResourceSearchSession.get_resources_by_search_template
    # Authorization gate first; only then delegate to the provider.
    if not self._can('search'):
        raise PermissionDenied()
return self._provider_session.get_sequence_rules_by_search(sequence_rule_query, sequence_rule_search) |
def generic(self, input_string, **kwargs):
""" return a generic filename for a given dataset and component
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
kwargs_copy['component'] = kwargs.get(
'component', self.component(**kwargs))
self._replace_none(kwargs_copy)
return input_string.format(**kwargs_copy) | return a generic filename for a given dataset and component | Below is the the instruction that describes the task:
### Input:
return a generic filename for a given dataset and component
### Response:
def generic(self, input_string, **kwargs):
    """ return a generic filename for a given dataset and component

    :param input_string: format template; ``{placeholder}`` fields are
        filled from the merged keyword dictionary.
    :param kwargs: overrides layered on top of ``self.base_dict``;
        ``dataset`` and ``component`` are derived via ``self.dataset()``
        / ``self.component()`` when not supplied by the caller.
    :return: the formatted filename string.
    """
    # Start from the defaults, then layer caller overrides on top.
    kwargs_copy = self.base_dict.copy()
    kwargs_copy.update(**kwargs)
    kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
    kwargs_copy['component'] = kwargs.get(
        'component', self.component(**kwargs))
    # presumably normalizes None values before formatting -- see
    # _replace_none; TODO confirm its exact substitution.
    self._replace_none(kwargs_copy)
return input_string.format(**kwargs_copy) |
def python_job(self, function, parameters=None):
"""
Run python function
function : Python callable to execute
name : Name of function (if not given, will used function.__name__)
parameters : Parameters to parse to function
label : Function label; for logging purposes
"""
if not callable(function):
raise utils.StimelaCabRuntimeError('Object given as function is not callable')
if self.name is None:
self.name = function.__name__
self.job = {
'function' : function,
'parameters': parameters,
}
return 0 | Run python function
function : Python callable to execute
name : Name of function (if not given, will used function.__name__)
parameters : Parameters to parse to function
label : Function label; for logging purposes | Below is the the instruction that describes the task:
### Input:
Run python function
function : Python callable to execute
name : Name of function (if not given, will used function.__name__)
parameters : Parameters to parse to function
label : Function label; for logging purposes
### Response:
def python_job(self, function, parameters=None):
"""
Run python function
function : Python callable to execute
name : Name of function (if not given, will used function.__name__)
parameters : Parameters to parse to function
label : Function label; for logging purposes
"""
if not callable(function):
raise utils.StimelaCabRuntimeError('Object given as function is not callable')
if self.name is None:
self.name = function.__name__
self.job = {
'function' : function,
'parameters': parameters,
}
return 0 |
def delete(self, directory_updated=False):
# pylint: disable=W0212
"""
Delete this configuration
:param directory_updated: If True, tell ConfigurationAdmin to not
recall the directory of this deletion
(internal use only)
"""
with self.__lock:
if self.__deleted:
# Nothing to do
return
# Update status
self.__deleted = True
# Notify ConfigurationAdmin, notify services only if the
# configuration had been updated before
self.__config_admin._delete(self, self.__updated, directory_updated)
# Remove the file
self.__persistence.delete(self.__pid)
# Clean up
if self.__properties:
self.__properties.clear()
self.__persistence = None
self.__pid = None | Delete this configuration
:param directory_updated: If True, tell ConfigurationAdmin to not
recall the directory of this deletion
(internal use only) | Below is the the instruction that describes the task:
### Input:
Delete this configuration
:param directory_updated: If True, tell ConfigurationAdmin to not
recall the directory of this deletion
(internal use only)
### Response:
def delete(self, directory_updated=False):
# pylint: disable=W0212
"""
Delete this configuration
:param directory_updated: If True, tell ConfigurationAdmin to not
recall the directory of this deletion
(internal use only)
"""
with self.__lock:
if self.__deleted:
# Nothing to do
return
# Update status
self.__deleted = True
# Notify ConfigurationAdmin, notify services only if the
# configuration had been updated before
self.__config_admin._delete(self, self.__updated, directory_updated)
# Remove the file
self.__persistence.delete(self.__pid)
# Clean up
if self.__properties:
self.__properties.clear()
self.__persistence = None
self.__pid = None |
def config_flag(option, value, default=False, section=cli.name):
"""Guesses whether a CLI flag should be turned on or off from the
configuration. If the configuration option value is same with the given
value, it returns ``True``.
::
@click.option('--ko-kr', 'locale', is_flag=True,
default=config_flag('locale', 'ko_KR'))
"""
class x(object):
def __bool__(self, option=option, value=value,
default=default, section=section):
config = read_config()
type = builtins.type(value)
get_option = option_getter(type)
try:
return get_option(config, section, option) == value
except (NoOptionError, NoSectionError):
return default
__nonzero__ = __bool__
return x() | Guesses whether a CLI flag should be turned on or off from the
configuration. If the configuration option value is same with the given
value, it returns ``True``.
::
@click.option('--ko-kr', 'locale', is_flag=True,
default=config_flag('locale', 'ko_KR')) | Below is the the instruction that describes the task:
### Input:
Guesses whether a CLI flag should be turned on or off from the
configuration. If the configuration option value is same with the given
value, it returns ``True``.
::
@click.option('--ko-kr', 'locale', is_flag=True,
default=config_flag('locale', 'ko_KR'))
### Response:
def config_flag(option, value, default=False, section=cli.name):
"""Guesses whether a CLI flag should be turned on or off from the
configuration. If the configuration option value is same with the given
value, it returns ``True``.
::
@click.option('--ko-kr', 'locale', is_flag=True,
default=config_flag('locale', 'ko_KR'))
"""
class x(object):
def __bool__(self, option=option, value=value,
default=default, section=section):
config = read_config()
type = builtins.type(value)
get_option = option_getter(type)
try:
return get_option(config, section, option) == value
except (NoOptionError, NoSectionError):
return default
__nonzero__ = __bool__
return x() |
def pwm_min_score(self):
"""Return the minimum PWM score.
Returns
-------
score : float
Minimum PWM score.
"""
if self.min_score is None:
score = 0
for row in self.pwm:
score += log(min(row) / 0.25 + 0.01)
self.min_score = score
return self.min_score | Return the minimum PWM score.
Returns
-------
score : float
Minimum PWM score. | Below is the the instruction that describes the task:
### Input:
Return the minimum PWM score.
Returns
-------
score : float
Minimum PWM score.
### Response:
def pwm_min_score(self):
"""Return the minimum PWM score.
Returns
-------
score : float
Minimum PWM score.
"""
if self.min_score is None:
score = 0
for row in self.pwm:
score += log(min(row) / 0.25 + 0.01)
self.min_score = score
return self.min_score |
def _init_map(self):
"""stub"""
super(EdXDragAndDropQuestionFormRecord, self)._init_map()
QuestionTextFormRecord._init_map(self)
QuestionFilesFormRecord._init_map(self)
self.my_osid_object_form._my_map['text']['text'] = '' | stub | Below is the the instruction that describes the task:
### Input:
stub
### Response:
def _init_map(self):
"""stub"""
super(EdXDragAndDropQuestionFormRecord, self)._init_map()
QuestionTextFormRecord._init_map(self)
QuestionFilesFormRecord._init_map(self)
self.my_osid_object_form._my_map['text']['text'] = '' |
def run_extract( # noqa
self,
source_project_dataset_table,
destination_cloud_storage_uris,
compression='NONE',
export_format='CSV',
field_delimiter=',',
print_header=True,
labels=None):
"""
Executes a BigQuery extract command to copy data from BigQuery to
Google Cloud Storage. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param source_project_dataset_table: The dotted ``<dataset>.<table>``
BigQuery table to use as the source data.
:type source_project_dataset_table: str
:param destination_cloud_storage_uris: The destination Google Cloud
Storage URI (e.g. gs://some-bucket/some-file.txt). Follows
convention defined here:
https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
:type destination_cloud_storage_uris: list
:param compression: Type of compression to use.
:type compression: str
:param export_format: File format to export.
:type export_format: str
:param field_delimiter: The delimiter to use when extracting to a CSV.
:type field_delimiter: str
:param print_header: Whether to print a header for a CSV file extract.
:type print_header: bool
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:type labels: dict
"""
source_project, source_dataset, source_table = \
_split_tablename(table_input=source_project_dataset_table,
default_project_id=self.project_id,
var_name='source_project_dataset_table')
configuration = {
'extract': {
'sourceTable': {
'projectId': source_project,
'datasetId': source_dataset,
'tableId': source_table,
},
'compression': compression,
'destinationUris': destination_cloud_storage_uris,
'destinationFormat': export_format,
}
}
if labels:
configuration['labels'] = labels
if export_format == 'CSV':
# Only set fieldDelimiter and printHeader fields if using CSV.
# Google does not like it if you set these fields for other export
# formats.
configuration['extract']['fieldDelimiter'] = field_delimiter
configuration['extract']['printHeader'] = print_header
return self.run_with_configuration(configuration) | Executes a BigQuery extract command to copy data from BigQuery to
Google Cloud Storage. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param source_project_dataset_table: The dotted ``<dataset>.<table>``
BigQuery table to use as the source data.
:type source_project_dataset_table: str
:param destination_cloud_storage_uris: The destination Google Cloud
Storage URI (e.g. gs://some-bucket/some-file.txt). Follows
convention defined here:
https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
:type destination_cloud_storage_uris: list
:param compression: Type of compression to use.
:type compression: str
:param export_format: File format to export.
:type export_format: str
:param field_delimiter: The delimiter to use when extracting to a CSV.
:type field_delimiter: str
:param print_header: Whether to print a header for a CSV file extract.
:type print_header: bool
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:type labels: dict | Below is the the instruction that describes the task:
### Input:
Executes a BigQuery extract command to copy data from BigQuery to
Google Cloud Storage. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param source_project_dataset_table: The dotted ``<dataset>.<table>``
BigQuery table to use as the source data.
:type source_project_dataset_table: str
:param destination_cloud_storage_uris: The destination Google Cloud
Storage URI (e.g. gs://some-bucket/some-file.txt). Follows
convention defined here:
https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
:type destination_cloud_storage_uris: list
:param compression: Type of compression to use.
:type compression: str
:param export_format: File format to export.
:type export_format: str
:param field_delimiter: The delimiter to use when extracting to a CSV.
:type field_delimiter: str
:param print_header: Whether to print a header for a CSV file extract.
:type print_header: bool
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:type labels: dict
### Response:
def run_extract( # noqa
self,
source_project_dataset_table,
destination_cloud_storage_uris,
compression='NONE',
export_format='CSV',
field_delimiter=',',
print_header=True,
labels=None):
"""
Executes a BigQuery extract command to copy data from BigQuery to
Google Cloud Storage. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param source_project_dataset_table: The dotted ``<dataset>.<table>``
BigQuery table to use as the source data.
:type source_project_dataset_table: str
:param destination_cloud_storage_uris: The destination Google Cloud
Storage URI (e.g. gs://some-bucket/some-file.txt). Follows
convention defined here:
https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
:type destination_cloud_storage_uris: list
:param compression: Type of compression to use.
:type compression: str
:param export_format: File format to export.
:type export_format: str
:param field_delimiter: The delimiter to use when extracting to a CSV.
:type field_delimiter: str
:param print_header: Whether to print a header for a CSV file extract.
:type print_header: bool
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:type labels: dict
"""
source_project, source_dataset, source_table = \
_split_tablename(table_input=source_project_dataset_table,
default_project_id=self.project_id,
var_name='source_project_dataset_table')
configuration = {
'extract': {
'sourceTable': {
'projectId': source_project,
'datasetId': source_dataset,
'tableId': source_table,
},
'compression': compression,
'destinationUris': destination_cloud_storage_uris,
'destinationFormat': export_format,
}
}
if labels:
configuration['labels'] = labels
if export_format == 'CSV':
# Only set fieldDelimiter and printHeader fields if using CSV.
# Google does not like it if you set these fields for other export
# formats.
configuration['extract']['fieldDelimiter'] = field_delimiter
configuration['extract']['printHeader'] = print_header
return self.run_with_configuration(configuration) |
def validate_common_content(experiment_config):
'''Validate whether the common values in experiment_config is valid'''
if not experiment_config.get('trainingServicePlatform') or \
experiment_config.get('trainingServicePlatform') not in ['local', 'remote', 'pai', 'kubeflow', 'frameworkcontroller']:
print_error('Please set correct trainingServicePlatform!')
exit(1)
schema_dict = {
'local': LOCAL_CONFIG_SCHEMA,
'remote': REMOTE_CONFIG_SCHEMA,
'pai': PAI_CONFIG_SCHEMA,
'kubeflow': KUBEFLOW_CONFIG_SCHEMA,
'frameworkcontroller': FRAMEWORKCONTROLLER_CONFIG_SCHEMA
}
separate_schema_dict = {
'tuner': tuner_schema_dict,
'advisor': advisor_schema_dict,
'assessor': assessor_schema_dict
}
separate_builtInName_dict = {
'tuner': 'builtinTunerName',
'advisor': 'builtinAdvisorName',
'assessor': 'builtinAssessorName'
}
try:
schema_dict.get(experiment_config['trainingServicePlatform']).validate(experiment_config)
for separate_key in separate_schema_dict.keys():
if experiment_config.get(separate_key):
if experiment_config[separate_key].get(separate_builtInName_dict[separate_key]):
validate = False
for key in separate_schema_dict[separate_key].keys():
if key.__contains__(experiment_config[separate_key][separate_builtInName_dict[separate_key]]):
Schema({**separate_schema_dict[separate_key][key]}).validate(experiment_config[separate_key])
validate = True
break
if not validate:
print_error('%s %s error!' % (separate_key, separate_builtInName_dict[separate_key]))
exit(1)
else:
Schema({**separate_schema_dict[separate_key]['customized']}).validate(experiment_config[separate_key])
except SchemaError as error:
print_error('Your config file is not correct, please check your config file content!')
if error.__str__().__contains__('Wrong key'):
print_error(' '.join(error.__str__().split()[:3]))
else:
print_error(error)
exit(1)
#set default value
if experiment_config.get('maxExecDuration') is None:
experiment_config['maxExecDuration'] = '999d'
if experiment_config.get('maxTrialNum') is None:
experiment_config['maxTrialNum'] = 99999
if experiment_config['trainingServicePlatform'] == 'remote':
for index in range(len(experiment_config['machineList'])):
if experiment_config['machineList'][index].get('port') is None:
experiment_config['machineList'][index]['port'] = 22 | Validate whether the common values in experiment_config is valid | Below is the the instruction that describes the task:
### Input:
Validate whether the common values in experiment_config is valid
### Response:
def validate_common_content(experiment_config):
'''Validate whether the common values in experiment_config is valid'''
if not experiment_config.get('trainingServicePlatform') or \
experiment_config.get('trainingServicePlatform') not in ['local', 'remote', 'pai', 'kubeflow', 'frameworkcontroller']:
print_error('Please set correct trainingServicePlatform!')
exit(1)
schema_dict = {
'local': LOCAL_CONFIG_SCHEMA,
'remote': REMOTE_CONFIG_SCHEMA,
'pai': PAI_CONFIG_SCHEMA,
'kubeflow': KUBEFLOW_CONFIG_SCHEMA,
'frameworkcontroller': FRAMEWORKCONTROLLER_CONFIG_SCHEMA
}
separate_schema_dict = {
'tuner': tuner_schema_dict,
'advisor': advisor_schema_dict,
'assessor': assessor_schema_dict
}
separate_builtInName_dict = {
'tuner': 'builtinTunerName',
'advisor': 'builtinAdvisorName',
'assessor': 'builtinAssessorName'
}
try:
schema_dict.get(experiment_config['trainingServicePlatform']).validate(experiment_config)
for separate_key in separate_schema_dict.keys():
if experiment_config.get(separate_key):
if experiment_config[separate_key].get(separate_builtInName_dict[separate_key]):
validate = False
for key in separate_schema_dict[separate_key].keys():
if key.__contains__(experiment_config[separate_key][separate_builtInName_dict[separate_key]]):
Schema({**separate_schema_dict[separate_key][key]}).validate(experiment_config[separate_key])
validate = True
break
if not validate:
print_error('%s %s error!' % (separate_key, separate_builtInName_dict[separate_key]))
exit(1)
else:
Schema({**separate_schema_dict[separate_key]['customized']}).validate(experiment_config[separate_key])
except SchemaError as error:
print_error('Your config file is not correct, please check your config file content!')
if error.__str__().__contains__('Wrong key'):
print_error(' '.join(error.__str__().split()[:3]))
else:
print_error(error)
exit(1)
#set default value
if experiment_config.get('maxExecDuration') is None:
experiment_config['maxExecDuration'] = '999d'
if experiment_config.get('maxTrialNum') is None:
experiment_config['maxTrialNum'] = 99999
if experiment_config['trainingServicePlatform'] == 'remote':
for index in range(len(experiment_config['machineList'])):
if experiment_config['machineList'][index].get('port') is None:
experiment_config['machineList'][index]['port'] = 22 |
def _post_init(self):
"""
The standard rootpy _post_init method that is used to initialize both
new Trees and Trees retrieved from a File.
"""
if not hasattr(self, '_buffer'):
# only set _buffer if model was not specified in the __init__
self._buffer = TreeBuffer()
self.read_branches_on_demand = False
self._branch_cache = {}
self._current_entry = 0
self._always_read = []
self.userdata = UserData()
self._inited = True | The standard rootpy _post_init method that is used to initialize both
new Trees and Trees retrieved from a File. | Below is the the instruction that describes the task:
### Input:
The standard rootpy _post_init method that is used to initialize both
new Trees and Trees retrieved from a File.
### Response:
def _post_init(self):
"""
The standard rootpy _post_init method that is used to initialize both
new Trees and Trees retrieved from a File.
"""
if not hasattr(self, '_buffer'):
# only set _buffer if model was not specified in the __init__
self._buffer = TreeBuffer()
self.read_branches_on_demand = False
self._branch_cache = {}
self._current_entry = 0
self._always_read = []
self.userdata = UserData()
self._inited = True |
def clean(input_string,
tag_dictionary=constants.SUPPORTED_TAGS):
"""
Sanitizes HTML. Tags not contained as keys in the tag_dictionary input are
removed, and child nodes are recursively moved to parent of removed node.
Attributes not contained as arguments in tag_dictionary are removed.
Doctype is set to <!DOCTYPE html>.
Args:
input_string (basestring): A (possibly unicode) string representing HTML.
tag_dictionary (Option[dict]): A dictionary with tags as keys and
attributes as values. This operates as a whitelist--i.e. if a tag
isn't contained, it will be removed. By default, this is set to
use the supported tags and attributes for the Amazon Kindle,
as found at https://kdp.amazon.com/help?topicId=A1JPUWCSD6F59O
Returns:
str: A (possibly unicode) string representing HTML.
Raises:
TypeError: Raised if input_string isn't a unicode string or string.
"""
try:
assert isinstance(input_string, basestring)
except AssertionError:
raise TypeError
root = BeautifulSoup(input_string, 'html.parser')
article_tag = root.find_all('article')
if article_tag:
root = article_tag[0]
stack = root.findAll(True, recursive=False)
while stack:
current_node = stack.pop()
child_node_list = current_node.findAll(True, recursive=False)
if current_node.name not in tag_dictionary.keys():
parent_node = current_node.parent
current_node.extract()
for n in child_node_list:
parent_node.append(n)
else:
attribute_dict = current_node.attrs
for attribute in attribute_dict.keys():
if attribute not in tag_dictionary[current_node.name]:
attribute_dict.pop(attribute)
stack.extend(child_node_list)
#wrap partial tree if necessary
if root.find_all('html') == []:
root = create_html_from_fragment(root)
# Remove img tags without src attribute
image_node_list = root.find_all('img')
for node in image_node_list:
if not node.has_attr('src'):
node.extract()
unformatted_html_unicode_string = unicode(root.prettify(encoding='utf-8',
formatter=EntitySubstitution.substitute_html),
encoding='utf-8')
# fix <br> tags since not handled well by default by bs4
unformatted_html_unicode_string = unformatted_html_unicode_string.replace('<br>', '<br/>')
# remove and replace with space since not handled well by certain e-readers
unformatted_html_unicode_string = unformatted_html_unicode_string.replace(' ', ' ')
return unformatted_html_unicode_string | Sanitizes HTML. Tags not contained as keys in the tag_dictionary input are
removed, and child nodes are recursively moved to parent of removed node.
Attributes not contained as arguments in tag_dictionary are removed.
Doctype is set to <!DOCTYPE html>.
Args:
input_string (basestring): A (possibly unicode) string representing HTML.
tag_dictionary (Option[dict]): A dictionary with tags as keys and
attributes as values. This operates as a whitelist--i.e. if a tag
isn't contained, it will be removed. By default, this is set to
use the supported tags and attributes for the Amazon Kindle,
as found at https://kdp.amazon.com/help?topicId=A1JPUWCSD6F59O
Returns:
str: A (possibly unicode) string representing HTML.
Raises:
TypeError: Raised if input_string isn't a unicode string or string. | Below is the the instruction that describes the task:
### Input:
Sanitizes HTML. Tags not contained as keys in the tag_dictionary input are
removed, and child nodes are recursively moved to parent of removed node.
Attributes not contained as arguments in tag_dictionary are removed.
Doctype is set to <!DOCTYPE html>.
Args:
input_string (basestring): A (possibly unicode) string representing HTML.
tag_dictionary (Option[dict]): A dictionary with tags as keys and
attributes as values. This operates as a whitelist--i.e. if a tag
isn't contained, it will be removed. By default, this is set to
use the supported tags and attributes for the Amazon Kindle,
as found at https://kdp.amazon.com/help?topicId=A1JPUWCSD6F59O
Returns:
str: A (possibly unicode) string representing HTML.
Raises:
TypeError: Raised if input_string isn't a unicode string or string.
### Response:
def clean(input_string,
tag_dictionary=constants.SUPPORTED_TAGS):
"""
Sanitizes HTML. Tags not contained as keys in the tag_dictionary input are
removed, and child nodes are recursively moved to parent of removed node.
Attributes not contained as arguments in tag_dictionary are removed.
Doctype is set to <!DOCTYPE html>.
Args:
input_string (basestring): A (possibly unicode) string representing HTML.
tag_dictionary (Option[dict]): A dictionary with tags as keys and
attributes as values. This operates as a whitelist--i.e. if a tag
isn't contained, it will be removed. By default, this is set to
use the supported tags and attributes for the Amazon Kindle,
as found at https://kdp.amazon.com/help?topicId=A1JPUWCSD6F59O
Returns:
str: A (possibly unicode) string representing HTML.
Raises:
TypeError: Raised if input_string isn't a unicode string or string.
"""
try:
assert isinstance(input_string, basestring)
except AssertionError:
raise TypeError
root = BeautifulSoup(input_string, 'html.parser')
article_tag = root.find_all('article')
if article_tag:
root = article_tag[0]
stack = root.findAll(True, recursive=False)
while stack:
current_node = stack.pop()
child_node_list = current_node.findAll(True, recursive=False)
if current_node.name not in tag_dictionary.keys():
parent_node = current_node.parent
current_node.extract()
for n in child_node_list:
parent_node.append(n)
else:
attribute_dict = current_node.attrs
for attribute in attribute_dict.keys():
if attribute not in tag_dictionary[current_node.name]:
attribute_dict.pop(attribute)
stack.extend(child_node_list)
#wrap partial tree if necessary
if root.find_all('html') == []:
root = create_html_from_fragment(root)
# Remove img tags without src attribute
image_node_list = root.find_all('img')
for node in image_node_list:
if not node.has_attr('src'):
node.extract()
unformatted_html_unicode_string = unicode(root.prettify(encoding='utf-8',
formatter=EntitySubstitution.substitute_html),
encoding='utf-8')
# fix <br> tags since not handled well by default by bs4
unformatted_html_unicode_string = unformatted_html_unicode_string.replace('<br>', '<br/>')
# remove and replace with space since not handled well by certain e-readers
unformatted_html_unicode_string = unformatted_html_unicode_string.replace(' ', ' ')
return unformatted_html_unicode_string |
def login(self):
"""Authorize client."""
if not self.auth.access_token or \
(hasattr(self.auth, 'access_token_expired') and self.auth.access_token_expired):
import httplib2
http = httplib2.Http()
self.auth.refresh(http)
self.session.headers.update({
'Authorization': 'Bearer %s' % self.auth.access_token
}) | Authorize client. | Below is the the instruction that describes the task:
### Input:
Authorize client.
### Response:
def login(self):
"""Authorize client."""
if not self.auth.access_token or \
(hasattr(self.auth, 'access_token_expired') and self.auth.access_token_expired):
import httplib2
http = httplib2.Http()
self.auth.refresh(http)
self.session.headers.update({
'Authorization': 'Bearer %s' % self.auth.access_token
}) |
def start(self, id):
""" start a specific tracker. """
path = partial(_path, self.adapter)
path = path(id)
return self._put(path) | start a specific tracker. | Below is the the instruction that describes the task:
### Input:
start a specific tracker.
### Response:
def start(self, id):
""" start a specific tracker. """
path = partial(_path, self.adapter)
path = path(id)
return self._put(path) |
def union(self, streamSet):
"""
Creates a stream that is a union of this stream and other streams
Args:
streamSet: a set of Stream objects to merge with this stream
Returns:
Stream:
"""
if(not isinstance(streamSet,set)) :
raise TypeError("The union operator parameter must be a set object")
if(len(streamSet) == 0):
return self
op = self.topology.graph.addOperator("$Union$")
op.addInputPort(outputPort=self.oport)
for stream in streamSet:
op.addInputPort(outputPort=stream.oport)
oport = op.addOutputPort(schema=self.oport.schema)
return Stream(self.topology, oport) | Creates a stream that is a union of this stream and other streams
Args:
streamSet: a set of Stream objects to merge with this stream
Returns:
Stream: | Below is the the instruction that describes the task:
### Input:
Creates a stream that is a union of this stream and other streams
Args:
streamSet: a set of Stream objects to merge with this stream
Returns:
Stream:
### Response:
def union(self, streamSet):
"""
Creates a stream that is a union of this stream and other streams
Args:
streamSet: a set of Stream objects to merge with this stream
Returns:
Stream:
"""
if(not isinstance(streamSet,set)) :
raise TypeError("The union operator parameter must be a set object")
if(len(streamSet) == 0):
return self
op = self.topology.graph.addOperator("$Union$")
op.addInputPort(outputPort=self.oport)
for stream in streamSet:
op.addInputPort(outputPort=stream.oport)
oport = op.addOutputPort(schema=self.oport.schema)
return Stream(self.topology, oport) |
def run(args):
"""Runs the acorn setup/configuration commands.
"""
if args is None:
return
cmd = args["commands"][0]
if cmd == "configure":
if len(args["commands"]) < 2:# pragma: no cover
msg.err("'configure' command requires a second, sub-command "
"parameter. E.g., `acorn.py configure packages`.")
exit(0)
subcmd = args["commands"][1]
_run_configure(subcmd, args) | Runs the acorn setup/configuration commands. | Below is the the instruction that describes the task:
### Input:
Runs the acorn setup/configuration commands.
### Response:
def run(args):
"""Runs the acorn setup/configuration commands.
"""
if args is None:
return
cmd = args["commands"][0]
if cmd == "configure":
if len(args["commands"]) < 2:# pragma: no cover
msg.err("'configure' command requires a second, sub-command "
"parameter. E.g., `acorn.py configure packages`.")
exit(0)
subcmd = args["commands"][1]
_run_configure(subcmd, args) |
def set_extension(self, name, value):
"""
Sets the value for an extension using a fully constructed
asn1crypto.core.Asn1Value object. Normally this should not be needed,
and the convenience attributes should be sufficient.
See the definition of asn1crypto.ocsp.SingleResponseExtension and
asn1crypto.ocsp.ResponseDataExtension to determine the appropriate
object type for a given extension. Extensions are marked as critical
when RFC 6960 indicates so.
:param name:
A unicode string of an extension id name from
asn1crypto.ocsp.SingleResponseExtensionId or
asn1crypto.ocsp.ResponseDataExtensionId. If the extension is not one
defined in those classes, this must be an instance of one of the
classes instead of a unicode string.
:param value:
A value object per the specs defined by
asn1crypto.ocsp.SingleResponseExtension or
asn1crypto.ocsp.ResponseDataExtension
"""
if isinstance(name, str_cls):
response_data_extension_oids = set([
'nonce',
'extended_revoke',
'1.3.6.1.5.5.7.48.1.2',
'1.3.6.1.5.5.7.48.1.9'
])
single_response_extension_oids = set([
'crl',
'archive_cutoff',
'crl_reason',
'invalidity_date',
'certificate_issuer',
'1.3.6.1.5.5.7.48.1.3',
'1.3.6.1.5.5.7.48.1.6',
'2.5.29.21',
'2.5.29.24',
'2.5.29.29'
])
if name in response_data_extension_oids:
name = ocsp.ResponseDataExtensionId(name)
elif name in single_response_extension_oids:
name = ocsp.SingleResponseExtensionId(name)
else:
raise ValueError(_pretty_message(
'''
name must be a unicode string from
asn1crypto.ocsp.ResponseDataExtensionId or
asn1crypto.ocsp.SingleResponseExtensionId, not %s
''',
repr(name)
))
if isinstance(name, ocsp.ResponseDataExtensionId):
extension = ocsp.ResponseDataExtension({'extn_id': name})
elif isinstance(name, ocsp.SingleResponseExtensionId):
extension = ocsp.SingleResponseExtension({'extn_id': name})
else:
raise TypeError(_pretty_message(
'''
name must be a unicode string or an instance of
asn1crypto.ocsp.SingleResponseExtensionId or
asn1crypto.ocsp.ResponseDataExtensionId, not %s
''',
_type_name(name)
))
# We use native here to convert OIDs to meaningful names
name = extension['extn_id'].native
spec = extension.spec('extn_value')
if name == 'nonce':
raise ValueError(_pretty_message(
'''
The nonce value should be set via the .nonce attribute, not the
.set_extension() method
'''
))
if name == 'crl_reason':
raise ValueError(_pretty_message(
'''
The crl_reason value should be set via the certificate_status
parameter of the OCSPResponseBuilder() constructor, not the
.set_extension() method
'''
))
if name == 'certificate_issuer':
raise ValueError(_pretty_message(
'''
The certificate_issuer value should be set via the
.certificate_issuer attribute, not the .set_extension() method
'''
))
if not isinstance(value, spec) and value is not None:
raise TypeError(_pretty_message(
'''
value must be an instance of %s, not %s
''',
_type_name(spec),
_type_name(value)
))
if isinstance(extension, ocsp.ResponseDataExtension):
extn_dict = self._response_data_extensions
else:
extn_dict = self._single_response_extensions
if value is None:
if name in extn_dict:
del extn_dict[name]
else:
extn_dict[name] = value | Sets the value for an extension using a fully constructed
asn1crypto.core.Asn1Value object. Normally this should not be needed,
and the convenience attributes should be sufficient.
See the definition of asn1crypto.ocsp.SingleResponseExtension and
asn1crypto.ocsp.ResponseDataExtension to determine the appropriate
object type for a given extension. Extensions are marked as critical
when RFC 6960 indicates so.
:param name:
A unicode string of an extension id name from
asn1crypto.ocsp.SingleResponseExtensionId or
asn1crypto.ocsp.ResponseDataExtensionId. If the extension is not one
defined in those classes, this must be an instance of one of the
classes instead of a unicode string.
:param value:
A value object per the specs defined by
asn1crypto.ocsp.SingleResponseExtension or
asn1crypto.ocsp.ResponseDataExtension | Below is the the instruction that describes the task:
### Input:
Sets the value for an extension using a fully constructed
asn1crypto.core.Asn1Value object. Normally this should not be needed,
and the convenience attributes should be sufficient.
See the definition of asn1crypto.ocsp.SingleResponseExtension and
asn1crypto.ocsp.ResponseDataExtension to determine the appropriate
object type for a given extension. Extensions are marked as critical
when RFC 6960 indicates so.
:param name:
A unicode string of an extension id name from
asn1crypto.ocsp.SingleResponseExtensionId or
asn1crypto.ocsp.ResponseDataExtensionId. If the extension is not one
defined in those classes, this must be an instance of one of the
classes instead of a unicode string.
:param value:
A value object per the specs defined by
asn1crypto.ocsp.SingleResponseExtension or
asn1crypto.ocsp.ResponseDataExtension
### Response:
def set_extension(self, name, value):
    """
    Sets the value for an extension using a fully constructed
    asn1crypto.core.Asn1Value object. Normally this should not be needed,
    and the convenience attributes should be sufficient.
    See the definition of asn1crypto.ocsp.SingleResponseExtension and
    asn1crypto.ocsp.ResponseDataExtension to determine the appropriate
    object type for a given extension. Extensions are marked as critical
    when RFC 6960 indicates so.
    :param name:
        A unicode string of an extension id name from
        asn1crypto.ocsp.SingleResponseExtensionId or
        asn1crypto.ocsp.ResponseDataExtensionId. If the extension is not one
        defined in those classes, this must be an instance of one of the
        classes instead of a unicode string.
    :param value:
        A value object per the specs defined by
        asn1crypto.ocsp.SingleResponseExtension or
        asn1crypto.ocsp.ResponseDataExtension
    """
    # Accept a plain string and promote it to the typed OID class that
    # owns it; the two OID universes (response-data vs single-response)
    # are disjoint, so the string alone determines the flavor.
    if isinstance(name, str_cls):
        response_data_extension_oids = set([
            'nonce',
            'extended_revoke',
            '1.3.6.1.5.5.7.48.1.2',
            '1.3.6.1.5.5.7.48.1.9'
        ])
        single_response_extension_oids = set([
            'crl',
            'archive_cutoff',
            'crl_reason',
            'invalidity_date',
            'certificate_issuer',
            '1.3.6.1.5.5.7.48.1.3',
            '1.3.6.1.5.5.7.48.1.6',
            '2.5.29.21',
            '2.5.29.24',
            '2.5.29.29'
        ])
        if name in response_data_extension_oids:
            name = ocsp.ResponseDataExtensionId(name)
        elif name in single_response_extension_oids:
            name = ocsp.SingleResponseExtensionId(name)
        else:
            raise ValueError(_pretty_message(
                '''
                name must be a unicode string from
                asn1crypto.ocsp.ResponseDataExtensionId or
                asn1crypto.ocsp.SingleResponseExtensionId, not %s
                ''',
                repr(name)
            ))
    # Wrap the (now typed) id in an empty extension object of the
    # matching flavor so we can interrogate its value spec below.
    if isinstance(name, ocsp.ResponseDataExtensionId):
        extension = ocsp.ResponseDataExtension({'extn_id': name})
    elif isinstance(name, ocsp.SingleResponseExtensionId):
        extension = ocsp.SingleResponseExtension({'extn_id': name})
    else:
        raise TypeError(_pretty_message(
            '''
            name must be a unicode string or an instance of
            asn1crypto.ocsp.SingleResponseExtensionId or
            asn1crypto.ocsp.ResponseDataExtensionId, not %s
            ''',
            _type_name(name)
        ))
    # We use native here to convert OIDs to meaningful names
    name = extension['extn_id'].native
    spec = extension.spec('extn_value')
    # These three extensions have dedicated setters elsewhere on the
    # builder; reject them so there is exactly one way to set each.
    if name == 'nonce':
        raise ValueError(_pretty_message(
            '''
            The nonce value should be set via the .nonce attribute, not the
            .set_extension() method
            '''
        ))
    if name == 'crl_reason':
        raise ValueError(_pretty_message(
            '''
            The crl_reason value should be set via the certificate_status
            parameter of the OCSPResponseBuilder() constructor, not the
            .set_extension() method
            '''
        ))
    if name == 'certificate_issuer':
        raise ValueError(_pretty_message(
            '''
            The certificate_issuer value should be set via the
            .certificate_issuer attribute, not the .set_extension() method
            '''
        ))
    # value must match the asn1crypto spec for this extension; None is
    # allowed and means "remove the extension".
    if not isinstance(value, spec) and value is not None:
        raise TypeError(_pretty_message(
            '''
            value must be an instance of %s, not %s
            ''',
            _type_name(spec),
            _type_name(value)
        ))
    # Route the value into the pending-extension dict of the right flavor.
    if isinstance(extension, ocsp.ResponseDataExtension):
        extn_dict = self._response_data_extensions
    else:
        extn_dict = self._single_response_extensions
    if value is None:
        # None removes a previously-set extension (no-op if absent).
        if name in extn_dict:
            del extn_dict[name]
    else:
        extn_dict[name] = value
def query(self, start_date=None, end_date=None, ndays=None, metrics=[], dimensions=[], filters=None, segment=None, sort=[], start_index=1, max_results=10):
"""
Execute a query.
"""
if start_date:
start_date = start_date
elif getattr(self.args, 'start-date'):
start_date = getattr(self.args, 'start-date')
elif self.config.get('start-date', None):
start_date = self.config['start-date']
else:
start_date = '2005-01-01'
if end_date:
end_date = end_date
elif getattr(self.args, 'end-date'):
end_date = getattr(self.args, 'end-date')
elif self.config.get('end-date', None):
end_date = self.config['end-date']
elif ndays:
end_date = self._ndays(start_date, ndays)
elif self.args.ndays:
end_date = self._ndays(start_date, self.args.ndays)
elif self.config.get('ndays', None):
end_date = self._ndays(start_date, self.config['ndays'])
else:
end_date = 'today'
if self.args.domain:
domain = self.args.domain
elif self.config.get('domain', None):
domain = self.config['domain']
else:
domain = None
if domain:
domain_filter = 'ga:hostname==%s' % domain
if filters:
filters = '%s;%s' % (domain_filter, filters)
else:
filters = domain_filter
if self.args.prefix:
prefix = self.args.prefix
elif self.config.get('prefix', None):
prefix = self.config['prefix']
else:
prefix = None
if prefix:
prefix_filter = 'ga:pagePath=~^%s' % prefix
if filters:
filters = '%s;%s' % (prefix_filter, filters)
else:
filters = prefix_filter
return self.service.data().ga().get(
ids='ga:' + self.config['property-id'],
start_date=start_date,
end_date=end_date,
metrics=','.join(metrics) or None,
dimensions=','.join(dimensions) or None,
filters=filters,
segment=segment,
sort=','.join(sort) or None,
start_index=str(start_index),
max_results=str(max_results)
).execute() | Execute a query. | Below is the the instruction that describes the task:
### Input:
Execute a query.
### Response:
def query(self, start_date=None, end_date=None, ndays=None, metrics=(),
          dimensions=(), filters=None, segment=None, sort=(),
          start_index=1, max_results=10):
    """
    Execute a Google Analytics reporting query.

    Dates, domain and path-prefix restrictions fall back, in order, to
    the command-line arguments (``self.args``) and the config file
    (``self.config``).

    :param start_date: ISO date string; defaults to args/config, then
        '2005-01-01' (the earliest date the API accepts).
    :param end_date: ISO date string; when absent it is derived from
        ``ndays`` (parameter, args or config), else 'today'.
    :param ndays: number of days after start_date used to compute
        end_date when no explicit end_date is available.
    :param metrics: iterable of metric names, joined with ','.
    :param dimensions: iterable of dimension names, joined with ','.
    :param filters: filter expression; any domain/prefix filters from
        args/config are AND-ed (';') in front of it.
    :param segment: segment expression passed through unchanged.
    :param sort: iterable of sort fields, joined with ','.
    :param start_index: 1-based index of the first result row.
    :param max_results: maximum number of rows to return.
    :returns: the decoded API response (result of ``.execute()``).
    """
    # NOTE: mutable default arguments ([]) replaced with immutable ()
    # to avoid the shared-default pitfall; ','.join(()) is still '' so
    # behavior is unchanged.
    if not start_date:
        start_date = (getattr(self.args, 'start-date')
                      or self.config.get('start-date', None)
                      or '2005-01-01')
    if not end_date:
        end_date = (getattr(self.args, 'end-date')
                    or self.config.get('end-date', None))
    if not end_date:
        # No explicit end date anywhere: derive one from ndays if set.
        ndays = ndays or self.args.ndays or self.config.get('ndays', None)
        end_date = self._ndays(start_date, ndays) if ndays else 'today'

    domain = self.args.domain or self.config.get('domain', None)
    prefix = self.args.prefix or self.config.get('prefix', None)

    def _prepend(expr, existing):
        # Filter expressions combined with ';' are AND-ed together.
        return '%s;%s' % (expr, existing) if existing else expr

    if domain:
        filters = _prepend('ga:hostname==%s' % domain, filters)
    if prefix:
        filters = _prepend('ga:pagePath=~^%s' % prefix, filters)
    return self.service.data().ga().get(
        ids='ga:' + self.config['property-id'],
        start_date=start_date,
        end_date=end_date,
        metrics=','.join(metrics) or None,
        dimensions=','.join(dimensions) or None,
        filters=filters,
        segment=segment,
        sort=','.join(sort) or None,
        start_index=str(start_index),
        max_results=str(max_results)
    ).execute()
def is_type(url, types=[], wait=10):
""" Determine the MIME-type of the document behind the url.
MIME is more reliable than simply checking the document extension.
Returns True when the MIME-type starts with anything in the list of types.
"""
# Types can also be a single string for convenience.
if isinstance(types, str):
types = [types]
try: connection = open(url, wait)
except:
return False
type = connection.info()["Content-Type"]
for t in types:
if type.startswith(t): return True
return False | Determine the MIME-type of the document behind the url.
MIME is more reliable than simply checking the document extension.
Returns True when the MIME-type starts with anything in the list of types. | Below is the the instruction that describes the task:
### Input:
Determine the MIME-type of the document behind the url.
MIME is more reliable than simply checking the document extension.
Returns True when the MIME-type starts with anything in the list of types.
### Response:
def is_type(url, types=(), wait=10):
    """ Determine the MIME-type of the document behind the url.
    MIME is more reliable than simply checking the document extension.
    Returns True when the MIME-type starts with anything in the list of types.

    :param url: address of the document to probe.
    :param types: a single MIME-type prefix or a list/tuple of prefixes.
    :param wait: timeout passed to the module's ``open`` helper.
    :returns: True if the Content-Type starts with any given prefix;
        False when nothing matches or the document cannot be retrieved.
    """
    # Types can also be a single string for convenience.
    if isinstance(types, str):
        types = [types]
    try:
        # NOTE(review): ``open(url, wait)`` appears to be the module's own
        # URL opener, not the builtin -- confirm against the enclosing file.
        connection = open(url, wait)
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        return False
    mime = connection.info()["Content-Type"]
    # Renamed from ``type`` to avoid shadowing the builtin.
    return any(mime.startswith(t) for t in types)
def path_exists(self, path):
"""Does the API-style path (directory) actually exist?
Parameters
----------
path : string
The path to check. This is an API path (`/` separated,
relative to base notebook-dir).
Returns
-------
exists : bool
Whether the path is indeed a directory.
"""
path = path.strip('/')
if path != '':
spec = {'path': path}
count = self._connect_collection(self.notebook_collection).find(spec).count()
else:
count = 1
return count > 0 | Does the API-style path (directory) actually exist?
Parameters
----------
path : string
The path to check. This is an API path (`/` separated,
relative to base notebook-dir).
Returns
-------
exists : bool
Whether the path is indeed a directory. | Below is the the instruction that describes the task:
### Input:
Does the API-style path (directory) actually exist?
Parameters
----------
path : string
The path to check. This is an API path (`/` separated,
relative to base notebook-dir).
Returns
-------
exists : bool
Whether the path is indeed a directory.
### Response:
def path_exists(self, path):
    """Does the API-style path (directory) actually exist?

    Parameters
    ----------
    path : string
        The path to check. This is an API path (`/` separated,
        relative to base notebook-dir).

    Returns
    -------
    exists : bool
        Whether the path is indeed a directory.
    """
    path = path.strip('/')
    if not path:
        # The root directory always exists.
        return True
    matches = self._connect_collection(self.notebook_collection).find(
        {'path': path}).count()
    return matches > 0
def _get_fill_indexer_searchsorted(self, target, method, limit=None):
"""
Fallback pad/backfill get_indexer that works for monotonic decreasing
indexes and non-monotonic targets.
"""
if limit is not None:
raise ValueError('limit argument for %r method only well-defined '
'if index and target are monotonic' % method)
side = 'left' if method == 'pad' else 'right'
# find exact matches first (this simplifies the algorithm)
indexer = self.get_indexer(target)
nonexact = (indexer == -1)
indexer[nonexact] = self._searchsorted_monotonic(target[nonexact],
side)
if side == 'left':
# searchsorted returns "indices into a sorted array such that,
# if the corresponding elements in v were inserted before the
# indices, the order of a would be preserved".
# Thus, we need to subtract 1 to find values to the left.
indexer[nonexact] -= 1
# This also mapped not found values (values of 0 from
# np.searchsorted) to -1, which conveniently is also our
# sentinel for missing values
else:
# Mark indices to the right of the largest value as not found
indexer[indexer == len(self)] = -1
return indexer | Fallback pad/backfill get_indexer that works for monotonic decreasing
indexes and non-monotonic targets. | Below is the the instruction that describes the task:
### Input:
Fallback pad/backfill get_indexer that works for monotonic decreasing
indexes and non-monotonic targets.
### Response:
def _get_fill_indexer_searchsorted(self, target, method, limit=None):
    """
    Fallback pad/backfill get_indexer that works for monotonic decreasing
    indexes and non-monotonic targets.

    :param target: array-like of values to locate within ``self``.
    :param method: 'pad' or 'backfill'.
    :param limit: not supported by this fallback; any non-None value
        raises ValueError.
    :returns: integer array of positions into ``self``, with -1 marking
        values that have no pad/backfill match.
    """
    if limit is not None:
        raise ValueError('limit argument for %r method only well-defined '
                         'if index and target are monotonic' % method)
    side = 'left' if method == 'pad' else 'right'
    # find exact matches first (this simplifies the algorithm)
    indexer = self.get_indexer(target)
    nonexact = (indexer == -1)
    # For the remaining misses, binary-search the monotonic index on the
    # chosen side.
    indexer[nonexact] = self._searchsorted_monotonic(target[nonexact],
                                                     side)
    if side == 'left':
        # searchsorted returns "indices into a sorted array such that,
        # if the corresponding elements in v were inserted before the
        # indices, the order of a would be preserved".
        # Thus, we need to subtract 1 to find values to the left.
        indexer[nonexact] -= 1
        # This also mapped not found values (values of 0 from
        # np.searchsorted) to -1, which conveniently is also our
        # sentinel for missing values
    else:
        # Mark indices to the right of the largest value as not found
        indexer[indexer == len(self)] = -1
    return indexer
def quote_chinese(url, encodeing="utf-8"):
"""Quote non-ascii characters"""
if isinstance(url, six.text_type):
return quote_chinese(url.encode(encodeing))
if six.PY3:
res = [six.int2byte(b).decode('latin-1') if b < 128 else '%%%02X' % b for b in url]
else:
res = [b if ord(b) < 128 else '%%%02X' % ord(b) for b in url]
return "".join(res) | Quote non-ascii characters | Below is the the instruction that describes the task:
### Input:
Quote non-ascii characters
### Response:
def quote_chinese(url, encodeing="utf-8"):
    """Percent-encode non-ascii characters, leaving ASCII bytes untouched."""
    if isinstance(url, six.text_type):
        # Text input: encode to bytes first, then quote the byte string.
        return quote_chinese(url.encode(encodeing))
    if six.PY3:
        # Iterating a bytes object on Python 3 yields ints.
        pieces = (six.int2byte(b).decode('latin-1') if b < 128 else '%%%02X' % b
                  for b in url)
    else:
        # Iterating a str on Python 2 yields one-character strings.
        pieces = (c if ord(c) < 128 else '%%%02X' % ord(c) for c in url)
    return "".join(pieces)
def log_interp(x, xp, *args, **kwargs):
"""Wrap log_interpolate_1d for deprecated log_interp."""
return log_interpolate_1d(x, xp, *args, **kwargs) | Wrap log_interpolate_1d for deprecated log_interp. | Below is the the instruction that describes the task:
### Input:
Wrap log_interpolate_1d for deprecated log_interp.
### Response:
def log_interp(x, xp, *args, **kwargs):
    """Deprecated wrapper; forwards all arguments to :func:`log_interpolate_1d`."""
    return log_interpolate_1d(x, xp, *args, **kwargs)
def get_staticmethod_qualname(staticmeth):
"""Determines the fully qualified name of a static method.
Yields a result similar to what __qualname__ would contain, but is applicable
to static methods and also works in Python 2.7.
"""
func = _actualfunc(staticmeth)
module = sys.modules[func.__module__]
nst = _get_class_nesting_list_for_staticmethod(staticmeth, module, [], set())
nst = [cl.__name__ for cl in nst]
return '.'.join(nst)+'.'+func.__name__ | Determines the fully qualified name of a static method.
Yields a result similar to what __qualname__ would contain, but is applicable
to static methods and also works in Python 2.7. | Below is the the instruction that describes the task:
### Input:
Determines the fully qualified name of a static method.
Yields a result similar to what __qualname__ would contain, but is applicable
to static methods and also works in Python 2.7.
### Response:
def get_staticmethod_qualname(staticmeth):
    """Determine the fully qualified dotted name of a static method.

    Produces a result similar to what ``__qualname__`` would contain, but
    also covers static methods and works on Python 2.7.
    """
    func = _actualfunc(staticmeth)
    owner_module = sys.modules[func.__module__]
    nesting = _get_class_nesting_list_for_staticmethod(
        staticmeth, owner_module, [], set())
    class_names = [cls.__name__ for cls in nesting]
    return '.'.join(class_names) + '.' + func.__name__
def check_py_version():
"""Check if a propper Python version is used."""
try:
if sys.version_info >= (2, 7):
return
except:
pass
print(" ")
print(" ERROR - memtop needs python version at least 2.7")
print(("Chances are that you can install newer version from your "
"repositories, or even that you have some newer version "
"installed yet."))
print("(one way to find out which versions are installed is to try "
"following: 'which python2.7' , 'which python3' and so...)")
print(" ")
sys.exit(-1) | Check if a propper Python version is used. | Below is the the instruction that describes the task:
### Input:
Check if a propper Python version is used.
### Response:
def check_py_version():
    """Exit with an error message unless running on Python >= 2.7.

    Returns None on a supported interpreter; otherwise prints guidance
    and terminates the process via ``sys.exit(-1)``.
    """
    # sys.version_info always exists on any interpreter that can parse
    # this file, so the original bare try/except wrapper was removed.
    if sys.version_info >= (2, 7):
        return
    print(" ")
    print(" ERROR - memtop needs python version at least 2.7")
    print(("Chances are that you can install newer version from your "
           "repositories, or even that you have some newer version "
           "installed yet."))
    print("(one way to find out which versions are installed is to try "
          "following: 'which python2.7' , 'which python3' and so...)")
    print(" ")
    sys.exit(-1)
def enter_position(self):
'''enter new position'''
state = self.state
dlg = wx.TextEntryDialog(self, 'Enter new position', 'Position')
dlg.SetValue("%f %f" % (state.lat, state.lon))
if dlg.ShowModal() == wx.ID_OK:
latlon = dlg.GetValue().split()
dlg.Destroy()
state.lat = float(latlon[0])
state.lon = float(latlon[1])
self.re_center(state.width/2,state.height/2, state.lat, state.lon)
self.redraw_map() | enter new position | Below is the the instruction that describes the task:
### Input:
enter new position
### Response:
def enter_position(self):
    '''Prompt for a new "lat lon" position and re-center the map.

    Opens a modal text-entry dialog pre-filled with the current
    position; on OK, parses the entered pair, updates the map state and
    redraws. On cancel nothing changes.
    '''
    state = self.state
    dlg = wx.TextEntryDialog(self, 'Enter new position', 'Position')
    dlg.SetValue("%f %f" % (state.lat, state.lon))
    try:
        if dlg.ShowModal() == wx.ID_OK:
            latlon = dlg.GetValue().split()
            state.lat = float(latlon[0])
            state.lon = float(latlon[1])
            self.re_center(state.width/2, state.height/2, state.lat, state.lon)
            self.redraw_map()
    finally:
        # Destroy the dialog on every path: the original only destroyed
        # it on OK, leaking the native dialog when the user cancelled.
        dlg.Destroy()
def liftover_to_genome(pass_pos, gtf):
"""Liftover from precursor to genome"""
fixed_pos = []
for pos in pass_pos:
if pos["chrom"] not in gtf:
continue
db_pos = gtf[pos["chrom"]][0]
mut = _parse_mut(pos["sv"])
print([db_pos, pos])
if db_pos[3] == "+":
pos['pre_pos'] = db_pos[1] + pos["pre_pos"] + 1
else:
pos['pre_pos'] = db_pos[2] - (pos["pre_pos"] - 1)
pos['chrom'] = db_pos[0]
pos['nt'] = list(mut[0])
fixed_pos.append(pos)
_print_header(fixed_pos)
for pos in fixed_pos:
print_vcf(pos) | Liftover from precursor to genome | Below is the the instruction that describes the task:
### Input:
Liftover from precursor to genome
### Response:
def liftover_to_genome(pass_pos, gtf):
    """Liftover from precursor to genome

    Converts each passed position from precursor-local coordinates to
    genome coordinates and prints the results as VCF records.

    :param pass_pos: list of dicts carrying at least ``chrom`` (precursor
        name), ``pre_pos`` (position inside the precursor) and ``sv``
        (variant string consumed by ``_parse_mut``).
    :param gtf: mapping of precursor name to annotation records; the code
        indexes the first record as (chrom, start, end, strand) --
        TODO confirm exact tuple layout against the GTF reader.
    """
    fixed_pos = []
    for pos in pass_pos:
        # Skip precursors that have no genomic annotation.
        if pos["chrom"] not in gtf:
            continue
        db_pos = gtf[pos["chrom"]][0]
        mut = _parse_mut(pos["sv"])
        # NOTE(review): looks like leftover debug output -- consider removing.
        print([db_pos, pos])
        if db_pos[3] == "+":
            # Plus strand: offset forward from the precursor start.
            pos['pre_pos'] = db_pos[1] + pos["pre_pos"] + 1
        else:
            # Minus strand: count back from the precursor end.
            pos['pre_pos'] = db_pos[2] - (pos["pre_pos"] - 1)
        pos['chrom'] = db_pos[0]
        pos['nt'] = list(mut[0])
        fixed_pos.append(pos)
    _print_header(fixed_pos)
    for pos in fixed_pos:
        print_vcf(pos)
def _getConfigFile(self, config):
"""
Retrieves a file descriptor to a configuration file to process.
Inputs: config - The _Config object which is being populated.
Outputs: An open file descriptor to the configuration file to parse in read mode if successful, else None.
"""
joinPath = lambda p: (os.path.join(p) if isinstance(p, (tuple, list)) else p)
if self.filepathConfig is not None and self.filenameConfig is not None:
if hasattr(config, self.filepathConfig) and hasattr(config, self.filenameConfig):
path = joinPath(getattr(config, self.filepathConfig))
name = getattr(config, self.filenameConfig)
if os.path.isfile(os.path.join(path, name)):
return open(os.path.join(path, name), 'r')
if self.filepath is not None and self.filename is not None:
path = joinPath(self.filepath)
name = self.filename
if os.path.isfile(os.path.join(path, name)):
return open(os.path.join(path, name), 'r') | Retrieves a file descriptor to a configuration file to process.
Inputs: config - The _Config object which is being populated.
Outputs: An open file descriptor to the configuration file to parse in read mode if successful, else None. | Below is the the instruction that describes the task:
### Input:
Retrieves a file descriptor to a configuration file to process.
Inputs: config - The _Config object which is being populated.
Outputs: An open file descriptor to the configuration file to parse in read mode if successful, else None.
### Response:
def _getConfigFile(self, config):
"""
Retrieves a file descriptor to a configuration file to process.
Inputs: config - The _Config object which is being populated.
Outputs: An open file descriptor to the configuration file to parse in read mode if successful, else None.
"""
joinPath = lambda p: (os.path.join(p) if isinstance(p, (tuple, list)) else p)
if self.filepathConfig is not None and self.filenameConfig is not None:
if hasattr(config, self.filepathConfig) and hasattr(config, self.filenameConfig):
path = joinPath(getattr(config, self.filepathConfig))
name = getattr(config, self.filenameConfig)
if os.path.isfile(os.path.join(path, name)):
return open(os.path.join(path, name), 'r')
if self.filepath is not None and self.filename is not None:
path = joinPath(self.filepath)
name = self.filename
if os.path.isfile(os.path.join(path, name)):
return open(os.path.join(path, name), 'r') |
def keywords(self) -> Set[str]:
"""A set of all keywords of all handled devices.
In addition to attribute access via device names, |Nodes| and
|Elements| objects allow for attribute access via keywords,
allowing for an efficient search of certain groups of devices.
Let us use the example from above, where the nodes `na` and `nb`
have no keywords, but each of the other three nodes both belongs
to either `group_a` or `group_b` and `group_1` or `group_2`:
>>> from hydpy import Node, Nodes
>>> nodes = Nodes('na',
... Node('nb', variable='W'),
... Node('nc', keywords=('group_a', 'group_1')),
... Node('nd', keywords=('group_a', 'group_2')),
... Node('ne', keywords=('group_b', 'group_1')))
>>> nodes
Nodes("na", "nb", "nc", "nd", "ne")
>>> sorted(nodes.keywords)
['group_1', 'group_2', 'group_a', 'group_b']
If you are interested in inspecting all devices belonging to
`group_a`, select them via this keyword:
>>> subgroup = nodes.group_1
>>> subgroup
Nodes("nc", "ne")
You can further restrict the search by also selecting the devices
belonging to `group_b`, which holds only for node "e", in the given
example:
>>> subsubgroup = subgroup.group_b
>>> subsubgroup
Node("ne", variable="Q",
keywords=["group_1", "group_b"])
Note that the keywords already used for building a device subgroup
are not informative anymore (as they hold for each device) and are
thus not shown anymore:
>>> sorted(subgroup.keywords)
['group_a', 'group_b']
The latter might be confusing if you intend to work with a device
subgroup for a longer time. After copying the subgroup, all
keywords of the contained devices are available again:
>>> from copy import copy
>>> newgroup = copy(subgroup)
>>> sorted(newgroup.keywords)
['group_1', 'group_a', 'group_b']
"""
return set(keyword for device in self
for keyword in device.keywords if
keyword not in self._shadowed_keywords) | A set of all keywords of all handled devices.
In addition to attribute access via device names, |Nodes| and
|Elements| objects allow for attribute access via keywords,
allowing for an efficient search of certain groups of devices.
Let us use the example from above, where the nodes `na` and `nb`
have no keywords, but each of the other three nodes both belongs
to either `group_a` or `group_b` and `group_1` or `group_2`:
>>> from hydpy import Node, Nodes
>>> nodes = Nodes('na',
... Node('nb', variable='W'),
... Node('nc', keywords=('group_a', 'group_1')),
... Node('nd', keywords=('group_a', 'group_2')),
... Node('ne', keywords=('group_b', 'group_1')))
>>> nodes
Nodes("na", "nb", "nc", "nd", "ne")
>>> sorted(nodes.keywords)
['group_1', 'group_2', 'group_a', 'group_b']
If you are interested in inspecting all devices belonging to
`group_a`, select them via this keyword:
>>> subgroup = nodes.group_1
>>> subgroup
Nodes("nc", "ne")
You can further restrict the search by also selecting the devices
belonging to `group_b`, which holds only for node "e", in the given
example:
>>> subsubgroup = subgroup.group_b
>>> subsubgroup
Node("ne", variable="Q",
keywords=["group_1", "group_b"])
Note that the keywords already used for building a device subgroup
are not informative anymore (as they hold for each device) and are
thus not shown anymore:
>>> sorted(subgroup.keywords)
['group_a', 'group_b']
The latter might be confusing if you intend to work with a device
subgroup for a longer time. After copying the subgroup, all
keywords of the contained devices are available again:
>>> from copy import copy
>>> newgroup = copy(subgroup)
>>> sorted(newgroup.keywords)
['group_1', 'group_a', 'group_b'] | Below is the the instruction that describes the task:
### Input:
A set of all keywords of all handled devices.
In addition to attribute access via device names, |Nodes| and
|Elements| objects allow for attribute access via keywords,
allowing for an efficient search of certain groups of devices.
Let us use the example from above, where the nodes `na` and `nb`
have no keywords, but each of the other three nodes both belongs
to either `group_a` or `group_b` and `group_1` or `group_2`:
>>> from hydpy import Node, Nodes
>>> nodes = Nodes('na',
... Node('nb', variable='W'),
... Node('nc', keywords=('group_a', 'group_1')),
... Node('nd', keywords=('group_a', 'group_2')),
... Node('ne', keywords=('group_b', 'group_1')))
>>> nodes
Nodes("na", "nb", "nc", "nd", "ne")
>>> sorted(nodes.keywords)
['group_1', 'group_2', 'group_a', 'group_b']
If you are interested in inspecting all devices belonging to
`group_a`, select them via this keyword:
>>> subgroup = nodes.group_1
>>> subgroup
Nodes("nc", "ne")
You can further restrict the search by also selecting the devices
belonging to `group_b`, which holds only for node "e", in the given
example:
>>> subsubgroup = subgroup.group_b
>>> subsubgroup
Node("ne", variable="Q",
keywords=["group_1", "group_b"])
Note that the keywords already used for building a device subgroup
are not informative anymore (as they hold for each device) and are
thus not shown anymore:
>>> sorted(subgroup.keywords)
['group_a', 'group_b']
The latter might be confusing if you intend to work with a device
subgroup for a longer time. After copying the subgroup, all
keywords of the contained devices are available again:
>>> from copy import copy
>>> newgroup = copy(subgroup)
>>> sorted(newgroup.keywords)
['group_1', 'group_a', 'group_b']
### Response:
def keywords(self) -> Set[str]:
    """A set of all keywords of all handled devices.

    Besides name-based attribute access, |Nodes| and |Elements| objects
    support attribute access via keywords, allowing an efficient search
    for groups of devices: ``nodes.group_1`` selects every node that
    carries the keyword ``group_1``.

    Keywords already used to build this subgroup (the "shadowed" ones)
    hold for every contained device, carry no further information, and
    are therefore excluded from the result.  Copying the subgroup makes
    all device keywords available again.
    """
    shadowed = self._shadowed_keywords
    return {keyword
            for device in self
            for keyword in device.keywords
            if keyword not in shadowed}
def get_int(config, key, default):
"""
A helper to retrieve an integer value from a given dictionary
containing string values. If the requested value is not present
in the dictionary, or if it cannot be converted to an integer, a
default value will be returned instead.
:param config: The dictionary containing the desired value.
:param key: The dictionary key for the desired value.
:param default: The default value to return, if the key isn't set
in the dictionary, or if the value set isn't a
legal integer value.
:returns: The desired integer value.
"""
try:
return int(config[key])
except (KeyError, ValueError):
return default | A helper to retrieve an integer value from a given dictionary
containing string values. If the requested value is not present
in the dictionary, or if it cannot be converted to an integer, a
default value will be returned instead.
:param config: The dictionary containing the desired value.
:param key: The dictionary key for the desired value.
:param default: The default value to return, if the key isn't set
in the dictionary, or if the value set isn't a
legal integer value.
:returns: The desired integer value. | Below is the instruction that describes the task:
### Input:
A helper to retrieve an integer value from a given dictionary
containing string values. If the requested value is not present
in the dictionary, or if it cannot be converted to an integer, a
default value will be returned instead.
:param config: The dictionary containing the desired value.
:param key: The dictionary key for the desired value.
:param default: The default value to return, if the key isn't set
in the dictionary, or if the value set isn't a
legal integer value.
:returns: The desired integer value.
### Response:
def get_int(config, key, default):
    """
    A helper to retrieve an integer value from a given dictionary
    containing string values. If the requested value is not present
    in the dictionary, or if it cannot be converted to an integer, a
    default value will be returned instead.
    :param config: The dictionary containing the desired value.
    :param key: The dictionary key for the desired value.
    :param default: The default value to return, if the key isn't set
                    in the dictionary, or if the value set isn't a
                    legal integer value.
    :returns: The desired integer value.
    """
    try:
        raw = config[key]
    except KeyError:
        return default
    try:
        return int(raw)
    except ValueError:
        return default
def save_xml(self, doc, element):
    '''Save this component into an xml.dom.Element object.

    Serialises the component's attributes and children (ports,
    configuration sets, execution contexts, participants, location and
    extension properties) into *element*, using *doc* to create child
    nodes.
    '''
    element.setAttributeNS(XSI_NS, XSI_NS_S + 'type', 'rtsExt:component_ext')
    element.setAttributeNS(RTS_NS, RTS_NS_S + 'id', self.id)
    element.setAttributeNS(RTS_NS, RTS_NS_S + 'pathUri', self.path_uri)
    # Optional attributes are only written when set.
    if self.active_configuration_set:
        element.setAttributeNS(RTS_NS, RTS_NS_S + 'activeConfigurationSet',
                               self.active_configuration_set)
    element.setAttributeNS(RTS_NS, RTS_NS_S + 'instanceName',
                           self.instance_name)
    element.setAttributeNS(RTS_NS, RTS_NS_S + 'compositeType',
                           comp_type.to_string(self.composite_type))
    element.setAttributeNS(RTS_NS, RTS_NS_S + 'isRequired',
                           str(self.is_required).lower())
    if self.comment:
        element.setAttributeNS(RTS_EXT_NS, RTS_EXT_NS_S + 'comment',
                               self.comment)
    element.setAttributeNS(RTS_EXT_NS, RTS_EXT_NS_S + 'visible',
                           str(self.visible).lower())
    # Serialise each child collection as one XML element per item.
    for port in self.data_ports:
        new_element = doc.createElementNS(RTS_NS, RTS_NS_S + 'DataPorts')
        port.save_xml(doc, new_element)
        element.appendChild(new_element)
    for port in self.service_ports:
        new_element = doc.createElementNS(RTS_NS,
                                          RTS_NS_S + 'ServicePorts')
        port.save_xml(doc, new_element)
        element.appendChild(new_element)
    for cs in self.configuration_sets:
        new_element = doc.createElementNS(RTS_NS,
                                          RTS_NS_S + 'ConfigurationSets')
        cs.save_xml(doc, new_element)
        element.appendChild(new_element)
    for ec in self.execution_contexts:
        new_element = doc.createElementNS(RTS_NS,
                                          RTS_NS_S + 'ExecutionContexts')
        ec.save_xml(doc, new_element)
        element.appendChild(new_element)
    for p in self.participants:
        new_element = doc.createElementNS(RTS_NS,
                                          RTS_NS_S + 'Participants')
        p.save_xml(doc, new_element)
        element.appendChild(new_element)
    # Location is an rtsExt extension element.
    new_element = doc.createElementNS(RTS_EXT_NS,
                                      RTS_EXT_NS_S + 'Location')
    self._location.save_xml(doc, new_element)
    element.appendChild(new_element)
    # Free-form extension properties, one Properties element each.
    for p in self.properties:
        new_prop_element = doc.createElementNS(RTS_EXT_NS,
                                               RTS_EXT_NS_S + 'Properties')
        properties_to_xml(new_prop_element, p, self.properties[p])
        element.appendChild(new_prop_element) | Save this component into an xml.dom.Element object.
### Input:
Save this component into an xml.dom.Element object.
### Response:
def save_xml(self, doc, element):
    '''Save this component into an xml.dom.Element object.'''
    # Flag the element as the extended component type so readers select
    # the rtsExt schema when parsing it back.
    element.setAttributeNS(XSI_NS, XSI_NS_S + 'type', 'rtsExt:component_ext')
    element.setAttributeNS(RTS_NS, RTS_NS_S + 'id', self.id)
    element.setAttributeNS(RTS_NS, RTS_NS_S + 'pathUri', self.path_uri)
    # Optional attribute: only written when a configuration set is active.
    if self.active_configuration_set:
        element.setAttributeNS(RTS_NS, RTS_NS_S + 'activeConfigurationSet',
                               self.active_configuration_set)
    element.setAttributeNS(RTS_NS, RTS_NS_S + 'instanceName',
                           self.instance_name)
    element.setAttributeNS(RTS_NS, RTS_NS_S + 'compositeType',
                           comp_type.to_string(self.composite_type))
    # Booleans are serialised as lowercase 'true'/'false' strings.
    element.setAttributeNS(RTS_NS, RTS_NS_S + 'isRequired',
                           str(self.is_required).lower())
    if self.comment:
        element.setAttributeNS(RTS_EXT_NS, RTS_EXT_NS_S + 'comment',
                               self.comment)
    element.setAttributeNS(RTS_EXT_NS, RTS_EXT_NS_S + 'visible',
                           str(self.visible).lower())
    # Serialise each child collection into its own child element; every
    # child object knows how to write itself via its own save_xml.
    for port in self.data_ports:
        new_element = doc.createElementNS(RTS_NS, RTS_NS_S + 'DataPorts')
        port.save_xml(doc, new_element)
        element.appendChild(new_element)
    for port in self.service_ports:
        new_element = doc.createElementNS(RTS_NS,
                                          RTS_NS_S + 'ServicePorts')
        port.save_xml(doc, new_element)
        element.appendChild(new_element)
    for cs in self.configuration_sets:
        new_element = doc.createElementNS(RTS_NS,
                                          RTS_NS_S + 'ConfigurationSets')
        cs.save_xml(doc, new_element)
        element.appendChild(new_element)
    for ec in self.execution_contexts:
        new_element = doc.createElementNS(RTS_NS,
                                          RTS_NS_S + 'ExecutionContexts')
        ec.save_xml(doc, new_element)
        element.appendChild(new_element)
    for p in self.participants:
        new_element = doc.createElementNS(RTS_NS,
                                          RTS_NS_S + 'Participants')
        p.save_xml(doc, new_element)
        element.appendChild(new_element)
    # Location is written unconditionally as an rtsExt extension element.
    new_element = doc.createElementNS(RTS_EXT_NS,
                                      RTS_EXT_NS_S + 'Location')
    self._location.save_xml(doc, new_element)
    element.appendChild(new_element)
    # Extended key/value properties: one Properties element per entry.
    for p in self.properties:
        new_prop_element = doc.createElementNS(RTS_EXT_NS,
                                               RTS_EXT_NS_S + 'Properties')
        properties_to_xml(new_prop_element, p, self.properties[p])
        element.appendChild(new_prop_element)
def setuptools_entry(dist, keyword, value):
"""Setuptools entry point for setting version and baking it into package."""
# If 'use_katversion' is False, ignore the rest
if not value:
return
# Enforce the version obtained by katversion, overriding user setting
version = get_version()
if dist.metadata.version is not None:
s = "Ignoring explicit version='{0}' in setup.py, using '{1}' instead"
warnings.warn(s.format(dist.metadata.version, version))
dist.metadata.version = version
# Extend build_py command to bake version string into installed package
ExistingCustomBuildPy = dist.cmdclass.get('build_py', object)
class KatVersionBuildPy(AddVersionToInitBuildPy, ExistingCustomBuildPy):
"""First perform existing build_py and then bake in version string."""
dist.cmdclass['build_py'] = KatVersionBuildPy
# Extend sdist command to bake version string into source package
ExistingCustomSdist = dist.cmdclass.get('sdist', object)
class KatVersionSdist(AddVersionToInitSdist, ExistingCustomSdist):
"""First perform existing sdist and then bake in version string."""
dist.cmdclass['sdist'] = KatVersionSdist | Setuptools entry point for setting version and baking it into package. | Below is the the instruction that describes the task:
### Input:
Setuptools entry point for setting version and baking it into package.
### Response:
def setuptools_entry(dist, keyword, value):
    """Setuptools entry point for setting version and baking it into package."""
    # 'use_katversion' is falsy: leave the distribution untouched.
    if not value:
        return
    # The version computed by katversion always wins over any explicit
    # version the user put in setup.py; warn when we override one.
    version = get_version()
    if dist.metadata.version is not None:
        msg = "Ignoring explicit version='{0}' in setup.py, using '{1}' instead"
        warnings.warn(msg.format(dist.metadata.version, version))
    dist.metadata.version = version

    # Chain our build_py on top of whatever build_py command the project
    # already customised, so the version string is baked into the
    # installed package after the existing behaviour runs.
    base_build_py = dist.cmdclass.get('build_py', object)

    class KatVersionBuildPy(AddVersionToInitBuildPy, base_build_py):
        """First perform existing build_py and then bake in version string."""

    dist.cmdclass['build_py'] = KatVersionBuildPy

    # Same treatment for sdist so source packages carry the version too.
    base_sdist = dist.cmdclass.get('sdist', object)

    class KatVersionSdist(AddVersionToInitSdist, base_sdist):
        """First perform existing sdist and then bake in version string."""

    dist.cmdclass['sdist'] = KatVersionSdist
def handle_transform(self, task):
"""Handle a 'transform' callback."""
self.transformed += 1
file = task.result()
if file:
self.next.append_file(file)
self.flush_if_ended() | Handle a 'transform' callback. | Below is the the instruction that describes the task:
### Input:
Handle a 'transform' callback.
### Response:
def handle_transform(self, task):
    """Handle a 'transform' callback."""
    # Count the callback even when the task produced no file.
    self.transformed += 1
    produced = task.result()
    # Tasks may legitimately yield nothing; only forward real files.
    if produced:
        self.next.append_file(produced)
    self.flush_if_ended()
def get_resource_metadata(self, resource=None):
"""
Get resource metadata
:param resource: The name of the resource to get metadata for
:return: list
"""
result = self._make_metadata_request(meta_id=0, metadata_type='METADATA-RESOURCE')
if resource:
result = next((item for item in result if item['ResourceID'] == resource), None)
return result | Get resource metadata
:param resource: The name of the resource to get metadata for
:return: list | Below is the the instruction that describes the task:
### Input:
Get resource metadata
:param resource: The name of the resource to get metadata for
:return: list
### Response:
def get_resource_metadata(self, resource=None):
    """
    Get resource metadata
    :param resource: The name of the resource to get metadata for
    :return: list
    """
    metadata = self._make_metadata_request(meta_id=0,
                                           metadata_type='METADATA-RESOURCE')
    if not resource:
        return metadata
    # Narrow the full listing to the single matching resource;
    # None when nothing carries the requested ResourceID.
    for item in metadata:
        if item['ResourceID'] == resource:
            return item
    return None
def fill_relationshipobjs(go2obj, relationships):
"""Add GO IDs to go2obj that are involved in relationships."""
# Get all GO Term record objects that have relationships
obj = RelationshipFill(go2obj, relationships)
for goobj in go2obj.values():
if goobj.relationship:
obj.fill_relationshipgo2obj(goobj)
if goobj.relationship_rev:
obj.fill_relationshiprevgo2obj(goobj) | Add GO IDs to go2obj that are involved in relationships. | Below is the the instruction that describes the task:
### Input:
Add GO IDs to go2obj that are involved in relationships.
### Response:
def fill_relationshipobjs(go2obj, relationships):
    """Add GO IDs to go2obj that are involved in relationships."""
    filler = RelationshipFill(go2obj, relationships)
    # Walk every GO term and register both the forward and the reverse
    # relationship links with the filler.
    for term in go2obj.values():
        if term.relationship:
            filler.fill_relationshipgo2obj(term)
        if term.relationship_rev:
            filler.fill_relationshiprevgo2obj(term)
def get_response_page(request, return_type, template_location, response_page_type):
"""
Helper function to get an appropriate response page if it exists.
This function is not designed to be used directly as a view. It is
a helper function which can be called to check if a ResponsePage
exists for a ResponsePage type (which is also active).
:param request:
:param return_type:
:param template_location:
:param response_page_type:
:return:
"""
try:
page = models.ResponsePage.objects.get(
is_active=True,
type=response_page_type,
)
template = loader.get_template(template_location)
content_type = None
body = template.render(
RequestContext(request, {'request_path': request.path, 'page': page, })
)
return return_type(body, content_type=content_type)
except models.ResponsePage.DoesNotExist:
return None | Helper function to get an appropriate response page if it exists.
This function is not designed to be used directly as a view. It is
a helper function which can be called to check if a ResponsePage
exists for a ResponsePage type (which is also active).
:param request:
:param return_type:
:param template_location:
:param response_page_type:
:return: | Below is the the instruction that describes the task:
### Input:
Helper function to get an appropriate response page if it exists.
This function is not designed to be used directly as a view. It is
a helper function which can be called to check if a ResponsePage
exists for a ResponsePage type (which is also active).
:param request:
:param return_type:
:param template_location:
:param response_page_type:
:return:
### Response:
def get_response_page(request, return_type, template_location, response_page_type):
    """
    Helper function to get an appropriate response page if it exists.
    This function is not designed to be used directly as a view. It is
    a helper function which can be called to check if a ResponsePage
    exists for a ResponsePage type (which is also active).
    :param request:
    :param return_type:
    :param template_location:
    :param response_page_type:
    :return:
    """
    try:
        # Only an active page of the requested type qualifies.
        response_page = models.ResponsePage.objects.get(
            is_active=True,
            type=response_page_type,
        )
        body = loader.get_template(template_location).render(
            RequestContext(request, {'request_path': request.path, 'page': response_page, })
        )
        content_type = None
        return return_type(body, content_type=content_type)
    except models.ResponsePage.DoesNotExist:
        # No page configured for this type: signal the caller to fall
        # back to default handling.
        return None
def prepare_for_cyk(grammar, inplace=False):
# type: (Grammar, bool) -> Grammar
"""
Take common context-free grammar and perform all the necessary steps to use it in the CYK algorithm.
Performs following steps:
- remove useless symbols
- remove rules with epsilon
- remove unit rules
- remove useless symbols once more (as previous steps could change the grammar)
- transform it to Chomsky Normal Form
:param grammar: Grammar to transform.
:param inplace: True if the operation should be done in place. False by default.
:return: Modified grammar.
"""
grammar = ContextFree.remove_useless_symbols(grammar, inplace)
grammar = ContextFree.remove_rules_with_epsilon(grammar, True)
grammar = ContextFree.remove_unit_rules(grammar, True)
grammar = ContextFree.remove_useless_symbols(grammar, True)
grammar = ContextFree.transform_to_chomsky_normal_form(grammar, True)
return grammar | Take common context-free grammar and perform all the necessary steps to use it in the CYK algorithm.
Performs following steps:
- remove useless symbols
- remove rules with epsilon
- remove unit rules
- remove useless symbols once more (as previous steps could change the grammar)
- transform it to Chomsky Normal Form
:param grammar: Grammar to transform.
:param inplace: True if the operation should be done in place. False by default.
:return: Modified grammar. | Below is the the instruction that describes the task:
### Input:
Take common context-free grammar and perform all the necessary steps to use it in the CYK algorithm.
Performs following steps:
- remove useless symbols
- remove rules with epsilon
- remove unit rules
- remove useless symbols once more (as previous steps could change the grammar)
- transform it to Chomsky Normal Form
:param grammar: Grammar to transform.
:param inplace: True if the operation should be done in place. False by default.
:return: Modified grammar.
### Response:
def prepare_for_cyk(grammar, inplace=False):
    # type: (Grammar, bool) -> Grammar
    """
    Take common context-free grammar and perform all the necessary steps to use it in the CYK algorithm.
    Performs following steps:
    - remove useless symbols
    - remove rules with epsilon
    - remove unit rules
    - remove useless symbols once more (as previous steps could change the grammar)
    - transform it to Chomsky Normal Form
    :param grammar: Grammar to transform.
    :param inplace: True if the operation should be done in place. False by default.
    :return: Modified grammar.
    """
    # Only the first transformation honours the caller's `inplace`
    # choice; every later step already operates on our own copy, so it
    # is always performed in place.
    pipeline = [ContextFree.remove_useless_symbols,
                ContextFree.remove_rules_with_epsilon,
                ContextFree.remove_unit_rules,
                ContextFree.remove_useless_symbols,
                ContextFree.transform_to_chomsky_normal_form]
    grammar = pipeline[0](grammar, inplace)
    for step in pipeline[1:]:
        grammar = step(grammar, True)
    return grammar
def list(self):
"""
Load a list of SkillEntry objects from both local and
remote skills
It is necessary to load both local and remote skills at
the same time to correctly associate local skills with the name
in the repo and remote skills with any custom path that they
have been downloaded to
"""
try:
self.repo.update()
except GitException as e:
if not isdir(self.repo.path):
raise
LOG.warning('Failed to update repo: {}'.format(repr(e)))
remote_skill_list = (
SkillEntry(
name, SkillEntry.create_path(self.skills_dir, url, name),
url, sha if self.versioned else '', msm=self
)
for name, path, url, sha in self.repo.get_skill_data()
)
remote_skills = {
skill.id: skill for skill in remote_skill_list
}
all_skills = []
for skill_file in glob(join(self.skills_dir, '*', '__init__.py')):
skill = SkillEntry.from_folder(dirname(skill_file), msm=self)
if skill.id in remote_skills:
skill.attach(remote_skills.pop(skill.id))
all_skills.append(skill)
all_skills += list(remote_skills.values())
return all_skills | Load a list of SkillEntry objects from both local and
remote skills
It is necessary to load both local and remote skills at
the same time to correctly associate local skills with the name
in the repo and remote skills with any custom path that they
have been downloaded to | Below is the the instruction that describes the task:
### Input:
Load a list of SkillEntry objects from both local and
remote skills
It is necessary to load both local and remote skills at
the same time to correctly associate local skills with the name
in the repo and remote skills with any custom path that they
have been downloaded to
### Response:
def list(self):
    """
    Load a list of SkillEntry objects from both local and
    remote skills
    It is necessary to load both local and remote skills at
    the same time to correctly associate local skills with the name
    in the repo and remote skills with any custom path that they
    have been downloaded to
    """
    try:
        self.repo.update()
    except GitException as e:
        # A failed update is tolerated as long as the repo already
        # exists on disk; otherwise there is nothing to list from.
        if not isdir(self.repo.path):
            raise
        LOG.warning('Failed to update repo: {}'.format(repr(e)))
    # Lazily build an entry for every skill the repo knows about; the
    # sha is only pinned when this manager tracks versions.
    remote_skill_list = (
        SkillEntry(
            name, SkillEntry.create_path(self.skills_dir, url, name),
            url, sha if self.versioned else '', msm=self
        )
        for name, path, url, sha in self.repo.get_skill_data()
    )
    # Index remote entries by id so local folders can claim them below.
    remote_skills = {
        skill.id: skill for skill in remote_skill_list
    }
    all_skills = []
    # Every installed skill is a directory containing __init__.py.
    for skill_file in glob(join(self.skills_dir, '*', '__init__.py')):
        skill = SkillEntry.from_folder(dirname(skill_file), msm=self)
        if skill.id in remote_skills:
            # Pair the local install with its repo entry; pop so the
            # leftover dict holds only not-installed remote skills.
            skill.attach(remote_skills.pop(skill.id))
        all_skills.append(skill)
    # Remaining remote entries are skills that are not installed locally.
    all_skills += list(remote_skills.values())
    return all_skills
def save(self, save_json=True, save_xml=True):
"""
Saves the metadata json and/or xml to a file or DB.
:param save_json: flag to save json
:type save_json: bool
:param save_xml: flag to save xml
:type save_xml: bool
"""
if self.layer_is_file_based:
if save_json:
self.write_to_file(self.json_uri)
if save_xml:
self.write_to_file(self.xml_uri)
else:
self.write_to_db(save_json, save_xml) | Saves the metadata json and/or xml to a file or DB.
:param save_json: flag to save json
:type save_json: bool
:param save_xml: flag to save xml
:type save_xml: bool | Below is the the instruction that describes the task:
### Input:
Saves the metadata json and/or xml to a file or DB.
:param save_json: flag to save json
:type save_json: bool
:param save_xml: flag to save xml
:type save_xml: bool
### Response:
def save(self, save_json=True, save_xml=True):
    """
    Saves the metadata json and/or xml to a file or DB.
    :param save_json: flag to save json
    :type save_json: bool
    :param save_xml: flag to save xml
    :type save_xml: bool
    """
    if not self.layer_is_file_based:
        # DB-backed layers hand both flags to a single writer.
        self.write_to_db(save_json, save_xml)
        return
    # File-based layers write each requested representation to its URI.
    if save_json:
        self.write_to_file(self.json_uri)
    if save_xml:
        self.write_to_file(self.xml_uri)
def list_vdirs(site, app=_DEFAULT_APP):
'''
Get all configured IIS virtual directories for the specified site, or for
the combination of site and application.
Args:
site (str): The IIS site name.
app (str): The IIS application.
Returns:
dict: A dictionary of the virtual directory names and properties.
CLI Example:
.. code-block:: bash
salt '*' win_iis.list_vdirs site
'''
ret = dict()
ps_cmd = ['Get-WebVirtualDirectory',
'-Site', r"'{0}'".format(site),
'-Application', r"'{0}'".format(app),
'|', "Select-Object PhysicalPath, @{ Name = 'name';",
r"Expression = { $_.path.Split('/')[-1] } }"]
cmd_ret = _srvmgr(cmd=ps_cmd, return_json=True)
try:
items = salt.utils.json.loads(cmd_ret['stdout'], strict=False)
except ValueError:
raise CommandExecutionError('Unable to parse return data as Json.')
for item in items:
ret[item['name']] = {'sourcepath': item['physicalPath']}
if not ret:
log.warning('No vdirs found in output: %s', cmd_ret)
return ret | Get all configured IIS virtual directories for the specified site, or for
the combination of site and application.
Args:
site (str): The IIS site name.
app (str): The IIS application.
Returns:
dict: A dictionary of the virtual directory names and properties.
CLI Example:
.. code-block:: bash
salt '*' win_iis.list_vdirs site | Below is the the instruction that describes the task:
### Input:
Get all configured IIS virtual directories for the specified site, or for
the combination of site and application.
Args:
site (str): The IIS site name.
app (str): The IIS application.
Returns:
dict: A dictionary of the virtual directory names and properties.
CLI Example:
.. code-block:: bash
salt '*' win_iis.list_vdirs site
### Response:
def list_vdirs(site, app=_DEFAULT_APP):
    '''
    Get all configured IIS virtual directories for the specified site, or for
    the combination of site and application.
    Args:
        site (str): The IIS site name.
        app (str): The IIS application.
    Returns:
        dict: A dictionary of the virtual directory names and properties.
    CLI Example:
    .. code-block:: bash
        salt '*' win_iis.list_vdirs site
    '''
    ret = dict()
    # The calculated property at the end extracts the last segment of
    # the IIS path as the vdir name (PowerShell: $_.path.Split('/')[-1]).
    ps_cmd = ['Get-WebVirtualDirectory',
              '-Site', r"'{0}'".format(site),
              '-Application', r"'{0}'".format(app),
              '|', "Select-Object PhysicalPath, @{ Name = 'name';",
              r"Expression = { $_.path.Split('/')[-1] } }"]
    cmd_ret = _srvmgr(cmd=ps_cmd, return_json=True)
    try:
        items = salt.utils.json.loads(cmd_ret['stdout'], strict=False)
    except ValueError:
        raise CommandExecutionError('Unable to parse return data as Json.')
    # Key each vdir by its name; only the physical path is exposed.
    for item in items:
        ret[item['name']] = {'sourcepath': item['physicalPath']}
    if not ret:
        log.warning('No vdirs found in output: %s', cmd_ret)
    return ret
def download(self, filename=None):
"""Download the dataset to a local file.
Parameters
----------
filename : str, optional
The full path to which the dataset will be saved
"""
if filename is None:
filename = self.name
with self.remote_open() as infile:
with open(filename, 'wb') as outfile:
outfile.write(infile.read()) | Download the dataset to a local file.
Parameters
----------
filename : str, optional
The full path to which the dataset will be saved | Below is the the instruction that describes the task:
### Input:
Download the dataset to a local file.
Parameters
----------
filename : str, optional
The full path to which the dataset will be saved
### Response:
def download(self, filename=None):
    """Download the dataset to a local file.
    Parameters
    ----------
    filename : str, optional
        The full path to which the dataset will be saved
    """
    # Fall back to the dataset's own name when no target path is given.
    target = self.name if filename is None else filename
    # Open the remote handle and the local file together; stream the
    # whole payload across in one read.
    with self.remote_open() as src, open(target, 'wb') as dst:
        dst.write(src.read())
def is_protected_type(obj):
"""Determine if the object instance is of a protected type.
Objects of protected types are preserved as-is when passed to
force_unicode(strings_only=True).
"""
return isinstance(obj, (
types.NoneType,
int, long,
datetime.datetime, datetime.date, datetime.time,
float, Decimal)
) | Determine if the object instance is of a protected type.
Objects of protected types are preserved as-is when passed to
force_unicode(strings_only=True). | Below is the the instruction that describes the task:
### Input:
Determine if the object instance is of a protected type.
Objects of protected types are preserved as-is when passed to
force_unicode(strings_only=True).
### Response:
def is_protected_type(obj):
    """Determine if the object instance is of a protected type.
    Objects of protected types are preserved as-is when passed to
    force_unicode(strings_only=True).
    """
    # The original relied on `types.NoneType` and `long`, which only
    # exist on Python 2 and raise NameError/AttributeError under
    # Python 3.  Build the integer-type tuple defensively so the check
    # behaves identically on 2 (int + long) and works on 3 (int only).
    try:
        integer_types = (int, long)  # noqa: F821 -- Python 2 only
    except NameError:
        integer_types = (int,)
    return isinstance(obj, integer_types + (
        type(None),
        datetime.datetime, datetime.date, datetime.time,
        float, Decimal))
def _build_from(baseip):
""" Build URL for description.xml from ip """
from ipaddress import ip_address
try:
ip_address(baseip)
except ValueError:
# """attempt to construct url but the ip format has changed"""
# logger.warning("Format of internalipaddress changed: %s", baseip)
if 'http' not in baseip[0:4].lower():
baseip = urlunsplit(['http', baseip, '', '', ''])
spl = urlsplit(baseip)
if '.xml' not in spl.path:
sep = '' if spl.path.endswith('/') else '/'
spl = spl._replace(path=spl.path+sep+'description.xml')
return spl.geturl()
else:
# construct url knowing baseip is a pure ip
return urlunsplit(('http', baseip, '/description.xml', '', '')) | Build URL for description.xml from ip | Below is the the instruction that describes the task:
### Input:
Build URL for description.xml from ip
### Response:
def _build_from(baseip):
""" Build URL for description.xml from ip """
from ipaddress import ip_address
try:
ip_address(baseip)
except ValueError:
# """attempt to construct url but the ip format has changed"""
# logger.warning("Format of internalipaddress changed: %s", baseip)
if 'http' not in baseip[0:4].lower():
baseip = urlunsplit(['http', baseip, '', '', ''])
spl = urlsplit(baseip)
if '.xml' not in spl.path:
sep = '' if spl.path.endswith('/') else '/'
spl = spl._replace(path=spl.path+sep+'description.xml')
return spl.geturl()
else:
# construct url knowing baseip is a pure ip
return urlunsplit(('http', baseip, '/description.xml', '', '')) |
def get_by_id(self, style_id, style_type):
"""Return the style of *style_type* matching *style_id*.
Returns the default for *style_type* if *style_id* is not found or is |None|, or
if the style having *style_id* is not of *style_type*.
"""
if style_id is None:
return self.default(style_type)
return self._get_by_id(style_id, style_type) | Return the style of *style_type* matching *style_id*.
Returns the default for *style_type* if *style_id* is not found or is |None|, or
if the style having *style_id* is not of *style_type*. | Below is the the instruction that describes the task:
### Input:
Return the style of *style_type* matching *style_id*.
Returns the default for *style_type* if *style_id* is not found or is |None|, or
if the style having *style_id* is not of *style_type*.
### Response:
def get_by_id(self, style_id, style_type):
    """Return the style of *style_type* matching *style_id*.
    Returns the default for *style_type* if *style_id* is not found or is |None|, or
    if the style having *style_id* is not of *style_type*.
    """
    if style_id is not None:
        return self._get_by_id(style_id, style_type)
    # No id supplied: the type's default style applies.
    return self.default(style_type)
def items_lower(self):
'''
Returns a generator iterating over keys and values, with the keys all
being lowercase.
'''
return ((key, val[1]) for key, val in six.iteritems(self._data)) | Returns a generator iterating over keys and values, with the keys all
being lowercase. | Below is the the instruction that describes the task:
### Input:
Returns a generator iterating over keys and values, with the keys all
being lowercase.
### Response:
def items_lower(self):
    '''
    Returns a generator iterating over keys and values, with the keys all
    being lowercase.
    '''
    # Stored values are tuples where index 1 holds the exposed value
    # (index 0 presumably keeps the original-case key -- TODO confirm
    # against the writer side of this mapping).  NOTE(review): the keys
    # yielded here are the stored keys as-is; lowercasing appears to
    # happen at insertion time -- verify.
    return ((key, val[1]) for key, val in six.iteritems(self._data))
def create_packet(reqid, message):
"""Creates Outgoing Packet from a given reqid and message
:param reqid: REQID object
:param message: protocol buffer object
"""
assert message.IsInitialized()
packet = ''
# calculate the totla size of the packet incl. header
typename = message.DESCRIPTOR.full_name
datasize = HeronProtocol.get_size_to_pack_string(typename) + \
REQID.REQID_SIZE + HeronProtocol.get_size_to_pack_message(message)
# first write out how much data is there as the header
packet += HeronProtocol.pack_int(datasize)
# next write the type string
packet += HeronProtocol.pack_int(len(typename))
packet += typename
# reqid
packet += reqid.pack()
# add the proto
packet += HeronProtocol.pack_int(message.ByteSize())
packet += message.SerializeToString()
return OutgoingPacket(packet) | Creates Outgoing Packet from a given reqid and message
:param reqid: REQID object
:param message: protocol buffer object | Below is the the instruction that describes the task:
### Input:
Creates Outgoing Packet from a given reqid and message
:param reqid: REQID object
:param message: protocol buffer object
### Response:
def create_packet(reqid, message):
    """Creates Outgoing Packet from a given reqid and message
    Wire layout: [total-size int][typename-length int][typename]
    [reqid][message-size int][serialized message].
    :param reqid: REQID object
    :param message: protocol buffer object
    """
    # Refuse to serialise a protobuf with missing required fields.
    assert message.IsInitialized()
    # NOTE(review): accumulating packed binary data into a '' string
    # implies Python 2 (str == bytes) -- confirm before porting.
    packet = ''
    # calculate the total size of the packet incl. header
    typename = message.DESCRIPTOR.full_name
    datasize = HeronProtocol.get_size_to_pack_string(typename) + \
        REQID.REQID_SIZE + HeronProtocol.get_size_to_pack_message(message)
    # first write out how much data is there as the header
    packet += HeronProtocol.pack_int(datasize)
    # next write the type string
    packet += HeronProtocol.pack_int(len(typename))
    packet += typename
    # reqid
    packet += reqid.pack()
    # add the proto
    packet += HeronProtocol.pack_int(message.ByteSize())
    packet += message.SerializeToString()
    return OutgoingPacket(packet)
def _F_indegree(H, F):
"""Returns the result of a function F applied to the list of indegrees in
in the hypergraph.
:param H: the hypergraph whose indegrees will be operated on.
:param F: function to execute on the list of indegrees in the hypergraph.
:returns: result of the given function F.
:raises: TypeError -- Algorithm only applicable to directed hypergraphs
"""
if not isinstance(H, DirectedHypergraph):
raise TypeError("Algorithm only applicable to directed hypergraphs")
return F([len(H.get_backward_star(node))
for node in H.get_node_set()]) | Returns the result of a function F applied to the list of indegrees in
in the hypergraph.
:param H: the hypergraph whose indegrees will be operated on.
:param F: function to execute on the list of indegrees in the hypergraph.
:returns: result of the given function F.
:raises: TypeError -- Algorithm only applicable to directed hypergraphs | Below is the the instruction that describes the task:
### Input:
Returns the result of a function F applied to the list of indegrees in
in the hypergraph.
:param H: the hypergraph whose indegrees will be operated on.
:param F: function to execute on the list of indegrees in the hypergraph.
:returns: result of the given function F.
:raises: TypeError -- Algorithm only applicable to directed hypergraphs
### Response:
def _F_indegree(H, F):
    """Returns the result of a function F applied to the list of indegrees in
    in the hypergraph.
    :param H: the hypergraph whose indegrees will be operated on.
    :param F: function to execute on the list of indegrees in the hypergraph.
    :returns: result of the given function F.
    :raises: TypeError -- Algorithm only applicable to directed hypergraphs
    """
    if not isinstance(H, DirectedHypergraph):
        raise TypeError("Algorithm only applicable to directed hypergraphs")
    # A node's indegree is the size of its backward star.
    indegrees = [len(H.get_backward_star(node)) for node in H.get_node_set()]
    return F(indegrees)
def _battedball_cd(cls, ab_des):
"""
Batted ball Code for at bat description
:param ab_des: at bat description
:return: battedball_cd(str)
"""
_ab_des = ab_des.lower()
if ab_des.count("ground")>0:
return 'G'
elif _ab_des.count("lines")>0:
return 'L'
elif _ab_des.count("flies")>0:
return 'F'
elif _ab_des.count("pops")>0:
return 'P'
elif _ab_des.count("on a line drive")>0:
return 'L'
elif _ab_des.count("fly ball")>0:
return 'F'
elif _ab_des.count("ground ball")>0:
return 'G'
elif _ab_des.count("pop up")>0:
return 'P'
else:
return '' | Batted ball Code for at bat description
:param ab_des: at bat description
:return: battedball_cd(str) | Below is the the instruction that describes the task:
### Input:
Batted ball Code for at bat description
:param ab_des: at bat description
:return: battedball_cd(str)
### Response:
def _battedball_cd(cls, ab_des):
"""
Batted ball Code for at bat description
:param ab_des: at bat description
:return: battedball_cd(str)
"""
_ab_des = ab_des.lower()
if ab_des.count("ground")>0:
return 'G'
elif _ab_des.count("lines")>0:
return 'L'
elif _ab_des.count("flies")>0:
return 'F'
elif _ab_des.count("pops")>0:
return 'P'
elif _ab_des.count("on a line drive")>0:
return 'L'
elif _ab_des.count("fly ball")>0:
return 'F'
elif _ab_des.count("ground ball")>0:
return 'G'
elif _ab_des.count("pop up")>0:
return 'P'
else:
return '' |
def present(name, DomainName,
            ElasticsearchClusterConfig=None,
            EBSOptions=None,
            AccessPolicies=None,
            SnapshotOptions=None,
            AdvancedOptions=None,
            Tags=None,
            region=None, key=None, keyid=None, profile=None,
            ElasticsearchVersion="1.5"):
    '''
    Ensure domain exists.
    name
        The name of the state definition
    DomainName
        Name of the domain.
    ElasticsearchClusterConfig
        Configuration options for an Elasticsearch domain. Specifies the
        instance type and number of instances in the domain cluster.
        InstanceType (string) --
        The instance type for an Elasticsearch cluster.
        InstanceCount (integer) --
        The number of instances in the specified domain cluster.
        DedicatedMasterEnabled (boolean) --
        A boolean value to indicate whether a dedicated master node is enabled.
        See About Dedicated Master Nodes for more information.
        ZoneAwarenessEnabled (boolean) --
        A boolean value to indicate whether zone awareness is enabled. See About
        Zone Awareness for more information.
        DedicatedMasterType (string) --
        The instance type for a dedicated master node.
        DedicatedMasterCount (integer) --
        Total number of dedicated master nodes, active and on standby, for the
        cluster.
    EBSOptions
        Options to enable, disable and specify the type and size of EBS storage
        volumes.
        EBSEnabled (boolean) --
        Specifies whether EBS-based storage is enabled.
        VolumeType (string) --
        Specifies the volume type for EBS-based storage.
        VolumeSize (integer) --
        Integer to specify the size of an EBS volume.
        Iops (integer) --
        Specifies the IOPD for a Provisioned IOPS EBS volume (SSD).
    AccessPolicies
        IAM access policy
    SnapshotOptions
        Option to set time, in UTC format, of the daily automated snapshot.
        Default value is 0 hours.
        AutomatedSnapshotStartHour (integer) --
        Specifies the time, in UTC format, when the service takes a daily
        automated snapshot of the specified Elasticsearch domain. Default value
        is 0 hours.
    AdvancedOptions
        Option to allow references to indices in an HTTP request body. Must be
        false when configuring access to individual sub-resources. By default,
        the value is true .
    region
        Region to connect to.
    key
        Secret key to be used.
    keyid
        Access key to be used.
    profile
        A dict with region, key and keyid, or a pillar key (string) that
        contains a dict with region, key and keyid.
    ElasticsearchVersion
        String of format X.Y to specify version for the Elasticsearch domain eg.
        "1.5" or "2.3".
    '''
    ret = {'name': DomainName,
           'result': True,
           'comment': '',
           'changes': {}
           }
    # Fill in the service defaults so the later comparison against the
    # described domain does not report spurious differences.
    if ElasticsearchClusterConfig is None:
        ElasticsearchClusterConfig = {
            'DedicatedMasterEnabled': False,
            'InstanceCount': 1,
            'InstanceType': 'm3.medium.elasticsearch',
            'ZoneAwarenessEnabled': False
        }
    if EBSOptions is None:
        EBSOptions = {
            'EBSEnabled': False,
        }
    if SnapshotOptions is None:
        SnapshotOptions = {
            'AutomatedSnapshotStartHour': 0
        }
    if AdvancedOptions is None:
        AdvancedOptions = {
            'rest.action.multi.allow_explicit_index': 'true'
        }
    if Tags is None:
        Tags = {}
    # AccessPolicies may be given as a JSON string; parse it up front.
    if AccessPolicies is not None and isinstance(AccessPolicies, six.string_types):
        try:
            AccessPolicies = salt.utils.json.loads(AccessPolicies)
        except ValueError as e:
            ret['result'] = False
            # BUG FIX: exceptions have no '.message' attribute on Python 3;
            # render the exception itself instead.
            ret['comment'] = 'Failed to create domain: {0}.'.format(six.text_type(e))
            return ret
    r = __salt__['boto_elasticsearch_domain.exists'](DomainName=DomainName,
                                                     region=region, key=key, keyid=keyid, profile=profile)
    if 'error' in r:
        ret['result'] = False
        ret['comment'] = 'Failed to create domain: {0}.'.format(r['error']['message'])
        return ret
    if not r.get('exists'):
        if __opts__['test']:
            ret['comment'] = 'Domain {0} is set to be created.'.format(DomainName)
            ret['result'] = None
            return ret
        r = __salt__['boto_elasticsearch_domain.create'](DomainName=DomainName,
                                                         ElasticsearchClusterConfig=ElasticsearchClusterConfig,
                                                         EBSOptions=EBSOptions,
                                                         AccessPolicies=AccessPolicies,
                                                         SnapshotOptions=SnapshotOptions,
                                                         AdvancedOptions=AdvancedOptions,
                                                         ElasticsearchVersion=str(ElasticsearchVersion),  # future lint: disable=blacklisted-function
                                                         region=region, key=key,
                                                         keyid=keyid, profile=profile)
        if not r.get('created'):
            ret['result'] = False
            ret['comment'] = 'Failed to create domain: {0}.'.format(r['error']['message'])
            return ret
        _describe = __salt__['boto_elasticsearch_domain.describe'](DomainName,
                                                                   region=region, key=key, keyid=keyid, profile=profile)
        ret['changes']['old'] = {'domain': None}
        ret['changes']['new'] = _describe
        ret['comment'] = 'Domain {0} created.'.format(DomainName)
        return ret
    ret['comment'] = os.linesep.join([ret['comment'], 'Domain {0} is present.'.format(DomainName)])
    ret['changes'] = {}
    # domain exists, ensure config matches
    _status = __salt__['boto_elasticsearch_domain.status'](DomainName=DomainName,
                                                           region=region, key=key, keyid=keyid,
                                                           profile=profile)['domain']
    # The version cannot be changed in place; fail early rather than diffing.
    if _status.get('ElasticsearchVersion') != str(ElasticsearchVersion):  # future lint: disable=blacklisted-function
        ret['result'] = False
        ret['comment'] = (
            'Failed to update domain: version cannot be modified '
            'from {0} to {1}.'.format(
                _status.get('ElasticsearchVersion'),
                str(ElasticsearchVersion)  # future lint: disable=blacklisted-function
            )
        )
        return ret
    _describe = __salt__['boto_elasticsearch_domain.describe'](DomainName=DomainName,
                                                               region=region, key=key, keyid=keyid,
                                                               profile=profile)['domain']
    _describe['AccessPolicies'] = salt.utils.json.loads(_describe['AccessPolicies'])
    # When EBSEnabled is false, describe returns extra values that can't be set
    if not _describe.get('EBSOptions', {}).get('EBSEnabled'):
        opts = _describe.get('EBSOptions', {})
        opts.pop('VolumeSize', None)
        opts.pop('VolumeType', None)
    comm_args = {}
    need_update = False
    es_opts = {'ElasticsearchClusterConfig': ElasticsearchClusterConfig,
               'EBSOptions': EBSOptions,
               'AccessPolicies': AccessPolicies,
               'SnapshotOptions': SnapshotOptions,
               'AdvancedOptions': AdvancedOptions}
    # Diff each option group; only changed groups are sent to update().
    for k, v in six.iteritems(es_opts):
        if not _compare_json(v, _describe[k]):
            need_update = True
            comm_args[k] = v
            ret['changes'].setdefault('new', {})[k] = v
            ret['changes'].setdefault('old', {})[k] = _describe[k]
    if need_update:
        if __opts__['test']:
            msg = 'Domain {0} set to be modified.'.format(DomainName)
            ret['comment'] = msg
            ret['result'] = None
            return ret
        ret['comment'] = os.linesep.join([ret['comment'], 'Domain to be modified'])
        r = __salt__['boto_elasticsearch_domain.update'](DomainName=DomainName,
                                                         region=region, key=key,
                                                         keyid=keyid, profile=profile,
                                                         **comm_args)
        if not r.get('updated'):
            ret['result'] = False
            ret['comment'] = 'Failed to update domain: {0}.'.format(r['error'])
            ret['changes'] = {}
            return ret
return ret | Ensure domain exists.
name
The name of the state definition
DomainName
Name of the domain.
ElasticsearchClusterConfig
Configuration options for an Elasticsearch domain. Specifies the
instance type and number of instances in the domain cluster.
InstanceType (string) --
The instance type for an Elasticsearch cluster.
InstanceCount (integer) --
The number of instances in the specified domain cluster.
DedicatedMasterEnabled (boolean) --
A boolean value to indicate whether a dedicated master node is enabled.
See About Dedicated Master Nodes for more information.
ZoneAwarenessEnabled (boolean) --
A boolean value to indicate whether zone awareness is enabled. See About
Zone Awareness for more information.
DedicatedMasterType (string) --
The instance type for a dedicated master node.
DedicatedMasterCount (integer) --
Total number of dedicated master nodes, active and on standby, for the
cluster.
EBSOptions
Options to enable, disable and specify the type and size of EBS storage
volumes.
EBSEnabled (boolean) --
Specifies whether EBS-based storage is enabled.
VolumeType (string) --
Specifies the volume type for EBS-based storage.
VolumeSize (integer) --
Integer to specify the size of an EBS volume.
Iops (integer) --
Specifies the IOPD for a Provisioned IOPS EBS volume (SSD).
AccessPolicies
IAM access policy
SnapshotOptions
Option to set time, in UTC format, of the daily automated snapshot.
Default value is 0 hours.
AutomatedSnapshotStartHour (integer) --
Specifies the time, in UTC format, when the service takes a daily
automated snapshot of the specified Elasticsearch domain. Default value
is 0 hours.
AdvancedOptions
Option to allow references to indices in an HTTP request body. Must be
false when configuring access to individual sub-resources. By default,
the value is true .
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
ElasticsearchVersion
String of format X.Y to specify version for the Elasticsearch domain eg.
"1.5" or "2.3". | Below is the instruction that describes the task:
### Input:
Ensure domain exists.
name
The name of the state definition
DomainName
Name of the domain.
ElasticsearchClusterConfig
Configuration options for an Elasticsearch domain. Specifies the
instance type and number of instances in the domain cluster.
InstanceType (string) --
The instance type for an Elasticsearch cluster.
InstanceCount (integer) --
The number of instances in the specified domain cluster.
DedicatedMasterEnabled (boolean) --
A boolean value to indicate whether a dedicated master node is enabled.
See About Dedicated Master Nodes for more information.
ZoneAwarenessEnabled (boolean) --
A boolean value to indicate whether zone awareness is enabled. See About
Zone Awareness for more information.
DedicatedMasterType (string) --
The instance type for a dedicated master node.
DedicatedMasterCount (integer) --
Total number of dedicated master nodes, active and on standby, for the
cluster.
EBSOptions
Options to enable, disable and specify the type and size of EBS storage
volumes.
EBSEnabled (boolean) --
Specifies whether EBS-based storage is enabled.
VolumeType (string) --
Specifies the volume type for EBS-based storage.
VolumeSize (integer) --
Integer to specify the size of an EBS volume.
Iops (integer) --
Specifies the IOPD for a Provisioned IOPS EBS volume (SSD).
AccessPolicies
IAM access policy
SnapshotOptions
Option to set time, in UTC format, of the daily automated snapshot.
Default value is 0 hours.
AutomatedSnapshotStartHour (integer) --
Specifies the time, in UTC format, when the service takes a daily
automated snapshot of the specified Elasticsearch domain. Default value
is 0 hours.
AdvancedOptions
Option to allow references to indices in an HTTP request body. Must be
false when configuring access to individual sub-resources. By default,
the value is true .
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
ElasticsearchVersion
String of format X.Y to specify version for the Elasticsearch domain eg.
"1.5" or "2.3".
### Response:
def present(name, DomainName,
ElasticsearchClusterConfig=None,
EBSOptions=None,
AccessPolicies=None,
SnapshotOptions=None,
AdvancedOptions=None,
Tags=None,
region=None, key=None, keyid=None, profile=None,
ElasticsearchVersion="1.5"):
'''
Ensure domain exists.
name
The name of the state definition
DomainName
Name of the domain.
ElasticsearchClusterConfig
Configuration options for an Elasticsearch domain. Specifies the
instance type and number of instances in the domain cluster.
InstanceType (string) --
The instance type for an Elasticsearch cluster.
InstanceCount (integer) --
The number of instances in the specified domain cluster.
DedicatedMasterEnabled (boolean) --
A boolean value to indicate whether a dedicated master node is enabled.
See About Dedicated Master Nodes for more information.
ZoneAwarenessEnabled (boolean) --
A boolean value to indicate whether zone awareness is enabled. See About
Zone Awareness for more information.
DedicatedMasterType (string) --
The instance type for a dedicated master node.
DedicatedMasterCount (integer) --
Total number of dedicated master nodes, active and on standby, for the
cluster.
EBSOptions
Options to enable, disable and specify the type and size of EBS storage
volumes.
EBSEnabled (boolean) --
Specifies whether EBS-based storage is enabled.
VolumeType (string) --
Specifies the volume type for EBS-based storage.
VolumeSize (integer) --
Integer to specify the size of an EBS volume.
Iops (integer) --
Specifies the IOPD for a Provisioned IOPS EBS volume (SSD).
AccessPolicies
IAM access policy
SnapshotOptions
Option to set time, in UTC format, of the daily automated snapshot.
Default value is 0 hours.
AutomatedSnapshotStartHour (integer) --
Specifies the time, in UTC format, when the service takes a daily
automated snapshot of the specified Elasticsearch domain. Default value
is 0 hours.
AdvancedOptions
Option to allow references to indices in an HTTP request body. Must be
false when configuring access to individual sub-resources. By default,
the value is true .
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
ElasticsearchVersion
String of format X.Y to specify version for the Elasticsearch domain eg.
"1.5" or "2.3".
'''
ret = {'name': DomainName,
'result': True,
'comment': '',
'changes': {}
}
if ElasticsearchClusterConfig is None:
ElasticsearchClusterConfig = {
'DedicatedMasterEnabled': False,
'InstanceCount': 1,
'InstanceType': 'm3.medium.elasticsearch',
'ZoneAwarenessEnabled': False
}
if EBSOptions is None:
EBSOptions = {
'EBSEnabled': False,
}
if SnapshotOptions is None:
SnapshotOptions = {
'AutomatedSnapshotStartHour': 0
}
if AdvancedOptions is None:
AdvancedOptions = {
'rest.action.multi.allow_explicit_index': 'true'
}
if Tags is None:
Tags = {}
if AccessPolicies is not None and isinstance(AccessPolicies, six.string_types):
try:
AccessPolicies = salt.utils.json.loads(AccessPolicies)
except ValueError as e:
ret['result'] = False
ret['comment'] = 'Failed to create domain: {0}.'.format(e.message)
return ret
r = __salt__['boto_elasticsearch_domain.exists'](DomainName=DomainName,
region=region, key=key, keyid=keyid, profile=profile)
if 'error' in r:
ret['result'] = False
ret['comment'] = 'Failed to create domain: {0}.'.format(r['error']['message'])
return ret
if not r.get('exists'):
if __opts__['test']:
ret['comment'] = 'Domain {0} is set to be created.'.format(DomainName)
ret['result'] = None
return ret
r = __salt__['boto_elasticsearch_domain.create'](DomainName=DomainName,
ElasticsearchClusterConfig=ElasticsearchClusterConfig,
EBSOptions=EBSOptions,
AccessPolicies=AccessPolicies,
SnapshotOptions=SnapshotOptions,
AdvancedOptions=AdvancedOptions,
ElasticsearchVersion=str(ElasticsearchVersion), # future lint: disable=blacklisted-function
region=region, key=key,
keyid=keyid, profile=profile)
if not r.get('created'):
ret['result'] = False
ret['comment'] = 'Failed to create domain: {0}.'.format(r['error']['message'])
return ret
_describe = __salt__['boto_elasticsearch_domain.describe'](DomainName,
region=region, key=key, keyid=keyid, profile=profile)
ret['changes']['old'] = {'domain': None}
ret['changes']['new'] = _describe
ret['comment'] = 'Domain {0} created.'.format(DomainName)
return ret
ret['comment'] = os.linesep.join([ret['comment'], 'Domain {0} is present.'.format(DomainName)])
ret['changes'] = {}
# domain exists, ensure config matches
_status = __salt__['boto_elasticsearch_domain.status'](DomainName=DomainName,
region=region, key=key, keyid=keyid,
profile=profile)['domain']
if _status.get('ElasticsearchVersion') != str(ElasticsearchVersion): # future lint: disable=blacklisted-function
ret['result'] = False
ret['comment'] = (
'Failed to update domain: version cannot be modified '
'from {0} to {1}.'.format(
_status.get('ElasticsearchVersion'),
str(ElasticsearchVersion) # future lint: disable=blacklisted-function
)
)
return ret
_describe = __salt__['boto_elasticsearch_domain.describe'](DomainName=DomainName,
region=region, key=key, keyid=keyid,
profile=profile)['domain']
_describe['AccessPolicies'] = salt.utils.json.loads(_describe['AccessPolicies'])
# When EBSEnabled is false, describe returns extra values that can't be set
if not _describe.get('EBSOptions', {}).get('EBSEnabled'):
opts = _describe.get('EBSOptions', {})
opts.pop('VolumeSize', None)
opts.pop('VolumeType', None)
comm_args = {}
need_update = False
es_opts = {'ElasticsearchClusterConfig': ElasticsearchClusterConfig,
'EBSOptions': EBSOptions,
'AccessPolicies': AccessPolicies,
'SnapshotOptions': SnapshotOptions,
'AdvancedOptions': AdvancedOptions}
for k, v in six.iteritems(es_opts):
if not _compare_json(v, _describe[k]):
need_update = True
comm_args[k] = v
ret['changes'].setdefault('new', {})[k] = v
ret['changes'].setdefault('old', {})[k] = _describe[k]
if need_update:
if __opts__['test']:
msg = 'Domain {0} set to be modified.'.format(DomainName)
ret['comment'] = msg
ret['result'] = None
return ret
ret['comment'] = os.linesep.join([ret['comment'], 'Domain to be modified'])
r = __salt__['boto_elasticsearch_domain.update'](DomainName=DomainName,
region=region, key=key,
keyid=keyid, profile=profile,
**comm_args)
if not r.get('updated'):
ret['result'] = False
ret['comment'] = 'Failed to update domain: {0}.'.format(r['error'])
ret['changes'] = {}
return ret
return ret |
def Bipartite(graph, resolution_parameter_01,
              resolution_parameter_0 = 0, resolution_parameter_1 = 0,
              degree_as_node_size=False, types='type', **kwargs):
    """ Create three layers for bipartite partitions.
    This creates three layers for bipartite partition necessary for detecting
    communities in bipartite networks. These three layers should be passed to
    :func:`Optimiser.optimise_partition_multiplex` with
    ``layer_weights=[1,-1,-1]``.
    Parameters
    ----------
    graph : :class:`ig.Graph`
      Graph to define the bipartite partitions on.
    resolution_parameter_01 : double
      Resolution parameter for in between two classes.
    resolution_parameter_0 : double
      Resolution parameter for class 0.
    resolution_parameter_1 : double
      Resolution parameter for class 1.
    degree_as_node_size : boolean
      If ``True`` use degree as node size instead of 1, to mimic modularity,
      see `Notes <#notes-bipartite>`_.
    types : vertex attribute or list
      Indicator of the class for each vertex. If not 0, 1, it is automatically
      converted.
    **kwargs
      Additional arguments passed on to default constructor of
      :class:`CPMVertexPartition`.
    .. _notes-bipartite:
    Notes
    -----
    For bipartite networks, we would like to be able to set three different
    resolution parameters: one for within each class :math:`\\gamma_0,
    \\gamma_1`, and one for the links between classes, :math:`\\gamma_{01}`.
    Then the formulation would be
    .. math:: Q = \\sum_{ij}
       [A_{ij}
       - (\\gamma_0\\delta(s_i,0) + \\gamma_1\\delta(s_i,1)) \\delta(s_i,s_j)
       - \\gamma_{01}(1 - \\delta(s_i, s_j))
       ]\\delta(\\sigma_i, \\sigma_j)
    In terms of communities this is
    .. math:: Q = \\sum_c (e_c
       - \\gamma_{01} 2 n_c(0) n_c(1)
       - \\gamma_0 n^2_c(0)
       - \\gamma_1 n^2_c(1))
    where :math:`n_c(0)` is the number of nodes in community :math:`c` of class 0
    (and similarly for 1) and :math:`e_c` is the number of edges within community
    :math:`c`. We denote by :math:`n_c = n_c(0) + n_c(1)` the total number of nodes
    in community :math:`c`.
    We achieve this by creating three layers : (1) all nodes have ``node_size =
    1`` and all relevant links; (2) only nodes of class 0 have ``node_size =
    1`` and no links; (3) only nodes of class 1 have ``node_size = 1`` and no
    links. If we add the first with resolution parameter :math:`\\gamma_{01}`,
    and the others with resolution parameters :math:`\\gamma_{01} - \\gamma_0`
    and :math:`\\gamma_{01} - \\gamma_1`, but the latter two with a layer
    weight of -1 while the first layer has layer weight 1, we obtain the
    following:
    .. math:: Q &= \\sum_c (e_c - \\gamma_{01} n_c^2)
       -\\sum_c (- (\\gamma_{01} - \\gamma_0) n_c(0)^2)
       -\\sum_c (- (\\gamma_{01} - \\gamma_1) n_c(1)^2) \\\\
       &= \\sum_c [e_c - \\gamma_{01} 2 n_c(0) n_c(1)
       - \\gamma_{01} n_c(0)^2
       - \\gamma_{01} n_c(1)^2)
       + ( \\gamma_{01} - \\gamma_0) n_c(0)^2
       + ( \\gamma_{01} - \\gamma_1) n_c(1)^2
       ] \\\\
       &= \\sum_c [e_c - \\gamma_{01} 2 n_c(0) n_c(1)
       - \\gamma_{0} n_c(0)^2
       - \\gamma_{1} n_c(1)^2]
    Although the derivation above is using :math:`n_c^2`, implicitly assuming a
    direct graph with self-loops, similar derivations can be made for
    undirected graphs using :math:`\\binom{n_c}{2}`, but the notation is then
    somewhat more convoluted.
    If we set node sizes equal to the degree, we get something similar to
    modularity, except that the resolution parameter should still be divided by
    :math:`2m`. In particular, in general (i.e. not specifically for bipartite
    graph) if ``node_sizes=G.degree()`` we then obtain
    .. math:: Q = \\sum_{ij} A_{ij} - \\gamma k_i k_j
    In the case of bipartite graphs something similar is obtained, but then
    correctly adapted (as long as the resolution parameter is also
    appropriately rescaled).
    .. note:: This function is not suited for directed graphs in the case of
              using the degree as node sizes.
    """
    # Normalise `types` into a list of 0/1 class labels, remapping arbitrary
    # labels through UniqueIdGenerator if needed; more than two distinct
    # labels is an error.
    if types is not None:
        if isinstance(types, str):
            # Interpret a string as the name of a vertex attribute.
            types = graph.vs[types]
        else:
            # Make sure it is a list
            types = list(types)
        if set(types) != set([0, 1]):
            new_type = _ig.UniqueIdGenerator()
            types = [new_type[t] for t in types]
        if set(types) != set([0, 1]):
            raise ValueError("More than one type specified.")
    if degree_as_node_size:
        if (graph.is_directed()):
            raise ValueError("This method is not suitable for directed graphs " +
                             "when using degree as node sizes.")
        node_sizes = graph.degree()
    else:
        node_sizes = [1]*graph.vcount()
    # Layer (1): the full graph with every node's size set; penalises all
    # node pairs with resolution gamma_01.
    partition_01 = CPMVertexPartition(graph,
                                      node_sizes=node_sizes,
                                      resolution_parameter=resolution_parameter_01,
                                      **kwargs)
    # Layers (2) and (3): edgeless copies of the graph (subgraph_edges with
    # an empty edge list keeps all vertices) carrying only the node sizes of
    # one class; combined with layer weight -1 they correct the within-class
    # resolution from gamma_01 to gamma_0 resp. gamma_1 (see Notes).
    H_0 = graph.subgraph_edges([], delete_vertices=False)
    partition_0 = CPMVertexPartition(H_0, weights=None,
                                     node_sizes=[s if t == 0 else 0
                                                 for v, s, t in zip(graph.vs,node_sizes,types)],
                                     resolution_parameter=resolution_parameter_01 - resolution_parameter_0,
                                     **kwargs)
    H_1 = graph.subgraph_edges([], delete_vertices=False)
    partition_1 = CPMVertexPartition(H_1, weights=None,
                                     node_sizes=[s if t == 1 else 0
                                                 for v, s, t in zip(graph.vs,node_sizes,types)],
                                     resolution_parameter=resolution_parameter_01 - resolution_parameter_1,
                                     **kwargs)
return partition_01, partition_0, partition_1 | Create three layers for bipartite partitions.
This creates three layers for bipartite partition necessary for detecting
communities in bipartite networks. These three layers should be passed to
:func:`Optimiser.optimise_partition_multiplex` with
``layer_weights=[1,-1,-1]``.
Parameters
----------
graph : :class:`ig.Graph`
Graph to define the bipartite partitions on.
resolution_parameter_01 : double
Resolution parameter for in between two classes.
resolution_parameter_0 : double
Resolution parameter for class 0.
resolution_parameter_1 : double
Resolution parameter for class 1.
degree_as_node_size : boolean
If ``True`` use degree as node size instead of 1, to mimic modularity,
see `Notes <#notes-bipartite>`_.
types : vertex attribute or list
Indicator of the class for each vertex. If not 0, 1, it is automatically
converted.
**kwargs
Additional arguments passed on to default constructor of
:class:`CPMVertexPartition`.
.. _notes-bipartite:
Notes
-----
For bipartite networks, we would like to be able to set three different
resolution parameters: one for within each class :math:`\\gamma_0,
\\gamma_1`, and one for the links between classes, :math:`\\gamma_{01}`.
Then the formulation would be
.. math:: Q = \\sum_{ij}
[A_{ij}
- (\\gamma_0\\delta(s_i,0) + \\gamma_1\\delta(s_i,1)) \\delta(s_i,s_j)
- \\gamma_{01}(1 - \\delta(s_i, s_j))
]\\delta(\\sigma_i, \\sigma_j)
In terms of communities this is
.. math:: Q = \\sum_c (e_c
- \\gamma_{01} 2 n_c(0) n_c(1)
- \\gamma_0 n^2_c(0)
- \\gamma_1 n^2_c(1))
where :math:`n_c(0)` is the number of nodes in community :math:`c` of class 0
(and similarly for 1) and :math:`e_c` is the number of edges within community
:math:`c`. We denote by :math:`n_c = n_c(0) + n_c(1)` the total number of nodes
in community :math:`c`.
We achieve this by creating three layers : (1) all nodes have ``node_size =
1`` and all relevant links; (2) only nodes of class 0 have ``node_size =
1`` and no links; (3) only nodes of class 1 have ``node_size = 1`` and no
links. If we add the first with resolution parameter :math:`\\gamma_{01}`,
and the others with resolution parameters :math:`\\gamma_{01} - \\gamma_0`
and :math:`\\gamma_{01} - \\gamma_1`, but the latter two with a layer
weight of -1 while the first layer has layer weight 1, we obtain the
following:
.. math:: Q &= \\sum_c (e_c - \\gamma_{01} n_c^2)
-\\sum_c (- (\\gamma_{01} - \\gamma_0) n_c(0)^2)
-\\sum_c (- (\\gamma_{01} - \\gamma_1) n_c(1)^2) \\\\
&= \\sum_c [e_c - \\gamma_{01} 2 n_c(0) n_c(1)
- \\gamma_{01} n_c(0)^2
- \\gamma_{01} n_c(1)^2)
+ ( \\gamma_{01} - \\gamma_0) n_c(0)^2
+ ( \\gamma_{01} - \\gamma_1) n_c(1)^2
] \\\\
&= \\sum_c [e_c - \\gamma_{01} 2 n_c(0) n_c(1)
- \\gamma_{0} n_c(0)^2
- \\gamma_{1} n_c(1)^2]
Although the derivation above is using :math:`n_c^2`, implicitly assuming a
direct graph with self-loops, similar derivations can be made for
undirected graphs using :math:`\\binom{n_c}{2}`, but the notation is then
somewhat more convoluted.
If we set node sizes equal to the degree, we get something similar to
modularity, except that the resolution parameter should still be divided by
:math:`2m`. In particular, in general (i.e. not specifically for bipartite
graph) if ``node_sizes=G.degree()`` we then obtain
.. math:: Q = \\sum_{ij} A_{ij} - \\gamma k_i k_j
In the case of bipartite graphs something similar is obtained, but then
correctly adapted (as long as the resolution parameter is also
appropriately rescaled).
.. note:: This function is not suited for directed graphs in the case of
using the degree as node sizes. | Below is the instruction that describes the task:
### Input:
Create three layers for bipartite partitions.
This creates three layers for bipartite partition necessary for detecting
communities in bipartite networks. These three layers should be passed to
:func:`Optimiser.optimise_partition_multiplex` with
``layer_weights=[1,-1,-1]``.
Parameters
----------
graph : :class:`ig.Graph`
Graph to define the bipartite partitions on.
resolution_parameter_01 : double
Resolution parameter for in between two classes.
resolution_parameter_0 : double
Resolution parameter for class 0.
resolution_parameter_1 : double
Resolution parameter for class 1.
degree_as_node_size : boolean
If ``True`` use degree as node size instead of 1, to mimic modularity,
see `Notes <#notes-bipartite>`_.
types : vertex attribute or list
Indicator of the class for each vertex. If not 0, 1, it is automatically
converted.
**kwargs
Additional arguments passed on to default constructor of
:class:`CPMVertexPartition`.
.. _notes-bipartite:
Notes
-----
For bipartite networks, we would like to be able to set three different
resolution parameters: one for within each class :math:`\\gamma_0,
\\gamma_1`, and one for the links between classes, :math:`\\gamma_{01}`.
Then the formulation would be
.. math:: Q = \\sum_{ij}
[A_{ij}
- (\\gamma_0\\delta(s_i,0) + \\gamma_1\\delta(s_i,1)) \\delta(s_i,s_j)
- \\gamma_{01}(1 - \\delta(s_i, s_j))
]\\delta(\\sigma_i, \\sigma_j)
In terms of communities this is
.. math:: Q = \\sum_c (e_c
- \\gamma_{01} 2 n_c(0) n_c(1)
- \\gamma_0 n^2_c(0)
- \\gamma_1 n^2_c(1))
where :math:`n_c(0)` is the number of nodes in community :math:`c` of class 0
(and similarly for 1) and :math:`e_c` is the number of edges within community
:math:`c`. We denote by :math:`n_c = n_c(0) + n_c(1)` the total number of nodes
in community :math:`c`.
We achieve this by creating three layers : (1) all nodes have ``node_size =
1`` and all relevant links; (2) only nodes of class 0 have ``node_size =
1`` and no links; (3) only nodes of class 1 have ``node_size = 1`` and no
links. If we add the first with resolution parameter :math:`\\gamma_{01}`,
and the others with resolution parameters :math:`\\gamma_{01} - \\gamma_0`
and :math:`\\gamma_{01} - \\gamma_1`, but the latter two with a layer
weight of -1 while the first layer has layer weight 1, we obtain the
following:
.. math:: Q &= \\sum_c (e_c - \\gamma_{01} n_c^2)
-\\sum_c (- (\\gamma_{01} - \\gamma_0) n_c(0)^2)
-\\sum_c (- (\\gamma_{01} - \\gamma_1) n_c(1)^2) \\\\
&= \\sum_c [e_c - \\gamma_{01} 2 n_c(0) n_c(1)
- \\gamma_{01} n_c(0)^2
- \\gamma_{01} n_c(1)^2)
+ ( \\gamma_{01} - \\gamma_0) n_c(0)^2
+ ( \\gamma_{01} - \\gamma_1) n_c(1)^2
] \\\\
&= \\sum_c [e_c - \\gamma_{01} 2 n_c(0) n_c(1)
- \\gamma_{0} n_c(0)^2
- \\gamma_{1} n_c(1)^2]
Although the derivation above is using :math:`n_c^2`, implicitly assuming a
direct graph with self-loops, similar derivations can be made for
undirected graphs using :math:`\\binom{n_c}{2}`, but the notation is then
somewhat more convoluted.
If we set node sizes equal to the degree, we get something similar to
modularity, except that the resolution parameter should still be divided by
:math:`2m`. In particular, in general (i.e. not specifically for bipartite
graph) if ``node_sizes=G.degree()`` we then obtain
.. math:: Q = \\sum_{ij} A_{ij} - \\gamma k_i k_j
In the case of bipartite graphs something similar is obtained, but then
correctly adapted (as long as the resolution parameter is also
appropriately rescaled).
.. note:: This function is not suited for directed graphs in the case of
using the degree as node sizes.
### Response:
def Bipartite(graph, resolution_parameter_01,
resolution_parameter_0 = 0, resolution_parameter_1 = 0,
degree_as_node_size=False, types='type', **kwargs):
""" Create three layers for bipartite partitions.
This creates three layers for bipartite partition necessary for detecting
communities in bipartite networks. These three layers should be passed to
:func:`Optimiser.optimise_partition_multiplex` with
``layer_weights=[1,-1,-1]``.
Parameters
----------
graph : :class:`ig.Graph`
Graph to define the bipartite partitions on.
resolution_parameter_01 : double
Resolution parameter for in between two classes.
resolution_parameter_0 : double
Resolution parameter for class 0.
resolution_parameter_1 : double
Resolution parameter for class 1.
degree_as_node_size : boolean
If ``True`` use degree as node size instead of 1, to mimic modularity,
see `Notes <#notes-bipartite>`_.
types : vertex attribute or list
Indicator of the class for each vertex. If not 0, 1, it is automatically
converted.
**kwargs
Additional arguments passed on to default constructor of
:class:`CPMVertexPartition`.
.. _notes-bipartite:
Notes
-----
For bipartite networks, we would like to be able to set three different
resolution parameters: one for within each class :math:`\\gamma_0,
\\gamma_1`, and one for the links between classes, :math:`\\gamma_{01}`.
Then the formulation would be
.. math:: Q = \\sum_{ij}
[A_{ij}
- (\\gamma_0\\delta(s_i,0) + \\gamma_1\\delta(s_i,1)) \\delta(s_i,s_j)
- \\gamma_{01}(1 - \\delta(s_i, s_j))
]\\delta(\\sigma_i, \\sigma_j)
In terms of communities this is
.. math:: Q = \\sum_c (e_c
- \\gamma_{01} 2 n_c(0) n_c(1)
- \\gamma_0 n^2_c(0)
- \\gamma_1 n^2_c(1))
where :math:`n_c(0)` is the number of nodes in community :math:`c` of class 0
(and similarly for 1) and :math:`e_c` is the number of edges within community
:math:`c`. We denote by :math:`n_c = n_c(0) + n_c(1)` the total number of nodes
in community :math:`c`.
We achieve this by creating three layers : (1) all nodes have ``node_size =
1`` and all relevant links; (2) only nodes of class 0 have ``node_size =
1`` and no links; (3) only nodes of class 1 have ``node_size = 1`` and no
links. If we add the first with resolution parameter :math:`\\gamma_{01}`,
and the others with resolution parameters :math:`\\gamma_{01} - \\gamma_0`
and :math:`\\gamma_{01} - \\gamma_1`, but the latter two with a layer
weight of -1 while the first layer has layer weight 1, we obtain the
following:
.. math:: Q &= \\sum_c (e_c - \\gamma_{01} n_c^2)
-\\sum_c (- (\\gamma_{01} - \\gamma_0) n_c(0)^2)
-\\sum_c (- (\\gamma_{01} - \\gamma_1) n_c(1)^2) \\\\
&= \\sum_c [e_c - \\gamma_{01} 2 n_c(0) n_c(1)
- \\gamma_{01} n_c(0)^2
- \\gamma_{01} n_c(1)^2)
+ ( \\gamma_{01} - \\gamma_0) n_c(0)^2
+ ( \\gamma_{01} - \\gamma_1) n_c(1)^2
] \\\\
&= \\sum_c [e_c - \\gamma_{01} 2 n_c(0) n_c(1)
- \\gamma_{0} n_c(0)^2
- \\gamma_{1} n_c(1)^2]
Although the derivation above is using :math:`n_c^2`, implicitly assuming a
direct graph with self-loops, similar derivations can be made for
undirected graphs using :math:`\\binom{n_c}{2}`, but the notation is then
somewhat more convoluted.
If we set node sizes equal to the degree, we get something similar to
modularity, except that the resolution parameter should still be divided by
:math:`2m`. In particular, in general (i.e. not specifically for bipartite
graph) if ``node_sizes=G.degree()`` we then obtain
.. math:: Q = \\sum_{ij} A_{ij} - \\gamma k_i k_j
In the case of bipartite graphs something similar is obtained, but then
correctly adapted (as long as the resolution parameter is also
appropriately rescaled).
.. note:: This function is not suited for directed graphs in the case of
using the degree as node sizes.
"""
if types is not None:
if isinstance(types, str):
types = graph.vs[types]
else:
# Make sure it is a list
types = list(types)
if set(types) != set([0, 1]):
new_type = _ig.UniqueIdGenerator()
types = [new_type[t] for t in types]
if set(types) != set([0, 1]):
raise ValueError("More than one type specified.")
if degree_as_node_size:
if (graph.is_directed()):
raise ValueError("This method is not suitable for directed graphs " +
"when using degree as node sizes.")
node_sizes = graph.degree()
else:
node_sizes = [1]*graph.vcount()
partition_01 = CPMVertexPartition(graph,
node_sizes=node_sizes,
resolution_parameter=resolution_parameter_01,
**kwargs)
H_0 = graph.subgraph_edges([], delete_vertices=False)
partition_0 = CPMVertexPartition(H_0, weights=None,
node_sizes=[s if t == 0 else 0
for v, s, t in zip(graph.vs,node_sizes,types)],
resolution_parameter=resolution_parameter_01 - resolution_parameter_0,
**kwargs)
H_1 = graph.subgraph_edges([], delete_vertices=False)
partition_1 = CPMVertexPartition(H_1, weights=None,
node_sizes=[s if t == 1 else 0
for v, s, t in zip(graph.vs,node_sizes,types)],
resolution_parameter=resolution_parameter_01 - resolution_parameter_1,
**kwargs)
return partition_01, partition_0, partition_1 |
def GetFileEntryByPathSpec(self, path_spec):
"""Retrieves a file entry for a path specification.
Args:
path_spec (PathSpec): a path specification.
Returns:
APFSContainerFileEntry: a file entry or None if not exists.
"""
volume_index = apfs_helper.APFSContainerPathSpecGetVolumeIndex(path_spec)
# The virtual root file has not corresponding volume index but
# should have a location.
if volume_index is None:
location = getattr(path_spec, 'location', None)
if location is None or location != self.LOCATION_ROOT:
return None
return apfs_container_file_entry.APFSContainerFileEntry(
self._resolver_context, self, path_spec, is_root=True,
is_virtual=True)
if (volume_index < 0 or
volume_index >= self._fsapfs_container.number_of_volumes):
return None
return apfs_container_file_entry.APFSContainerFileEntry(
self._resolver_context, self, path_spec) | Retrieves a file entry for a path specification.
Args:
path_spec (PathSpec): a path specification.
Returns:
APFSContainerFileEntry: a file entry or None if not exists. | Below is the instruction that describes the task:
### Input:
Retrieves a file entry for a path specification.
Args:
path_spec (PathSpec): a path specification.
Returns:
APFSContainerFileEntry: a file entry or None if not exists.
### Response:
def GetFileEntryByPathSpec(self, path_spec):
"""Retrieves a file entry for a path specification.
Args:
path_spec (PathSpec): a path specification.
Returns:
APFSContainerFileEntry: a file entry or None if not exists.
"""
volume_index = apfs_helper.APFSContainerPathSpecGetVolumeIndex(path_spec)
# The virtual root file has no corresponding volume index but
# should have a location.
if volume_index is None:
location = getattr(path_spec, 'location', None)
if location is None or location != self.LOCATION_ROOT:
return None
# No volume index but a root location: return the virtual root entry.
return apfs_container_file_entry.APFSContainerFileEntry(
self._resolver_context, self, path_spec, is_root=True,
is_virtual=True)
# Reject volume indexes outside the range known to the container.
if (volume_index < 0 or
volume_index >= self._fsapfs_container.number_of_volumes):
return None
return apfs_container_file_entry.APFSContainerFileEntry(
self._resolver_context, self, path_spec) |
def get_parent_family_ids(self, family_id):
"""Gets the parent ``Ids`` of the given family.
arg: family_id (osid.id.Id): the ``Id`` of a family
return: (osid.id.IdList) - the parent ``Ids`` of the family
raise: NotFound - ``family_id`` is not found
raise: NullArgument - ``family_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.get_parent_bin_ids
if self._catalog_session is not None:
return self._catalog_session.get_parent_catalog_ids(catalog_id=family_id)
return self._hierarchy_session.get_parents(id_=family_id) | Gets the parent ``Ids`` of the given family.
arg: family_id (osid.id.Id): the ``Id`` of a family
return: (osid.id.IdList) - the parent ``Ids`` of the family
raise: NotFound - ``family_id`` is not found
raise: NullArgument - ``family_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | Below is the instruction that describes the task:
### Input:
Gets the parent ``Ids`` of the given family.
arg: family_id (osid.id.Id): the ``Id`` of a family
return: (osid.id.IdList) - the parent ``Ids`` of the family
raise: NotFound - ``family_id`` is not found
raise: NullArgument - ``family_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
### Response:
def get_parent_family_ids(self, family_id):
"""Gets the parent ``Ids`` of the given family.
arg: family_id (osid.id.Id): the ``Id`` of a family
return: (osid.id.IdList) - the parent ``Ids`` of the family
raise: NotFound - ``family_id`` is not found
raise: NullArgument - ``family_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.get_parent_bin_ids
# Delegate to the catalog session when one is configured; otherwise
# fall back to the generic hierarchy session.
if self._catalog_session is not None:
return self._catalog_session.get_parent_catalog_ids(catalog_id=family_id)
return self._hierarchy_session.get_parents(id_=family_id) |
def collect(self, cert_id, format_type):
"""
Poll for certificate availability after submission.
:param int cert_id: The certificate ID
:param str format_type: The format type to use (example: 'X509 PEM Certificate only')
:return: The certificate_id or the certificate depending on whether the certificate is ready (check status code)
:rtype: dict
"""
result = self.client.service.collect(authData=self.auth, id=cert_id,
formatType=ComodoCA.format_type[format_type])
# The certificate is ready for collection
if result.statusCode == 2:
return jsend.success({'certificate': result.SSL.certificate, 'certificate_status': 'issued',
'certificate_id': cert_id})
# The certificate is not ready for collection yet
elif result.statusCode == 0:
return jsend.fail({'certificate_id': cert_id, 'certificate': '', 'certificate_status': 'pending'})
# Some error occurred
else:
return self._create_error(result.statusCode) | Poll for certificate availability after submission.
:param int cert_id: The certificate ID
:param str format_type: The format type to use (example: 'X509 PEM Certificate only')
:return: The certificate_id or the certificate depending on whether the certificate is ready (check status code)
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Poll for certificate availability after submission.
:param int cert_id: The certificate ID
:param str format_type: The format type to use (example: 'X509 PEM Certificate only')
:return: The certificate_id or the certificate depending on whether the certificate is ready (check status code)
:rtype: dict
### Response:
def collect(self, cert_id, format_type):
"""
Poll for certificate availability after submission.
:param int cert_id: The certificate ID
:param str format_type: The format type to use (example: 'X509 PEM Certificate only')
:return: The certificate_id or the certificate depending on whether the certificate is ready (check status code)
:rtype: dict
"""
# Ask the CA's SOAP endpoint for the certificate, mapping the caller's
# human-readable format name onto the numeric format code it expects.
result = self.client.service.collect(authData=self.auth, id=cert_id,
formatType=ComodoCA.format_type[format_type])
# The certificate is ready for collection
if result.statusCode == 2:
return jsend.success({'certificate': result.SSL.certificate, 'certificate_status': 'issued',
'certificate_id': cert_id})
# The certificate is not ready for collection yet
elif result.statusCode == 0:
return jsend.fail({'certificate_id': cert_id, 'certificate': '', 'certificate_status': 'pending'})
# Some error occurred
else:
# Any other status code is an error; translate it into an error payload.
return self._create_error(result.statusCode) |
def get_child_value(parent, name, allow_missing=0):
""" return the value of the child element with name in the parent Element """
if not parent.hasElement(name):
if allow_missing:
return np.nan
else:
raise Exception('failed to find child element %s in parent' % name)
else:
return XmlHelper.as_value(parent.getElement(name)) | return the value of the child element with name in the parent Element | Below is the instruction that describes the task:
### Input:
return the value of the child element with name in the parent Element
### Response:
def get_child_value(parent, name, allow_missing=0):
""" return the value of the child element with name in the parent Element """
if not parent.hasElement(name):
if allow_missing:
# Missing children are tolerated when allow_missing is truthy:
# represent the absent value as NaN.
return np.nan
else:
raise Exception('failed to find child element %s in parent' % name)
else:
return XmlHelper.as_value(parent.getElement(name)) |
def get_classical_addresses_from_program(program) -> Dict[str, List[int]]:
"""
Returns a sorted list of classical addresses found in the MEASURE instructions in the program.
:param Program program: The program from which to get the classical addresses.
:return: A mapping from memory region names to lists of offsets appearing in the program.
"""
addresses: Dict[str, List[int]] = defaultdict(list)
flattened_addresses = {}
# Required to use the `classical_reg.address` int attribute.
# See https://github.com/rigetti/pyquil/issues/388.
for instr in program:
if isinstance(instr, Measurement) and instr.classical_reg:
addresses[instr.classical_reg.name].append(instr.classical_reg.offset)
# flatten duplicates
for k, v in addresses.items():
reduced_list = list(set(v))
reduced_list.sort()
flattened_addresses[k] = reduced_list
return flattened_addresses | Returns a sorted list of classical addresses found in the MEASURE instructions in the program.
:param Program program: The program from which to get the classical addresses.
:return: A mapping from memory region names to lists of offsets appearing in the program. | Below is the instruction that describes the task:
### Input:
Returns a sorted list of classical addresses found in the MEASURE instructions in the program.
:param Program program: The program from which to get the classical addresses.
:return: A mapping from memory region names to lists of offsets appearing in the program.
### Response:
def get_classical_addresses_from_program(program) -> Dict[str, List[int]]:
"""
Returns a sorted list of classical addresses found in the MEASURE instructions in the program.
:param Program program: The program from which to get the classical addresses.
:return: A mapping from memory region names to lists of offsets appearing in the program.
"""
addresses: Dict[str, List[int]] = defaultdict(list)
flattened_addresses = {}
# Required to use the `classical_reg.address` int attribute.
# See https://github.com/rigetti/pyquil/issues/388.
# Collect the offset of every MEASURE target, grouped by region name.
for instr in program:
if isinstance(instr, Measurement) and instr.classical_reg:
addresses[instr.classical_reg.name].append(instr.classical_reg.offset)
# flatten duplicates
# Deduplicate offsets via a set, then sort for a deterministic order.
for k, v in addresses.items():
reduced_list = list(set(v))
reduced_list.sort()
flattened_addresses[k] = reduced_list
return flattened_addresses |
def advise(self, item, stop=False):
"""Request updates when DDE data changes."""
hszItem = DDE.CreateStringHandle(self._idInst, item, CP_WINUNICODE)
hDdeData = DDE.ClientTransaction(LPBYTE(), 0, self._hConv, hszItem, CF_TEXT, XTYP_ADVSTOP if stop else XTYP_ADVSTART, TIMEOUT_ASYNC, LPDWORD())
DDE.FreeStringHandle(self._idInst, hszItem)
if not hDdeData:
raise DDEError("Unable to %s advise" % ("stop" if stop else "start"), self._idInst)
DDE.FreeDataHandle(hDdeData) | Request updates when DDE data changes. | Below is the the instruction that describes the task:
### Input:
Request updates when DDE data changes.
### Response:
def advise(self, item, stop=False):
"""Request updates when DDE data changes."""
hszItem = DDE.CreateStringHandle(self._idInst, item, CP_WINUNICODE)
# XTYP_ADVSTOP cancels a prior advise loop; XTYP_ADVSTART begins one.
hDdeData = DDE.ClientTransaction(LPBYTE(), 0, self._hConv, hszItem, CF_TEXT, XTYP_ADVSTOP if stop else XTYP_ADVSTART, TIMEOUT_ASYNC, LPDWORD())
# The string handle must be freed regardless of the transaction outcome.
DDE.FreeStringHandle(self._idInst, hszItem)
if not hDdeData:
raise DDEError("Unable to %s advise" % ("stop" if stop else "start"), self._idInst)
DDE.FreeDataHandle(hDdeData) |
def draw(self):
"""Draws the image at the given location."""
if not self.visible:
return
self.window.blit(self.image, self.loc) | Draws the image at the given location. | Below is the the instruction that describes the task:
### Input:
Draws the image at the given location.
### Response:
def draw(self):
"""Draws the image at the given location."""
# Hidden elements are skipped entirely.
if not self.visible:
return
self.window.blit(self.image, self.loc) |
def _result(self, command_response, log=None):
"""Create a CommandResult for a request response.
:param command_response: command request response
:type command_response: dict
:param log: list of log messages (optional)
:type log: list
:return: a CommandResult containing any given log messages
:rtype: :py:class:`vici.session.CommandResult`
"""
if command_response["success"] == "yes":
return CommandResult(True, None, log)
else:
return CommandResult(False, command_response["errmsg"], log) | Create a CommandResult for a request response.
:param command_response: command request response
:type command_response: dict
:param log: list of log messages (optional)
:type log: list
:return: a CommandResult containing any given log messages
:rtype: :py:class:`vici.session.CommandResult` | Below is the the instruction that describes the task:
### Input:
Create a CommandResult for a request response.
:param command_response: command request response
:type command_response: dict
:param log: list of log messages (optional)
:type log: list
:return: a CommandResult containing any given log messages
:rtype: :py:class:`vici.session.CommandResult`
### Response:
def _result(self, command_response, log=None):
"""Create a CommandResult for a request response.
:param command_response: command request response
:type command_response: dict
:param log: list of log messages (optional)
:type log: list
:return: a CommandResult containing any given log messages
:rtype: :py:class:`vici.session.CommandResult`
"""
# The daemon reports success as the literal string "yes"; anything else
# carries an "errmsg" entry describing the failure.
if command_response["success"] == "yes":
return CommandResult(True, None, log)
else:
return CommandResult(False, command_response["errmsg"], log) |
def read_configs_(self):
"""Read config files and set config values accordingly.
Returns:
(dict, list, list): respectively content of files, list of
missing/empty files and list of files for which a parsing error
arised.
"""
if not self.config_files_:
return {}, [], []
content = {section: {} for section in self}
empty_files = []
faulty_files = []
for cfile in self.config_files_:
conf_dict = self.read_config_(cfile)
if conf_dict is None:
faulty_files.append(cfile)
continue
elif not conf_dict:
empty_files.append(cfile)
continue
for section, secdict in conf_dict.items():
content[section].update(secdict)
return content, empty_files, faulty_files | Read config files and set config values accordingly.
Returns:
(dict, list, list): respectively content of files, list of
missing/empty files and list of files for which a parsing error
arised. | Below is the the instruction that describes the task:
### Input:
Read config files and set config values accordingly.
Returns:
(dict, list, list): respectively content of files, list of
missing/empty files and list of files for which a parsing error
arised.
### Response:
def read_configs_(self):
"""Read config files and set config values accordingly.
Returns:
(dict, list, list): respectively content of files, list of
missing/empty files and list of files for which a parsing error
arised.
"""
if not self.config_files_:
return {}, [], []
content = {section: {} for section in self}
empty_files = []
faulty_files = []
for cfile in self.config_files_:
conf_dict = self.read_config_(cfile)
# read_config_ returns None on a parsing error and an empty mapping
# for a missing/empty file; classify each file accordingly.
if conf_dict is None:
faulty_files.append(cfile)
continue
elif not conf_dict:
empty_files.append(cfile)
continue
# Later files override earlier ones on a per-section basis.
for section, secdict in conf_dict.items():
content[section].update(secdict)
return content, empty_files, faulty_files |
def unsetenv(key):
"""Like `os.unsetenv` but takes unicode under Windows + Python 2
Args:
key (pathlike): The env var to unset
"""
key = path2fsn(key)
if is_win:
# python 3 has no unsetenv under Windows -> use our ctypes one as well
try:
del_windows_env_var(key)
except WindowsError:
pass
else:
os.unsetenv(key) | Like `os.unsetenv` but takes unicode under Windows + Python 2
Args:
key (pathlike): The env var to unset | Below is the the instruction that describes the task:
### Input:
Like `os.unsetenv` but takes unicode under Windows + Python 2
Args:
key (pathlike): The env var to unset
### Response:
def unsetenv(key):
"""Like `os.unsetenv` but takes unicode under Windows + Python 2
Args:
key (pathlike): The env var to unset
"""
key = path2fsn(key)
if is_win:
# python 3 has no unsetenv under Windows -> use our ctypes one as well
try:
del_windows_env_var(key)
except WindowsError:
# NOTE(review): the failure is swallowed — presumably the variable
# was not set to begin with; confirm against del_windows_env_var.
pass
else:
os.unsetenv(key) |
def terminate(self):
""" Send SIGTERM to the task's process. """
logger.info('Sending SIGTERM to task {0}'.format(self.name))
if hasattr(self, 'remote_client') and self.remote_client is not None:
self.terminate_sent = True
self.remote_client.close()
return
if not self.process:
raise DagobahError('task does not have a running process')
self.terminate_sent = True
self.process.terminate() | Send SIGTERM to the task's process. | Below is the the instruction that describes the task:
### Input:
Send SIGTERM to the task's process.
### Response:
def terminate(self):
""" Send SIGTERM to the task's process. """
logger.info('Sending SIGTERM to task {0}'.format(self.name))
# Remote tasks have no local process; closing the remote client is the
# closest equivalent to terminating them.
if hasattr(self, 'remote_client') and self.remote_client is not None:
self.terminate_sent = True
self.remote_client.close()
return
if not self.process:
raise DagobahError('task does not have a running process')
# Record that termination was requested before signalling the process.
self.terminate_sent = True
self.process.terminate() |
def _active_mounts_darwin(ret):
'''
List active mounts on Mac OS systems
'''
for line in __salt__['cmd.run_stdout']('mount').split('\n'):
comps = re.sub(r"\s+", " ", line).split()
parens = re.findall(r'\((.*?)\)', line, re.DOTALL)[0].split(", ")
ret[comps[2]] = {'device': comps[0],
'fstype': parens[0],
'opts': _resolve_user_group_names(parens[1:])}
return ret | List active mounts on Mac OS systems | Below is the the instruction that describes the task:
### Input:
List active mounts on Mac OS systems
### Response:
def _active_mounts_darwin(ret):
'''
List active mounts on Mac OS systems
'''
for line in __salt__['cmd.run_stdout']('mount').split('\n'):
# Collapse runs of whitespace so the fields split predictably.
comps = re.sub(r"\s+", " ", line).split()
# The parenthesised suffix of a mount line holds "fstype, opt, opt, ...".
parens = re.findall(r'\((.*?)\)', line, re.DOTALL)[0].split(", ")
ret[comps[2]] = {'device': comps[0],
'fstype': parens[0],
'opts': _resolve_user_group_names(parens[1:])}
return ret |
def create_database(self):
"""
Creates an empty database if not exists.
"""
if not self._database_exists():
con = psycopg2.connect(host=self.host, database="postgres",
user=self.user, password=self.password, port=self.port)
con.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
query = "CREATE DATABASE {0};".format(self.dbname)
c = con.cursor()
c.execute(query)
con.close()
if self.normalize:
self.open_database()
query = "CREATE EXTENSION IF NOT EXISTS \"plperlu\";"
self.execute_sql(query)
# query = """CREATE OR REPLACE FUNCTION normalize(str text)
#RETURNS text
#AS $$
#import unicodedata
#return ''.join(c for c in unicodedata.normalize('NFKD', str)
#if unicodedata.category(c) != 'Mn')
#$$ LANGUAGE plpython3u IMMUTABLE;"""
# query = """CREATE OR REPLACE FUNCTION normalize(mystr text)
# RETURNS text
# AS $$
# from unidecode import unidecode
# return unidecode(mystr.decode("utf-8"))
# $$ LANGUAGE plpythonu IMMUTABLE;"""
query = """CREATE OR REPLACE FUNCTION normalize(text)
RETURNS text
AS $$
use Text::Unidecode;
return unidecode(shift);
$$ LANGUAGE plperlu IMMUTABLE;"""
self.execute_sql(query)
self.commit()
self.close_database() | Creates an empty database if not exists. | Below is the the instruction that describes the task:
### Input:
Creates an empty database if not exists.
### Response:
def create_database(self):
"""
Creates an empty database if not exists.
"""
if not self._database_exists():
# Connect to the maintenance database "postgres" because the target
# database does not exist yet.
con = psycopg2.connect(host=self.host, database="postgres",
user=self.user, password=self.password, port=self.port)
# CREATE DATABASE cannot run inside a transaction block, so the
# connection is switched to autocommit first.
con.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
query = "CREATE DATABASE {0};".format(self.dbname)
c = con.cursor()
c.execute(query)
con.close()
if self.normalize:
self.open_database()
query = "CREATE EXTENSION IF NOT EXISTS \"plperlu\";"
self.execute_sql(query)
# query = """CREATE OR REPLACE FUNCTION normalize(str text)
#RETURNS text
#AS $$
#import unicodedata
#return ''.join(c for c in unicodedata.normalize('NFKD', str)
#if unicodedata.category(c) != 'Mn')
#$$ LANGUAGE plpython3u IMMUTABLE;"""
# query = """CREATE OR REPLACE FUNCTION normalize(mystr text)
# RETURNS text
# AS $$
# from unidecode import unidecode
# return unidecode(mystr.decode("utf-8"))
# $$ LANGUAGE plpythonu IMMUTABLE;"""
# Install a Perl-based `normalize` SQL function that transliterates
# text via Text::Unidecode.
query = """CREATE OR REPLACE FUNCTION normalize(text)
RETURNS text
AS $$
use Text::Unidecode;
return unidecode(shift);
$$ LANGUAGE plperlu IMMUTABLE;"""
self.execute_sql(query)
self.commit()
self.close_database() |
def setter(self, fset):
"""
To be used as a decorator. Will define the decorated method
as a write attribute method to be called when client writes
the attribute
"""
self.fset = fset
if self.attr_write == AttrWriteType.READ:
if getattr(self, 'fget', None):
self.attr_write = AttrWriteType.READ_WRITE
else:
self.attr_write = AttrWriteType.WRITE
return self | To be used as a decorator. Will define the decorated method
as a write attribute method to be called when client writes
the attribute | Below is the the instruction that describes the task:
### Input:
To be used as a decorator. Will define the decorated method
as a write attribute method to be called when client writes
the attribute
### Response:
def setter(self, fset):
"""
To be used as a decorator. Will define the decorated method
as a write attribute method to be called when client writes
the attribute
"""
self.fset = fset
# A previously read-only attribute gaining a setter becomes READ_WRITE
# when a getter exists, otherwise write-only.
if self.attr_write == AttrWriteType.READ:
if getattr(self, 'fget', None):
self.attr_write = AttrWriteType.READ_WRITE
else:
self.attr_write = AttrWriteType.WRITE
return self |
def acquire(self):
"""
Try to aquire the lock.
"""
if self.timeout is not None:
sleep_intervals = int(self.timeout / self.sleep_time)
else:
sleep_intervals = float('inf')
while not self.acquire_try_once() and sleep_intervals > 0:
time.sleep(self.sleep_time)
sleep_intervals -= 1
if not self.is_locked_by_me():
raise util.io.filelock.general.FileLockTimeoutError(self.lock_filename, self.timeout) | Try to aquire the lock. | Below is the the instruction that describes the task:
### Input:
Try to aquire the lock.
### Response:
def acquire(self):
"""
Try to acquire the lock.
"""
# A timeout of None means retry indefinitely.
if self.timeout is not None:
sleep_intervals = int(self.timeout / self.sleep_time)
else:
sleep_intervals = float('inf')
# Poll until the lock is obtained or the retry budget is exhausted.
while not self.acquire_try_once() and sleep_intervals > 0:
time.sleep(self.sleep_time)
sleep_intervals -= 1
if not self.is_locked_by_me():
raise util.io.filelock.general.FileLockTimeoutError(self.lock_filename, self.timeout) |
def parse_schema(schema_file):
"""
parses the schema file and returns the columns that are later going to represent the columns of the genometric space dataframe
:param schema_file: the path to the schema file
:return: the columns of the schema file
"""
e = xml.etree.ElementTree.parse(schema_file)
root = e.getroot()
cols = []
for elem in root.findall(".//{http://genomic.elet.polimi.it/entities}field"): # XPATH
cols.append(elem.text)
return cols | parses the schema file and returns the columns that are later going to represent the columns of the genometric space dataframe
:param schema_file: the path to the schema file
:return: the columns of the schema file | Below is the the instruction that describes the task:
### Input:
parses the schema file and returns the columns that are later going to represent the columns of the genometric space dataframe
:param schema_file: the path to the schema file
:return: the columns of the schema file
### Response:
def parse_schema(schema_file):
"""
parses the schema file and returns the columns that are later going to represent the columns of the genometric space dataframe
:param schema_file: the path to the schema file
:return: the columns of the schema file
"""
e = xml.etree.ElementTree.parse(schema_file)
root = e.getroot()
cols = []
# <field> elements live in the GMQL entities namespace, so the XPath
# expression must be namespace-qualified.
for elem in root.findall(".//{http://genomic.elet.polimi.it/entities}field"): # XPATH
cols.append(elem.text)
return cols |
def acl_absent(name, id=None, token=None, consul_url='http://localhost:8500'):
'''
Ensure the ACL is absent
name
Specifies a human-friendly name for the ACL token.
id
Specifies the ID of the ACL.
token
token to authenticate you Consul query
consul_url : http://locahost:8500
consul URL to query
.. note::
For more information https://www.consul.io/api/acl.html#delete-acl-token
'''
ret = {
'name': id,
'changes': {},
'result': True,
'comment': 'ACL "{0}" does not exist'.format(id)}
exists = _acl_exists(name, id, token, consul_url)
if exists['result']:
if __opts__['test']:
ret['result'] = None
ret['comment'] = "the acl exists, it will be deleted"
return ret
delete = __salt__['consul.acl_delete'](id=exists['id'], token=token, consul_url=consul_url)
if delete['res']:
ret['result'] = True
ret['comment'] = "the acl has been deleted"
elif not delete['res']:
ret['result'] = False
ret['comment'] = "failed to delete the acl"
return ret | Ensure the ACL is absent
name
Specifies a human-friendly name for the ACL token.
id
Specifies the ID of the ACL.
token
token to authenticate you Consul query
consul_url : http://locahost:8500
consul URL to query
.. note::
For more information https://www.consul.io/api/acl.html#delete-acl-token | Below is the the instruction that describes the task:
### Input:
Ensure the ACL is absent
name
Specifies a human-friendly name for the ACL token.
id
Specifies the ID of the ACL.
token
token to authenticate you Consul query
consul_url : http://locahost:8500
consul URL to query
.. note::
For more information https://www.consul.io/api/acl.html#delete-acl-token
### Response:
def acl_absent(name, id=None, token=None, consul_url='http://localhost:8500'):
'''
Ensure the ACL is absent
name
Specifies a human-friendly name for the ACL token.
id
Specifies the ID of the ACL.
token
token to authenticate you Consul query
consul_url : http://locahost:8500
consul URL to query
.. note::
For more information https://www.consul.io/api/acl.html#delete-acl-token
'''
ret = {
'name': id,
'changes': {},
'result': True,
'comment': 'ACL "{0}" does not exist'.format(id)}
exists = _acl_exists(name, id, token, consul_url)
if exists['result']:
# Dry-run (test) mode: report the pending deletion without acting.
if __opts__['test']:
ret['result'] = None
ret['comment'] = "the acl exists, it will be deleted"
return ret
delete = __salt__['consul.acl_delete'](id=exists['id'], token=token, consul_url=consul_url)
if delete['res']:
ret['result'] = True
ret['comment'] = "the acl has been deleted"
elif not delete['res']:
ret['result'] = False
ret['comment'] = "failed to delete the acl"
return ret |
def emit(self, record):
"""
Override emit() method in handler parent for sending log to RESTful API
"""
# avoid infinite recursion
if record.name.startswith('requests'):
return
data, header = self._prepPayload(record)
try:
self.session.post(self._getEndpoint(),
data=data,
headers={'content-type': header})
except Exception:
self.handleError(record) | Override emit() method in handler parent for sending log to RESTful API | Below is the the instruction that describes the task:
### Input:
Override emit() method in handler parent for sending log to RESTful API
### Response:
def emit(self, record):
"""
Override emit() method in handler parent for sending log to RESTful API
"""
# avoid infinite recursion
# (the HTTP client itself logs under the 'requests' namespace)
if record.name.startswith('requests'):
return
data, header = self._prepPayload(record)
try:
self.session.post(self._getEndpoint(),
data=data,
headers={'content-type': header})
except Exception:
# Delegate to logging's standard error handling; a handler must
# never raise out of emit().
self.handleError(record) |
def bubble_to_gexf(bblfile:str, gexffile:str=None, oriented:bool=False):
"""Write in bblfile a graph equivalent to those depicted in bubble file"""
tree = BubbleTree.from_bubble_file(bblfile, oriented=bool(oriented))
gexf_converter.tree_to_file(tree, gexffile)
return gexffile | Write in bblfile a graph equivalent to those depicted in bubble file | Below is the the instruction that describes the task:
### Input:
Write in bblfile a graph equivalent to those depicted in bubble file
### Response:
def bubble_to_gexf(bblfile:str, gexffile:str=None, oriented:bool=False):
"""Write in gexffile a graph equivalent to the one depicted in bubble file"""
# Build the tree from the bubble file, then serialize it as GEXF.
tree = BubbleTree.from_bubble_file(bblfile, oriented=bool(oriented))
gexf_converter.tree_to_file(tree, gexffile)
return gexffile |
def build_shapes(self):
"""
Extract SHACL data shapes from the rdf graph.
<http://www.w3.org/ns/shacl#>
Instatiate the Shape Python objects and relate it to existing classes,
if available.
"""
self.all_shapes = [] # @todo: keep adding?
qres = self.sparqlHelper.getShapes()
for candidate in qres:
test_existing_cl = self.get_any_entity(uri=candidate[0])
if not test_existing_cl:
# create it
self.all_shapes += [OntoShape(candidate[0], None, self.namespaces)]
else:
pass
# add more data
shacl = rdflib.Namespace('http://www.w3.org/ns/shacl#')
for aShape in self.all_shapes:
aShape.rdftype = shacl['Shape']
aShape.triples = self.sparqlHelper.entityTriples(aShape.uri)
aShape._buildGraph() # force construction of mini graph
aShape.sparqlHelper = self.sparqlHelper
# attach to a class
for uri in aShape.getValuesForProperty(shacl['targetClass']):
aclass = self.get_class(str(uri))
if aclass:
aShape.targetClasses += [aclass]
aclass.all_shapes += [aShape]
for propertyUri in aShape.getValuesForProperty(shacl['path']): #add shaped properties of this class. later can be used for ontodocs
propType = self.get_property(str(propertyUri))
if propType:
aclass.shapedProperties += [{'shape': aShape, 'property': propType}]
# sort alphabetically
self.all_shapes = sorted(self.all_shapes, key=lambda x: x.qname)
# compute top layer
exit = []
for c in self.all_shapes:
if not c.parents():
exit += [c]
self.toplayer_shapes = exit | Extract SHACL data shapes from the rdf graph.
<http://www.w3.org/ns/shacl#>
Instatiate the Shape Python objects and relate it to existing classes,
if available. | Below is the the instruction that describes the task:
### Input:
Extract SHACL data shapes from the rdf graph.
<http://www.w3.org/ns/shacl#>
Instatiate the Shape Python objects and relate it to existing classes,
if available.
### Response:
def build_shapes(self):
"""
Extract SHACL data shapes from the rdf graph.
<http://www.w3.org/ns/shacl#>
Instantiate the Shape Python objects and relate it to existing classes,
if available.
"""
self.all_shapes = [] # @todo: keep adding?
qres = self.sparqlHelper.getShapes()
# Create an OntoShape for every shape URI not already known as an entity.
for candidate in qres:
test_existing_cl = self.get_any_entity(uri=candidate[0])
if not test_existing_cl:
# create it
self.all_shapes += [OntoShape(candidate[0], None, self.namespaces)]
else:
pass
# add more data
shacl = rdflib.Namespace('http://www.w3.org/ns/shacl#')
for aShape in self.all_shapes:
aShape.rdftype = shacl['Shape']
aShape.triples = self.sparqlHelper.entityTriples(aShape.uri)
aShape._buildGraph() # force construction of mini graph
aShape.sparqlHelper = self.sparqlHelper
# attach to a class
# Cross-link the shape and every class named by sh:targetClass.
for uri in aShape.getValuesForProperty(shacl['targetClass']):
aclass = self.get_class(str(uri))
if aclass:
aShape.targetClasses += [aclass]
aclass.all_shapes += [aShape]
for propertyUri in aShape.getValuesForProperty(shacl['path']): #add shaped properties of this class. later can be used for ontodocs
propType = self.get_property(str(propertyUri))
if propType:
aclass.shapedProperties += [{'shape': aShape, 'property': propType}]
# sort alphabetically
self.all_shapes = sorted(self.all_shapes, key=lambda x: x.qname)
# compute top layer
# Shapes with no parents form the top layer of the hierarchy.
exit = []
for c in self.all_shapes:
if not c.parents():
exit += [c]
self.toplayer_shapes = exit |
def authenticateRequest(self, service_request, username, password, **kwargs):
"""
Processes an authentication request. If no authenticator is supplied,
then authentication succeeds.
@return: Returns a C{bool} based on the result of authorization. A
value of C{False} will stop processing the request and return an
error to the client.
@rtype: C{bool}
"""
authenticator = self.getAuthenticator(service_request)
if authenticator is None:
return True
args = (username, password)
if hasattr(authenticator, '_pyamf_expose_request'):
http_request = kwargs.get('http_request', None)
args = (http_request,) + args
return authenticator(*args) == True | Processes an authentication request. If no authenticator is supplied,
then authentication succeeds.
@return: Returns a C{bool} based on the result of authorization. A
value of C{False} will stop processing the request and return an
error to the client.
@rtype: C{bool} | Below is the the instruction that describes the task:
### Input:
Processes an authentication request. If no authenticator is supplied,
then authentication succeeds.
@return: Returns a C{bool} based on the result of authorization. A
value of C{False} will stop processing the request and return an
error to the client.
@rtype: C{bool}
### Response:
def authenticateRequest(self, service_request, username, password, **kwargs):
"""
Processes an authentication request. If no authenticator is supplied,
then authentication succeeds.
@return: Returns a C{bool} based on the result of authorization. A
value of C{False} will stop processing the request and return an
error to the client.
@rtype: C{bool}
"""
authenticator = self.getAuthenticator(service_request)
if authenticator is None:
return True
args = (username, password)
if hasattr(authenticator, '_pyamf_expose_request'):
http_request = kwargs.get('http_request', None)
args = (http_request,) + args
return authenticator(*args) == True |
def read_data(fo, writer_schema, reader_schema=None):
"""Read data from file object according to schema."""
record_type = extract_record_type(writer_schema)
logical_type = extract_logical_type(writer_schema)
if reader_schema and record_type in AVRO_TYPES:
# If the schemas are the same, set the reader schema to None so that no
# schema resolution is done for this call or future recursive calls
if writer_schema == reader_schema:
reader_schema = None
else:
match_schemas(writer_schema, reader_schema)
reader_fn = READERS.get(record_type)
if reader_fn:
try:
data = reader_fn(fo, writer_schema, reader_schema)
except StructError:
raise EOFError('cannot read %s from %s' % (record_type, fo))
if 'logicalType' in writer_schema:
fn = LOGICAL_READERS.get(logical_type)
if fn:
return fn(data, writer_schema, reader_schema)
if reader_schema is not None:
return maybe_promote(
data,
record_type,
extract_record_type(reader_schema)
)
else:
return data
else:
return read_data(
fo,
SCHEMA_DEFS[record_type],
SCHEMA_DEFS.get(reader_schema)
) | Read data from file object according to schema. | Below is the the instruction that describes the task:
### Input:
Read data from file object according to schema.
### Response:
def read_data(fo, writer_schema, reader_schema=None):
"""Read data from file object according to schema."""
record_type = extract_record_type(writer_schema)
logical_type = extract_logical_type(writer_schema)
if reader_schema and record_type in AVRO_TYPES:
# If the schemas are the same, set the reader schema to None so that no
# schema resolution is done for this call or future recursive calls
if writer_schema == reader_schema:
reader_schema = None
else:
match_schemas(writer_schema, reader_schema)
reader_fn = READERS.get(record_type)
if reader_fn:
try:
data = reader_fn(fo, writer_schema, reader_schema)
except StructError:
raise EOFError('cannot read %s from %s' % (record_type, fo))
if 'logicalType' in writer_schema:
fn = LOGICAL_READERS.get(logical_type)
if fn:
return fn(data, writer_schema, reader_schema)
if reader_schema is not None:
return maybe_promote(
data,
record_type,
extract_record_type(reader_schema)
)
else:
return data
else:
return read_data(
fo,
SCHEMA_DEFS[record_type],
SCHEMA_DEFS.get(reader_schema)
) |
def bin_power(X, Band, Fs):
"""Compute power in each frequency bin specified by Band from FFT result of
X. By default, X is a real signal.
Note
-----
A real signal can be synthesized, thus not real.
Parameters
-----------
Band
list
boundary frequencies (in Hz) of bins. They can be unequal bins, e.g.
[0.5,4,7,12,30] which are delta, theta, alpha and beta respectively.
You can also use range() function of Python to generate equal bins and
pass the generated list to this function.
Each element of Band is a physical frequency and shall not exceed the
Nyquist frequency, i.e., half of sampling frequency.
X
list
a 1-D real time series.
Fs
integer
the sampling rate in physical frequency
Returns
-------
Power
list
spectral power in each frequency bin.
Power_ratio
list
spectral power in each frequency bin normalized by total power in ALL
frequency bins.
"""
C = numpy.fft.fft(X)
C = abs(C)
Power = numpy.zeros(len(Band) - 1)
for Freq_Index in range(0, len(Band) - 1):
Freq = float(Band[Freq_Index])
Next_Freq = float(Band[Freq_Index + 1])
Power[Freq_Index] = sum(
C[numpy.floor(
Freq / Fs * len(X)
): numpy.floor(Next_Freq / Fs * len(X))]
)
Power_Ratio = Power / sum(Power)
return Power, Power_Ratio | Compute power in each frequency bin specified by Band from FFT result of
X. By default, X is a real signal.
Note
-----
A real signal can be synthesized, thus not real.
Parameters
-----------
Band
list
boundary frequencies (in Hz) of bins. They can be unequal bins, e.g.
[0.5,4,7,12,30] which are delta, theta, alpha and beta respectively.
You can also use range() function of Python to generate equal bins and
pass the generated list to this function.
Each element of Band is a physical frequency and shall not exceed the
Nyquist frequency, i.e., half of sampling frequency.
X
list
a 1-D real time series.
Fs
integer
the sampling rate in physical frequency
Returns
-------
Power
list
spectral power in each frequency bin.
Power_ratio
list
spectral power in each frequency bin normalized by total power in ALL
frequency bins. | Below is the the instruction that describes the task:
### Input:
Compute power in each frequency bin specified by Band from FFT result of
X. By default, X is a real signal.
Note
-----
A real signal can be synthesized, thus not real.
Parameters
-----------
Band
list
boundary frequencies (in Hz) of bins. They can be unequal bins, e.g.
[0.5,4,7,12,30] which are delta, theta, alpha and beta respectively.
You can also use range() function of Python to generate equal bins and
pass the generated list to this function.
Each element of Band is a physical frequency and shall not exceed the
Nyquist frequency, i.e., half of sampling frequency.
X
list
a 1-D real time series.
Fs
integer
the sampling rate in physical frequency
Returns
-------
Power
list
spectral power in each frequency bin.
Power_ratio
list
spectral power in each frequency bin normalized by total power in ALL
frequency bins.
### Response:
def bin_power(X, Band, Fs):
"""Compute power in each frequency bin specified by Band from FFT result of
X. By default, X is a real signal.
Note
-----
A real signal can be synthesized, thus not real.
Parameters
-----------
Band
list
boundary frequencies (in Hz) of bins. They can be unequal bins, e.g.
[0.5,4,7,12,30] which are delta, theta, alpha and beta respectively.
You can also use range() function of Python to generate equal bins and
pass the generated list to this function.
Each element of Band is a physical frequency and shall not exceed the
Nyquist frequency, i.e., half of sampling frequency.
X
list
a 1-D real time series.
Fs
integer
the sampling rate in physical frequency
Returns
-------
Power
list
spectral power in each frequency bin.
Power_ratio
list
spectral power in each frequency bin normalized by total power in ALL
frequency bins.
"""
C = numpy.fft.fft(X)
C = abs(C)
Power = numpy.zeros(len(Band) - 1)
for Freq_Index in range(0, len(Band) - 1):
Freq = float(Band[Freq_Index])
Next_Freq = float(Band[Freq_Index + 1])
Power[Freq_Index] = sum(
C[numpy.floor(
Freq / Fs * len(X)
): numpy.floor(Next_Freq / Fs * len(X))]
)
Power_Ratio = Power / sum(Power)
return Power, Power_Ratio |
def post_registration_redirect(self, request, user):
"""
After registration, redirect to the home page or supplied "next"
query string or hidden field value.
"""
next_url = "/registration/register/complete/"
if "next" in request.GET or "next" in request.POST:
next_url = request.GET.get("next", None) or request.POST.get("next", None) or "/"
return (next_url, (), {}) | After registration, redirect to the home page or supplied "next"
query string or hidden field value. | Below is the the instruction that describes the task:
### Input:
After registration, redirect to the home page or supplied "next"
query string or hidden field value.
### Response:
def post_registration_redirect(self, request, user):
"""
After registration, redirect to the home page or supplied "next"
query string or hidden field value.
"""
next_url = "/registration/register/complete/"
if "next" in request.GET or "next" in request.POST:
next_url = request.GET.get("next", None) or request.POST.get("next", None) or "/"
return (next_url, (), {}) |
def connect_child(self, child_node):
"""Adds a connection to an existing rule in the :class`Flow` graph.
The given :class`Rule` subclass must be allowed to be connected at
this stage of the flow according to the hierarchy of rules.
:param child_node:
``FlowNodeData`` to attach as a child
"""
self._child_allowed(child_node.rule)
self.node.connect_child(child_node.node) | Adds a connection to an existing rule in the :class`Flow` graph.
The given :class`Rule` subclass must be allowed to be connected at
this stage of the flow according to the hierarchy of rules.
:param child_node:
``FlowNodeData`` to attach as a child | Below is the the instruction that describes the task:
### Input:
Adds a connection to an existing rule in the :class`Flow` graph.
The given :class`Rule` subclass must be allowed to be connected at
this stage of the flow according to the hierarchy of rules.
:param child_node:
``FlowNodeData`` to attach as a child
### Response:
def connect_child(self, child_node):
"""Adds a connection to an existing rule in the :class`Flow` graph.
The given :class`Rule` subclass must be allowed to be connected at
this stage of the flow according to the hierarchy of rules.
:param child_node:
``FlowNodeData`` to attach as a child
"""
self._child_allowed(child_node.rule)
self.node.connect_child(child_node.node) |
def _save_db():
"""Serializes the contents of the script db to JSON."""
from pyci.utility import json_serial
import json
vms("Serializing DB to JSON in {}".format(datapath))
with open(datapath, 'w') as f:
json.dump(db, f, default=json_serial) | Serializes the contents of the script db to JSON. | Below is the the instruction that describes the task:
### Input:
Serializes the contents of the script db to JSON.
### Response:
def _save_db():
"""Serializes the contents of the script db to JSON."""
from pyci.utility import json_serial
import json
vms("Serializing DB to JSON in {}".format(datapath))
with open(datapath, 'w') as f:
json.dump(db, f, default=json_serial) |
def pdftoxml(pdfdata, options=""):
"""converts pdf file to xml file"""
pdffout = tempfile.NamedTemporaryFile(suffix='.pdf')
pdffout.write(pdfdata)
pdffout.flush()
xmlin = tempfile.NamedTemporaryFile(mode='r', suffix='.xml')
tmpxml = xmlin.name # "temph.xml"
cmd = 'pdftohtml -xml -nodrm -zoom 1.5 -enc UTF-8 -noframes %s "%s" "%s"' % (
options, pdffout.name, os.path.splitext(tmpxml)[0])
# can't turn off output, so throw away even stderr yeuch
cmd = cmd + " >/dev/null 2>&1"
os.system(cmd)
pdffout.close()
#xmlfin = open(tmpxml)
xmldata = xmlin.read()
xmlin.close()
return xmldata.decode('utf-8') | converts pdf file to xml file | Below is the the instruction that describes the task:
### Input:
converts pdf file to xml file
### Response:
def pdftoxml(pdfdata, options=""):
"""converts pdf file to xml file"""
pdffout = tempfile.NamedTemporaryFile(suffix='.pdf')
pdffout.write(pdfdata)
pdffout.flush()
xmlin = tempfile.NamedTemporaryFile(mode='r', suffix='.xml')
tmpxml = xmlin.name # "temph.xml"
cmd = 'pdftohtml -xml -nodrm -zoom 1.5 -enc UTF-8 -noframes %s "%s" "%s"' % (
options, pdffout.name, os.path.splitext(tmpxml)[0])
# can't turn off output, so throw away even stderr yeuch
cmd = cmd + " >/dev/null 2>&1"
os.system(cmd)
pdffout.close()
#xmlfin = open(tmpxml)
xmldata = xmlin.read()
xmlin.close()
return xmldata.decode('utf-8') |
def annotated(self):
"""
Annotate this graph, returning an AnnotatedGraph object
with the same structure.
"""
# Build up dictionary of edge annotations.
edge_annotations = {}
for edge in self.edges:
if edge not in edge_annotations:
# We annotate all edges from a given object at once.
referrer = self._tail[edge]
known_refs = annotated_references(referrer)
for out_edge in self._out_edges[referrer]:
referent = self._head[out_edge]
if known_refs[referent]:
annotation = known_refs[referent].pop()
else:
annotation = None
edge_annotations[out_edge] = annotation
annotated_vertices = [
AnnotatedVertex(
id=id(vertex),
annotation=object_annotation(vertex),
)
for vertex in self.vertices
]
annotated_edges = [
AnnotatedEdge(
id=edge,
annotation=edge_annotations[edge],
head=id(self._head[edge]),
tail=id(self._tail[edge]),
)
for edge in self.edges
]
return AnnotatedGraph(
vertices=annotated_vertices,
edges=annotated_edges,
) | Annotate this graph, returning an AnnotatedGraph object
with the same structure. | Below is the the instruction that describes the task:
### Input:
Annotate this graph, returning an AnnotatedGraph object
with the same structure.
### Response:
def annotated(self):
"""
Annotate this graph, returning an AnnotatedGraph object
with the same structure.
"""
# Build up dictionary of edge annotations.
edge_annotations = {}
for edge in self.edges:
if edge not in edge_annotations:
# We annotate all edges from a given object at once.
referrer = self._tail[edge]
known_refs = annotated_references(referrer)
for out_edge in self._out_edges[referrer]:
referent = self._head[out_edge]
if known_refs[referent]:
annotation = known_refs[referent].pop()
else:
annotation = None
edge_annotations[out_edge] = annotation
annotated_vertices = [
AnnotatedVertex(
id=id(vertex),
annotation=object_annotation(vertex),
)
for vertex in self.vertices
]
annotated_edges = [
AnnotatedEdge(
id=edge,
annotation=edge_annotations[edge],
head=id(self._head[edge]),
tail=id(self._tail[edge]),
)
for edge in self.edges
]
return AnnotatedGraph(
vertices=annotated_vertices,
edges=annotated_edges,
) |
def do_parse(infilename: str, jsonfilename: Optional[str], rdffilename: Optional[str], rdffmt: str,
context: Optional[str] = None) -> bool:
"""
Parse the jsg in infilename and save the results in outfilename
:param infilename: name of the file containing the ShExC
:param jsonfilename: target ShExJ equivalent
:param rdffilename: target ShExR equivalent
:param rdffmt: target RDF format
:param context: @context to use for rdf generation. If None use what is in the file
:return: true if success
"""
shexj = parse(FileStream(infilename, encoding="utf-8"))
if shexj is not None:
shexj['@context'] = context if context else "http://www.w3.org/ns/shex.jsonld"
if jsonfilename:
with open(jsonfilename, 'w') as outfile:
outfile.write(shexj._as_json_dumps())
if rdffilename:
g = Graph().parse(data=shexj._as_json, format="json-ld")
g.serialize(open(rdffilename, "wb"), format=rdffmt)
return True
return False | Parse the jsg in infilename and save the results in outfilename
:param infilename: name of the file containing the ShExC
:param jsonfilename: target ShExJ equivalent
:param rdffilename: target ShExR equivalent
:param rdffmt: target RDF format
:param context: @context to use for rdf generation. If None use what is in the file
:return: true if success | Below is the the instruction that describes the task:
### Input:
Parse the jsg in infilename and save the results in outfilename
:param infilename: name of the file containing the ShExC
:param jsonfilename: target ShExJ equivalent
:param rdffilename: target ShExR equivalent
:param rdffmt: target RDF format
:param context: @context to use for rdf generation. If None use what is in the file
:return: true if success
### Response:
def do_parse(infilename: str, jsonfilename: Optional[str], rdffilename: Optional[str], rdffmt: str,
context: Optional[str] = None) -> bool:
"""
Parse the jsg in infilename and save the results in outfilename
:param infilename: name of the file containing the ShExC
:param jsonfilename: target ShExJ equivalent
:param rdffilename: target ShExR equivalent
:param rdffmt: target RDF format
:param context: @context to use for rdf generation. If None use what is in the file
:return: true if success
"""
shexj = parse(FileStream(infilename, encoding="utf-8"))
if shexj is not None:
shexj['@context'] = context if context else "http://www.w3.org/ns/shex.jsonld"
if jsonfilename:
with open(jsonfilename, 'w') as outfile:
outfile.write(shexj._as_json_dumps())
if rdffilename:
g = Graph().parse(data=shexj._as_json, format="json-ld")
g.serialize(open(rdffilename, "wb"), format=rdffmt)
return True
return False |
def tupleize(
rows,
alphabetize_columns=getattr(
settings,
'ALPHABETIZE_COLUMNS',
False)):
"""Also alphabetizes columns and returns a tuple of tuples"""
# define a blank list as our return object
l = []
for r in rows:
row = []
row = list(r.values())
l.append(row)
# alphabetize
if alphabetize_columns:
col = sorted(zip(*l))
result = zip(*col)
return result
else:
return l | Also alphabetizes columns and returns a tuple of tuples | Below is the the instruction that describes the task:
### Input:
Also alphabetizes columns and returns a tuple of tuples
### Response:
def tupleize(
rows,
alphabetize_columns=getattr(
settings,
'ALPHABETIZE_COLUMNS',
False)):
"""Also alphabetizes columns and returns a tuple of tuples"""
# define a blank list as our return object
l = []
for r in rows:
row = []
row = list(r.values())
l.append(row)
# alphabetize
if alphabetize_columns:
col = sorted(zip(*l))
result = zip(*col)
return result
else:
return l |
def register_task(self, task_def):
'''
Register a task for a python dict
:param task_def: dict defining gbdx task
'''
r = self.session.post(
self.task_url,
data=task_def,
headers={'Content-Type': 'application/json', 'Accept': 'application/json'}
)
task_dict = json.loads(task_def)
if r.status_code == 200:
return r.status_code, 'Task %s registered' % task_dict['name']
else:
return r.status_code, 'Task %s was not registered: %s' % (task_dict['name'], r.text) | Register a task for a python dict
:param task_def: dict defining gbdx task | Below is the the instruction that describes the task:
### Input:
Register a task for a python dict
:param task_def: dict defining gbdx task
### Response:
def register_task(self, task_def):
'''
Register a task for a python dict
:param task_def: dict defining gbdx task
'''
r = self.session.post(
self.task_url,
data=task_def,
headers={'Content-Type': 'application/json', 'Accept': 'application/json'}
)
task_dict = json.loads(task_def)
if r.status_code == 200:
return r.status_code, 'Task %s registered' % task_dict['name']
else:
return r.status_code, 'Task %s was not registered: %s' % (task_dict['name'], r.text) |
def _build_extra_predicate(self, extra_predicate):
""" This method is a good one to extend if you want to create a queue which always applies an extra predicate. """
if extra_predicate is None:
return ''
# if they don't have a supported format seq, wrap it for them
if not isinstance(extra_predicate[1], (list, dict, tuple)):
extra_predicate = [extra_predicate[0], (extra_predicate[1], )]
extra_predicate = database.escape_query(*extra_predicate)
return 'AND (' + extra_predicate + ')' | This method is a good one to extend if you want to create a queue which always applies an extra predicate. | Below is the the instruction that describes the task:
### Input:
This method is a good one to extend if you want to create a queue which always applies an extra predicate.
### Response:
def _build_extra_predicate(self, extra_predicate):
""" This method is a good one to extend if you want to create a queue which always applies an extra predicate. """
if extra_predicate is None:
return ''
# if they don't have a supported format seq, wrap it for them
if not isinstance(extra_predicate[1], (list, dict, tuple)):
extra_predicate = [extra_predicate[0], (extra_predicate[1], )]
extra_predicate = database.escape_query(*extra_predicate)
return 'AND (' + extra_predicate + ')' |
def _evaluate_if_headers(self, res, environ):
"""Apply HTTP headers on <path>, raising DAVError if conditions fail.
Add environ['wsgidav.conditions.if'] and environ['wsgidav.ifLockTokenList'].
Handle these headers:
- If-Match, If-Modified-Since, If-None-Match, If-Unmodified-Since:
Raising HTTP_PRECONDITION_FAILED or HTTP_NOT_MODIFIED
- If:
Raising HTTP_PRECONDITION_FAILED
@see http://www.webdav.org/specs/rfc4918.html#HEADER_If
@see util.evaluate_http_conditionals
"""
# Add parsed If header to environ
if "wsgidav.conditions.if" not in environ:
util.parse_if_header_dict(environ)
# Bail out, if res does not exist
if res is None:
return
ifDict = environ["wsgidav.conditions.if"]
# Raise HTTP_PRECONDITION_FAILED or HTTP_NOT_MODIFIED, if standard
# HTTP condition fails
last_modified = -1 # nonvalid modified time
entitytag = "[]" # Non-valid entity tag
if res.get_last_modified() is not None:
last_modified = res.get_last_modified()
if res.get_etag() is not None:
entitytag = res.get_etag()
if (
"HTTP_IF_MODIFIED_SINCE" in environ
or "HTTP_IF_UNMODIFIED_SINCE" in environ
or "HTTP_IF_MATCH" in environ
or "HTTP_IF_NONE_MATCH" in environ
):
util.evaluate_http_conditionals(res, last_modified, entitytag, environ)
if "HTTP_IF" not in environ:
return
# Raise HTTP_PRECONDITION_FAILED, if DAV 'If' condition fails
# TODO: handle empty locked resources
# TODO: handle unmapped locked resources
# isnewfile = not provider.exists(mappedpath)
refUrl = res.get_ref_url()
lockMan = self._davProvider.lock_manager
locktokenlist = []
if lockMan:
lockList = lockMan.get_indirect_url_lock_list(
refUrl, environ["wsgidav.user_name"]
)
for lock in lockList:
locktokenlist.append(lock["token"])
if not util.test_if_header_dict(res, ifDict, refUrl, locktokenlist, entitytag):
self._fail(HTTP_PRECONDITION_FAILED, "'If' header condition failed.")
return | Apply HTTP headers on <path>, raising DAVError if conditions fail.
Add environ['wsgidav.conditions.if'] and environ['wsgidav.ifLockTokenList'].
Handle these headers:
- If-Match, If-Modified-Since, If-None-Match, If-Unmodified-Since:
Raising HTTP_PRECONDITION_FAILED or HTTP_NOT_MODIFIED
- If:
Raising HTTP_PRECONDITION_FAILED
@see http://www.webdav.org/specs/rfc4918.html#HEADER_If
@see util.evaluate_http_conditionals | Below is the the instruction that describes the task:
### Input:
Apply HTTP headers on <path>, raising DAVError if conditions fail.
Add environ['wsgidav.conditions.if'] and environ['wsgidav.ifLockTokenList'].
Handle these headers:
- If-Match, If-Modified-Since, If-None-Match, If-Unmodified-Since:
Raising HTTP_PRECONDITION_FAILED or HTTP_NOT_MODIFIED
- If:
Raising HTTP_PRECONDITION_FAILED
@see http://www.webdav.org/specs/rfc4918.html#HEADER_If
@see util.evaluate_http_conditionals
### Response:
def _evaluate_if_headers(self, res, environ):
"""Apply HTTP headers on <path>, raising DAVError if conditions fail.
Add environ['wsgidav.conditions.if'] and environ['wsgidav.ifLockTokenList'].
Handle these headers:
- If-Match, If-Modified-Since, If-None-Match, If-Unmodified-Since:
Raising HTTP_PRECONDITION_FAILED or HTTP_NOT_MODIFIED
- If:
Raising HTTP_PRECONDITION_FAILED
@see http://www.webdav.org/specs/rfc4918.html#HEADER_If
@see util.evaluate_http_conditionals
"""
# Add parsed If header to environ
if "wsgidav.conditions.if" not in environ:
util.parse_if_header_dict(environ)
# Bail out, if res does not exist
if res is None:
return
ifDict = environ["wsgidav.conditions.if"]
# Raise HTTP_PRECONDITION_FAILED or HTTP_NOT_MODIFIED, if standard
# HTTP condition fails
last_modified = -1 # nonvalid modified time
entitytag = "[]" # Non-valid entity tag
if res.get_last_modified() is not None:
last_modified = res.get_last_modified()
if res.get_etag() is not None:
entitytag = res.get_etag()
if (
"HTTP_IF_MODIFIED_SINCE" in environ
or "HTTP_IF_UNMODIFIED_SINCE" in environ
or "HTTP_IF_MATCH" in environ
or "HTTP_IF_NONE_MATCH" in environ
):
util.evaluate_http_conditionals(res, last_modified, entitytag, environ)
if "HTTP_IF" not in environ:
return
# Raise HTTP_PRECONDITION_FAILED, if DAV 'If' condition fails
# TODO: handle empty locked resources
# TODO: handle unmapped locked resources
# isnewfile = not provider.exists(mappedpath)
refUrl = res.get_ref_url()
lockMan = self._davProvider.lock_manager
locktokenlist = []
if lockMan:
lockList = lockMan.get_indirect_url_lock_list(
refUrl, environ["wsgidav.user_name"]
)
for lock in lockList:
locktokenlist.append(lock["token"])
if not util.test_if_header_dict(res, ifDict, refUrl, locktokenlist, entitytag):
self._fail(HTTP_PRECONDITION_FAILED, "'If' header condition failed.")
return |
def create_dependency(self, _from, to, function, comment=None):
"""
Create a new dependency object and add it to the dependency layer
@type _from: string
@param _from: term id of the child node
@type _to: string
@param _to: term id of the parent node
@type function: string
@param function: grammatical function (relation) between parent and child
@type comment: string
@param comment: optional comment to be included
"""
new_dependency = Cdependency()
new_dependency.set_from(_from)
new_dependency.set_to(to)
new_dependency.set_function(function)
if comment:
new_dependency.set_comment(comment)
self.add_dependency(new_dependency)
return new_dependency | Create a new dependency object and add it to the dependency layer
@type _from: string
@param _from: term id of the child node
@type _to: string
@param _to: term id of the parent node
@type function: string
@param function: grammatical function (relation) between parent and child
@type comment: string
@param comment: optional comment to be included | Below is the the instruction that describes the task:
### Input:
Create a new dependency object and add it to the dependency layer
@type _from: string
@param _from: term id of the child node
@type _to: string
@param _to: term id of the parent node
@type function: string
@param function: grammatical function (relation) between parent and child
@type comment: string
@param comment: optional comment to be included
### Response:
def create_dependency(self, _from, to, function, comment=None):
"""
Create a new dependency object and add it to the dependency layer
@type _from: string
@param _from: term id of the child node
@type _to: string
@param _to: term id of the parent node
@type function: string
@param function: grammatical function (relation) between parent and child
@type comment: string
@param comment: optional comment to be included
"""
new_dependency = Cdependency()
new_dependency.set_from(_from)
new_dependency.set_to(to)
new_dependency.set_function(function)
if comment:
new_dependency.set_comment(comment)
self.add_dependency(new_dependency)
return new_dependency |
def leading_whitespace_in_current_line(self):
""" The leading whitespace in the left margin of the current line. """
current_line = self.current_line
length = len(current_line) - len(current_line.lstrip())
return current_line[:length] | The leading whitespace in the left margin of the current line. | Below is the the instruction that describes the task:
### Input:
The leading whitespace in the left margin of the current line.
### Response:
def leading_whitespace_in_current_line(self):
""" The leading whitespace in the left margin of the current line. """
current_line = self.current_line
length = len(current_line) - len(current_line.lstrip())
return current_line[:length] |
def lambda_B_calc(classes, table, TOP, POP):
"""
Calculate Goodman and Kruskal's lambda B.
:param classes: confusion matrix classes
:type classes : list
:param table: confusion matrix table
:type table : dict
:param TOP: test outcome positive
:type TOP : dict
:param POP: population
:type POP : int
:return: Goodman and Kruskal's lambda B as float
"""
try:
result = 0
length = POP
maxresponse = max(list(TOP.values()))
for i in classes:
result += max(list(table[i].values()))
result = (result - maxresponse) / (length - maxresponse)
return result
except Exception:
return "None" | Calculate Goodman and Kruskal's lambda B.
:param classes: confusion matrix classes
:type classes : list
:param table: confusion matrix table
:type table : dict
:param TOP: test outcome positive
:type TOP : dict
:param POP: population
:type POP : int
:return: Goodman and Kruskal's lambda B as float | Below is the the instruction that describes the task:
### Input:
Calculate Goodman and Kruskal's lambda B.
:param classes: confusion matrix classes
:type classes : list
:param table: confusion matrix table
:type table : dict
:param TOP: test outcome positive
:type TOP : dict
:param POP: population
:type POP : int
:return: Goodman and Kruskal's lambda B as float
### Response:
def lambda_B_calc(classes, table, TOP, POP):
"""
Calculate Goodman and Kruskal's lambda B.
:param classes: confusion matrix classes
:type classes : list
:param table: confusion matrix table
:type table : dict
:param TOP: test outcome positive
:type TOP : dict
:param POP: population
:type POP : int
:return: Goodman and Kruskal's lambda B as float
"""
try:
result = 0
length = POP
maxresponse = max(list(TOP.values()))
for i in classes:
result += max(list(table[i].values()))
result = (result - maxresponse) / (length - maxresponse)
return result
except Exception:
return "None" |
def mstmap(args):
"""
%prog mstmap LMD50.snps.genotype.txt
Convert LMDs to MSTMAP input.
"""
from jcvi.assembly.geneticmap import MSTMatrix
p = OptionParser(mstmap.__doc__)
p.add_option("--population_type", default="RIL6",
help="Type of population, possible values are DH and RILd")
p.add_option("--missing_threshold", default=.5,
help="Missing threshold, .25 excludes any marker with >25% missing")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
lmd, = args
fp = open(lmd)
next(fp) # Header
table = {"0": "-", "1": "A", "2": "B", "3": "X"}
mh = ["locus_name"] + fp.next().split()[4:]
genotypes = []
for row in fp:
atoms = row.split()
chr, pos, ref, alt = atoms[:4]
locus_name = ".".join((chr, pos))
codes = [table[x] for x in atoms[4:]]
genotypes.append([locus_name] + codes)
mm = MSTMatrix(genotypes, mh, opts.population_type, opts.missing_threshold)
mm.write(opts.outfile, header=True) | %prog mstmap LMD50.snps.genotype.txt
Convert LMDs to MSTMAP input. | Below is the the instruction that describes the task:
### Input:
%prog mstmap LMD50.snps.genotype.txt
Convert LMDs to MSTMAP input.
### Response:
def mstmap(args):
"""
%prog mstmap LMD50.snps.genotype.txt
Convert LMDs to MSTMAP input.
"""
from jcvi.assembly.geneticmap import MSTMatrix
p = OptionParser(mstmap.__doc__)
p.add_option("--population_type", default="RIL6",
help="Type of population, possible values are DH and RILd")
p.add_option("--missing_threshold", default=.5,
help="Missing threshold, .25 excludes any marker with >25% missing")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
lmd, = args
fp = open(lmd)
next(fp) # Header
table = {"0": "-", "1": "A", "2": "B", "3": "X"}
mh = ["locus_name"] + fp.next().split()[4:]
genotypes = []
for row in fp:
atoms = row.split()
chr, pos, ref, alt = atoms[:4]
locus_name = ".".join((chr, pos))
codes = [table[x] for x in atoms[4:]]
genotypes.append([locus_name] + codes)
mm = MSTMatrix(genotypes, mh, opts.population_type, opts.missing_threshold)
mm.write(opts.outfile, header=True) |
def CSVofIntegers(msg=None):
'''
Checks whether a value is list of integers.
Returns list of integers or just one integer in
list if there is only one element in given CSV string.
'''
def fn(value):
try:
if isinstance(value, basestring):
if ',' in value:
value = list(map(
int, filter(
bool, list(map(
lambda x: x.strip(), value.split(',')
))
)
))
return value
else:
return [int(value)]
else:
raise ValueError
except ValueError:
raise Invalid(
'<{0}> is not a valid csv of integers'.format(value)
)
return fn | Checks whether a value is list of integers.
Returns list of integers or just one integer in
list if there is only one element in given CSV string. | Below is the the instruction that describes the task:
### Input:
Checks whether a value is list of integers.
Returns list of integers or just one integer in
list if there is only one element in given CSV string.
### Response:
def CSVofIntegers(msg=None):
'''
Checks whether a value is list of integers.
Returns list of integers or just one integer in
list if there is only one element in given CSV string.
'''
def fn(value):
try:
if isinstance(value, basestring):
if ',' in value:
value = list(map(
int, filter(
bool, list(map(
lambda x: x.strip(), value.split(',')
))
)
))
return value
else:
return [int(value)]
else:
raise ValueError
except ValueError:
raise Invalid(
'<{0}> is not a valid csv of integers'.format(value)
)
return fn |
def mime_type(self):
"""
:return: String describing the mime type of this file (based on the filename)
:note: Defaults to 'text/plain' in case the actual file type is unknown. """
guesses = None
if self.path:
guesses = guess_type(self.path)
return guesses and guesses[0] or self.DEFAULT_MIME_TYPE | :return: String describing the mime type of this file (based on the filename)
:note: Defaults to 'text/plain' in case the actual file type is unknown. | Below is the the instruction that describes the task:
### Input:
:return: String describing the mime type of this file (based on the filename)
:note: Defaults to 'text/plain' in case the actual file type is unknown.
### Response:
def mime_type(self):
"""
:return: String describing the mime type of this file (based on the filename)
:note: Defaults to 'text/plain' in case the actual file type is unknown. """
guesses = None
if self.path:
guesses = guess_type(self.path)
return guesses and guesses[0] or self.DEFAULT_MIME_TYPE |
def list_schemas(repo):
"""
Return a list of parsed avro schemas as dictionaries.
:param Repo repo:
The git repository.
:returns: dict
"""
schema_files = glob.glob(
os.path.join(repo.working_dir, '_schemas', '*.avsc'))
schemas = {}
for schema_file in schema_files:
with open(schema_file, 'r') as fp:
schema = json.load(fp)
schemas['%(namespace)s.%(name)s' % schema] = schema
return schemas | Return a list of parsed avro schemas as dictionaries.
:param Repo repo:
The git repository.
:returns: dict | Below is the the instruction that describes the task:
### Input:
Return a list of parsed avro schemas as dictionaries.
:param Repo repo:
The git repository.
:returns: dict
### Response:
def list_schemas(repo):
"""
Return a list of parsed avro schemas as dictionaries.
:param Repo repo:
The git repository.
:returns: dict
"""
schema_files = glob.glob(
os.path.join(repo.working_dir, '_schemas', '*.avsc'))
schemas = {}
for schema_file in schema_files:
with open(schema_file, 'r') as fp:
schema = json.load(fp)
schemas['%(namespace)s.%(name)s' % schema] = schema
return schemas |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.