code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def _upload_in_splits( self, destination_folder_id, source_path, preflight_check, verbose = True, chunked_upload_threads = 5 ):
'''
Since Box has a maximum file size limit (15 GB at time of writing),
we need to split files larger than this into smaller parts, and chunk upload each part
'''
file_size = os.stat(source_path).st_size
split_size = BOX_MAX_FILE_SIZE
# Make sure that the last split piece is still big enough for a chunked upload
while file_size % split_size < BOX_MIN_CHUNK_UPLOAD_SIZE:
split_size -= 1000
if split_size < BOX_MIN_CHUNK_UPLOAD_SIZE:
raise Exception('Lazy programming error')
split_start_byte = 0
part_count = 0
uploaded_file_ids = []
while split_start_byte < file_size:
dest_file_name = '{0}.part{1}'.format( os.path.basename(source_path), part_count)
prev_uploaded_file_ids = self.find_file( destination_folder_id, dest_file_name )
if len( prev_uploaded_file_ids ) == 1:
if verbose:
print ( '\nSkipping upload of split {0} of {1}; already exists'.format( part_count + 1, math.ceil(file_size / split_size) ) )
uploaded_file_ids.extend( prev_uploaded_file_ids )
else:
if verbose:
print ( '\nUploading split {0} of {1}'.format( part_count + 1, math.ceil(file_size / split_size) ) )
uploaded_file_ids.append( self._chunked_upload(
destination_folder_id, source_path,
dest_file_name = dest_file_name,
split_start_byte = split_start_byte,
file_size = min(split_size, file_size - split_start_byte), # Take the min of file_size - split_start_byte so that the last part of a split doesn't read into the next split
preflight_check = preflight_check,
verbose = verbose,
upload_threads = chunked_upload_threads,
) )
part_count += 1
split_start_byte += split_size
return uploaded_file_ids | Since Box has a maximum file size limit (15 GB at time of writing),
we need to split files larger than this into smaller parts, and chunk upload each part | Below is the the instruction that describes the task:
### Input:
Since Box has a maximum file size limit (15 GB at time of writing),
we need to split files larger than this into smaller parts, and chunk upload each part
### Response:
def _upload_in_splits( self, destination_folder_id, source_path, preflight_check, verbose = True, chunked_upload_threads = 5 ):
'''
Since Box has a maximum file size limit (15 GB at time of writing),
we need to split files larger than this into smaller parts, and chunk upload each part
'''
file_size = os.stat(source_path).st_size
split_size = BOX_MAX_FILE_SIZE
# Make sure that the last split piece is still big enough for a chunked upload
while file_size % split_size < BOX_MIN_CHUNK_UPLOAD_SIZE:
split_size -= 1000
if split_size < BOX_MIN_CHUNK_UPLOAD_SIZE:
raise Exception('Lazy programming error')
split_start_byte = 0
part_count = 0
uploaded_file_ids = []
while split_start_byte < file_size:
dest_file_name = '{0}.part{1}'.format( os.path.basename(source_path), part_count)
prev_uploaded_file_ids = self.find_file( destination_folder_id, dest_file_name )
if len( prev_uploaded_file_ids ) == 1:
if verbose:
print ( '\nSkipping upload of split {0} of {1}; already exists'.format( part_count + 1, math.ceil(file_size / split_size) ) )
uploaded_file_ids.extend( prev_uploaded_file_ids )
else:
if verbose:
print ( '\nUploading split {0} of {1}'.format( part_count + 1, math.ceil(file_size / split_size) ) )
uploaded_file_ids.append( self._chunked_upload(
destination_folder_id, source_path,
dest_file_name = dest_file_name,
split_start_byte = split_start_byte,
file_size = min(split_size, file_size - split_start_byte), # Take the min of file_size - split_start_byte so that the last part of a split doesn't read into the next split
preflight_check = preflight_check,
verbose = verbose,
upload_threads = chunked_upload_threads,
) )
part_count += 1
split_start_byte += split_size
return uploaded_file_ids |
def _validate_row(row, sep=',', required_length=None):
'''validate_row will ensure that a row has the proper length, and is
not empty and cleaned of extra spaces.
Parameters
==========
row: a single row, not yet parsed.
Returns a valid row, or None if not valid
'''
if not isinstance(row, list):
row = _parse_row(row, sep)
if required_length:
length = len(row)
if length != required_length:
bot.warning('Row should have length %s (not %s)' %(required_length,
length))
bot.warning(row)
row = None
return row | validate_row will ensure that a row has the proper length, and is
not empty and cleaned of extra spaces.
Parameters
==========
row: a single row, not yet parsed.
Returns a valid row, or None if not valid | Below is the the instruction that describes the task:
### Input:
validate_row will ensure that a row has the proper length, and is
not empty and cleaned of extra spaces.
Parameters
==========
row: a single row, not yet parsed.
Returns a valid row, or None if not valid
### Response:
def _validate_row(row, sep=',', required_length=None):
'''validate_row will ensure that a row has the proper length, and is
not empty and cleaned of extra spaces.
Parameters
==========
row: a single row, not yet parsed.
Returns a valid row, or None if not valid
'''
if not isinstance(row, list):
row = _parse_row(row, sep)
if required_length:
length = len(row)
if length != required_length:
bot.warning('Row should have length %s (not %s)' %(required_length,
length))
bot.warning(row)
row = None
return row |
def putlogo(figure=None):
"""Puts the CREDO logo at the bottom right of the current figure (or
the figure given by the ``figure`` argument if supplied).
"""
ip = get_ipython()
if figure is None:
figure=plt.gcf()
curraxis= figure.gca()
logoaxis = figure.add_axes([0.89, 0.01, 0.1, 0.1], anchor='NW')
logoaxis.set_axis_off()
logoaxis.xaxis.set_visible(False)
logoaxis.yaxis.set_visible(False)
logoaxis.imshow(credo_logo)
figure.subplots_adjust(right=0.98)
figure.sca(curraxis) | Puts the CREDO logo at the bottom right of the current figure (or
the figure given by the ``figure`` argument if supplied). | Below is the the instruction that describes the task:
### Input:
Puts the CREDO logo at the bottom right of the current figure (or
the figure given by the ``figure`` argument if supplied).
### Response:
def putlogo(figure=None):
"""Puts the CREDO logo at the bottom right of the current figure (or
the figure given by the ``figure`` argument if supplied).
"""
ip = get_ipython()
if figure is None:
figure=plt.gcf()
curraxis= figure.gca()
logoaxis = figure.add_axes([0.89, 0.01, 0.1, 0.1], anchor='NW')
logoaxis.set_axis_off()
logoaxis.xaxis.set_visible(False)
logoaxis.yaxis.set_visible(False)
logoaxis.imshow(credo_logo)
figure.subplots_adjust(right=0.98)
figure.sca(curraxis) |
def get_interface_detail_output_interface_ifHCOutOctets(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_interface_detail = ET.Element("get_interface_detail")
config = get_interface_detail
output = ET.SubElement(get_interface_detail, "output")
interface = ET.SubElement(output, "interface")
interface_type_key = ET.SubElement(interface, "interface-type")
interface_type_key.text = kwargs.pop('interface_type')
interface_name_key = ET.SubElement(interface, "interface-name")
interface_name_key.text = kwargs.pop('interface_name')
ifHCOutOctets = ET.SubElement(interface, "ifHCOutOctets")
ifHCOutOctets.text = kwargs.pop('ifHCOutOctets')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def get_interface_detail_output_interface_ifHCOutOctets(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_interface_detail = ET.Element("get_interface_detail")
config = get_interface_detail
output = ET.SubElement(get_interface_detail, "output")
interface = ET.SubElement(output, "interface")
interface_type_key = ET.SubElement(interface, "interface-type")
interface_type_key.text = kwargs.pop('interface_type')
interface_name_key = ET.SubElement(interface, "interface-name")
interface_name_key.text = kwargs.pop('interface_name')
ifHCOutOctets = ET.SubElement(interface, "ifHCOutOctets")
ifHCOutOctets.text = kwargs.pop('ifHCOutOctets')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def record(self):
# type: () -> bytes
'''
A method to generate the string representing this UDF NSR Volume
Structure.
Parameters:
None.
Returns:
A string representing this UDF BEA Volume Strucutre.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF NSR Volume Structure not initialized')
return struct.pack(self.FMT, 0, self.standard_ident, 1, b'\x00' * 2041) | A method to generate the string representing this UDF NSR Volume
Structure.
Parameters:
None.
Returns:
A string representing this UDF BEA Volume Strucutre. | Below is the the instruction that describes the task:
### Input:
A method to generate the string representing this UDF NSR Volume
Structure.
Parameters:
None.
Returns:
A string representing this UDF BEA Volume Strucutre.
### Response:
def record(self):
# type: () -> bytes
'''
A method to generate the string representing this UDF NSR Volume
Structure.
Parameters:
None.
Returns:
A string representing this UDF BEA Volume Strucutre.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF NSR Volume Structure not initialized')
return struct.pack(self.FMT, 0, self.standard_ident, 1, b'\x00' * 2041) |
def logistic(x, x0, k, L):
'''Logistic function.
See, e.g `Wikipedia <https://en.wikipedia.org/wiki/Logistic_function>`_.
:param x: point in x-axis
:type x: float or numpy.ndarray
:param float x0: sigmoid's midpoint
:param float k: steepness of the curve
:param float L: maximum value of the curve
:returns: function's value(s) in point **x**
:rtype: float or numpy.ndarray
'''
return L / (1 + exp(-k * (x - x0))) | Logistic function.
See, e.g `Wikipedia <https://en.wikipedia.org/wiki/Logistic_function>`_.
:param x: point in x-axis
:type x: float or numpy.ndarray
:param float x0: sigmoid's midpoint
:param float k: steepness of the curve
:param float L: maximum value of the curve
:returns: function's value(s) in point **x**
:rtype: float or numpy.ndarray | Below is the the instruction that describes the task:
### Input:
Logistic function.
See, e.g `Wikipedia <https://en.wikipedia.org/wiki/Logistic_function>`_.
:param x: point in x-axis
:type x: float or numpy.ndarray
:param float x0: sigmoid's midpoint
:param float k: steepness of the curve
:param float L: maximum value of the curve
:returns: function's value(s) in point **x**
:rtype: float or numpy.ndarray
### Response:
def logistic(x, x0, k, L):
'''Logistic function.
See, e.g `Wikipedia <https://en.wikipedia.org/wiki/Logistic_function>`_.
:param x: point in x-axis
:type x: float or numpy.ndarray
:param float x0: sigmoid's midpoint
:param float k: steepness of the curve
:param float L: maximum value of the curve
:returns: function's value(s) in point **x**
:rtype: float or numpy.ndarray
'''
return L / (1 + exp(-k * (x - x0))) |
def plotER(self,*args,**kwargs):
"""
NAME:
plotER
PURPOSE:
plot ER(.) along the orbit
INPUT:
bovy_plot.bovy_plot inputs
OUTPUT:
figure to output device
HISTORY:
2014-06-16 - Written - Bovy (IAS)
"""
if kwargs.pop('normed',False):
kwargs['d2']= 'ERnorm'
else:
kwargs['d2']= 'ER'
return self.plot(*args,**kwargs) | NAME:
plotER
PURPOSE:
plot ER(.) along the orbit
INPUT:
bovy_plot.bovy_plot inputs
OUTPUT:
figure to output device
HISTORY:
2014-06-16 - Written - Bovy (IAS) | Below is the the instruction that describes the task:
### Input:
NAME:
plotER
PURPOSE:
plot ER(.) along the orbit
INPUT:
bovy_plot.bovy_plot inputs
OUTPUT:
figure to output device
HISTORY:
2014-06-16 - Written - Bovy (IAS)
### Response:
def plotER(self,*args,**kwargs):
"""
NAME:
plotER
PURPOSE:
plot ER(.) along the orbit
INPUT:
bovy_plot.bovy_plot inputs
OUTPUT:
figure to output device
HISTORY:
2014-06-16 - Written - Bovy (IAS)
"""
if kwargs.pop('normed',False):
kwargs['d2']= 'ERnorm'
else:
kwargs['d2']= 'ER'
return self.plot(*args,**kwargs) |
def default_provenance():
"""The provenance for the default values.
:return: default provenance.
:rtype: str
"""
field = TextParameter()
field.name = tr('Provenance')
field.description = tr('The provenance of minimum needs')
field.value = 'The minimum needs are based on BNPB Perka 7/2008.'
return field | The provenance for the default values.
:return: default provenance.
:rtype: str | Below is the the instruction that describes the task:
### Input:
The provenance for the default values.
:return: default provenance.
:rtype: str
### Response:
def default_provenance():
"""The provenance for the default values.
:return: default provenance.
:rtype: str
"""
field = TextParameter()
field.name = tr('Provenance')
field.description = tr('The provenance of minimum needs')
field.value = 'The minimum needs are based on BNPB Perka 7/2008.'
return field |
def prepare_classpath():
"""
Ensures that certain subfolders of AIRFLOW_HOME are on the classpath
"""
if DAGS_FOLDER not in sys.path:
sys.path.append(DAGS_FOLDER)
# Add ./config/ for loading custom log parsers etc, or
# airflow_local_settings etc.
config_path = os.path.join(AIRFLOW_HOME, 'config')
if config_path not in sys.path:
sys.path.append(config_path)
if PLUGINS_FOLDER not in sys.path:
sys.path.append(PLUGINS_FOLDER) | Ensures that certain subfolders of AIRFLOW_HOME are on the classpath | Below is the the instruction that describes the task:
### Input:
Ensures that certain subfolders of AIRFLOW_HOME are on the classpath
### Response:
def prepare_classpath():
"""
Ensures that certain subfolders of AIRFLOW_HOME are on the classpath
"""
if DAGS_FOLDER not in sys.path:
sys.path.append(DAGS_FOLDER)
# Add ./config/ for loading custom log parsers etc, or
# airflow_local_settings etc.
config_path = os.path.join(AIRFLOW_HOME, 'config')
if config_path not in sys.path:
sys.path.append(config_path)
if PLUGINS_FOLDER not in sys.path:
sys.path.append(PLUGINS_FOLDER) |
def _init_trace_logging(self, app):
"""
Sets up trace logging unless ``APPINSIGHTS_DISABLE_TRACE_LOGGING`` is
set in the Flask config.
Args:
app (flask.Flask). the Flask application for which to initialize the extension.
"""
enabled = not app.config.get(CONF_DISABLE_TRACE_LOGGING, False)
if not enabled:
return
self._trace_log_handler = LoggingHandler(
self._key, telemetry_channel=self._channel)
app.logger.addHandler(self._trace_log_handler) | Sets up trace logging unless ``APPINSIGHTS_DISABLE_TRACE_LOGGING`` is
set in the Flask config.
Args:
app (flask.Flask). the Flask application for which to initialize the extension. | Below is the the instruction that describes the task:
### Input:
Sets up trace logging unless ``APPINSIGHTS_DISABLE_TRACE_LOGGING`` is
set in the Flask config.
Args:
app (flask.Flask). the Flask application for which to initialize the extension.
### Response:
def _init_trace_logging(self, app):
"""
Sets up trace logging unless ``APPINSIGHTS_DISABLE_TRACE_LOGGING`` is
set in the Flask config.
Args:
app (flask.Flask). the Flask application for which to initialize the extension.
"""
enabled = not app.config.get(CONF_DISABLE_TRACE_LOGGING, False)
if not enabled:
return
self._trace_log_handler = LoggingHandler(
self._key, telemetry_channel=self._channel)
app.logger.addHandler(self._trace_log_handler) |
def collect_files(self):
"""
Return collected files links
:rtype: list[static_bundle.files.StaticFileResult]
"""
self.files = []
for bundle in self.bundles:
bundle.init_build(self, self.builder)
bundle_files = bundle.prepare()
self.files.extend(bundle_files)
return self | Return collected files links
:rtype: list[static_bundle.files.StaticFileResult] | Below is the the instruction that describes the task:
### Input:
Return collected files links
:rtype: list[static_bundle.files.StaticFileResult]
### Response:
def collect_files(self):
"""
Return collected files links
:rtype: list[static_bundle.files.StaticFileResult]
"""
self.files = []
for bundle in self.bundles:
bundle.init_build(self, self.builder)
bundle_files = bundle.prepare()
self.files.extend(bundle_files)
return self |
def get_user_pubkeys(users):
'''
Retrieve a set of public keys from GitHub for the specified list of users.
Expects input in list format. Optionally, a value in the list may be a dict
whose value is a list of key IDs to be returned. If this is not done, then
all keys will be returned.
Some example data structures that coupld be passed in would look like:
.. code_block:: yaml
['user1', 'user2', 'user3']
[
'user1': [
'12345',
'67890',
],
'user2',
'user3',
]
'''
if not isinstance(users, list):
return {'Error': 'A list of users is expected'}
ret = {}
for user in users:
key_ids = []
if isinstance(user, dict):
tmp_user = next(six.iterkeys(user))
key_ids = user[tmp_user]
user = tmp_user
url = 'https://api.github.com/users/{0}/keys'.format(user)
result = salt.utils.http.query(
url,
'GET',
decode=False,
text=True,
)
keys = salt.utils.json.loads(result['text'])
ret[user] = {}
for key in keys:
if key_ids:
if six.text_type(key['id']) in key_ids:
ret[user][key['id']] = key['key']
else:
ret[user][key['id']] = key['key']
return ret | Retrieve a set of public keys from GitHub for the specified list of users.
Expects input in list format. Optionally, a value in the list may be a dict
whose value is a list of key IDs to be returned. If this is not done, then
all keys will be returned.
Some example data structures that coupld be passed in would look like:
.. code_block:: yaml
['user1', 'user2', 'user3']
[
'user1': [
'12345',
'67890',
],
'user2',
'user3',
] | Below is the the instruction that describes the task:
### Input:
Retrieve a set of public keys from GitHub for the specified list of users.
Expects input in list format. Optionally, a value in the list may be a dict
whose value is a list of key IDs to be returned. If this is not done, then
all keys will be returned.
Some example data structures that coupld be passed in would look like:
.. code_block:: yaml
['user1', 'user2', 'user3']
[
'user1': [
'12345',
'67890',
],
'user2',
'user3',
]
### Response:
def get_user_pubkeys(users):
'''
Retrieve a set of public keys from GitHub for the specified list of users.
Expects input in list format. Optionally, a value in the list may be a dict
whose value is a list of key IDs to be returned. If this is not done, then
all keys will be returned.
Some example data structures that coupld be passed in would look like:
.. code_block:: yaml
['user1', 'user2', 'user3']
[
'user1': [
'12345',
'67890',
],
'user2',
'user3',
]
'''
if not isinstance(users, list):
return {'Error': 'A list of users is expected'}
ret = {}
for user in users:
key_ids = []
if isinstance(user, dict):
tmp_user = next(six.iterkeys(user))
key_ids = user[tmp_user]
user = tmp_user
url = 'https://api.github.com/users/{0}/keys'.format(user)
result = salt.utils.http.query(
url,
'GET',
decode=False,
text=True,
)
keys = salt.utils.json.loads(result['text'])
ret[user] = {}
for key in keys:
if key_ids:
if six.text_type(key['id']) in key_ids:
ret[user][key['id']] = key['key']
else:
ret[user][key['id']] = key['key']
return ret |
def taskfile_created_data(file_, role):
"""Return the data for created date
:param file_: the file that holds the data
:type file_: :class:`jukeboxcore.djadapter.models.File`
:param role: item data role
:type role: QtCore.Qt.ItemDataRole
:returns: data for the created date
:rtype: depending on role
:raises: None
"""
if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
dt = file_.date_created
return dt_to_qdatetime(dt) | Return the data for created date
:param file_: the file that holds the data
:type file_: :class:`jukeboxcore.djadapter.models.File`
:param role: item data role
:type role: QtCore.Qt.ItemDataRole
:returns: data for the created date
:rtype: depending on role
:raises: None | Below is the the instruction that describes the task:
### Input:
Return the data for created date
:param file_: the file that holds the data
:type file_: :class:`jukeboxcore.djadapter.models.File`
:param role: item data role
:type role: QtCore.Qt.ItemDataRole
:returns: data for the created date
:rtype: depending on role
:raises: None
### Response:
def taskfile_created_data(file_, role):
"""Return the data for created date
:param file_: the file that holds the data
:type file_: :class:`jukeboxcore.djadapter.models.File`
:param role: item data role
:type role: QtCore.Qt.ItemDataRole
:returns: data for the created date
:rtype: depending on role
:raises: None
"""
if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
dt = file_.date_created
return dt_to_qdatetime(dt) |
def prepare(self, engine, mode, items) -> None:
"""
Create a unique transaction id and dumps the items into a cached request object.
"""
self.tx_id = str(uuid.uuid4()).replace("-", "")
self.engine = engine
self.mode = mode
self.items = items
self._prepare_request() | Create a unique transaction id and dumps the items into a cached request object. | Below is the the instruction that describes the task:
### Input:
Create a unique transaction id and dumps the items into a cached request object.
### Response:
def prepare(self, engine, mode, items) -> None:
"""
Create a unique transaction id and dumps the items into a cached request object.
"""
self.tx_id = str(uuid.uuid4()).replace("-", "")
self.engine = engine
self.mode = mode
self.items = items
self._prepare_request() |
def refreshButton(self):
"""
Refreshes the button for this toolbar.
"""
collapsed = self.isCollapsed()
btn = self._collapseButton
if not btn:
return
btn.setMaximumSize(MAX_SIZE, MAX_SIZE)
# set up a vertical scrollbar
if self.orientation() == Qt.Vertical:
btn.setMaximumHeight(12)
else:
btn.setMaximumWidth(12)
icon = ''
# collapse/expand a vertical toolbar
if self.orientation() == Qt.Vertical:
if collapsed:
self.setFixedWidth(self._collapsedSize)
btn.setMaximumHeight(MAX_SIZE)
btn.setArrowType(Qt.RightArrow)
else:
self.setMaximumWidth(MAX_SIZE)
self._precollapseSize = None
btn.setMaximumHeight(12)
btn.setArrowType(Qt.LeftArrow)
else:
if collapsed:
self.setFixedHeight(self._collapsedSize)
btn.setMaximumWidth(MAX_SIZE)
btn.setArrowType(Qt.DownArrow)
else:
self.setMaximumHeight(1000)
self._precollapseSize = None
btn.setMaximumWidth(12)
btn.setArrowType(Qt.UpArrow)
for index in range(1, self.layout().count()):
item = self.layout().itemAt(index)
if not item.widget():
continue
if collapsed:
item.widget().setMaximumSize(0, 0)
else:
item.widget().setMaximumSize(MAX_SIZE, MAX_SIZE)
if not self.isCollapsable():
btn.hide()
else:
btn.show() | Refreshes the button for this toolbar. | Below is the the instruction that describes the task:
### Input:
Refreshes the button for this toolbar.
### Response:
def refreshButton(self):
"""
Refreshes the button for this toolbar.
"""
collapsed = self.isCollapsed()
btn = self._collapseButton
if not btn:
return
btn.setMaximumSize(MAX_SIZE, MAX_SIZE)
# set up a vertical scrollbar
if self.orientation() == Qt.Vertical:
btn.setMaximumHeight(12)
else:
btn.setMaximumWidth(12)
icon = ''
# collapse/expand a vertical toolbar
if self.orientation() == Qt.Vertical:
if collapsed:
self.setFixedWidth(self._collapsedSize)
btn.setMaximumHeight(MAX_SIZE)
btn.setArrowType(Qt.RightArrow)
else:
self.setMaximumWidth(MAX_SIZE)
self._precollapseSize = None
btn.setMaximumHeight(12)
btn.setArrowType(Qt.LeftArrow)
else:
if collapsed:
self.setFixedHeight(self._collapsedSize)
btn.setMaximumWidth(MAX_SIZE)
btn.setArrowType(Qt.DownArrow)
else:
self.setMaximumHeight(1000)
self._precollapseSize = None
btn.setMaximumWidth(12)
btn.setArrowType(Qt.UpArrow)
for index in range(1, self.layout().count()):
item = self.layout().itemAt(index)
if not item.widget():
continue
if collapsed:
item.widget().setMaximumSize(0, 0)
else:
item.widget().setMaximumSize(MAX_SIZE, MAX_SIZE)
if not self.isCollapsable():
btn.hide()
else:
btn.show() |
def __CheckAndUnifyQueryFormat(self, query_body):
"""Checks and unifies the format of the query body.
:raises TypeError: If query_body is not of expected type (depending on the query compatibility mode).
:raises ValueError: If query_body is a dict but doesn\'t have valid query text.
:raises SystemError: If the query compatibility mode is undefined.
:param (str or dict) query_body:
:return:
The formatted query body.
:rtype:
dict or string
"""
if (self._query_compatibility_mode == CosmosClient._QueryCompatibilityMode.Default or
self._query_compatibility_mode == CosmosClient._QueryCompatibilityMode.Query):
if not isinstance(query_body, dict) and not isinstance(query_body, six.string_types):
raise TypeError('query body must be a dict or string.')
if isinstance(query_body, dict) and not query_body.get('query'):
raise ValueError('query body must have valid query text with key "query".')
if isinstance(query_body, six.string_types):
return {'query': query_body}
elif (self._query_compatibility_mode == CosmosClient._QueryCompatibilityMode.SqlQuery and
not isinstance(query_body, six.string_types)):
raise TypeError('query body must be a string.')
else:
raise SystemError('Unexpected query compatibility mode.')
return query_body | Checks and unifies the format of the query body.
:raises TypeError: If query_body is not of expected type (depending on the query compatibility mode).
:raises ValueError: If query_body is a dict but doesn\'t have valid query text.
:raises SystemError: If the query compatibility mode is undefined.
:param (str or dict) query_body:
:return:
The formatted query body.
:rtype:
dict or string | Below is the the instruction that describes the task:
### Input:
Checks and unifies the format of the query body.
:raises TypeError: If query_body is not of expected type (depending on the query compatibility mode).
:raises ValueError: If query_body is a dict but doesn\'t have valid query text.
:raises SystemError: If the query compatibility mode is undefined.
:param (str or dict) query_body:
:return:
The formatted query body.
:rtype:
dict or string
### Response:
def __CheckAndUnifyQueryFormat(self, query_body):
"""Checks and unifies the format of the query body.
:raises TypeError: If query_body is not of expected type (depending on the query compatibility mode).
:raises ValueError: If query_body is a dict but doesn\'t have valid query text.
:raises SystemError: If the query compatibility mode is undefined.
:param (str or dict) query_body:
:return:
The formatted query body.
:rtype:
dict or string
"""
if (self._query_compatibility_mode == CosmosClient._QueryCompatibilityMode.Default or
self._query_compatibility_mode == CosmosClient._QueryCompatibilityMode.Query):
if not isinstance(query_body, dict) and not isinstance(query_body, six.string_types):
raise TypeError('query body must be a dict or string.')
if isinstance(query_body, dict) and not query_body.get('query'):
raise ValueError('query body must have valid query text with key "query".')
if isinstance(query_body, six.string_types):
return {'query': query_body}
elif (self._query_compatibility_mode == CosmosClient._QueryCompatibilityMode.SqlQuery and
not isinstance(query_body, six.string_types)):
raise TypeError('query body must be a string.')
else:
raise SystemError('Unexpected query compatibility mode.')
return query_body |
def process_cmap(cmap, ncolors=None, provider=None, categorical=False):
"""
Convert valid colormap specifications to a list of colors.
"""
providers_checked="matplotlib, bokeh, or colorcet" if provider is None else provider
if isinstance(cmap, Cycle):
palette = [rgb2hex(c) if isinstance(c, tuple) else c for c in cmap.values]
elif isinstance(cmap, list):
palette = cmap
elif isinstance(cmap, basestring):
mpl_cmaps = _list_cmaps('matplotlib')
bk_cmaps = _list_cmaps('bokeh')
cet_cmaps = _list_cmaps('colorcet')
if provider=='matplotlib' or (provider is None and (cmap in mpl_cmaps or cmap.lower() in mpl_cmaps)):
palette = mplcmap_to_palette(cmap, ncolors, categorical)
elif provider=='bokeh' or (provider is None and (cmap in bk_cmaps or cmap.capitalize() in bk_cmaps)):
palette = bokeh_palette_to_palette(cmap, ncolors, categorical)
elif provider=='colorcet' or (provider is None and cmap in cet_cmaps):
from colorcet import palette
if cmap.endswith('_r'):
palette = list(reversed(palette[cmap[:-2]]))
else:
palette = palette[cmap]
else:
raise ValueError("Supplied cmap %s not found among %s colormaps." %
(cmap,providers_checked))
else:
try:
# Try processing as matplotlib colormap
palette = mplcmap_to_palette(cmap, ncolors)
except:
palette = None
if not isinstance(palette, list):
raise TypeError("cmap argument %s expects a list, Cycle or valid %s colormap or palette."
% (cmap,providers_checked))
if ncolors and len(palette) != ncolors:
return [palette[i%len(palette)] for i in range(ncolors)]
return palette | Convert valid colormap specifications to a list of colors. | Below is the the instruction that describes the task:
### Input:
Convert valid colormap specifications to a list of colors.
### Response:
def process_cmap(cmap, ncolors=None, provider=None, categorical=False):
"""
Convert valid colormap specifications to a list of colors.
"""
providers_checked="matplotlib, bokeh, or colorcet" if provider is None else provider
if isinstance(cmap, Cycle):
palette = [rgb2hex(c) if isinstance(c, tuple) else c for c in cmap.values]
elif isinstance(cmap, list):
palette = cmap
elif isinstance(cmap, basestring):
mpl_cmaps = _list_cmaps('matplotlib')
bk_cmaps = _list_cmaps('bokeh')
cet_cmaps = _list_cmaps('colorcet')
if provider=='matplotlib' or (provider is None and (cmap in mpl_cmaps or cmap.lower() in mpl_cmaps)):
palette = mplcmap_to_palette(cmap, ncolors, categorical)
elif provider=='bokeh' or (provider is None and (cmap in bk_cmaps or cmap.capitalize() in bk_cmaps)):
palette = bokeh_palette_to_palette(cmap, ncolors, categorical)
elif provider=='colorcet' or (provider is None and cmap in cet_cmaps):
from colorcet import palette
if cmap.endswith('_r'):
palette = list(reversed(palette[cmap[:-2]]))
else:
palette = palette[cmap]
else:
raise ValueError("Supplied cmap %s not found among %s colormaps." %
(cmap,providers_checked))
else:
try:
# Try processing as matplotlib colormap
palette = mplcmap_to_palette(cmap, ncolors)
except:
palette = None
if not isinstance(palette, list):
raise TypeError("cmap argument %s expects a list, Cycle or valid %s colormap or palette."
% (cmap,providers_checked))
if ncolors and len(palette) != ncolors:
return [palette[i%len(palette)] for i in range(ncolors)]
return palette |
def generate_dynamodb_tables():
"""Create the Blockade DynamoDB tables."""
logger.debug("[#] Setting up DynamoDB tables")
client = boto3.client('dynamodb', region_name=PRIMARY_REGION)
existing_tables = client.list_tables()['TableNames']
responses = list()
for label in DYNAMODB_TABLES:
if label in existing_tables:
logger.debug("[*] Table %s already exists" % (label))
continue
kwargs = {
'TableName': label,
'ProvisionedThroughput': {
'ReadCapacityUnits': 5,
'WriteCapacityUnits': 5
}
}
kwargs.update(DYNAMODB_SCHEMAS[label])
response = client.create_table(**kwargs)
responses.append(response)
logger.debug("[#] Successfully setup DynamoDB table %s" % (label))
logger.info("[#] Successfully setup DynamoDB tables")
return responses | Create the Blockade DynamoDB tables. | Below is the the instruction that describes the task:
### Input:
Create the Blockade DynamoDB tables.
### Response:
def generate_dynamodb_tables():
"""Create the Blockade DynamoDB tables."""
logger.debug("[#] Setting up DynamoDB tables")
client = boto3.client('dynamodb', region_name=PRIMARY_REGION)
existing_tables = client.list_tables()['TableNames']
responses = list()
for label in DYNAMODB_TABLES:
if label in existing_tables:
logger.debug("[*] Table %s already exists" % (label))
continue
kwargs = {
'TableName': label,
'ProvisionedThroughput': {
'ReadCapacityUnits': 5,
'WriteCapacityUnits': 5
}
}
kwargs.update(DYNAMODB_SCHEMAS[label])
response = client.create_table(**kwargs)
responses.append(response)
logger.debug("[#] Successfully setup DynamoDB table %s" % (label))
logger.info("[#] Successfully setup DynamoDB tables")
return responses |
def isStochastic(matrix):
"""Check that ``matrix`` is row stochastic.
Returns
=======
is_stochastic : bool
``True`` if ``matrix`` is row stochastic, ``False`` otherwise.
"""
try:
absdiff = (_np.abs(matrix.sum(axis=1) - _np.ones(matrix.shape[0])))
except AttributeError:
matrix = _np.array(matrix)
absdiff = (_np.abs(matrix.sum(axis=1) - _np.ones(matrix.shape[0])))
return (absdiff.max() <= 10*_np.spacing(_np.float64(1))) | Check that ``matrix`` is row stochastic.
Returns
=======
is_stochastic : bool
        ``True`` if ``matrix`` is row stochastic, ``False`` otherwise. | Below is the instruction that describes the task:
### Input:
Check that ``matrix`` is row stochastic.
Returns
=======
is_stochastic : bool
``True`` if ``matrix`` is row stochastic, ``False`` otherwise.
### Response:
def isStochastic(matrix):
"""Check that ``matrix`` is row stochastic.
Returns
=======
is_stochastic : bool
``True`` if ``matrix`` is row stochastic, ``False`` otherwise.
"""
try:
absdiff = (_np.abs(matrix.sum(axis=1) - _np.ones(matrix.shape[0])))
except AttributeError:
matrix = _np.array(matrix)
absdiff = (_np.abs(matrix.sum(axis=1) - _np.ones(matrix.shape[0])))
return (absdiff.max() <= 10*_np.spacing(_np.float64(1))) |
def ObtenerTablaParametros(self, tipo_recurso, sep="||"):
"Devuelve un array de elementos que tienen id y descripción"
if not self.client:
self.Conectar()
self.response = self.client("parametros", "v1", tipo_recurso)
result = json.loads(self.response)
ret = {}
if result['success']:
data = result['data']
# armo un diccionario con los datos devueltos:
key = [k for k in data[0].keys() if k.startswith("id")][0]
val = [k for k in data[0].keys() if k.startswith("desc")][0]
for it in data:
ret[it[key]] = it[val]
self.data = data
else:
error = result['error']
self.Excepcion = error['mensaje']
if sep:
return ["%s%%s%s%%s%s" % (sep, sep, sep) % it for it in sorted(ret.items())]
else:
            return ret | Devuelve un array de elementos que tienen id y descripción | Below is the instruction that describes the task:
### Input:
Devuelve un array de elementos que tienen id y descripción
### Response:
def ObtenerTablaParametros(self, tipo_recurso, sep="||"):
"Devuelve un array de elementos que tienen id y descripción"
if not self.client:
self.Conectar()
self.response = self.client("parametros", "v1", tipo_recurso)
result = json.loads(self.response)
ret = {}
if result['success']:
data = result['data']
# armo un diccionario con los datos devueltos:
key = [k for k in data[0].keys() if k.startswith("id")][0]
val = [k for k in data[0].keys() if k.startswith("desc")][0]
for it in data:
ret[it[key]] = it[val]
self.data = data
else:
error = result['error']
self.Excepcion = error['mensaje']
if sep:
return ["%s%%s%s%%s%s" % (sep, sep, sep) % it for it in sorted(ret.items())]
else:
return ret |
def reload_instance(self, instance_id,
post_uri=None,
ssh_keys=None,
image_id=None):
"""Perform an OS reload of an instance.
:param integer instance_id: the instance ID to reload
:param string post_url: The URI of the post-install script to run
after reload
:param list ssh_keys: The SSH keys to add to the root user
:param int image_id: The GUID of the image to load onto the server
.. warning::
This will reformat the primary drive.
Post-provision script MUST be HTTPS for it to be executed.
Example::
# Reload instance ID 12345 then run a custom post-provision script.
# Post-provision script MUST be HTTPS for it to be executed.
post_uri = 'https://somehost.com/bootstrap.sh'
vsi = mgr.reload_instance(12345, post_uri=post_url)
"""
config = {}
if post_uri:
config['customProvisionScriptUri'] = post_uri
if ssh_keys:
config['sshKeyIds'] = [key_id for key_id in ssh_keys]
if image_id:
config['imageTemplateId'] = image_id
return self.client.call('Virtual_Guest', 'reloadOperatingSystem',
'FORCE', config, id=instance_id) | Perform an OS reload of an instance.
:param integer instance_id: the instance ID to reload
:param string post_url: The URI of the post-install script to run
after reload
:param list ssh_keys: The SSH keys to add to the root user
:param int image_id: The GUID of the image to load onto the server
.. warning::
This will reformat the primary drive.
Post-provision script MUST be HTTPS for it to be executed.
Example::
# Reload instance ID 12345 then run a custom post-provision script.
# Post-provision script MUST be HTTPS for it to be executed.
post_uri = 'https://somehost.com/bootstrap.sh'
        vsi = mgr.reload_instance(12345, post_uri=post_url) | Below is the instruction that describes the task:
### Input:
Perform an OS reload of an instance.
:param integer instance_id: the instance ID to reload
:param string post_url: The URI of the post-install script to run
after reload
:param list ssh_keys: The SSH keys to add to the root user
:param int image_id: The GUID of the image to load onto the server
.. warning::
This will reformat the primary drive.
Post-provision script MUST be HTTPS for it to be executed.
Example::
# Reload instance ID 12345 then run a custom post-provision script.
# Post-provision script MUST be HTTPS for it to be executed.
post_uri = 'https://somehost.com/bootstrap.sh'
vsi = mgr.reload_instance(12345, post_uri=post_url)
### Response:
def reload_instance(self, instance_id,
post_uri=None,
ssh_keys=None,
image_id=None):
"""Perform an OS reload of an instance.
:param integer instance_id: the instance ID to reload
:param string post_url: The URI of the post-install script to run
after reload
:param list ssh_keys: The SSH keys to add to the root user
:param int image_id: The GUID of the image to load onto the server
.. warning::
This will reformat the primary drive.
Post-provision script MUST be HTTPS for it to be executed.
Example::
# Reload instance ID 12345 then run a custom post-provision script.
# Post-provision script MUST be HTTPS for it to be executed.
post_uri = 'https://somehost.com/bootstrap.sh'
vsi = mgr.reload_instance(12345, post_uri=post_url)
"""
config = {}
if post_uri:
config['customProvisionScriptUri'] = post_uri
if ssh_keys:
config['sshKeyIds'] = [key_id for key_id in ssh_keys]
if image_id:
config['imageTemplateId'] = image_id
return self.client.call('Virtual_Guest', 'reloadOperatingSystem',
'FORCE', config, id=instance_id) |
def serialize(self, value):
"""Takes a datetime object and returns a string"""
if isinstance(value, str):
return value
        return value.strftime(DATETIME_FORMAT) | Takes a datetime object and returns a string | Below is the instruction that describes the task:
### Input:
Takes a datetime object and returns a string
### Response:
def serialize(self, value):
"""Takes a datetime object and returns a string"""
if isinstance(value, str):
return value
return value.strftime(DATETIME_FORMAT) |
def gym_space_spec(gym_space):
"""Returns a reading spec of a gym space.
NOTE: Only implemented currently for Box and Discrete.
Args:
gym_space: instance of gym.spaces whose spec we want.
Returns:
Reading spec for that space.
Raises:
NotImplementedError: For spaces whose reading spec we haven't implemented.
"""
# First try to determine the type.
try:
tf_dtype = tf.as_dtype(gym_space.dtype)
except TypeError as e:
tf.logging.error("Cannot convert space's type [%s] to tf.dtype",
gym_space.dtype)
raise e
# Now hand it over to the specialized functions.
if isinstance(gym_space, Box):
return box_space_spec(gym_space, tf_dtype)
elif isinstance(gym_space, Discrete):
return discrete_space_spec(gym_space, tf_dtype)
else:
raise NotImplementedError | Returns a reading spec of a gym space.
NOTE: Only implemented currently for Box and Discrete.
Args:
gym_space: instance of gym.spaces whose spec we want.
Returns:
Reading spec for that space.
Raises:
    NotImplementedError: For spaces whose reading spec we haven't implemented. | Below is the instruction that describes the task:
### Input:
Returns a reading spec of a gym space.
NOTE: Only implemented currently for Box and Discrete.
Args:
gym_space: instance of gym.spaces whose spec we want.
Returns:
Reading spec for that space.
Raises:
NotImplementedError: For spaces whose reading spec we haven't implemented.
### Response:
def gym_space_spec(gym_space):
"""Returns a reading spec of a gym space.
NOTE: Only implemented currently for Box and Discrete.
Args:
gym_space: instance of gym.spaces whose spec we want.
Returns:
Reading spec for that space.
Raises:
NotImplementedError: For spaces whose reading spec we haven't implemented.
"""
# First try to determine the type.
try:
tf_dtype = tf.as_dtype(gym_space.dtype)
except TypeError as e:
tf.logging.error("Cannot convert space's type [%s] to tf.dtype",
gym_space.dtype)
raise e
# Now hand it over to the specialized functions.
if isinstance(gym_space, Box):
return box_space_spec(gym_space, tf_dtype)
elif isinstance(gym_space, Discrete):
return discrete_space_spec(gym_space, tf_dtype)
else:
raise NotImplementedError |
def _load(self, value: Any):
"""
Load the value for the field, run validators and return the value.
Subclasses can override this to provide custom load logic.
:param value: value of the field
"""
if value in self.empty_values:
# If a default has been set for the field return it
if self.default is not None:
default = self.default
value = default() if callable(default) else default
return value
# If no default is set and this field is required
elif self.required:
self.fail('required')
# In all other cases just return `None` as we do not want to
# run validations against an empty value
else:
return None
# If choices exist then validate that value is be one of the choices
if self.choices:
value_list = value
if not isinstance(value, (list, tuple)):
value_list = [value]
for v in value_list:
if v not in self.choice_dict:
self.fail(
'invalid_choice', value=v,
choices=list(self.choice_dict))
# Cast and Validate the value for this Field
value = self._cast_to_type(value)
# Call the rest of the validators defined for this Field
self._run_validators(value)
return value | Load the value for the field, run validators and return the value.
Subclasses can override this to provide custom load logic.
        :param value: value of the field | Below is the instruction that describes the task:
### Input:
Load the value for the field, run validators and return the value.
Subclasses can override this to provide custom load logic.
:param value: value of the field
### Response:
def _load(self, value: Any):
"""
Load the value for the field, run validators and return the value.
Subclasses can override this to provide custom load logic.
:param value: value of the field
"""
if value in self.empty_values:
# If a default has been set for the field return it
if self.default is not None:
default = self.default
value = default() if callable(default) else default
return value
# If no default is set and this field is required
elif self.required:
self.fail('required')
# In all other cases just return `None` as we do not want to
# run validations against an empty value
else:
return None
# If choices exist then validate that value is be one of the choices
if self.choices:
value_list = value
if not isinstance(value, (list, tuple)):
value_list = [value]
for v in value_list:
if v not in self.choice_dict:
self.fail(
'invalid_choice', value=v,
choices=list(self.choice_dict))
# Cast and Validate the value for this Field
value = self._cast_to_type(value)
# Call the rest of the validators defined for this Field
self._run_validators(value)
return value |
def prepare_vector_layer(layer):
"""This function will prepare the layer to be used in InaSAFE :
* Make a local copy of the layer.
* Make sure that we have an InaSAFE ID column.
* Rename fields according to our definitions.
* Remove fields which are not used.
:param layer: The layer to prepare.
:type layer: QgsVectorLayer
:return: Cleaned memory layer.
:rtype: QgsVectorLayer
.. versionadded:: 4.0
"""
output_layer_name = prepare_vector_steps['output_layer_name']
output_layer_name = output_layer_name % layer.keywords['layer_purpose']
if not layer.keywords.get('inasafe_fields'):
msg = 'inasafe_fields is missing in keywords from %s' % layer.name()
raise InvalidKeywordsForProcessingAlgorithm(msg)
cleaned = create_memory_layer(
output_layer_name, layer.geometryType(), layer.crs(), layer.fields())
# We transfer keywords to the output.
cleaned.keywords = copy_layer_keywords(layer.keywords)
copy_layer(layer, cleaned)
_remove_features(cleaned)
# After removing rows, let's check if there is still a feature.
request = QgsFeatureRequest().setFlags(QgsFeatureRequest.NoGeometry)
iterator = cleaned.getFeatures(request)
try:
next(iterator)
except StopIteration:
LOGGER.warning(
tr('No feature has been found in the {purpose}'
.format(purpose=layer.keywords['layer_purpose'])))
# Realtime may have no data in the extent when doing a multiexposure
# analysis. We still want the IF. I disabled the exception. ET 19/02/18
# raise NoFeaturesInExtentError
_add_id_column(cleaned)
clean_inasafe_fields(cleaned)
if _size_is_needed(cleaned):
LOGGER.info(
'We noticed some counts in your exposure layer. Before to update '
'geometries, we compute the original size for each feature.')
run_single_post_processor(cleaned, post_processor_size)
if cleaned.keywords['layer_purpose'] == 'exposure':
fields = cleaned.keywords['inasafe_fields']
if exposure_type_field['key'] not in fields:
_add_default_exposure_class(cleaned)
# Check value mapping
_check_value_mapping(cleaned)
cleaned.keywords['title'] = output_layer_name
check_layer(cleaned)
return cleaned | This function will prepare the layer to be used in InaSAFE :
* Make a local copy of the layer.
* Make sure that we have an InaSAFE ID column.
* Rename fields according to our definitions.
* Remove fields which are not used.
:param layer: The layer to prepare.
:type layer: QgsVectorLayer
:return: Cleaned memory layer.
:rtype: QgsVectorLayer
    .. versionadded:: 4.0 | Below is the instruction that describes the task:
### Input:
This function will prepare the layer to be used in InaSAFE :
* Make a local copy of the layer.
* Make sure that we have an InaSAFE ID column.
* Rename fields according to our definitions.
* Remove fields which are not used.
:param layer: The layer to prepare.
:type layer: QgsVectorLayer
:return: Cleaned memory layer.
:rtype: QgsVectorLayer
.. versionadded:: 4.0
### Response:
def prepare_vector_layer(layer):
"""This function will prepare the layer to be used in InaSAFE :
* Make a local copy of the layer.
* Make sure that we have an InaSAFE ID column.
* Rename fields according to our definitions.
* Remove fields which are not used.
:param layer: The layer to prepare.
:type layer: QgsVectorLayer
:return: Cleaned memory layer.
:rtype: QgsVectorLayer
.. versionadded:: 4.0
"""
output_layer_name = prepare_vector_steps['output_layer_name']
output_layer_name = output_layer_name % layer.keywords['layer_purpose']
if not layer.keywords.get('inasafe_fields'):
msg = 'inasafe_fields is missing in keywords from %s' % layer.name()
raise InvalidKeywordsForProcessingAlgorithm(msg)
cleaned = create_memory_layer(
output_layer_name, layer.geometryType(), layer.crs(), layer.fields())
# We transfer keywords to the output.
cleaned.keywords = copy_layer_keywords(layer.keywords)
copy_layer(layer, cleaned)
_remove_features(cleaned)
# After removing rows, let's check if there is still a feature.
request = QgsFeatureRequest().setFlags(QgsFeatureRequest.NoGeometry)
iterator = cleaned.getFeatures(request)
try:
next(iterator)
except StopIteration:
LOGGER.warning(
tr('No feature has been found in the {purpose}'
.format(purpose=layer.keywords['layer_purpose'])))
# Realtime may have no data in the extent when doing a multiexposure
# analysis. We still want the IF. I disabled the exception. ET 19/02/18
# raise NoFeaturesInExtentError
_add_id_column(cleaned)
clean_inasafe_fields(cleaned)
if _size_is_needed(cleaned):
LOGGER.info(
'We noticed some counts in your exposure layer. Before to update '
'geometries, we compute the original size for each feature.')
run_single_post_processor(cleaned, post_processor_size)
if cleaned.keywords['layer_purpose'] == 'exposure':
fields = cleaned.keywords['inasafe_fields']
if exposure_type_field['key'] not in fields:
_add_default_exposure_class(cleaned)
# Check value mapping
_check_value_mapping(cleaned)
cleaned.keywords['title'] = output_layer_name
check_layer(cleaned)
return cleaned |
def decision_function(self, X):
"Decision function i.e. the raw data of the prediction"
self._X = Model.convert_features(X)
self._eval()
        return self._ind[0].hy | Decision function i.e. the raw data of the prediction | Below is the instruction that describes the task:
### Input:
Decision function i.e. the raw data of the prediction
### Response:
def decision_function(self, X):
"Decision function i.e. the raw data of the prediction"
self._X = Model.convert_features(X)
self._eval()
return self._ind[0].hy |
def transform(params):
"""
Transforms an heterogeneous map of params into a CloudStack
ready mapping of parameter to values.
It handles lists and dicts.
>>> p = {"a": 1, "b": "foo", "c": ["eggs", "spam"], "d": {"key": "value"}}
>>> transform(p)
>>> print(p)
{'a': '1', 'b': 'foo', 'c': 'eggs,spam', 'd[0].key': 'value'}
"""
for key, value in list(params.items()):
if value is None:
params.pop(key)
continue
if isinstance(value, (string_type, binary_type)):
continue
if isinstance(value, integer_types):
params[key] = text_type(value)
elif isinstance(value, (list, tuple, set, dict)):
if not value:
params.pop(key)
else:
if isinstance(value, dict):
value = [value]
if isinstance(value, set):
value = list(value)
if not isinstance(value[0], dict):
params[key] = ",".join(value)
else:
params.pop(key)
for index, val in enumerate(value):
for name, v in val.items():
k = "%s[%d].%s" % (key, index, name)
params[k] = text_type(v)
else:
raise ValueError(type(value)) | Transforms an heterogeneous map of params into a CloudStack
ready mapping of parameter to values.
It handles lists and dicts.
>>> p = {"a": 1, "b": "foo", "c": ["eggs", "spam"], "d": {"key": "value"}}
>>> transform(p)
>>> print(p)
    {'a': '1', 'b': 'foo', 'c': 'eggs,spam', 'd[0].key': 'value'} | Below is the instruction that describes the task:
### Input:
Transforms an heterogeneous map of params into a CloudStack
ready mapping of parameter to values.
It handles lists and dicts.
>>> p = {"a": 1, "b": "foo", "c": ["eggs", "spam"], "d": {"key": "value"}}
>>> transform(p)
>>> print(p)
{'a': '1', 'b': 'foo', 'c': 'eggs,spam', 'd[0].key': 'value'}
### Response:
def transform(params):
"""
Transforms an heterogeneous map of params into a CloudStack
ready mapping of parameter to values.
It handles lists and dicts.
>>> p = {"a": 1, "b": "foo", "c": ["eggs", "spam"], "d": {"key": "value"}}
>>> transform(p)
>>> print(p)
{'a': '1', 'b': 'foo', 'c': 'eggs,spam', 'd[0].key': 'value'}
"""
for key, value in list(params.items()):
if value is None:
params.pop(key)
continue
if isinstance(value, (string_type, binary_type)):
continue
if isinstance(value, integer_types):
params[key] = text_type(value)
elif isinstance(value, (list, tuple, set, dict)):
if not value:
params.pop(key)
else:
if isinstance(value, dict):
value = [value]
if isinstance(value, set):
value = list(value)
if not isinstance(value[0], dict):
params[key] = ",".join(value)
else:
params.pop(key)
for index, val in enumerate(value):
for name, v in val.items():
k = "%s[%d].%s" % (key, index, name)
params[k] = text_type(v)
else:
raise ValueError(type(value)) |
def perform_completion(editor):
"""
Performs the completion on given editor.
:param editor: Document editor.
:type editor: QWidget
:return: Method success.
:rtype: bool
"""
completion_prefix = editor.get_partial_word_under_cursor()
if not completion_prefix:
return
words = editor.get_words()
completion_prefix in words and words.remove(completion_prefix)
editor.completer.update_model(words)
editor.completer.setCompletionPrefix(completion_prefix)
if editor.completer.completionCount() == 1:
completion = editor.completer.completionModel().data(
editor.completer.completionModel().index(0, 0)).toString()
cursor = editor.textCursor()
cursor.insertText(completion[len(completion_prefix):])
editor.setTextCursor(cursor)
else:
popup = editor.completer.popup()
popup.setCurrentIndex(editor.completer.completionModel().index(0, 0))
completer_rectangle = editor.cursorRect()
hasattr(editor, "margin_area_LinesNumbers_widget") and completer_rectangle.moveTo(
completer_rectangle.topLeft().x() + editor.margin_area_LinesNumbers_widget.get_width(),
completer_rectangle.topLeft().y())
completer_rectangle.setWidth(editor.completer.popup().sizeHintForColumn(0) +
editor.completer.popup().verticalScrollBar().sizeHint().width())
editor.completer.complete(completer_rectangle)
return True | Performs the completion on given editor.
:param editor: Document editor.
:type editor: QWidget
:return: Method success.
    :rtype: bool | Below is the instruction that describes the task:
### Input:
Performs the completion on given editor.
:param editor: Document editor.
:type editor: QWidget
:return: Method success.
:rtype: bool
### Response:
def perform_completion(editor):
"""
Performs the completion on given editor.
:param editor: Document editor.
:type editor: QWidget
:return: Method success.
:rtype: bool
"""
completion_prefix = editor.get_partial_word_under_cursor()
if not completion_prefix:
return
words = editor.get_words()
completion_prefix in words and words.remove(completion_prefix)
editor.completer.update_model(words)
editor.completer.setCompletionPrefix(completion_prefix)
if editor.completer.completionCount() == 1:
completion = editor.completer.completionModel().data(
editor.completer.completionModel().index(0, 0)).toString()
cursor = editor.textCursor()
cursor.insertText(completion[len(completion_prefix):])
editor.setTextCursor(cursor)
else:
popup = editor.completer.popup()
popup.setCurrentIndex(editor.completer.completionModel().index(0, 0))
completer_rectangle = editor.cursorRect()
hasattr(editor, "margin_area_LinesNumbers_widget") and completer_rectangle.moveTo(
completer_rectangle.topLeft().x() + editor.margin_area_LinesNumbers_widget.get_width(),
completer_rectangle.topLeft().y())
completer_rectangle.setWidth(editor.completer.popup().sizeHintForColumn(0) +
editor.completer.popup().verticalScrollBar().sizeHint().width())
editor.completer.complete(completer_rectangle)
return True |
def save(self, outfile):
"""Save the image data to a file or file-like object"""
if isinstance(outfile, compat.string_type):
outfile = open(outfile, 'wb')
        outfile.write(compat.urllib2.urlopen(self.href).read()) | Save the image data to a file or file-like object | Below is the instruction that describes the task:
### Input:
Save the image data to a file or file-like object
### Response:
def save(self, outfile):
"""Save the image data to a file or file-like object"""
if isinstance(outfile, compat.string_type):
outfile = open(outfile, 'wb')
outfile.write(compat.urllib2.urlopen(self.href).read()) |
def _do_layout(self):
"""Layout sizers"""
label_style = wx.LEFT | wx.ALIGN_CENTER_VERTICAL
button_style = wx.ALL | wx.EXPAND | wx.ALIGN_CENTER_HORIZONTAL | \
wx.ALIGN_CENTER_VERTICAL | wx.FIXED_MINSIZE
grid_sizer_1 = wx.GridSizer(4, 2, 3, 3)
grid_sizer_1.Add(self.Rows_Label, 0, label_style, 3)
grid_sizer_1.Add(self.X_DimensionsEntry, 0, wx.EXPAND, 0)
grid_sizer_1.Add(self.Columns_Label, 0, label_style, 3)
grid_sizer_1.Add(self.Y_DimensionsEntry, 0, wx.EXPAND, 0)
grid_sizer_1.Add(self.Tabs_Label, 0, label_style, 3)
grid_sizer_1.Add(self.Z_DimensionsEntry, 0, wx.EXPAND, 0)
grid_sizer_1.Add(self.ok_button, 0, button_style, 3)
grid_sizer_1.Add(self.cancel_button, 0, button_style, 3)
self.SetSizer(grid_sizer_1)
grid_sizer_1.Fit(self)
self.Layout()
        self.X_DimensionsEntry.SetFocus() | Layout sizers | Below is the instruction that describes the task:
### Input:
Layout sizers
### Response:
def _do_layout(self):
"""Layout sizers"""
label_style = wx.LEFT | wx.ALIGN_CENTER_VERTICAL
button_style = wx.ALL | wx.EXPAND | wx.ALIGN_CENTER_HORIZONTAL | \
wx.ALIGN_CENTER_VERTICAL | wx.FIXED_MINSIZE
grid_sizer_1 = wx.GridSizer(4, 2, 3, 3)
grid_sizer_1.Add(self.Rows_Label, 0, label_style, 3)
grid_sizer_1.Add(self.X_DimensionsEntry, 0, wx.EXPAND, 0)
grid_sizer_1.Add(self.Columns_Label, 0, label_style, 3)
grid_sizer_1.Add(self.Y_DimensionsEntry, 0, wx.EXPAND, 0)
grid_sizer_1.Add(self.Tabs_Label, 0, label_style, 3)
grid_sizer_1.Add(self.Z_DimensionsEntry, 0, wx.EXPAND, 0)
grid_sizer_1.Add(self.ok_button, 0, button_style, 3)
grid_sizer_1.Add(self.cancel_button, 0, button_style, 3)
self.SetSizer(grid_sizer_1)
grid_sizer_1.Fit(self)
self.Layout()
self.X_DimensionsEntry.SetFocus() |
def _play_sound(self, filename):
""" Shells player with the provided filename.
`filename`
Filename for sound file.
"""
command = self._get_external_player()
if not command:
return # no player found
if common.IS_MACOSX:
command += ' "{0}"'.format(filename)
else:
# append quiet flag and filename
is_play = (command == 'play')
command += ' -q "{0}"'.format(filename)
# HACK: play can default to using pulseaudio. here, we
# check if pulse command exists and delegate to alsa if
# not
if is_play and not common.which('pulseaudio'):
command += ' -t alsa'
# play sound file, ignore if it fails
common.shell_process(command, background=True) | Shells player with the provided filename.
`filename`
            Filename for sound file. | Below is the instruction that describes the task:
### Input:
Shells player with the provided filename.
`filename`
Filename for sound file.
### Response:
def _play_sound(self, filename):
""" Shells player with the provided filename.
`filename`
Filename for sound file.
"""
command = self._get_external_player()
if not command:
return # no player found
if common.IS_MACOSX:
command += ' "{0}"'.format(filename)
else:
# append quiet flag and filename
is_play = (command == 'play')
command += ' -q "{0}"'.format(filename)
# HACK: play can default to using pulseaudio. here, we
# check if pulse command exists and delegate to alsa if
# not
if is_play and not common.which('pulseaudio'):
command += ' -t alsa'
# play sound file, ignore if it fails
common.shell_process(command, background=True) |
def least_squares(Cui, X, Y, regularization, num_threads=0):
""" For each user in Cui, calculate factors Xu for them
using least squares on Y.
Note: this is at least 10 times slower than the cython version included
here.
"""
users, n_factors = X.shape
YtY = Y.T.dot(Y)
for u in range(users):
X[u] = user_factor(Y, YtY, Cui, u, regularization, n_factors) | For each user in Cui, calculate factors Xu for them
using least squares on Y.
Note: this is at least 10 times slower than the cython version included
    here. | Below is the instruction that describes the task:
### Input:
For each user in Cui, calculate factors Xu for them
using least squares on Y.
Note: this is at least 10 times slower than the cython version included
here.
### Response:
def least_squares(Cui, X, Y, regularization, num_threads=0):
""" For each user in Cui, calculate factors Xu for them
using least squares on Y.
Note: this is at least 10 times slower than the cython version included
here.
"""
users, n_factors = X.shape
YtY = Y.T.dot(Y)
for u in range(users):
X[u] = user_factor(Y, YtY, Cui, u, regularization, n_factors) |
def _clean_tag(name):
"""Cleans a tag. Removes illegal characters for instance.
Adapted from the TensorFlow function `clean_tag()` at
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/summary_op_util.py
Parameters
----------
name : str
The original tag name to be processed.
Returns
-------
The cleaned tag name.
"""
# In the past, the first argument to summary ops was a tag, which allowed
# arbitrary characters. Now we are changing the first argument to be the node
# name. This has a number of advantages (users of summary ops now can
# take advantage of the tf name scope system) but risks breaking existing
# usage, because a much smaller set of characters are allowed in node names.
# This function replaces all illegal characters with _s, and logs a warning.
# It also strips leading slashes from the name.
if name is not None:
new_name = _INVALID_TAG_CHARACTERS.sub('_', name)
new_name = new_name.lstrip('/') # Remove leading slashes
if new_name != name:
logging.warning('Summary name %s is illegal; using %s instead.', name, new_name)
name = new_name
return name | Cleans a tag. Removes illegal characters for instance.
Adapted from the TensorFlow function `clean_tag()` at
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/summary_op_util.py
Parameters
----------
name : str
The original tag name to be processed.
Returns
-------
    The cleaned tag name. | Below is the instruction that describes the task:
### Input:
Cleans a tag. Removes illegal characters for instance.
Adapted from the TensorFlow function `clean_tag()` at
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/summary_op_util.py
Parameters
----------
name : str
The original tag name to be processed.
Returns
-------
The cleaned tag name.
### Response:
def _clean_tag(name):
"""Cleans a tag. Removes illegal characters for instance.
Adapted from the TensorFlow function `clean_tag()` at
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/summary_op_util.py
Parameters
----------
name : str
The original tag name to be processed.
Returns
-------
The cleaned tag name.
"""
# In the past, the first argument to summary ops was a tag, which allowed
# arbitrary characters. Now we are changing the first argument to be the node
# name. This has a number of advantages (users of summary ops now can
# take advantage of the tf name scope system) but risks breaking existing
# usage, because a much smaller set of characters are allowed in node names.
# This function replaces all illegal characters with _s, and logs a warning.
# It also strips leading slashes from the name.
if name is not None:
new_name = _INVALID_TAG_CHARACTERS.sub('_', name)
new_name = new_name.lstrip('/') # Remove leading slashes
if new_name != name:
logging.warning('Summary name %s is illegal; using %s instead.', name, new_name)
name = new_name
return name |
def raw_print(*args, **kw):
"""Raw print to sys.__stdout__, otherwise identical interface to print()."""
print(*args, sep=kw.get('sep', ' '), end=kw.get('end', '\n'),
file=sys.__stdout__)
    sys.__stdout__.flush() | Raw print to sys.__stdout__, otherwise identical interface to print(). | Below is the instruction that describes the task:
### Input:
Raw print to sys.__stdout__, otherwise identical interface to print().
### Response:
def raw_print(*args, **kw):
"""Raw print to sys.__stdout__, otherwise identical interface to print()."""
print(*args, sep=kw.get('sep', ' '), end=kw.get('end', '\n'),
file=sys.__stdout__)
sys.__stdout__.flush() |
def sender(self, body, stamp, url, sig):
"""Validate request is from Alexa.
Verifying that the Request was Sent by Alexa: https://goo.gl/AcrzB5.
Checking the Signature of the Request: https://goo.gl/FDkjBN.
Checking the Timestamp of the Request: https://goo.gl/Z5JhqZ
Args:
body: str. HTTPS request body.
stamp: str. Value of timestamp within request object of HTTPS
request body.
url: str. SignatureCertChainUrl header value sent
by request.
sig: str. Signature header value sent by request.
Returns:
bool: True if valid, False otherwise.
"""
if not timestamp(stamp):
return False
if self.url != url:
if not signature_cert_chain_url(url):
return False
certs = retrieve(url)
if not certs:
return False
if not cert_chain(certs):
return False
self.url = url
self.cert = certs[0]
if not signature(self.cert, sig, body):
return False
return True | Validate request is from Alexa.
Verifying that the Request was Sent by Alexa: https://goo.gl/AcrzB5.
Checking the Signature of the Request: https://goo.gl/FDkjBN.
Checking the Timestamp of the Request: https://goo.gl/Z5JhqZ
Args:
body: str. HTTPS request body.
stamp: str. Value of timestamp within request object of HTTPS
request body.
url: str. SignatureCertChainUrl header value sent
by request.
sig: str. Signature header value sent by request.
Returns:
            bool: True if valid, False otherwise. | Below is the instruction that describes the task:
### Input:
Validate request is from Alexa.
Verifying that the Request was Sent by Alexa: https://goo.gl/AcrzB5.
Checking the Signature of the Request: https://goo.gl/FDkjBN.
Checking the Timestamp of the Request: https://goo.gl/Z5JhqZ
Args:
body: str. HTTPS request body.
stamp: str. Value of timestamp within request object of HTTPS
request body.
url: str. SignatureCertChainUrl header value sent
by request.
sig: str. Signature header value sent by request.
Returns:
bool: True if valid, False otherwise.
### Response:
def sender(self, body, stamp, url, sig):
"""Validate request is from Alexa.
Verifying that the Request was Sent by Alexa: https://goo.gl/AcrzB5.
Checking the Signature of the Request: https://goo.gl/FDkjBN.
Checking the Timestamp of the Request: https://goo.gl/Z5JhqZ
Args:
body: str. HTTPS request body.
stamp: str. Value of timestamp within request object of HTTPS
request body.
url: str. SignatureCertChainUrl header value sent
by request.
sig: str. Signature header value sent by request.
Returns:
bool: True if valid, False otherwise.
"""
if not timestamp(stamp):
return False
if self.url != url:
if not signature_cert_chain_url(url):
return False
certs = retrieve(url)
if not certs:
return False
if not cert_chain(certs):
return False
self.url = url
self.cert = certs[0]
if not signature(self.cert, sig, body):
return False
return True |
def check_output(self, cmd):
"""Calls a command through SSH and returns its output.
"""
ret, output = self._call(cmd, True)
if ret != 0: # pragma: no cover
raise RemoteCommandFailure(command=cmd, ret=ret)
logger.debug("Output: %r", output)
return output | Calls a command through SSH and returns its output. | Below is the the instruction that describes the task:
### Input:
Calls a command through SSH and returns its output.
### Response:
def check_output(self, cmd):
"""Calls a command through SSH and returns its output.
"""
ret, output = self._call(cmd, True)
if ret != 0: # pragma: no cover
raise RemoteCommandFailure(command=cmd, ret=ret)
logger.debug("Output: %r", output)
return output |
def pyLuceneIndexer4to5(old):
"""
Copy attributes, reset index due because information about deleted
documents has been lost, and power up for IFulltextIndexer so other code
can find this item.
"""
new = old.upgradeVersion(PyLuceneIndexer.typeName, 4, 5,
indexCount=old.indexCount,
installedOn=old.installedOn,
indexDirectory=old.indexDirectory)
new.reset()
new.store.powerUp(new, ixmantissa.IFulltextIndexer)
return new | Copy attributes, reset index due because information about deleted
documents has been lost, and power up for IFulltextIndexer so other code
can find this item. | Below is the the instruction that describes the task:
### Input:
Copy attributes, reset index due because information about deleted
documents has been lost, and power up for IFulltextIndexer so other code
can find this item.
### Response:
def pyLuceneIndexer4to5(old):
"""
Copy attributes, reset index due because information about deleted
documents has been lost, and power up for IFulltextIndexer so other code
can find this item.
"""
new = old.upgradeVersion(PyLuceneIndexer.typeName, 4, 5,
indexCount=old.indexCount,
installedOn=old.installedOn,
indexDirectory=old.indexDirectory)
new.reset()
new.store.powerUp(new, ixmantissa.IFulltextIndexer)
return new |
def address(self):
'''
Return the address of this "object", minus the scheme, hostname
and port of the bridge
'''
return self.API.replace(
'http://{}:{}'.format(
self._bridge.hostname,
self._bridge.port
), ''
) | Return the address of this "object", minus the scheme, hostname
and port of the bridge | Below is the the instruction that describes the task:
### Input:
Return the address of this "object", minus the scheme, hostname
and port of the bridge
### Response:
def address(self):
'''
Return the address of this "object", minus the scheme, hostname
and port of the bridge
'''
return self.API.replace(
'http://{}:{}'.format(
self._bridge.hostname,
self._bridge.port
), ''
) |
def train(train_dir, model_save_path=None, n_neighbors=None, knn_algo='ball_tree', verbose=False):
"""
Trains a k-nearest neighbors classifier for face recognition.
:param train_dir: directory that contains a sub-directory for each known person, with its name.
(View in source code to see train_dir example tree structure)
Structure:
<train_dir>/
├── <person1>/
│ ├── <somename1>.jpeg
│ ├── <somename2>.jpeg
│ ├── ...
├── <person2>/
│ ├── <somename1>.jpeg
│ └── <somename2>.jpeg
└── ...
:param model_save_path: (optional) path to save model on disk
:param n_neighbors: (optional) number of neighbors to weigh in classification. Chosen automatically if not specified
:param knn_algo: (optional) underlying data structure to support knn.default is ball_tree
:param verbose: verbosity of training
:return: returns knn classifier that was trained on the given data.
"""
X = []
y = []
# Loop through each person in the training set
for class_dir in os.listdir(train_dir):
if not os.path.isdir(os.path.join(train_dir, class_dir)):
continue
# Loop through each training image for the current person
for img_path in image_files_in_folder(os.path.join(train_dir, class_dir)):
image = face_recognition.load_image_file(img_path)
face_bounding_boxes = face_recognition.face_locations(image)
if len(face_bounding_boxes) != 1:
# If there are no people (or too many people) in a training image, skip the image.
if verbose:
print("Image {} not suitable for training: {}".format(img_path, "Didn't find a face" if len(face_bounding_boxes) < 1 else "Found more than one face"))
else:
# Add face encoding for current image to the training set
X.append(face_recognition.face_encodings(image, known_face_locations=face_bounding_boxes)[0])
y.append(class_dir)
# Determine how many neighbors to use for weighting in the KNN classifier
if n_neighbors is None:
n_neighbors = int(round(math.sqrt(len(X))))
if verbose:
print("Chose n_neighbors automatically:", n_neighbors)
# Create and train the KNN classifier
knn_clf = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors, algorithm=knn_algo, weights='distance')
knn_clf.fit(X, y)
# Save the trained KNN classifier
if model_save_path is not None:
with open(model_save_path, 'wb') as f:
pickle.dump(knn_clf, f)
return knn_clf | Trains a k-nearest neighbors classifier for face recognition.
:param train_dir: directory that contains a sub-directory for each known person, with its name.
(View in source code to see train_dir example tree structure)
Structure:
<train_dir>/
├── <person1>/
│ ├── <somename1>.jpeg
│ ├── <somename2>.jpeg
│ ├── ...
├── <person2>/
│ ├── <somename1>.jpeg
│ └── <somename2>.jpeg
└── ...
:param model_save_path: (optional) path to save model on disk
:param n_neighbors: (optional) number of neighbors to weigh in classification. Chosen automatically if not specified
:param knn_algo: (optional) underlying data structure to support knn.default is ball_tree
:param verbose: verbosity of training
:return: returns knn classifier that was trained on the given data. | Below is the the instruction that describes the task:
### Input:
Trains a k-nearest neighbors classifier for face recognition.
:param train_dir: directory that contains a sub-directory for each known person, with its name.
(View in source code to see train_dir example tree structure)
Structure:
<train_dir>/
├── <person1>/
│ ├── <somename1>.jpeg
│ ├── <somename2>.jpeg
│ ├── ...
├── <person2>/
│ ├── <somename1>.jpeg
│ └── <somename2>.jpeg
└── ...
:param model_save_path: (optional) path to save model on disk
:param n_neighbors: (optional) number of neighbors to weigh in classification. Chosen automatically if not specified
:param knn_algo: (optional) underlying data structure to support knn.default is ball_tree
:param verbose: verbosity of training
:return: returns knn classifier that was trained on the given data.
### Response:
def train(train_dir, model_save_path=None, n_neighbors=None, knn_algo='ball_tree', verbose=False):
"""
Trains a k-nearest neighbors classifier for face recognition.
:param train_dir: directory that contains a sub-directory for each known person, with its name.
(View in source code to see train_dir example tree structure)
Structure:
<train_dir>/
├── <person1>/
│ ├── <somename1>.jpeg
│ ├── <somename2>.jpeg
│ ├── ...
├── <person2>/
│ ├── <somename1>.jpeg
│ └── <somename2>.jpeg
└── ...
:param model_save_path: (optional) path to save model on disk
:param n_neighbors: (optional) number of neighbors to weigh in classification. Chosen automatically if not specified
:param knn_algo: (optional) underlying data structure to support knn.default is ball_tree
:param verbose: verbosity of training
:return: returns knn classifier that was trained on the given data.
"""
X = []
y = []
# Loop through each person in the training set
for class_dir in os.listdir(train_dir):
if not os.path.isdir(os.path.join(train_dir, class_dir)):
continue
# Loop through each training image for the current person
for img_path in image_files_in_folder(os.path.join(train_dir, class_dir)):
image = face_recognition.load_image_file(img_path)
face_bounding_boxes = face_recognition.face_locations(image)
if len(face_bounding_boxes) != 1:
# If there are no people (or too many people) in a training image, skip the image.
if verbose:
print("Image {} not suitable for training: {}".format(img_path, "Didn't find a face" if len(face_bounding_boxes) < 1 else "Found more than one face"))
else:
# Add face encoding for current image to the training set
X.append(face_recognition.face_encodings(image, known_face_locations=face_bounding_boxes)[0])
y.append(class_dir)
# Determine how many neighbors to use for weighting in the KNN classifier
if n_neighbors is None:
n_neighbors = int(round(math.sqrt(len(X))))
if verbose:
print("Chose n_neighbors automatically:", n_neighbors)
# Create and train the KNN classifier
knn_clf = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors, algorithm=knn_algo, weights='distance')
knn_clf.fit(X, y)
# Save the trained KNN classifier
if model_save_path is not None:
with open(model_save_path, 'wb') as f:
pickle.dump(knn_clf, f)
return knn_clf |
def get_changes(self, changers, in_hierarchy=False, resources=None,
task_handle=taskhandle.NullTaskHandle()):
"""Get changes caused by this refactoring
`changers` is a list of `_ArgumentChanger`\s. If `in_hierarchy`
is `True` the changers are applyed to all matching methods in
the class hierarchy.
`resources` can be a list of `rope.base.resource.File`\s that
should be searched for occurrences; if `None` all python files
in the project are searched.
"""
function_changer = _FunctionChangers(self.pyname.get_object(),
self._definfo(), changers)
return self._change_calls(function_changer, in_hierarchy,
resources, task_handle) | Get changes caused by this refactoring
`changers` is a list of `_ArgumentChanger`\s. If `in_hierarchy`
is `True` the changers are applyed to all matching methods in
the class hierarchy.
`resources` can be a list of `rope.base.resource.File`\s that
should be searched for occurrences; if `None` all python files
in the project are searched. | Below is the the instruction that describes the task:
### Input:
Get changes caused by this refactoring
`changers` is a list of `_ArgumentChanger`\s. If `in_hierarchy`
is `True` the changers are applyed to all matching methods in
the class hierarchy.
`resources` can be a list of `rope.base.resource.File`\s that
should be searched for occurrences; if `None` all python files
in the project are searched.
### Response:
def get_changes(self, changers, in_hierarchy=False, resources=None,
task_handle=taskhandle.NullTaskHandle()):
"""Get changes caused by this refactoring
`changers` is a list of `_ArgumentChanger`\s. If `in_hierarchy`
is `True` the changers are applyed to all matching methods in
the class hierarchy.
`resources` can be a list of `rope.base.resource.File`\s that
should be searched for occurrences; if `None` all python files
in the project are searched.
"""
function_changer = _FunctionChangers(self.pyname.get_object(),
self._definfo(), changers)
return self._change_calls(function_changer, in_hierarchy,
resources, task_handle) |
async def open(self) -> 'NodePool':
"""
Explicit entry. Opens pool as configured, for later closure via close().
Creates pool if it does not yet exist, using configured genesis transaction file.
For use when keeping pool open across multiple calls.
Raise any AbsentPool if node pool ledger configuration is not available.
:return: current object
"""
LOGGER.debug('NodePool.open >>>')
await pool.set_protocol_version(self.protocol.indy())
LOGGER.info('Pool ledger %s set protocol %s', self.name, self.protocol)
try:
self._handle = await pool.open_pool_ledger(self.name, json.dumps(self.config))
except IndyError as x_indy:
if x_indy.error_code == ErrorCode.PoolLedgerNotCreatedError:
LOGGER.debug('NodePool.open <!< Absent node pool %s ledger configuration', self.name)
raise AbsentPool('Absent node pool {} ledger configuration'.format(self.name))
LOGGER.debug(
'NodePool.open <!< cannot open node pool %s: indy error code %s',
self.name,
x_indy.error_code)
raise
LOGGER.debug('NodePool.open <<<')
return self | Explicit entry. Opens pool as configured, for later closure via close().
Creates pool if it does not yet exist, using configured genesis transaction file.
For use when keeping pool open across multiple calls.
Raise any AbsentPool if node pool ledger configuration is not available.
:return: current object | Below is the the instruction that describes the task:
### Input:
Explicit entry. Opens pool as configured, for later closure via close().
Creates pool if it does not yet exist, using configured genesis transaction file.
For use when keeping pool open across multiple calls.
Raise any AbsentPool if node pool ledger configuration is not available.
:return: current object
### Response:
async def open(self) -> 'NodePool':
"""
Explicit entry. Opens pool as configured, for later closure via close().
Creates pool if it does not yet exist, using configured genesis transaction file.
For use when keeping pool open across multiple calls.
Raise any AbsentPool if node pool ledger configuration is not available.
:return: current object
"""
LOGGER.debug('NodePool.open >>>')
await pool.set_protocol_version(self.protocol.indy())
LOGGER.info('Pool ledger %s set protocol %s', self.name, self.protocol)
try:
self._handle = await pool.open_pool_ledger(self.name, json.dumps(self.config))
except IndyError as x_indy:
if x_indy.error_code == ErrorCode.PoolLedgerNotCreatedError:
LOGGER.debug('NodePool.open <!< Absent node pool %s ledger configuration', self.name)
raise AbsentPool('Absent node pool {} ledger configuration'.format(self.name))
LOGGER.debug(
'NodePool.open <!< cannot open node pool %s: indy error code %s',
self.name,
x_indy.error_code)
raise
LOGGER.debug('NodePool.open <<<')
return self |
def _list_gids():
'''
Return a list of gids in use
'''
output = __salt__['cmd.run'](
['dscacheutil', '-q', 'group'],
output_loglevel='quiet',
python_shell=False
)
ret = set()
for line in salt.utils.itertools.split(output, '\n'):
if line.startswith('gid:'):
ret.update(line.split()[1:])
return sorted(ret) | Return a list of gids in use | Below is the the instruction that describes the task:
### Input:
Return a list of gids in use
### Response:
def _list_gids():
'''
Return a list of gids in use
'''
output = __salt__['cmd.run'](
['dscacheutil', '-q', 'group'],
output_loglevel='quiet',
python_shell=False
)
ret = set()
for line in salt.utils.itertools.split(output, '\n'):
if line.startswith('gid:'):
ret.update(line.split()[1:])
return sorted(ret) |
def update_ssl_termination(self, loadbalancer, securePort=None, enabled=None,
secureTrafficOnly=None):
"""
Updates existing SSL termination information for the load balancer
without affecting the existing certificates/keys.
"""
return loadbalancer.update_ssl_termination(securePort=securePort,
enabled=enabled, secureTrafficOnly=secureTrafficOnly) | Updates existing SSL termination information for the load balancer
without affecting the existing certificates/keys. | Below is the the instruction that describes the task:
### Input:
Updates existing SSL termination information for the load balancer
without affecting the existing certificates/keys.
### Response:
def update_ssl_termination(self, loadbalancer, securePort=None, enabled=None,
secureTrafficOnly=None):
"""
Updates existing SSL termination information for the load balancer
without affecting the existing certificates/keys.
"""
return loadbalancer.update_ssl_termination(securePort=securePort,
enabled=enabled, secureTrafficOnly=secureTrafficOnly) |
def transform(self, X):
"""Computes the row principal coordinates of a dataset."""
utils.validation.check_is_fitted(self, 's_')
if self.check_input:
utils.check_array(X, dtype=[str, np.number])
return self.row_coordinates(X) | Computes the row principal coordinates of a dataset. | Below is the the instruction that describes the task:
### Input:
Computes the row principal coordinates of a dataset.
### Response:
def transform(self, X):
"""Computes the row principal coordinates of a dataset."""
utils.validation.check_is_fitted(self, 's_')
if self.check_input:
utils.check_array(X, dtype=[str, np.number])
return self.row_coordinates(X) |
def print_yielded(func):
"""
Convert a generator into a function that prints all yielded elements
>>> @print_yielded
... def x():
... yield 3; yield None
>>> x()
3
None
"""
print_all = functools.partial(map, print)
print_results = compose(more_itertools.recipes.consume, print_all, func)
return functools.wraps(func)(print_results) | Convert a generator into a function that prints all yielded elements
>>> @print_yielded
... def x():
... yield 3; yield None
>>> x()
3
None | Below is the the instruction that describes the task:
### Input:
Convert a generator into a function that prints all yielded elements
>>> @print_yielded
... def x():
... yield 3; yield None
>>> x()
3
None
### Response:
def print_yielded(func):
"""
Convert a generator into a function that prints all yielded elements
>>> @print_yielded
... def x():
... yield 3; yield None
>>> x()
3
None
"""
print_all = functools.partial(map, print)
print_results = compose(more_itertools.recipes.consume, print_all, func)
return functools.wraps(func)(print_results) |
def get_variant_id(variant_dict=None, variant_line=None):
"""Build a variant id
The variant id is a string made of CHROM_POS_REF_ALT
Args:
variant_dict (dict): A variant dictionary
Returns:
variant_id (str)
"""
if variant_dict:
chrom = variant_dict['CHROM']
position = variant_dict['POS']
ref = variant_dict['REF']
alt = variant_dict['ALT']
elif variant_line:
splitted_line = variant_line.rstrip().split('\t')
chrom = splitted_line[0]
position = splitted_line[1]
ref = splitted_line[3]
alt = splitted_line[4]
else:
raise Exception("Have to provide variant dict or variant line")
return '_'.join([
chrom,
position,
ref,
alt,
]) | Build a variant id
The variant id is a string made of CHROM_POS_REF_ALT
Args:
variant_dict (dict): A variant dictionary
Returns:
variant_id (str) | Below is the the instruction that describes the task:
### Input:
Build a variant id
The variant id is a string made of CHROM_POS_REF_ALT
Args:
variant_dict (dict): A variant dictionary
Returns:
variant_id (str)
### Response:
def get_variant_id(variant_dict=None, variant_line=None):
"""Build a variant id
The variant id is a string made of CHROM_POS_REF_ALT
Args:
variant_dict (dict): A variant dictionary
Returns:
variant_id (str)
"""
if variant_dict:
chrom = variant_dict['CHROM']
position = variant_dict['POS']
ref = variant_dict['REF']
alt = variant_dict['ALT']
elif variant_line:
splitted_line = variant_line.rstrip().split('\t')
chrom = splitted_line[0]
position = splitted_line[1]
ref = splitted_line[3]
alt = splitted_line[4]
else:
raise Exception("Have to provide variant dict or variant line")
return '_'.join([
chrom,
position,
ref,
alt,
]) |
def _get_stddevs(self, C, stddev_types, num_sites):
"""
Return total standard deviation.
"""
# standard deviation is converted from log10 to ln
std_total = np.log(10 ** C['sigma'])
stddevs = []
for _ in stddev_types:
stddevs.append(np.zeros(num_sites) + std_total)
return stddevs | Return total standard deviation. | Below is the the instruction that describes the task:
### Input:
Return total standard deviation.
### Response:
def _get_stddevs(self, C, stddev_types, num_sites):
"""
Return total standard deviation.
"""
# standard deviation is converted from log10 to ln
std_total = np.log(10 ** C['sigma'])
stddevs = []
for _ in stddev_types:
stddevs.append(np.zeros(num_sites) + std_total)
return stddevs |
def _match_path(pathname,
included_patterns,
excluded_patterns,
case_sensitive=True):
"""Internal function same as :func:`match_path` but does not check arguments.
Doctests::
>>> _match_path("/users/gorakhargosh/foobar.py", ["*.py"], ["*.PY"], True)
True
>>> _match_path("/users/gorakhargosh/FOOBAR.PY", ["*.py"], ["*.PY"], True)
False
>>> _match_path("/users/gorakhargosh/foobar/", ["*.py"], ["*.txt"], False)
False
>>> _match_path("/users/gorakhargosh/FOOBAR.PY", ["*.py"], ["*.PY"], False)
Traceback (most recent call last):
...
ValueError: conflicting patterns `set(['*.py'])` included and excluded
"""
if not case_sensitive:
included_patterns = set(map(_string_lower, included_patterns))
excluded_patterns = set(map(_string_lower, excluded_patterns))
else:
included_patterns = set(included_patterns)
excluded_patterns = set(excluded_patterns)
common_patterns = included_patterns & excluded_patterns
if common_patterns:
raise ValueError('conflicting patterns `%s` included and excluded'\
% common_patterns)
return (match_path_against(pathname, included_patterns, case_sensitive)\
and not match_path_against(pathname, excluded_patterns,
case_sensitive)) | Internal function same as :func:`match_path` but does not check arguments.
Doctests::
>>> _match_path("/users/gorakhargosh/foobar.py", ["*.py"], ["*.PY"], True)
True
>>> _match_path("/users/gorakhargosh/FOOBAR.PY", ["*.py"], ["*.PY"], True)
False
>>> _match_path("/users/gorakhargosh/foobar/", ["*.py"], ["*.txt"], False)
False
>>> _match_path("/users/gorakhargosh/FOOBAR.PY", ["*.py"], ["*.PY"], False)
Traceback (most recent call last):
...
ValueError: conflicting patterns `set(['*.py'])` included and excluded | Below is the the instruction that describes the task:
### Input:
Internal function same as :func:`match_path` but does not check arguments.
Doctests::
>>> _match_path("/users/gorakhargosh/foobar.py", ["*.py"], ["*.PY"], True)
True
>>> _match_path("/users/gorakhargosh/FOOBAR.PY", ["*.py"], ["*.PY"], True)
False
>>> _match_path("/users/gorakhargosh/foobar/", ["*.py"], ["*.txt"], False)
False
>>> _match_path("/users/gorakhargosh/FOOBAR.PY", ["*.py"], ["*.PY"], False)
Traceback (most recent call last):
...
ValueError: conflicting patterns `set(['*.py'])` included and excluded
### Response:
def _match_path(pathname,
included_patterns,
excluded_patterns,
case_sensitive=True):
"""Internal function same as :func:`match_path` but does not check arguments.
Doctests::
>>> _match_path("/users/gorakhargosh/foobar.py", ["*.py"], ["*.PY"], True)
True
>>> _match_path("/users/gorakhargosh/FOOBAR.PY", ["*.py"], ["*.PY"], True)
False
>>> _match_path("/users/gorakhargosh/foobar/", ["*.py"], ["*.txt"], False)
False
>>> _match_path("/users/gorakhargosh/FOOBAR.PY", ["*.py"], ["*.PY"], False)
Traceback (most recent call last):
...
ValueError: conflicting patterns `set(['*.py'])` included and excluded
"""
if not case_sensitive:
included_patterns = set(map(_string_lower, included_patterns))
excluded_patterns = set(map(_string_lower, excluded_patterns))
else:
included_patterns = set(included_patterns)
excluded_patterns = set(excluded_patterns)
common_patterns = included_patterns & excluded_patterns
if common_patterns:
raise ValueError('conflicting patterns `%s` included and excluded'\
% common_patterns)
return (match_path_against(pathname, included_patterns, case_sensitive)\
and not match_path_against(pathname, excluded_patterns,
case_sensitive)) |
def spmatrix(self, reordered = True, symmetric = False):
"""
Converts the :py:class:`cspmatrix` :math:`A` to a sparse matrix. A reordered
matrix is returned if the optional argument `reordered` is
`True` (default), and otherwise the inverse permutation is applied. Only the
default options are allowed if the :py:class:`cspmatrix` :math:`A` represents
a Cholesky factor.
:param reordered: boolean (default: True)
:param symmetric: boolean (default: False)
"""
n = self.symb.n
snptr = self.symb.snptr
snode = self.symb.snode
relptr = self.symb.relptr
snrowidx = self.symb.snrowidx
sncolptr = self.symb.sncolptr
blkptr = self.symb.blkptr
blkval = self.blkval
if self.is_factor:
if symmetric: raise ValueError("'symmetric = True' not implemented for Cholesky factors")
if not reordered: raise ValueError("'reordered = False' not implemented for Cholesky factors")
snpost = self.symb.snpost
blkval = +blkval
for k in snpost:
j = snode[snptr[k]] # representative vertex
nn = snptr[k+1]-snptr[k] # |Nk|
na = relptr[k+1]-relptr[k] # |Ak|
if na == 0: continue
nj = na + nn
if nn == 1:
blas.scal(blkval[blkptr[k]],blkval,offset = blkptr[k]+1,n=na)
else:
blas.trmm(blkval,blkval, transA = "N", diag = "N", side = "R",uplo = "L", \
m = na, n = nn, ldA = nj, ldB = nj, \
offsetA = blkptr[k],offsetB = blkptr[k] + nn)
cc = matrix(0,(n,1)) # count number of nonzeros in each col
for k in range(self.symb.Nsn):
nn = snptr[k+1]-snptr[k]
na = relptr[k+1]-relptr[k]
nj = nn + na
for i in range(nn):
j = snode[snptr[k]+i]
cc[j] = nj - i
# build col. ptr
cp = [0]
for i in range(n): cp.append(cp[-1] + cc[i])
cp = matrix(cp)
# copy data and row indices
val = matrix(0.0, (cp[-1],1))
ri = matrix(0, (cp[-1],1))
for k in range(self.symb.Nsn):
nn = snptr[k+1]-snptr[k]
na = relptr[k+1]-relptr[k]
nj = nn + na
for i in range(nn):
j = snode[snptr[k]+i]
blas.copy(blkval, val, offsetx = blkptr[k]+nj*i+i, offsety = cp[j], n = nj-i)
ri[cp[j]:cp[j+1]] = snrowidx[sncolptr[k]+i:sncolptr[k+1]]
I = []; J = []
for i in range(n):
I += list(ri[cp[i]:cp[i+1]])
J += (cp[i+1]-cp[i])*[i]
tmp = spmatrix(val, I, J, (n,n)) # tmp is reordered and lower tril.
if reordered or self.symb.p is None:
# reordered matrix (do not apply inverse permutation)
if not symmetric: return tmp
else: return symmetrize(tmp)
else:
# apply inverse permutation
tmp = perm(symmetrize(tmp), self.symb.ip)
if symmetric: return tmp
else: return tril(tmp) | Converts the :py:class:`cspmatrix` :math:`A` to a sparse matrix. A reordered
matrix is returned if the optional argument `reordered` is
`True` (default), and otherwise the inverse permutation is applied. Only the
default options are allowed if the :py:class:`cspmatrix` :math:`A` represents
a Cholesky factor.
:param reordered: boolean (default: True)
:param symmetric: boolean (default: False) | Below is the the instruction that describes the task:
### Input:
Converts the :py:class:`cspmatrix` :math:`A` to a sparse matrix. A reordered
matrix is returned if the optional argument `reordered` is
`True` (default), and otherwise the inverse permutation is applied. Only the
default options are allowed if the :py:class:`cspmatrix` :math:`A` represents
a Cholesky factor.
:param reordered: boolean (default: True)
:param symmetric: boolean (default: False)
### Response:
def spmatrix(self, reordered = True, symmetric = False):
"""
Converts the :py:class:`cspmatrix` :math:`A` to a sparse matrix. A reordered
matrix is returned if the optional argument `reordered` is
`True` (default), and otherwise the inverse permutation is applied. Only the
default options are allowed if the :py:class:`cspmatrix` :math:`A` represents
a Cholesky factor.
:param reordered: boolean (default: True)
:param symmetric: boolean (default: False)
"""
n = self.symb.n
snptr = self.symb.snptr
snode = self.symb.snode
relptr = self.symb.relptr
snrowidx = self.symb.snrowidx
sncolptr = self.symb.sncolptr
blkptr = self.symb.blkptr
blkval = self.blkval
if self.is_factor:
if symmetric: raise ValueError("'symmetric = True' not implemented for Cholesky factors")
if not reordered: raise ValueError("'reordered = False' not implemented for Cholesky factors")
snpost = self.symb.snpost
blkval = +blkval
for k in snpost:
j = snode[snptr[k]] # representative vertex
nn = snptr[k+1]-snptr[k] # |Nk|
na = relptr[k+1]-relptr[k] # |Ak|
if na == 0: continue
nj = na + nn
if nn == 1:
blas.scal(blkval[blkptr[k]],blkval,offset = blkptr[k]+1,n=na)
else:
blas.trmm(blkval,blkval, transA = "N", diag = "N", side = "R",uplo = "L", \
m = na, n = nn, ldA = nj, ldB = nj, \
offsetA = blkptr[k],offsetB = blkptr[k] + nn)
cc = matrix(0,(n,1)) # count number of nonzeros in each col
for k in range(self.symb.Nsn):
nn = snptr[k+1]-snptr[k]
na = relptr[k+1]-relptr[k]
nj = nn + na
for i in range(nn):
j = snode[snptr[k]+i]
cc[j] = nj - i
# build col. ptr
cp = [0]
for i in range(n): cp.append(cp[-1] + cc[i])
cp = matrix(cp)
# copy data and row indices
val = matrix(0.0, (cp[-1],1))
ri = matrix(0, (cp[-1],1))
for k in range(self.symb.Nsn):
nn = snptr[k+1]-snptr[k]
na = relptr[k+1]-relptr[k]
nj = nn + na
for i in range(nn):
j = snode[snptr[k]+i]
blas.copy(blkval, val, offsetx = blkptr[k]+nj*i+i, offsety = cp[j], n = nj-i)
ri[cp[j]:cp[j+1]] = snrowidx[sncolptr[k]+i:sncolptr[k+1]]
I = []; J = []
for i in range(n):
I += list(ri[cp[i]:cp[i+1]])
J += (cp[i+1]-cp[i])*[i]
tmp = spmatrix(val, I, J, (n,n)) # tmp is reordered and lower tril.
if reordered or self.symb.p is None:
# reordered matrix (do not apply inverse permutation)
if not symmetric: return tmp
else: return symmetrize(tmp)
else:
# apply inverse permutation
tmp = perm(symmetrize(tmp), self.symb.ip)
if symmetric: return tmp
else: return tril(tmp) |
def incfile(fname, fpointer, lrange="1,6-", sdir=None):
r"""
Include a Python source file in a docstring formatted in reStructuredText.
:param fname: File name, relative to environment variable
:bash:`${TRACER_DIR}`
:type fname: string
:param fpointer: Output function pointer. Normally is :code:`cog.out` but
:code:`print` or other functions can be used for
debugging
:type fpointer: function object
:param lrange: Line range to include, similar to Sphinx
`literalinclude <http://sphinx-doc.org/markup/code.html
#directive-literalinclude>`_ directive
:type lrange: string
:param sdir: Source file directory. If None the :bash:`${TRACER_DIR}`
environment variable is used if it is defined, otherwise
the directory where the :code:`docs.support.incfile` module
is located is used
:type sdir: string
For example:
.. code-block:: python
def func():
\"\"\"
This is a docstring. This file shows how to use it:
.. =[=cog
.. import docs.support.incfile
.. docs.support.incfile.incfile('func_example.py', cog.out)
.. =]=
.. code-block:: python
# func_example.py
if __name__ == '__main__':
func()
.. =[=end=]=
\"\"\"
return 'This is func output'
"""
# Read file
file_dir = (
sdir
if sdir
else os.environ.get("TRACER_DIR", os.path.abspath(os.path.dirname(__file__)))
)
fname = os.path.join(file_dir, fname)
with open(fname) as fobj:
lines = fobj.readlines()
# Parse line specification
tokens = [item.strip() for item in lrange.split(",")]
inc_lines = []
for token in tokens:
if "-" in token:
subtokens = token.split("-")
lmin, lmax = (
int(subtokens[0]),
int(subtokens[1]) if subtokens[1] else len(lines),
)
for num in range(lmin, lmax + 1):
inc_lines.append(num)
else:
inc_lines.append(int(token))
# Produce output
fpointer(".. code-block:: python\n")
fpointer("\n")
for num, line in enumerate(lines):
if num + 1 in inc_lines:
fpointer(" " + line.replace("\t", " ") if line.strip() else "\n")
fpointer("\n") | r"""
Include a Python source file in a docstring formatted in reStructuredText.
:param fname: File name, relative to environment variable
:bash:`${TRACER_DIR}`
:type fname: string
:param fpointer: Output function pointer. Normally is :code:`cog.out` but
:code:`print` or other functions can be used for
debugging
:type fpointer: function object
:param lrange: Line range to include, similar to Sphinx
`literalinclude <http://sphinx-doc.org/markup/code.html
#directive-literalinclude>`_ directive
:type lrange: string
:param sdir: Source file directory. If None the :bash:`${TRACER_DIR}`
environment variable is used if it is defined, otherwise
the directory where the :code:`docs.support.incfile` module
is located is used
:type sdir: string
For example:
.. code-block:: python
def func():
\"\"\"
This is a docstring. This file shows how to use it:
.. =[=cog
.. import docs.support.incfile
.. docs.support.incfile.incfile('func_example.py', cog.out)
.. =]=
.. code-block:: python
# func_example.py
if __name__ == '__main__':
func()
.. =[=end=]=
\"\"\"
return 'This is func output' | Below is the instruction that describes the task:
### Input:
r"""
Include a Python source file in a docstring formatted in reStructuredText.
:param fname: File name, relative to environment variable
:bash:`${TRACER_DIR}`
:type fname: string
:param fpointer: Output function pointer. Normally is :code:`cog.out` but
:code:`print` or other functions can be used for
debugging
:type fpointer: function object
:param lrange: Line range to include, similar to Sphinx
`literalinclude <http://sphinx-doc.org/markup/code.html
#directive-literalinclude>`_ directive
:type lrange: string
:param sdir: Source file directory. If None the :bash:`${TRACER_DIR}`
environment variable is used if it is defined, otherwise
the directory where the :code:`docs.support.incfile` module
is located is used
:type sdir: string
For example:
.. code-block:: python
def func():
\"\"\"
This is a docstring. This file shows how to use it:
.. =[=cog
.. import docs.support.incfile
.. docs.support.incfile.incfile('func_example.py', cog.out)
.. =]=
.. code-block:: python
# func_example.py
if __name__ == '__main__':
func()
.. =[=end=]=
\"\"\"
return 'This is func output'
### Response:
def incfile(fname, fpointer, lrange="1,6-", sdir=None):
    r"""
    Include a Python source file in a docstring formatted in reStructuredText.
    :param fname: File name, relative to environment variable
                  :bash:`${TRACER_DIR}`
    :type fname: string
    :param fpointer: Output function pointer. Normally is :code:`cog.out` but
                     :code:`print` or other functions can be used for
                     debugging
    :type fpointer: function object
    :param lrange: Line range to include, similar to Sphinx
                   `literalinclude <http://sphinx-doc.org/markup/code.html
                   #directive-literalinclude>`_ directive
    :type lrange: string
    :param sdir: Source file directory. If None the :bash:`${TRACER_DIR}`
                 environment variable is used if it is defined, otherwise
                 the directory where the :code:`docs.support.incfile` module
                 is located is used
    :type sdir: string
    For example:
    .. code-block:: python
        def func():
            \"\"\"
            This is a docstring. This file shows how to use it:
            .. =[=cog
            .. import docs.support.incfile
            .. docs.support.incfile.incfile('func_example.py', cog.out)
            .. =]=
            .. code-block:: python
                # func_example.py
                if __name__ == '__main__':
                    func()
            .. =[=end=]=
            \"\"\"
            return 'This is func output'
    """
    # Resolve the source directory: explicit argument wins, then the
    # $TRACER_DIR environment variable, then this module's own directory
    file_dir = (
        sdir
        if sdir
        else os.environ.get("TRACER_DIR", os.path.abspath(os.path.dirname(__file__)))
    )
    fname = os.path.join(file_dir, fname)
    with open(fname) as fobj:
        lines = fobj.readlines()
    # Parse the line specification into a set of 1-based line numbers.
    # A set makes the per-line membership test in the output loop O(1)
    # instead of O(n) per line (was a list, i.e. quadratic overall).
    tokens = [item.strip() for item in lrange.split(",")]
    inc_lines = set()
    for token in tokens:
        if "-" in token:
            subtokens = token.split("-")
            lmin = int(subtokens[0])
            # An open-ended range such as "6-" runs to the end of the file
            lmax = int(subtokens[1]) if subtokens[1] else len(lines)
            inc_lines.update(range(lmin, lmax + 1))
        else:
            inc_lines.add(int(token))
    # Produce output: a reST code-block directive followed by the
    # selected lines, indented; blank lines are emitted bare so that no
    # trailing whitespace is produced
    fpointer(".. code-block:: python\n")
    fpointer("\n")
    for num, line in enumerate(lines):
        if num + 1 in inc_lines:
            fpointer(" " + line.replace("\t", " ") if line.strip() else "\n")
    fpointer("\n")
def handle_connection_change(self, state):
"""
Callback for handling changes in the kazoo client's connection state.
If the connection becomes lost or suspended, the `connected` Event
is cleared. Other given states imply that the connection is
established so `connected` is set.
"""
if state == client.KazooState.LOST:
if not self.shutdown.is_set():
logger.info("Zookeeper session lost!")
self.connected.clear()
elif state == client.KazooState.SUSPENDED:
logger.info("Zookeeper connection suspended!")
self.connected.clear()
else:
logger.info("Zookeeper connection (re)established.")
self.connected.set() | Callback for handling changes in the kazoo client's connection state.
If the connection becomes lost or suspended, the `connected` Event
is cleared. Other given states imply that the connection is
established so `connected` is set. | Below is the instruction that describes the task:
### Input:
Callback for handling changes in the kazoo client's connection state.
If the connection becomes lost or suspended, the `connected` Event
is cleared. Other given states imply that the connection is
established so `connected` is set.
### Response:
def handle_connection_change(self, state):
    """
    Callback for handling changes in the kazoo client's connection state.
    If the connection becomes lost or suspended, the `connected` Event
    is cleared. Other given states imply that the connection is
    established so `connected` is set.
    """
    if state == client.KazooState.LOST:
        # During a deliberate shutdown a lost session is expected, so
        # neither log nor clear the connected flag in that case.
        if not self.shutdown.is_set():
            logger.info("Zookeeper session lost!")
            self.connected.clear()
        return
    if state == client.KazooState.SUSPENDED:
        logger.info("Zookeeper connection suspended!")
        self.connected.clear()
        return
    # Any other state means the connection is (re)established.
    logger.info("Zookeeper connection (re)established.")
    self.connected.set()
def __update_clusters(self, medoids):
"""!
@brief Forms cluster in line with specified medoids by calculation distance from each point to medoids.
"""
self.__belong = [0] * len(self.__pointer_data)
self.__clusters = [[] for i in range(len(medoids))]
for index_point in range(len(self.__pointer_data)):
index_optim = -1
dist_optim = 0.0
for index in range(len(medoids)):
dist = euclidean_distance_square(self.__pointer_data[index_point], self.__pointer_data[medoids[index]])
if (dist < dist_optim) or (index is 0):
index_optim = index
dist_optim = dist
self.__clusters[index_optim].append(index_point)
self.__belong[index_point] = index_optim
# If cluster is not able to capture object it should be removed
self.__clusters = [cluster for cluster in self.__clusters if len(cluster) > 0] | !
@brief Forms cluster in line with specified medoids by calculation distance from each point to medoids. | Below is the instruction that describes the task:
### Input:
!
@brief Forms cluster in line with specified medoids by calculation distance from each point to medoids.
### Response:
def __update_clusters(self, medoids):
    """!
    @brief Forms cluster in line with specified medoids by calculation distance from each point to medoids.
    """
    # __belong[i] holds the index (into `medoids`) of the medoid that
    # point i was assigned to; __clusters groups point indices per medoid.
    self.__belong = [0] * len(self.__pointer_data)
    self.__clusters = [[] for i in range(len(medoids))]
    for index_point in range(len(self.__pointer_data)):
        index_optim = -1
        dist_optim = 0.0
        for index in range(len(medoids)):
            dist = euclidean_distance_square(self.__pointer_data[index_point], self.__pointer_data[medoids[index]])
            # `index == 0` seeds the running minimum with the first medoid's
            # distance.  (Was `index is 0`: identity comparison with an int
            # literal is implementation-dependent and raises SyntaxWarning
            # on modern CPython; equality is the correct test.)
            if (dist < dist_optim) or (index == 0):
                index_optim = index
                dist_optim = dist
        self.__clusters[index_optim].append(index_point)
        self.__belong[index_point] = index_optim
    # If cluster is not able to capture object it should be removed
    self.__clusters = [cluster for cluster in self.__clusters if len(cluster) > 0]
def get_account(self, account):
""" Fetches an account with all its attributes.
:param account: an account object, with either id or name attribute set
:returns: a zobjects.Account object, filled.
"""
selector = account.to_selector()
resp = self.request_single('GetAccount', {'account': selector})
return zobjects.Account.from_dict(resp) | Fetches an account with all its attributes.
:param account: an account object, with either id or name attribute set
:returns: a zobjects.Account object, filled. | Below is the instruction that describes the task:
### Input:
Fetches an account with all its attributes.
:param account: an account object, with either id or name attribute set
:returns: a zobjects.Account object, filled.
### Response:
def get_account(self, account):
    """ Fetches an account with all its attributes.
    :param account: an account object, with either id or name attribute set
    :returns: a zobjects.Account object, filled.
    """
    # Turn the (possibly partial) account into a request selector, then
    # hydrate a fully-populated Account from the server's response.
    resp = self.request_single('GetAccount', {'account': account.to_selector()})
    return zobjects.Account.from_dict(resp)
def coerce_value(type, value):
# type: (Any, Any) -> Union[List, Dict, int, float, bool, str, None]
"""Given a type and any value, return a runtime value coerced to match the type."""
if isinstance(type, GraphQLNonNull):
# Note: we're not checking that the result of coerceValue is
# non-null.
# We only call this function after calling isValidValue.
return coerce_value(type.of_type, value)
if value is None:
return None
if isinstance(type, GraphQLList):
item_type = type.of_type
if not isinstance(value, string_types) and isinstance(value, Iterable):
return [coerce_value(item_type, item) for item in value]
else:
return [coerce_value(item_type, value)]
if isinstance(type, GraphQLInputObjectType):
fields = type.fields
obj = {}
for field_name, field in fields.items():
if field_name not in value:
if field.default_value is not None:
field_value = field.default_value
obj[field.out_name or field_name] = field_value
else:
field_value = coerce_value(field.type, value.get(field_name))
obj[field.out_name or field_name] = field_value
return type.create_container(obj)
assert isinstance(type, (GraphQLScalarType, GraphQLEnumType)), "Must be input type"
return type.parse_value(value) | Given a type and any value, return a runtime value coerced to match the type. | Below is the instruction that describes the task:
### Input:
Given a type and any value, return a runtime value coerced to match the type.
### Response:
def coerce_value(type, value):
    # type: (Any, Any) -> Union[List, Dict, int, float, bool, str, None]
    """Given a type and any value, return a runtime value coerced to match the type."""
    if isinstance(type, GraphQLNonNull):
        # Non-nullness was already validated by isValidValue, so simply
        # unwrap the wrapper and coerce against the inner type.
        return coerce_value(type.of_type, value)
    if value is None:
        return None
    if isinstance(type, GraphQLList):
        item_type = type.of_type
        # Strings are iterable but must be treated as scalars here.
        is_iterable = isinstance(value, Iterable) and not isinstance(value, string_types)
        items = value if is_iterable else [value]
        return [coerce_value(item_type, item) for item in items]
    if isinstance(type, GraphQLInputObjectType):
        obj = {}
        for field_name, field in type.fields.items():
            out_key = field.out_name or field_name
            if field_name in value:
                obj[out_key] = coerce_value(field.type, value.get(field_name))
            elif field.default_value is not None:
                # Absent field with a declared default: use the default;
                # absent field with no default is omitted entirely.
                obj[out_key] = field.default_value
        return type.create_container(obj)
    assert isinstance(type, (GraphQLScalarType, GraphQLEnumType)), "Must be input type"
    return type.parse_value(value)
def open_mfdataset(paths, chunks=None, concat_dim=_CONCAT_DIM_DEFAULT,
compat='no_conflicts', preprocess=None, engine=None,
lock=None, data_vars='all', coords='different',
autoclose=None, parallel=False, **kwargs):
"""Open multiple files as a single dataset.
Requires dask to be installed. See documentation for details on dask [1].
Attributes from the first dataset file are used for the combined dataset.
Parameters
----------
paths : str or sequence
Either a string glob in the form "path/to/my/files/*.nc" or an explicit
list of files to open. Paths can be given as strings or as pathlib
Paths.
chunks : int or dict, optional
Dictionary with keys given by dimension names and values given by chunk
sizes. In general, these should divide the dimensions of each dataset.
If int, chunk each dimension by ``chunks``.
By default, chunks will be chosen to load entire input files into
memory at once. This has a major impact on performance: please see the
full documentation for more details [2].
concat_dim : None, str, DataArray or Index, optional
Dimension to concatenate files along. This argument is passed on to
:py:func:`xarray.auto_combine` along with the dataset objects. You only
need to provide this argument if the dimension along which you want to
concatenate is not a dimension in the original datasets, e.g., if you
want to stack a collection of 2D arrays along a third dimension.
By default, xarray attempts to infer this argument by examining
component files. Set ``concat_dim=None`` explicitly to disable
concatenation.
compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts'}, optional
String indicating how to compare variables of the same name for
potential conflicts when merging:
* 'broadcast_equals': all values must be equal when variables are
broadcast against each other to ensure common dimensions.
* 'equals': all values and dimensions must be the same.
* 'identical': all values, dimensions and attributes must be the
same.
* 'no_conflicts': only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
preprocess : callable, optional
If provided, call this function on each dataset prior to concatenation.
You can find the file-name from which each dataset was loaded in
``ds.encoding['source']``.
engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio', 'cfgrib'}, \
optional
Engine to use when reading files. If not provided, the default engine
is chosen based on available dependencies, with a preference for
'netcdf4'.
lock : False or duck threading.Lock, optional
Resource lock to use when reading data from disk. Only relevant when
using dask or another form of parallelism. By default, appropriate
locks are chosen to safely read and write files with the currently
active dask scheduler.
data_vars : {'minimal', 'different', 'all' or list of str}, optional
These data variables will be concatenated together:
* 'minimal': Only data variables in which the dimension already
appears are included.
* 'different': Data variables which are not equal (ignoring
attributes) across all datasets are also concatenated (as well as
all for which dimension already appears). Beware: this option may
load the data payload of data variables into memory if they are not
already loaded.
* 'all': All data variables will be concatenated.
* list of str: The listed data variables will be concatenated, in
addition to the 'minimal' data variables.
coords : {'minimal', 'different', 'all' o list of str}, optional
These coordinate variables will be concatenated together:
* 'minimal': Only coordinates in which the dimension already appears
are included.
* 'different': Coordinates which are not equal (ignoring attributes)
across all datasets are also concatenated (as well as all for which
dimension already appears). Beware: this option may load the data
payload of coordinate variables into memory if they are not already
loaded.
* 'all': All coordinate variables will be concatenated, except
those corresponding to other dimensions.
* list of str: The listed coordinate variables will be concatenated,
in addition the 'minimal' coordinates.
parallel : bool, optional
If True, the open and preprocess steps of this function will be
performed in parallel using ``dask.delayed``. Default is False.
**kwargs : optional
Additional arguments passed on to :py:func:`xarray.open_dataset`.
Returns
-------
xarray.Dataset
Notes
-----
``open_mfdataset`` opens files with read-only access. When you modify values
of a Dataset, even one linked to files on disk, only the in-memory copy you
are manipulating in xarray is modified: the original file on disk is never
touched.
See Also
--------
auto_combine
open_dataset
References
----------
.. [1] http://xarray.pydata.org/en/stable/dask.html
.. [2] http://xarray.pydata.org/en/stable/dask.html#chunking-and-performance
""" # noqa
if isinstance(paths, str):
if is_remote_uri(paths):
raise ValueError(
'cannot do wild-card matching for paths that are remote URLs: '
'{!r}. Instead, supply paths as an explicit list of strings.'
.format(paths))
paths = sorted(glob(paths))
else:
paths = [str(p) if isinstance(p, Path) else p for p in paths]
if not paths:
raise IOError('no files to open')
# Coerce 1D input into ND to maintain backwards-compatible API until API
# for N-D combine decided
# (see https://github.com/pydata/xarray/pull/2553/#issuecomment-445892746)
if concat_dim is None or concat_dim is _CONCAT_DIM_DEFAULT:
concat_dims = concat_dim
elif not isinstance(concat_dim, list):
concat_dims = [concat_dim]
else:
concat_dims = concat_dim
infer_order_from_coords = False
# If infer_order_from_coords=True then this is unnecessary, but quick.
# If infer_order_from_coords=False then this creates a flat list which is
# easier to iterate over, while saving the originally-supplied structure
combined_ids_paths, concat_dims = _infer_concat_order_from_positions(
paths, concat_dims)
ids, paths = (
list(combined_ids_paths.keys()), list(combined_ids_paths.values()))
open_kwargs = dict(engine=engine, chunks=chunks or {}, lock=lock,
autoclose=autoclose, **kwargs)
if parallel:
import dask
# wrap the open_dataset, getattr, and preprocess with delayed
open_ = dask.delayed(open_dataset)
getattr_ = dask.delayed(getattr)
if preprocess is not None:
preprocess = dask.delayed(preprocess)
else:
open_ = open_dataset
getattr_ = getattr
datasets = [open_(p, **open_kwargs) for p in paths]
file_objs = [getattr_(ds, '_file_obj') for ds in datasets]
if preprocess is not None:
datasets = [preprocess(ds) for ds in datasets]
if parallel:
# calling compute here will return the datasets/file_objs lists,
# the underlying datasets will still be stored as dask arrays
datasets, file_objs = dask.compute(datasets, file_objs)
# Close datasets in case of a ValueError
try:
if infer_order_from_coords:
# Discard ordering because it should be redone from coordinates
ids = False
combined = _auto_combine(
datasets, concat_dims=concat_dims,
compat=compat,
data_vars=data_vars, coords=coords,
infer_order_from_coords=infer_order_from_coords,
ids=ids)
except ValueError:
for ds in datasets:
ds.close()
raise
combined._file_obj = _MultiFileCloser(file_objs)
combined.attrs = datasets[0].attrs
return combined | Open multiple files as a single dataset.
Requires dask to be installed. See documentation for details on dask [1].
Attributes from the first dataset file are used for the combined dataset.
Parameters
----------
paths : str or sequence
Either a string glob in the form "path/to/my/files/*.nc" or an explicit
list of files to open. Paths can be given as strings or as pathlib
Paths.
chunks : int or dict, optional
Dictionary with keys given by dimension names and values given by chunk
sizes. In general, these should divide the dimensions of each dataset.
If int, chunk each dimension by ``chunks``.
By default, chunks will be chosen to load entire input files into
memory at once. This has a major impact on performance: please see the
full documentation for more details [2].
concat_dim : None, str, DataArray or Index, optional
Dimension to concatenate files along. This argument is passed on to
:py:func:`xarray.auto_combine` along with the dataset objects. You only
need to provide this argument if the dimension along which you want to
concatenate is not a dimension in the original datasets, e.g., if you
want to stack a collection of 2D arrays along a third dimension.
By default, xarray attempts to infer this argument by examining
component files. Set ``concat_dim=None`` explicitly to disable
concatenation.
compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts'}, optional
String indicating how to compare variables of the same name for
potential conflicts when merging:
* 'broadcast_equals': all values must be equal when variables are
broadcast against each other to ensure common dimensions.
* 'equals': all values and dimensions must be the same.
* 'identical': all values, dimensions and attributes must be the
same.
* 'no_conflicts': only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
preprocess : callable, optional
If provided, call this function on each dataset prior to concatenation.
You can find the file-name from which each dataset was loaded in
``ds.encoding['source']``.
engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio', 'cfgrib'}, \
optional
Engine to use when reading files. If not provided, the default engine
is chosen based on available dependencies, with a preference for
'netcdf4'.
lock : False or duck threading.Lock, optional
Resource lock to use when reading data from disk. Only relevant when
using dask or another form of parallelism. By default, appropriate
locks are chosen to safely read and write files with the currently
active dask scheduler.
data_vars : {'minimal', 'different', 'all' or list of str}, optional
These data variables will be concatenated together:
* 'minimal': Only data variables in which the dimension already
appears are included.
* 'different': Data variables which are not equal (ignoring
attributes) across all datasets are also concatenated (as well as
all for which dimension already appears). Beware: this option may
load the data payload of data variables into memory if they are not
already loaded.
* 'all': All data variables will be concatenated.
* list of str: The listed data variables will be concatenated, in
addition to the 'minimal' data variables.
coords : {'minimal', 'different', 'all' o list of str}, optional
These coordinate variables will be concatenated together:
* 'minimal': Only coordinates in which the dimension already appears
are included.
* 'different': Coordinates which are not equal (ignoring attributes)
across all datasets are also concatenated (as well as all for which
dimension already appears). Beware: this option may load the data
payload of coordinate variables into memory if they are not already
loaded.
* 'all': All coordinate variables will be concatenated, except
those corresponding to other dimensions.
* list of str: The listed coordinate variables will be concatenated,
in addition the 'minimal' coordinates.
parallel : bool, optional
If True, the open and preprocess steps of this function will be
performed in parallel using ``dask.delayed``. Default is False.
**kwargs : optional
Additional arguments passed on to :py:func:`xarray.open_dataset`.
Returns
-------
xarray.Dataset
Notes
-----
``open_mfdataset`` opens files with read-only access. When you modify values
of a Dataset, even one linked to files on disk, only the in-memory copy you
are manipulating in xarray is modified: the original file on disk is never
touched.
See Also
--------
auto_combine
open_dataset
References
----------
.. [1] http://xarray.pydata.org/en/stable/dask.html
.. [2] http://xarray.pydata.org/en/stable/dask.html#chunking-and-performance | Below is the instruction that describes the task:
### Input:
Open multiple files as a single dataset.
Requires dask to be installed. See documentation for details on dask [1].
Attributes from the first dataset file are used for the combined dataset.
Parameters
----------
paths : str or sequence
Either a string glob in the form "path/to/my/files/*.nc" or an explicit
list of files to open. Paths can be given as strings or as pathlib
Paths.
chunks : int or dict, optional
Dictionary with keys given by dimension names and values given by chunk
sizes. In general, these should divide the dimensions of each dataset.
If int, chunk each dimension by ``chunks``.
By default, chunks will be chosen to load entire input files into
memory at once. This has a major impact on performance: please see the
full documentation for more details [2].
concat_dim : None, str, DataArray or Index, optional
Dimension to concatenate files along. This argument is passed on to
:py:func:`xarray.auto_combine` along with the dataset objects. You only
need to provide this argument if the dimension along which you want to
concatenate is not a dimension in the original datasets, e.g., if you
want to stack a collection of 2D arrays along a third dimension.
By default, xarray attempts to infer this argument by examining
component files. Set ``concat_dim=None`` explicitly to disable
concatenation.
compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts'}, optional
String indicating how to compare variables of the same name for
potential conflicts when merging:
* 'broadcast_equals': all values must be equal when variables are
broadcast against each other to ensure common dimensions.
* 'equals': all values and dimensions must be the same.
* 'identical': all values, dimensions and attributes must be the
same.
* 'no_conflicts': only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
preprocess : callable, optional
If provided, call this function on each dataset prior to concatenation.
You can find the file-name from which each dataset was loaded in
``ds.encoding['source']``.
engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio', 'cfgrib'}, \
optional
Engine to use when reading files. If not provided, the default engine
is chosen based on available dependencies, with a preference for
'netcdf4'.
lock : False or duck threading.Lock, optional
Resource lock to use when reading data from disk. Only relevant when
using dask or another form of parallelism. By default, appropriate
locks are chosen to safely read and write files with the currently
active dask scheduler.
data_vars : {'minimal', 'different', 'all' or list of str}, optional
These data variables will be concatenated together:
* 'minimal': Only data variables in which the dimension already
appears are included.
* 'different': Data variables which are not equal (ignoring
attributes) across all datasets are also concatenated (as well as
all for which dimension already appears). Beware: this option may
load the data payload of data variables into memory if they are not
already loaded.
* 'all': All data variables will be concatenated.
* list of str: The listed data variables will be concatenated, in
addition to the 'minimal' data variables.
coords : {'minimal', 'different', 'all' o list of str}, optional
These coordinate variables will be concatenated together:
* 'minimal': Only coordinates in which the dimension already appears
are included.
* 'different': Coordinates which are not equal (ignoring attributes)
across all datasets are also concatenated (as well as all for which
dimension already appears). Beware: this option may load the data
payload of coordinate variables into memory if they are not already
loaded.
* 'all': All coordinate variables will be concatenated, except
those corresponding to other dimensions.
* list of str: The listed coordinate variables will be concatenated,
in addition the 'minimal' coordinates.
parallel : bool, optional
If True, the open and preprocess steps of this function will be
performed in parallel using ``dask.delayed``. Default is False.
**kwargs : optional
Additional arguments passed on to :py:func:`xarray.open_dataset`.
Returns
-------
xarray.Dataset
Notes
-----
``open_mfdataset`` opens files with read-only access. When you modify values
of a Dataset, even one linked to files on disk, only the in-memory copy you
are manipulating in xarray is modified: the original file on disk is never
touched.
See Also
--------
auto_combine
open_dataset
References
----------
.. [1] http://xarray.pydata.org/en/stable/dask.html
.. [2] http://xarray.pydata.org/en/stable/dask.html#chunking-and-performance
### Response:
def open_mfdataset(paths, chunks=None, concat_dim=_CONCAT_DIM_DEFAULT,
                   compat='no_conflicts', preprocess=None, engine=None,
                   lock=None, data_vars='all', coords='different',
                   autoclose=None, parallel=False, **kwargs):
    """Open multiple files as a single dataset.
    Requires dask to be installed. See documentation for details on dask [1].
    Attributes from the first dataset file are used for the combined dataset.
    Parameters
    ----------
    paths : str or sequence
        Either a string glob in the form "path/to/my/files/*.nc" or an explicit
        list of files to open. Paths can be given as strings or as pathlib
        Paths.
    chunks : int or dict, optional
        Dictionary with keys given by dimension names and values given by chunk
        sizes. In general, these should divide the dimensions of each dataset.
        If int, chunk each dimension by ``chunks``.
        By default, chunks will be chosen to load entire input files into
        memory at once. This has a major impact on performance: please see the
        full documentation for more details [2].
    concat_dim : None, str, DataArray or Index, optional
        Dimension to concatenate files along. This argument is passed on to
        :py:func:`xarray.auto_combine` along with the dataset objects. You only
        need to provide this argument if the dimension along which you want to
        concatenate is not a dimension in the original datasets, e.g., if you
        want to stack a collection of 2D arrays along a third dimension.
        By default, xarray attempts to infer this argument by examining
        component files. Set ``concat_dim=None`` explicitly to disable
        concatenation.
    compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts'}, optional
        String indicating how to compare variables of the same name for
        potential conflicts when merging:
        * 'broadcast_equals': all values must be equal when variables are
          broadcast against each other to ensure common dimensions.
        * 'equals': all values and dimensions must be the same.
        * 'identical': all values, dimensions and attributes must be the
          same.
        * 'no_conflicts': only values which are not null in both datasets
          must be equal. The returned dataset then contains the combination
          of all non-null values.
    preprocess : callable, optional
        If provided, call this function on each dataset prior to concatenation.
        You can find the file-name from which each dataset was loaded in
        ``ds.encoding['source']``.
    engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio', 'cfgrib'}, \
        optional
        Engine to use when reading files. If not provided, the default engine
        is chosen based on available dependencies, with a preference for
        'netcdf4'.
    lock : False or duck threading.Lock, optional
        Resource lock to use when reading data from disk. Only relevant when
        using dask or another form of parallelism. By default, appropriate
        locks are chosen to safely read and write files with the currently
        active dask scheduler.
    data_vars : {'minimal', 'different', 'all' or list of str}, optional
        These data variables will be concatenated together:
        * 'minimal': Only data variables in which the dimension already
          appears are included.
        * 'different': Data variables which are not equal (ignoring
          attributes) across all datasets are also concatenated (as well as
          all for which dimension already appears). Beware: this option may
          load the data payload of data variables into memory if they are not
          already loaded.
        * 'all': All data variables will be concatenated.
        * list of str: The listed data variables will be concatenated, in
          addition to the 'minimal' data variables.
    coords : {'minimal', 'different', 'all' o list of str}, optional
        These coordinate variables will be concatenated together:
        * 'minimal': Only coordinates in which the dimension already appears
          are included.
        * 'different': Coordinates which are not equal (ignoring attributes)
          across all datasets are also concatenated (as well as all for which
          dimension already appears). Beware: this option may load the data
          payload of coordinate variables into memory if they are not already
          loaded.
        * 'all': All coordinate variables will be concatenated, except
          those corresponding to other dimensions.
        * list of str: The listed coordinate variables will be concatenated,
          in addition the 'minimal' coordinates.
    parallel : bool, optional
        If True, the open and preprocess steps of this function will be
        performed in parallel using ``dask.delayed``. Default is False.
    **kwargs : optional
        Additional arguments passed on to :py:func:`xarray.open_dataset`.
    Returns
    -------
    xarray.Dataset
    Notes
    -----
    ``open_mfdataset`` opens files with read-only access. When you modify values
    of a Dataset, even one linked to files on disk, only the in-memory copy you
    are manipulating in xarray is modified: the original file on disk is never
    touched.
    See Also
    --------
    auto_combine
    open_dataset
    References
    ----------
    .. [1] http://xarray.pydata.org/en/stable/dask.html
    .. [2] http://xarray.pydata.org/en/stable/dask.html#chunking-and-performance
    """ # noqa
    if isinstance(paths, str):
        if is_remote_uri(paths):
            raise ValueError(
                'cannot do wild-card matching for paths that are remote URLs: '
                '{!r}. Instead, supply paths as an explicit list of strings.'
                .format(paths))
        # Expand the glob pattern; sorting gives a deterministic file order
        paths = sorted(glob(paths))
    else:
        # Normalize pathlib.Path entries to plain strings
        paths = [str(p) if isinstance(p, Path) else p for p in paths]
    if not paths:
        raise IOError('no files to open')
    # Coerce 1D input into ND to maintain backwards-compatible API until API
    # for N-D combine decided
    # (see https://github.com/pydata/xarray/pull/2553/#issuecomment-445892746)
    if concat_dim is None or concat_dim is _CONCAT_DIM_DEFAULT:
        concat_dims = concat_dim
    elif not isinstance(concat_dim, list):
        concat_dims = [concat_dim]
    else:
        concat_dims = concat_dim
    infer_order_from_coords = False
    # If infer_order_from_coords=True then this is unnecessary, but quick.
    # If infer_order_from_coords=False then this creates a flat list which is
    # easier to iterate over, while saving the originally-supplied structure
    combined_ids_paths, concat_dims = _infer_concat_order_from_positions(
        paths, concat_dims)
    ids, paths = (
        list(combined_ids_paths.keys()), list(combined_ids_paths.values()))
    open_kwargs = dict(engine=engine, chunks=chunks or {}, lock=lock,
                       autoclose=autoclose, **kwargs)
    if parallel:
        import dask
        # wrap the open_dataset, getattr, and preprocess with delayed
        open_ = dask.delayed(open_dataset)
        getattr_ = dask.delayed(getattr)
        if preprocess is not None:
            preprocess = dask.delayed(preprocess)
    else:
        open_ = open_dataset
        getattr_ = getattr
    datasets = [open_(p, **open_kwargs) for p in paths]
    # Keep handles on the underlying file objects so they can all be
    # closed together once the combined dataset is closed
    file_objs = [getattr_(ds, '_file_obj') for ds in datasets]
    if preprocess is not None:
        datasets = [preprocess(ds) for ds in datasets]
    if parallel:
        # calling compute here will return the datasets/file_objs lists,
        # the underlying datasets will still be stored as dask arrays
        datasets, file_objs = dask.compute(datasets, file_objs)
    # Close datasets in case of a ValueError
    try:
        if infer_order_from_coords:
            # Discard ordering because it should be redone from coordinates
            ids = False
        combined = _auto_combine(
            datasets, concat_dims=concat_dims,
            compat=compat,
            data_vars=data_vars, coords=coords,
            infer_order_from_coords=infer_order_from_coords,
            ids=ids)
    except ValueError:
        for ds in datasets:
            ds.close()
        raise
    combined._file_obj = _MultiFileCloser(file_objs)
    # Per the docstring: attributes come from the first input dataset
    combined.attrs = datasets[0].attrs
    return combined
def synchronized(lock):
"""
Synchronization decorator; provide thread-safe locking on a function
http://code.activestate.com/recipes/465057/
"""
def wrap(f):
def synchronize(*args, **kw):
lock.acquire()
try:
return f(*args, **kw)
finally:
lock.release()
return synchronize
return wrap | Synchronization decorator; provide thread-safe locking on a function
http://code.activestate.com/recipes/465057/ | Below is the the instruction that describes the task:
### Input:
Synchronization decorator; provide thread-safe locking on a function
http://code.activestate.com/recipes/465057/
### Response:
def synchronized(lock):
"""
Synchronization decorator; provide thread-safe locking on a function
http://code.activestate.com/recipes/465057/
"""
def wrap(f):
def synchronize(*args, **kw):
lock.acquire()
try:
return f(*args, **kw)
finally:
lock.release()
return synchronize
return wrap |
def rm(self, filename):
""" Delete a file from the server.
:param filename: the file to be deleted.
:type filename: string
"""
try:
self._ftp.delete(filename)
except error_perm: # target is either a directory
# either it does not exist
try:
current_folder = self._ftp.pwd()
self.cd(filename)
except error_perm:
print('550 Delete operation failed %s '
'does not exist!' % (filename,))
else:
self.cd(current_folder)
print('550 Delete operation failed %s '
'is a folder. Use rmdir function '
'to delete it.' % (filename,)) | Delete a file from the server.
:param filename: the file to be deleted.
:type filename: string | Below is the the instruction that describes the task:
### Input:
Delete a file from the server.
:param filename: the file to be deleted.
:type filename: string
### Response:
def rm(self, filename):
""" Delete a file from the server.
:param filename: the file to be deleted.
:type filename: string
"""
try:
self._ftp.delete(filename)
except error_perm: # target is either a directory
# either it does not exist
try:
current_folder = self._ftp.pwd()
self.cd(filename)
except error_perm:
print('550 Delete operation failed %s '
'does not exist!' % (filename,))
else:
self.cd(current_folder)
print('550 Delete operation failed %s '
'is a folder. Use rmdir function '
'to delete it.' % (filename,)) |
def schedule(self, year):
"""Gets schedule information for a team-season.
:year: The year for which we want the schedule.
:returns: DataFrame of schedule information.
"""
doc = self.get_year_doc('{}_games'.format(year))
table = doc('table#games')
df = sportsref.utils.parse_table(table)
return df | Gets schedule information for a team-season.
:year: The year for which we want the schedule.
:returns: DataFrame of schedule information. | Below is the the instruction that describes the task:
### Input:
Gets schedule information for a team-season.
:year: The year for which we want the schedule.
:returns: DataFrame of schedule information.
### Response:
def schedule(self, year):
"""Gets schedule information for a team-season.
:year: The year for which we want the schedule.
:returns: DataFrame of schedule information.
"""
doc = self.get_year_doc('{}_games'.format(year))
table = doc('table#games')
df = sportsref.utils.parse_table(table)
return df |
def populate(self, priority, address, rtr, data):
"""
:return: None
"""
assert isinstance(data, bytes)
self.needs_low_priority(priority)
self.needs_no_rtr(rtr)
self.needs_data(data, 4)
self.set_attributes(priority, address, rtr)
self._day = data[0]
self._mon = data[1]
self._year = ((data[2] << 8) + data[3]) | :return: None | Below is the the instruction that describes the task:
### Input:
:return: None
### Response:
def populate(self, priority, address, rtr, data):
"""
:return: None
"""
assert isinstance(data, bytes)
self.needs_low_priority(priority)
self.needs_no_rtr(rtr)
self.needs_data(data, 4)
self.set_attributes(priority, address, rtr)
self._day = data[0]
self._mon = data[1]
self._year = ((data[2] << 8) + data[3]) |
def GetPreviousNonBlankLine(clean_lines, linenum):
"""Return the most recent non-blank line and its line number.
Args:
clean_lines: A CleansedLines instance containing the file contents.
linenum: The number of the line to check.
Returns:
A tuple with two elements. The first element is the contents of the last
non-blank line before the current line, or the empty string if this is the
first non-blank line. The second is the line number of that line, or -1
if this is the first non-blank line.
"""
prevlinenum = linenum - 1
while prevlinenum >= 0:
prevline = clean_lines.elided[prevlinenum]
if not IsBlankLine(prevline): # if not a blank line...
return (prevline, prevlinenum)
prevlinenum -= 1
return ('', -1) | Return the most recent non-blank line and its line number.
Args:
clean_lines: A CleansedLines instance containing the file contents.
linenum: The number of the line to check.
Returns:
A tuple with two elements. The first element is the contents of the last
non-blank line before the current line, or the empty string if this is the
first non-blank line. The second is the line number of that line, or -1
if this is the first non-blank line. | Below is the the instruction that describes the task:
### Input:
Return the most recent non-blank line and its line number.
Args:
clean_lines: A CleansedLines instance containing the file contents.
linenum: The number of the line to check.
Returns:
A tuple with two elements. The first element is the contents of the last
non-blank line before the current line, or the empty string if this is the
first non-blank line. The second is the line number of that line, or -1
if this is the first non-blank line.
### Response:
def GetPreviousNonBlankLine(clean_lines, linenum):
"""Return the most recent non-blank line and its line number.
Args:
clean_lines: A CleansedLines instance containing the file contents.
linenum: The number of the line to check.
Returns:
A tuple with two elements. The first element is the contents of the last
non-blank line before the current line, or the empty string if this is the
first non-blank line. The second is the line number of that line, or -1
if this is the first non-blank line.
"""
prevlinenum = linenum - 1
while prevlinenum >= 0:
prevline = clean_lines.elided[prevlinenum]
if not IsBlankLine(prevline): # if not a blank line...
return (prevline, prevlinenum)
prevlinenum -= 1
return ('', -1) |
def memoize(Class, *args, **kwargs):
'''
Memoize/record a function inside this vlermv. ::
@Vlermv.cache('~/.http')
def get(url):
return requests.get(url, auth = ('username', 'password'))
The args and kwargs get passed to the Vlermv with some slight changes.
Here are the changes.
First, the default ``key_transformer`` is the tuple key_transformer
rather than the simple key_transformer.
Second, it is valid for cache to be called without arguments.
Vlermv would ordinarily fail if no arguments were passed to it.
If you pass no arguments to cache, the Vlermv directory argument
(the one required argument) will be set to the name of the function.
Third, you are more likely to use the ``cache_exceptions`` keyword
argument; see :py:class:`~vlermv.Vlermv` for documentation on that.
'''
def decorator(func):
if len(args) == 0:
if hasattr(func, '__name__'):
_args = (func.__name__,)
else:
raise ValueError('You must specify the location to store the vlermv.')
else:
_args = args
v = Class(*_args, **kwargs)
v.func = func
return v
return decorator | Memoize/record a function inside this vlermv. ::
@Vlermv.cache('~/.http')
def get(url):
return requests.get(url, auth = ('username', 'password'))
The args and kwargs get passed to the Vlermv with some slight changes.
Here are the changes.
First, the default ``key_transformer`` is the tuple key_transformer
rather than the simple key_transformer.
Second, it is valid for cache to be called without arguments.
Vlermv would ordinarily fail if no arguments were passed to it.
If you pass no arguments to cache, the Vlermv directory argument
(the one required argument) will be set to the name of the function.
Third, you are more likely to use the ``cache_exceptions`` keyword
argument; see :py:class:`~vlermv.Vlermv` for documentation on that. | Below is the the instruction that describes the task:
### Input:
Memoize/record a function inside this vlermv. ::
@Vlermv.cache('~/.http')
def get(url):
return requests.get(url, auth = ('username', 'password'))
The args and kwargs get passed to the Vlermv with some slight changes.
Here are the changes.
First, the default ``key_transformer`` is the tuple key_transformer
rather than the simple key_transformer.
Second, it is valid for cache to be called without arguments.
Vlermv would ordinarily fail if no arguments were passed to it.
If you pass no arguments to cache, the Vlermv directory argument
(the one required argument) will be set to the name of the function.
Third, you are more likely to use the ``cache_exceptions`` keyword
argument; see :py:class:`~vlermv.Vlermv` for documentation on that.
### Response:
def memoize(Class, *args, **kwargs):
'''
Memoize/record a function inside this vlermv. ::
@Vlermv.cache('~/.http')
def get(url):
return requests.get(url, auth = ('username', 'password'))
The args and kwargs get passed to the Vlermv with some slight changes.
Here are the changes.
First, the default ``key_transformer`` is the tuple key_transformer
rather than the simple key_transformer.
Second, it is valid for cache to be called without arguments.
Vlermv would ordinarily fail if no arguments were passed to it.
If you pass no arguments to cache, the Vlermv directory argument
(the one required argument) will be set to the name of the function.
Third, you are more likely to use the ``cache_exceptions`` keyword
argument; see :py:class:`~vlermv.Vlermv` for documentation on that.
'''
def decorator(func):
if len(args) == 0:
if hasattr(func, '__name__'):
_args = (func.__name__,)
else:
raise ValueError('You must specify the location to store the vlermv.')
else:
_args = args
v = Class(*_args, **kwargs)
v.func = func
return v
return decorator |
def normalize_dsn_or_dict(d):
"""Clean up a database DSN, or dict version of a DSN, returning both the cleaned DSN and dict version"""
if isinstance(d, dict):
try:
# Convert from an AttrDict to a real dict
d = d.to_dict()
except AttributeError:
pass # Already a real dict
config = d
dsn = None
elif isinstance(d, string_types):
config = None
dsn = d
else:
raise ConfigurationError("Can't deal with database config '{}' type '{}' ".format(d, type(d)))
if dsn:
if dsn.startswith('sqlite') or dsn.startswith('spatialite'):
driver, path = dsn.split(':', 1)
slashes, path = path[:2], path[2:]
if slashes != '//':
raise ConfigurationError("Sqlite DSNs must start with at least 2 slashes")
if len(path) == 1 and path[0] == '/':
raise ConfigurationError("Sqlite DSNs can't have only 3 slashes in path")
if len(path) > 1 and path[0] != '/':
raise ConfigurationError("Sqlite DSNs with a path must have 3 or 4 slashes.")
path = path[1:]
config = dict(
server=None,
username=None,
password=None,
driver=driver,
dbname=path
)
else:
d = parse_url_to_dict(dsn)
config = dict(
server=d['hostname'],
dbname=d['path'].strip('/'),
driver=d['scheme'],
password=d.get('password', None),
username=d.get('username', None)
)
else:
up = d.get('username', '') or ''
if d.get('password'):
up += ':' + d.get('password', '')
if up:
up += '@'
if up and not d.get('server'):
raise ConfigurationError("Can't construct a DSN with a username or password without a hostname")
host_part = up + d.get('server', '') if d.get('server') else ''
if d.get('dbname', False):
path_part = '/' + d.get('dbname')
# if d['driver'] in ('sqlite3', 'sqlite', 'spatialite'):
# path_part = '/' + path_part
else:
path_part = '' # w/ no dbname, Sqlite should use memory, which required 2 slash. Rel dir is 3, abs dir is 4
dsn = '{}://{}{}'.format(d['driver'], host_part, path_part)
return config, dsn | Clean up a database DSN, or dict version of a DSN, returning both the cleaned DSN and dict version | Below is the the instruction that describes the task:
### Input:
Clean up a database DSN, or dict version of a DSN, returning both the cleaned DSN and dict version
### Response:
def normalize_dsn_or_dict(d):
"""Clean up a database DSN, or dict version of a DSN, returning both the cleaned DSN and dict version"""
if isinstance(d, dict):
try:
# Convert from an AttrDict to a real dict
d = d.to_dict()
except AttributeError:
pass # Already a real dict
config = d
dsn = None
elif isinstance(d, string_types):
config = None
dsn = d
else:
raise ConfigurationError("Can't deal with database config '{}' type '{}' ".format(d, type(d)))
if dsn:
if dsn.startswith('sqlite') or dsn.startswith('spatialite'):
driver, path = dsn.split(':', 1)
slashes, path = path[:2], path[2:]
if slashes != '//':
raise ConfigurationError("Sqlite DSNs must start with at least 2 slashes")
if len(path) == 1 and path[0] == '/':
raise ConfigurationError("Sqlite DSNs can't have only 3 slashes in path")
if len(path) > 1 and path[0] != '/':
raise ConfigurationError("Sqlite DSNs with a path must have 3 or 4 slashes.")
path = path[1:]
config = dict(
server=None,
username=None,
password=None,
driver=driver,
dbname=path
)
else:
d = parse_url_to_dict(dsn)
config = dict(
server=d['hostname'],
dbname=d['path'].strip('/'),
driver=d['scheme'],
password=d.get('password', None),
username=d.get('username', None)
)
else:
up = d.get('username', '') or ''
if d.get('password'):
up += ':' + d.get('password', '')
if up:
up += '@'
if up and not d.get('server'):
raise ConfigurationError("Can't construct a DSN with a username or password without a hostname")
host_part = up + d.get('server', '') if d.get('server') else ''
if d.get('dbname', False):
path_part = '/' + d.get('dbname')
# if d['driver'] in ('sqlite3', 'sqlite', 'spatialite'):
# path_part = '/' + path_part
else:
path_part = '' # w/ no dbname, Sqlite should use memory, which required 2 slash. Rel dir is 3, abs dir is 4
dsn = '{}://{}{}'.format(d['driver'], host_part, path_part)
return config, dsn |
def _chart_class(self, df, chart_type, **kwargs):
"""
Get the right chart class from a string
"""
if chart_type == "bar":
return Chart(df).mark_bar(**kwargs)
elif chart_type == "circle":
return Chart(df).mark_circle(**kwargs)
elif chart_type == "line":
return Chart(df).mark_line(**kwargs)
elif chart_type == "point":
return Chart(df).mark_point(**kwargs)
elif chart_type == "area":
return Chart(df).mark_area(**kwargs)
elif chart_type == "tick":
return Chart(df).mark_tick(**kwargs)
elif chart_type == "text":
return Chart(df).mark_text(**kwargs)
elif chart_type == "square":
return Chart(df).mark_square(**kwargs)
elif chart_type == "rule":
return Chart(df).mark_rule(**kwargs)
return None | Get the right chart class from a string | Below is the the instruction that describes the task:
### Input:
Get the right chart class from a string
### Response:
def _chart_class(self, df, chart_type, **kwargs):
"""
Get the right chart class from a string
"""
if chart_type == "bar":
return Chart(df).mark_bar(**kwargs)
elif chart_type == "circle":
return Chart(df).mark_circle(**kwargs)
elif chart_type == "line":
return Chart(df).mark_line(**kwargs)
elif chart_type == "point":
return Chart(df).mark_point(**kwargs)
elif chart_type == "area":
return Chart(df).mark_area(**kwargs)
elif chart_type == "tick":
return Chart(df).mark_tick(**kwargs)
elif chart_type == "text":
return Chart(df).mark_text(**kwargs)
elif chart_type == "square":
return Chart(df).mark_square(**kwargs)
elif chart_type == "rule":
return Chart(df).mark_rule(**kwargs)
return None |
def _has_valid_positional_setitem_indexer(self, indexer):
""" validate that an positional indexer cannot enlarge its target
will raise if needed, does not modify the indexer externally
"""
if isinstance(indexer, dict):
raise IndexError("{0} cannot enlarge its target object"
.format(self.name))
else:
if not isinstance(indexer, tuple):
indexer = self._tuplify(indexer)
for ax, i in zip(self.obj.axes, indexer):
if isinstance(i, slice):
# should check the stop slice?
pass
elif is_list_like_indexer(i):
# should check the elements?
pass
elif is_integer(i):
if i >= len(ax):
raise IndexError("{name} cannot enlarge its target "
"object".format(name=self.name))
elif isinstance(i, dict):
raise IndexError("{name} cannot enlarge its target object"
.format(name=self.name))
return True | validate that an positional indexer cannot enlarge its target
will raise if needed, does not modify the indexer externally | Below is the the instruction that describes the task:
### Input:
validate that an positional indexer cannot enlarge its target
will raise if needed, does not modify the indexer externally
### Response:
def _has_valid_positional_setitem_indexer(self, indexer):
""" validate that an positional indexer cannot enlarge its target
will raise if needed, does not modify the indexer externally
"""
if isinstance(indexer, dict):
raise IndexError("{0} cannot enlarge its target object"
.format(self.name))
else:
if not isinstance(indexer, tuple):
indexer = self._tuplify(indexer)
for ax, i in zip(self.obj.axes, indexer):
if isinstance(i, slice):
# should check the stop slice?
pass
elif is_list_like_indexer(i):
# should check the elements?
pass
elif is_integer(i):
if i >= len(ax):
raise IndexError("{name} cannot enlarge its target "
"object".format(name=self.name))
elif isinstance(i, dict):
raise IndexError("{name} cannot enlarge its target object"
.format(name=self.name))
return True |
def strip_suffix(s, suffix, strict=False):
"""Removes the suffix, if it's there, otherwise returns input string unchanged.
If strict is True, also ensures the suffix was present"""
if s.endswith(suffix):
return s[: len(s) - len(suffix)]
elif strict:
raise WimpyError("string doesn't end with suffix")
return s | Removes the suffix, if it's there, otherwise returns input string unchanged.
If strict is True, also ensures the suffix was present | Below is the the instruction that describes the task:
### Input:
Removes the suffix, if it's there, otherwise returns input string unchanged.
If strict is True, also ensures the suffix was present
### Response:
def strip_suffix(s, suffix, strict=False):
"""Removes the suffix, if it's there, otherwise returns input string unchanged.
If strict is True, also ensures the suffix was present"""
if s.endswith(suffix):
return s[: len(s) - len(suffix)]
elif strict:
raise WimpyError("string doesn't end with suffix")
return s |
def write_edge (self, node):
"""Write edge from parent to node."""
source = dotquote(self.nodes[node["parent_url"]]["label"])
target = dotquote(node["label"])
self.writeln(u' "%s" -> "%s" [' % (source, target))
self.writeln(u' label="%s",' % dotquote(node["edge"]))
if self.has_part("result"):
self.writeln(u" valid=%d," % node["valid"])
self.writeln(u" ];") | Write edge from parent to node. | Below is the the instruction that describes the task:
### Input:
Write edge from parent to node.
### Response:
def write_edge (self, node):
"""Write edge from parent to node."""
source = dotquote(self.nodes[node["parent_url"]]["label"])
target = dotquote(node["label"])
self.writeln(u' "%s" -> "%s" [' % (source, target))
self.writeln(u' label="%s",' % dotquote(node["edge"]))
if self.has_part("result"):
self.writeln(u" valid=%d," % node["valid"])
self.writeln(u" ];") |
def marshall(self, registry):
"""Returns bytes"""
result = b""
for i in registry.get_all():
# Each message needs to be prefixed with a varint with the size of
# the message (MetrycType)
# https://github.com/matttproud/golang_protobuf_extensions/blob/master/ext/encode.go
# http://zombietetris.de/blog/building-your-own-writedelimitedto-for-python-protobuf/
body = self.marshall_collector(i).SerializeToString()
msg = encoder._VarintBytes(len(body)) + body
result += msg
return result | Returns bytes | Below is the the instruction that describes the task:
### Input:
Returns bytes
### Response:
def marshall(self, registry):
"""Returns bytes"""
result = b""
for i in registry.get_all():
# Each message needs to be prefixed with a varint with the size of
# the message (MetrycType)
# https://github.com/matttproud/golang_protobuf_extensions/blob/master/ext/encode.go
# http://zombietetris.de/blog/building-your-own-writedelimitedto-for-python-protobuf/
body = self.marshall_collector(i).SerializeToString()
msg = encoder._VarintBytes(len(body)) + body
result += msg
return result |
def on_ok(self):
""" Perform restoration on the backup file selected """
if self.dir_select.value:
npyscreen.notify_wait('In the process of restoring',
title='Restoring...')
status = self.restore(self.dirs[self.dir_select.value[0]])
if status[0]:
npyscreen.notify_confirm('Status of restore:\n' +
status[1])
else:
npyscreen.notify_confirm(status[1])
self.quit()
else:
npyscreen.notify_confirm('Choose a version to restore from') | Perform restoration on the backup file selected | Below is the the instruction that describes the task:
### Input:
Perform restoration on the backup file selected
### Response:
def on_ok(self):
""" Perform restoration on the backup file selected """
if self.dir_select.value:
npyscreen.notify_wait('In the process of restoring',
title='Restoring...')
status = self.restore(self.dirs[self.dir_select.value[0]])
if status[0]:
npyscreen.notify_confirm('Status of restore:\n' +
status[1])
else:
npyscreen.notify_confirm(status[1])
self.quit()
else:
npyscreen.notify_confirm('Choose a version to restore from') |
def _parse_info_snpeff(self, info):
"""
Specialized INFO field parser for SnpEff ANN fields.
Requires self._snpeff_ann_fields to be set.
"""
ann = info.pop('ANN', []) or []
# Overwrite the existing ANN with something parsed
# Split on '|', merge with the ANN keys parsed above.
# Ensure empty values are None rather than empty string.
items = []
for a in ann:
# For multi-allelic records, we may have already
# processed ANN. If so, quit now.
if isinstance(a, dict):
info['ANN'] = ann
return info
values = [i or None for i in a.split('|')]
item = dict(zip(self._snpeff_ann_fields, values))
# Further split the Annotation field by '&'
if item.get('Annotation'):
item['Annotation'] = item['Annotation'].split('&')
items.append(item)
info['ANN'] = items
return info | Specialized INFO field parser for SnpEff ANN fields.
Requires self._snpeff_ann_fields to be set. | Below is the the instruction that describes the task:
### Input:
Specialized INFO field parser for SnpEff ANN fields.
Requires self._snpeff_ann_fields to be set.
### Response:
def _parse_info_snpeff(self, info):
"""
Specialized INFO field parser for SnpEff ANN fields.
Requires self._snpeff_ann_fields to be set.
"""
ann = info.pop('ANN', []) or []
# Overwrite the existing ANN with something parsed
# Split on '|', merge with the ANN keys parsed above.
# Ensure empty values are None rather than empty string.
items = []
for a in ann:
# For multi-allelic records, we may have already
# processed ANN. If so, quit now.
if isinstance(a, dict):
info['ANN'] = ann
return info
values = [i or None for i in a.split('|')]
item = dict(zip(self._snpeff_ann_fields, values))
# Further split the Annotation field by '&'
if item.get('Annotation'):
item['Annotation'] = item['Annotation'].split('&')
items.append(item)
info['ANN'] = items
return info |
def get_docargs(self, args=None, prt=None):
"""Pare down docopt. Return a minimal dictionary and a set containing runtime arg values."""
# docargs = self.objdoc.get_docargs(args, exp_letters=set(['o', 't', 'p', 'c']))
docargs = self.objdoc.get_docargs(args, prt)
self._chk_docopts(docargs)
return docargs | Pare down docopt. Return a minimal dictionary and a set containing runtime arg values. | Below is the the instruction that describes the task:
### Input:
Pare down docopt. Return a minimal dictionary and a set containing runtime arg values.
### Response:
def get_docargs(self, args=None, prt=None):
"""Pare down docopt. Return a minimal dictionary and a set containing runtime arg values."""
# docargs = self.objdoc.get_docargs(args, exp_letters=set(['o', 't', 'p', 'c']))
docargs = self.objdoc.get_docargs(args, prt)
self._chk_docopts(docargs)
return docargs |
def evaluate_script(self):
"""
Evaluates current **Script_Editor_tabWidget** Widget tab Model editor content
into the interactive console.
:return: Method success.
:rtype: bool
"""
editor = self.get_current_editor()
if not editor:
return False
LOGGER.debug("> Evaluating 'Script Editor' content.")
if self.evaluate_code(foundations.strings.to_string(editor.toPlainText().toUtf8())):
self.ui_refresh.emit()
return True | Evaluates current **Script_Editor_tabWidget** Widget tab Model editor content
into the interactive console.
:return: Method success.
:rtype: bool | Below is the the instruction that describes the task:
### Input:
Evaluates current **Script_Editor_tabWidget** Widget tab Model editor content
into the interactive console.
:return: Method success.
:rtype: bool
### Response:
def evaluate_script(self):
"""
Evaluates current **Script_Editor_tabWidget** Widget tab Model editor content
into the interactive console.
:return: Method success.
:rtype: bool
"""
editor = self.get_current_editor()
if not editor:
return False
LOGGER.debug("> Evaluating 'Script Editor' content.")
if self.evaluate_code(foundations.strings.to_string(editor.toPlainText().toUtf8())):
self.ui_refresh.emit()
return True |
def get_arguments(self):
""" Gets the arguments from the command line. """
parser = argparse.ArgumentParser(
description='Downloads images from given URL')
parser.add_argument('url2scrape', nargs=1, help="URL to scrape")
parser.add_argument('-m', '--max-images', type=int, default=None,
help="Limit on number of images\n")
parser.add_argument('-s', '--save-dir', type=str, default="images",
help="Directory in which images should be saved")
parser.add_argument('-g', '--injected', help="Scrape injected images",
action="store_true")
parser.add_argument('--proxy-server', type=str, default=None,
help="Proxy server to use")
parser.add_argument('--min-filesize', type=int, default=0,
help="Limit on size of image in bytes")
parser.add_argument('--max-filesize', type=int, default=100000000,
help="Limit on size of image in bytes")
parser.add_argument('--dump-urls', default=False,
help="Print the URLs of the images",
action="store_true")
parser.add_argument('--formats', nargs="*", default=None,
help="Specify formats in a list without any separator.\
This argument must be after the URL.")
parser.add_argument('--scrape-reverse', default=False,
help="Scrape the images in reverse order",
action="store_true")
parser.add_argument('--filename-pattern', type=str, default=None,
help="Only scrape images with filenames that\
match the given regex pattern")
parser.add_argument('--nthreads', type=int, default=10,
help="The number of threads to use when downloading images.")
args = parser.parse_args()
self.url = args.url2scrape[0]
if not re.match(r'^[a-zA-Z]+://', self.url):
self.url = 'http://' + self.url
self.no_to_download = args.max_images
save_dir = args.save_dir + '_{uri.netloc}'.format(
uri=urlparse(self.url))
if args.save_dir != "images":
save_dir = args.save_dir
self.download_path = os.path.join(os.getcwd(), save_dir)
self.use_ghost = args.injected
self.format_list = args.formats if args.formats else [
"jpg", "png", "gif", "svg", "jpeg"]
self.min_filesize = args.min_filesize
self.max_filesize = args.max_filesize
self.dump_urls = args.dump_urls
self.proxy_url = args.proxy_server
self.proxies = {}
if self.proxy_url:
if not re.match(r'^[a-zA-Z]+://', self.proxy_url):
self.proxy_url = 'http://' + self.proxy_url
proxy_start_length = self.proxy_url.find("://") + 3
self.proxies = {
self.proxy_url[:(proxy_start_length - 3)]: self.proxy_url
}
self.scrape_reverse = args.scrape_reverse
self.filename_pattern = args.filename_pattern
self.nthreads = args.nthreads
return (self.url, self.no_to_download, self.format_list,
self.download_path, self.min_filesize, self.max_filesize,
self.dump_urls, self.scrape_reverse, self.use_ghost, self.filename_pattern) | Gets the arguments from the command line. | Below is the the instruction that describes the task:
### Input:
Gets the arguments from the command line.
### Response:
def get_arguments(self):
""" Gets the arguments from the command line. """
parser = argparse.ArgumentParser(
description='Downloads images from given URL')
parser.add_argument('url2scrape', nargs=1, help="URL to scrape")
parser.add_argument('-m', '--max-images', type=int, default=None,
help="Limit on number of images\n")
parser.add_argument('-s', '--save-dir', type=str, default="images",
help="Directory in which images should be saved")
parser.add_argument('-g', '--injected', help="Scrape injected images",
action="store_true")
parser.add_argument('--proxy-server', type=str, default=None,
help="Proxy server to use")
parser.add_argument('--min-filesize', type=int, default=0,
help="Limit on size of image in bytes")
parser.add_argument('--max-filesize', type=int, default=100000000,
help="Limit on size of image in bytes")
parser.add_argument('--dump-urls', default=False,
help="Print the URLs of the images",
action="store_true")
parser.add_argument('--formats', nargs="*", default=None,
help="Specify formats in a list without any separator.\
This argument must be after the URL.")
parser.add_argument('--scrape-reverse', default=False,
help="Scrape the images in reverse order",
action="store_true")
parser.add_argument('--filename-pattern', type=str, default=None,
help="Only scrape images with filenames that\
match the given regex pattern")
parser.add_argument('--nthreads', type=int, default=10,
help="The number of threads to use when downloading images.")
args = parser.parse_args()
self.url = args.url2scrape[0]
if not re.match(r'^[a-zA-Z]+://', self.url):
self.url = 'http://' + self.url
self.no_to_download = args.max_images
save_dir = args.save_dir + '_{uri.netloc}'.format(
uri=urlparse(self.url))
if args.save_dir != "images":
save_dir = args.save_dir
self.download_path = os.path.join(os.getcwd(), save_dir)
self.use_ghost = args.injected
self.format_list = args.formats if args.formats else [
"jpg", "png", "gif", "svg", "jpeg"]
self.min_filesize = args.min_filesize
self.max_filesize = args.max_filesize
self.dump_urls = args.dump_urls
self.proxy_url = args.proxy_server
self.proxies = {}
if self.proxy_url:
if not re.match(r'^[a-zA-Z]+://', self.proxy_url):
self.proxy_url = 'http://' + self.proxy_url
proxy_start_length = self.proxy_url.find("://") + 3
self.proxies = {
self.proxy_url[:(proxy_start_length - 3)]: self.proxy_url
}
self.scrape_reverse = args.scrape_reverse
self.filename_pattern = args.filename_pattern
self.nthreads = args.nthreads
return (self.url, self.no_to_download, self.format_list,
self.download_path, self.min_filesize, self.max_filesize,
self.dump_urls, self.scrape_reverse, self.use_ghost, self.filename_pattern) |
def set_eol_chars(self, text):
"""Set widget end-of-line (EOL) characters from text (analyzes text)"""
if not is_text_string(text): # testing for QString (PyQt API#1)
text = to_text_string(text)
eol_chars = sourcecode.get_eol_chars(text)
is_document_modified = eol_chars is not None and self.eol_chars is not None
self.eol_chars = eol_chars
if is_document_modified:
self.document().setModified(True)
if self.sig_eol_chars_changed is not None:
self.sig_eol_chars_changed.emit(eol_chars) | Set widget end-of-line (EOL) characters from text (analyzes text) | Below is the the instruction that describes the task:
### Input:
Set widget end-of-line (EOL) characters from text (analyzes text)
### Response:
def set_eol_chars(self, text):
"""Set widget end-of-line (EOL) characters from text (analyzes text)"""
if not is_text_string(text): # testing for QString (PyQt API#1)
text = to_text_string(text)
eol_chars = sourcecode.get_eol_chars(text)
is_document_modified = eol_chars is not None and self.eol_chars is not None
self.eol_chars = eol_chars
if is_document_modified:
self.document().setModified(True)
if self.sig_eol_chars_changed is not None:
self.sig_eol_chars_changed.emit(eol_chars) |
def filter(self, record):
"""Is the specified record to be logged? Returns zero for no,
nonzero for yes. If deemed appropriate, the record may be modified
in-place by this method.
:param logging.LogRecord record: The log record to process
:rtype: int
"""
if self._exists:
return int(getattr(record, 'correlation_id', None) is not None)
return int(getattr(record, 'correlation_id', None) is None) | Is the specified record to be logged? Returns zero for no,
nonzero for yes. If deemed appropriate, the record may be modified
in-place by this method.
:param logging.LogRecord record: The log record to process
:rtype: int | Below is the the instruction that describes the task:
### Input:
Is the specified record to be logged? Returns zero for no,
nonzero for yes. If deemed appropriate, the record may be modified
in-place by this method.
:param logging.LogRecord record: The log record to process
:rtype: int
### Response:
def filter(self, record):
"""Is the specified record to be logged? Returns zero for no,
nonzero for yes. If deemed appropriate, the record may be modified
in-place by this method.
:param logging.LogRecord record: The log record to process
:rtype: int
"""
if self._exists:
return int(getattr(record, 'correlation_id', None) is not None)
return int(getattr(record, 'correlation_id', None) is None) |
def save(self, destination: BinaryIO, buffer_size: int=16384) -> None:
"""Save the file to the destination.
Arguments:
destination: A filename (str) or file object to write to.
buffer_size: Buffer size as used as length in
:func:`shutil.copyfileobj`.
"""
close_destination = False
if isinstance(destination, str):
destination = open(destination, 'wb')
close_destination = True
try:
copyfileobj(self.stream, destination, buffer_size)
finally:
if close_destination:
destination.close() | Save the file to the destination.
Arguments:
destination: A filename (str) or file object to write to.
buffer_size: Buffer size as used as length in
:func:`shutil.copyfileobj`. | Below is the the instruction that describes the task:
### Input:
Save the file to the destination.
Arguments:
destination: A filename (str) or file object to write to.
buffer_size: Buffer size as used as length in
:func:`shutil.copyfileobj`.
### Response:
def save(self, destination: BinaryIO, buffer_size: int=16384) -> None:
"""Save the file to the destination.
Arguments:
destination: A filename (str) or file object to write to.
buffer_size: Buffer size as used as length in
:func:`shutil.copyfileobj`.
"""
close_destination = False
if isinstance(destination, str):
destination = open(destination, 'wb')
close_destination = True
try:
copyfileobj(self.stream, destination, buffer_size)
finally:
if close_destination:
destination.close() |
def execute_cleanup_tasks(ctx, cleanup_tasks, dry_run=False):
"""Execute several cleanup tasks as part of the cleanup.
REQUIRES: ``clean(ctx, dry_run=False)`` signature in cleanup tasks.
:param ctx: Context object for the tasks.
:param cleanup_tasks: Collection of cleanup tasks (as Collection).
:param dry_run: Indicates dry-run mode (bool)
"""
# pylint: disable=redefined-outer-name
executor = Executor(cleanup_tasks, ctx.config)
for cleanup_task in cleanup_tasks.tasks:
print("CLEANUP TASK: %s" % cleanup_task)
executor.execute((cleanup_task, dict(dry_run=dry_run))) | Execute several cleanup tasks as part of the cleanup.
REQUIRES: ``clean(ctx, dry_run=False)`` signature in cleanup tasks.
:param ctx: Context object for the tasks.
:param cleanup_tasks: Collection of cleanup tasks (as Collection).
:param dry_run: Indicates dry-run mode (bool) | Below is the the instruction that describes the task:
### Input:
Execute several cleanup tasks as part of the cleanup.
REQUIRES: ``clean(ctx, dry_run=False)`` signature in cleanup tasks.
:param ctx: Context object for the tasks.
:param cleanup_tasks: Collection of cleanup tasks (as Collection).
:param dry_run: Indicates dry-run mode (bool)
### Response:
def execute_cleanup_tasks(ctx, cleanup_tasks, dry_run=False):
"""Execute several cleanup tasks as part of the cleanup.
REQUIRES: ``clean(ctx, dry_run=False)`` signature in cleanup tasks.
:param ctx: Context object for the tasks.
:param cleanup_tasks: Collection of cleanup tasks (as Collection).
:param dry_run: Indicates dry-run mode (bool)
"""
# pylint: disable=redefined-outer-name
executor = Executor(cleanup_tasks, ctx.config)
for cleanup_task in cleanup_tasks.tasks:
print("CLEANUP TASK: %s" % cleanup_task)
executor.execute((cleanup_task, dict(dry_run=dry_run))) |
def convertTimestamps(column):
"""Convert a dtype of a given column to a datetime.
This method tries to do this by brute force.
Args:
column (pandas.Series): A Series object with all rows.
Returns:
column: Converted to datetime if no errors occured, else the
original column will be returned.
"""
tempColumn = column
try:
# Try to convert the first row and a random row instead of the complete
# column, might be faster
# tempValue = np.datetime64(column[0])
tempValue = np.datetime64(column[randint(0, len(column.index) - 1)])
tempColumn = column.apply(to_datetime)
except Exception:
pass
return tempColumn | Convert a dtype of a given column to a datetime.
This method tries to do this by brute force.
Args:
column (pandas.Series): A Series object with all rows.
Returns:
column: Converted to datetime if no errors occured, else the
original column will be returned. | Below is the the instruction that describes the task:
### Input:
Convert a dtype of a given column to a datetime.
This method tries to do this by brute force.
Args:
column (pandas.Series): A Series object with all rows.
Returns:
column: Converted to datetime if no errors occured, else the
original column will be returned.
### Response:
def convertTimestamps(column):
"""Convert a dtype of a given column to a datetime.
This method tries to do this by brute force.
Args:
column (pandas.Series): A Series object with all rows.
Returns:
column: Converted to datetime if no errors occured, else the
original column will be returned.
"""
tempColumn = column
try:
# Try to convert the first row and a random row instead of the complete
# column, might be faster
# tempValue = np.datetime64(column[0])
tempValue = np.datetime64(column[randint(0, len(column.index) - 1)])
tempColumn = column.apply(to_datetime)
except Exception:
pass
return tempColumn |
def console_output(self, instance=None):
"""Yields the output and metadata from all jobs in the pipeline
Args:
instance: The result of a :meth:`instance` call, if not supplied
the latest of the pipeline will be used.
Yields:
tuple: (metadata (dict), output (str)).
metadata contains:
- pipeline
- pipeline_counter
- stage
- stage_counter
- job
- job_result
"""
if instance is None:
instance = self.instance()
for stage in instance['stages']:
for job in stage['jobs']:
if job['result'] not in self.final_results:
continue
artifact = self.artifact(
instance['counter'],
stage['name'],
job['name'],
stage['counter']
)
output = artifact.get('cruise-output/console.log')
yield (
{
'pipeline': self.name,
'pipeline_counter': instance['counter'],
'stage': stage['name'],
'stage_counter': stage['counter'],
'job': job['name'],
'job_result': job['result'],
},
output.body
) | Yields the output and metadata from all jobs in the pipeline
Args:
instance: The result of a :meth:`instance` call, if not supplied
the latest of the pipeline will be used.
Yields:
tuple: (metadata (dict), output (str)).
metadata contains:
- pipeline
- pipeline_counter
- stage
- stage_counter
- job
- job_result | Below is the the instruction that describes the task:
### Input:
Yields the output and metadata from all jobs in the pipeline
Args:
instance: The result of a :meth:`instance` call, if not supplied
the latest of the pipeline will be used.
Yields:
tuple: (metadata (dict), output (str)).
metadata contains:
- pipeline
- pipeline_counter
- stage
- stage_counter
- job
- job_result
### Response:
def console_output(self, instance=None):
"""Yields the output and metadata from all jobs in the pipeline
Args:
instance: The result of a :meth:`instance` call, if not supplied
the latest of the pipeline will be used.
Yields:
tuple: (metadata (dict), output (str)).
metadata contains:
- pipeline
- pipeline_counter
- stage
- stage_counter
- job
- job_result
"""
if instance is None:
instance = self.instance()
for stage in instance['stages']:
for job in stage['jobs']:
if job['result'] not in self.final_results:
continue
artifact = self.artifact(
instance['counter'],
stage['name'],
job['name'],
stage['counter']
)
output = artifact.get('cruise-output/console.log')
yield (
{
'pipeline': self.name,
'pipeline_counter': instance['counter'],
'stage': stage['name'],
'stage_counter': stage['counter'],
'job': job['name'],
'job_result': job['result'],
},
output.body
) |
def create_unsigned_transaction(cls,
*,
nonce: int,
gas_price: int,
gas: int,
to: Address,
value: int,
data: bytes) -> 'BaseUnsignedTransaction':
"""
Create an unsigned transaction.
"""
raise NotImplementedError("Must be implemented by subclasses") | Create an unsigned transaction. | Below is the the instruction that describes the task:
### Input:
Create an unsigned transaction.
### Response:
def create_unsigned_transaction(cls,
*,
nonce: int,
gas_price: int,
gas: int,
to: Address,
value: int,
data: bytes) -> 'BaseUnsignedTransaction':
"""
Create an unsigned transaction.
"""
raise NotImplementedError("Must be implemented by subclasses") |
def ValidateURL(url, column_name=None, problems=None):
"""
Validates a non-required URL value using IsValidURL():
- if invalid adds InvalidValue error (if problems accumulator is provided)
- an empty URL is considered valid and no error or warning is issued.
"""
if IsEmpty(url) or IsValidURL(url):
return True
else:
if problems:
problems.InvalidValue(column_name, url)
return False | Validates a non-required URL value using IsValidURL():
- if invalid adds InvalidValue error (if problems accumulator is provided)
- an empty URL is considered valid and no error or warning is issued. | Below is the the instruction that describes the task:
### Input:
Validates a non-required URL value using IsValidURL():
- if invalid adds InvalidValue error (if problems accumulator is provided)
- an empty URL is considered valid and no error or warning is issued.
### Response:
def ValidateURL(url, column_name=None, problems=None):
"""
Validates a non-required URL value using IsValidURL():
- if invalid adds InvalidValue error (if problems accumulator is provided)
- an empty URL is considered valid and no error or warning is issued.
"""
if IsEmpty(url) or IsValidURL(url):
return True
else:
if problems:
problems.InvalidValue(column_name, url)
return False |
def _inhibitColumnsWithLateral(self, overlaps, lateralConnections):
"""
Performs an experimentatl local inhibition. Local inhibition is
iteratively performed on a column by column basis.
"""
n,m = self.shape
y = np.zeros(n)
s = self.sparsity
L = lateralConnections
desiredWeight = self.codeWeight
inhSignal = np.zeros(n)
sortedIndices = np.argsort(overlaps, kind='mergesort')[::-1]
currentWeight = 0
for i in sortedIndices:
if overlaps[i] < self._stimulusThreshold:
break
inhTooStrong = ( inhSignal[i] >= s )
if not inhTooStrong:
y[i] = 1.
currentWeight += 1
inhSignal[:] += L[i,:]
if self.enforceDesiredWeight and currentWeight == desiredWeight:
break
activeColumns = np.where(y==1.0)[0]
return activeColumns | Performs an experimentatl local inhibition. Local inhibition is
iteratively performed on a column by column basis. | Below is the the instruction that describes the task:
### Input:
Performs an experimentatl local inhibition. Local inhibition is
iteratively performed on a column by column basis.
### Response:
def _inhibitColumnsWithLateral(self, overlaps, lateralConnections):
"""
Performs an experimentatl local inhibition. Local inhibition is
iteratively performed on a column by column basis.
"""
n,m = self.shape
y = np.zeros(n)
s = self.sparsity
L = lateralConnections
desiredWeight = self.codeWeight
inhSignal = np.zeros(n)
sortedIndices = np.argsort(overlaps, kind='mergesort')[::-1]
currentWeight = 0
for i in sortedIndices:
if overlaps[i] < self._stimulusThreshold:
break
inhTooStrong = ( inhSignal[i] >= s )
if not inhTooStrong:
y[i] = 1.
currentWeight += 1
inhSignal[:] += L[i,:]
if self.enforceDesiredWeight and currentWeight == desiredWeight:
break
activeColumns = np.where(y==1.0)[0]
return activeColumns |
def load_fits_catalog(self, name, **kwargs):
"""Load sources from a FITS catalog file.
Parameters
----------
name : str
Catalog name or path to a catalog FITS file.
"""
# EAC split this function to make it easier to load an existing catalog
cat = catalog.Catalog.create(name)
self.load_existing_catalog(cat, **kwargs) | Load sources from a FITS catalog file.
Parameters
----------
name : str
Catalog name or path to a catalog FITS file. | Below is the the instruction that describes the task:
### Input:
Load sources from a FITS catalog file.
Parameters
----------
name : str
Catalog name or path to a catalog FITS file.
### Response:
def load_fits_catalog(self, name, **kwargs):
"""Load sources from a FITS catalog file.
Parameters
----------
name : str
Catalog name or path to a catalog FITS file.
"""
# EAC split this function to make it easier to load an existing catalog
cat = catalog.Catalog.create(name)
self.load_existing_catalog(cat, **kwargs) |
def destroy_template(name=None, call=None, kwargs=None):
'''
Destroy Xen VM or template instance
.. code-block:: bash
salt-cloud -f destroy_template myxen name=testvm2
'''
if call == 'action':
raise SaltCloudSystemExit(
'The destroy_template function must be called with -f.'
)
if kwargs is None:
kwargs = {}
name = kwargs.get('name', None)
session = _get_session()
vms = session.xenapi.VM.get_all_records()
ret = {}
found = False
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if record['is_a_template']:
if record['name_label'] == name:
found = True
# log.debug(record['name_label'])
session.xenapi.VM.destroy(vm)
ret[name] = {'status': 'destroyed'}
if not found:
ret[name] = {'status': 'not found'}
return ret | Destroy Xen VM or template instance
.. code-block:: bash
salt-cloud -f destroy_template myxen name=testvm2 | Below is the the instruction that describes the task:
### Input:
Destroy Xen VM or template instance
.. code-block:: bash
salt-cloud -f destroy_template myxen name=testvm2
### Response:
def destroy_template(name=None, call=None, kwargs=None):
'''
Destroy Xen VM or template instance
.. code-block:: bash
salt-cloud -f destroy_template myxen name=testvm2
'''
if call == 'action':
raise SaltCloudSystemExit(
'The destroy_template function must be called with -f.'
)
if kwargs is None:
kwargs = {}
name = kwargs.get('name', None)
session = _get_session()
vms = session.xenapi.VM.get_all_records()
ret = {}
found = False
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if record['is_a_template']:
if record['name_label'] == name:
found = True
# log.debug(record['name_label'])
session.xenapi.VM.destroy(vm)
ret[name] = {'status': 'destroyed'}
if not found:
ret[name] = {'status': 'not found'}
return ret |
def pycode(argcount,
kwonlyargcount,
nlocals,
stacksize,
flags,
codestring,
constants,
names,
varnames,
filename,
name,
firstlineno,
lnotab,
freevars=(),
cellvars=()):
"""types.CodeType constructor that accepts keyword arguments.
See Also
--------
types.CodeType
"""
return CodeType(
argcount,
kwonlyargcount,
nlocals,
stacksize,
flags,
codestring,
constants,
names,
varnames,
filename,
name,
firstlineno,
lnotab,
freevars,
cellvars,
) | types.CodeType constructor that accepts keyword arguments.
See Also
--------
types.CodeType | Below is the the instruction that describes the task:
### Input:
types.CodeType constructor that accepts keyword arguments.
See Also
--------
types.CodeType
### Response:
def pycode(argcount,
kwonlyargcount,
nlocals,
stacksize,
flags,
codestring,
constants,
names,
varnames,
filename,
name,
firstlineno,
lnotab,
freevars=(),
cellvars=()):
"""types.CodeType constructor that accepts keyword arguments.
See Also
--------
types.CodeType
"""
return CodeType(
argcount,
kwonlyargcount,
nlocals,
stacksize,
flags,
codestring,
constants,
names,
varnames,
filename,
name,
firstlineno,
lnotab,
freevars,
cellvars,
) |
def target_address(self):
"""Return the authorative target of the link."""
# If link is a receiver, target is determined by the local
# value, else use the remote.
if self._pn_link.is_receiver:
return self._pn_link.target.address
else:
return self._pn_link.remote_target.address | Return the authorative target of the link. | Below is the the instruction that describes the task:
### Input:
Return the authorative target of the link.
### Response:
def target_address(self):
"""Return the authorative target of the link."""
# If link is a receiver, target is determined by the local
# value, else use the remote.
if self._pn_link.is_receiver:
return self._pn_link.target.address
else:
return self._pn_link.remote_target.address |
def flipVertical(self):
""" flips an image object vertically
"""
self.flipV = not self.flipV
self._transmogrophy(self.angle, self.percent, self.scaleFromCenter, self.flipH, self.flipV) | flips an image object vertically | Below is the the instruction that describes the task:
### Input:
flips an image object vertically
### Response:
def flipVertical(self):
""" flips an image object vertically
"""
self.flipV = not self.flipV
self._transmogrophy(self.angle, self.percent, self.scaleFromCenter, self.flipH, self.flipV) |
def cli(obj, ids, query, filters):
"""Show raw data for alerts."""
client = obj['client']
if ids:
query = [('id', x) for x in ids]
elif query:
query = [('q', query)]
else:
query = build_query(filters)
alerts = client.search(query)
headers = {'id': 'ID', 'rawData': 'RAW DATA'}
click.echo(
tabulate([{'id': a.id, 'rawData': a.raw_data} for a in alerts], headers=headers, tablefmt=obj['output'])) | Show raw data for alerts. | Below is the the instruction that describes the task:
### Input:
Show raw data for alerts.
### Response:
def cli(obj, ids, query, filters):
"""Show raw data for alerts."""
client = obj['client']
if ids:
query = [('id', x) for x in ids]
elif query:
query = [('q', query)]
else:
query = build_query(filters)
alerts = client.search(query)
headers = {'id': 'ID', 'rawData': 'RAW DATA'}
click.echo(
tabulate([{'id': a.id, 'rawData': a.raw_data} for a in alerts], headers=headers, tablefmt=obj['output'])) |
def sync_model(self, comment='', compact_central=False,
release_borrowed=True, release_workset=True,
save_local=False):
"""Append a sync model entry to the journal.
This instructs Revit to sync the currently open workshared model.
Args:
comment (str): comment to be provided for the sync step
compact_central (bool): if True compacts the central file
release_borrowed (bool): if True releases the borrowed elements
release_workset (bool): if True releases the borrowed worksets
save_local (bool): if True saves the local file as well
"""
self._add_entry(templates.FILE_SYNC_START)
if compact_central:
self._add_entry(templates.FILE_SYNC_COMPACT)
if release_borrowed:
self._add_entry(templates.FILE_SYNC_RELEASE_BORROWED)
if release_workset:
self._add_entry(templates.FILE_SYNC_RELEASE_USERWORKSETS)
if save_local:
self._add_entry(templates.FILE_SYNC_RELEASE_SAVELOCAL)
self._add_entry(templates.FILE_SYNC_COMMENT_OK
.format(sync_comment=comment)) | Append a sync model entry to the journal.
This instructs Revit to sync the currently open workshared model.
Args:
comment (str): comment to be provided for the sync step
compact_central (bool): if True compacts the central file
release_borrowed (bool): if True releases the borrowed elements
release_workset (bool): if True releases the borrowed worksets
save_local (bool): if True saves the local file as well | Below is the the instruction that describes the task:
### Input:
Append a sync model entry to the journal.
This instructs Revit to sync the currently open workshared model.
Args:
comment (str): comment to be provided for the sync step
compact_central (bool): if True compacts the central file
release_borrowed (bool): if True releases the borrowed elements
release_workset (bool): if True releases the borrowed worksets
save_local (bool): if True saves the local file as well
### Response:
def sync_model(self, comment='', compact_central=False,
release_borrowed=True, release_workset=True,
save_local=False):
"""Append a sync model entry to the journal.
This instructs Revit to sync the currently open workshared model.
Args:
comment (str): comment to be provided for the sync step
compact_central (bool): if True compacts the central file
release_borrowed (bool): if True releases the borrowed elements
release_workset (bool): if True releases the borrowed worksets
save_local (bool): if True saves the local file as well
"""
self._add_entry(templates.FILE_SYNC_START)
if compact_central:
self._add_entry(templates.FILE_SYNC_COMPACT)
if release_borrowed:
self._add_entry(templates.FILE_SYNC_RELEASE_BORROWED)
if release_workset:
self._add_entry(templates.FILE_SYNC_RELEASE_USERWORKSETS)
if save_local:
self._add_entry(templates.FILE_SYNC_RELEASE_SAVELOCAL)
self._add_entry(templates.FILE_SYNC_COMMENT_OK
.format(sync_comment=comment)) |
def convert_timedelta(duration):
"""
Summary:
Convert duration into component time units
Args:
:duration (datetime.timedelta): time duration to convert
Returns:
days, hours, minutes, seconds | TYPE: tuple (integers)
"""
days, seconds = duration.days, duration.seconds
hours = seconds // 3600
minutes = (seconds % 3600) // 60
seconds = (seconds % 60)
return days, hours, minutes, seconds | Summary:
Convert duration into component time units
Args:
:duration (datetime.timedelta): time duration to convert
Returns:
days, hours, minutes, seconds | TYPE: tuple (integers) | Below is the the instruction that describes the task:
### Input:
Summary:
Convert duration into component time units
Args:
:duration (datetime.timedelta): time duration to convert
Returns:
days, hours, minutes, seconds | TYPE: tuple (integers)
### Response:
def convert_timedelta(duration):
"""
Summary:
Convert duration into component time units
Args:
:duration (datetime.timedelta): time duration to convert
Returns:
days, hours, minutes, seconds | TYPE: tuple (integers)
"""
days, seconds = duration.days, duration.seconds
hours = seconds // 3600
minutes = (seconds % 3600) // 60
seconds = (seconds % 60)
return days, hours, minutes, seconds |
def odd_odd(self):
"""Selects odd-odd nuclei from the table:
>>> Table('FRDM95').odd_odd
Out[13]:
Z N
9 9 1.21
11 0.10
13 3.08
15 9.32
...
"""
return self.select(lambda Z, N: (Z % 2) and (N % 2), name=self.name) | Selects odd-odd nuclei from the table:
>>> Table('FRDM95').odd_odd
Out[13]:
Z N
9 9 1.21
11 0.10
13 3.08
15 9.32
... | Below is the the instruction that describes the task:
### Input:
Selects odd-odd nuclei from the table:
>>> Table('FRDM95').odd_odd
Out[13]:
Z N
9 9 1.21
11 0.10
13 3.08
15 9.32
...
### Response:
def odd_odd(self):
"""Selects odd-odd nuclei from the table:
>>> Table('FRDM95').odd_odd
Out[13]:
Z N
9 9 1.21
11 0.10
13 3.08
15 9.32
...
"""
return self.select(lambda Z, N: (Z % 2) and (N % 2), name=self.name) |
def _handle_builder_exception(self, message, residue):
"""
Makes a PDB Construction Error a bit more verbose and informative
"""
message = "%s. Error when parsing residue %s:%s" %(message, residue['number'], residue['name'])
raise PDBConstructionException(message) | Makes a PDB Construction Error a bit more verbose and informative | Below is the the instruction that describes the task:
### Input:
Makes a PDB Construction Error a bit more verbose and informative
### Response:
def _handle_builder_exception(self, message, residue):
"""
Makes a PDB Construction Error a bit more verbose and informative
"""
message = "%s. Error when parsing residue %s:%s" %(message, residue['number'], residue['name'])
raise PDBConstructionException(message) |
def has_file_url(self):
"""stub"""
return bool(self._get_asset_content(
Id(self.my_osid_object._my_map['fileId']['assetId']),
self.my_osid_object._my_map['fileId']['assetContentTypeId']).has_url()) | stub | Below is the the instruction that describes the task:
### Input:
stub
### Response:
def has_file_url(self):
"""stub"""
return bool(self._get_asset_content(
Id(self.my_osid_object._my_map['fileId']['assetId']),
self.my_osid_object._my_map['fileId']['assetContentTypeId']).has_url()) |
def encodeLength(value):
'''
Encodes value into a multibyte sequence defined by MQTT protocol.
Used to encode packet length fields.
'''
encoded = bytearray()
while True:
digit = value % 128
value //= 128
if value > 0:
digit |= 128
encoded.append(digit)
if value <= 0:
break
return encoded | Encodes value into a multibyte sequence defined by MQTT protocol.
Used to encode packet length fields. | Below is the the instruction that describes the task:
### Input:
Encodes value into a multibyte sequence defined by MQTT protocol.
Used to encode packet length fields.
### Response:
def encodeLength(value):
'''
Encodes value into a multibyte sequence defined by MQTT protocol.
Used to encode packet length fields.
'''
encoded = bytearray()
while True:
digit = value % 128
value //= 128
if value > 0:
digit |= 128
encoded.append(digit)
if value <= 0:
break
return encoded |
def from_dlpack(dlpack):
"""Returns a NDArray backed by a dlpack tensor.
Parameters
----------
dlpack: PyCapsule (the pointer of DLManagedTensor)
input data
Returns
-------
NDArray
a NDArray backed by a dlpack tensor
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.to_dlpack_for_read(x)
>>> type(y)
<class 'PyCapsule'>
>>> z = mx.nd.from_dlpack(y)
>>> type(z)
<class 'mxnet.ndarray.ndarray.NDArray'>
>>> z
[[ 1. 1. 1.]
[ 1. 1. 1.]]
<NDArray 2x3 @cpu(0)>
>>> w = mx.nd.to_dlpack_for_write(x)
>>> type(w)
<class 'PyCapsule'>
>>> u = mx.nd.from_dlpack(w)
>>> u += 1
>>> x
[[2. 2. 2.]
[2. 2. 2.]]
<NDArray 2x3 @cpu(0)>
"""
handle = NDArrayHandle()
dlpack = ctypes.py_object(dlpack)
assert ctypes.pythonapi.PyCapsule_IsValid(dlpack, _c_str_dltensor), ValueError(
'Invalid DLPack Tensor. DLTensor capsules can be consumed only once.')
dlpack_handle = ctypes.c_void_p(ctypes.pythonapi.PyCapsule_GetPointer(dlpack, _c_str_dltensor))
check_call(_LIB.MXNDArrayFromDLPack(dlpack_handle, ctypes.byref(handle)))
# Rename PyCapsule (DLPack)
ctypes.pythonapi.PyCapsule_SetName(dlpack, _c_str_used_dltensor)
# delete the deleter of the old dlpack
ctypes.pythonapi.PyCapsule_SetDestructor(dlpack, None)
return NDArray(handle=handle) | Returns a NDArray backed by a dlpack tensor.
Parameters
----------
dlpack: PyCapsule (the pointer of DLManagedTensor)
input data
Returns
-------
NDArray
a NDArray backed by a dlpack tensor
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.to_dlpack_for_read(x)
>>> type(y)
<class 'PyCapsule'>
>>> z = mx.nd.from_dlpack(y)
>>> type(z)
<class 'mxnet.ndarray.ndarray.NDArray'>
>>> z
[[ 1. 1. 1.]
[ 1. 1. 1.]]
<NDArray 2x3 @cpu(0)>
>>> w = mx.nd.to_dlpack_for_write(x)
>>> type(w)
<class 'PyCapsule'>
>>> u = mx.nd.from_dlpack(w)
>>> u += 1
>>> x
[[2. 2. 2.]
[2. 2. 2.]]
<NDArray 2x3 @cpu(0)> | Below is the the instruction that describes the task:
### Input:
Returns a NDArray backed by a dlpack tensor.
Parameters
----------
dlpack: PyCapsule (the pointer of DLManagedTensor)
input data
Returns
-------
NDArray
a NDArray backed by a dlpack tensor
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.to_dlpack_for_read(x)
>>> type(y)
<class 'PyCapsule'>
>>> z = mx.nd.from_dlpack(y)
>>> type(z)
<class 'mxnet.ndarray.ndarray.NDArray'>
>>> z
[[ 1. 1. 1.]
[ 1. 1. 1.]]
<NDArray 2x3 @cpu(0)>
>>> w = mx.nd.to_dlpack_for_write(x)
>>> type(w)
<class 'PyCapsule'>
>>> u = mx.nd.from_dlpack(w)
>>> u += 1
>>> x
[[2. 2. 2.]
[2. 2. 2.]]
<NDArray 2x3 @cpu(0)>
### Response:
def from_dlpack(dlpack):
"""Returns a NDArray backed by a dlpack tensor.
Parameters
----------
dlpack: PyCapsule (the pointer of DLManagedTensor)
input data
Returns
-------
NDArray
a NDArray backed by a dlpack tensor
Examples
--------
>>> x = mx.nd.ones((2,3))
>>> y = mx.nd.to_dlpack_for_read(x)
>>> type(y)
<class 'PyCapsule'>
>>> z = mx.nd.from_dlpack(y)
>>> type(z)
<class 'mxnet.ndarray.ndarray.NDArray'>
>>> z
[[ 1. 1. 1.]
[ 1. 1. 1.]]
<NDArray 2x3 @cpu(0)>
>>> w = mx.nd.to_dlpack_for_write(x)
>>> type(w)
<class 'PyCapsule'>
>>> u = mx.nd.from_dlpack(w)
>>> u += 1
>>> x
[[2. 2. 2.]
[2. 2. 2.]]
<NDArray 2x3 @cpu(0)>
"""
handle = NDArrayHandle()
dlpack = ctypes.py_object(dlpack)
assert ctypes.pythonapi.PyCapsule_IsValid(dlpack, _c_str_dltensor), ValueError(
'Invalid DLPack Tensor. DLTensor capsules can be consumed only once.')
dlpack_handle = ctypes.c_void_p(ctypes.pythonapi.PyCapsule_GetPointer(dlpack, _c_str_dltensor))
check_call(_LIB.MXNDArrayFromDLPack(dlpack_handle, ctypes.byref(handle)))
# Rename PyCapsule (DLPack)
ctypes.pythonapi.PyCapsule_SetName(dlpack, _c_str_used_dltensor)
# delete the deleter of the old dlpack
ctypes.pythonapi.PyCapsule_SetDestructor(dlpack, None)
return NDArray(handle=handle) |
def get_anchor_format(a):
"""
Extract the resource file-type format from the anchor.
"""
# (. or format=) then (file_extension) then (? or $)
# e.g. "...format=txt" or "...download.mp4?..."
fmt = re.search(r"(?:\.|format=)(\w+)(?:\?.*)?$", a)
return fmt.group(1) if fmt else None | Extract the resource file-type format from the anchor. | Below is the the instruction that describes the task:
### Input:
Extract the resource file-type format from the anchor.
### Response:
def get_anchor_format(a):
"""
Extract the resource file-type format from the anchor.
"""
# (. or format=) then (file_extension) then (? or $)
# e.g. "...format=txt" or "...download.mp4?..."
fmt = re.search(r"(?:\.|format=)(\w+)(?:\?.*)?$", a)
return fmt.group(1) if fmt else None |
def transform(self, X):
"""
Args:
X: DataFrame with NaN's
Returns:
Dictionary with one key - 'X' corresponding to given DataFrame but without nan's
"""
if self.fill_missing:
X = self.filler.complete(X)
return {'X': X} | Args:
X: DataFrame with NaN's
Returns:
Dictionary with one key - 'X' corresponding to given DataFrame but without nan's | Below is the the instruction that describes the task:
### Input:
Args:
X: DataFrame with NaN's
Returns:
Dictionary with one key - 'X' corresponding to given DataFrame but without nan's
### Response:
def transform(self, X):
"""
Args:
X: DataFrame with NaN's
Returns:
Dictionary with one key - 'X' corresponding to given DataFrame but without nan's
"""
if self.fill_missing:
X = self.filler.complete(X)
return {'X': X} |
def colourblind(i):
'''
colour pallete from http://tableaufriction.blogspot.ro/
allegedly suitable for colour-blind folk
SJ
'''
rawRGBs = [(162,200,236),
(255,128,14),
(171,171,171),
(95,158,209),
(89,89,89),
(0,107,164),
(255,188,121),
(207,207,207),
(200,82,0),
(137,137,137)]
scaledRGBs = []
for r in rawRGBs:
scaledRGBs.append((old_div(r[0],255.),old_div(r[1],255.),old_div(r[2],255.)))
idx = sc.mod(i,len(scaledRGBs))
return scaledRGBs[idx] | colour pallete from http://tableaufriction.blogspot.ro/
allegedly suitable for colour-blind folk
SJ | Below is the the instruction that describes the task:
### Input:
colour pallete from http://tableaufriction.blogspot.ro/
allegedly suitable for colour-blind folk
SJ
### Response:
def colourblind(i):
'''
colour pallete from http://tableaufriction.blogspot.ro/
allegedly suitable for colour-blind folk
SJ
'''
rawRGBs = [(162,200,236),
(255,128,14),
(171,171,171),
(95,158,209),
(89,89,89),
(0,107,164),
(255,188,121),
(207,207,207),
(200,82,0),
(137,137,137)]
scaledRGBs = []
for r in rawRGBs:
scaledRGBs.append((old_div(r[0],255.),old_div(r[1],255.),old_div(r[2],255.)))
idx = sc.mod(i,len(scaledRGBs))
return scaledRGBs[idx] |
def _dfs_edges(graph, source, max_steps=None):
"""
Perform a depth-first search on the given DiGraph, with a limit on maximum steps.
:param networkx.DiGraph graph: The graph to traverse.
:param Any source: The source to begin traversal.
:param int max_steps: Maximum steps of the traversal, or None if not limiting steps.
:return: An iterator of edges.
"""
if max_steps is None:
yield networkx.dfs_edges(graph, source)
else:
steps_map = defaultdict(int)
traversed = { source }
stack = [ source ]
while stack:
src = stack.pop()
for dst in graph.successors(src):
if dst in traversed:
continue
traversed.add(dst)
dst_steps = max(steps_map[src] + 1, steps_map[dst])
if dst_steps > max_steps:
continue
yield src, dst
steps_map[dst] = dst_steps
stack.append(dst) | Perform a depth-first search on the given DiGraph, with a limit on maximum steps.
:param networkx.DiGraph graph: The graph to traverse.
:param Any source: The source to begin traversal.
:param int max_steps: Maximum steps of the traversal, or None if not limiting steps.
:return: An iterator of edges. | Below is the the instruction that describes the task:
### Input:
Perform a depth-first search on the given DiGraph, with a limit on maximum steps.
:param networkx.DiGraph graph: The graph to traverse.
:param Any source: The source to begin traversal.
:param int max_steps: Maximum steps of the traversal, or None if not limiting steps.
:return: An iterator of edges.
### Response:
def _dfs_edges(graph, source, max_steps=None):
"""
Perform a depth-first search on the given DiGraph, with a limit on maximum steps.
:param networkx.DiGraph graph: The graph to traverse.
:param Any source: The source to begin traversal.
:param int max_steps: Maximum steps of the traversal, or None if not limiting steps.
:return: An iterator of edges.
"""
if max_steps is None:
yield networkx.dfs_edges(graph, source)
else:
steps_map = defaultdict(int)
traversed = { source }
stack = [ source ]
while stack:
src = stack.pop()
for dst in graph.successors(src):
if dst in traversed:
continue
traversed.add(dst)
dst_steps = max(steps_map[src] + 1, steps_map[dst])
if dst_steps > max_steps:
continue
yield src, dst
steps_map[dst] = dst_steps
stack.append(dst) |
def set_condition(self, value):
"""
Setter for 'condition' field.
:param value - a new value of 'condition' field. Required field. Must be a String.
"""
if value is None or not isinstance(value, str):
raise TypeError("Condition is required and must be set to a String")
else:
self.__condition = value | Setter for 'condition' field.
:param value - a new value of 'condition' field. Required field. Must be a String. | Below is the the instruction that describes the task:
### Input:
Setter for 'condition' field.
:param value - a new value of 'condition' field. Required field. Must be a String.
### Response:
def set_condition(self, value):
"""
Setter for 'condition' field.
:param value - a new value of 'condition' field. Required field. Must be a String.
"""
if value is None or not isinstance(value, str):
raise TypeError("Condition is required and must be set to a String")
else:
self.__condition = value |
def _check_transition_target(self, transition):
"""Checks the validity of a transition target
Checks whether the transition target is valid.
:param rafcon.core.transition.Transition transition: The transition to be checked
:return bool validity, str message: validity is True, when the transition is valid, False else. message gives
more information especially if the transition is not valid
"""
to_state_id = transition.to_state
to_outcome_id = transition.to_outcome
if to_state_id == self.state_id:
if to_outcome_id not in self.outcomes:
return False, "to_outcome is not existing"
else:
if to_state_id not in self.states:
return False, "to_state is not existing"
if to_outcome_id is not None:
return False, "to_outcome must be None as transition goes to child state"
return True, "valid" | Checks the validity of a transition target
Checks whether the transition target is valid.
:param rafcon.core.transition.Transition transition: The transition to be checked
:return bool validity, str message: validity is True, when the transition is valid, False else. message gives
more information especially if the transition is not valid | Below is the the instruction that describes the task:
### Input:
Checks the validity of a transition target
Checks whether the transition target is valid.
:param rafcon.core.transition.Transition transition: The transition to be checked
:return bool validity, str message: validity is True, when the transition is valid, False else. message gives
more information especially if the transition is not valid
### Response:
def _check_transition_target(self, transition):
"""Checks the validity of a transition target
Checks whether the transition target is valid.
:param rafcon.core.transition.Transition transition: The transition to be checked
:return bool validity, str message: validity is True, when the transition is valid, False else. message gives
more information especially if the transition is not valid
"""
to_state_id = transition.to_state
to_outcome_id = transition.to_outcome
if to_state_id == self.state_id:
if to_outcome_id not in self.outcomes:
return False, "to_outcome is not existing"
else:
if to_state_id not in self.states:
return False, "to_state is not existing"
if to_outcome_id is not None:
return False, "to_outcome must be None as transition goes to child state"
return True, "valid" |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.