code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def apply_obb(self):
"""
Apply the oriented bounding box transform to the current mesh.
This will result in a mesh with an AABB centered at the
origin and the same dimensions as the OBB.
Returns
----------
matrix : (4, 4) float
Transformation matrix that was applied
to mesh to move it into OBB frame
"""
matrix = self.bounding_box_oriented.primitive.transform
matrix = np.linalg.inv(matrix)
self.apply_transform(matrix)
return matrix | Apply the oriented bounding box transform to the current mesh.
This will result in a mesh with an AABB centered at the
origin and the same dimensions as the OBB.
Returns
----------
matrix : (4, 4) float
Transformation matrix that was applied
to mesh to move it into OBB frame | Below is the the instruction that describes the task:
### Input:
Apply the oriented bounding box transform to the current mesh.
This will result in a mesh with an AABB centered at the
origin and the same dimensions as the OBB.
Returns
----------
matrix : (4, 4) float
Transformation matrix that was applied
to mesh to move it into OBB frame
### Response:
def apply_obb(self):
"""
Apply the oriented bounding box transform to the current mesh.
This will result in a mesh with an AABB centered at the
origin and the same dimensions as the OBB.
Returns
----------
matrix : (4, 4) float
Transformation matrix that was applied
to mesh to move it into OBB frame
"""
matrix = self.bounding_box_oriented.primitive.transform
matrix = np.linalg.inv(matrix)
self.apply_transform(matrix)
return matrix |
def getUser(self):
"""Returns the linked Plone User or None
"""
username = self.getUsername()
if not username:
return None
user = api.user.get(userid=username)
return user | Returns the linked Plone User or None | Below is the the instruction that describes the task:
### Input:
Returns the linked Plone User or None
### Response:
def getUser(self):
"""Returns the linked Plone User or None
"""
username = self.getUsername()
if not username:
return None
user = api.user.get(userid=username)
return user |
def pythons():
'''Install latest pythons with pyenv.
The python version will be activated in the projects base dir.
Will skip already installed latest python versions.
'''
if not _pyenv_exists():
print('\npyenv is not installed. You can install it with fabsetup '
'(https://github.com/theno/fabsetup):\n\n ' +
cyan('mkdir ~/repos && cd ~/repos\n '
'git clone https://github.com/theno/fabsetup.git\n '
'cd fabsetup && fab setup.pyenv -H localhost'))
return 1
latest_pythons = _determine_latest_pythons()
print(cyan('\n## install latest python versions'))
for version in latest_pythons:
local(flo('pyenv install --skip-existing {version}'))
print(cyan('\n## activate pythons'))
basedir = dirname(__file__)
latest_pythons_str = ' '.join(latest_pythons)
local(flo('cd {basedir} && pyenv local system {latest_pythons_str}'))
highest_python = latest_pythons[-1]
print(cyan(flo(
'\n## prepare Python-{highest_python} for testing and packaging')))
packages_for_testing = 'pytest tox'
packages_for_packaging = 'pypandoc twine'
local(flo('~/.pyenv/versions/{highest_python}/bin/pip install --upgrade '
'pip {packages_for_testing} {packages_for_packaging}')) | Install latest pythons with pyenv.
The python version will be activated in the projects base dir.
Will skip already installed latest python versions. | Below is the the instruction that describes the task:
### Input:
Install latest pythons with pyenv.
The python version will be activated in the projects base dir.
Will skip already installed latest python versions.
### Response:
def pythons():
'''Install latest pythons with pyenv.
The python version will be activated in the projects base dir.
Will skip already installed latest python versions.
'''
if not _pyenv_exists():
print('\npyenv is not installed. You can install it with fabsetup '
'(https://github.com/theno/fabsetup):\n\n ' +
cyan('mkdir ~/repos && cd ~/repos\n '
'git clone https://github.com/theno/fabsetup.git\n '
'cd fabsetup && fab setup.pyenv -H localhost'))
return 1
latest_pythons = _determine_latest_pythons()
print(cyan('\n## install latest python versions'))
for version in latest_pythons:
local(flo('pyenv install --skip-existing {version}'))
print(cyan('\n## activate pythons'))
basedir = dirname(__file__)
latest_pythons_str = ' '.join(latest_pythons)
local(flo('cd {basedir} && pyenv local system {latest_pythons_str}'))
highest_python = latest_pythons[-1]
print(cyan(flo(
'\n## prepare Python-{highest_python} for testing and packaging')))
packages_for_testing = 'pytest tox'
packages_for_packaging = 'pypandoc twine'
local(flo('~/.pyenv/versions/{highest_python}/bin/pip install --upgrade '
'pip {packages_for_testing} {packages_for_packaging}')) |
def load_secret(self, secret):
"""
Ask YubiHSM to load a pre-existing YubiKey secret.
The data is stored internally in the YubiHSM in temporary memory -
this operation would typically be followed by one or more L{generate_aead}
commands to actually retreive the generated secret (in encrypted form).
@param secret: YubiKey secret to load
@type secret: L{pyhsm.aead_cmd.YHSM_YubiKeySecret} or string
@returns: Number of bytes in YubiHSM internal buffer after load
@rtype: integer
@see: L{pyhsm.buffer_cmd.YHSM_Cmd_Buffer_Load}
"""
if isinstance(secret, pyhsm.aead_cmd.YHSM_YubiKeySecret):
secret = secret.pack()
return pyhsm.buffer_cmd.YHSM_Cmd_Buffer_Load(self.stick, secret).execute() | Ask YubiHSM to load a pre-existing YubiKey secret.
The data is stored internally in the YubiHSM in temporary memory -
this operation would typically be followed by one or more L{generate_aead}
commands to actually retreive the generated secret (in encrypted form).
@param secret: YubiKey secret to load
@type secret: L{pyhsm.aead_cmd.YHSM_YubiKeySecret} or string
@returns: Number of bytes in YubiHSM internal buffer after load
@rtype: integer
@see: L{pyhsm.buffer_cmd.YHSM_Cmd_Buffer_Load} | Below is the the instruction that describes the task:
### Input:
Ask YubiHSM to load a pre-existing YubiKey secret.
The data is stored internally in the YubiHSM in temporary memory -
this operation would typically be followed by one or more L{generate_aead}
commands to actually retreive the generated secret (in encrypted form).
@param secret: YubiKey secret to load
@type secret: L{pyhsm.aead_cmd.YHSM_YubiKeySecret} or string
@returns: Number of bytes in YubiHSM internal buffer after load
@rtype: integer
@see: L{pyhsm.buffer_cmd.YHSM_Cmd_Buffer_Load}
### Response:
def load_secret(self, secret):
"""
Ask YubiHSM to load a pre-existing YubiKey secret.
The data is stored internally in the YubiHSM in temporary memory -
this operation would typically be followed by one or more L{generate_aead}
commands to actually retreive the generated secret (in encrypted form).
@param secret: YubiKey secret to load
@type secret: L{pyhsm.aead_cmd.YHSM_YubiKeySecret} or string
@returns: Number of bytes in YubiHSM internal buffer after load
@rtype: integer
@see: L{pyhsm.buffer_cmd.YHSM_Cmd_Buffer_Load}
"""
if isinstance(secret, pyhsm.aead_cmd.YHSM_YubiKeySecret):
secret = secret.pack()
return pyhsm.buffer_cmd.YHSM_Cmd_Buffer_Load(self.stick, secret).execute() |
def updateColumnValues(self, networkId, tableType, columnName, default, body, verbose=None):
"""
Sets the values for cells in the table specified by the `tableType` and `networkId` parameters.
If the 'default` parameter is not specified, the message body should consist of key-value pairs with which to set values.
If the `default` parameter is specified, its value will be used for every cell in the column. This is useful to set columns like "selected."
:param networkId: SUID of the network containing the table
:param tableType: The type of table
:param columnName: Name of the column in which to set values
:param default: Default Value. If this value is provided, all cells will be set to this. -- Not required, can be None
:param body: Array of SUID Keyed values
:param verbose: print more
:returns: default: successful operation
"""
response=api(url=self.___url+'networks/'+str(networkId)+'/tables/'+str(tableType)+'/columns/'+str(columnName)+'', method="PUT", body=body, verbose=verbose)
return response | Sets the values for cells in the table specified by the `tableType` and `networkId` parameters.
If the 'default` parameter is not specified, the message body should consist of key-value pairs with which to set values.
If the `default` parameter is specified, its value will be used for every cell in the column. This is useful to set columns like "selected."
:param networkId: SUID of the network containing the table
:param tableType: The type of table
:param columnName: Name of the column in which to set values
:param default: Default Value. If this value is provided, all cells will be set to this. -- Not required, can be None
:param body: Array of SUID Keyed values
:param verbose: print more
:returns: default: successful operation | Below is the the instruction that describes the task:
### Input:
Sets the values for cells in the table specified by the `tableType` and `networkId` parameters.
If the 'default` parameter is not specified, the message body should consist of key-value pairs with which to set values.
If the `default` parameter is specified, its value will be used for every cell in the column. This is useful to set columns like "selected."
:param networkId: SUID of the network containing the table
:param tableType: The type of table
:param columnName: Name of the column in which to set values
:param default: Default Value. If this value is provided, all cells will be set to this. -- Not required, can be None
:param body: Array of SUID Keyed values
:param verbose: print more
:returns: default: successful operation
### Response:
def updateColumnValues(self, networkId, tableType, columnName, default, body, verbose=None):
"""
Sets the values for cells in the table specified by the `tableType` and `networkId` parameters.
If the 'default` parameter is not specified, the message body should consist of key-value pairs with which to set values.
If the `default` parameter is specified, its value will be used for every cell in the column. This is useful to set columns like "selected."
:param networkId: SUID of the network containing the table
:param tableType: The type of table
:param columnName: Name of the column in which to set values
:param default: Default Value. If this value is provided, all cells will be set to this. -- Not required, can be None
:param body: Array of SUID Keyed values
:param verbose: print more
:returns: default: successful operation
"""
response=api(url=self.___url+'networks/'+str(networkId)+'/tables/'+str(tableType)+'/columns/'+str(columnName)+'', method="PUT", body=body, verbose=verbose)
return response |
def colorline(ax, x, y, z, **kwargs):
"""
http://nbviewer.ipython.org/github/dpsanders/matplotlib-examples/blob/master/colorline.ipynb
http://matplotlib.org/examples/pylab_examples/multicolored_line.html
Plot a colored line with coordinates x and y
Optionally specify colors in the array z
Optionally specify a colormap, a norm function and a line width
"""
# Special case if a single number:
if not hasattr(z, "__iter__"): # to check for numerical input -- this is a hack
z = np.array([z])
z = np.asarray(z)
segments = make_segments(x, y)
lc = mcoll.LineCollection(segments, array=z, **kwargs)
ax.add_collection(lc)
if ax.get_autoscale_on():
ax.autoscale_view()
return lc | http://nbviewer.ipython.org/github/dpsanders/matplotlib-examples/blob/master/colorline.ipynb
http://matplotlib.org/examples/pylab_examples/multicolored_line.html
Plot a colored line with coordinates x and y
Optionally specify colors in the array z
Optionally specify a colormap, a norm function and a line width | Below is the the instruction that describes the task:
### Input:
http://nbviewer.ipython.org/github/dpsanders/matplotlib-examples/blob/master/colorline.ipynb
http://matplotlib.org/examples/pylab_examples/multicolored_line.html
Plot a colored line with coordinates x and y
Optionally specify colors in the array z
Optionally specify a colormap, a norm function and a line width
### Response:
def colorline(ax, x, y, z, **kwargs):
"""
http://nbviewer.ipython.org/github/dpsanders/matplotlib-examples/blob/master/colorline.ipynb
http://matplotlib.org/examples/pylab_examples/multicolored_line.html
Plot a colored line with coordinates x and y
Optionally specify colors in the array z
Optionally specify a colormap, a norm function and a line width
"""
# Special case if a single number:
if not hasattr(z, "__iter__"): # to check for numerical input -- this is a hack
z = np.array([z])
z = np.asarray(z)
segments = make_segments(x, y)
lc = mcoll.LineCollection(segments, array=z, **kwargs)
ax.add_collection(lc)
if ax.get_autoscale_on():
ax.autoscale_view()
return lc |
def dispose(self):
"""
Disposes every performed registration; the container can then be used again
"""
for registration in self._registrations.values():
registration.dispose()
self._registrations = {} | Disposes every performed registration; the container can then be used again | Below is the the instruction that describes the task:
### Input:
Disposes every performed registration; the container can then be used again
### Response:
def dispose(self):
"""
Disposes every performed registration; the container can then be used again
"""
for registration in self._registrations.values():
registration.dispose()
self._registrations = {} |
def get_store_profile_by_id(cls, store_profile_id, **kwargs):
"""Find StoreProfile
Return single instance of StoreProfile by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_store_profile_by_id(store_profile_id, async=True)
>>> result = thread.get()
:param async bool
:param str store_profile_id: ID of storeProfile to return (required)
:return: StoreProfile
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._get_store_profile_by_id_with_http_info(store_profile_id, **kwargs)
else:
(data) = cls._get_store_profile_by_id_with_http_info(store_profile_id, **kwargs)
return data | Find StoreProfile
Return single instance of StoreProfile by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_store_profile_by_id(store_profile_id, async=True)
>>> result = thread.get()
:param async bool
:param str store_profile_id: ID of storeProfile to return (required)
:return: StoreProfile
If the method is called asynchronously,
returns the request thread. | Below is the the instruction that describes the task:
### Input:
Find StoreProfile
Return single instance of StoreProfile by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_store_profile_by_id(store_profile_id, async=True)
>>> result = thread.get()
:param async bool
:param str store_profile_id: ID of storeProfile to return (required)
:return: StoreProfile
If the method is called asynchronously,
returns the request thread.
### Response:
def get_store_profile_by_id(cls, store_profile_id, **kwargs):
"""Find StoreProfile
Return single instance of StoreProfile by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_store_profile_by_id(store_profile_id, async=True)
>>> result = thread.get()
:param async bool
:param str store_profile_id: ID of storeProfile to return (required)
:return: StoreProfile
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._get_store_profile_by_id_with_http_info(store_profile_id, **kwargs)
else:
(data) = cls._get_store_profile_by_id_with_http_info(store_profile_id, **kwargs)
return data |
def reverse(
self,
query,
exactly_one=True,
timeout=DEFAULT_SENTINEL,
):
"""
Return an address by location point.
:param query: The coordinates for which you wish to obtain the
closest human-readable addresses.
:type query: :class:`geopy.point.Point`, list or tuple of ``(latitude,
longitude)``, or string as ``"%(latitude)s, %(longitude)s"``.
:param bool exactly_one: Return one result or a list of results, if
available.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
``exactly_one=False``.
"""
params = {}
params['access_token'] = self.api_key
point = self._coerce_point_to_string(query, "%(lon)s,%(lat)s")
quoted_query = quote(point.encode('utf-8'))
url = "?".join((self.api % dict(query=quoted_query),
urlencode(params)))
logger.debug("%s.reverse: %s", self.__class__.__name__, url)
return self._parse_json(
self._call_geocoder(url, timeout=timeout), exactly_one
) | Return an address by location point.
:param query: The coordinates for which you wish to obtain the
closest human-readable addresses.
:type query: :class:`geopy.point.Point`, list or tuple of ``(latitude,
longitude)``, or string as ``"%(latitude)s, %(longitude)s"``.
:param bool exactly_one: Return one result or a list of results, if
available.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
``exactly_one=False``. | Below is the the instruction that describes the task:
### Input:
Return an address by location point.
:param query: The coordinates for which you wish to obtain the
closest human-readable addresses.
:type query: :class:`geopy.point.Point`, list or tuple of ``(latitude,
longitude)``, or string as ``"%(latitude)s, %(longitude)s"``.
:param bool exactly_one: Return one result or a list of results, if
available.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
``exactly_one=False``.
### Response:
def reverse(
self,
query,
exactly_one=True,
timeout=DEFAULT_SENTINEL,
):
"""
Return an address by location point.
:param query: The coordinates for which you wish to obtain the
closest human-readable addresses.
:type query: :class:`geopy.point.Point`, list or tuple of ``(latitude,
longitude)``, or string as ``"%(latitude)s, %(longitude)s"``.
:param bool exactly_one: Return one result or a list of results, if
available.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
``exactly_one=False``.
"""
params = {}
params['access_token'] = self.api_key
point = self._coerce_point_to_string(query, "%(lon)s,%(lat)s")
quoted_query = quote(point.encode('utf-8'))
url = "?".join((self.api % dict(query=quoted_query),
urlencode(params)))
logger.debug("%s.reverse: %s", self.__class__.__name__, url)
return self._parse_json(
self._call_geocoder(url, timeout=timeout), exactly_one
) |
def setup_option_actions(self, exclude_private, exclude_uppercase,
exclude_capitalized, exclude_unsupported):
"""Setup the actions to show in the cog menu."""
self.setup_in_progress = True
self.exclude_private_action = create_action(self,
_("Exclude private references"),
tip=_("Exclude references which name starts"
" with an underscore"),
toggled=lambda state:
self.sig_option_changed.emit('exclude_private', state))
self.exclude_private_action.setChecked(exclude_private)
self.exclude_uppercase_action = create_action(self,
_("Exclude all-uppercase references"),
tip=_("Exclude references which name is uppercase"),
toggled=lambda state:
self.sig_option_changed.emit('exclude_uppercase', state))
self.exclude_uppercase_action.setChecked(exclude_uppercase)
self.exclude_capitalized_action = create_action(self,
_("Exclude capitalized references"),
tip=_("Exclude references which name starts with an "
"uppercase character"),
toggled=lambda state:
self.sig_option_changed.emit('exclude_capitalized', state))
self.exclude_capitalized_action.setChecked(exclude_capitalized)
self.exclude_unsupported_action = create_action(self,
_("Exclude unsupported data types"),
tip=_("Exclude references to unsupported data types"
" (i.e. which won't be handled/saved correctly)"),
toggled=lambda state:
self.sig_option_changed.emit('exclude_unsupported', state))
self.exclude_unsupported_action.setChecked(exclude_unsupported)
self.actions = [
self.exclude_private_action, self.exclude_uppercase_action,
self.exclude_capitalized_action, self.exclude_unsupported_action]
if is_module_installed('numpy'):
self.actions.extend([MENU_SEPARATOR, self.editor.minmax_action])
self.setup_in_progress = False | Setup the actions to show in the cog menu. | Below is the the instruction that describes the task:
### Input:
Setup the actions to show in the cog menu.
### Response:
def setup_option_actions(self, exclude_private, exclude_uppercase,
exclude_capitalized, exclude_unsupported):
"""Setup the actions to show in the cog menu."""
self.setup_in_progress = True
self.exclude_private_action = create_action(self,
_("Exclude private references"),
tip=_("Exclude references which name starts"
" with an underscore"),
toggled=lambda state:
self.sig_option_changed.emit('exclude_private', state))
self.exclude_private_action.setChecked(exclude_private)
self.exclude_uppercase_action = create_action(self,
_("Exclude all-uppercase references"),
tip=_("Exclude references which name is uppercase"),
toggled=lambda state:
self.sig_option_changed.emit('exclude_uppercase', state))
self.exclude_uppercase_action.setChecked(exclude_uppercase)
self.exclude_capitalized_action = create_action(self,
_("Exclude capitalized references"),
tip=_("Exclude references which name starts with an "
"uppercase character"),
toggled=lambda state:
self.sig_option_changed.emit('exclude_capitalized', state))
self.exclude_capitalized_action.setChecked(exclude_capitalized)
self.exclude_unsupported_action = create_action(self,
_("Exclude unsupported data types"),
tip=_("Exclude references to unsupported data types"
" (i.e. which won't be handled/saved correctly)"),
toggled=lambda state:
self.sig_option_changed.emit('exclude_unsupported', state))
self.exclude_unsupported_action.setChecked(exclude_unsupported)
self.actions = [
self.exclude_private_action, self.exclude_uppercase_action,
self.exclude_capitalized_action, self.exclude_unsupported_action]
if is_module_installed('numpy'):
self.actions.extend([MENU_SEPARATOR, self.editor.minmax_action])
self.setup_in_progress = False |
def timeout(self):
"""
Optional per-query timeout. If set, this will limit the amount
of time in which the query can be executed and waited for.
.. note::
The effective timeout for the query will be either this property
or the value of :attr:`couchbase.bucket.Bucket.n1ql_timeout`
property, whichever is *lower*.
.. seealso:: couchbase.bucket.Bucket.n1ql_timeout
"""
value = self._body.get('timeout', '0s')
value = value[:-1]
return float(value) | Optional per-query timeout. If set, this will limit the amount
of time in which the query can be executed and waited for.
.. note::
The effective timeout for the query will be either this property
or the value of :attr:`couchbase.bucket.Bucket.n1ql_timeout`
property, whichever is *lower*.
.. seealso:: couchbase.bucket.Bucket.n1ql_timeout | Below is the the instruction that describes the task:
### Input:
Optional per-query timeout. If set, this will limit the amount
of time in which the query can be executed and waited for.
.. note::
The effective timeout for the query will be either this property
or the value of :attr:`couchbase.bucket.Bucket.n1ql_timeout`
property, whichever is *lower*.
.. seealso:: couchbase.bucket.Bucket.n1ql_timeout
### Response:
def timeout(self):
"""
Optional per-query timeout. If set, this will limit the amount
of time in which the query can be executed and waited for.
.. note::
The effective timeout for the query will be either this property
or the value of :attr:`couchbase.bucket.Bucket.n1ql_timeout`
property, whichever is *lower*.
.. seealso:: couchbase.bucket.Bucket.n1ql_timeout
"""
value = self._body.get('timeout', '0s')
value = value[:-1]
return float(value) |
def expand_window(center, window_size, array_size):
"""Generate a bounded windows.
maxlength = 2 * window_size + 1, lower bound is 0 and upper bound is
``array_size - 1``.
Example::
>>> expand_window(center=50, window_size=3, max=100)
[47, 48, 49, 50, 51, 52, 53]
>>> expand_window(center=2, window_size=3, max=100)
[0, 1, 2, 3, 4, 5]
>>> expand_window(center=98, window_size=3, max=100)
[95, 96, 97, 98, 99]
"""
if center - window_size < 0:
lower = 0
else:
lower = center - window_size
if center + window_size + 1 > array_size:
upper = array_size
else:
upper = center + window_size + 1
return np.array(range(lower, upper)) | Generate a bounded windows.
maxlength = 2 * window_size + 1, lower bound is 0 and upper bound is
``array_size - 1``.
Example::
>>> expand_window(center=50, window_size=3, max=100)
[47, 48, 49, 50, 51, 52, 53]
>>> expand_window(center=2, window_size=3, max=100)
[0, 1, 2, 3, 4, 5]
>>> expand_window(center=98, window_size=3, max=100)
[95, 96, 97, 98, 99] | Below is the the instruction that describes the task:
### Input:
Generate a bounded windows.
maxlength = 2 * window_size + 1, lower bound is 0 and upper bound is
``array_size - 1``.
Example::
>>> expand_window(center=50, window_size=3, max=100)
[47, 48, 49, 50, 51, 52, 53]
>>> expand_window(center=2, window_size=3, max=100)
[0, 1, 2, 3, 4, 5]
>>> expand_window(center=98, window_size=3, max=100)
[95, 96, 97, 98, 99]
### Response:
def expand_window(center, window_size, array_size):
"""Generate a bounded windows.
maxlength = 2 * window_size + 1, lower bound is 0 and upper bound is
``array_size - 1``.
Example::
>>> expand_window(center=50, window_size=3, max=100)
[47, 48, 49, 50, 51, 52, 53]
>>> expand_window(center=2, window_size=3, max=100)
[0, 1, 2, 3, 4, 5]
>>> expand_window(center=98, window_size=3, max=100)
[95, 96, 97, 98, 99]
"""
if center - window_size < 0:
lower = 0
else:
lower = center - window_size
if center + window_size + 1 > array_size:
upper = array_size
else:
upper = center + window_size + 1
return np.array(range(lower, upper)) |
def b32decode(s, casefold=False, map01=None):
"""Decode a Base32 encoded string.
s is the string to decode. Optional casefold is a flag specifying whether
a lowercase alphabet is acceptable as input. For security purposes, the
default is False.
RFC 3548 allows for optional mapping of the digit 0 (zero) to the letter O
(oh), and for optional mapping of the digit 1 (one) to either the letter I
(eye) or letter L (el). The optional argument map01 when not None,
specifies which letter the digit 1 should be mapped to (when map01 is not
None, the digit 0 is always mapped to the letter O). For security
purposes the default is None, so that 0 and 1 are not allowed in the
input.
The decoded string is returned. A TypeError is raised if s were
incorrectly padded or if there are non-alphabet characters present in the
string.
"""
quanta, leftover = divmod(len(s), 8)
if leftover:
raise TypeError('Incorrect padding')
# Handle section 2.4 zero and one mapping. The flag map01 will be either
# False, or the character to map the digit 1 (one) to. It should be
# either L (el) or I (eye).
if map01:
s = s.translate(string.maketrans(b'01', b'O' + map01))
if casefold:
s = s.upper()
# Strip off pad characters from the right. We need to count the pad
# characters because this will tell us how many null bytes to remove from
# the end of the decoded string.
padchars = 0
mo = re.search('(?P<pad>[=]*)$', s)
if mo:
padchars = len(mo.group('pad'))
if padchars > 0:
s = s[:-padchars]
# Now decode the full quanta
parts = []
acc = 0
shift = 35
for c in s:
val = _b32rev.get(c)
if val is None:
raise TypeError('Non-base32 digit found')
acc += _b32rev[c] << shift
shift -= 5
if shift < 0:
parts.append(binascii.unhexlify('%010x' % acc))
acc = 0
shift = 35
# Process the last, partial quanta
last = binascii.unhexlify('%010x' % acc)
if padchars == 0:
last = '' # No characters
elif padchars == 1:
last = last[:-1]
elif padchars == 3:
last = last[:-2]
elif padchars == 4:
last = last[:-3]
elif padchars == 6:
last = last[:-4]
else:
raise TypeError('Incorrect padding')
parts.append(last)
return EMPTYSTRING.join(parts) | Decode a Base32 encoded string.
s is the string to decode. Optional casefold is a flag specifying whether
a lowercase alphabet is acceptable as input. For security purposes, the
default is False.
RFC 3548 allows for optional mapping of the digit 0 (zero) to the letter O
(oh), and for optional mapping of the digit 1 (one) to either the letter I
(eye) or letter L (el). The optional argument map01 when not None,
specifies which letter the digit 1 should be mapped to (when map01 is not
None, the digit 0 is always mapped to the letter O). For security
purposes the default is None, so that 0 and 1 are not allowed in the
input.
The decoded string is returned. A TypeError is raised if s were
incorrectly padded or if there are non-alphabet characters present in the
string. | Below is the the instruction that describes the task:
### Input:
Decode a Base32 encoded string.
s is the string to decode. Optional casefold is a flag specifying whether
a lowercase alphabet is acceptable as input. For security purposes, the
default is False.
RFC 3548 allows for optional mapping of the digit 0 (zero) to the letter O
(oh), and for optional mapping of the digit 1 (one) to either the letter I
(eye) or letter L (el). The optional argument map01 when not None,
specifies which letter the digit 1 should be mapped to (when map01 is not
None, the digit 0 is always mapped to the letter O). For security
purposes the default is None, so that 0 and 1 are not allowed in the
input.
The decoded string is returned. A TypeError is raised if s were
incorrectly padded or if there are non-alphabet characters present in the
string.
### Response:
def b32decode(s, casefold=False, map01=None):
"""Decode a Base32 encoded string.
s is the string to decode. Optional casefold is a flag specifying whether
a lowercase alphabet is acceptable as input. For security purposes, the
default is False.
RFC 3548 allows for optional mapping of the digit 0 (zero) to the letter O
(oh), and for optional mapping of the digit 1 (one) to either the letter I
(eye) or letter L (el). The optional argument map01 when not None,
specifies which letter the digit 1 should be mapped to (when map01 is not
None, the digit 0 is always mapped to the letter O). For security
purposes the default is None, so that 0 and 1 are not allowed in the
input.
The decoded string is returned. A TypeError is raised if s were
incorrectly padded or if there are non-alphabet characters present in the
string.
"""
quanta, leftover = divmod(len(s), 8)
if leftover:
raise TypeError('Incorrect padding')
# Handle section 2.4 zero and one mapping. The flag map01 will be either
# False, or the character to map the digit 1 (one) to. It should be
# either L (el) or I (eye).
if map01:
s = s.translate(string.maketrans(b'01', b'O' + map01))
if casefold:
s = s.upper()
# Strip off pad characters from the right. We need to count the pad
# characters because this will tell us how many null bytes to remove from
# the end of the decoded string.
padchars = 0
mo = re.search('(?P<pad>[=]*)$', s)
if mo:
padchars = len(mo.group('pad'))
if padchars > 0:
s = s[:-padchars]
# Now decode the full quanta
parts = []
acc = 0
shift = 35
for c in s:
val = _b32rev.get(c)
if val is None:
raise TypeError('Non-base32 digit found')
acc += _b32rev[c] << shift
shift -= 5
if shift < 0:
parts.append(binascii.unhexlify('%010x' % acc))
acc = 0
shift = 35
# Process the last, partial quanta
last = binascii.unhexlify('%010x' % acc)
if padchars == 0:
last = '' # No characters
elif padchars == 1:
last = last[:-1]
elif padchars == 3:
last = last[:-2]
elif padchars == 4:
last = last[:-3]
elif padchars == 6:
last = last[:-4]
else:
raise TypeError('Incorrect padding')
parts.append(last)
return EMPTYSTRING.join(parts) |
def get_identities(self, item):
""" Return the identities from an item """
identities = []
if 'data' not in item:
return identities
if 'revisions' not in item['data']:
return identities
revisions = item['data']['revisions']
for revision in revisions:
user = self.get_sh_identity(revision)
yield user | Return the identities from an item | Below is the the instruction that describes the task:
### Input:
Return the identities from an item
### Response:
def get_identities(self, item):
""" Return the identities from an item """
identities = []
if 'data' not in item:
return identities
if 'revisions' not in item['data']:
return identities
revisions = item['data']['revisions']
for revision in revisions:
user = self.get_sh_identity(revision)
yield user |
def _register_config_file(self, key, val):
""" Persist a newly added config file, or update (overwrite) the value
of a previously persisted config.
"""
state = self.__load_state()
if 'config_files' not in state:
state['config_files'] = {}
state['config_files'][key] = val
self.__dump_state(state) | Persist a newly added config file, or update (overwrite) the value
of a previously persisted config. | Below is the the instruction that describes the task:
### Input:
Persist a newly added config file, or update (overwrite) the value
of a previously persisted config.
### Response:
def _register_config_file(self, key, val):
""" Persist a newly added config file, or update (overwrite) the value
of a previously persisted config.
"""
state = self.__load_state()
if 'config_files' not in state:
state['config_files'] = {}
state['config_files'][key] = val
self.__dump_state(state) |
def setup_dictionary(self, task):
"""Setup dictionary."""
dictionary_options = task.get('dictionary', {})
output = os.path.abspath(dictionary_options.get('output', self.dict_bin))
lang = dictionary_options.get('lang', 'en_US')
wordlists = dictionary_options.get('wordlists', [])
if lang and wordlists:
self.compile_dictionary(lang, dictionary_options.get('wordlists', []), None, output)
else:
output = None
return output | Setup dictionary. | Below is the the instruction that describes the task:
### Input:
Setup dictionary.
### Response:
def setup_dictionary(self, task):
"""Setup dictionary."""
dictionary_options = task.get('dictionary', {})
output = os.path.abspath(dictionary_options.get('output', self.dict_bin))
lang = dictionary_options.get('lang', 'en_US')
wordlists = dictionary_options.get('wordlists', [])
if lang and wordlists:
self.compile_dictionary(lang, dictionary_options.get('wordlists', []), None, output)
else:
output = None
return output |
def get_date_range(year=None, month=None, day=None):
"""
Return a start..end range to query for a specific month, day or year.
"""
if year is None:
return None
if month is None:
# year only
start = datetime(year, 1, 1, 0, 0, 0, tzinfo=utc)
end = datetime(year, 12, 31, 23, 59, 59, 999, tzinfo=utc)
return (start, end)
if day is None:
# year + month only
start = datetime(year, month, 1, 0, 0, 0, tzinfo=utc)
end = start + timedelta(days=monthrange(year, month)[1], microseconds=-1)
return (start, end)
else:
# Exact day
start = datetime(year, month, day, 0, 0, 0, tzinfo=utc)
end = start + timedelta(days=1, microseconds=-1)
return (start, end) | Return a start..end range to query for a specific month, day or year. | Below is the the instruction that describes the task:
### Input:
Return a start..end range to query for a specific month, day or year.
### Response:
def get_date_range(year=None, month=None, day=None):
"""
Return a start..end range to query for a specific month, day or year.
"""
if year is None:
return None
if month is None:
# year only
start = datetime(year, 1, 1, 0, 0, 0, tzinfo=utc)
end = datetime(year, 12, 31, 23, 59, 59, 999, tzinfo=utc)
return (start, end)
if day is None:
# year + month only
start = datetime(year, month, 1, 0, 0, 0, tzinfo=utc)
end = start + timedelta(days=monthrange(year, month)[1], microseconds=-1)
return (start, end)
else:
# Exact day
start = datetime(year, month, day, 0, 0, 0, tzinfo=utc)
end = start + timedelta(days=1, microseconds=-1)
return (start, end) |
def get(self):
"""Get cached refresh token.
Returns:
Cached refresh token, or ``None`` on failure.
"""
logger.info(
'Loading refresh_token from %s', repr(self._filename)
)
try:
with open(self._filename) as f:
return f.read()
except IOError as e:
logger.info('Failed to load refresh_token: %s', e) | Get cached refresh token.
Returns:
Cached refresh token, or ``None`` on failure. | Below is the the instruction that describes the task:
### Input:
Get cached refresh token.
Returns:
Cached refresh token, or ``None`` on failure.
### Response:
def get(self):
"""Get cached refresh token.
Returns:
Cached refresh token, or ``None`` on failure.
"""
logger.info(
'Loading refresh_token from %s', repr(self._filename)
)
try:
with open(self._filename) as f:
return f.read()
except IOError as e:
logger.info('Failed to load refresh_token: %s', e) |
def setNetworkName(self, networkName='GRL'):
"""set Thread Network name
Args:
networkName: the networkname string to be set
Returns:
True: successful to set the Thread Networkname
False: fail to set the Thread Networkname
"""
print '%s call setNetworkName' % self.port
print networkName
try:
cmd = 'networkname %s' % networkName
datasetCmd = 'dataset networkname %s' % networkName
self.hasActiveDatasetToCommit = True
return self.__sendCommand(cmd)[0] == 'Done' and self.__sendCommand(datasetCmd)[0] == 'Done'
except Exception, e:
ModuleHelper.WriteIntoDebugLogger("setNetworkName() Error: " + str(e)) | set Thread Network name
Args:
networkName: the networkname string to be set
Returns:
True: successful to set the Thread Networkname
False: fail to set the Thread Networkname | Below is the the instruction that describes the task:
### Input:
set Thread Network name
Args:
networkName: the networkname string to be set
Returns:
True: successful to set the Thread Networkname
False: fail to set the Thread Networkname
### Response:
def setNetworkName(self, networkName='GRL'):
"""set Thread Network name
Args:
networkName: the networkname string to be set
Returns:
True: successful to set the Thread Networkname
False: fail to set the Thread Networkname
"""
print '%s call setNetworkName' % self.port
print networkName
try:
cmd = 'networkname %s' % networkName
datasetCmd = 'dataset networkname %s' % networkName
self.hasActiveDatasetToCommit = True
return self.__sendCommand(cmd)[0] == 'Done' and self.__sendCommand(datasetCmd)[0] == 'Done'
except Exception, e:
ModuleHelper.WriteIntoDebugLogger("setNetworkName() Error: " + str(e)) |
def delete_translations_for_item_and_its_children(self, item, languages=None):
"""
deletes the translations task of an item and its children
used when a model is not enabled anymore
:param item:
:param languages:
:return:
"""
self.log('--- Deleting translations ---')
if not self.master:
self.set_master(item)
object_name = '{} - {}'.format(item._meta.app_label.lower(), item._meta.verbose_name)
object_class = item.__class__.__name__
object_pk = item.pk
filter_by = {
'object_class': object_class,
'object_name': object_name,
'object_pk': object_pk,
'done': False
}
if languages:
filter_by.update({'language__code__in': languages})
TransTask.objects.filter(**filter_by).delete()
# then process child objects from main
children = self.get_translatable_children(item)
for child in children:
self.delete_translations_for_item_and_its_children(child, languages) | deletes the translations task of an item and its children
used when a model is not enabled anymore
:param item:
:param languages:
:return: | Below is the the instruction that describes the task:
### Input:
deletes the translations task of an item and its children
used when a model is not enabled anymore
:param item:
:param languages:
:return:
### Response:
def delete_translations_for_item_and_its_children(self, item, languages=None):
"""
deletes the translations task of an item and its children
used when a model is not enabled anymore
:param item:
:param languages:
:return:
"""
self.log('--- Deleting translations ---')
if not self.master:
self.set_master(item)
object_name = '{} - {}'.format(item._meta.app_label.lower(), item._meta.verbose_name)
object_class = item.__class__.__name__
object_pk = item.pk
filter_by = {
'object_class': object_class,
'object_name': object_name,
'object_pk': object_pk,
'done': False
}
if languages:
filter_by.update({'language__code__in': languages})
TransTask.objects.filter(**filter_by).delete()
# then process child objects from main
children = self.get_translatable_children(item)
for child in children:
self.delete_translations_for_item_and_its_children(child, languages) |
def list_availability_zones(call=None):
'''
List all availability zones in the current region
'''
ret = {}
params = {'Action': 'DescribeZones',
'RegionId': get_location()}
items = query(params)
for zone in items['Zones']['Zone']:
ret[zone['ZoneId']] = {}
for item in zone:
ret[zone['ZoneId']][item] = six.text_type(zone[item])
return ret | List all availability zones in the current region | Below is the the instruction that describes the task:
### Input:
List all availability zones in the current region
### Response:
def list_availability_zones(call=None):
'''
List all availability zones in the current region
'''
ret = {}
params = {'Action': 'DescribeZones',
'RegionId': get_location()}
items = query(params)
for zone in items['Zones']['Zone']:
ret[zone['ZoneId']] = {}
for item in zone:
ret[zone['ZoneId']][item] = six.text_type(zone[item])
return ret |
def call_orig(self, *args, **kwargs):
'''
Calls the original function. Simulates __new__ and __init__ together.
'''
rval = super(StubNew, self).call_orig(self._type)
rval.__init__(*args, **kwargs)
return rval | Calls the original function. Simulates __new__ and __init__ together. | Below is the the instruction that describes the task:
### Input:
Calls the original function. Simulates __new__ and __init__ together.
### Response:
def call_orig(self, *args, **kwargs):
'''
Calls the original function. Simulates __new__ and __init__ together.
'''
rval = super(StubNew, self).call_orig(self._type)
rval.__init__(*args, **kwargs)
return rval |
def parse_nds2_enums(func):
"""Decorate a function to translate a type string into an integer
"""
@wraps(func)
def wrapped_func(*args, **kwargs): # pylint: disable=missing-docstring
for kwd, enum_ in (('type', Nds2ChannelType),
('dtype', Nds2DataType)):
if kwargs.get(kwd, None) is None:
kwargs[kwd] = enum_.any()
elif not isinstance(kwargs[kwd], int):
kwargs[kwd] = enum_.find(kwargs[kwd]).value
return func(*args, **kwargs)
return wrapped_func | Decorate a function to translate a type string into an integer | Below is the the instruction that describes the task:
### Input:
Decorate a function to translate a type string into an integer
### Response:
def parse_nds2_enums(func):
"""Decorate a function to translate a type string into an integer
"""
@wraps(func)
def wrapped_func(*args, **kwargs): # pylint: disable=missing-docstring
for kwd, enum_ in (('type', Nds2ChannelType),
('dtype', Nds2DataType)):
if kwargs.get(kwd, None) is None:
kwargs[kwd] = enum_.any()
elif not isinstance(kwargs[kwd], int):
kwargs[kwd] = enum_.find(kwargs[kwd]).value
return func(*args, **kwargs)
return wrapped_func |
def address_reencode(address, blockchain='bitcoin', **blockchain_opts):
"""
Reencode an address
"""
if blockchain == 'bitcoin':
return btc_address_reencode(address, **blockchain_opts)
else:
raise ValueError("Unknown blockchain '{}'".format(blockchain)) | Reencode an address | Below is the the instruction that describes the task:
### Input:
Reencode an address
### Response:
def address_reencode(address, blockchain='bitcoin', **blockchain_opts):
"""
Reencode an address
"""
if blockchain == 'bitcoin':
return btc_address_reencode(address, **blockchain_opts)
else:
raise ValueError("Unknown blockchain '{}'".format(blockchain)) |
def speak(self, textstr, lang='en-US', gender='female', format='riff-16khz-16bit-mono-pcm'):
"""
Run will call Microsoft Translate API and and produce audio
"""
# print("speak(textstr=%s, lang=%s, gender=%s, format=%s)" % (textstr, lang, gender, format))
concatkey = '%s-%s-%s-%s' % (textstr, lang.lower(), gender.lower(), format)
key = self.tts_engine + '' + str(hash(concatkey))
self.filename = '%s-%s.mp3' % (key, lang)
# check if file exists
fileloc = self.directory + self.filename
if self.cache and os.path.isfile(self.directory + self.filename):
return self.filename
else:
with open(fileloc, 'wb') as f:
self.speech.speak_to_file(f, textstr, lang, gender, format)
return self.filename
return False | Run will call Microsoft Translate API and and produce audio | Below is the the instruction that describes the task:
### Input:
Run will call Microsoft Translate API and and produce audio
### Response:
def speak(self, textstr, lang='en-US', gender='female', format='riff-16khz-16bit-mono-pcm'):
"""
Run will call Microsoft Translate API and and produce audio
"""
# print("speak(textstr=%s, lang=%s, gender=%s, format=%s)" % (textstr, lang, gender, format))
concatkey = '%s-%s-%s-%s' % (textstr, lang.lower(), gender.lower(), format)
key = self.tts_engine + '' + str(hash(concatkey))
self.filename = '%s-%s.mp3' % (key, lang)
# check if file exists
fileloc = self.directory + self.filename
if self.cache and os.path.isfile(self.directory + self.filename):
return self.filename
else:
with open(fileloc, 'wb') as f:
self.speech.speak_to_file(f, textstr, lang, gender, format)
return self.filename
return False |
def _run_init_queries(self):
'''
Initialization queries
'''
for obj in (Package, PackageCfgFile, PayloadFile, IgnoredDir, AllowedDir):
self._db.create_table_from_object(obj()) | Initialization queries | Below is the the instruction that describes the task:
### Input:
Initialization queries
### Response:
def _run_init_queries(self):
'''
Initialization queries
'''
for obj in (Package, PackageCfgFile, PayloadFile, IgnoredDir, AllowedDir):
self._db.create_table_from_object(obj()) |
def get_app_metadata(template_dict):
"""
Get the application metadata from a SAM template.
:param template_dict: SAM template as a dictionary
:type template_dict: dict
:return: Application metadata as defined in the template
:rtype: ApplicationMetadata
:raises ApplicationMetadataNotFoundError
"""
if SERVERLESS_REPO_APPLICATION in template_dict.get(METADATA, {}):
app_metadata_dict = template_dict.get(METADATA).get(SERVERLESS_REPO_APPLICATION)
return ApplicationMetadata(app_metadata_dict)
raise ApplicationMetadataNotFoundError(
error_message='missing {} section in template Metadata'.format(SERVERLESS_REPO_APPLICATION)) | Get the application metadata from a SAM template.
:param template_dict: SAM template as a dictionary
:type template_dict: dict
:return: Application metadata as defined in the template
:rtype: ApplicationMetadata
:raises ApplicationMetadataNotFoundError | Below is the the instruction that describes the task:
### Input:
Get the application metadata from a SAM template.
:param template_dict: SAM template as a dictionary
:type template_dict: dict
:return: Application metadata as defined in the template
:rtype: ApplicationMetadata
:raises ApplicationMetadataNotFoundError
### Response:
def get_app_metadata(template_dict):
"""
Get the application metadata from a SAM template.
:param template_dict: SAM template as a dictionary
:type template_dict: dict
:return: Application metadata as defined in the template
:rtype: ApplicationMetadata
:raises ApplicationMetadataNotFoundError
"""
if SERVERLESS_REPO_APPLICATION in template_dict.get(METADATA, {}):
app_metadata_dict = template_dict.get(METADATA).get(SERVERLESS_REPO_APPLICATION)
return ApplicationMetadata(app_metadata_dict)
raise ApplicationMetadataNotFoundError(
error_message='missing {} section in template Metadata'.format(SERVERLESS_REPO_APPLICATION)) |
def compute_performance(SC, verbose=True, output='dict'):
"""
Return some performance value for comparison.
Parameters
-------
SC: SortingComparison instance
The SortingComparison
verbose: bool
Display on console or not
output: dict or pandas
Returns
----------
performance: dict or pandas.Serie depending output param
"""
counts = SC._counts
tp_rate = float(counts['TP']) / counts['TOT_ST1'] * 100
cl_rate = float(counts['CL']) / counts['TOT_ST1'] * 100
fn_rate = float(counts['FN']) / counts['TOT_ST1'] * 100
fp_st1 = float(counts['FP']) / counts['TOT_ST1'] * 100
fp_st2 = float(counts['FP']) / counts['TOT_ST2'] * 100
accuracy = tp_rate / (tp_rate + fn_rate + fp_st1) * 100
sensitivity = tp_rate / (tp_rate + fn_rate) * 100
miss_rate = fn_rate / (tp_rate + fn_rate) * 100
precision = tp_rate / (tp_rate + fp_st1) * 100
false_discovery_rate = fp_st1 / (tp_rate + fp_st1) * 100
performance = {'tp': tp_rate, 'cl': cl_rate, 'fn': fn_rate, 'fp_st1': fp_st1, 'fp_st2': fp_st2,
'accuracy': accuracy, 'sensitivity': sensitivity, 'precision': precision, 'miss_rate': miss_rate,
'false_disc_rate': false_discovery_rate}
if verbose:
txt = _txt_performance.format(**performance)
print(txt)
if output == 'dict':
return performance
elif output == 'pandas':
return pd.Series(performance) | Return some performance value for comparison.
Parameters
-------
SC: SortingComparison instance
The SortingComparison
verbose: bool
Display on console or not
output: dict or pandas
Returns
----------
performance: dict or pandas.Serie depending output param | Below is the the instruction that describes the task:
### Input:
Return some performance value for comparison.
Parameters
-------
SC: SortingComparison instance
The SortingComparison
verbose: bool
Display on console or not
output: dict or pandas
Returns
----------
performance: dict or pandas.Serie depending output param
### Response:
def compute_performance(SC, verbose=True, output='dict'):
"""
Return some performance value for comparison.
Parameters
-------
SC: SortingComparison instance
The SortingComparison
verbose: bool
Display on console or not
output: dict or pandas
Returns
----------
performance: dict or pandas.Serie depending output param
"""
counts = SC._counts
tp_rate = float(counts['TP']) / counts['TOT_ST1'] * 100
cl_rate = float(counts['CL']) / counts['TOT_ST1'] * 100
fn_rate = float(counts['FN']) / counts['TOT_ST1'] * 100
fp_st1 = float(counts['FP']) / counts['TOT_ST1'] * 100
fp_st2 = float(counts['FP']) / counts['TOT_ST2'] * 100
accuracy = tp_rate / (tp_rate + fn_rate + fp_st1) * 100
sensitivity = tp_rate / (tp_rate + fn_rate) * 100
miss_rate = fn_rate / (tp_rate + fn_rate) * 100
precision = tp_rate / (tp_rate + fp_st1) * 100
false_discovery_rate = fp_st1 / (tp_rate + fp_st1) * 100
performance = {'tp': tp_rate, 'cl': cl_rate, 'fn': fn_rate, 'fp_st1': fp_st1, 'fp_st2': fp_st2,
'accuracy': accuracy, 'sensitivity': sensitivity, 'precision': precision, 'miss_rate': miss_rate,
'false_disc_rate': false_discovery_rate}
if verbose:
txt = _txt_performance.format(**performance)
print(txt)
if output == 'dict':
return performance
elif output == 'pandas':
return pd.Series(performance) |
def fcoe_get_login_output_fcoe_login_list_interface_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fcoe_get_login = ET.Element("fcoe_get_login")
config = fcoe_get_login
output = ET.SubElement(fcoe_get_login, "output")
fcoe_login_list = ET.SubElement(output, "fcoe-login-list")
fcoe_login_session_mac_key = ET.SubElement(fcoe_login_list, "fcoe-login-session-mac")
fcoe_login_session_mac_key.text = kwargs.pop('fcoe_login_session_mac')
interface_type = ET.SubElement(fcoe_login_list, "interface-type")
interface_type.text = kwargs.pop('interface_type')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def fcoe_get_login_output_fcoe_login_list_interface_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fcoe_get_login = ET.Element("fcoe_get_login")
config = fcoe_get_login
output = ET.SubElement(fcoe_get_login, "output")
fcoe_login_list = ET.SubElement(output, "fcoe-login-list")
fcoe_login_session_mac_key = ET.SubElement(fcoe_login_list, "fcoe-login-session-mac")
fcoe_login_session_mac_key.text = kwargs.pop('fcoe_login_session_mac')
interface_type = ET.SubElement(fcoe_login_list, "interface-type")
interface_type.text = kwargs.pop('interface_type')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def _set_master_state(self, state):
"""Set the state of the SDPMaster."""
if state == 'init':
self._service_state.update_current_state('init', force=True)
self.set_state(DevState.INIT)
elif state == 'on':
self.set_state(DevState.ON)
self._service_state.update_current_state('on') | Set the state of the SDPMaster. | Below is the the instruction that describes the task:
### Input:
Set the state of the SDPMaster.
### Response:
def _set_master_state(self, state):
"""Set the state of the SDPMaster."""
if state == 'init':
self._service_state.update_current_state('init', force=True)
self.set_state(DevState.INIT)
elif state == 'on':
self.set_state(DevState.ON)
self._service_state.update_current_state('on') |
def was_applied(self):
"""
For LWT results, returns whether the transaction was applied.
Result is indeterminate if called on a result that was not an LWT request or on
a :class:`.query.BatchStatement` containing LWT. In the latter case either all the batch
succeeds or fails.
Only valid when one of the of the internal row factories is in use.
"""
if self.response_future.row_factory not in (named_tuple_factory, dict_factory, tuple_factory):
raise RuntimeError("Cannot determine LWT result with row factory %s" % (self.response_future.row_factory,))
is_batch_statement = isinstance(self.response_future.query, BatchStatement)
if is_batch_statement and (not self.column_names or self.column_names[0] != "[applied]"):
raise RuntimeError("No LWT were present in the BatchStatement")
if not is_batch_statement and len(self.current_rows) != 1:
raise RuntimeError("LWT result should have exactly one row. This has %d." % (len(self.current_rows)))
row = self.current_rows[0]
if isinstance(row, tuple):
return row[0]
else:
return row['[applied]'] | For LWT results, returns whether the transaction was applied.
Result is indeterminate if called on a result that was not an LWT request or on
a :class:`.query.BatchStatement` containing LWT. In the latter case either all the batch
succeeds or fails.
Only valid when one of the of the internal row factories is in use. | Below is the the instruction that describes the task:
### Input:
For LWT results, returns whether the transaction was applied.
Result is indeterminate if called on a result that was not an LWT request or on
a :class:`.query.BatchStatement` containing LWT. In the latter case either all the batch
succeeds or fails.
Only valid when one of the of the internal row factories is in use.
### Response:
def was_applied(self):
"""
For LWT results, returns whether the transaction was applied.
Result is indeterminate if called on a result that was not an LWT request or on
a :class:`.query.BatchStatement` containing LWT. In the latter case either all the batch
succeeds or fails.
Only valid when one of the of the internal row factories is in use.
"""
if self.response_future.row_factory not in (named_tuple_factory, dict_factory, tuple_factory):
raise RuntimeError("Cannot determine LWT result with row factory %s" % (self.response_future.row_factory,))
is_batch_statement = isinstance(self.response_future.query, BatchStatement)
if is_batch_statement and (not self.column_names or self.column_names[0] != "[applied]"):
raise RuntimeError("No LWT were present in the BatchStatement")
if not is_batch_statement and len(self.current_rows) != 1:
raise RuntimeError("LWT result should have exactly one row. This has %d." % (len(self.current_rows)))
row = self.current_rows[0]
if isinstance(row, tuple):
return row[0]
else:
return row['[applied]'] |
def izip_exact(*iterables):
"""
A lazy izip() that ensures that all iterables have the same length.
A LengthMismatch exception is raised if the iterables' lengths differ.
Examples
--------
>>> list(zip_exc([]))
[]
>>> list(zip_exc((), (), ()))
[]
>>> list(zip_exc("abc", range(3)))
[('a', 0), ('b', 1), ('c', 2)]
>>> try:
... list(zip_exc("", range(3)))
... except LengthMismatch:
... print "mismatch"
mismatch
>>> try:
... list(zip_exc(range(3), ()))
... except LengthMismatch:
... print "mismatch"
mismatch
>>> try:
... list(zip_exc(range(3), range(2), range(4)))
... except LengthMismatch:
... print "mismatch"
mismatch
>>> items = zip_exc(range(3), range(2), range(4))
>>> next(items)
(0, 0, 0)
>>> next(items)
(1, 1, 1)
>>> try: next(items)
... except LengthMismatch: print "mismatch"
mismatch
References
----------
[1] http://code.activestate.com/recipes/497006-zip_exc-a-lazy-zip-that-ensures-that-all-iterables/
"""
rest = [chain(i, _throw()) for i in iterables[1:]]
first = chain(iterables[0], _check(rest))
return zip(*[first] + rest) | A lazy izip() that ensures that all iterables have the same length.
A LengthMismatch exception is raised if the iterables' lengths differ.
Examples
--------
>>> list(zip_exc([]))
[]
>>> list(zip_exc((), (), ()))
[]
>>> list(zip_exc("abc", range(3)))
[('a', 0), ('b', 1), ('c', 2)]
>>> try:
... list(zip_exc("", range(3)))
... except LengthMismatch:
... print "mismatch"
mismatch
>>> try:
... list(zip_exc(range(3), ()))
... except LengthMismatch:
... print "mismatch"
mismatch
>>> try:
... list(zip_exc(range(3), range(2), range(4)))
... except LengthMismatch:
... print "mismatch"
mismatch
>>> items = zip_exc(range(3), range(2), range(4))
>>> next(items)
(0, 0, 0)
>>> next(items)
(1, 1, 1)
>>> try: next(items)
... except LengthMismatch: print "mismatch"
mismatch
References
----------
[1] http://code.activestate.com/recipes/497006-zip_exc-a-lazy-zip-that-ensures-that-all-iterables/ | Below is the the instruction that describes the task:
### Input:
A lazy izip() that ensures that all iterables have the same length.
A LengthMismatch exception is raised if the iterables' lengths differ.
Examples
--------
>>> list(zip_exc([]))
[]
>>> list(zip_exc((), (), ()))
[]
>>> list(zip_exc("abc", range(3)))
[('a', 0), ('b', 1), ('c', 2)]
>>> try:
... list(zip_exc("", range(3)))
... except LengthMismatch:
... print "mismatch"
mismatch
>>> try:
... list(zip_exc(range(3), ()))
... except LengthMismatch:
... print "mismatch"
mismatch
>>> try:
... list(zip_exc(range(3), range(2), range(4)))
... except LengthMismatch:
... print "mismatch"
mismatch
>>> items = zip_exc(range(3), range(2), range(4))
>>> next(items)
(0, 0, 0)
>>> next(items)
(1, 1, 1)
>>> try: next(items)
... except LengthMismatch: print "mismatch"
mismatch
References
----------
[1] http://code.activestate.com/recipes/497006-zip_exc-a-lazy-zip-that-ensures-that-all-iterables/
### Response:
def izip_exact(*iterables):
"""
A lazy izip() that ensures that all iterables have the same length.
A LengthMismatch exception is raised if the iterables' lengths differ.
Examples
--------
>>> list(zip_exc([]))
[]
>>> list(zip_exc((), (), ()))
[]
>>> list(zip_exc("abc", range(3)))
[('a', 0), ('b', 1), ('c', 2)]
>>> try:
... list(zip_exc("", range(3)))
... except LengthMismatch:
... print "mismatch"
mismatch
>>> try:
... list(zip_exc(range(3), ()))
... except LengthMismatch:
... print "mismatch"
mismatch
>>> try:
... list(zip_exc(range(3), range(2), range(4)))
... except LengthMismatch:
... print "mismatch"
mismatch
>>> items = zip_exc(range(3), range(2), range(4))
>>> next(items)
(0, 0, 0)
>>> next(items)
(1, 1, 1)
>>> try: next(items)
... except LengthMismatch: print "mismatch"
mismatch
References
----------
[1] http://code.activestate.com/recipes/497006-zip_exc-a-lazy-zip-that-ensures-that-all-iterables/
"""
rest = [chain(i, _throw()) for i in iterables[1:]]
first = chain(iterables[0], _check(rest))
return zip(*[first] + rest) |
def getPerfInfo(rh, useridlist):
"""
Get the performance information for a userid
Input:
Request Handle
Userid to query <- may change this to a list later.
Output:
Dictionary containing the following:
overallRC - overall return code, 0: success, non-zero: failure
rc - RC returned from SMCLI if overallRC = 0.
rs - RS returned from SMCLI if overallRC = 0.
errno - Errno returned from SMCLI if overallRC = 0.
response - Stripped and reformatted output of the SMCLI command.
"""
rh.printSysLog("Enter vmUtils.getPerfInfo, userid: " + useridlist)
parms = ["-T", rh.userid,
"-c", "1"]
results = invokeSMCLI(rh, "Image_Performance_Query", parms)
if results['overallRC'] != 0:
# SMCLI failed.
rh.printLn("ES", results['response'])
rh.printSysLog("Exit vmUtils.getPerfInfo, rc: " +
str(results['overallRC']))
return results
lines = results['response'].split("\n")
usedTime = 0
totalCpu = 0
totalMem = 0
usedMem = 0
try:
for line in lines:
if "Used CPU time:" in line:
usedTime = line.split()[3].strip('"')
# Value is in us, need make it seconds
usedTime = int(usedTime) / 1000000
if "Guest CPUs:" in line:
totalCpu = line.split()[2].strip('"')
if "Max memory:" in line:
totalMem = line.split()[2].strip('"')
# Value is in Kb, need to make it Mb
totalMem = int(totalMem) / 1024
if "Used memory:" in line:
usedMem = line.split()[2].strip('"')
usedMem = int(usedMem) / 1024
except Exception as e:
msg = msgs.msg['0412'][1] % (modId, type(e).__name__,
str(e), results['response'])
rh.printLn("ES", msg)
results['overallRC'] = 4
results['rc'] = 4
results['rs'] = 412
if results['overallRC'] == 0:
memstr = "Total Memory: %iM\n" % totalMem
usedmemstr = "Used Memory: %iM\n" % usedMem
procstr = "Processors: %s\n" % totalCpu
timestr = "CPU Used Time: %i sec\n" % usedTime
results['response'] = memstr + usedmemstr + procstr + timestr
rh.printSysLog("Exit vmUtils.getPerfInfo, rc: " +
str(results['rc']))
return results | Get the performance information for a userid
Input:
Request Handle
Userid to query <- may change this to a list later.
Output:
Dictionary containing the following:
overallRC - overall return code, 0: success, non-zero: failure
rc - RC returned from SMCLI if overallRC = 0.
rs - RS returned from SMCLI if overallRC = 0.
errno - Errno returned from SMCLI if overallRC = 0.
response - Stripped and reformatted output of the SMCLI command. | Below is the the instruction that describes the task:
### Input:
Get the performance information for a userid
Input:
Request Handle
Userid to query <- may change this to a list later.
Output:
Dictionary containing the following:
overallRC - overall return code, 0: success, non-zero: failure
rc - RC returned from SMCLI if overallRC = 0.
rs - RS returned from SMCLI if overallRC = 0.
errno - Errno returned from SMCLI if overallRC = 0.
response - Stripped and reformatted output of the SMCLI command.
### Response:
def getPerfInfo(rh, useridlist):
    """
    Get the performance information for a userid
    Input:
       Request Handle
       Userid to query <- may change this to a list later.
    Output:
       Dictionary containing the following:
          overallRC - overall return code, 0: success, non-zero: failure
          rc  - RC returned from SMCLI if overallRC = 0.
          rs  - RS returned from SMCLI if overallRC = 0.
          errno - Errno returned from SMCLI if overallRC = 0.
          response - Stripped and reformatted output of the SMCLI command.
    """
    rh.printSysLog("Enter vmUtils.getPerfInfo, userid: " + useridlist)
    # NOTE(review): the query below uses rh.userid, not useridlist; the
    # useridlist parameter is only used for logging -- confirm intended.
    parms = ["-T", rh.userid,
             "-c", "1"]
    results = invokeSMCLI(rh, "Image_Performance_Query", parms)
    if results['overallRC'] != 0:
        # SMCLI failed: log the error output and propagate its results dict.
        rh.printLn("ES", results['response'])
        rh.printSysLog("Exit vmUtils.getPerfInfo, rc: " +
                       str(results['overallRC']))
        return results
    lines = results['response'].split("\n")
    usedTime = 0
    totalCpu = 0
    totalMem = 0
    usedMem = 0
    try:
        # Scan the SMCLI output line by line for the fields of interest;
        # values are quoted in the output, hence the strip('"') calls.
        for line in lines:
            if "Used CPU time:" in line:
                usedTime = line.split()[3].strip('"')
                # Value is in us, need make it seconds
                usedTime = int(usedTime) / 1000000
            if "Guest CPUs:" in line:
                totalCpu = line.split()[2].strip('"')
            if "Max memory:" in line:
                totalMem = line.split()[2].strip('"')
                # Value is in Kb, need to make it Mb
                totalMem = int(totalMem) / 1024
            if "Used memory:" in line:
                usedMem = line.split()[2].strip('"')
                usedMem = int(usedMem) / 1024
    except Exception as e:
        # Unexpected output format: report a parse failure (msg 0412).
        msg = msgs.msg['0412'][1] % (modId, type(e).__name__,
                                     str(e), results['response'])
        rh.printLn("ES", msg)
        results['overallRC'] = 4
        results['rc'] = 4
        results['rs'] = 412
    if results['overallRC'] == 0:
        # Replace the raw SMCLI output with a human-readable summary.
        memstr = "Total Memory: %iM\n" % totalMem
        usedmemstr = "Used Memory: %iM\n" % usedMem
        procstr = "Processors: %s\n" % totalCpu
        timestr = "CPU Used Time: %i sec\n" % usedTime
        results['response'] = memstr + usedmemstr + procstr + timestr
    rh.printSysLog("Exit vmUtils.getPerfInfo, rc: " +
                   str(results['rc']))
    return results
def parse_create(prs, conn):
"""Create record.
Arguments:
prs: parser object of argparse
conn: dictionary of connection information
"""
prs_create = prs.add_parser(
'create', help='create record of specific zone')
set_option(prs_create, 'domain')
conn_options(prs_create, conn)
prs_create.set_defaults(func=create) | Create record.
Arguments:
prs: parser object of argparse
conn: dictionary of connection information | Below is the instruction that describes the task:
### Input:
Create record.
Arguments:
prs: parser object of argparse
conn: dictionary of connection information
### Response:
def parse_create(prs, conn):
    """Register the ``create`` sub-command for record creation.

    Arguments:
        prs: parser object of argparse
        conn: dictionary of connection information
    """
    subparser = prs.add_parser(
        'create', help='create record of specific zone')
    # Attach the domain option plus shared connection options, then bind
    # the handler function invoked when this sub-command is selected.
    set_option(subparser, 'domain')
    conn_options(subparser, conn)
    subparser.set_defaults(func=create)
def list_namespaced_job(self, namespace, **kwargs): # noqa: E501
"""list_namespaced_job # noqa: E501
list or watch objects of kind Job # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_job(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_namespaced_job_with_http_info(namespace, **kwargs) # noqa: E501
else:
(data) = self.list_namespaced_job_with_http_info(namespace, **kwargs) # noqa: E501
return data | list_namespaced_job # noqa: E501
list or watch objects of kind Job # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_job(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
list_namespaced_job # noqa: E501
list or watch objects of kind Job # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_job(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1JobList
If the method is called asynchronously,
returns the request thread.
### Response:
def list_namespaced_job(self, namespace, **kwargs):  # noqa: E501
    """list_namespaced_job  # noqa: E501

    list or watch objects of kind Job.  Synchronous by default; pass
    async_req=True to perform the request asynchronously, in which case
    the request thread is returned instead of the data.

    >>> thread = api.list_namespaced_job(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param bool include_uninitialized: If true, partially initialized resources are included in the response.
    :param str pretty: If 'true', then the output is pretty printed.
    :param str _continue: Continuation token from a previous paginated list call.
    :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
    :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
    :param int limit: Maximum number of responses to return for a list call.
    :param str resource_version: When specified, shows changes that occur after that particular version of a resource.
    :param int timeout_seconds: Timeout for the list/watch call.
    :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications.
    :return: V1JobList
             If the method is called asynchronously,
             returns the request thread.
    """
    # Both branches delegate to the *_with_http_info variant; only the
    # deserialized data (not the full HTTP response triple) is returned.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.list_namespaced_job_with_http_info(namespace, **kwargs)  # noqa: E501
    data = self.list_namespaced_job_with_http_info(namespace, **kwargs)  # noqa: E501
    return data
def get_bilinear_residuals_stepp(input_params, xvals, yvals, slope1_fit):
'''
Returns the residual sum-of-squares value of a bilinear fit to a data
set - with a segment - 1 gradient fixed by an input value (slope_1_fit)
:param list input_params:
Input parameters for the bilinear model [slope2, crossover_point,
intercept]
:param numpy.ndarray xvals:
x-values of the data to be fit
:param numpy.ndarray yvals:
y-values of the data to be fit
:param float slope1_fit:
Gradient of the first slope
:returns:
Residual sum-of-squares of fit
'''
params = np.hstack([slope1_fit, input_params])
num_x = len(xvals)
y_model = np.zeros(num_x, dtype=float)
residuals = np.zeros(num_x, dtype=float)
for iloc in range(0, num_x):
y_model[iloc] = piecewise_linear_scalar(params, xvals[iloc])
residuals[iloc] = (yvals[iloc] - y_model[iloc]) ** 2.0
return np.sum(residuals) | Returns the residual sum-of-squares value of a bilinear fit to a data
set - with a segment - 1 gradient fixed by an input value (slope_1_fit)
:param list input_params:
Input parameters for the bilinear model [slope2, crossover_point,
intercept]
:param numpy.ndarray xvals:
x-values of the data to be fit
:param numpy.ndarray yvals:
y-values of the data to be fit
:param float slope1_fit:
Gradient of the first slope
:returns:
Residual sum-of-squares of fit | Below is the instruction that describes the task:
### Input:
Returns the residual sum-of-squares value of a bilinear fit to a data
set - with a segment - 1 gradient fixed by an input value (slope_1_fit)
:param list input_params:
Input parameters for the bilinear model [slope2, crossover_point,
intercept]
:param numpy.ndarray xvals:
x-values of the data to be fit
:param numpy.ndarray yvals:
y-values of the data to be fit
:param float slope1_fit:
Gradient of the first slope
:returns:
Residual sum-of-squares of fit
### Response:
def get_bilinear_residuals_stepp(input_params, xvals, yvals, slope1_fit):
    '''
    Returns the residual sum-of-squares value of a bilinear fit to a data
    set, with the segment-1 gradient fixed by an input value (slope1_fit)
    :param list input_params:
        Input parameters for the bilinear model [slope2, crossover_point,
        intercept]
    :param numpy.ndarray xvals:
        x-values of the data to be fit
    :param numpy.ndarray yvals:
        y-values of the data to be fit
    :param float slope1_fit:
        Gradient of the first slope
    :returns:
        Residual sum-of-squares of fit
    '''
    # Prepend the fixed first-segment slope to the free parameters.
    params = np.hstack([slope1_fit, input_params])
    # piecewise_linear_scalar accepts scalar x only, so evaluate pointwise.
    model = np.array([piecewise_linear_scalar(params, xval)
                      for xval in xvals])
    return np.sum((np.asarray(yvals) - model) ** 2.0)
def _fetchDevAll(self, namestr, devlist, statsfunc):
"""Initialize I/O stats for devices.
@param namestr: Field name component indicating device type.
@param devlist: List of devices.
@param statsfunc: Function for retrieving stats for device.
"""
for dev in devlist:
stats = statsfunc(dev)
name = 'diskio_%s_requests' % namestr
if self.hasGraph(name):
self.setGraphVal(name, dev + '_read', stats['rios'])
self.setGraphVal(name, dev + '_write', stats['wios'])
name = 'diskio_%s_bytes' % namestr
if self.hasGraph(name):
self.setGraphVal(name, dev + '_read', stats['rbytes'])
self.setGraphVal(name, dev + '_write', stats['wbytes'])
name = 'diskio_%s_active' % namestr
if self.hasGraph(name):
self.setGraphVal(name, dev, stats['ios_active']) | Initialize I/O stats for devices.
@param namestr: Field name component indicating device type.
@param devlist: List of devices.
@param statsfunc: Function for retrieving stats for device. | Below is the instruction that describes the task:
### Input:
Initialize I/O stats for devices.
@param namestr: Field name component indicating device type.
@param devlist: List of devices.
@param statsfunc: Function for retrieving stats for device.
### Response:
def _fetchDevAll(self, namestr, devlist, statsfunc):
    """Initialize I/O stats for devices.
    @param namestr:   Field name component indicating device type.
    @param devlist:   List of devices.
    @param statsfunc: Function for retrieving stats for device.
    """
    # (graph suffix, read-stat key, write-stat key) pairs share one shape.
    paired_metrics = (('requests', 'rios', 'wios'),
                      ('bytes', 'rbytes', 'wbytes'))
    for dev in devlist:
        stats = statsfunc(dev)
        for suffix, read_key, write_key in paired_metrics:
            graph = 'diskio_%s_%s' % (namestr, suffix)
            if self.hasGraph(graph):
                self.setGraphVal(graph, dev + '_read', stats[read_key])
                self.setGraphVal(graph, dev + '_write', stats[write_key])
        # The active-I/O metric has a single value per device.
        graph = 'diskio_%s_active' % namestr
        if self.hasGraph(graph):
            self.setGraphVal(graph, dev, stats['ios_active'])
def call_workflow_event(instance, event, after=True):
"""Calls the instance's workflow event
"""
if not event.transition:
return False
portal_type = instance.portal_type
wf_module = _load_wf_module('{}.events'.format(portal_type.lower()))
if not wf_module:
return False
# Inspect if event_<transition_id> function exists in the module
prefix = after and "after" or "before"
func_name = "{}_{}".format(prefix, event.transition.id)
func = getattr(wf_module, func_name, False)
if not func:
return False
logger.info('WF event: {0}.events.{1}'
.format(portal_type.lower(), func_name))
func(instance)
return True | Calls the instance's workflow event | Below is the instruction that describes the task:
### Input:
Calls the instance's workflow event
### Response:
def call_workflow_event(instance, event, after=True):
    """Dispatch a workflow transition event to its hook function.

    Looks up a function named ``after_<transition_id>`` (or
    ``before_<transition_id>``) in the events module for the instance's
    portal type and calls it with the instance.  Returns True when a
    hook was found and invoked, False otherwise.
    """
    if not event.transition:
        return False

    portal_type = instance.portal_type
    wf_module = _load_wf_module('{}.events'.format(portal_type.lower()))
    if not wf_module:
        return False

    # Inspect if (after|before)_<transition_id> exists in the module.
    prefix = "after" if after else "before"
    func_name = "{}_{}".format(prefix, event.transition.id)
    func = getattr(wf_module, func_name, False)
    if not func:
        return False

    logger.info('WF event: {0}.events.{1}'
                .format(portal_type.lower(), func_name))
    func(instance)
    return True
def _retrieve_userinfo(self, access_token=None):
"""
Requests extra user information from the Provider's UserInfo and
returns the result.
:returns: The contents of the UserInfo endpoint.
:rtype: dict
"""
if 'userinfo_uri' not in self.client_secrets:
logger.debug('Userinfo uri not specified')
raise AssertionError('UserInfo URI not specified')
# Cache the info from this request
if '_oidc_userinfo' in g:
return g._oidc_userinfo
http = httplib2.Http()
if access_token is None:
try:
credentials = OAuth2Credentials.from_json(
self.credentials_store[g.oidc_id_token['sub']])
except KeyError:
logger.debug("Expired ID token, credentials missing",
exc_info=True)
return None
credentials.authorize(http)
resp, content = http.request(self.client_secrets['userinfo_uri'])
else:
# We have been manually overriden with an access token
resp, content = http.request(
self.client_secrets['userinfo_uri'],
"POST",
body=urlencode({"access_token": access_token}),
headers={'Content-Type': 'application/x-www-form-urlencoded'})
logger.debug('Retrieved user info: %s' % content)
info = _json_loads(content)
g._oidc_userinfo = info
return info | Requests extra user information from the Provider's UserInfo and
returns the result.
:returns: The contents of the UserInfo endpoint.
:rtype: dict | Below is the instruction that describes the task:
### Input:
Requests extra user information from the Provider's UserInfo and
returns the result.
:returns: The contents of the UserInfo endpoint.
:rtype: dict
### Response:
def _retrieve_userinfo(self, access_token=None):
    """
    Requests extra user information from the Provider's UserInfo and
    returns the result.
    :returns: The contents of the UserInfo endpoint.
    :rtype: dict
    """
    if 'userinfo_uri' not in self.client_secrets:
        logger.debug('Userinfo uri not specified')
        raise AssertionError('UserInfo URI not specified')

    # Cache the info from this request
    if '_oidc_userinfo' in g:
        return g._oidc_userinfo

    http = httplib2.Http()
    if access_token is None:
        try:
            # Load stored OAuth2 credentials for the current ID token's
            # subject ('sub') claim.
            credentials = OAuth2Credentials.from_json(
                self.credentials_store[g.oidc_id_token['sub']])
        except KeyError:
            # NOTE(review): returns None here despite the documented
            # dict return type -- callers must handle None.
            logger.debug("Expired ID token, credentials missing",
                         exc_info=True)
            return None
        credentials.authorize(http)
        resp, content = http.request(self.client_secrets['userinfo_uri'])
    else:
        # We have been manually overriden with an access token
        resp, content = http.request(
            self.client_secrets['userinfo_uri'],
            "POST",
            body=urlencode({"access_token": access_token}),
            headers={'Content-Type': 'application/x-www-form-urlencoded'})
    logger.debug('Retrieved user info: %s' % content)
    info = _json_loads(content)
    # Cache on the flask request-global object for subsequent calls in
    # the same request.
    g._oidc_userinfo = info
    return info
def open(filename,
mode='r',
content_type=None,
options=None,
read_buffer_size=storage_api.ReadBuffer.DEFAULT_BUFFER_SIZE,
retry_params=None,
_account_id=None,
offset=0):
"""Opens a Google Cloud Storage file and returns it as a File-like object.
Args:
filename: A Google Cloud Storage filename of form '/bucket/filename'.
mode: 'r' for reading mode. 'w' for writing mode.
In reading mode, the file must exist. In writing mode, a file will
be created or be overrode.
content_type: The MIME type of the file. str. Only valid in writing mode.
options: A str->basestring dict to specify additional headers to pass to
GCS e.g. {'x-goog-acl': 'private', 'x-goog-meta-foo': 'foo'}.
Supported options are x-goog-acl, x-goog-meta-, cache-control,
content-disposition, and content-encoding.
Only valid in writing mode.
See https://developers.google.com/storage/docs/reference-headers
for details.
read_buffer_size: The buffer size for read. Read keeps a buffer
and prefetches another one. To minimize blocking for large files,
always read by buffer size. To minimize number of RPC requests for
small files, set a large buffer size. Max is 30MB.
retry_params: An instance of api_utils.RetryParams for subsequent calls
to GCS from this file handle. If None, the default one is used.
_account_id: Internal-use only.
offset: Number of bytes to skip at the start of the file. If None, 0 is
used.
Returns:
A reading or writing buffer that supports File-like interface. Buffer
must be closed after operations are done.
Raises:
errors.AuthorizationError: if authorization failed.
errors.NotFoundError: if an object that's expected to exist doesn't.
ValueError: invalid open mode or if content_type or options are specified
in reading mode.
"""
common.validate_file_path(filename)
api = storage_api._get_storage_api(retry_params=retry_params,
account_id=_account_id)
filename = api_utils._quote_filename(filename)
if mode == 'w':
common.validate_options(options)
return storage_api.StreamingBuffer(api, filename, content_type, options)
elif mode == 'r':
if content_type or options:
raise ValueError('Options and content_type can only be specified '
'for writing mode.')
return storage_api.ReadBuffer(api,
filename,
buffer_size=read_buffer_size,
offset=offset)
else:
raise ValueError('Invalid mode %s.' % mode) | Opens a Google Cloud Storage file and returns it as a File-like object.
Args:
filename: A Google Cloud Storage filename of form '/bucket/filename'.
mode: 'r' for reading mode. 'w' for writing mode.
In reading mode, the file must exist. In writing mode, a file will
be created or be overrode.
content_type: The MIME type of the file. str. Only valid in writing mode.
options: A str->basestring dict to specify additional headers to pass to
GCS e.g. {'x-goog-acl': 'private', 'x-goog-meta-foo': 'foo'}.
Supported options are x-goog-acl, x-goog-meta-, cache-control,
content-disposition, and content-encoding.
Only valid in writing mode.
See https://developers.google.com/storage/docs/reference-headers
for details.
read_buffer_size: The buffer size for read. Read keeps a buffer
and prefetches another one. To minimize blocking for large files,
always read by buffer size. To minimize number of RPC requests for
small files, set a large buffer size. Max is 30MB.
retry_params: An instance of api_utils.RetryParams for subsequent calls
to GCS from this file handle. If None, the default one is used.
_account_id: Internal-use only.
offset: Number of bytes to skip at the start of the file. If None, 0 is
used.
Returns:
A reading or writing buffer that supports File-like interface. Buffer
must be closed after operations are done.
Raises:
errors.AuthorizationError: if authorization failed.
errors.NotFoundError: if an object that's expected to exist doesn't.
ValueError: invalid open mode or if content_type or options are specified
in reading mode. | Below is the instruction that describes the task:
### Input:
Opens a Google Cloud Storage file and returns it as a File-like object.
Args:
filename: A Google Cloud Storage filename of form '/bucket/filename'.
mode: 'r' for reading mode. 'w' for writing mode.
In reading mode, the file must exist. In writing mode, a file will
be created or be overrode.
content_type: The MIME type of the file. str. Only valid in writing mode.
options: A str->basestring dict to specify additional headers to pass to
GCS e.g. {'x-goog-acl': 'private', 'x-goog-meta-foo': 'foo'}.
Supported options are x-goog-acl, x-goog-meta-, cache-control,
content-disposition, and content-encoding.
Only valid in writing mode.
See https://developers.google.com/storage/docs/reference-headers
for details.
read_buffer_size: The buffer size for read. Read keeps a buffer
and prefetches another one. To minimize blocking for large files,
always read by buffer size. To minimize number of RPC requests for
small files, set a large buffer size. Max is 30MB.
retry_params: An instance of api_utils.RetryParams for subsequent calls
to GCS from this file handle. If None, the default one is used.
_account_id: Internal-use only.
offset: Number of bytes to skip at the start of the file. If None, 0 is
used.
Returns:
A reading or writing buffer that supports File-like interface. Buffer
must be closed after operations are done.
Raises:
errors.AuthorizationError: if authorization failed.
errors.NotFoundError: if an object that's expected to exist doesn't.
ValueError: invalid open mode or if content_type or options are specified
in reading mode.
### Response:
def open(filename,
         mode='r',
         content_type=None,
         options=None,
         read_buffer_size=storage_api.ReadBuffer.DEFAULT_BUFFER_SIZE,
         retry_params=None,
         _account_id=None,
         offset=0):
  """Open a Google Cloud Storage object as a File-like buffer.

  Args:
    filename: GCS filename of the form '/bucket/filename'.
    mode: 'r' to read (the object must exist) or 'w' to write (the object
      is created or overwritten).
    content_type: MIME type of the file; writing mode only.
    options: str->basestring dict of extra headers to send to GCS
      (x-goog-acl, x-goog-meta-*, cache-control, content-disposition,
      content-encoding); writing mode only. See
      https://developers.google.com/storage/docs/reference-headers.
    read_buffer_size: size of the read-ahead buffer (max 30MB). Reading in
      chunks of this size minimizes blocking on large objects; a larger
      value reduces the RPC count for small ones.
    retry_params: api_utils.RetryParams for subsequent GCS calls made
      through the returned handle; None selects the default.
    _account_id: Internal-use only.
    offset: number of bytes to skip at the start of the file (None means 0).

  Returns:
    A reading or writing buffer supporting the File-like interface; close
    it once the operations are done.

  Raises:
    errors.AuthorizationError: if authorization failed.
    errors.NotFoundError: if an object that's expected to exist doesn't.
    ValueError: on an invalid mode, or when content_type/options are
      supplied in reading mode.
  """
  common.validate_file_path(filename)
  api = storage_api._get_storage_api(retry_params=retry_params,
                                     account_id=_account_id)
  filename = api_utils._quote_filename(filename)

  if mode == 'w':
    common.validate_options(options)
    return storage_api.StreamingBuffer(api, filename, content_type, options)

  if mode != 'r':
    raise ValueError('Invalid mode %s.' % mode)

  if content_type or options:
    raise ValueError('Options and content_type can only be specified '
                     'for writing mode.')
  return storage_api.ReadBuffer(api,
                                filename,
                                buffer_size=read_buffer_size,
                                offset=offset)
def nodes_by_category(self, category):
""" Returns nodes with the given category attribute.
"""
return [n for n in self.nodes if n.category == category] | Returns nodes with the given category attribute. | Below is the the instruction that describes the task:
### Input:
Returns nodes with the given category attribute.
### Response:
def nodes_by_category(self, category):
    """Collect every node whose ``category`` attribute equals *category*."""
    matching = []
    for node in self.nodes:
        if node.category == category:
            matching.append(node)
    return matching
def get_key(self):
"""
Return the call key, even if it has to be parsed from the source.
"""
if not isinstance(self.key, Unparseable):
return self.key
line = self.source[self.col_offset:]
regex = re.compile('''pyconfig\.[eginst]+\(([^,]+).*?\)''')
match = regex.match(line)
if not match:
return Unparseable()
return "<%s>" % match.group(1) | Return the call key, even if it has to be parsed from the source. | Below is the the instruction that describes the task:
### Input:
Return the call key, even if it has to be parsed from the source.
### Response:
def get_key(self):
    """
    Return the call key, parsing it out of the source line when it was
    not determined statically.
    """
    if not isinstance(self.key, Unparseable):
        return self.key

    call_text = self.source[self.col_offset:]
    # First argument of a pyconfig.get/set/... call (same string value as
    # the original non-raw literal; \. and \( are not string escapes).
    pattern = re.compile(r'''pyconfig\.[eginst]+\(([^,]+).*?\)''')
    parsed = pattern.match(call_text)
    if parsed is None:
        return Unparseable()
    return "<%s>" % parsed.group(1)
def randomSize(cls, widthLimits, heightLimits, origin=None):
'''
:param: widthLimits - iterable of integers with length >= 2
:param: heightLimits - iterable of integers with length >= 2
:param: origin - optional Point subclass
:return: Rectangle
'''
r = cls(0, 0, origin)
r.w = random.randint(widthLimits[0], widthLimits[1])
r.h = random.randint(heightLimits[0], heightLimits[1])
return r | :param: widthLimits - iterable of integers with length >= 2
:param: heightLimits - iterable of integers with length >= 2
:param: origin - optional Point subclass
:return: Rectangle | Below is the the instruction that describes the task:
### Input:
:param: widthLimits - iterable of integers with length >= 2
:param: heightLimits - iterable of integers with length >= 2
:param: origin - optional Point subclass
:return: Rectangle
### Response:
def randomSize(cls, widthLimits, heightLimits, origin=None):
    '''Build a Rectangle whose width and height are drawn uniformly from
    the given inclusive limits.

    :param: widthLimits - iterable of integers with length >= 2
    :param: heightLimits - iterable of integers with length >= 2
    :param: origin - optional Point subclass
    :return: Rectangle
    '''
    w_lo, w_hi = widthLimits[0], widthLimits[1]
    h_lo, h_hi = heightLimits[0], heightLimits[1]
    rect = cls(0, 0, origin)
    rect.w = random.randint(w_lo, w_hi)
    rect.h = random.randint(h_lo, h_hi)
    return rect
def _do_poll_problems(self):
"""Poll the server for the status of a set of problems.
Note:
This method is always run inside of a daemon thread.
"""
try:
# grouped futures (all scheduled within _POLL_GROUP_TIMEFRAME)
frame_futures = {}
def task_done():
self._poll_queue.task_done()
def add(future):
# add future to query frame_futures
# returns: worker lives on?
# `None` task signifies thread termination
if future is None:
task_done()
return False
if future.id not in frame_futures and not future.done():
frame_futures[future.id] = future
else:
task_done()
return True
while True:
frame_futures.clear()
# blocking add first scheduled
frame_earliest, future = self._poll_queue.get()
if not add(future):
return
# try grouping if scheduled within grouping timeframe
while len(frame_futures) < self._STATUS_QUERY_SIZE:
try:
task = self._poll_queue.get_nowait()
except queue.Empty:
break
at, future = task
if at - frame_earliest <= self._POLL_GROUP_TIMEFRAME:
if not add(future):
return
else:
task_done()
self._poll_queue.put(task)
break
# build a query string with ids of all futures in this frame
ids = [future.id for future in frame_futures.values()]
_LOGGER.debug("Polling for status of futures: %s", ids)
query_string = 'problems/?id=' + ','.join(ids)
# if futures were cancelled while `add`ing, skip empty frame
if not ids:
continue
# wait until `frame_earliest` before polling
delay = frame_earliest - time.time()
if delay > 0:
_LOGGER.debug("Pausing polling %.2f sec for futures: %s", delay, ids)
time.sleep(delay)
else:
_LOGGER.trace("Skipping non-positive delay of %.2f sec", delay)
try:
_LOGGER.trace("Executing poll API request")
try:
response = self.session.get(posixpath.join(self.endpoint, query_string))
except requests.exceptions.Timeout:
raise RequestTimeout
if response.status_code == 401:
raise SolverAuthenticationError()
response.raise_for_status()
statuses = response.json()
for status in statuses:
self._handle_problem_status(status, frame_futures[status['id']])
except BaseException as exception:
if not isinstance(exception, SolverAuthenticationError):
exception = IOError(exception)
for id_ in frame_futures.keys():
frame_futures[id_]._set_error(IOError(exception), sys.exc_info())
for id_ in frame_futures.keys():
task_done()
time.sleep(0)
except Exception as err:
_LOGGER.exception(err) | Poll the server for the status of a set of problems.
Note:
This method is always run inside of a daemon thread. | Below is the instruction that describes the task:
### Input:
Poll the server for the status of a set of problems.
Note:
This method is always run inside of a daemon thread.
### Response:
def _do_poll_problems(self):
    """Poll the server for the status of a set of problems.

    Pulls (schedule_time, future) tasks off ``self._poll_queue``, groups
    futures scheduled close together into one batched status request, and
    routes each returned status to its future.

    Note:
        This method is always run inside of a daemon thread.
    """
    try:
        # grouped futures (all scheduled within _POLL_GROUP_TIMEFRAME)
        frame_futures = {}

        def task_done():
            self._poll_queue.task_done()

        def add(future):
            # add future to query frame_futures
            # returns: worker lives on?

            # `None` task signifies thread termination
            if future is None:
                task_done()
                return False

            # already-done or duplicate futures are acknowledged but skipped
            if future.id not in frame_futures and not future.done():
                frame_futures[future.id] = future
            else:
                task_done()

            return True

        while True:
            frame_futures.clear()

            # blocking add first scheduled
            frame_earliest, future = self._poll_queue.get()
            if not add(future):
                return

            # try grouping if scheduled within grouping timeframe
            while len(frame_futures) < self._STATUS_QUERY_SIZE:
                try:
                    task = self._poll_queue.get_nowait()
                except queue.Empty:
                    break

                at, future = task
                if at - frame_earliest <= self._POLL_GROUP_TIMEFRAME:
                    if not add(future):
                        return
                else:
                    # too far in the future for this frame: put it back for
                    # the next round
                    task_done()
                    self._poll_queue.put(task)
                    break

            # build a query string with ids of all futures in this frame
            ids = [future.id for future in frame_futures.values()]
            _LOGGER.debug("Polling for status of futures: %s", ids)
            query_string = 'problems/?id=' + ','.join(ids)

            # if futures were cancelled while `add`ing, skip empty frame
            if not ids:
                continue

            # wait until `frame_earliest` before polling
            delay = frame_earliest - time.time()
            if delay > 0:
                _LOGGER.debug("Pausing polling %.2f sec for futures: %s", delay, ids)
                time.sleep(delay)
            else:
                _LOGGER.trace("Skipping non-positive delay of %.2f sec", delay)

            try:
                _LOGGER.trace("Executing poll API request")
                try:
                    response = self.session.get(posixpath.join(self.endpoint, query_string))
                except requests.exceptions.Timeout:
                    raise RequestTimeout

                if response.status_code == 401:
                    raise SolverAuthenticationError()
                response.raise_for_status()

                statuses = response.json()
                for status in statuses:
                    self._handle_problem_status(status, frame_futures[status['id']])

            except BaseException as exception:
                if not isinstance(exception, SolverAuthenticationError):
                    exception = IOError(exception)

                # NOTE(review): the exception is wrapped in IOError *again*
                # below, so even SolverAuthenticationError reaches the
                # futures wrapped in IOError despite the isinstance guard
                # above — confirm whether the double wrap is intended.
                for id_ in frame_futures.keys():
                    frame_futures[id_]._set_error(IOError(exception), sys.exc_info())

            # acknowledge every queue task consumed for this frame
            for id_ in frame_futures.keys():
                task_done()

            # sleep(0) gives other threads a chance to run before the next
            # frame is assembled
            time.sleep(0)

    except Exception as err:
        _LOGGER.exception(err)
def resnet_v2(inputs,
block_fn,
layer_blocks,
filters,
data_format="channels_first",
is_training=False,
is_cifar=False,
use_td=False,
targeting_rate=None,
keep_prob=None):
"""Resnet model.
Args:
inputs: `Tensor` images.
block_fn: `function` for the block to use within the model. Either
`residual_block` or `bottleneck_block`.
layer_blocks: list of 3 or 4 `int`s denoting the number of blocks to include
in each of the 3 or 4 block groups. Each group consists of blocks that
take inputs of the same resolution.
filters: list of 4 or 5 `int`s denoting the number of filter to include in
block.
data_format: `str`, "channels_first" `[batch, channels, height,
width]` or "channels_last" `[batch, height, width, channels]`.
is_training: bool, build in training mode or not.
is_cifar: bool, whether the data is CIFAR or not.
use_td: `str` one of "weight" or "unit". Set to False or "" to disable
targeted dropout.
targeting_rate: `float` proportion of weights to target with targeted
dropout.
keep_prob: `float` keep probability for targeted dropout.
Returns:
Pre-logit activations.
"""
inputs = block_layer(
inputs=inputs,
filters=filters[1],
block_fn=block_fn,
blocks=layer_blocks[0],
strides=1,
is_training=is_training,
name="block_layer1",
data_format=data_format,
use_td=use_td,
targeting_rate=targeting_rate,
keep_prob=keep_prob)
inputs = block_layer(
inputs=inputs,
filters=filters[2],
block_fn=block_fn,
blocks=layer_blocks[1],
strides=2,
is_training=is_training,
name="block_layer2",
data_format=data_format,
use_td=use_td,
targeting_rate=targeting_rate,
keep_prob=keep_prob)
inputs = block_layer(
inputs=inputs,
filters=filters[3],
block_fn=block_fn,
blocks=layer_blocks[2],
strides=2,
is_training=is_training,
name="block_layer3",
data_format=data_format,
use_td=use_td,
targeting_rate=targeting_rate,
keep_prob=keep_prob)
if not is_cifar:
inputs = block_layer(
inputs=inputs,
filters=filters[4],
block_fn=block_fn,
blocks=layer_blocks[3],
strides=2,
is_training=is_training,
name="block_layer4",
data_format=data_format,
use_td=use_td,
targeting_rate=targeting_rate,
keep_prob=keep_prob)
return inputs | Resnet model.
Args:
inputs: `Tensor` images.
block_fn: `function` for the block to use within the model. Either
`residual_block` or `bottleneck_block`.
layer_blocks: list of 3 or 4 `int`s denoting the number of blocks to include
in each of the 3 or 4 block groups. Each group consists of blocks that
take inputs of the same resolution.
filters: list of 4 or 5 `int`s denoting the number of filter to include in
block.
data_format: `str`, "channels_first" `[batch, channels, height,
width]` or "channels_last" `[batch, height, width, channels]`.
is_training: bool, build in training mode or not.
is_cifar: bool, whether the data is CIFAR or not.
use_td: `str` one of "weight" or "unit". Set to False or "" to disable
targeted dropout.
targeting_rate: `float` proportion of weights to target with targeted
dropout.
keep_prob: `float` keep probability for targeted dropout.
Returns:
Pre-logit activations. | Below is the instruction that describes the task:
### Input:
Resnet model.
Args:
inputs: `Tensor` images.
block_fn: `function` for the block to use within the model. Either
`residual_block` or `bottleneck_block`.
layer_blocks: list of 3 or 4 `int`s denoting the number of blocks to include
in each of the 3 or 4 block groups. Each group consists of blocks that
take inputs of the same resolution.
filters: list of 4 or 5 `int`s denoting the number of filter to include in
block.
data_format: `str`, "channels_first" `[batch, channels, height,
width]` or "channels_last" `[batch, height, width, channels]`.
is_training: bool, build in training mode or not.
is_cifar: bool, whether the data is CIFAR or not.
use_td: `str` one of "weight" or "unit". Set to False or "" to disable
targeted dropout.
targeting_rate: `float` proportion of weights to target with targeted
dropout.
keep_prob: `float` keep probability for targeted dropout.
Returns:
Pre-logit activations.
### Response:
def resnet_v2(inputs,
              block_fn,
              layer_blocks,
              filters,
              data_format="channels_first",
              is_training=False,
              is_cifar=False,
              use_td=False,
              targeting_rate=None,
              keep_prob=None):
  """Apply the stacked block groups of a ResNet-v2 backbone.

  Args:
    inputs: `Tensor` images.
    block_fn: `function` building a single block, either `residual_block`
      or `bottleneck_block`.
    layer_blocks: list of 3 (CIFAR) or 4 `int`s; number of blocks in each
      block group. Blocks inside one group share an input resolution.
    filters: list of 4 or 5 `int`s; filter counts per group (entry 0 is
      not read here, the groups use entries 1 onwards).
    data_format: `str`, "channels_first" `[batch, channels, height,
      width]` or "channels_last" `[batch, height, width, channels]`.
    is_training: bool, build in training mode or not.
    is_cifar: bool; when True only the first three block groups are built.
    use_td: `str` one of "weight" or "unit"; False or "" disables targeted
      dropout.
    targeting_rate: `float` proportion of weights to target with targeted
      dropout.
    keep_prob: `float` keep probability for targeted dropout.

  Returns:
    Pre-logit activations.
  """
  # Group 1 keeps the input resolution (stride 1); every later group
  # halves it (stride 2). CIFAR variants stop after three groups.
  num_groups = 3 if is_cifar else 4
  for group_index in range(num_groups):
    inputs = block_layer(
        inputs=inputs,
        filters=filters[group_index + 1],
        block_fn=block_fn,
        blocks=layer_blocks[group_index],
        strides=1 if group_index == 0 else 2,
        is_training=is_training,
        name="block_layer%d" % (group_index + 1),
        data_format=data_format,
        use_td=use_td,
        targeting_rate=targeting_rate,
        keep_prob=keep_prob)
  return inputs
def get_user_details(self, response):
"""
Return user details from Dataporten
Set username to eduPersonPrincipalName
"""
user = super(DataportenFeideOAuth2, self).get_user_details(response)
sec_userids = user['userid_sec']
for userid in sec_userids:
usertype, username = userid.split(':')
if usertype == 'feide':
user['username'] = username
break
return user | Return user details from Dataporten
Set username to eduPersonPrincipalName | Below is the instruction that describes the task:
### Input:
Return user details from Dataporten
Set username to eduPersonPrincipalName
### Response:
def get_user_details(self, response):
    """
    Build the user-details dict from a Dataporten response, taking the
    username from the 'feide' secondary userid (eduPersonPrincipalName)
    when one is present.
    """
    details = super(DataportenFeideOAuth2, self).get_user_details(response)
    for secondary_id in details['userid_sec']:
        id_type, id_value = secondary_id.split(':')
        if id_type == 'feide':
            details['username'] = id_value
            break
    return details
def main():
"""
iodp_samples_magic.py
OPTIONS:
-f FILE, input csv file
-Fsa FILE, output samples file for updating, default is to overwrite existing samples file
"""
if "-h" in sys.argv:
print(main.__doc__)
sys.exit()
dataframe = extractor.command_line_dataframe([['WD', False, '.'], ['ID', False, '.'], ['f', True, ''], ['Fsa', False, 'samples.txt'], ['DM', False, 3]])
args = sys.argv
checked_args = extractor.extract_and_check_args(args, dataframe)
samp_file, output_samp_file, output_dir_path, input_dir_path, data_model_num = extractor.get_vars(['f', 'Fsa', 'WD', 'ID', 'DM'], checked_args)
data_model_num = int(float(data_model_num))
if '-Fsa' not in args and data_model_num == 2:
output_samp_file = "er_samples.txt"
ran, error = convert.iodp_samples(samp_file, output_samp_file, output_dir_path,
input_dir_path, data_model_num=data_model_num)
if not ran:
print("-W- " + error) | iodp_samples_magic.py
OPTIONS:
-f FILE, input csv file
-Fsa FILE, output samples file for updating, default is to overwrite existing samples file | Below is the the instruction that describes the task:
### Input:
iodp_samples_magic.py
OPTIONS:
-f FILE, input csv file
-Fsa FILE, output samples file for updating, default is to overwrite existing samples file
### Response:
def main():
    """
    iodp_samples_magic.py
    OPTIONS:
        -f FILE, input csv file
        -Fsa FILE, output samples file for updating, default is to overwrite existing samples file
    """
    # print usage and bail out when help is requested
    if "-h" in sys.argv:
        print(main.__doc__)
        sys.exit()
    # expected flags: WD (working dir), ID (input dir), f (input file,
    # required), Fsa (output samples file), DM (MagIC data model version)
    dataframe = extractor.command_line_dataframe([['WD', False, '.'], ['ID', False, '.'], ['f', True, ''], ['Fsa', False, 'samples.txt'], ['DM', False, 3]])
    args = sys.argv
    checked_args = extractor.extract_and_check_args(args, dataframe)
    samp_file, output_samp_file, output_dir_path, input_dir_path, data_model_num = extractor.get_vars(['f', 'Fsa', 'WD', 'ID', 'DM'], checked_args)
    # DM may arrive as a string such as "3.0"; normalize it to an int
    data_model_num = int(float(data_model_num))
    # data model 2 uses the legacy "er_samples.txt" name, but only when the
    # user did not pick an explicit output file with -Fsa
    if '-Fsa' not in args and data_model_num == 2:
        output_samp_file = "er_samples.txt"
    ran, error = convert.iodp_samples(samp_file, output_samp_file, output_dir_path,
                                      input_dir_path, data_model_num=data_model_num)
    # conversion reports failure via a (ran, error) pair rather than raising
    if not ran:
        print("-W- " + error)
def _autocorr_func2(mags, lag, maglen, magmed, magstd):
'''
This is an alternative function to calculate the autocorrelation.
This version is from (first definition):
https://en.wikipedia.org/wiki/Correlogram#Estimation_of_autocorrelations
Parameters
----------
mags : np.array
This is the magnitudes array. MUST NOT have any nans.
lag : float
The specific lag value to calculate the auto-correlation for. This MUST
be less than total number of observations in `mags`.
maglen : int
The number of elements in the `mags` array.
magmed : float
The median of the `mags` array.
magstd : float
The standard deviation of the `mags` array.
Returns
-------
float
The auto-correlation at this specific `lag` value.
'''
lagindex = nparange(0,maglen-lag)
products = (mags[lagindex] - magmed) * (mags[lagindex+lag] - magmed)
autocovarfunc = npsum(products)/lagindex.size
varfunc = npsum(
(mags[lagindex] - magmed)*(mags[lagindex] - magmed)
)/mags.size
acorr = autocovarfunc/varfunc
return acorr | This is an alternative function to calculate the autocorrelation.
This version is from (first definition):
https://en.wikipedia.org/wiki/Correlogram#Estimation_of_autocorrelations
Parameters
----------
mags : np.array
This is the magnitudes array. MUST NOT have any nans.
lag : float
The specific lag value to calculate the auto-correlation for. This MUST
be less than total number of observations in `mags`.
maglen : int
The number of elements in the `mags` array.
magmed : float
The median of the `mags` array.
magstd : float
The standard deviation of the `mags` array.
Returns
-------
float
The auto-correlation at this specific `lag` value. | Below is the the instruction that describes the task:
### Input:
This is an alternative function to calculate the autocorrelation.
This version is from (first definition):
https://en.wikipedia.org/wiki/Correlogram#Estimation_of_autocorrelations
Parameters
----------
mags : np.array
This is the magnitudes array. MUST NOT have any nans.
lag : float
The specific lag value to calculate the auto-correlation for. This MUST
be less than total number of observations in `mags`.
maglen : int
The number of elements in the `mags` array.
magmed : float
The median of the `mags` array.
magstd : float
The standard deviation of the `mags` array.
Returns
-------
float
The auto-correlation at this specific `lag` value.
### Response:
def _autocorr_func2(mags, lag, maglen, magmed, magstd):
'''
This is an alternative function to calculate the autocorrelation.
This version is from (first definition):
https://en.wikipedia.org/wiki/Correlogram#Estimation_of_autocorrelations
Parameters
----------
mags : np.array
This is the magnitudes array. MUST NOT have any nans.
lag : float
The specific lag value to calculate the auto-correlation for. This MUST
be less than total number of observations in `mags`.
maglen : int
The number of elements in the `mags` array.
magmed : float
The median of the `mags` array.
magstd : float
The standard deviation of the `mags` array.
Returns
-------
float
The auto-correlation at this specific `lag` value.
'''
lagindex = nparange(0,maglen-lag)
products = (mags[lagindex] - magmed) * (mags[lagindex+lag] - magmed)
autocovarfunc = npsum(products)/lagindex.size
varfunc = npsum(
(mags[lagindex] - magmed)*(mags[lagindex] - magmed)
)/mags.size
acorr = autocovarfunc/varfunc
return acorr |
def palette(fg, bg=-1):
"""
Since curses only supports a finite amount of initialised colour pairs
we memoise any selections you've made as an attribute on this function
"""
if not hasattr(palette, "counter"):
palette.counter = 1
if not hasattr(palette, "selections"):
palette.selections = {}
selection = "%s%s" % (str(fg), str(bg))
if not selection in palette.selections:
palette.selections[selection] = palette.counter
palette.counter += 1
# Get available colours
colors = [c for c in dir(_curses) if c.startswith('COLOR')]
if isinstance(fg, str):
if not "COLOR_"+fg.upper() in colors:
fg = -1
else:
fg = getattr(_curses, "COLOR_"+fg.upper())
if isinstance(bg, str):
if not "COLOR_"+bg.upper() in colors:
bg = -1
else:
bg = getattr(_curses, "COLOR_"+bg.upper())
_curses.init_pair(palette.selections[selection], fg, bg)
return _curses.color_pair(palette.selections[selection]) | Since curses only supports a finite amount of initialised colour pairs
we memoise any selections you've made as an attribute on this function | Below is the the instruction that describes the task:
### Input:
Since curses only supports a finite amount of initialised colour pairs
we memoise any selections you've made as an attribute on this function
### Response:
def palette(fg, bg=-1):
    """
    Return a curses color-pair attribute for the given foreground/background.

    Since curses only supports a finite amount of initialised colour pairs
    we memoise any selections you've made as attributes on this function,
    assigning each distinct (fg, bg) combination one pair number.

    fg/bg may be curses colour numbers or colour names (e.g. "red");
    unknown names fall back to -1, the terminal default.
    """
    if not hasattr(palette, "counter"):
        palette.counter = 1
    if not hasattr(palette, "selections"):
        palette.selections = {}

    selection = "%s%s" % (str(fg), str(bg))
    if selection not in palette.selections:
        palette.selections[selection] = palette.counter
        palette.counter += 1

    def _resolve(color):
        # Map a colour name to its curses COLOR_* constant; -1 (terminal
        # default) when the name is unknown. Non-strings pass through as-is.
        if not isinstance(color, str):
            return color
        return getattr(_curses, "COLOR_" + color.upper(), -1)

    fg = _resolve(fg)
    bg = _resolve(bg)

    _curses.init_pair(palette.selections[selection], fg, bg)
    return _curses.color_pair(palette.selections[selection])
def get_label_items(self, label_id, top=None, skip=None):
"""GetLabelItems.
Get items under a label.
:param str label_id: Unique identifier of label
:param int top: Max number of items to return
:param int skip: Number of items to skip
:rtype: [TfvcItem]
"""
route_values = {}
if label_id is not None:
route_values['labelId'] = self._serialize.url('label_id', label_id, 'str')
query_parameters = {}
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if skip is not None:
query_parameters['$skip'] = self._serialize.query('skip', skip, 'int')
response = self._send(http_method='GET',
location_id='06166e34-de17-4b60-8cd1-23182a346fda',
version='5.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[TfvcItem]', self._unwrap_collection(response)) | GetLabelItems.
Get items under a label.
:param str label_id: Unique identifier of label
:param int top: Max number of items to return
:param int skip: Number of items to skip
:rtype: [TfvcItem] | Below is the the instruction that describes the task:
### Input:
GetLabelItems.
Get items under a label.
:param str label_id: Unique identifier of label
:param int top: Max number of items to return
:param int skip: Number of items to skip
:rtype: [TfvcItem]
### Response:
def get_label_items(self, label_id, top=None, skip=None):
    """GetLabelItems.
    Fetch the items contained in a TFVC label.
    :param str label_id: Unique identifier of label
    :param int top: Max number of items to return
    :param int skip: Number of items to skip
    :rtype: [TfvcItem]
    """
    route_values = {}
    if label_id is not None:
        route_values['labelId'] = self._serialize.url('label_id', label_id, 'str')

    # optional paging parameters are only sent when explicitly supplied
    query_parameters = {}
    for header, param_name, value in (('$top', 'top', top), ('$skip', 'skip', skip)):
        if value is not None:
            query_parameters[header] = self._serialize.query(param_name, value, 'int')

    response = self._send(http_method='GET',
                          location_id='06166e34-de17-4b60-8cd1-23182a346fda',
                          version='5.0',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('[TfvcItem]', self._unwrap_collection(response))
def abbreviate(s, maxlength=25):
"""Color-aware abbreviator"""
assert maxlength >= 4
skip = False
abbrv = None
i = 0
for j, c in enumerate(s):
if c == '\033':
skip = True
elif skip:
if c == 'm':
skip = False
else:
i += 1
if i == maxlength - 1:
abbrv = s[:j] + '\033[0m...'
elif i > maxlength:
break
if i <= maxlength:
return s
else:
return abbrv | Color-aware abbreviator | Below is the the instruction that describes the task:
### Input:
Color-aware abbreviator
### Response:
def abbreviate(s, maxlength=25):
    """Color-aware abbreviator.

    Truncates `s` so that at most `maxlength` characters are *visible*;
    ANSI escape sequences (ESC ... 'm') are not counted. A truncated
    result ends with an ANSI reset (so an open colour cannot bleed past
    the cut) followed by "...", and the three dots count towards
    `maxlength`. Strings that already fit are returned unchanged.

    `maxlength` must be >= 4 so there is room for at least one visible
    character plus the ellipsis.
    """
    assert maxlength >= 4
    skip = False    # currently inside an ANSI escape sequence?
    abbrv = None    # candidate truncated form, built lazily
    visible = 0     # count of visible (non-escape) characters seen
    for j, c in enumerate(s):
        if c == '\033':
            skip = True
        elif skip:
            # SGR escape sequences terminate at 'm'
            if c == 'm':
                skip = False
        else:
            visible += 1
            if visible == maxlength - 3:
                # keep maxlength-3 visible chars, leaving room for "..."
                # (the original cut at maxlength-1 and excluded the current
                # char, producing maxlength+1 visible characters)
                abbrv = s[:j + 1] + '\033[0m...'
            elif visible > maxlength:
                break
    if visible <= maxlength:
        return s
    return abbrv
def get_namespace(self, namespace, lowercase=True, trim_namespace=True):
"""Returns a dictionary containing a subset of configuration options
that match the specified namespace/prefix. Example usage:
app.config['IMAGE_STORE_TYPE']='fs'
app.config['IMAGE_STORE_PATH']='/var/app/images'
app.config['IMAGE_STORE_BASE_URL']='http://img.website.com'
The result dictionary `image_store` would look like:
{
'type': 'fs',
'path': '/var/app/images',
'base_url':'http://image.website.com'
}
This is often useful when configuration options map directly to keyword arguments in functions or class constructors.
:param namespace: a configuration namespace
:param lowercase: a flag indicating if the keys of the resulting
dictionary should be lowercase
:param trim_namespace: a flag indicating if the keys of the resulting
dictionary should not include the namespace
:return: a dict instance
"""
rv = {}
for key, value in six.iteritems(self):
if not key.startswith(namespace):
continue
if trim_namespace:
key = key[len(namespace):]
else:
key = key
if lowercase:
key = key.lower()
rv[key] = value
return rv | Returns a dictionary containing a subset of configuration options
that match the specified namespace/prefix. Example usage:
app.config['IMAGE_STORE_TYPE']='fs'
app.config['IMAGE_STORE_PATH']='/var/app/images'
app.config['IMAGE_STORE_BASE_URL']='http://img.website.com'
The result dictionary `image_store` would look like:
{
'type': 'fs',
'path': '/var/app/images',
'base_url':'http://image.website.com'
}
This is often useful when configuration options map directly to keyword arguments in functions or class constructors.
:param namespace: a configuration namespace
:param lowercase: a flag indicating if the keys of the resulting
dictionary should be lowercase
:param trim_namespace: a flag indicating if the keys of the resulting
dictionary should not include the namespace
:return: a dict instance | Below is the the instruction that describes the task:
### Input:
Returns a dictionary containing a subset of configuration options
that match the specified namespace/prefix. Example usage:
app.config['IMAGE_STORE_TYPE']='fs'
app.config['IMAGE_STORE_PATH']='/var/app/images'
app.config['IMAGE_STORE_BASE_URL']='http://img.website.com'
The result dictionary `image_store` would look like:
{
'type': 'fs',
'path': '/var/app/images',
'base_url':'http://image.website.com'
}
This is often useful when configuration options map directly to keyword arguments in functions or class constructors.
:param namespace: a configuration namespace
:param lowercase: a flag indicating if the keys of the resulting
dictionary should be lowercase
:param trim_namespace: a flag indicating if the keys of the resulting
dictionary should not include the namespace
:return: a dict instance
### Response:
def get_namespace(self, namespace, lowercase=True, trim_namespace=True):
    """Extract the configuration options whose keys start with ``namespace``.

    With ``app.config['IMAGE_STORE_TYPE'] = 'fs'`` and
    ``app.config['IMAGE_STORE_PATH'] = '/var/app/images'``, calling
    ``get_namespace('IMAGE_STORE_')`` yields
    ``{'type': 'fs', 'path': '/var/app/images'}``.

    This is handy when a configuration namespace maps directly onto the
    keyword arguments of a function or class constructor.

    :param namespace: a configuration namespace (key prefix)
    :param lowercase: lowercase the keys of the resulting dictionary
    :param trim_namespace: drop the namespace prefix from the result keys
    :return: a dict instance
    """
    matched = {}
    for key, value in six.iteritems(self):
        if not key.startswith(namespace):
            continue
        name = key[len(namespace):] if trim_namespace else key
        if lowercase:
            name = name.lower()
        matched[name] = value
    return matched
def add_lv_grid_district(self, lv_grid_district):
# TODO: check docstring
"""Adds a LV grid district to _lv_grid_districts if not already existing
Args
----
lv_grid_district: :shapely:`Shapely Polygon object<polygons>`
Descr
"""
if lv_grid_district not in self._lv_grid_districts and \
isinstance(lv_grid_district, LVGridDistrictDing0):
self._lv_grid_districts.append(lv_grid_district) | Adds a LV grid district to _lv_grid_districts if not already existing
Args
----
lv_grid_district: :shapely:`Shapely Polygon object<polygons>`
Descr | Below is the the instruction that describes the task:
### Input:
Adds a LV grid district to _lv_grid_districts if not already existing
Args
----
lv_grid_district: :shapely:`Shapely Polygon object<polygons>`
Descr
### Response:
def add_lv_grid_district(self, lv_grid_district):
# TODO: check docstring
"""Adds a LV grid district to _lv_grid_districts if not already existing
Args
----
lv_grid_district: :shapely:`Shapely Polygon object<polygons>`
Descr
"""
if lv_grid_district not in self._lv_grid_districts and \
isinstance(lv_grid_district, LVGridDistrictDing0):
self._lv_grid_districts.append(lv_grid_district) |
def glob(self, pattern):
""" Return a list of path objects that match the pattern.
pattern - a path relative to this directory, with wildcards.
For example, path('/users').glob('*/bin/*') returns a list
of all the files users have in their bin directories.
"""
cls = self.__class__
return [cls(s) for s in glob.glob(unicode(self / pattern))] | Return a list of path objects that match the pattern.
pattern - a path relative to this directory, with wildcards.
For example, path('/users').glob('*/bin/*') returns a list
of all the files users have in their bin directories. | Below is the the instruction that describes the task:
### Input:
Return a list of path objects that match the pattern.
pattern - a path relative to this directory, with wildcards.
For example, path('/users').glob('*/bin/*') returns a list
of all the files users have in their bin directories.
### Response:
def glob(self, pattern):
""" Return a list of path objects that match the pattern.
pattern - a path relative to this directory, with wildcards.
For example, path('/users').glob('*/bin/*') returns a list
of all the files users have in their bin directories.
"""
cls = self.__class__
return [cls(s) for s in glob.glob(unicode(self / pattern))] |
def AdaptiveFilter(model="lms", **kwargs):
"""
Function that filter data with selected adaptive filter.
**Args:**
* `d` : desired value (1 dimensional array)
* `x` : input matrix (2-dimensional array). Rows are samples, columns are
input arrays.
**Kwargs:**
* Any key argument that can be accepted with selected filter model.
For more information see documentation of desired adaptive filter.
* It should be at least filter size `n`.
**Returns:**
* `y` : output value (1 dimensional array).
The size corresponds with the desired value.
* `e` : filter error for every sample (1 dimensional array).
The size corresponds with the desired value.
* `w` : history of all weights (2 dimensional array).
Every row is set of the weights for given sample.
"""
# check if the filter size was specified
if not "n" in kwargs:
raise ValueError('Filter size is not defined (n=?).')
# create filter according model
if model in ["LMS", "lms"]:
f = FilterLMS(**kwargs)
elif model in ["NLMS", "nlms"]:
f = FilterNLMS(**kwargs)
elif model in ["RLS", "rls"]:
f = FilterRLS(**kwargs)
elif model in ["GNGD", "gngd"]:
f = FilterGNGD(**kwargs)
elif model in ["AP", "ap"]:
f = FilterAP(**kwargs)
elif model in ["LMF", "lmf"]:
f = FilterLMF(**kwargs)
elif model in ["NLMF", "nlmf"]:
f = FilterNLMF(**kwargs)
else:
raise ValueError('Unknown model of filter {}'.format(model))
# return filter
return f | Function that filter data with selected adaptive filter.
**Args:**
* `d` : desired value (1 dimensional array)
* `x` : input matrix (2-dimensional array). Rows are samples, columns are
input arrays.
**Kwargs:**
* Any key argument that can be accepted with selected filter model.
For more information see documentation of desired adaptive filter.
* It should be at least filter size `n`.
**Returns:**
* `y` : output value (1 dimensional array).
The size corresponds with the desired value.
* `e` : filter error for every sample (1 dimensional array).
The size corresponds with the desired value.
* `w` : history of all weights (2 dimensional array).
Every row is set of the weights for given sample. | Below is the the instruction that describes the task:
### Input:
Function that filter data with selected adaptive filter.
**Args:**
* `d` : desired value (1 dimensional array)
* `x` : input matrix (2-dimensional array). Rows are samples, columns are
input arrays.
**Kwargs:**
* Any key argument that can be accepted with selected filter model.
For more information see documentation of desired adaptive filter.
* It should be at least filter size `n`.
**Returns:**
* `y` : output value (1 dimensional array).
The size corresponds with the desired value.
* `e` : filter error for every sample (1 dimensional array).
The size corresponds with the desired value.
* `w` : history of all weights (2 dimensional array).
Every row is set of the weights for given sample.
### Response:
def AdaptiveFilter(model="lms", **kwargs):
"""
Function that filter data with selected adaptive filter.
**Args:**
* `d` : desired value (1 dimensional array)
* `x` : input matrix (2-dimensional array). Rows are samples, columns are
input arrays.
**Kwargs:**
* Any key argument that can be accepted with selected filter model.
For more information see documentation of desired adaptive filter.
* It should be at least filter size `n`.
**Returns:**
* `y` : output value (1 dimensional array).
The size corresponds with the desired value.
* `e` : filter error for every sample (1 dimensional array).
The size corresponds with the desired value.
* `w` : history of all weights (2 dimensional array).
Every row is set of the weights for given sample.
"""
# check if the filter size was specified
if not "n" in kwargs:
raise ValueError('Filter size is not defined (n=?).')
# create filter according model
if model in ["LMS", "lms"]:
f = FilterLMS(**kwargs)
elif model in ["NLMS", "nlms"]:
f = FilterNLMS(**kwargs)
elif model in ["RLS", "rls"]:
f = FilterRLS(**kwargs)
elif model in ["GNGD", "gngd"]:
f = FilterGNGD(**kwargs)
elif model in ["AP", "ap"]:
f = FilterAP(**kwargs)
elif model in ["LMF", "lmf"]:
f = FilterLMF(**kwargs)
elif model in ["NLMF", "nlmf"]:
f = FilterNLMF(**kwargs)
else:
raise ValueError('Unknown model of filter {}'.format(model))
# return filter
return f |
def ParseOptions(cls, options, configuration_object):
"""Parses and validates options.
Args:
options (argparse.Namespace): parser options.
configuration_object (CLITool): object to be configured by the argument
helper.
Raises:
BadConfigObject: when the configuration object is of the wrong type.
"""
if not isinstance(configuration_object, tools.CLITool):
raise errors.BadConfigObject(
'Configuration object is not an instance of CLITool')
status_view_mode = cls._ParseStringOption(
options, 'status_view_mode',
default_value=status_view.StatusView.MODE_WINDOW)
setattr(configuration_object, '_status_view_mode', status_view_mode) | Parses and validates options.
Args:
options (argparse.Namespace): parser options.
configuration_object (CLITool): object to be configured by the argument
helper.
Raises:
BadConfigObject: when the configuration object is of the wrong type. | Below is the the instruction that describes the task:
### Input:
Parses and validates options.
Args:
options (argparse.Namespace): parser options.
configuration_object (CLITool): object to be configured by the argument
helper.
Raises:
BadConfigObject: when the configuration object is of the wrong type.
### Response:
def ParseOptions(cls, options, configuration_object):
"""Parses and validates options.
Args:
options (argparse.Namespace): parser options.
configuration_object (CLITool): object to be configured by the argument
helper.
Raises:
BadConfigObject: when the configuration object is of the wrong type.
"""
if not isinstance(configuration_object, tools.CLITool):
raise errors.BadConfigObject(
'Configuration object is not an instance of CLITool')
status_view_mode = cls._ParseStringOption(
options, 'status_view_mode',
default_value=status_view.StatusView.MODE_WINDOW)
setattr(configuration_object, '_status_view_mode', status_view_mode) |
def filter_variant_sequences(
variant_sequences,
preferred_sequence_length,
min_variant_sequence_coverage=MIN_VARIANT_SEQUENCE_COVERAGE,):
"""
Drop variant sequences which are shorter than request or don't have
enough supporting reads.
"""
variant_sequences = trim_variant_sequences(
variant_sequences, min_variant_sequence_coverage)
return filter_variant_sequences_by_length(
variant_sequences=variant_sequences,
preferred_sequence_length=preferred_sequence_length) | Drop variant sequences which are shorter than request or don't have
enough supporting reads. | Below is the the instruction that describes the task:
### Input:
Drop variant sequences which are shorter than request or don't have
enough supporting reads.
### Response:
def filter_variant_sequences(
variant_sequences,
preferred_sequence_length,
min_variant_sequence_coverage=MIN_VARIANT_SEQUENCE_COVERAGE,):
"""
Drop variant sequences which are shorter than request or don't have
enough supporting reads.
"""
variant_sequences = trim_variant_sequences(
variant_sequences, min_variant_sequence_coverage)
return filter_variant_sequences_by_length(
variant_sequences=variant_sequences,
preferred_sequence_length=preferred_sequence_length) |
def epoch(self):
"""GPS epoch associated with these data
:type: `~astropy.time.Time`
"""
try:
if self._epoch is None:
return None
return Time(*modf(self._epoch)[::-1], format='gps', scale='utc')
except AttributeError:
self._epoch = None
return self._epoch | GPS epoch associated with these data
:type: `~astropy.time.Time` | Below is the the instruction that describes the task:
### Input:
GPS epoch associated with these data
:type: `~astropy.time.Time`
### Response:
def epoch(self):
"""GPS epoch associated with these data
:type: `~astropy.time.Time`
"""
try:
if self._epoch is None:
return None
return Time(*modf(self._epoch)[::-1], format='gps', scale='utc')
except AttributeError:
self._epoch = None
return self._epoch |
def ds_IsEmpty(ds):
"""Check to see if dataset is empty after warp
"""
out = False
b = ds.GetRasterBand(1)
#Looks like this throws:
#ERROR 1: Failed to compute min/max, no valid pixels found in sampling.
#Should just catch this rater than bothering with logic below
try:
mm = b.ComputeRasterMinMax()
if (mm[0] == mm[1]):
ndv = b.GetNoDataValue()
if ndv is None:
out = True
else:
if (mm[0] == ndv):
out = True
except Exception:
out = True
#Check for std of nan
#import math
#stats = b.ComputeStatistics(1)
#for x in stats:
# if math.isnan(x):
# out = True
# break
return out | Check to see if dataset is empty after warp | Below is the the instruction that describes the task:
### Input:
Check to see if dataset is empty after warp
### Response:
def ds_IsEmpty(ds):
"""Check to see if dataset is empty after warp
"""
out = False
b = ds.GetRasterBand(1)
#Looks like this throws:
#ERROR 1: Failed to compute min/max, no valid pixels found in sampling.
#Should just catch this rater than bothering with logic below
try:
mm = b.ComputeRasterMinMax()
if (mm[0] == mm[1]):
ndv = b.GetNoDataValue()
if ndv is None:
out = True
else:
if (mm[0] == ndv):
out = True
except Exception:
out = True
#Check for std of nan
#import math
#stats = b.ComputeStatistics(1)
#for x in stats:
# if math.isnan(x):
# out = True
# break
return out |
def finish(self):
"""Wait for GL commands to to finish
This creates a GLIR command for glFinish and then processes the
GLIR commands. If the GLIR interpreter is remote (e.g. WebGL), this
function will return before GL has finished processing the commands.
"""
if hasattr(self, 'flush_commands'):
context = self
else:
context = get_current_canvas().context
context.glir.command('FUNC', 'glFinish')
context.flush_commands() | Wait for GL commands to to finish
This creates a GLIR command for glFinish and then processes the
GLIR commands. If the GLIR interpreter is remote (e.g. WebGL), this
function will return before GL has finished processing the commands. | Below is the the instruction that describes the task:
### Input:
Wait for GL commands to to finish
This creates a GLIR command for glFinish and then processes the
GLIR commands. If the GLIR interpreter is remote (e.g. WebGL), this
function will return before GL has finished processing the commands.
### Response:
def finish(self):
"""Wait for GL commands to to finish
This creates a GLIR command for glFinish and then processes the
GLIR commands. If the GLIR interpreter is remote (e.g. WebGL), this
function will return before GL has finished processing the commands.
"""
if hasattr(self, 'flush_commands'):
context = self
else:
context = get_current_canvas().context
context.glir.command('FUNC', 'glFinish')
context.flush_commands() |
async def handle_callback(self, aws_callback: typing.Coroutine, response):
"""Process coroutine callback function"""
callback_result = None
try:
callback_result = await aws_callback
except NothingMatchedError as e:
self.logger.error(f'<Item: {str(e).lower()}>')
except Exception as e:
self.logger.error(f'<Callback[{aws_callback.__name__}]: {e}')
return callback_result, response | Process coroutine callback function | Below is the the instruction that describes the task:
### Input:
Process coroutine callback function
### Response:
async def handle_callback(self, aws_callback: typing.Coroutine, response):
"""Process coroutine callback function"""
callback_result = None
try:
callback_result = await aws_callback
except NothingMatchedError as e:
self.logger.error(f'<Item: {str(e).lower()}>')
except Exception as e:
self.logger.error(f'<Callback[{aws_callback.__name__}]: {e}')
return callback_result, response |
def scheduler(self, sleep_time=0.2):
"""Starts the scheduler to check for scheduled calls and execute them
at the correct time.
Args:
sleep_time (float): The amount of time to wait in seconds between
each loop iteration. This prevents the scheduler from consuming
100% of the host's CPU. Defaults to 0.2 seconds.
Returns:
None
"""
while self.listening:
# If we have any scheduled calls, execute them and remove them from
# our list of scheduled calls.
if self.scheduled_calls:
timestamp = time.time()
self.scheduled_calls[:] = [item for item in self.scheduled_calls
if not self.time_reached(timestamp, item)]
time.sleep(sleep_time)
logger.info("Shutting down the call scheduler...") | Starts the scheduler to check for scheduled calls and execute them
at the correct time.
Args:
sleep_time (float): The amount of time to wait in seconds between
each loop iteration. This prevents the scheduler from consuming
100% of the host's CPU. Defaults to 0.2 seconds.
Returns:
None | Below is the the instruction that describes the task:
### Input:
Starts the scheduler to check for scheduled calls and execute them
at the correct time.
Args:
sleep_time (float): The amount of time to wait in seconds between
each loop iteration. This prevents the scheduler from consuming
100% of the host's CPU. Defaults to 0.2 seconds.
Returns:
None
### Response:
def scheduler(self, sleep_time=0.2):
"""Starts the scheduler to check for scheduled calls and execute them
at the correct time.
Args:
sleep_time (float): The amount of time to wait in seconds between
each loop iteration. This prevents the scheduler from consuming
100% of the host's CPU. Defaults to 0.2 seconds.
Returns:
None
"""
while self.listening:
# If we have any scheduled calls, execute them and remove them from
# our list of scheduled calls.
if self.scheduled_calls:
timestamp = time.time()
self.scheduled_calls[:] = [item for item in self.scheduled_calls
if not self.time_reached(timestamp, item)]
time.sleep(sleep_time)
logger.info("Shutting down the call scheduler...") |
def set_current_language(self, language_code, initialize=False):
"""
Switch the currently activate language of the object.
"""
self._current_language = normalize_language_code(language_code or get_language())
# Ensure the translation is present for __get__ queries.
if initialize:
self._get_translated_model(use_fallback=False, auto_create=True) | Switch the currently activate language of the object. | Below is the the instruction that describes the task:
### Input:
Switch the currently activate language of the object.
### Response:
def set_current_language(self, language_code, initialize=False):
"""
Switch the currently activate language of the object.
"""
self._current_language = normalize_language_code(language_code or get_language())
# Ensure the translation is present for __get__ queries.
if initialize:
self._get_translated_model(use_fallback=False, auto_create=True) |
def set(self, key, value, key_length=0):
"""Set value to key-value
Params:
<str> key
<int> value
<int> key_length
Return:
<int> key_value
"""
if key_length < 1:
key_length = len(key)
if self.k:
self._update(key, value)
return self.set_method(self, key, key_length, value) | Set value to key-value
Params:
<str> key
<int> value
<int> key_length
Return:
<int> key_value | Below is the the instruction that describes the task:
### Input:
Set value to key-value
Params:
<str> key
<int> value
<int> key_length
Return:
<int> key_value
### Response:
def set(self, key, value, key_length=0):
"""Set value to key-value
Params:
<str> key
<int> value
<int> key_length
Return:
<int> key_value
"""
if key_length < 1:
key_length = len(key)
if self.k:
self._update(key, value)
return self.set_method(self, key, key_length, value) |
def get_full_page_box_list_assigning_media_and_crop(input_doc, quiet=False):
"""Get a list of all the full-page box values for each page. The argument
input_doc should be a PdfFileReader object. The boxes on the list are in the
simple 4-float list format used by this program, not RectangleObject format."""
full_page_box_list = []
rotation_list = []
if args.verbose and not quiet:
print("\nOriginal full page sizes, in PDF format (lbrt):")
for page_num in range(input_doc.getNumPages()):
# Get the current page and find the full-page box.
curr_page = input_doc.getPage(page_num)
full_page_box = get_full_page_box_assigning_media_and_crop(curr_page)
if args.verbose and not quiet:
# want to display page num numbering from 1, so add one
print("\t"+str(page_num+1), " rot =",
curr_page.rotationAngle, "\t", full_page_box)
# Convert the RectangleObject to floats in an ordinary list and append.
ordinary_box = [float(b) for b in full_page_box]
full_page_box_list.append(ordinary_box)
# Append the rotation value to the rotation_list.
rotation_list.append(curr_page.rotationAngle)
return full_page_box_list, rotation_list | Get a list of all the full-page box values for each page. The argument
input_doc should be a PdfFileReader object. The boxes on the list are in the
simple 4-float list format used by this program, not RectangleObject format. | Below is the the instruction that describes the task:
### Input:
Get a list of all the full-page box values for each page. The argument
input_doc should be a PdfFileReader object. The boxes on the list are in the
simple 4-float list format used by this program, not RectangleObject format.
### Response:
def get_full_page_box_list_assigning_media_and_crop(input_doc, quiet=False):
"""Get a list of all the full-page box values for each page. The argument
input_doc should be a PdfFileReader object. The boxes on the list are in the
simple 4-float list format used by this program, not RectangleObject format."""
full_page_box_list = []
rotation_list = []
if args.verbose and not quiet:
print("\nOriginal full page sizes, in PDF format (lbrt):")
for page_num in range(input_doc.getNumPages()):
# Get the current page and find the full-page box.
curr_page = input_doc.getPage(page_num)
full_page_box = get_full_page_box_assigning_media_and_crop(curr_page)
if args.verbose and not quiet:
# want to display page num numbering from 1, so add one
print("\t"+str(page_num+1), " rot =",
curr_page.rotationAngle, "\t", full_page_box)
# Convert the RectangleObject to floats in an ordinary list and append.
ordinary_box = [float(b) for b in full_page_box]
full_page_box_list.append(ordinary_box)
# Append the rotation value to the rotation_list.
rotation_list.append(curr_page.rotationAngle)
return full_page_box_list, rotation_list |
def saturated_vapor_pressure(t_kelvin):
"""Saturated Vapor Pressure (Pa) at t_kelvin (K).
This function accounts for the different behaviour above vs. below
the freezing point of water.
Note:
[1] W. Wagner and A. Pru:" The IAPWS Formulation 1995 for the Thermodynamic
Properties of Ordinary Water Substance for General and Scientific Use ",
Journal of Physical and Chemical Reference Data,
June 2002 ,Volume 31, Issue 2, pp. 387535
[2] Vaisala. (2013) Humidity Conversion Formulas:
Calculation Formulas for Humidity.
www.vaisala.com/Vaisala%20Documents/Application%20notes/Humidity_Conversion_Formulas_B210973EN-F.pdf
"""
if t_kelvin >= 273.15:
# Calculate saturation vapor pressure above freezing
sig = 1 - (t_kelvin / 647.096)
sig_polynomial = (-7.85951783 * sig) + (1.84408259 * sig ** 1.5) + \
(-11.7866487 * sig ** 3) + (22.6807411 * sig ** 3.5) + \
(-15.9618719 * sig ** 4) + (1.80122502 * sig ** 7.5)
crit_temp = 647.096 / t_kelvin
exponent = crit_temp * sig_polynomial
p_ws = math.exp(exponent) * 22064000
else:
# Calculate saturation vapor pressure below freezing
theta = t_kelvin / 273.15
exponent = -13.928169 * (1 - theta ** -1.5) + \
34.707823 * (1 - theta ** -1.25)
p_ws = math.exp(exponent) * 611.657
return p_ws | Saturated Vapor Pressure (Pa) at t_kelvin (K).
This function accounts for the different behaviour above vs. below
the freezing point of water.
Note:
[1] W. Wagner and A. Pru:" The IAPWS Formulation 1995 for the Thermodynamic
Properties of Ordinary Water Substance for General and Scientific Use ",
Journal of Physical and Chemical Reference Data,
June 2002 ,Volume 31, Issue 2, pp. 387535
[2] Vaisala. (2013) Humidity Conversion Formulas:
Calculation Formulas for Humidity.
www.vaisala.com/Vaisala%20Documents/Application%20notes/Humidity_Conversion_Formulas_B210973EN-F.pdf | Below is the the instruction that describes the task:
### Input:
Saturated Vapor Pressure (Pa) at t_kelvin (K).
This function accounts for the different behaviour above vs. below
the freezing point of water.
Note:
[1] W. Wagner and A. Pru:" The IAPWS Formulation 1995 for the Thermodynamic
Properties of Ordinary Water Substance for General and Scientific Use ",
Journal of Physical and Chemical Reference Data,
June 2002 ,Volume 31, Issue 2, pp. 387535
[2] Vaisala. (2013) Humidity Conversion Formulas:
Calculation Formulas for Humidity.
www.vaisala.com/Vaisala%20Documents/Application%20notes/Humidity_Conversion_Formulas_B210973EN-F.pdf
### Response:
def saturated_vapor_pressure(t_kelvin):
"""Saturated Vapor Pressure (Pa) at t_kelvin (K).
This function accounts for the different behaviour above vs. below
the freezing point of water.
Note:
[1] W. Wagner and A. Pru:" The IAPWS Formulation 1995 for the Thermodynamic
Properties of Ordinary Water Substance for General and Scientific Use ",
Journal of Physical and Chemical Reference Data,
June 2002 ,Volume 31, Issue 2, pp. 387535
[2] Vaisala. (2013) Humidity Conversion Formulas:
Calculation Formulas for Humidity.
www.vaisala.com/Vaisala%20Documents/Application%20notes/Humidity_Conversion_Formulas_B210973EN-F.pdf
"""
if t_kelvin >= 273.15:
# Calculate saturation vapor pressure above freezing
sig = 1 - (t_kelvin / 647.096)
sig_polynomial = (-7.85951783 * sig) + (1.84408259 * sig ** 1.5) + \
(-11.7866487 * sig ** 3) + (22.6807411 * sig ** 3.5) + \
(-15.9618719 * sig ** 4) + (1.80122502 * sig ** 7.5)
crit_temp = 647.096 / t_kelvin
exponent = crit_temp * sig_polynomial
p_ws = math.exp(exponent) * 22064000
else:
# Calculate saturation vapor pressure below freezing
theta = t_kelvin / 273.15
exponent = -13.928169 * (1 - theta ** -1.5) + \
34.707823 * (1 - theta ** -1.25)
p_ws = math.exp(exponent) * 611.657
return p_ws |
def pull_env_credential(env, param, value):
"""
Dissects a keyring credential lookup string from the supernova config file
and returns the username/password combo
"""
rex = "USE_KEYRING\[([\x27\x22])(.*)\\1\]"
# This is the old-style, per-environment keyring credential
if value == "USE_KEYRING":
username = utils.assemble_username(env, param)
# This is the new-style, global keyring credential that can be applied
# to multiple environments
else:
global_identifier = re.match(rex, value).group(2)
username = utils.assemble_username('global', global_identifier)
return (username, password_get(username)) | Dissects a keyring credential lookup string from the supernova config file
and returns the username/password combo | Below is the the instruction that describes the task:
### Input:
Dissects a keyring credential lookup string from the supernova config file
and returns the username/password combo
### Response:
def pull_env_credential(env, param, value):
"""
Dissects a keyring credential lookup string from the supernova config file
and returns the username/password combo
"""
rex = "USE_KEYRING\[([\x27\x22])(.*)\\1\]"
# This is the old-style, per-environment keyring credential
if value == "USE_KEYRING":
username = utils.assemble_username(env, param)
# This is the new-style, global keyring credential that can be applied
# to multiple environments
else:
global_identifier = re.match(rex, value).group(2)
username = utils.assemble_username('global', global_identifier)
return (username, password_get(username)) |
def _link_barcodes(self):
"""
Private function. Links Sample barcodes in a dictionary as
[Assembly].barcodes, with barcodes parsed from the 'barcodes_path'
parameter. This function is called during set_params() when setting
the barcodes_path.
"""
## parse barcodefile
try:
## allows fuzzy match to barcodefile name
barcodefile = glob.glob(self.paramsdict["barcodes_path"])[0]
## read in the file
bdf = pd.read_csv(barcodefile, header=None, delim_whitespace=1, dtype=str)
bdf = bdf.dropna()
## make sure bars are upper case
bdf[1] = bdf[1].str.upper()
## if replicates are present then print a warning
reps = bdf[0].unique().shape[0] != bdf[0].shape[0]
if reps:
print("{spacer}Warning: technical replicates (same name) will be combined."\
.format(**{'spacer': self._spacer}))
## add -technical-replicate-N to replicate names
reps = [i for i in bdf[0] if list(bdf[0]).count(i) > 1]
ureps = list(set(reps))
for name in ureps:
idxs = bdf[bdf[0] == ureps[0]].index.tolist()
for num, idx in enumerate(idxs):
bdf.ix[idx][0] = bdf.ix[idx][0] + "-technical-replicate-" + str(num+1)
## make sure chars are all proper
if not all(bdf[1].apply(set("RKSYWMCATG").issuperset)):
LOGGER.warn(BAD_BARCODE)
raise IPyradError(BAD_BARCODE)
## 3rad/seqcap use multiplexed barcodes
## We'll concatenate them with a plus and split them later
if "3rad" in self.paramsdict["datatype"]:
try:
bdf[2] = bdf[2].str.upper()
self.barcodes = dict(zip(bdf[0], bdf[1] + "+" + bdf[2]))
except KeyError as inst:
msg = " 3rad assumes multiplexed barcodes. Doublecheck your barcodes file."
LOGGER.error(msg)
raise IPyradError(msg)
else:
## set attribute on Assembly object
self.barcodes = dict(zip(bdf[0], bdf[1]))
except (IOError, IndexError):
raise IPyradWarningExit(\
" Barcodes file not found. You entered: {}"\
.format(self.paramsdict["barcodes_path"]))
except ValueError as inst:
msg = " Barcodes file format error."
LOGGER.warn(msg)
raise IPyradError(inst) | Private function. Links Sample barcodes in a dictionary as
[Assembly].barcodes, with barcodes parsed from the 'barcodes_path'
parameter. This function is called during set_params() when setting
the barcodes_path. | Below is the the instruction that describes the task:
### Input:
Private function. Links Sample barcodes in a dictionary as
[Assembly].barcodes, with barcodes parsed from the 'barcodes_path'
parameter. This function is called during set_params() when setting
the barcodes_path.
### Response:
def _link_barcodes(self):
"""
Private function. Links Sample barcodes in a dictionary as
[Assembly].barcodes, with barcodes parsed from the 'barcodes_path'
parameter. This function is called during set_params() when setting
the barcodes_path.
"""
## parse barcodefile
try:
## allows fuzzy match to barcodefile name
barcodefile = glob.glob(self.paramsdict["barcodes_path"])[0]
## read in the file
bdf = pd.read_csv(barcodefile, header=None, delim_whitespace=1, dtype=str)
bdf = bdf.dropna()
## make sure bars are upper case
bdf[1] = bdf[1].str.upper()
## if replicates are present then print a warning
reps = bdf[0].unique().shape[0] != bdf[0].shape[0]
if reps:
print("{spacer}Warning: technical replicates (same name) will be combined."\
.format(**{'spacer': self._spacer}))
## add -technical-replicate-N to replicate names
reps = [i for i in bdf[0] if list(bdf[0]).count(i) > 1]
ureps = list(set(reps))
for name in ureps:
idxs = bdf[bdf[0] == ureps[0]].index.tolist()
for num, idx in enumerate(idxs):
bdf.ix[idx][0] = bdf.ix[idx][0] + "-technical-replicate-" + str(num+1)
## make sure chars are all proper
if not all(bdf[1].apply(set("RKSYWMCATG").issuperset)):
LOGGER.warn(BAD_BARCODE)
raise IPyradError(BAD_BARCODE)
## 3rad/seqcap use multiplexed barcodes
## We'll concatenate them with a plus and split them later
if "3rad" in self.paramsdict["datatype"]:
try:
bdf[2] = bdf[2].str.upper()
self.barcodes = dict(zip(bdf[0], bdf[1] + "+" + bdf[2]))
except KeyError as inst:
msg = " 3rad assumes multiplexed barcodes. Doublecheck your barcodes file."
LOGGER.error(msg)
raise IPyradError(msg)
else:
## set attribute on Assembly object
self.barcodes = dict(zip(bdf[0], bdf[1]))
except (IOError, IndexError):
raise IPyradWarningExit(\
" Barcodes file not found. You entered: {}"\
.format(self.paramsdict["barcodes_path"]))
except ValueError as inst:
msg = " Barcodes file format error."
LOGGER.warn(msg)
raise IPyradError(inst) |
def set_level(self, position, channel=None):
"""Seek a specific value by specifying a float() from 0.0 to 1.0."""
try:
position = float(position)
except Exception as err:
LOG.debug("HelperLevel.set_level: Exception %s" % (err,))
return False
self.writeNodeData("LEVEL", position, channel) | Seek a specific value by specifying a float() from 0.0 to 1.0. | Below is the the instruction that describes the task:
### Input:
Seek a specific value by specifying a float() from 0.0 to 1.0.
### Response:
def set_level(self, position, channel=None):
"""Seek a specific value by specifying a float() from 0.0 to 1.0."""
try:
position = float(position)
except Exception as err:
LOG.debug("HelperLevel.set_level: Exception %s" % (err,))
return False
self.writeNodeData("LEVEL", position, channel) |
def humanize_filesize(filesize: int) -> Tuple[str, str]:
"""Return human readable pair of size and unit from the given filesize in bytes."""
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if filesize < 1024.0:
return '{:3.1f}'.format(filesize), unit+'B'
filesize /= 1024.0 | Return human readable pair of size and unit from the given filesize in bytes. | Below is the the instruction that describes the task:
### Input:
Return human readable pair of size and unit from the given filesize in bytes.
### Response:
def humanize_filesize(filesize: int) -> Tuple[str, str]:
"""Return human readable pair of size and unit from the given filesize in bytes."""
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if filesize < 1024.0:
return '{:3.1f}'.format(filesize), unit+'B'
filesize /= 1024.0 |
def _get_object_menu_models():
"""
we need to create basic permissions
for only CRUD enabled models
"""
from pyoko.conf import settings
enabled_models = []
for entry in settings.OBJECT_MENU.values():
for mdl in entry:
if 'wf' not in mdl:
enabled_models.append(mdl['name'])
return enabled_models | we need to create basic permissions
for only CRUD enabled models | Below is the the instruction that describes the task:
### Input:
we need to create basic permissions
for only CRUD enabled models
### Response:
def _get_object_menu_models():
"""
we need to create basic permissions
for only CRUD enabled models
"""
from pyoko.conf import settings
enabled_models = []
for entry in settings.OBJECT_MENU.values():
for mdl in entry:
if 'wf' not in mdl:
enabled_models.append(mdl['name'])
return enabled_models |
def install_default_handler(self, http_error_code):
"""Install a default error handler for `http_error_code`.
The default error handler renders a template named error404.html
for http_error_code 404.
"""
logger.debug(
"Set Default HTTP error handler for status code %d", http_error_code
)
handler = partial(self.handle_http_error, http_error_code)
self.errorhandler(http_error_code)(handler) | Install a default error handler for `http_error_code`.
The default error handler renders a template named error404.html
for http_error_code 404. | Below is the the instruction that describes the task:
### Input:
Install a default error handler for `http_error_code`.
The default error handler renders a template named error404.html
for http_error_code 404.
### Response:
def install_default_handler(self, http_error_code):
"""Install a default error handler for `http_error_code`.
The default error handler renders a template named error404.html
for http_error_code 404.
"""
logger.debug(
"Set Default HTTP error handler for status code %d", http_error_code
)
handler = partial(self.handle_http_error, http_error_code)
self.errorhandler(http_error_code)(handler) |
def from_dict(cls, raw_data, **kwargs):
"""
This factory for :class:`Model` creates a Model from a dict object.
"""
instance = cls()
instance.populate(raw_data, **kwargs)
instance.validate(**kwargs)
return instance | This factory for :class:`Model` creates a Model from a dict object. | Below is the instruction that describes the task:
### Input:
This factory for :class:`Model` creates a Model from a dict object.
### Response:
def from_dict(cls, raw_data, **kwargs):
"""
This factory for :class:`Model` creates a Model from a dict object.
"""
instance = cls()
instance.populate(raw_data, **kwargs)
instance.validate(**kwargs)
return instance |
def appendQKeyEvent(self, keyEvent: QtGui.QKeyEvent):
"""
Append another key to the key sequence represented by this object.
|Args|
* ``keyEvent`` (**QKeyEvent**): the key to add.
|Returns|
**None**
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type.
"""
# Store the QKeyEvent.
self.keylistKeyEvent.append(keyEvent)
# Convenience shortcuts.
mod = keyEvent.modifiers()
key = keyEvent.key()
# Add the modifier and key to the list. The modifier is a
# QFlag structure and must by typecast to an integer to avoid
# difficulties with the hashing in the ``match`` routine of
# the ``QtmacsKeymap`` object.
self.keylistQtConstants.append((int(mod), key)) | Append another key to the key sequence represented by this object.
|Args|
* ``keyEvent`` (**QKeyEvent**): the key to add.
|Returns|
**None**
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type. | Below is the instruction that describes the task:
### Input:
Append another key to the key sequence represented by this object.
|Args|
* ``keyEvent`` (**QKeyEvent**): the key to add.
|Returns|
**None**
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type.
### Response:
def appendQKeyEvent(self, keyEvent: QtGui.QKeyEvent):
"""
Append another key to the key sequence represented by this object.
|Args|
* ``keyEvent`` (**QKeyEvent**): the key to add.
|Returns|
**None**
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type.
"""
# Store the QKeyEvent.
self.keylistKeyEvent.append(keyEvent)
# Convenience shortcuts.
mod = keyEvent.modifiers()
key = keyEvent.key()
# Add the modifier and key to the list. The modifier is a
# QFlag structure and must by typecast to an integer to avoid
# difficulties with the hashing in the ``match`` routine of
# the ``QtmacsKeymap`` object.
self.keylistQtConstants.append((int(mod), key)) |
def _wrap_event(event_):
"""Wrap hangouts_pb2.Event in ConversationEvent subclass."""
cls = conversation_event.ConversationEvent
if event_.HasField('chat_message'):
cls = conversation_event.ChatMessageEvent
elif event_.HasField('otr_modification'):
cls = conversation_event.OTREvent
elif event_.HasField('conversation_rename'):
cls = conversation_event.RenameEvent
elif event_.HasField('membership_change'):
cls = conversation_event.MembershipChangeEvent
elif event_.HasField('hangout_event'):
cls = conversation_event.HangoutEvent
elif event_.HasField('group_link_sharing_modification'):
cls = conversation_event.GroupLinkSharingModificationEvent
return cls(event_) | Wrap hangouts_pb2.Event in ConversationEvent subclass. | Below is the instruction that describes the task:
### Input:
Wrap hangouts_pb2.Event in ConversationEvent subclass.
### Response:
def _wrap_event(event_):
"""Wrap hangouts_pb2.Event in ConversationEvent subclass."""
cls = conversation_event.ConversationEvent
if event_.HasField('chat_message'):
cls = conversation_event.ChatMessageEvent
elif event_.HasField('otr_modification'):
cls = conversation_event.OTREvent
elif event_.HasField('conversation_rename'):
cls = conversation_event.RenameEvent
elif event_.HasField('membership_change'):
cls = conversation_event.MembershipChangeEvent
elif event_.HasField('hangout_event'):
cls = conversation_event.HangoutEvent
elif event_.HasField('group_link_sharing_modification'):
cls = conversation_event.GroupLinkSharingModificationEvent
return cls(event_) |
def filter_variants(variant_collection, patient, filter_fn, **kwargs):
"""Filter variants from the Variant Collection
Parameters
----------
variant_collection : varcode.VariantCollection
patient : cohorts.Patient
filter_fn: function
Takes a FilterableVariant and returns a boolean. Only variants returning True are preserved.
Returns
-------
varcode.VariantCollection
Filtered variant collection, with only the variants passing the filter
"""
if filter_fn:
return variant_collection.clone_with_new_elements([
variant
for variant in variant_collection
if filter_fn(FilterableVariant(
variant=variant,
variant_collection=variant_collection,
patient=patient,
), **kwargs)
])
else:
return variant_collection | Filter variants from the Variant Collection
Parameters
----------
variant_collection : varcode.VariantCollection
patient : cohorts.Patient
filter_fn: function
Takes a FilterableVariant and returns a boolean. Only variants returning True are preserved.
Returns
-------
varcode.VariantCollection
Filtered variant collection, with only the variants passing the filter | Below is the instruction that describes the task:
### Input:
Filter variants from the Variant Collection
Parameters
----------
variant_collection : varcode.VariantCollection
patient : cohorts.Patient
filter_fn: function
Takes a FilterableVariant and returns a boolean. Only variants returning True are preserved.
Returns
-------
varcode.VariantCollection
Filtered variant collection, with only the variants passing the filter
### Response:
def filter_variants(variant_collection, patient, filter_fn, **kwargs):
"""Filter variants from the Variant Collection
Parameters
----------
variant_collection : varcode.VariantCollection
patient : cohorts.Patient
filter_fn: function
Takes a FilterableVariant and returns a boolean. Only variants returning True are preserved.
Returns
-------
varcode.VariantCollection
Filtered variant collection, with only the variants passing the filter
"""
if filter_fn:
return variant_collection.clone_with_new_elements([
variant
for variant in variant_collection
if filter_fn(FilterableVariant(
variant=variant,
variant_collection=variant_collection,
patient=patient,
), **kwargs)
])
else:
return variant_collection |
def _secondary_min(self):
"""Getter for the minimum series value"""
return (
self.secondary_range[0]
if (self.secondary_range
and self.secondary_range[0] is not None) else
(min(self._secondary_values) if self._secondary_values else None)
) | Getter for the minimum series value | Below is the instruction that describes the task:
### Input:
Getter for the minimum series value
### Response:
def _secondary_min(self):
"""Getter for the minimum series value"""
return (
self.secondary_range[0]
if (self.secondary_range
and self.secondary_range[0] is not None) else
(min(self._secondary_values) if self._secondary_values else None)
) |
def magic_timeit(setup, stmt, ncalls=None, repeat=3, force_ms=False):
"""Time execution of a Python statement or expression
Usage:\\
%timeit [-n<N> -r<R> [-t|-c]] statement
Time execution of a Python statement or expression using the timeit
module.
Options:
-n<N>: execute the given statement <N> times in a loop. If this value
is not given, a fitting value is chosen.
-r<R>: repeat the loop iteration <R> times and take the best result.
Default: 3
-t: use time.time to measure the time, which is the default on Unix.
This function measures wall time.
-c: use time.clock to measure the time, which is the default on
Windows and measures wall time. On Unix, resource.getrusage is used
instead and returns the CPU user time.
-p<P>: use a precision of <P> digits to display the timing result.
Default: 3
Examples:
In [1]: %timeit pass
10000000 loops, best of 3: 53.3 ns per loop
In [2]: u = None
In [3]: %timeit u is None
10000000 loops, best of 3: 184 ns per loop
In [4]: %timeit -r 4 u == None
1000000 loops, best of 4: 242 ns per loop
In [5]: import time
In [6]: %timeit -n1 time.sleep(2)
1 loops, best of 3: 2 s per loop
The times reported by %timeit will be slightly higher than those
reported by the timeit.py script when variables are accessed. This is
due to the fact that %timeit executes the statement in the namespace
of the shell, compared with timeit.py, which uses a single setup
statement to import function or create variables. Generally, the bias
does not matter as long as results from timeit.py are not mixed with
those from %timeit."""
import timeit
import math
units = ["s", "ms", 'us', "ns"]
scaling = [1, 1e3, 1e6, 1e9]
timer = timeit.Timer(stmt, setup)
if ncalls is None:
# determine number so that 0.2 <= total time < 2.0
number = 1
for _ in range(1, 10):
if timer.timeit(number) >= 0.1:
break
number *= 10
else:
number = ncalls
best = min(timer.repeat(repeat, number)) / number
if force_ms:
order = 1
else:
if best > 0.0 and best < 1000.0:
order = min(-int(math.floor(math.log10(best)) // 3), 3)
elif best >= 1000.0:
order = 0
else:
order = 3
return {'loops': number,
'repeat': repeat,
'timing': best * scaling[order],
'units': units[order]} | Time execution of a Python statement or expression
Usage:\\
%timeit [-n<N> -r<R> [-t|-c]] statement
Time execution of a Python statement or expression using the timeit
module.
Options:
-n<N>: execute the given statement <N> times in a loop. If this value
is not given, a fitting value is chosen.
-r<R>: repeat the loop iteration <R> times and take the best result.
Default: 3
-t: use time.time to measure the time, which is the default on Unix.
This function measures wall time.
-c: use time.clock to measure the time, which is the default on
Windows and measures wall time. On Unix, resource.getrusage is used
instead and returns the CPU user time.
-p<P>: use a precision of <P> digits to display the timing result.
Default: 3
Examples:
In [1]: %timeit pass
10000000 loops, best of 3: 53.3 ns per loop
In [2]: u = None
In [3]: %timeit u is None
10000000 loops, best of 3: 184 ns per loop
In [4]: %timeit -r 4 u == None
1000000 loops, best of 4: 242 ns per loop
In [5]: import time
In [6]: %timeit -n1 time.sleep(2)
1 loops, best of 3: 2 s per loop
The times reported by %timeit will be slightly higher than those
reported by the timeit.py script when variables are accessed. This is
due to the fact that %timeit executes the statement in the namespace
of the shell, compared with timeit.py, which uses a single setup
statement to import function or create variables. Generally, the bias
does not matter as long as results from timeit.py are not mixed with
those from %timeit. | Below is the instruction that describes the task:
### Input:
Time execution of a Python statement or expression
Usage:\\
%timeit [-n<N> -r<R> [-t|-c]] statement
Time execution of a Python statement or expression using the timeit
module.
Options:
-n<N>: execute the given statement <N> times in a loop. If this value
is not given, a fitting value is chosen.
-r<R>: repeat the loop iteration <R> times and take the best result.
Default: 3
-t: use time.time to measure the time, which is the default on Unix.
This function measures wall time.
-c: use time.clock to measure the time, which is the default on
Windows and measures wall time. On Unix, resource.getrusage is used
instead and returns the CPU user time.
-p<P>: use a precision of <P> digits to display the timing result.
Default: 3
Examples:
In [1]: %timeit pass
10000000 loops, best of 3: 53.3 ns per loop
In [2]: u = None
In [3]: %timeit u is None
10000000 loops, best of 3: 184 ns per loop
In [4]: %timeit -r 4 u == None
1000000 loops, best of 4: 242 ns per loop
In [5]: import time
In [6]: %timeit -n1 time.sleep(2)
1 loops, best of 3: 2 s per loop
The times reported by %timeit will be slightly higher than those
reported by the timeit.py script when variables are accessed. This is
due to the fact that %timeit executes the statement in the namespace
of the shell, compared with timeit.py, which uses a single setup
statement to import function or create variables. Generally, the bias
does not matter as long as results from timeit.py are not mixed with
those from %timeit.
### Response:
def magic_timeit(setup, stmt, ncalls=None, repeat=3, force_ms=False):
"""Time execution of a Python statement or expression
Usage:\\
%timeit [-n<N> -r<R> [-t|-c]] statement
Time execution of a Python statement or expression using the timeit
module.
Options:
-n<N>: execute the given statement <N> times in a loop. If this value
is not given, a fitting value is chosen.
-r<R>: repeat the loop iteration <R> times and take the best result.
Default: 3
-t: use time.time to measure the time, which is the default on Unix.
This function measures wall time.
-c: use time.clock to measure the time, which is the default on
Windows and measures wall time. On Unix, resource.getrusage is used
instead and returns the CPU user time.
-p<P>: use a precision of <P> digits to display the timing result.
Default: 3
Examples:
In [1]: %timeit pass
10000000 loops, best of 3: 53.3 ns per loop
In [2]: u = None
In [3]: %timeit u is None
10000000 loops, best of 3: 184 ns per loop
In [4]: %timeit -r 4 u == None
1000000 loops, best of 4: 242 ns per loop
In [5]: import time
In [6]: %timeit -n1 time.sleep(2)
1 loops, best of 3: 2 s per loop
The times reported by %timeit will be slightly higher than those
reported by the timeit.py script when variables are accessed. This is
due to the fact that %timeit executes the statement in the namespace
of the shell, compared with timeit.py, which uses a single setup
statement to import function or create variables. Generally, the bias
does not matter as long as results from timeit.py are not mixed with
those from %timeit."""
import timeit
import math
units = ["s", "ms", 'us', "ns"]
scaling = [1, 1e3, 1e6, 1e9]
timer = timeit.Timer(stmt, setup)
if ncalls is None:
# determine number so that 0.2 <= total time < 2.0
number = 1
for _ in range(1, 10):
if timer.timeit(number) >= 0.1:
break
number *= 10
else:
number = ncalls
best = min(timer.repeat(repeat, number)) / number
if force_ms:
order = 1
else:
if best > 0.0 and best < 1000.0:
order = min(-int(math.floor(math.log10(best)) // 3), 3)
elif best >= 1000.0:
order = 0
else:
order = 3
return {'loops': number,
'repeat': repeat,
'timing': best * scaling[order],
'units': units[order]} |
def _csv(self, cursor, fieldnames, output_fh):
"""Writes the rows of `cursor` in CSV format to `output_fh`
and returns it.
:param cursor: database cursor containing data to be output
:type cursor: `sqlite3.Cursor`
:param fieldnames: row headings
:type fieldnames: `list`
:param output_fh: file to write data to
:type output_fh: file object
:rtype: file object
"""
self._logger.info('Finished query; outputting results in CSV format')
# Specify a lineterminator to avoid an extra \r being added on
# Windows; see
# https://stackoverflow.com/questions/3191528/csv-in-python-adding-extra-carriage-return
if sys.platform in ('win32', 'cygwin') and output_fh is sys.stdout:
writer = csv.writer(output_fh, lineterminator='\n')
else:
writer = csv.writer(output_fh)
writer.writerow(fieldnames)
for row in cursor:
writer.writerow(row)
self._logger.info('Finished outputting results')
return output_fh | Writes the rows of `cursor` in CSV format to `output_fh`
and returns it.
:param cursor: database cursor containing data to be output
:type cursor: `sqlite3.Cursor`
:param fieldnames: row headings
:type fieldnames: `list`
:param output_fh: file to write data to
:type output_fh: file object
:rtype: file object | Below is the instruction that describes the task:
### Input:
Writes the rows of `cursor` in CSV format to `output_fh`
and returns it.
:param cursor: database cursor containing data to be output
:type cursor: `sqlite3.Cursor`
:param fieldnames: row headings
:type fieldnames: `list`
:param output_fh: file to write data to
:type output_fh: file object
:rtype: file object
### Response:
def _csv(self, cursor, fieldnames, output_fh):
"""Writes the rows of `cursor` in CSV format to `output_fh`
and returns it.
:param cursor: database cursor containing data to be output
:type cursor: `sqlite3.Cursor`
:param fieldnames: row headings
:type fieldnames: `list`
:param output_fh: file to write data to
:type output_fh: file object
:rtype: file object
"""
self._logger.info('Finished query; outputting results in CSV format')
# Specify a lineterminator to avoid an extra \r being added on
# Windows; see
# https://stackoverflow.com/questions/3191528/csv-in-python-adding-extra-carriage-return
if sys.platform in ('win32', 'cygwin') and output_fh is sys.stdout:
writer = csv.writer(output_fh, lineterminator='\n')
else:
writer = csv.writer(output_fh)
writer.writerow(fieldnames)
for row in cursor:
writer.writerow(row)
self._logger.info('Finished outputting results')
return output_fh |
def draw(self, **kwargs):
"""Draw the polygon
Optional Inputs:
------------
All optional inputs are passed to ``matplotlib.patches.Polygon``
Notes:
---------
Does not accept maptype as an argument.
"""
ax = mp.gca()
shape = matplotlib.patches.Polygon(self.polygon, **kwargs)
ax.add_artist(shape) | Draw the polygon
Optional Inputs:
------------
All optional inputs are passed to ``matplotlib.patches.Polygon``
Notes:
---------
Does not accept maptype as an argument. | Below is the instruction that describes the task:
### Input:
Draw the polygon
Optional Inputs:
------------
All optional inputs are passed to ``matplotlib.patches.Polygon``
Notes:
---------
Does not accept maptype as an argument.
### Response:
def draw(self, **kwargs):
"""Draw the polygon
Optional Inputs:
------------
All optional inputs are passed to ``matplotlib.patches.Polygon``
Notes:
---------
Does not accept maptype as an argument.
"""
ax = mp.gca()
shape = matplotlib.patches.Polygon(self.polygon, **kwargs)
ax.add_artist(shape) |
def cross_v2(vec1, vec2):
"""Return the crossproduct of the two vectors as a Vec2.
Cross product doesn't really make sense in 2D, but return the Z component
of the 3d result.
"""
return vec1.y * vec2.x - vec1.x * vec2.y | Return the crossproduct of the two vectors as a Vec2.
Cross product doesn't really make sense in 2D, but return the Z component
of the 3d result. | Below is the instruction that describes the task:
### Input:
Return the crossproduct of the two vectors as a Vec2.
Cross product doesn't really make sense in 2D, but return the Z component
of the 3d result.
### Response:
def cross_v2(vec1, vec2):
"""Return the crossproduct of the two vectors as a Vec2.
Cross product doesn't really make sense in 2D, but return the Z component
of the 3d result.
"""
return vec1.y * vec2.x - vec1.x * vec2.y |
def convert_activation(net, node, module, builder):
"""Convert an activation layer from mxnet to coreml.
Parameters
----------
network: net
A mxnet network object.
layer: node
Node to convert.
module: module
An module for MXNet
builder: NeuralNetworkBuilder
A neural network builder object.
"""
input_name, output_name = _get_input_output_name(net, node)
name = node['name']
mx_non_linearity = _get_attrs(node)['act_type']
#TODO add SCALED_TANH, SOFTPLUS, SOFTSIGN, SIGMOID_HARD, LEAKYRELU, PRELU, ELU, PARAMETRICSOFTPLUS, THRESHOLDEDRELU, LINEAR
if mx_non_linearity == 'relu':
non_linearity = 'RELU'
elif mx_non_linearity == 'tanh':
non_linearity = 'TANH'
elif mx_non_linearity == 'sigmoid':
non_linearity = 'SIGMOID'
else:
raise TypeError('Unknown activation type %s' % mx_non_linearity)
builder.add_activation(name = name,
non_linearity = non_linearity,
input_name = input_name,
output_name = output_name) | Convert an activation layer from mxnet to coreml.
Parameters
----------
network: net
A mxnet network object.
layer: node
Node to convert.
module: module
An module for MXNet
builder: NeuralNetworkBuilder
A neural network builder object. | Below is the instruction that describes the task:
### Input:
Convert an activation layer from mxnet to coreml.
Parameters
----------
network: net
A mxnet network object.
layer: node
Node to convert.
module: module
An module for MXNet
builder: NeuralNetworkBuilder
A neural network builder object.
### Response:
def convert_activation(net, node, module, builder):
"""Convert an activation layer from mxnet to coreml.
Parameters
----------
network: net
A mxnet network object.
layer: node
Node to convert.
module: module
An module for MXNet
builder: NeuralNetworkBuilder
A neural network builder object.
"""
input_name, output_name = _get_input_output_name(net, node)
name = node['name']
mx_non_linearity = _get_attrs(node)['act_type']
#TODO add SCALED_TANH, SOFTPLUS, SOFTSIGN, SIGMOID_HARD, LEAKYRELU, PRELU, ELU, PARAMETRICSOFTPLUS, THRESHOLDEDRELU, LINEAR
if mx_non_linearity == 'relu':
non_linearity = 'RELU'
elif mx_non_linearity == 'tanh':
non_linearity = 'TANH'
elif mx_non_linearity == 'sigmoid':
non_linearity = 'SIGMOID'
else:
raise TypeError('Unknown activation type %s' % mx_non_linearity)
builder.add_activation(name = name,
non_linearity = non_linearity,
input_name = input_name,
output_name = output_name) |
def _GetDirectory(self):
"""Retrieves a directory.
Returns:
CPIODirectory: a directory or None if not available.
"""
if self.entry_type != definitions.FILE_ENTRY_TYPE_DIRECTORY:
return None
return CPIODirectory(self._file_system, self.path_spec) | Retrieves a directory.
Returns:
CPIODirectory: a directory or None if not available. | Below is the instruction that describes the task:
### Input:
Retrieves a directory.
Returns:
CPIODirectory: a directory or None if not available.
### Response:
def _GetDirectory(self):
"""Retrieves a directory.
Returns:
CPIODirectory: a directory or None if not available.
"""
if self.entry_type != definitions.FILE_ENTRY_TYPE_DIRECTORY:
return None
return CPIODirectory(self._file_system, self.path_spec) |
def p_pragma_assign(self, p):
'pragma : LPAREN TIMES ID EQUALS expression TIMES RPAREN'
p[0] = Pragma(PragmaEntry(p[3], p[5], lineno=p.lineno(1)),
lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) | pragma : LPAREN TIMES ID EQUALS expression TIMES RPAREN | Below is the instruction that describes the task:
### Input:
pragma : LPAREN TIMES ID EQUALS expression TIMES RPAREN
### Response:
def p_pragma_assign(self, p):
'pragma : LPAREN TIMES ID EQUALS expression TIMES RPAREN'
p[0] = Pragma(PragmaEntry(p[3], p[5], lineno=p.lineno(1)),
lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) |
def _make_text_predict_fn(self, labels, instance, column_to_explain):
"""Create a predict_fn that can be used by LIME text explainer. """
def _predict_fn(perturbed_text):
predict_input = []
for x in perturbed_text:
instance_copy = dict(instance)
instance_copy[column_to_explain] = x
predict_input.append(instance_copy)
df = _local_predict.get_prediction_results(self._model_dir, predict_input,
self._headers, with_source=False)
probs = _local_predict.get_probs_for_labels(labels, df)
return np.asarray(probs)
return _predict_fn | Create a predict_fn that can be used by LIME text explainer. | Below is the instruction that describes the task:
### Input:
Create a predict_fn that can be used by LIME text explainer.
### Response:
def _make_text_predict_fn(self, labels, instance, column_to_explain):
"""Create a predict_fn that can be used by LIME text explainer. """
def _predict_fn(perturbed_text):
predict_input = []
for x in perturbed_text:
instance_copy = dict(instance)
instance_copy[column_to_explain] = x
predict_input.append(instance_copy)
df = _local_predict.get_prediction_results(self._model_dir, predict_input,
self._headers, with_source=False)
probs = _local_predict.get_probs_for_labels(labels, df)
return np.asarray(probs)
return _predict_fn |
def write_collection(self, filename, collection):
"""
Writes a collection of stop words into a file.
"""
collection = sorted(list(collection))
with open(filename, 'wb+') as fd:
fd.truncate()
fd.write('\n'.join(collection).encode('utf-8')) | Writes a collection of stop words into a file. | Below is the instruction that describes the task:
### Input:
Writes a collection of stop words into a file.
### Response:
def write_collection(self, filename, collection):
"""
Writes a collection of stop words into a file.
"""
collection = sorted(list(collection))
with open(filename, 'wb+') as fd:
fd.truncate()
fd.write('\n'.join(collection).encode('utf-8')) |
def canonical_request(self):
"""
The AWS SigV4 canonical request given parameters from an HTTP request.
This process is outlined here:
http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
The canonical request is:
request_method + '\n' +
canonical_uri_path + '\n' +
canonical_query_string + '\n' +
signed_headers + '\n' +
sha256(body).hexdigest()
"""
signed_headers = self.signed_headers
header_lines = "".join(
["%s:%s\n" % item for item in iteritems(signed_headers)])
header_keys = ";".join([key for key in iterkeys(self.signed_headers)])
return (self.request_method + "\n" +
self.canonical_uri_path + "\n" +
self.canonical_query_string + "\n" +
header_lines + "\n" +
header_keys + "\n" +
sha256(self.body).hexdigest()) | The AWS SigV4 canonical request given parameters from an HTTP request.
This process is outlined here:
http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
The canonical request is:
request_method + '\n' +
canonical_uri_path + '\n' +
canonical_query_string + '\n' +
signed_headers + '\n' +
sha256(body).hexdigest() | Below is the instruction that describes the task:
### Input:
The AWS SigV4 canonical request given parameters from an HTTP request.
This process is outlined here:
http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
The canonical request is:
request_method + '\n' +
canonical_uri_path + '\n' +
canonical_query_string + '\n' +
signed_headers + '\n' +
sha256(body).hexdigest()
### Response:
def canonical_request(self):
"""
The AWS SigV4 canonical request given parameters from an HTTP request.
This process is outlined here:
http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
The canonical request is:
request_method + '\n' +
canonical_uri_path + '\n' +
canonical_query_string + '\n' +
signed_headers + '\n' +
sha256(body).hexdigest()
"""
signed_headers = self.signed_headers
header_lines = "".join(
["%s:%s\n" % item for item in iteritems(signed_headers)])
header_keys = ";".join([key for key in iterkeys(self.signed_headers)])
return (self.request_method + "\n" +
self.canonical_uri_path + "\n" +
self.canonical_query_string + "\n" +
header_lines + "\n" +
header_keys + "\n" +
sha256(self.body).hexdigest()) |
def pageview(self, cur_p=''):
'''
View the list of the Log.
'''
if cur_p == '':
current_page_number = 1
else:
current_page_number = int(cur_p)
current_page_number = 1 if current_page_number < 1 else current_page_number
pager_num = int(MLog.total_number() / CMS_CFG['list_num'])
kwd = {
'pager': '',
'title': '',
'current_page': current_page_number,
}
arr_num = []
postinfo = MLog.query_all_current_url()
for i in postinfo:
postnum = MLog.count_of_current_url(i.current_url)
arr_num.append(postnum)
self.render('misc/log/pageview.html',
kwd=kwd,
infos=MLog.query_all_pageview(current_page_num=current_page_number),
postinfo=postinfo,
arr_num=arr_num,
format_date=tools.format_date,
userinfo=self.userinfo) | View the list of the Log. | Below is the instruction that describes the task:
### Input:
View the list of the Log.
### Response:
def pageview(self, cur_p=''):
'''
View the list of the Log.
'''
if cur_p == '':
current_page_number = 1
else:
current_page_number = int(cur_p)
current_page_number = 1 if current_page_number < 1 else current_page_number
pager_num = int(MLog.total_number() / CMS_CFG['list_num'])
kwd = {
'pager': '',
'title': '',
'current_page': current_page_number,
}
arr_num = []
postinfo = MLog.query_all_current_url()
for i in postinfo:
postnum = MLog.count_of_current_url(i.current_url)
arr_num.append(postnum)
self.render('misc/log/pageview.html',
kwd=kwd,
infos=MLog.query_all_pageview(current_page_num=current_page_number),
postinfo=postinfo,
arr_num=arr_num,
format_date=tools.format_date,
userinfo=self.userinfo) |
def maybe_download(url, filename):
"""Download the data from Yann's website, unless it's already here."""
if not os.path.exists(WORK_DIRECTORY):
os.mkdir(WORK_DIRECTORY)
filepath = os.path.join(WORK_DIRECTORY, filename)
if not os.path.exists(filepath):
filepath, _ = request.urlretrieve(url + filename, filepath)
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
return filepath | Download the data from Yann's website, unless it's already here. | Below is the instruction that describes the task:
### Input:
Download the data from Yann's website, unless it's already here.
### Response:
def maybe_download(url, filename):
"""Download the data from Yann's website, unless it's already here."""
if not os.path.exists(WORK_DIRECTORY):
os.mkdir(WORK_DIRECTORY)
filepath = os.path.join(WORK_DIRECTORY, filename)
if not os.path.exists(filepath):
filepath, _ = request.urlretrieve(url + filename, filepath)
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
return filepath |
def pixelToMap(pX, pY, geoTransform):
"""Convert pixel coordinates to map coordinates based on geotransform
Accepts float or NumPy arrays
GDAL model used here - upper left corner of upper left pixel for mX, mY (and in GeoTransform)
"""
pX = np.asarray(pX, dtype=float)
pY = np.asarray(pY, dtype=float)
pX += 0.5
pY += 0.5
mX, mY = applyGeoTransform(pX, pY, geoTransform)
return mX, mY | Convert pixel coordinates to map coordinates based on geotransform
Accepts float or NumPy arrays
GDAL model used here - upper left corner of upper left pixel for mX, mY (and in GeoTransform) | Below is the instruction that describes the task:
### Input:
Convert pixel coordinates to map coordinates based on geotransform
Accepts float or NumPy arrays
GDAL model used here - upper left corner of upper left pixel for mX, mY (and in GeoTransform)
### Response:
def pixelToMap(pX, pY, geoTransform):
"""Convert pixel coordinates to map coordinates based on geotransform
Accepts float or NumPy arrays
GDAL model used here - upper left corner of upper left pixel for mX, mY (and in GeoTransform)
"""
pX = np.asarray(pX, dtype=float)
pY = np.asarray(pY, dtype=float)
pX += 0.5
pY += 0.5
mX, mY = applyGeoTransform(pX, pY, geoTransform)
return mX, mY |
def _set_catalog_view(self, session):
"""Sets the underlying catalog view to match current view"""
if self._catalog_view == COMPARATIVE:
try:
session.use_comparative_catalog_view()
except AttributeError:
pass
else:
try:
session.use_plenary_catalog_view()
except AttributeError:
pass | Sets the underlying catalog view to match current view | Below is the instruction that describes the task:
### Input:
Sets the underlying catalog view to match current view
### Response:
def _set_catalog_view(self, session):
"""Sets the underlying catalog view to match current view"""
if self._catalog_view == COMPARATIVE:
try:
session.use_comparative_catalog_view()
except AttributeError:
pass
else:
try:
session.use_plenary_catalog_view()
except AttributeError:
pass |
def _clean_up_columns(
self):
"""clean up columns of the NED table
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check redendering of this docstring
"""
self.log.debug('starting the ``_clean_up_columns`` method')
tableName = self.dbTableName
print "cleaning up %(tableName)s columns" % locals()
sqlQuery = u"""
set sql_mode="STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION";
""" % locals()
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
)
sqlQuery = u"""
update %(tableName)s set dist_mod_err = null where dist_mod_err = 0;
update %(tableName)s set dist_in_ned_flag = null where dist_in_ned_flag = "";
update %(tableName)s set notes = null where notes = "";
update %(tableName)s set redshift = null where redshift = 0;
update %(tableName)s set dist_derived_from_sn = null where dist_derived_from_sn = "";
update %(tableName)s set hubble_const = null where hubble_const = 0;
update %(tableName)s set lmc_mod = null where lmc_mod = 0;
update %(tableName)s set master_row = 0;
update %(tableName)s set master_row = 1 where primaryId in (select * from (select distinct primaryId from %(tableName)s group by galaxy_index_id) as alias);
""" % locals()
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
)
self.log.debug('completed the ``_clean_up_columns`` method')
return None | clean up columns of the NED table
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check redendering of this docstring | Below is the the instruction that describes the task:
### Input:
clean up columns of the NED table
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check redendering of this docstring
### Response:
def _clean_up_columns(
        self):
    """clean up columns of the NED table

    Replaces empty-string and zero placeholder values with SQL NULLs and
    rebuilds the ``master_row`` flag so that one row per
    ``galaxy_index_id`` is marked as the master row.

    **Return:**
        - None
    """
    self.log.debug('starting the ``_clean_up_columns`` method')

    tableName = self.dbTableName
    print "cleaning up %(tableName)s columns" % locals()

    # Pin the SQL mode first so the zero-date/zero-value updates below
    # behave consistently across MySQL server configurations.
    sqlQuery = u"""
set sql_mode="STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION";
""" % locals()
    writequery(
        log=self.log,
        sqlQuery=sqlQuery,
        dbConn=self.cataloguesDbConn,
    )

    # NULL-out sentinel values (0 / empty string) and then flag exactly one
    # master row per galaxy_index_id.
    sqlQuery = u"""
update %(tableName)s set dist_mod_err = null where dist_mod_err = 0;
update %(tableName)s set dist_in_ned_flag = null where dist_in_ned_flag = "";
update %(tableName)s set notes = null where notes = "";
update %(tableName)s set redshift = null where redshift = 0;
update %(tableName)s set dist_derived_from_sn = null where dist_derived_from_sn = "";
update %(tableName)s set hubble_const = null where hubble_const = 0;
update %(tableName)s set lmc_mod = null where lmc_mod = 0;
update %(tableName)s set master_row = 0;
update %(tableName)s set master_row = 1 where primaryId in (select * from (select distinct primaryId from %(tableName)s group by galaxy_index_id) as alias);
""" % locals()
    writequery(
        log=self.log,
        sqlQuery=sqlQuery,
        dbConn=self.cataloguesDbConn,
    )

    self.log.debug('completed the ``_clean_up_columns`` method')
    return None
def timestamp_feature(catalog, soup):
"""The datetime the xml file was last modified.
"""
catalog.timestamp = int(soup.coursedb['timestamp'])
catalog.datetime = datetime.datetime.fromtimestamp(catalog.timestamp)
logger.info('Catalog last updated on %s' % catalog.datetime) | The datetime the xml file was last modified. | Below is the instruction that describes the task:
### Input:
The datetime the xml file was last modified.
### Response:
def timestamp_feature(catalog, soup):
    """The datetime the xml file was last modified.

    Reads the ``timestamp`` attribute of the ``coursedb`` element and
    stores it on *catalog* both as a raw epoch integer (``timestamp``)
    and as a ``datetime`` object (``datetime``).
    """
    raw_stamp = int(soup.coursedb['timestamp'])
    catalog.timestamp = raw_stamp
    catalog.datetime = datetime.datetime.fromtimestamp(raw_stamp)
    logger.info('Catalog last updated on %s' % catalog.datetime)
def list(self, activity_name=values.unset, activity_sid=values.unset,
available=values.unset, friendly_name=values.unset,
target_workers_expression=values.unset, task_queue_name=values.unset,
task_queue_sid=values.unset, limit=None, page_size=None):
"""
Lists WorkerInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param unicode activity_name: Filter by workers that are in a particular Activity by Friendly Name
:param unicode activity_sid: Filter by workers that are in a particular Activity by SID
:param unicode available: Filter by workers that are available or unavailable.
:param unicode friendly_name: Filter by a worker's friendly name
:param unicode target_workers_expression: Filter by workers that would match an expression on a TaskQueue.
:param unicode task_queue_name: Filter by workers that are eligible for a TaskQueue by Friendly Name
:param unicode task_queue_sid: Filter by workers that are eligible for a TaskQueue by SID
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.taskrouter.v1.workspace.worker.WorkerInstance]
"""
return list(self.stream(
activity_name=activity_name,
activity_sid=activity_sid,
available=available,
friendly_name=friendly_name,
target_workers_expression=target_workers_expression,
task_queue_name=task_queue_name,
task_queue_sid=task_queue_sid,
limit=limit,
page_size=page_size,
)) | Lists WorkerInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param unicode activity_name: Filter by workers that are in a particular Activity by Friendly Name
:param unicode activity_sid: Filter by workers that are in a particular Activity by SID
:param unicode available: Filter by workers that are available or unavailable.
:param unicode friendly_name: Filter by a worker's friendly name
:param unicode target_workers_expression: Filter by workers that would match an expression on a TaskQueue.
:param unicode task_queue_name: Filter by workers that are eligible for a TaskQueue by Friendly Name
:param unicode task_queue_sid: Filter by workers that are eligible for a TaskQueue by SID
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.taskrouter.v1.workspace.worker.WorkerInstance] | Below is the the instruction that describes the task:
### Input:
Lists WorkerInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param unicode activity_name: Filter by workers that are in a particular Activity by Friendly Name
:param unicode activity_sid: Filter by workers that are in a particular Activity by SID
:param unicode available: Filter by workers that are available or unavailable.
:param unicode friendly_name: Filter by a worker's friendly name
:param unicode target_workers_expression: Filter by workers that would match an expression on a TaskQueue.
:param unicode task_queue_name: Filter by workers that are eligible for a TaskQueue by Friendly Name
:param unicode task_queue_sid: Filter by workers that are eligible for a TaskQueue by SID
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.taskrouter.v1.workspace.worker.WorkerInstance]
### Response:
def list(self, activity_name=values.unset, activity_sid=values.unset,
         available=values.unset, friendly_name=values.unset,
         target_workers_expression=values.unset, task_queue_name=values.unset,
         task_queue_sid=values.unset, limit=None, page_size=None):
    """
    Lists WorkerInstance records from the API as a list.
    Unlike stream(), this operation is eager and will load `limit` records into
    memory before returning.
    :param unicode activity_name: Filter by workers that are in a particular Activity by Friendly Name
    :param unicode activity_sid: Filter by workers that are in a particular Activity by SID
    :param unicode available: Filter by workers that are available or unavailable.
    :param unicode friendly_name: Filter by a worker's friendly name
    :param unicode target_workers_expression: Filter by workers that would match an expression on a TaskQueue.
    :param unicode task_queue_name: Filter by workers that are eligible for a TaskQueue by Friendly Name
    :param unicode task_queue_sid: Filter by workers that are eligible for a TaskQueue by SID
    :param int limit: Upper limit for the number of records to return. list() guarantees
                      never to return more than limit. Default is no limit
    :param int page_size: Number of records to fetch per request, when not set will use
                          the default value of 50 records. If no page_size is defined
                          but a limit is defined, list() will attempt to read the limit
                          with the most efficient page size, i.e. min(limit, 1000)
    :returns: Generator that will yield up to limit results
    :rtype: list[twilio.rest.taskrouter.v1.workspace.worker.WorkerInstance]
    """
    # Collect the filter arguments once, then eagerly drain the lazy
    # stream() generator into a list.
    filters = {
        'activity_name': activity_name,
        'activity_sid': activity_sid,
        'available': available,
        'friendly_name': friendly_name,
        'target_workers_expression': target_workers_expression,
        'task_queue_name': task_queue_name,
        'task_queue_sid': task_queue_sid,
    }
    return [record for record in self.stream(
        limit=limit,
        page_size=page_size,
        **filters
    )]
def IsDir(v):
"""Verify the directory exists.
>>> IsDir()('/')
'/'
>>> with raises(DirInvalid, 'Not a directory'):
... IsDir()(None)
"""
try:
if v:
v = str(v)
return os.path.isdir(v)
else:
raise DirInvalid("Not a directory")
except TypeError:
raise DirInvalid("Not a directory") | Verify the directory exists.
>>> IsDir()('/')
'/'
>>> with raises(DirInvalid, 'Not a directory'):
... IsDir()(None) | Below is the the instruction that describes the task:
### Input:
Verify the directory exists.
>>> IsDir()('/')
'/'
>>> with raises(DirInvalid, 'Not a directory'):
... IsDir()(None)
### Response:
def IsDir(v):
    """Verify the directory exists.

    Returns the boolean result of ``os.path.isdir`` for truthy values and
    raises :class:`DirInvalid` for falsy input (``None``, ``''``, ``0``).

    >>> IsDir('/')
    True
    >>> with raises(DirInvalid, 'Not a directory'):
    ...   IsDir(None)
    """
    # NOTE(review): upstream voluptuous wraps this in @truth, making
    # ``IsDir()`` a validator factory that returns its argument; as written
    # here it is a direct predicate returning a bool. The original doctest
    # (``IsDir()('/')`` -> ``'/'``) documented the decorated form and did
    # not match this code; it has been corrected above.
    try:
        if v:
            v = str(v)
            return os.path.isdir(v)
        else:
            raise DirInvalid("Not a directory")
    except TypeError:
        raise DirInvalid("Not a directory")
def get_items_by_search(self, item_query, item_search):
"""Pass through to provider ItemSearchSession.get_items_by_search"""
# Implemented from azosid template for -
# osid.resource.ResourceSearchSession.get_resources_by_search_template
if not self._can('search'):
raise PermissionDenied()
return self._provider_session.get_items_by_search(item_query, item_search) | Pass through to provider ItemSearchSession.get_items_by_search | Below is the instruction that describes the task:
### Input:
Pass through to provider ItemSearchSession.get_items_by_search
### Response:
def get_items_by_search(self, item_query, item_search):
    """Pass through to provider ItemSearchSession.get_items_by_search"""
    # Implemented from azosid template for -
    # osid.resource.ResourceSearchSession.get_resources_by_search_template
    if self._can('search'):
        return self._provider_session.get_items_by_search(
            item_query, item_search)
    raise PermissionDenied()
def _getRightsAssignments(user_right):
'''
helper function to return all the user rights assignments/users
'''
sids = []
polHandle = win32security.LsaOpenPolicy(None, win32security.POLICY_ALL_ACCESS)
sids = win32security.LsaEnumerateAccountsWithUserRight(polHandle, user_right)
return sids | helper function to return all the user rights assignments/users | Below is the instruction that describes the task:
### Input:
helper function to return all the user rights assignments/users
### Response:
def _getRightsAssignments(user_right):
    '''
    helper function to return all the user rights assignments/users

    Args:
        user_right (str): privilege constant, e.g. ``SeServiceLogonRight``

    Returns:
        the accounts (SIDs) holding the given right, as returned by
        ``LsaEnumerateAccountsWithUserRight``
    '''
    # Open the local LSA policy; full access is required for the
    # enumeration call below.
    polHandle = win32security.LsaOpenPolicy(None, win32security.POLICY_ALL_ACCESS)
    # The original dead ``sids = []`` initialiser was removed: the call
    # below always rebinds the name before it is read.
    sids = win32security.LsaEnumerateAccountsWithUserRight(polHandle, user_right)
    return sids
def _most_recent_assembly(assembly_names):
"""
Given list of (in this case, matched) assemblies, identify the most recent
("recency" here is determined by sorting based on the numeric element of the assembly name)
"""
match_recency = [
int(re.search('\d+', assembly_name).group())
for assembly_name in assembly_names
]
most_recent = [
x for (y, x) in sorted(zip(match_recency, assembly_names), reverse=True)][0]
return most_recent | Given list of (in this case, matched) assemblies, identify the most recent
("recency" here is determined by sorting based on the numeric element of the assembly name) | Below is the the instruction that describes the task:
### Input:
Given list of (in this case, matched) assemblies, identify the most recent
("recency" here is determined by sorting based on the numeric element of the assembly name)
### Response:
def _most_recent_assembly(assembly_names):
"""
Given list of (in this case, matched) assemblies, identify the most recent
("recency" here is determined by sorting based on the numeric element of the assembly name)
"""
match_recency = [
int(re.search('\d+', assembly_name).group())
for assembly_name in assembly_names
]
most_recent = [
x for (y, x) in sorted(zip(match_recency, assembly_names), reverse=True)][0]
return most_recent |
def delete_certificate(ctx, slot, management_key, pin):
"""
Delete a certificate.
Delete a certificate from a slot on the YubiKey.
"""
controller = ctx.obj['controller']
_ensure_authenticated(ctx, controller, pin, management_key)
controller.delete_certificate(slot) | Delete a certificate.
Delete a certificate from a slot on the YubiKey. | Below is the instruction that describes the task:
### Input:
Delete a certificate.
Delete a certificate from a slot on the YubiKey.
### Response:
def delete_certificate(ctx, slot, management_key, pin):
    """
    Delete a certificate.

    Delete a certificate from a slot on the YubiKey, authenticating
    first with the management key and PIN.
    """
    piv_controller = ctx.obj['controller']
    _ensure_authenticated(ctx, piv_controller, pin, management_key)
    piv_controller.delete_certificate(slot)
def txt_line_iterator(path):
"""Iterate through lines of file."""
with tf.gfile.Open(path) as f:
for line in f:
yield line.strip() | Iterate through lines of file. | Below is the instruction that describes the task:
### Input:
Iterate through lines of file.
### Response:
def txt_line_iterator(path):
    """Yield each line of the file at *path*, stripped of surrounding whitespace."""
    with tf.gfile.Open(path) as file_obj:
        for raw_line in file_obj:
            yield raw_line.strip()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.