code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def _hook_syscall(self, uc, data):
"""
Unicorn hook that transfers control to Manticore so it can execute the syscall
"""
logger.debug(f"Stopping emulation at {hex(uc.reg_read(self._to_unicorn_id('RIP')))} to perform syscall")
self.sync_unicorn_to_manticore()
from ..native.cpu.abstractcpu import Syscall
self._to_raise = Syscall()
uc.emu_stop() | Unicorn hook that transfers control to Manticore so it can execute the syscall | Below is the the instruction that describes the task:
### Input:
Unicorn hook that transfers control to Manticore so it can execute the syscall
### Response:
def _hook_syscall(self, uc, data):
"""
Unicorn hook that transfers control to Manticore so it can execute the syscall
"""
logger.debug(f"Stopping emulation at {hex(uc.reg_read(self._to_unicorn_id('RIP')))} to perform syscall")
self.sync_unicorn_to_manticore()
from ..native.cpu.abstractcpu import Syscall
self._to_raise = Syscall()
uc.emu_stop() |
def clean_tempfiles():
'''Clean up temp files'''
for fn in TEMP_FILES:
if os.path.exists(fn):
os.unlink(fn) | Clean up temp files | Below is the the instruction that describes the task:
### Input:
Clean up temp files
### Response:
def clean_tempfiles():
'''Clean up temp files'''
for fn in TEMP_FILES:
if os.path.exists(fn):
os.unlink(fn) |
def save(self, filething=None, v1=1, v2_version=4, v23_sep='/',
padding=None):
"""save(filething=None, v1=1, v2_version=4, v23_sep='/', padding=None)
Save changes to a file.
See :meth:`mutagen.id3.ID3.save` for more info.
"""
if v2_version == 3:
# EasyID3 only works with v2.4 frames, so update_to_v23() would
# break things. We have to save a shallow copy of all tags
# and restore it after saving. Due to CHAP/CTOC copying has
# to be done recursively implemented in ID3Tags.
backup = self.__id3._copy()
try:
self.__id3.update_to_v23()
self.__id3.save(
filething, v1=v1, v2_version=v2_version, v23_sep=v23_sep,
padding=padding)
finally:
self.__id3._restore(backup)
else:
self.__id3.save(filething, v1=v1, v2_version=v2_version,
v23_sep=v23_sep, padding=padding) | save(filething=None, v1=1, v2_version=4, v23_sep='/', padding=None)
Save changes to a file.
See :meth:`mutagen.id3.ID3.save` for more info. | Below is the the instruction that describes the task:
### Input:
save(filething=None, v1=1, v2_version=4, v23_sep='/', padding=None)
Save changes to a file.
See :meth:`mutagen.id3.ID3.save` for more info.
### Response:
def save(self, filething=None, v1=1, v2_version=4, v23_sep='/',
padding=None):
"""save(filething=None, v1=1, v2_version=4, v23_sep='/', padding=None)
Save changes to a file.
See :meth:`mutagen.id3.ID3.save` for more info.
"""
if v2_version == 3:
# EasyID3 only works with v2.4 frames, so update_to_v23() would
# break things. We have to save a shallow copy of all tags
# and restore it after saving. Due to CHAP/CTOC copying has
# to be done recursively implemented in ID3Tags.
backup = self.__id3._copy()
try:
self.__id3.update_to_v23()
self.__id3.save(
filething, v1=v1, v2_version=v2_version, v23_sep=v23_sep,
padding=padding)
finally:
self.__id3._restore(backup)
else:
self.__id3.save(filething, v1=v1, v2_version=v2_version,
v23_sep=v23_sep, padding=padding) |
def close(self):
"""
Closes the lvm and vg_t handle. Usually you would never need to use this method
unless you are doing operations using the ctypes function wrappers in conversion.py
*Raises:*
* HandleError
"""
if self.handle:
cl = lvm_vg_close(self.handle)
if cl != 0:
raise HandleError("Failed to close VG handle after init check.")
self.__vgh = None
self.lvm.close() | Closes the lvm and vg_t handle. Usually you would never need to use this method
unless you are doing operations using the ctypes function wrappers in conversion.py
*Raises:*
* HandleError | Below is the the instruction that describes the task:
### Input:
Closes the lvm and vg_t handle. Usually you would never need to use this method
unless you are doing operations using the ctypes function wrappers in conversion.py
*Raises:*
* HandleError
### Response:
def close(self):
"""
Closes the lvm and vg_t handle. Usually you would never need to use this method
unless you are doing operations using the ctypes function wrappers in conversion.py
*Raises:*
* HandleError
"""
if self.handle:
cl = lvm_vg_close(self.handle)
if cl != 0:
raise HandleError("Failed to close VG handle after init check.")
self.__vgh = None
self.lvm.close() |
def get_data_dir():
"""
Find out our installation prefix and data directory. These can be in
different places depending on how ansible-cmdb was installed.
"""
data_dir_paths = [
os.path.join(os.path.dirname(ansiblecmdb.__file__), 'data'),
os.path.join(os.path.dirname(sys.argv[0]), '..', 'lib', 'ansiblecmdb', 'data'),
'/usr/local/lib/ansiblecmdb/data',
'/usr/lib/ansiblecmdb/data',
]
data_dir = util.find_path(data_dir_paths, 'tpl/html_fancy.tpl')
if not data_dir:
sys.stdout.write("Couldn't find the data dir for the templates. I tried: {0}\n".format(", ".join(data_dir_paths)))
sys.exit(1)
return data_dir | Find out our installation prefix and data directory. These can be in
different places depending on how ansible-cmdb was installed. | Below is the the instruction that describes the task:
### Input:
Find out our installation prefix and data directory. These can be in
different places depending on how ansible-cmdb was installed.
### Response:
def get_data_dir():
"""
Find out our installation prefix and data directory. These can be in
different places depending on how ansible-cmdb was installed.
"""
data_dir_paths = [
os.path.join(os.path.dirname(ansiblecmdb.__file__), 'data'),
os.path.join(os.path.dirname(sys.argv[0]), '..', 'lib', 'ansiblecmdb', 'data'),
'/usr/local/lib/ansiblecmdb/data',
'/usr/lib/ansiblecmdb/data',
]
data_dir = util.find_path(data_dir_paths, 'tpl/html_fancy.tpl')
if not data_dir:
sys.stdout.write("Couldn't find the data dir for the templates. I tried: {0}\n".format(", ".join(data_dir_paths)))
sys.exit(1)
return data_dir |
def list_objects(service_instance, vim_object, properties=None):
'''
Returns a simple list of objects from a given service instance.
service_instance
The Service Instance for which to obtain a list of objects.
object_type
The type of content for which to obtain information.
properties
An optional list of object properties used to return reference results.
If not provided, defaults to ``name``.
'''
if properties is None:
properties = ['name']
items = []
item_list = get_mors_with_properties(service_instance, vim_object, properties)
for item in item_list:
items.append(item['name'])
return items | Returns a simple list of objects from a given service instance.
service_instance
The Service Instance for which to obtain a list of objects.
object_type
The type of content for which to obtain information.
properties
An optional list of object properties used to return reference results.
If not provided, defaults to ``name``. | Below is the the instruction that describes the task:
### Input:
Returns a simple list of objects from a given service instance.
service_instance
The Service Instance for which to obtain a list of objects.
object_type
The type of content for which to obtain information.
properties
An optional list of object properties used to return reference results.
If not provided, defaults to ``name``.
### Response:
def list_objects(service_instance, vim_object, properties=None):
'''
Returns a simple list of objects from a given service instance.
service_instance
The Service Instance for which to obtain a list of objects.
object_type
The type of content for which to obtain information.
properties
An optional list of object properties used to return reference results.
If not provided, defaults to ``name``.
'''
if properties is None:
properties = ['name']
items = []
item_list = get_mors_with_properties(service_instance, vim_object, properties)
for item in item_list:
items.append(item['name'])
return items |
def get_marker(marker_type, enum_ammo=False):
'''
Returns a marker function of the requested marker_type
>>> marker = get_marker('uniq')(__test_missile)
>>> type(marker)
<type 'str'>
>>> len(marker)
32
>>> get_marker('uri')(__test_missile)
'_example_search_hello_help_us'
>>> marker = get_marker('non-existent')(__test_missile)
Traceback (most recent call last):
...
NotImplementedError: No such marker: "non-existent"
>>> get_marker('3')(__test_missile)
'_example_search_hello'
>>> marker = get_marker('3', True)
>>> marker(__test_missile)
'_example_search_hello#0'
>>> marker(__test_missile)
'_example_search_hello#1'
'''
try:
limit = int(marker_type)
if limit:
marker = __UriMarker(limit)
else:
def marker(m):
return ''
except ValueError:
if marker_type in __markers:
marker = __markers[marker_type]
else:
raise NotImplementedError('No such marker: "%s"' % marker_type)
# todo: fix u'False'
if enum_ammo:
marker = __Enumerator(marker)
return marker | Returns a marker function of the requested marker_type
>>> marker = get_marker('uniq')(__test_missile)
>>> type(marker)
<type 'str'>
>>> len(marker)
32
>>> get_marker('uri')(__test_missile)
'_example_search_hello_help_us'
>>> marker = get_marker('non-existent')(__test_missile)
Traceback (most recent call last):
...
NotImplementedError: No such marker: "non-existent"
>>> get_marker('3')(__test_missile)
'_example_search_hello'
>>> marker = get_marker('3', True)
>>> marker(__test_missile)
'_example_search_hello#0'
>>> marker(__test_missile)
'_example_search_hello#1' | Below is the the instruction that describes the task:
### Input:
Returns a marker function of the requested marker_type
>>> marker = get_marker('uniq')(__test_missile)
>>> type(marker)
<type 'str'>
>>> len(marker)
32
>>> get_marker('uri')(__test_missile)
'_example_search_hello_help_us'
>>> marker = get_marker('non-existent')(__test_missile)
Traceback (most recent call last):
...
NotImplementedError: No such marker: "non-existent"
>>> get_marker('3')(__test_missile)
'_example_search_hello'
>>> marker = get_marker('3', True)
>>> marker(__test_missile)
'_example_search_hello#0'
>>> marker(__test_missile)
'_example_search_hello#1'
### Response:
def get_marker(marker_type, enum_ammo=False):
'''
Returns a marker function of the requested marker_type
>>> marker = get_marker('uniq')(__test_missile)
>>> type(marker)
<type 'str'>
>>> len(marker)
32
>>> get_marker('uri')(__test_missile)
'_example_search_hello_help_us'
>>> marker = get_marker('non-existent')(__test_missile)
Traceback (most recent call last):
...
NotImplementedError: No such marker: "non-existent"
>>> get_marker('3')(__test_missile)
'_example_search_hello'
>>> marker = get_marker('3', True)
>>> marker(__test_missile)
'_example_search_hello#0'
>>> marker(__test_missile)
'_example_search_hello#1'
'''
try:
limit = int(marker_type)
if limit:
marker = __UriMarker(limit)
else:
def marker(m):
return ''
except ValueError:
if marker_type in __markers:
marker = __markers[marker_type]
else:
raise NotImplementedError('No such marker: "%s"' % marker_type)
# todo: fix u'False'
if enum_ammo:
marker = __Enumerator(marker)
return marker |
def field(self, name):
'''
Get the gdb.Value for the given field within the PyObject, coping with
some python 2 versus python 3 differences.
Various libpython types are defined using the "PyObject_HEAD" and
"PyObject_VAR_HEAD" macros.
In Python 2, this these are defined so that "ob_type" and (for a var
object) "ob_size" are fields of the type in question.
In Python 3, this is defined as an embedded PyVarObject type thus:
PyVarObject ob_base;
so that the "ob_size" field is located insize the "ob_base" field, and
the "ob_type" is most easily accessed by casting back to a (PyObject*).
'''
if self.is_null():
raise NullPyObjectPtr(self)
if name == 'ob_type':
pyo_ptr = self._gdbval.cast(PyObjectPtr.get_gdb_type())
return pyo_ptr.dereference()[name]
if name == 'ob_size':
try:
# Python 2:
return self._gdbval.dereference()[name]
except RuntimeError:
# Python 3:
return self._gdbval.dereference()['ob_base'][name]
# General case: look it up inside the object:
return self._gdbval.dereference()[name] | Get the gdb.Value for the given field within the PyObject, coping with
some python 2 versus python 3 differences.
Various libpython types are defined using the "PyObject_HEAD" and
"PyObject_VAR_HEAD" macros.
In Python 2, this these are defined so that "ob_type" and (for a var
object) "ob_size" are fields of the type in question.
In Python 3, this is defined as an embedded PyVarObject type thus:
PyVarObject ob_base;
so that the "ob_size" field is located insize the "ob_base" field, and
the "ob_type" is most easily accessed by casting back to a (PyObject*). | Below is the the instruction that describes the task:
### Input:
Get the gdb.Value for the given field within the PyObject, coping with
some python 2 versus python 3 differences.
Various libpython types are defined using the "PyObject_HEAD" and
"PyObject_VAR_HEAD" macros.
In Python 2, this these are defined so that "ob_type" and (for a var
object) "ob_size" are fields of the type in question.
In Python 3, this is defined as an embedded PyVarObject type thus:
PyVarObject ob_base;
so that the "ob_size" field is located insize the "ob_base" field, and
the "ob_type" is most easily accessed by casting back to a (PyObject*).
### Response:
def field(self, name):
'''
Get the gdb.Value for the given field within the PyObject, coping with
some python 2 versus python 3 differences.
Various libpython types are defined using the "PyObject_HEAD" and
"PyObject_VAR_HEAD" macros.
In Python 2, this these are defined so that "ob_type" and (for a var
object) "ob_size" are fields of the type in question.
In Python 3, this is defined as an embedded PyVarObject type thus:
PyVarObject ob_base;
so that the "ob_size" field is located insize the "ob_base" field, and
the "ob_type" is most easily accessed by casting back to a (PyObject*).
'''
if self.is_null():
raise NullPyObjectPtr(self)
if name == 'ob_type':
pyo_ptr = self._gdbval.cast(PyObjectPtr.get_gdb_type())
return pyo_ptr.dereference()[name]
if name == 'ob_size':
try:
# Python 2:
return self._gdbval.dereference()[name]
except RuntimeError:
# Python 3:
return self._gdbval.dereference()['ob_base'][name]
# General case: look it up inside the object:
return self._gdbval.dereference()[name] |
def generate_multiline_list(
self,
items, # type: typing.List[typing.Text]
before='', # type: typing.Text
after='', # type: typing.Text
delim=('(', ')'), # type: DelimTuple
compact=True, # type: bool
sep=',', # type: typing.Text
skip_last_sep=False # type: bool
):
# type: (...) -> None
"""
Given a list of items, emits one item per line.
This is convenient for function prototypes and invocations, as well as
for instantiating arrays, sets, and maps in some languages.
TODO(kelkabany): A backend that uses tabs cannot be used with this
if compact is false.
Args:
items (list[str]): Should contain the items to generate a list of.
before (str): The string to come before the list of items.
after (str): The string to follow the list of items.
delim (str, str): The first element is added immediately following
`before`. The second element is added prior to `after`.
compact (bool): In compact mode, the enclosing parentheses are on
the same lines as the first and last list item.
sep (str): The string that follows each list item when compact is
true. If compact is false, the separator is omitted for the
last item.
skip_last_sep (bool): When compact is false, whether the last line
should have a trailing separator. Ignored when compact is true.
"""
assert len(delim) == 2 and isinstance(delim[0], six.text_type) and \
isinstance(delim[1], six.text_type), 'delim must be a tuple of two unicode strings.'
if len(items) == 0:
self.emit(before + delim[0] + delim[1] + after)
return
if len(items) == 1:
self.emit(before + delim[0] + items[0] + delim[1] + after)
return
if compact:
self.emit(before + delim[0] + items[0] + sep)
def emit_list(items):
items = items[1:]
for (i, item) in enumerate(items):
if i == len(items) - 1:
self.emit(item + delim[1] + after)
else:
self.emit(item + sep)
if before or delim[0]:
with self.indent(len(before) + len(delim[0])):
emit_list(items)
else:
emit_list(items)
else:
if before or delim[0]:
self.emit(before + delim[0])
with self.indent():
for (i, item) in enumerate(items):
if i == len(items) - 1 and skip_last_sep:
self.emit(item)
else:
self.emit(item + sep)
if delim[1] or after:
self.emit(delim[1] + after)
elif delim[1]:
self.emit(delim[1]) | Given a list of items, emits one item per line.
This is convenient for function prototypes and invocations, as well as
for instantiating arrays, sets, and maps in some languages.
TODO(kelkabany): A backend that uses tabs cannot be used with this
if compact is false.
Args:
items (list[str]): Should contain the items to generate a list of.
before (str): The string to come before the list of items.
after (str): The string to follow the list of items.
delim (str, str): The first element is added immediately following
`before`. The second element is added prior to `after`.
compact (bool): In compact mode, the enclosing parentheses are on
the same lines as the first and last list item.
sep (str): The string that follows each list item when compact is
true. If compact is false, the separator is omitted for the
last item.
skip_last_sep (bool): When compact is false, whether the last line
should have a trailing separator. Ignored when compact is true. | Below is the the instruction that describes the task:
### Input:
Given a list of items, emits one item per line.
This is convenient for function prototypes and invocations, as well as
for instantiating arrays, sets, and maps in some languages.
TODO(kelkabany): A backend that uses tabs cannot be used with this
if compact is false.
Args:
items (list[str]): Should contain the items to generate a list of.
before (str): The string to come before the list of items.
after (str): The string to follow the list of items.
delim (str, str): The first element is added immediately following
`before`. The second element is added prior to `after`.
compact (bool): In compact mode, the enclosing parentheses are on
the same lines as the first and last list item.
sep (str): The string that follows each list item when compact is
true. If compact is false, the separator is omitted for the
last item.
skip_last_sep (bool): When compact is false, whether the last line
should have a trailing separator. Ignored when compact is true.
### Response:
def generate_multiline_list(
self,
items, # type: typing.List[typing.Text]
before='', # type: typing.Text
after='', # type: typing.Text
delim=('(', ')'), # type: DelimTuple
compact=True, # type: bool
sep=',', # type: typing.Text
skip_last_sep=False # type: bool
):
# type: (...) -> None
"""
Given a list of items, emits one item per line.
This is convenient for function prototypes and invocations, as well as
for instantiating arrays, sets, and maps in some languages.
TODO(kelkabany): A backend that uses tabs cannot be used with this
if compact is false.
Args:
items (list[str]): Should contain the items to generate a list of.
before (str): The string to come before the list of items.
after (str): The string to follow the list of items.
delim (str, str): The first element is added immediately following
`before`. The second element is added prior to `after`.
compact (bool): In compact mode, the enclosing parentheses are on
the same lines as the first and last list item.
sep (str): The string that follows each list item when compact is
true. If compact is false, the separator is omitted for the
last item.
skip_last_sep (bool): When compact is false, whether the last line
should have a trailing separator. Ignored when compact is true.
"""
assert len(delim) == 2 and isinstance(delim[0], six.text_type) and \
isinstance(delim[1], six.text_type), 'delim must be a tuple of two unicode strings.'
if len(items) == 0:
self.emit(before + delim[0] + delim[1] + after)
return
if len(items) == 1:
self.emit(before + delim[0] + items[0] + delim[1] + after)
return
if compact:
self.emit(before + delim[0] + items[0] + sep)
def emit_list(items):
items = items[1:]
for (i, item) in enumerate(items):
if i == len(items) - 1:
self.emit(item + delim[1] + after)
else:
self.emit(item + sep)
if before or delim[0]:
with self.indent(len(before) + len(delim[0])):
emit_list(items)
else:
emit_list(items)
else:
if before or delim[0]:
self.emit(before + delim[0])
with self.indent():
for (i, item) in enumerate(items):
if i == len(items) - 1 and skip_last_sep:
self.emit(item)
else:
self.emit(item + sep)
if delim[1] or after:
self.emit(delim[1] + after)
elif delim[1]:
self.emit(delim[1]) |
def continuous_query_exists(database, name, **client_args):
'''
Check if continuous query with given name exists on the database.
database
Name of the database for which the continuous query was
defined.
name
Name of the continuous query to check.
CLI Example:
.. code-block:: bash
salt '*' influxdb.continuous_query_exists metrics default
'''
if get_continuous_query(database, name, **client_args):
return True
return False | Check if continuous query with given name exists on the database.
database
Name of the database for which the continuous query was
defined.
name
Name of the continuous query to check.
CLI Example:
.. code-block:: bash
salt '*' influxdb.continuous_query_exists metrics default | Below is the the instruction that describes the task:
### Input:
Check if continuous query with given name exists on the database.
database
Name of the database for which the continuous query was
defined.
name
Name of the continuous query to check.
CLI Example:
.. code-block:: bash
salt '*' influxdb.continuous_query_exists metrics default
### Response:
def continuous_query_exists(database, name, **client_args):
'''
Check if continuous query with given name exists on the database.
database
Name of the database for which the continuous query was
defined.
name
Name of the continuous query to check.
CLI Example:
.. code-block:: bash
salt '*' influxdb.continuous_query_exists metrics default
'''
if get_continuous_query(database, name, **client_args):
return True
return False |
def date(name=None):
"""
Creates the grammar for a Date (D) field, accepting only numbers in a
certain pattern.
:param name: name for the field
:return: grammar for the date field
"""
if name is None:
name = 'Date Field'
# Basic field
# This regex allows values from 00000101 to 99991231
field = pp.Regex('[0-9][0-9][0-9][0-9](0[1-9]|1[0-2])'
'(0[1-9]|[1-2][0-9]|3[0-1])')
# Parse action
field.setParseAction(lambda d: datetime.datetime.strptime(d[0], '%Y%m%d')
.date())
# Name
field.setName(name)
# White spaces are not removed
field.leaveWhitespace()
return field | Creates the grammar for a Date (D) field, accepting only numbers in a
certain pattern.
:param name: name for the field
:return: grammar for the date field | Below is the the instruction that describes the task:
### Input:
Creates the grammar for a Date (D) field, accepting only numbers in a
certain pattern.
:param name: name for the field
:return: grammar for the date field
### Response:
def date(name=None):
"""
Creates the grammar for a Date (D) field, accepting only numbers in a
certain pattern.
:param name: name for the field
:return: grammar for the date field
"""
if name is None:
name = 'Date Field'
# Basic field
# This regex allows values from 00000101 to 99991231
field = pp.Regex('[0-9][0-9][0-9][0-9](0[1-9]|1[0-2])'
'(0[1-9]|[1-2][0-9]|3[0-1])')
# Parse action
field.setParseAction(lambda d: datetime.datetime.strptime(d[0], '%Y%m%d')
.date())
# Name
field.setName(name)
# White spaces are not removed
field.leaveWhitespace()
return field |
def number_of_states(dtrajs):
r"""
Determine the number of states from a set of discrete trajectories
Parameters
----------
dtrajs : list of int-arrays
discrete trajectories
"""
# determine number of states n
nmax = 0
for dtraj in dtrajs:
nmax = max(nmax, np.max(dtraj))
# return number of states
return nmax + 1 | r"""
Determine the number of states from a set of discrete trajectories
Parameters
----------
dtrajs : list of int-arrays
discrete trajectories | Below is the the instruction that describes the task:
### Input:
r"""
Determine the number of states from a set of discrete trajectories
Parameters
----------
dtrajs : list of int-arrays
discrete trajectories
### Response:
def number_of_states(dtrajs):
r"""
Determine the number of states from a set of discrete trajectories
Parameters
----------
dtrajs : list of int-arrays
discrete trajectories
"""
# determine number of states n
nmax = 0
for dtraj in dtrajs:
nmax = max(nmax, np.max(dtraj))
# return number of states
return nmax + 1 |
def black(m):
"""Return a function that maps all values from [0.0,m] to 0, and maps
the range [m,1.0] into [0.0, 1.0] linearly.
"""
m = float(m)
def f(x):
if x <= m:
return 0.0
return (x - m) / (1.0 - m)
return f | Return a function that maps all values from [0.0,m] to 0, and maps
the range [m,1.0] into [0.0, 1.0] linearly. | Below is the the instruction that describes the task:
### Input:
Return a function that maps all values from [0.0,m] to 0, and maps
the range [m,1.0] into [0.0, 1.0] linearly.
### Response:
def black(m):
"""Return a function that maps all values from [0.0,m] to 0, and maps
the range [m,1.0] into [0.0, 1.0] linearly.
"""
m = float(m)
def f(x):
if x <= m:
return 0.0
return (x - m) / (1.0 - m)
return f |
def mpl_weight2qt(weight):
"""Convert a weight from matplotlib definition to a Qt weight
Parameters
----------
weight: int or string
Either an integer between 1 and 1000 or a string out of
:attr:`weights_mpl2qt`
Returns
-------
int
One type of the PyQt5.QtGui.QFont.Weight"""
try:
weight = weights_mpl2qt[weight]
except KeyError:
try:
weight = float(weight) / 10
except (ValueError, TypeError):
weight = QtGui.QFont.Normal
else:
try:
weight = min(filter(lambda w: w >= weight, weights_qt2mpl),
key=lambda w: abs(w - weight))
except ValueError:
weight = QtGui.QFont.Normal
return weight | Convert a weight from matplotlib definition to a Qt weight
Parameters
----------
weight: int or string
Either an integer between 1 and 1000 or a string out of
:attr:`weights_mpl2qt`
Returns
-------
int
One type of the PyQt5.QtGui.QFont.Weight | Below is the the instruction that describes the task:
### Input:
Convert a weight from matplotlib definition to a Qt weight
Parameters
----------
weight: int or string
Either an integer between 1 and 1000 or a string out of
:attr:`weights_mpl2qt`
Returns
-------
int
One type of the PyQt5.QtGui.QFont.Weight
### Response:
def mpl_weight2qt(weight):
"""Convert a weight from matplotlib definition to a Qt weight
Parameters
----------
weight: int or string
Either an integer between 1 and 1000 or a string out of
:attr:`weights_mpl2qt`
Returns
-------
int
One type of the PyQt5.QtGui.QFont.Weight"""
try:
weight = weights_mpl2qt[weight]
except KeyError:
try:
weight = float(weight) / 10
except (ValueError, TypeError):
weight = QtGui.QFont.Normal
else:
try:
weight = min(filter(lambda w: w >= weight, weights_qt2mpl),
key=lambda w: abs(w - weight))
except ValueError:
weight = QtGui.QFont.Normal
return weight |
def plaintext(cls):
"""Uses only authentication mechanisms that provide the credentials in
un-hashed form, typically meaning
:attr:`~pysasl.AuthenticationCredentials.has_secret` is True.
Returns:
A new :class:`SASLAuth` object.
"""
builtin_mechs = cls._get_builtin_mechanisms()
plaintext_mechs = [mech for _, mech in builtin_mechs.items()
if mech.insecure and mech.priority is not None]
return SASLAuth(plaintext_mechs) | Uses only authentication mechanisms that provide the credentials in
un-hashed form, typically meaning
:attr:`~pysasl.AuthenticationCredentials.has_secret` is True.
Returns:
A new :class:`SASLAuth` object. | Below is the the instruction that describes the task:
### Input:
Uses only authentication mechanisms that provide the credentials in
un-hashed form, typically meaning
:attr:`~pysasl.AuthenticationCredentials.has_secret` is True.
Returns:
A new :class:`SASLAuth` object.
### Response:
def plaintext(cls):
"""Uses only authentication mechanisms that provide the credentials in
un-hashed form, typically meaning
:attr:`~pysasl.AuthenticationCredentials.has_secret` is True.
Returns:
A new :class:`SASLAuth` object.
"""
builtin_mechs = cls._get_builtin_mechanisms()
plaintext_mechs = [mech for _, mech in builtin_mechs.items()
if mech.insecure and mech.priority is not None]
return SASLAuth(plaintext_mechs) |
def add_xref(self, id, xref):
"""
Adds an xref to the xref graph
"""
# note: does not update meta object
if self.xref_graph is None:
self.xref_graph = nx.MultiGraph()
self.xref_graph.add_edge(xref, id) | Adds an xref to the xref graph | Below is the the instruction that describes the task:
### Input:
Adds an xref to the xref graph
### Response:
def add_xref(self, id, xref):
"""
Adds an xref to the xref graph
"""
# note: does not update meta object
if self.xref_graph is None:
self.xref_graph = nx.MultiGraph()
self.xref_graph.add_edge(xref, id) |
def unique_everseen(seq):
"""Solution found here : http://stackoverflow.com/questions/480214/how-do-you-remove-duplicates-from-a-list-in-python-whilst-preserving-order"""
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))] | Solution found here : http://stackoverflow.com/questions/480214/how-do-you-remove-duplicates-from-a-list-in-python-whilst-preserving-order | Below is the the instruction that describes the task:
### Input:
Solution found here : http://stackoverflow.com/questions/480214/how-do-you-remove-duplicates-from-a-list-in-python-whilst-preserving-order
### Response:
def unique_everseen(seq):
"""Solution found here : http://stackoverflow.com/questions/480214/how-do-you-remove-duplicates-from-a-list-in-python-whilst-preserving-order"""
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))] |
def set_lock_code(ctx, lock_code, new_lock_code, clear, generate, force):
"""
Set or change the configuration lock code.
A lock code may be used to protect the application configuration.
The lock code must be a 32 characters (16 bytes) hex value.
"""
dev = ctx.obj['dev']
def prompt_new_lock_code():
return prompt_lock_code(prompt='Enter your new lock code')
def prompt_current_lock_code():
return prompt_lock_code(prompt='Enter your current lock code')
def change_lock_code(lock_code, new_lock_code):
lock_code = _parse_lock_code(ctx, lock_code)
new_lock_code = _parse_lock_code(ctx, new_lock_code)
try:
dev.write_config(
device_config(
config_lock=new_lock_code),
reboot=True,
lock_key=lock_code)
except Exception as e:
logger.error('Changing the lock code failed', exc_info=e)
ctx.fail('Failed to change the lock code. Wrong current code?')
def set_lock_code(new_lock_code):
new_lock_code = _parse_lock_code(ctx, new_lock_code)
try:
dev.write_config(
device_config(
config_lock=new_lock_code),
reboot=True)
except Exception as e:
logger.error('Setting the lock code failed', exc_info=e)
ctx.fail('Failed to set the lock code.')
if generate and new_lock_code:
ctx.fail('Invalid options: --new-lock-code conflicts with --generate.')
if clear:
new_lock_code = CLEAR_LOCK_CODE
if generate:
new_lock_code = b2a_hex(os.urandom(16)).decode('utf-8')
click.echo(
'Using a randomly generated lock code: {}'.format(new_lock_code))
force or click.confirm(
'Lock configuration with this lock code?', abort=True, err=True)
if dev.config.configuration_locked:
if lock_code:
if new_lock_code:
change_lock_code(lock_code, new_lock_code)
else:
new_lock_code = prompt_new_lock_code()
change_lock_code(lock_code, new_lock_code)
else:
if new_lock_code:
lock_code = prompt_current_lock_code()
change_lock_code(lock_code, new_lock_code)
else:
lock_code = prompt_current_lock_code()
new_lock_code = prompt_new_lock_code()
change_lock_code(lock_code, new_lock_code)
else:
if lock_code:
ctx.fail(
'There is no current lock code set. '
'Use --new-lock-code to set one.')
else:
if new_lock_code:
set_lock_code(new_lock_code)
else:
new_lock_code = prompt_new_lock_code()
set_lock_code(new_lock_code) | Set or change the configuration lock code.
A lock code may be used to protect the application configuration.
The lock code must be a 32 characters (16 bytes) hex value. | Below is the the instruction that describes the task:
### Input:
Set or change the configuration lock code.
A lock code may be used to protect the application configuration.
The lock code must be a 32 characters (16 bytes) hex value.
### Response:
def set_lock_code(ctx, lock_code, new_lock_code, clear, generate, force):
    """
    Set or change the configuration lock code.
    A lock code may be used to protect the application configuration.
    The lock code must be a 32 characters (16 bytes) hex value.
    """
    dev = ctx.obj['dev']

    def prompt_new_lock_code():
        # Ask interactively for the code the user wants to set.
        return prompt_lock_code(prompt='Enter your new lock code')

    def prompt_current_lock_code():
        # Ask interactively for the code currently protecting the device.
        return prompt_lock_code(prompt='Enter your current lock code')

    def change_lock_code(lock_code, new_lock_code):
        # Replace an existing lock code; the current one is required as key.
        lock_code = _parse_lock_code(ctx, lock_code)
        new_lock_code = _parse_lock_code(ctx, new_lock_code)
        try:
            dev.write_config(
                device_config(
                    config_lock=new_lock_code),
                reboot=True,
                lock_key=lock_code)
        except Exception as e:
            logger.error('Changing the lock code failed', exc_info=e)
            ctx.fail('Failed to change the lock code. Wrong current code?')

    def write_lock_code(new_lock_code):
        # Initial configuration: no current code is needed yet.
        new_lock_code = _parse_lock_code(ctx, new_lock_code)
        try:
            dev.write_config(
                device_config(
                    config_lock=new_lock_code),
                reboot=True)
        except Exception as e:
            logger.error('Setting the lock code failed', exc_info=e)
            ctx.fail('Failed to set the lock code.')

    if generate and new_lock_code:
        ctx.fail('Invalid options: --new-lock-code conflicts with --generate.')

    if clear:
        new_lock_code = CLEAR_LOCK_CODE

    if generate:
        new_lock_code = b2a_hex(os.urandom(16)).decode('utf-8')
        click.echo(
            'Using a randomly generated lock code: {}'.format(new_lock_code))

    # Short-circuit: the confirmation prompt runs only when --force was not given.
    force or click.confirm(
        'Lock configuration with this lock code?', abort=True, err=True)

    if dev.config.configuration_locked:
        # A code is already set: prompt for whichever codes were not passed
        # on the command line (current code first, then the replacement).
        current = lock_code or prompt_current_lock_code()
        replacement = new_lock_code or prompt_new_lock_code()
        change_lock_code(current, replacement)
    else:
        if lock_code:
            ctx.fail(
                'There is no current lock code set. '
                'Use --new-lock-code to set one.')
        else:
            write_lock_code(new_lock_code or prompt_new_lock_code())
def sismember(self, name, value):
"""Emulate sismember."""
redis_set = self._get_set(name, 'SISMEMBER')
if not redis_set:
return 0
result = self._encode(value) in redis_set
return 1 if result else 0 | Emulate sismember. | Below is the instruction that describes the task:
### Input:
Emulate sismember.
### Response:
def sismember(self, name, value):
    """Emulate sismember."""
    # Look up the backing set; a missing/empty set means "not a member".
    members = self._get_set(name, 'SISMEMBER')
    if not members:
        return 0
    # Redis returns integer 1/0 rather than a bool.
    return int(self._encode(value) in members)
def sina_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
"""Downloads Sina videos by URL.
"""
if 'news.sina.com.cn/zxt' in url:
sina_zxt(url, output_dir=output_dir, merge=merge, info_only=info_only, **kwargs)
return
vid = match1(url, r'vid=(\d+)')
if vid is None:
video_page = get_content(url)
vid = hd_vid = match1(video_page, r'hd_vid\s*:\s*\'([^\']+)\'')
if hd_vid == '0':
vids = match1(video_page, r'[^\w]vid\s*:\s*\'([^\']+)\'').split('|')
vid = vids[-1]
if vid is None:
vid = match1(video_page, r'vid:"?(\d+)"?')
if vid:
#title = match1(video_page, r'title\s*:\s*\'([^\']+)\'')
sina_download_by_vid(vid, output_dir=output_dir, merge=merge, info_only=info_only)
else:
vkey = match1(video_page, r'vkey\s*:\s*"([^"]+)"')
if vkey is None:
vid = match1(url, r'#(\d+)')
sina_download_by_vid(vid, output_dir=output_dir, merge=merge, info_only=info_only)
return
title = match1(video_page, r'title\s*:\s*"([^"]+)"')
sina_download_by_vkey(vkey, title=title, output_dir=output_dir, merge=merge, info_only=info_only) | Downloads Sina videos by URL. | Below is the instruction that describes the task:
### Input:
Downloads Sina videos by URL.
### Response:
def sina_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
    """Downloads Sina videos by URL.
    """
    if 'news.sina.com.cn/zxt' in url:
        # "zxt" news pages have a dedicated extractor.
        sina_zxt(url, output_dir=output_dir, merge=merge, info_only=info_only, **kwargs)
        return

    video_page = None
    vid = match1(url, r'vid=(\d+)')
    if vid is None:
        # No vid in the URL itself; scrape the page for one.
        video_page = get_content(url)
        vid = hd_vid = match1(video_page, r'hd_vid\s*:\s*\'([^\']+)\'')
        if hd_vid == '0':
            # An hd_vid of '0' means the usable ids are in the plain vid list;
            # the last entry is used.
            vids = match1(video_page, r'[^\w]vid\s*:\s*\'([^\']+)\'').split('|')
            vid = vids[-1]
    if vid is None:
        vid = match1(video_page, r'vid:"?(\d+)"?')

    if vid:
        #title = match1(video_page, r'title\s*:\s*\'([^\']+)\'')
        sina_download_by_vid(vid, output_dir=output_dir, merge=merge, info_only=info_only)
        return

    # No vid found anywhere: fall back to the vkey-based download path.
    vkey = match1(video_page, r'vkey\s*:\s*"([^"]+)"')
    if vkey is None:
        vid = match1(url, r'#(\d+)')
        sina_download_by_vid(vid, output_dir=output_dir, merge=merge, info_only=info_only)
        return
    title = match1(video_page, r'title\s*:\s*"([^"]+)"')
    sina_download_by_vkey(vkey, title=title, output_dir=output_dir, merge=merge, info_only=info_only)
def generate_fft_plan(length, level=None, dtype='float64', forward=True):
"""Build a `REAL8FFTPlan` for a fast Fourier transform.
Parameters
----------
length : `int`
number of samples to plan for in each FFT.
level : `int`, optional
amount of work to do when planning the FFT, default set by
`LAL_FFTPLAN_LEVEL` module variable.
dtype : :class:`numpy.dtype`, `type`, `str`, optional
numeric type of data to plan for
forward : bool, optional, default: `True`
whether to create a forward or reverse FFT plan
Returns
-------
plan : `REAL8FFTPlan` or similar
FFT plan of the relevant data type
"""
from ...utils.lal import (find_typed_function, to_lal_type_str)
# generate key for caching plan
laltype = to_lal_type_str(dtype)
key = (length, bool(forward), laltype)
# find existing plan
try:
return LAL_FFTPLANS[key]
# or create one
except KeyError:
create = find_typed_function(dtype, 'Create', 'FFTPlan')
if level is None:
level = LAL_FFTPLAN_LEVEL
LAL_FFTPLANS[key] = create(length, int(bool(forward)), level)
return LAL_FFTPLANS[key] | Build a `REAL8FFTPlan` for a fast Fourier transform.
Parameters
----------
length : `int`
number of samples to plan for in each FFT.
level : `int`, optional
amount of work to do when planning the FFT, default set by
`LAL_FFTPLAN_LEVEL` module variable.
dtype : :class:`numpy.dtype`, `type`, `str`, optional
numeric type of data to plan for
forward : bool, optional, default: `True`
whether to create a forward or reverse FFT plan
Returns
-------
plan : `REAL8FFTPlan` or similar
FFT plan of the relevant data type | Below is the instruction that describes the task:
### Input:
Build a `REAL8FFTPlan` for a fast Fourier transform.
Parameters
----------
length : `int`
number of samples to plan for in each FFT.
level : `int`, optional
amount of work to do when planning the FFT, default set by
`LAL_FFTPLAN_LEVEL` module variable.
dtype : :class:`numpy.dtype`, `type`, `str`, optional
numeric type of data to plan for
forward : bool, optional, default: `True`
whether to create a forward or reverse FFT plan
Returns
-------
plan : `REAL8FFTPlan` or similar
FFT plan of the relevant data type
### Response:
def generate_fft_plan(length, level=None, dtype='float64', forward=True):
    """Build (or fetch a cached) LAL plan for a fast Fourier transform.

    Parameters
    ----------
    length : `int`
        number of samples to plan for in each FFT.
    level : `int`, optional
        amount of work to do when planning the FFT, default set by
        `LAL_FFTPLAN_LEVEL` module variable.
    dtype : :class:`numpy.dtype`, `type`, `str`, optional
        numeric type of data to plan for
    forward : bool, optional, default: `True`
        whether to create a forward or reverse FFT plan
    Returns
    -------
    plan : `REAL8FFTPlan` or similar
        FFT plan of the relevant data type
    """
    from ...utils.lal import (find_typed_function, to_lal_type_str)

    # Plans are memoised in the module-level LAL_FFTPLANS dict, keyed on
    # (length, direction, LAL type string).
    cache_key = (length, bool(forward), to_lal_type_str(dtype))
    if cache_key not in LAL_FFTPLANS:
        # Resolve the typed constructor (e.g. CreateREAL8FFTPlan) and build
        # a new plan, falling back to the module default planning level.
        create = find_typed_function(dtype, 'Create', 'FFTPlan')
        if level is None:
            level = LAL_FFTPLAN_LEVEL
        LAL_FFTPLANS[cache_key] = create(length, int(bool(forward)), level)
    return LAL_FFTPLANS[cache_key]
def is_valid(self, field_name: str, value, kg: dict) -> Optional[dict]:
"""
Check if this value is valid for the given name property according to input knowledge graph and ontology.
If is valid, then return a dict with key @id or @value for ObjectProperty or DatatypeProperty.
No schema checked by this function.
:param field_name: name of the property, if prefix is omitted, then use default namespace
:param value: the value that try to add
:param kg: the knowledge graph that perform adding action
:return: None if the value isn't valid for the property, otherwise return {key: value}, key is @id for
ObjectProperty and @value for DatatypeProperty.
"""
# property
uri = self.__is_valid_uri_resolve(field_name, kg.get("@context"))
property_ = self.get_entity(uri)
if not isinstance(property_, OntologyProperty):
logging.warning("Property is not OntologyProperty, ignoring it: %s", uri)
return None
if not self.__is_valid_domain(property_, kg):
logging.warning("Property does not have valid domain, ignoring it: %s", uri)
return None
# check if is valid range
# first determine the input value type
if isinstance(property_, OntologyDatatypeProperty):
types = self.__is_valid_determine_value_type(value)
else:
if isinstance(value, dict):
try:
types = map(self.get_entity, value['@type'])
except KeyError:
return None # input entity without type
elif self.__is_schema_org_datatype(property_):
if self.expanded_jsonld:
return {'@value': self.__serialize_type(value)}
else:
return value
else:
return {'@id': self.__serialize_type(value)}
# check if is a valid range
if any(property_.is_legal_object(type_) for type_ in types):
if isinstance(property_, OntologyObjectProperty):
return value
elif self.expanded_jsonld:
return {'@value': self.__serialize_type(value)}
else:
return self.__serialize_type(value)
return None | Check if this value is valid for the given name property according to input knowledge graph and ontology.
If is valid, then return a dict with key @id or @value for ObjectProperty or DatatypeProperty.
No schema checked by this function.
:param field_name: name of the property, if prefix is omitted, then use default namespace
:param value: the value that try to add
:param kg: the knowledge graph that perform adding action
:return: None if the value isn't valid for the property, otherwise return {key: value}, key is @id for
ObjectProperty and @value for DatatypeProperty. | Below is the instruction that describes the task:
### Input:
Check if this value is valid for the given name property according to input knowledge graph and ontology.
If is valid, then return a dict with key @id or @value for ObjectProperty or DatatypeProperty.
No schema checked by this function.
:param field_name: name of the property, if prefix is omitted, then use default namespace
:param value: the value that try to add
:param kg: the knowledge graph that perform adding action
:return: None if the value isn't valid for the property, otherwise return {key: value}, key is @id for
ObjectProperty and @value for DatatypeProperty.
### Response:
def is_valid(self, field_name: str, value, kg: dict) -> Optional[dict]:
    """
    Check if this value is valid for the given name property according to input knowledge graph and ontology.
    If is valid, then return a dict with key @id or @value for ObjectProperty or DatatypeProperty.
    No schema checked by this function.
    :param field_name: name of the property, if prefix is omitted, then use default namespace
    :param value: the value that try to add
    :param kg: the knowledge graph that perform adding action
    :return: None if the value isn't valid for the property, otherwise return {key: value}, key is @id for
    ObjectProperty and @value for DatatypeProperty.
    """
    # Resolve the (possibly prefixed) field name against the KG context and
    # look the corresponding property up in the ontology.
    uri = self.__is_valid_uri_resolve(field_name, kg.get("@context"))
    property_ = self.get_entity(uri)
    if not isinstance(property_, OntologyProperty):
        logging.warning("Property is not OntologyProperty, ignoring it: %s", uri)
        return None
    if not self.__is_valid_domain(property_, kg):
        logging.warning("Property does not have valid domain, ignoring it: %s", uri)
        return None

    # Determine the candidate type(s) of the value so the property's range
    # can be checked below.
    if isinstance(property_, OntologyDatatypeProperty):
        types = self.__is_valid_determine_value_type(value)
    elif isinstance(value, dict):
        if '@type' not in value:
            return None  # input entity without type
        types = map(self.get_entity, value['@type'])
    elif self.__is_schema_org_datatype(property_):
        if self.expanded_jsonld:
            return {'@value': self.__serialize_type(value)}
        return value
    else:
        return {'@id': self.__serialize_type(value)}

    # Accept only if at least one candidate type is a legal range member.
    if any(property_.is_legal_object(candidate) for candidate in types):
        if isinstance(property_, OntologyObjectProperty):
            return value
        if self.expanded_jsonld:
            return {'@value': self.__serialize_type(value)}
        return self.__serialize_type(value)
    return None
def _plot_graph(G, vertex_color, vertex_size, highlight,
edges, edge_color, edge_width,
indices, colorbar, limits, ax, title, backend):
r"""Plot a graph with signals as color or vertex size.
Parameters
----------
vertex_color : array_like or color
Signal to plot as vertex color (length is the number of vertices).
If None, vertex color is set to `graph.plotting['vertex_color']`.
Alternatively, a color can be set in any format accepted by matplotlib.
Each vertex color can by specified by an RGB(A) array of dimension
`n_vertices` x 3 (or 4).
vertex_size : array_like or int
Signal to plot as vertex size (length is the number of vertices).
Vertex size ranges from 0.5 to 2 times `graph.plotting['vertex_size']`.
If None, vertex size is set to `graph.plotting['vertex_size']`.
Alternatively, a size can be passed as an integer.
The pyqtgraph backend only accepts an integer size.
highlight : iterable
List of indices of vertices to be highlighted.
Useful for example to show where a filter was localized.
Only available with the matplotlib backend.
edges : bool
Whether to draw edges in addition to vertices.
Default to True if less than 10,000 edges to draw.
Note that drawing many edges can be slow.
edge_color : array_like or color
Signal to plot as edge color (length is the number of edges).
Edge color is given by `graph.plotting['edge_color']` and transparency
ranges from 0.2 to 0.9.
If None, edge color is set to `graph.plotting['edge_color']`.
Alternatively, a color can be set in any format accepted by matplotlib.
Each edge color can by specified by an RGB(A) array of dimension
`n_edges` x 3 (or 4).
Only available with the matplotlib backend.
edge_width : array_like or int
Signal to plot as edge width (length is the number of edges).
Edge width ranges from 0.5 to 2 times `graph.plotting['edge_width']`.
If None, edge width is set to `graph.plotting['edge_width']`.
Alternatively, a width can be passed as an integer.
Only available with the matplotlib backend.
indices : bool
Whether to print the node indices (in the adjacency / Laplacian matrix
and signal vectors) on top of each node.
Useful to locate a node of interest.
Only available with the matplotlib backend.
colorbar : bool
Whether to plot a colorbar indicating the signal's amplitude.
Only available with the matplotlib backend.
limits : [vmin, vmax]
Map colors from vmin to vmax.
Defaults to signal minimum and maximum value.
Only available with the matplotlib backend.
ax : :class:`matplotlib.axes.Axes`
Axes where to draw the graph. Optional, created if not passed.
Only available with the matplotlib backend.
title : str
Title of the figure.
backend: {'matplotlib', 'pyqtgraph', None}
Defines the drawing backend to use.
Defaults to :data:`pygsp.plotting.BACKEND`.
Returns
-------
fig : :class:`matplotlib.figure.Figure`
The figure the plot belongs to. Only with the matplotlib backend.
ax : :class:`matplotlib.axes.Axes`
The axes the plot belongs to. Only with the matplotlib backend.
Notes
-----
The orientation of directed edges is not shown. If edges exist in both
directions, they will be drawn on top of each other.
Examples
--------
>>> import matplotlib
>>> graph = graphs.Sensor(20, seed=42)
>>> graph.compute_fourier_basis(n_eigenvectors=4)
>>> _, _, weights = graph.get_edge_list()
>>> fig, ax = graph.plot(graph.U[:, 1], vertex_size=graph.dw,
... edge_color=weights)
>>> graph.plotting['vertex_size'] = 300
>>> graph.plotting['edge_width'] = 5
>>> graph.plotting['edge_style'] = '--'
>>> fig, ax = graph.plot(edge_width=weights, edge_color=(0, .8, .8, .5),
... vertex_color='black')
>>> fig, ax = graph.plot(vertex_size=graph.dw, indices=True,
... highlight=[17, 3, 16], edges=False)
"""
if not hasattr(G, 'coords') or G.coords is None:
raise AttributeError('Graph has no coordinate set. '
'Please run G.set_coordinates() first.')
check_2d_3d = (G.coords.ndim != 2) or (G.coords.shape[1] not in [2, 3])
if G.coords.ndim != 1 and check_2d_3d:
raise AttributeError('Coordinates should be in 1D, 2D or 3D space.')
if G.coords.shape[0] != G.N:
raise AttributeError('Graph needs G.N = {} coordinates.'.format(G.N))
if backend is None:
backend = BACKEND
def check_shape(signal, name, length, many=False):
if (signal.ndim == 0) or (signal.shape[0] != length):
txt = '{}: signal should have length {}.'
txt = txt.format(name, length)
raise ValueError(txt)
if (not many) and (signal.ndim != 1):
txt = '{}: can plot only one signal (not {}).'
txt = txt.format(name, signal.shape[1])
raise ValueError(txt)
def normalize(x):
"""Scale values in [intercept, 1]. Return 0.5 if constant.
Set intercept value in G.plotting["normalize_intercept"]
with value in [0, 1], default is .25.
"""
ptp = x.ptp()
if ptp == 0:
return np.full(x.shape, 0.5)
else:
intercept = G.plotting['normalize_intercept']
return (1. - intercept) * (x - x.min()) / ptp + intercept
def is_color(color):
if backend == 'matplotlib':
mpl, _, _ = _import_plt()
if mpl.colors.is_color_like(color):
return True # single color
try:
return all(map(mpl.colors.is_color_like, color)) # color list
except TypeError:
return False # e.g., color is an int
else:
return False # No support for pyqtgraph (yet).
if vertex_color is None:
limits = [0, 0]
colorbar = False
if backend == 'matplotlib':
vertex_color = (G.plotting['vertex_color'],)
elif is_color(vertex_color):
limits = [0, 0]
colorbar = False
else:
vertex_color = np.asanyarray(vertex_color).squeeze()
check_shape(vertex_color, 'Vertex color', G.n_vertices,
many=(G.coords.ndim == 1))
if vertex_size is None:
vertex_size = G.plotting['vertex_size']
elif not np.isscalar(vertex_size):
vertex_size = np.asanyarray(vertex_size).squeeze()
check_shape(vertex_size, 'Vertex size', G.n_vertices)
vertex_size = G.plotting['vertex_size'] * 4 * normalize(vertex_size)**2
if edges is None:
edges = G.Ne < 10e3
if edge_color is None:
edge_color = (G.plotting['edge_color'],)
elif not is_color(edge_color):
edge_color = np.asanyarray(edge_color).squeeze()
check_shape(edge_color, 'Edge color', G.n_edges)
edge_color = 0.9 * normalize(edge_color)
edge_color = [
np.tile(G.plotting['edge_color'][:3], [len(edge_color), 1]),
edge_color[:, np.newaxis],
]
edge_color = np.concatenate(edge_color, axis=1)
if edge_width is None:
edge_width = G.plotting['edge_width']
elif not np.isscalar(edge_width):
edge_width = np.array(edge_width).squeeze()
check_shape(edge_width, 'Edge width', G.n_edges)
edge_width = G.plotting['edge_width'] * 2 * normalize(edge_width)
if limits is None:
limits = [1.05*vertex_color.min(), 1.05*vertex_color.max()]
if title is None:
title = G.__repr__(limit=4)
if backend == 'pyqtgraph':
if vertex_color is None:
_qtg_plot_graph(G, edges=edges, vertex_size=vertex_size,
title=title)
else:
_qtg_plot_signal(G, signal=vertex_color, vertex_size=vertex_size,
edges=edges, limits=limits, title=title)
elif backend == 'matplotlib':
return _plt_plot_graph(G, vertex_color=vertex_color,
vertex_size=vertex_size, highlight=highlight,
edges=edges, indices=indices, colorbar=colorbar,
edge_color=edge_color, edge_width=edge_width,
limits=limits, ax=ax, title=title)
else:
raise ValueError('Unknown backend {}.'.format(backend)) | r"""Plot a graph with signals as color or vertex size.
Parameters
----------
vertex_color : array_like or color
Signal to plot as vertex color (length is the number of vertices).
If None, vertex color is set to `graph.plotting['vertex_color']`.
Alternatively, a color can be set in any format accepted by matplotlib.
Each vertex color can be specified by an RGB(A) array of dimension
`n_vertices` x 3 (or 4).
vertex_size : array_like or int
Signal to plot as vertex size (length is the number of vertices).
Vertex size ranges from 0.5 to 2 times `graph.plotting['vertex_size']`.
If None, vertex size is set to `graph.plotting['vertex_size']`.
Alternatively, a size can be passed as an integer.
The pyqtgraph backend only accepts an integer size.
highlight : iterable
List of indices of vertices to be highlighted.
Useful for example to show where a filter was localized.
Only available with the matplotlib backend.
edges : bool
Whether to draw edges in addition to vertices.
Default to True if less than 10,000 edges to draw.
Note that drawing many edges can be slow.
edge_color : array_like or color
Signal to plot as edge color (length is the number of edges).
Edge color is given by `graph.plotting['edge_color']` and transparency
ranges from 0.2 to 0.9.
If None, edge color is set to `graph.plotting['edge_color']`.
Alternatively, a color can be set in any format accepted by matplotlib.
Each edge color can be specified by an RGB(A) array of dimension
`n_edges` x 3 (or 4).
Only available with the matplotlib backend.
edge_width : array_like or int
Signal to plot as edge width (length is the number of edges).
Edge width ranges from 0.5 to 2 times `graph.plotting['edge_width']`.
If None, edge width is set to `graph.plotting['edge_width']`.
Alternatively, a width can be passed as an integer.
Only available with the matplotlib backend.
indices : bool
Whether to print the node indices (in the adjacency / Laplacian matrix
and signal vectors) on top of each node.
Useful to locate a node of interest.
Only available with the matplotlib backend.
colorbar : bool
Whether to plot a colorbar indicating the signal's amplitude.
Only available with the matplotlib backend.
limits : [vmin, vmax]
Map colors from vmin to vmax.
Defaults to signal minimum and maximum value.
Only available with the matplotlib backend.
ax : :class:`matplotlib.axes.Axes`
Axes where to draw the graph. Optional, created if not passed.
Only available with the matplotlib backend.
title : str
Title of the figure.
backend: {'matplotlib', 'pyqtgraph', None}
Defines the drawing backend to use.
Defaults to :data:`pygsp.plotting.BACKEND`.
Returns
-------
fig : :class:`matplotlib.figure.Figure`
The figure the plot belongs to. Only with the matplotlib backend.
ax : :class:`matplotlib.axes.Axes`
The axes the plot belongs to. Only with the matplotlib backend.
Notes
-----
The orientation of directed edges is not shown. If edges exist in both
directions, they will be drawn on top of each other.
Examples
--------
>>> import matplotlib
>>> graph = graphs.Sensor(20, seed=42)
>>> graph.compute_fourier_basis(n_eigenvectors=4)
>>> _, _, weights = graph.get_edge_list()
>>> fig, ax = graph.plot(graph.U[:, 1], vertex_size=graph.dw,
... edge_color=weights)
>>> graph.plotting['vertex_size'] = 300
>>> graph.plotting['edge_width'] = 5
>>> graph.plotting['edge_style'] = '--'
>>> fig, ax = graph.plot(edge_width=weights, edge_color=(0, .8, .8, .5),
... vertex_color='black')
>>> fig, ax = graph.plot(vertex_size=graph.dw, indices=True,
... highlight=[17, 3, 16], edges=False) | Below is the instruction that describes the task:
### Input:
r"""Plot a graph with signals as color or vertex size.
Parameters
----------
vertex_color : array_like or color
Signal to plot as vertex color (length is the number of vertices).
If None, vertex color is set to `graph.plotting['vertex_color']`.
Alternatively, a color can be set in any format accepted by matplotlib.
Each vertex color can be specified by an RGB(A) array of dimension
`n_vertices` x 3 (or 4).
vertex_size : array_like or int
Signal to plot as vertex size (length is the number of vertices).
Vertex size ranges from 0.5 to 2 times `graph.plotting['vertex_size']`.
If None, vertex size is set to `graph.plotting['vertex_size']`.
Alternatively, a size can be passed as an integer.
The pyqtgraph backend only accepts an integer size.
highlight : iterable
List of indices of vertices to be highlighted.
Useful for example to show where a filter was localized.
Only available with the matplotlib backend.
edges : bool
Whether to draw edges in addition to vertices.
Default to True if less than 10,000 edges to draw.
Note that drawing many edges can be slow.
edge_color : array_like or color
Signal to plot as edge color (length is the number of edges).
Edge color is given by `graph.plotting['edge_color']` and transparency
ranges from 0.2 to 0.9.
If None, edge color is set to `graph.plotting['edge_color']`.
Alternatively, a color can be set in any format accepted by matplotlib.
Each edge color can be specified by an RGB(A) array of dimension
`n_edges` x 3 (or 4).
Only available with the matplotlib backend.
edge_width : array_like or int
Signal to plot as edge width (length is the number of edges).
Edge width ranges from 0.5 to 2 times `graph.plotting['edge_width']`.
If None, edge width is set to `graph.plotting['edge_width']`.
Alternatively, a width can be passed as an integer.
Only available with the matplotlib backend.
indices : bool
Whether to print the node indices (in the adjacency / Laplacian matrix
and signal vectors) on top of each node.
Useful to locate a node of interest.
Only available with the matplotlib backend.
colorbar : bool
Whether to plot a colorbar indicating the signal's amplitude.
Only available with the matplotlib backend.
limits : [vmin, vmax]
Map colors from vmin to vmax.
Defaults to signal minimum and maximum value.
Only available with the matplotlib backend.
ax : :class:`matplotlib.axes.Axes`
Axes where to draw the graph. Optional, created if not passed.
Only available with the matplotlib backend.
title : str
Title of the figure.
backend: {'matplotlib', 'pyqtgraph', None}
Defines the drawing backend to use.
Defaults to :data:`pygsp.plotting.BACKEND`.
Returns
-------
fig : :class:`matplotlib.figure.Figure`
The figure the plot belongs to. Only with the matplotlib backend.
ax : :class:`matplotlib.axes.Axes`
The axes the plot belongs to. Only with the matplotlib backend.
Notes
-----
The orientation of directed edges is not shown. If edges exist in both
directions, they will be drawn on top of each other.
Examples
--------
>>> import matplotlib
>>> graph = graphs.Sensor(20, seed=42)
>>> graph.compute_fourier_basis(n_eigenvectors=4)
>>> _, _, weights = graph.get_edge_list()
>>> fig, ax = graph.plot(graph.U[:, 1], vertex_size=graph.dw,
... edge_color=weights)
>>> graph.plotting['vertex_size'] = 300
>>> graph.plotting['edge_width'] = 5
>>> graph.plotting['edge_style'] = '--'
>>> fig, ax = graph.plot(edge_width=weights, edge_color=(0, .8, .8, .5),
... vertex_color='black')
>>> fig, ax = graph.plot(vertex_size=graph.dw, indices=True,
... highlight=[17, 3, 16], edges=False)
### Response:
def _plot_graph(G, vertex_color, vertex_size, highlight,
edges, edge_color, edge_width,
indices, colorbar, limits, ax, title, backend):
r"""Plot a graph with signals as color or vertex size.
Parameters
----------
vertex_color : array_like or color
Signal to plot as vertex color (length is the number of vertices).
If None, vertex color is set to `graph.plotting['vertex_color']`.
Alternatively, a color can be set in any format accepted by matplotlib.
Each vertex color can by specified by an RGB(A) array of dimension
`n_vertices` x 3 (or 4).
vertex_size : array_like or int
Signal to plot as vertex size (length is the number of vertices).
Vertex size ranges from 0.5 to 2 times `graph.plotting['vertex_size']`.
If None, vertex size is set to `graph.plotting['vertex_size']`.
Alternatively, a size can be passed as an integer.
The pyqtgraph backend only accepts an integer size.
highlight : iterable
List of indices of vertices to be highlighted.
Useful for example to show where a filter was localized.
Only available with the matplotlib backend.
edges : bool
Whether to draw edges in addition to vertices.
Default to True if less than 10,000 edges to draw.
Note that drawing many edges can be slow.
edge_color : array_like or color
Signal to plot as edge color (length is the number of edges).
Edge color is given by `graph.plotting['edge_color']` and transparency
ranges from 0.2 to 0.9.
If None, edge color is set to `graph.plotting['edge_color']`.
Alternatively, a color can be set in any format accepted by matplotlib.
Each edge color can by specified by an RGB(A) array of dimension
`n_edges` x 3 (or 4).
Only available with the matplotlib backend.
edge_width : array_like or int
Signal to plot as edge width (length is the number of edges).
Edge width ranges from 0.5 to 2 times `graph.plotting['edge_width']`.
If None, edge width is set to `graph.plotting['edge_width']`.
Alternatively, a width can be passed as an integer.
Only available with the matplotlib backend.
indices : bool
Whether to print the node indices (in the adjacency / Laplacian matrix
and signal vectors) on top of each node.
Useful to locate a node of interest.
Only available with the matplotlib backend.
colorbar : bool
Whether to plot a colorbar indicating the signal's amplitude.
Only available with the matplotlib backend.
limits : [vmin, vmax]
Map colors from vmin to vmax.
Defaults to signal minimum and maximum value.
Only available with the matplotlib backend.
ax : :class:`matplotlib.axes.Axes`
Axes where to draw the graph. Optional, created if not passed.
Only available with the matplotlib backend.
title : str
Title of the figure.
backend: {'matplotlib', 'pyqtgraph', None}
Defines the drawing backend to use.
Defaults to :data:`pygsp.plotting.BACKEND`.
Returns
-------
fig : :class:`matplotlib.figure.Figure`
The figure the plot belongs to. Only with the matplotlib backend.
ax : :class:`matplotlib.axes.Axes`
The axes the plot belongs to. Only with the matplotlib backend.
Notes
-----
The orientation of directed edges is not shown. If edges exist in both
directions, they will be drawn on top of each other.
Examples
--------
>>> import matplotlib
>>> graph = graphs.Sensor(20, seed=42)
>>> graph.compute_fourier_basis(n_eigenvectors=4)
>>> _, _, weights = graph.get_edge_list()
>>> fig, ax = graph.plot(graph.U[:, 1], vertex_size=graph.dw,
... edge_color=weights)
>>> graph.plotting['vertex_size'] = 300
>>> graph.plotting['edge_width'] = 5
>>> graph.plotting['edge_style'] = '--'
>>> fig, ax = graph.plot(edge_width=weights, edge_color=(0, .8, .8, .5),
... vertex_color='black')
>>> fig, ax = graph.plot(vertex_size=graph.dw, indices=True,
... highlight=[17, 3, 16], edges=False)
"""
if not hasattr(G, 'coords') or G.coords is None:
raise AttributeError('Graph has no coordinate set. '
'Please run G.set_coordinates() first.')
check_2d_3d = (G.coords.ndim != 2) or (G.coords.shape[1] not in [2, 3])
if G.coords.ndim != 1 and check_2d_3d:
raise AttributeError('Coordinates should be in 1D, 2D or 3D space.')
if G.coords.shape[0] != G.N:
raise AttributeError('Graph needs G.N = {} coordinates.'.format(G.N))
if backend is None:
backend = BACKEND
def check_shape(signal, name, length, many=False):
if (signal.ndim == 0) or (signal.shape[0] != length):
txt = '{}: signal should have length {}.'
txt = txt.format(name, length)
raise ValueError(txt)
if (not many) and (signal.ndim != 1):
txt = '{}: can plot only one signal (not {}).'
txt = txt.format(name, signal.shape[1])
raise ValueError(txt)
def normalize(x):
"""Scale values in [intercept, 1]. Return 0.5 if constant.
Set intercept value in G.plotting["normalize_intercept"]
with value in [0, 1], default is .25.
"""
ptp = x.ptp()
if ptp == 0:
return np.full(x.shape, 0.5)
else:
intercept = G.plotting['normalize_intercept']
return (1. - intercept) * (x - x.min()) / ptp + intercept
def is_color(color):
if backend == 'matplotlib':
mpl, _, _ = _import_plt()
if mpl.colors.is_color_like(color):
return True # single color
try:
return all(map(mpl.colors.is_color_like, color)) # color list
except TypeError:
return False # e.g., color is an int
else:
return False # No support for pyqtgraph (yet).
if vertex_color is None:
limits = [0, 0]
colorbar = False
if backend == 'matplotlib':
vertex_color = (G.plotting['vertex_color'],)
elif is_color(vertex_color):
limits = [0, 0]
colorbar = False
else:
vertex_color = np.asanyarray(vertex_color).squeeze()
check_shape(vertex_color, 'Vertex color', G.n_vertices,
many=(G.coords.ndim == 1))
if vertex_size is None:
vertex_size = G.plotting['vertex_size']
elif not np.isscalar(vertex_size):
vertex_size = np.asanyarray(vertex_size).squeeze()
check_shape(vertex_size, 'Vertex size', G.n_vertices)
vertex_size = G.plotting['vertex_size'] * 4 * normalize(vertex_size)**2
if edges is None:
edges = G.Ne < 10e3
if edge_color is None:
edge_color = (G.plotting['edge_color'],)
elif not is_color(edge_color):
edge_color = np.asanyarray(edge_color).squeeze()
check_shape(edge_color, 'Edge color', G.n_edges)
edge_color = 0.9 * normalize(edge_color)
edge_color = [
np.tile(G.plotting['edge_color'][:3], [len(edge_color), 1]),
edge_color[:, np.newaxis],
]
edge_color = np.concatenate(edge_color, axis=1)
if edge_width is None:
edge_width = G.plotting['edge_width']
elif not np.isscalar(edge_width):
edge_width = np.array(edge_width).squeeze()
check_shape(edge_width, 'Edge width', G.n_edges)
edge_width = G.plotting['edge_width'] * 2 * normalize(edge_width)
if limits is None:
limits = [1.05*vertex_color.min(), 1.05*vertex_color.max()]
if title is None:
title = G.__repr__(limit=4)
if backend == 'pyqtgraph':
if vertex_color is None:
_qtg_plot_graph(G, edges=edges, vertex_size=vertex_size,
title=title)
else:
_qtg_plot_signal(G, signal=vertex_color, vertex_size=vertex_size,
edges=edges, limits=limits, title=title)
elif backend == 'matplotlib':
return _plt_plot_graph(G, vertex_color=vertex_color,
vertex_size=vertex_size, highlight=highlight,
edges=edges, indices=indices, colorbar=colorbar,
edge_color=edge_color, edge_width=edge_width,
limits=limits, ax=ax, title=title)
else:
raise ValueError('Unknown backend {}.'.format(backend)) |
def ignite(self, args=None):
"""
* Make the virtualenv
* Install dependencies into that virtualenv
* Start the program!
"""
if args is None:
args = sys.argv[1:]
if os.environ.get("MANAGED_VIRTUALENV", None) != "1":
made = self.make_virtualenv()
if made or os.environ.get("VENV_STARTER_CHECK_DEPS", None) != "0":
self.install_deps()
self.start_program(args) | * Make the virtualenv
* Install dependencies into that virtualenv
* Start the program! | Below is the instruction that describes the task:
### Input:
* Make the virtualenv
* Install dependencies into that virtualenv
* Start the program!
### Response:
def ignite(self, args=None):
"""
* Make the virtualenv
* Install dependencies into that virtualenv
* Start the program!
"""
if args is None:
args = sys.argv[1:]
if os.environ.get("MANAGED_VIRTUALENV", None) != "1":
made = self.make_virtualenv()
if made or os.environ.get("VENV_STARTER_CHECK_DEPS", None) != "0":
self.install_deps()
self.start_program(args) |
def get_hash(self, place):
"""
Return the Geohash of *place*.
If it's not present in the collection, ``None`` will be returned
instead.
"""
pickled_place = self._pickle(place)
try:
return self.redis.geohash(self.key, pickled_place)[0]
except (AttributeError, TypeError):
return None | Return the Geohash of *place*.
If it's not present in the collection, ``None`` will be returned
instead. | Below is the instruction that describes the task:
### Input:
Return the Geohash of *place*.
If it's not present in the collection, ``None`` will be returned
instead.
### Response:
def get_hash(self, place):
"""
Return the Geohash of *place*.
If it's not present in the collection, ``None`` will be returned
instead.
"""
pickled_place = self._pickle(place)
try:
return self.redis.geohash(self.key, pickled_place)[0]
except (AttributeError, TypeError):
return None |
def warnpy3k(message, category=None, stacklevel=1):
"""Issue a deprecation warning for Python 3.x related changes.
Warnings are omitted unless Python is started with the -3 option.
"""
if sys.py3kwarning:
if category is None:
category = DeprecationWarning
warn(message, category, stacklevel+1) | Issue a deprecation warning for Python 3.x related changes.
Warnings are omitted unless Python is started with the -3 option. | Below is the instruction that describes the task:
### Input:
Issue a deprecation warning for Python 3.x related changes.
Warnings are omitted unless Python is started with the -3 option.
### Response:
def warnpy3k(message, category=None, stacklevel=1):
"""Issue a deprecation warning for Python 3.x related changes.
Warnings are omitted unless Python is started with the -3 option.
"""
if sys.py3kwarning:
if category is None:
category = DeprecationWarning
warn(message, category, stacklevel+1) |
def compute_ratings(data=None):
""" Returns the tuples of (model_id, rating, sigma)
N.B. that `model_id` here is NOT the model number in the run
'data' is tuples of (winner, loser) model_ids (not model numbers)
"""
if data is None:
with sqlite3.connect("ratings.db") as db:
data = db.execute("select model_winner, model_loser from wins").fetchall()
model_ids = set([d[0] for d in data]).union(set([d[1] for d in data]))
# Map model_ids to a contiguous range.
ordered = sorted(model_ids)
new_id = {}
for i, m in enumerate(ordered):
new_id[m] = i
# A function to rewrite the model_ids in our pairs
def ilsr_data(d):
p1, p2 = d
p1 = new_id[p1]
p2 = new_id[p2]
return (p1, p2)
pairs = list(map(ilsr_data, data))
ilsr_param = choix.ilsr_pairwise(
len(ordered),
pairs,
alpha=0.0001,
max_iter=800)
hessian = choix.opt.PairwiseFcts(pairs, penalty=.1).hessian(ilsr_param)
std_err = np.sqrt(np.diagonal(np.linalg.inv(hessian)))
# Elo conversion
elo_mult = 400 / math.log(10)
min_rating = min(ilsr_param)
ratings = {}
for model_id, param, err in zip(ordered, ilsr_param, std_err):
ratings[model_id] = (elo_mult * (param - min_rating), elo_mult * err)
return ratings | Returns the tuples of (model_id, rating, sigma)
N.B. that `model_id` here is NOT the model number in the run
'data' is tuples of (winner, loser) model_ids (not model numbers) | Below is the instruction that describes the task:
### Input:
Returns the tuples of (model_id, rating, sigma)
N.B. that `model_id` here is NOT the model number in the run
'data' is tuples of (winner, loser) model_ids (not model numbers)
### Response:
def compute_ratings(data=None):
""" Returns the tuples of (model_id, rating, sigma)
N.B. that `model_id` here is NOT the model number in the run
'data' is tuples of (winner, loser) model_ids (not model numbers)
"""
if data is None:
with sqlite3.connect("ratings.db") as db:
data = db.execute("select model_winner, model_loser from wins").fetchall()
model_ids = set([d[0] for d in data]).union(set([d[1] for d in data]))
# Map model_ids to a contiguous range.
ordered = sorted(model_ids)
new_id = {}
for i, m in enumerate(ordered):
new_id[m] = i
# A function to rewrite the model_ids in our pairs
def ilsr_data(d):
p1, p2 = d
p1 = new_id[p1]
p2 = new_id[p2]
return (p1, p2)
pairs = list(map(ilsr_data, data))
ilsr_param = choix.ilsr_pairwise(
len(ordered),
pairs,
alpha=0.0001,
max_iter=800)
hessian = choix.opt.PairwiseFcts(pairs, penalty=.1).hessian(ilsr_param)
std_err = np.sqrt(np.diagonal(np.linalg.inv(hessian)))
# Elo conversion
elo_mult = 400 / math.log(10)
min_rating = min(ilsr_param)
ratings = {}
for model_id, param, err in zip(ordered, ilsr_param, std_err):
ratings[model_id] = (elo_mult * (param - min_rating), elo_mult * err)
return ratings |
def send_config_set(
self,
config_commands=None,
exit_config_mode=False,
delay_factor=1,
max_loops=150,
strip_prompt=False,
strip_command=False,
config_mode_command=None,
):
"""Remain in configuration mode."""
return super(VyOSSSH, self).send_config_set(
config_commands=config_commands,
exit_config_mode=exit_config_mode,
delay_factor=delay_factor,
max_loops=max_loops,
strip_prompt=strip_prompt,
strip_command=strip_command,
config_mode_command=config_mode_command,
) | Remain in configuration mode. | Below is the instruction that describes the task:
### Input:
Remain in configuration mode.
### Response:
def send_config_set(
self,
config_commands=None,
exit_config_mode=False,
delay_factor=1,
max_loops=150,
strip_prompt=False,
strip_command=False,
config_mode_command=None,
):
"""Remain in configuration mode."""
return super(VyOSSSH, self).send_config_set(
config_commands=config_commands,
exit_config_mode=exit_config_mode,
delay_factor=delay_factor,
max_loops=max_loops,
strip_prompt=strip_prompt,
strip_command=strip_command,
config_mode_command=config_mode_command,
) |
def pipeline_delete(id, hosts=None, profile=None):
'''
.. versionadded:: 2017.7.0
Delete Ingest pipeline. Available since Elasticsearch 5.0.
id
Pipeline id
CLI example::
salt myminion elasticsearch.pipeline_delete mypipeline
'''
es = _get_instance(hosts, profile)
try:
ret = es.ingest.delete_pipeline(id=id)
return ret.get('acknowledged', False)
except elasticsearch.NotFoundError:
return True
except elasticsearch.TransportError as e:
raise CommandExecutionError("Cannot delete pipeline {0}, server returned code {1} with message {2}".format(id, e.status_code, e.error))
except AttributeError:
raise CommandExecutionError("Method is applicable only for Elasticsearch 5.0+") | .. versionadded:: 2017.7.0
Delete Ingest pipeline. Available since Elasticsearch 5.0.
id
Pipeline id
CLI example::
salt myminion elasticsearch.pipeline_delete mypipeline | Below is the instruction that describes the task:
### Input:
.. versionadded:: 2017.7.0
Delete Ingest pipeline. Available since Elasticsearch 5.0.
id
Pipeline id
CLI example::
salt myminion elasticsearch.pipeline_delete mypipeline
### Response:
def pipeline_delete(id, hosts=None, profile=None):
'''
.. versionadded:: 2017.7.0
Delete Ingest pipeline. Available since Elasticsearch 5.0.
id
Pipeline id
CLI example::
salt myminion elasticsearch.pipeline_delete mypipeline
'''
es = _get_instance(hosts, profile)
try:
ret = es.ingest.delete_pipeline(id=id)
return ret.get('acknowledged', False)
except elasticsearch.NotFoundError:
return True
except elasticsearch.TransportError as e:
raise CommandExecutionError("Cannot delete pipeline {0}, server returned code {1} with message {2}".format(id, e.status_code, e.error))
except AttributeError:
raise CommandExecutionError("Method is applicable only for Elasticsearch 5.0+") |
def bulk_shear(pressure, u, v, heights=None, bottom=None, depth=None):
r"""Calculate bulk shear through a layer.
Layer top and bottom specified in meters or pressure.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure profile
u : `pint.Quantity`
U-component of wind.
v : `pint.Quantity`
V-component of wind.
height : `pint.Quantity`, optional
Heights from sounding
depth: `pint.Quantity`, optional
The depth of the layer in meters or hPa. Defaults to 100 hPa.
bottom: `pint.Quantity`, optional
The bottom of the layer in height or pressure coordinates.
If using a height, it must be in the same coordinates as the given
heights (i.e., don't use meters AGL unless given heights
are in meters AGL.) Defaults to the highest pressure or lowest height given.
Returns
-------
u_shr: `pint.Quantity`
u-component of layer bulk shear
v_shr: `pint.Quantity`
v-component of layer bulk shear
"""
_, u_layer, v_layer = get_layer(pressure, u, v, heights=heights,
bottom=bottom, depth=depth)
u_shr = u_layer[-1] - u_layer[0]
v_shr = v_layer[-1] - v_layer[0]
return u_shr, v_shr | r"""Calculate bulk shear through a layer.
Layer top and bottom specified in meters or pressure.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure profile
u : `pint.Quantity`
U-component of wind.
v : `pint.Quantity`
V-component of wind.
height : `pint.Quantity`, optional
Heights from sounding
depth: `pint.Quantity`, optional
The depth of the layer in meters or hPa. Defaults to 100 hPa.
bottom: `pint.Quantity`, optional
The bottom of the layer in height or pressure coordinates.
If using a height, it must be in the same coordinates as the given
heights (i.e., don't use meters AGL unless given heights
are in meters AGL.) Defaults to the highest pressure or lowest height given.
Returns
-------
u_shr: `pint.Quantity`
u-component of layer bulk shear
v_shr: `pint.Quantity`
v-component of layer bulk shear | Below is the instruction that describes the task:
### Input:
r"""Calculate bulk shear through a layer.
Layer top and bottom specified in meters or pressure.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure profile
u : `pint.Quantity`
U-component of wind.
v : `pint.Quantity`
V-component of wind.
height : `pint.Quantity`, optional
Heights from sounding
depth: `pint.Quantity`, optional
The depth of the layer in meters or hPa. Defaults to 100 hPa.
bottom: `pint.Quantity`, optional
The bottom of the layer in height or pressure coordinates.
If using a height, it must be in the same coordinates as the given
heights (i.e., don't use meters AGL unless given heights
are in meters AGL.) Defaults to the highest pressure or lowest height given.
Returns
-------
u_shr: `pint.Quantity`
u-component of layer bulk shear
v_shr: `pint.Quantity`
v-component of layer bulk shear
### Response:
def bulk_shear(pressure, u, v, heights=None, bottom=None, depth=None):
r"""Calculate bulk shear through a layer.
Layer top and bottom specified in meters or pressure.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure profile
u : `pint.Quantity`
U-component of wind.
v : `pint.Quantity`
V-component of wind.
height : `pint.Quantity`, optional
Heights from sounding
depth: `pint.Quantity`, optional
The depth of the layer in meters or hPa. Defaults to 100 hPa.
bottom: `pint.Quantity`, optional
The bottom of the layer in height or pressure coordinates.
If using a height, it must be in the same coordinates as the given
heights (i.e., don't use meters AGL unless given heights
are in meters AGL.) Defaults to the highest pressure or lowest height given.
Returns
-------
u_shr: `pint.Quantity`
u-component of layer bulk shear
v_shr: `pint.Quantity`
v-component of layer bulk shear
"""
_, u_layer, v_layer = get_layer(pressure, u, v, heights=heights,
bottom=bottom, depth=depth)
u_shr = u_layer[-1] - u_layer[0]
v_shr = v_layer[-1] - v_layer[0]
return u_shr, v_shr |
def expected_values(self, beta):
""" Expected values of the function given the covariance matrix and hyperparameters
Parameters
----------
beta : np.ndarray
Contains untransformed values for latent variables
Returns
----------
The expected values of the function
"""
parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])])
L = self._L(parm)
alpha = self._alpha(L)
return np.dot(np.transpose(self.kernel.K(parm)), alpha) | Expected values of the function given the covariance matrix and hyperparameters
Parameters
----------
beta : np.ndarray
Contains untransformed values for latent variables
Returns
----------
The expected values of the function | Below is the instruction that describes the task:
### Input:
Expected values of the function given the covariance matrix and hyperparameters
Parameters
----------
beta : np.ndarray
Contains untransformed values for latent variables
Returns
----------
The expected values of the function
### Response:
def expected_values(self, beta):
""" Expected values of the function given the covariance matrix and hyperparameters
Parameters
----------
beta : np.ndarray
Contains untransformed values for latent variables
Returns
----------
The expected values of the function
"""
parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])])
L = self._L(parm)
alpha = self._alpha(L)
return np.dot(np.transpose(self.kernel.K(parm)), alpha) |
def next_frame_pixel_noise():
"""Basic 2-frame conv model with pixel noise."""
hparams = next_frame_basic_deterministic()
hparams.add_hparam("video_modality_input_noise", 0.05)
hparams.bottom["inputs"] = modalities.video_pixel_noise_bottom
hparams.top["inputs"] = modalities.video_top
return hparams | Basic 2-frame conv model with pixel noise. | Below is the instruction that describes the task:
### Input:
Basic 2-frame conv model with pixel noise.
### Response:
def next_frame_pixel_noise():
"""Basic 2-frame conv model with pixel noise."""
hparams = next_frame_basic_deterministic()
hparams.add_hparam("video_modality_input_noise", 0.05)
hparams.bottom["inputs"] = modalities.video_pixel_noise_bottom
hparams.top["inputs"] = modalities.video_top
return hparams |
def get_by(self, name):
"""get a todo list ux by name
:rtype: TodoListUX
"""
item = self.app.get_by(name)
return TodoListUX(ux=self, controlled_list=item) | get a todo list ux by name
:rtype: TodoListUX | Below is the instruction that describes the task:
### Input:
get a todo list ux by name
:rtype: TodoListUX
### Response:
def get_by(self, name):
"""get a todo list ux by name
:rtype: TodoListUX
"""
item = self.app.get_by(name)
return TodoListUX(ux=self, controlled_list=item) |
def print_statements(self):
"""Print all INDRA Statements collected by the processors."""
for i, stmt in enumerate(self.statements):
print("%s: %s" % (i, stmt)) | Print all INDRA Statements collected by the processors. | Below is the instruction that describes the task:
### Input:
Print all INDRA Statements collected by the processors.
### Response:
def print_statements(self):
"""Print all INDRA Statements collected by the processors."""
for i, stmt in enumerate(self.statements):
print("%s: %s" % (i, stmt)) |
def _find_player_id(self, row):
"""
Find the player's ID.
Find the player's ID as embedded in the 'data-append-csv' attribute,
such as 'zettehe01' for Henrik Zetterberg.
Parameters
----------
row : PyQuery object
A PyQuery object representing a single row in a boxscore table for
a single player.
Returns
-------
str
Returns a ``string`` of the player's ID, such as 'zettehe01' for
Henrik Zetterberg.
"""
player_id = row('th').attr('data-append-csv')
if not player_id:
player_id = row('td').attr('data-append-csv')
return player_id | Find the player's ID.
Find the player's ID as embedded in the 'data-append-csv' attribute,
such as 'zettehe01' for Henrik Zetterberg.
Parameters
----------
row : PyQuery object
A PyQuery object representing a single row in a boxscore table for
a single player.
Returns
-------
str
Returns a ``string`` of the player's ID, such as 'zettehe01' for
Henrik Zetterberg. | Below is the instruction that describes the task:
### Input:
Find the player's ID.
Find the player's ID as embedded in the 'data-append-csv' attribute,
such as 'zettehe01' for Henrik Zetterberg.
Parameters
----------
row : PyQuery object
A PyQuery object representing a single row in a boxscore table for
a single player.
Returns
-------
str
Returns a ``string`` of the player's ID, such as 'zettehe01' for
Henrik Zetterberg.
### Response:
def _find_player_id(self, row):
"""
Find the player's ID.
Find the player's ID as embedded in the 'data-append-csv' attribute,
such as 'zettehe01' for Henrik Zetterberg.
Parameters
----------
row : PyQuery object
A PyQuery object representing a single row in a boxscore table for
a single player.
Returns
-------
str
Returns a ``string`` of the player's ID, such as 'zettehe01' for
Henrik Zetterberg.
"""
player_id = row('th').attr('data-append-csv')
if not player_id:
player_id = row('td').attr('data-append-csv')
return player_id |
def update(self, *others):
"""
Update the set, adding elements from all *others*.
:param others: Iterables, each one as a single positional argument.
:rtype: None
.. note::
If all *others* are :class:`Set` instances, the operation
is performed completely in Redis. Otherwise, values are retrieved
from Redis and the operation is performed in Python.
"""
return self._op_update_helper(
tuple(others), operator.or_, 'sunionstore', update=True
) | Update the set, adding elements from all *others*.
:param others: Iterables, each one as a single positional argument.
:rtype: None
.. note::
If all *others* are :class:`Set` instances, the operation
is performed completely in Redis. Otherwise, values are retrieved
from Redis and the operation is performed in Python. | Below is the instruction that describes the task:
### Input:
Update the set, adding elements from all *others*.
:param others: Iterables, each one as a single positional argument.
:rtype: None
.. note::
If all *others* are :class:`Set` instances, the operation
is performed completely in Redis. Otherwise, values are retrieved
from Redis and the operation is performed in Python.
### Response:
def update(self, *others):
"""
Update the set, adding elements from all *others*.
:param others: Iterables, each one as a single positional argument.
:rtype: None
.. note::
If all *others* are :class:`Set` instances, the operation
is performed completely in Redis. Otherwise, values are retrieved
from Redis and the operation is performed in Python.
"""
return self._op_update_helper(
tuple(others), operator.or_, 'sunionstore', update=True
) |
def generate_token(key, user_id, action_id='', when=None):
"""Generates a URL-safe token for the given user, action, time tuple.
Args:
key: secret key to use.
user_id: the user ID of the authenticated user.
action_id: a string identifier of the action they requested
authorization for.
when: the time in seconds since the epoch at which the user was
authorized for this action. If not set the current time is used.
Returns:
A string XSRF protection token.
"""
digester = hmac.new(_helpers._to_bytes(key, encoding='utf-8'))
digester.update(_helpers._to_bytes(str(user_id), encoding='utf-8'))
digester.update(DELIMITER)
digester.update(_helpers._to_bytes(action_id, encoding='utf-8'))
digester.update(DELIMITER)
when = _helpers._to_bytes(str(when or int(time.time())), encoding='utf-8')
digester.update(when)
digest = digester.digest()
token = base64.urlsafe_b64encode(digest + DELIMITER + when)
return token | Generates a URL-safe token for the given user, action, time tuple.
Args:
key: secret key to use.
user_id: the user ID of the authenticated user.
action_id: a string identifier of the action they requested
authorization for.
when: the time in seconds since the epoch at which the user was
authorized for this action. If not set the current time is used.
Returns:
A string XSRF protection token. | Below is the instruction that describes the task:
### Input:
Generates a URL-safe token for the given user, action, time tuple.
Args:
key: secret key to use.
user_id: the user ID of the authenticated user.
action_id: a string identifier of the action they requested
authorization for.
when: the time in seconds since the epoch at which the user was
authorized for this action. If not set the current time is used.
Returns:
A string XSRF protection token.
### Response:
def generate_token(key, user_id, action_id='', when=None):
"""Generates a URL-safe token for the given user, action, time tuple.
Args:
key: secret key to use.
user_id: the user ID of the authenticated user.
action_id: a string identifier of the action they requested
authorization for.
when: the time in seconds since the epoch at which the user was
authorized for this action. If not set the current time is used.
Returns:
A string XSRF protection token.
"""
digester = hmac.new(_helpers._to_bytes(key, encoding='utf-8'))
digester.update(_helpers._to_bytes(str(user_id), encoding='utf-8'))
digester.update(DELIMITER)
digester.update(_helpers._to_bytes(action_id, encoding='utf-8'))
digester.update(DELIMITER)
when = _helpers._to_bytes(str(when or int(time.time())), encoding='utf-8')
digester.update(when)
digest = digester.digest()
token = base64.urlsafe_b64encode(digest + DELIMITER + when)
return token |
def create(self, name, hwaddr=None, network=None, nat=False, settings={}):
"""
Create a bridge with the given name, hwaddr and networking setup
:param name: name of the bridge (must be unique), 15 characters or less, and not equal to "default".
:param hwaddr: MAC address of the bridge. If none, a one will be created for u
:param network: Networking mode, options are none, static, and dnsmasq
:param nat: If true, SNAT will be enabled on this bridge. (IF and ONLY IF an IP is set on the bridge
via the settings, otherwise flag will be ignored) (the cidr attribute of either static, or dnsmasq modes)
:param settings: Networking setting, depending on the selected mode.
none:
no settings, bridge won't get any ip settings
static:
settings={'cidr': 'ip/net'}
bridge will get assigned the given IP address
dnsmasq:
settings={'cidr': 'ip/net', 'start': 'ip', 'end': 'ip'}
bridge will get assigned the ip in cidr
and each running container that is attached to this IP will get
IP from the start/end range. Netmask of the range is the netmask
part of the provided cidr.
if nat is true, SNAT rules will be automatically added in the firewall.
"""
args = {
'name': name,
'hwaddr': hwaddr,
'network': {
'mode': network,
'nat': nat,
'settings': settings,
}
}
self._bridge_create_chk.check(args)
return self._client.json('bridge.create', args) | Create a bridge with the given name, hwaddr and networking setup
:param name: name of the bridge (must be unique), 15 characters or less, and not equal to "default".
:param hwaddr: MAC address of the bridge. If none, a one will be created for u
:param network: Networking mode, options are none, static, and dnsmasq
:param nat: If true, SNAT will be enabled on this bridge. (IF and ONLY IF an IP is set on the bridge
via the settings, otherwise flag will be ignored) (the cidr attribute of either static, or dnsmasq modes)
:param settings: Networking setting, depending on the selected mode.
none:
no settings, bridge won't get any ip settings
static:
settings={'cidr': 'ip/net'}
bridge will get assigned the given IP address
dnsmasq:
settings={'cidr': 'ip/net', 'start': 'ip', 'end': 'ip'}
bridge will get assigned the ip in cidr
and each running container that is attached to this IP will get
IP from the start/end range. Netmask of the range is the netmask
part of the provided cidr.
if nat is true, SNAT rules will be automatically added in the firewall. | Below is the instruction that describes the task:
### Input:
Create a bridge with the given name, hwaddr and networking setup
:param name: name of the bridge (must be unique), 15 characters or less, and not equal to "default".
:param hwaddr: MAC address of the bridge. If none, a one will be created for u
:param network: Networking mode, options are none, static, and dnsmasq
:param nat: If true, SNAT will be enabled on this bridge. (IF and ONLY IF an IP is set on the bridge
via the settings, otherwise flag will be ignored) (the cidr attribute of either static, or dnsmasq modes)
:param settings: Networking setting, depending on the selected mode.
none:
no settings, bridge won't get any ip settings
static:
settings={'cidr': 'ip/net'}
bridge will get assigned the given IP address
dnsmasq:
settings={'cidr': 'ip/net', 'start': 'ip', 'end': 'ip'}
bridge will get assigned the ip in cidr
and each running container that is attached to this IP will get
IP from the start/end range. Netmask of the range is the netmask
part of the provided cidr.
if nat is true, SNAT rules will be automatically added in the firewall.
### Response:
def create(self, name, hwaddr=None, network=None, nat=False, settings={}):
"""
Create a bridge with the given name, hwaddr and networking setup
:param name: name of the bridge (must be unique), 15 characters or less, and not equal to "default".
:param hwaddr: MAC address of the bridge. If none, a one will be created for u
:param network: Networking mode, options are none, static, and dnsmasq
:param nat: If true, SNAT will be enabled on this bridge. (IF and ONLY IF an IP is set on the bridge
via the settings, otherwise flag will be ignored) (the cidr attribute of either static, or dnsmasq modes)
:param settings: Networking setting, depending on the selected mode.
none:
no settings, bridge won't get any ip settings
static:
settings={'cidr': 'ip/net'}
bridge will get assigned the given IP address
dnsmasq:
settings={'cidr': 'ip/net', 'start': 'ip', 'end': 'ip'}
bridge will get assigned the ip in cidr
and each running container that is attached to this IP will get
IP from the start/end range. Netmask of the range is the netmask
part of the provided cidr.
if nat is true, SNAT rules will be automatically added in the firewall.
"""
args = {
'name': name,
'hwaddr': hwaddr,
'network': {
'mode': network,
'nat': nat,
'settings': settings,
}
}
self._bridge_create_chk.check(args)
return self._client.json('bridge.create', args) |
def user_id(self):
"""Who created the event (:class:`~hangups.user.UserID`)."""
return user.UserID(chat_id=self._event.sender_id.chat_id,
gaia_id=self._event.sender_id.gaia_id) | Who created the event (:class:`~hangups.user.UserID`). | Below is the instruction that describes the task:
### Input:
Who created the event (:class:`~hangups.user.UserID`).
### Response:
def user_id(self):
"""Who created the event (:class:`~hangups.user.UserID`)."""
return user.UserID(chat_id=self._event.sender_id.chat_id,
gaia_id=self._event.sender_id.gaia_id) |
def list_locks(account_id, resource_type=None, resource_id=None):
"""Show extant locks and unlocks.
"""
locks = Client(BASE_URL, account_id).list_locks().json()
for r in locks:
if 'LockDate' in r:
r['LockDate'] = datetime.fromtimestamp(r['LockDate'])
if 'RevisionDate' in r:
r['RevisionDate'] = datetime.fromtimestamp(r['RevisionDate'])
print(tabulate.tabulate(
locks,
headers="keys",
tablefmt='fancy_grid')) | Show extant locks and unlocks. | Below is the instruction that describes the task:
### Input:
Show extant locks and unlocks.
### Response:
def list_locks(account_id, resource_type=None, resource_id=None):
"""Show extant locks and unlocks.
"""
locks = Client(BASE_URL, account_id).list_locks().json()
for r in locks:
if 'LockDate' in r:
r['LockDate'] = datetime.fromtimestamp(r['LockDate'])
if 'RevisionDate' in r:
r['RevisionDate'] = datetime.fromtimestamp(r['RevisionDate'])
print(tabulate.tabulate(
locks,
headers="keys",
tablefmt='fancy_grid')) |
def wait_for_server(pbclient=None, dc_id=None, serverid=None,
indicator='state', state='AVAILABLE', timeout=300):
'''
wait for a server/VM to reach a defined state for a specified time
indicator := {state|vmstate} specifies if server or VM stat is tested
state specifies the status the indicator should have
'''
if pbclient is None:
raise ValueError("argument 'pbclient' must not be None")
if dc_id is None:
raise ValueError("argument 'dc_id' must not be None")
if serverid is None:
raise ValueError("argument 'serverid' must not be None")
total_sleep_time = 0
seconds = 5
while total_sleep_time < timeout:
time.sleep(seconds)
total_sleep_time += seconds
if total_sleep_time == 60:
# Increase polling interval after one minute
seconds = 10
elif total_sleep_time == 600:
# Increase polling interval after 10 minutes
seconds = 20
server = getServerStates(pbclient, dc_id, serverid)
if server[indicator] == state:
break
# end while(total_sleep_time)
return server | wait for a server/VM to reach a defined state for a specified time
indicator := {state|vmstate} specifies if server or VM stat is tested
state specifies the status the indicator should have | Below is the instruction that describes the task:
### Input:
wait for a server/VM to reach a defined state for a specified time
indicator := {state|vmstate} specifies if server or VM stat is tested
state specifies the status the indicator should have
### Response:
def wait_for_server(pbclient=None, dc_id=None, serverid=None,
indicator='state', state='AVAILABLE', timeout=300):
'''
wait for a server/VM to reach a defined state for a specified time
indicator := {state|vmstate} specifies if server or VM stat is tested
state specifies the status the indicator should have
'''
if pbclient is None:
raise ValueError("argument 'pbclient' must not be None")
if dc_id is None:
raise ValueError("argument 'dc_id' must not be None")
if serverid is None:
raise ValueError("argument 'serverid' must not be None")
total_sleep_time = 0
seconds = 5
while total_sleep_time < timeout:
time.sleep(seconds)
total_sleep_time += seconds
if total_sleep_time == 60:
# Increase polling interval after one minute
seconds = 10
elif total_sleep_time == 600:
# Increase polling interval after 10 minutes
seconds = 20
server = getServerStates(pbclient, dc_id, serverid)
if server[indicator] == state:
break
# end while(total_sleep_time)
return server |
def match_blocks(hash_func, old_children, new_children):
"""Use difflib to find matching blocks."""
sm = difflib.SequenceMatcher(
_is_junk,
a=[hash_func(c) for c in old_children],
b=[hash_func(c) for c in new_children],
)
return sm | Use difflib to find matching blocks. | Below is the the instruction that describes the task:
### Input:
Use difflib to find matching blocks.
### Response:
def match_blocks(hash_func, old_children, new_children):
"""Use difflib to find matching blocks."""
sm = difflib.SequenceMatcher(
_is_junk,
a=[hash_func(c) for c in old_children],
b=[hash_func(c) for c in new_children],
)
return sm |
def get_value(self, field_name):
""" returns a value for a given field name """
if field_name in self.fields:
return self._dict['attributes'][field_name]
elif field_name.upper() in ['SHAPE', 'SHAPE@', "GEOMETRY"]:
return self._dict['geometry']
return None | returns a value for a given field name | Below is the the instruction that describes the task:
### Input:
returns a value for a given field name
### Response:
def get_value(self, field_name):
""" returns a value for a given field name """
if field_name in self.fields:
return self._dict['attributes'][field_name]
elif field_name.upper() in ['SHAPE', 'SHAPE@', "GEOMETRY"]:
return self._dict['geometry']
return None |
def action_set(method_name):
"""
Creates a setter that will call the action method with the context's
key as first parameter and the value as second parameter.
@param method_name: the name of a method belonging to the action.
@type method_name: str
"""
def action_set(value, context, **_params):
method = getattr(context["action"], method_name)
return _set(method, context["key"], value, (), {})
return action_set | Creates a setter that will call the action method with the context's
key as first parameter and the value as second parameter.
@param method_name: the name of a method belonging to the action.
@type method_name: str | Below is the the instruction that describes the task:
### Input:
Creates a setter that will call the action method with the context's
key as first parameter and the value as second parameter.
@param method_name: the name of a method belonging to the action.
@type method_name: str
### Response:
def action_set(method_name):
"""
Creates a setter that will call the action method with the context's
key as first parameter and the value as second parameter.
@param method_name: the name of a method belonging to the action.
@type method_name: str
"""
def action_set(value, context, **_params):
method = getattr(context["action"], method_name)
return _set(method, context["key"], value, (), {})
return action_set |
def to_valueset(self, state):
"""
Convert to a ValueSet instance
:param state: A state
:return: The converted ValueSet instance
"""
return state.solver.VS(state.arch.bits, self.region, self.region_base_addr, self.address) | Convert to a ValueSet instance
:param state: A state
:return: The converted ValueSet instance | Below is the the instruction that describes the task:
### Input:
Convert to a ValueSet instance
:param state: A state
:return: The converted ValueSet instance
### Response:
def to_valueset(self, state):
"""
Convert to a ValueSet instance
:param state: A state
:return: The converted ValueSet instance
"""
return state.solver.VS(state.arch.bits, self.region, self.region_base_addr, self.address) |
def expect(self, pattern, timeout=-1):
"""Waits on the given pattern to appear in std_out"""
if self.blocking:
raise RuntimeError("expect can only be used on non-blocking commands.")
try:
self.subprocess.expect(pattern=pattern, timeout=timeout)
except pexpect.EOF:
pass | Waits on the given pattern to appear in std_out | Below is the the instruction that describes the task:
### Input:
Waits on the given pattern to appear in std_out
### Response:
def expect(self, pattern, timeout=-1):
"""Waits on the given pattern to appear in std_out"""
if self.blocking:
raise RuntimeError("expect can only be used on non-blocking commands.")
try:
self.subprocess.expect(pattern=pattern, timeout=timeout)
except pexpect.EOF:
pass |
def n1qlQueryAll(self, *args, **kwargs):
"""
Execute a N1QL query, retrieving all rows.
This method returns a :class:`Deferred` object which is executed
with a :class:`~.N1QLRequest` object. The object may be iterated
over to yield the rows in the result set.
This method is similar to :meth:`~couchbase.bucket.Bucket.n1ql_query`
in its arguments.
Example::
def handler(req):
for row in req:
# ... handle row
d = cb.n1qlQueryAll('SELECT * from `travel-sample` WHERE city=$1`,
'Reno')
d.addCallback(handler)
:return: A :class:`Deferred`
.. seealso:: :meth:`~couchbase.bucket.Bucket.n1ql_query`
"""
if not self.connected:
cb = lambda x: self.n1qlQueryAll(*args, **kwargs)
return self.connect().addCallback(cb)
kwargs['itercls'] = BatchedN1QLRequest
o = super(RawBucket, self).n1ql_query(*args, **kwargs)
o.start()
return o._getDeferred() | Execute a N1QL query, retrieving all rows.
This method returns a :class:`Deferred` object which is executed
with a :class:`~.N1QLRequest` object. The object may be iterated
over to yield the rows in the result set.
This method is similar to :meth:`~couchbase.bucket.Bucket.n1ql_query`
in its arguments.
Example::
def handler(req):
for row in req:
# ... handle row
d = cb.n1qlQueryAll('SELECT * from `travel-sample` WHERE city=$1`,
'Reno')
d.addCallback(handler)
:return: A :class:`Deferred`
.. seealso:: :meth:`~couchbase.bucket.Bucket.n1ql_query` | Below is the the instruction that describes the task:
### Input:
Execute a N1QL query, retrieving all rows.
This method returns a :class:`Deferred` object which is executed
with a :class:`~.N1QLRequest` object. The object may be iterated
over to yield the rows in the result set.
This method is similar to :meth:`~couchbase.bucket.Bucket.n1ql_query`
in its arguments.
Example::
def handler(req):
for row in req:
# ... handle row
d = cb.n1qlQueryAll('SELECT * from `travel-sample` WHERE city=$1`,
'Reno')
d.addCallback(handler)
:return: A :class:`Deferred`
.. seealso:: :meth:`~couchbase.bucket.Bucket.n1ql_query`
### Response:
def n1qlQueryAll(self, *args, **kwargs):
"""
Execute a N1QL query, retrieving all rows.
This method returns a :class:`Deferred` object which is executed
with a :class:`~.N1QLRequest` object. The object may be iterated
over to yield the rows in the result set.
This method is similar to :meth:`~couchbase.bucket.Bucket.n1ql_query`
in its arguments.
Example::
def handler(req):
for row in req:
# ... handle row
d = cb.n1qlQueryAll('SELECT * from `travel-sample` WHERE city=$1`,
'Reno')
d.addCallback(handler)
:return: A :class:`Deferred`
.. seealso:: :meth:`~couchbase.bucket.Bucket.n1ql_query`
"""
if not self.connected:
cb = lambda x: self.n1qlQueryAll(*args, **kwargs)
return self.connect().addCallback(cb)
kwargs['itercls'] = BatchedN1QLRequest
o = super(RawBucket, self).n1ql_query(*args, **kwargs)
o.start()
return o._getDeferred() |
def _pump(self):
'''
Attempts to process the next command in the queue if one exists and the
driver is not currently busy.
'''
while (not self._busy) and len(self._queue):
cmd = self._queue.pop(0)
self._name = cmd[2]
try:
cmd[0](*cmd[1])
except Exception as e:
self.notify('error', exception=e)
if self._debug:
traceback.print_exc() | Attempts to process the next command in the queue if one exists and the
driver is not currently busy. | Below is the the instruction that describes the task:
### Input:
Attempts to process the next command in the queue if one exists and the
driver is not currently busy.
### Response:
def _pump(self):
'''
Attempts to process the next command in the queue if one exists and the
driver is not currently busy.
'''
while (not self._busy) and len(self._queue):
cmd = self._queue.pop(0)
self._name = cmd[2]
try:
cmd[0](*cmd[1])
except Exception as e:
self.notify('error', exception=e)
if self._debug:
traceback.print_exc() |
def precision(self):
"""
Inverse of posterior covariance
"""
if self._precision is None:
cov = np.atleast_3d(self.covariance)
self._precision = np.zeros(cov.shape) # if one covariance per dimension
for p in range(cov.shape[-1]):
self._precision[:, :, p] = pdinv(cov[:, :, p])[0]
return self._precision | Inverse of posterior covariance | Below is the the instruction that describes the task:
### Input:
Inverse of posterior covariance
### Response:
def precision(self):
"""
Inverse of posterior covariance
"""
if self._precision is None:
cov = np.atleast_3d(self.covariance)
self._precision = np.zeros(cov.shape) # if one covariance per dimension
for p in range(cov.shape[-1]):
self._precision[:, :, p] = pdinv(cov[:, :, p])[0]
return self._precision |
def handle_simple_sequencing(func):
"""decorator, deal with simple sequencing cases"""
from .assessment import assessment_utilities
def wrapper(*args, **kwargs):
# re-order these things because have to delete the part after
# removing it from the parent sequence map
if 'create_assessment_part' in func.__name__:
result = func(*args, **kwargs)
assessment_utilities.update_parent_sequence_map(result)
elif func.__name__ == 'delete_assessment_part':
# args[1] is the assessment_part_id
assessment_utilities.remove_from_parent_sequence_map(*args)
result = func(*args, **kwargs)
else:
result = func(*args, **kwargs)
return result
return wrapper | decorator, deal with simple sequencing cases | Below is the the instruction that describes the task:
### Input:
decorator, deal with simple sequencing cases
### Response:
def handle_simple_sequencing(func):
"""decorator, deal with simple sequencing cases"""
from .assessment import assessment_utilities
def wrapper(*args, **kwargs):
# re-order these things because have to delete the part after
# removing it from the parent sequence map
if 'create_assessment_part' in func.__name__:
result = func(*args, **kwargs)
assessment_utilities.update_parent_sequence_map(result)
elif func.__name__ == 'delete_assessment_part':
# args[1] is the assessment_part_id
assessment_utilities.remove_from_parent_sequence_map(*args)
result = func(*args, **kwargs)
else:
result = func(*args, **kwargs)
return result
return wrapper |
def optional_service_connections(self):
'''Finds all service connections in which one or more components are
not required.
If all the components involved in a connection are required, that
connection is also required. If one or more are not required, that
connection is optional.
Example:
>>> s = RtsProfile(xml_spec=open('test/rtsystem.xml').read())
>>> len(s.optional_service_connections())
0
'''
result = []
for conn in self._service_port_connectors:
source_comp = self.find_comp_by_target(conn.source_service_port)
target_comp = self.find_comp_by_target(conn.target_service_port)
if not source_comp.is_required or not target_comp.is_required:
result.append(conn)
return result | Finds all service connections in which one or more components are
not required.
If all the components involved in a connection are required, that
connection is also required. If one or more are not required, that
connection is optional.
Example:
>>> s = RtsProfile(xml_spec=open('test/rtsystem.xml').read())
>>> len(s.optional_service_connections())
0 | Below is the the instruction that describes the task:
### Input:
Finds all service connections in which one or more components are
not required.
If all the components involved in a connection are required, that
connection is also required. If one or more are not required, that
connection is optional.
Example:
>>> s = RtsProfile(xml_spec=open('test/rtsystem.xml').read())
>>> len(s.optional_service_connections())
0
### Response:
def optional_service_connections(self):
'''Finds all service connections in which one or more components are
not required.
If all the components involved in a connection are required, that
connection is also required. If one or more are not required, that
connection is optional.
Example:
>>> s = RtsProfile(xml_spec=open('test/rtsystem.xml').read())
>>> len(s.optional_service_connections())
0
'''
result = []
for conn in self._service_port_connectors:
source_comp = self.find_comp_by_target(conn.source_service_port)
target_comp = self.find_comp_by_target(conn.target_service_port)
if not source_comp.is_required or not target_comp.is_required:
result.append(conn)
return result |
def get_variant(variant_handle, context=None):
"""Create a variant given its handle (or serialized dict equivalent)
Args:
variant_handle (`ResourceHandle` or dict): Resource handle, or
equivalent serialized dict representation from
ResourceHandle.to_dict
context (`ResolvedContext`): The context this variant is associated
with, if any.
Returns:
`Variant`.
"""
if isinstance(variant_handle, dict):
variant_handle = ResourceHandle.from_dict(variant_handle)
variant_resource = package_repository_manager.get_resource_from_handle(variant_handle)
variant = Variant(variant_resource, context=context)
return variant | Create a variant given its handle (or serialized dict equivalent)
Args:
variant_handle (`ResourceHandle` or dict): Resource handle, or
equivalent serialized dict representation from
ResourceHandle.to_dict
context (`ResolvedContext`): The context this variant is associated
with, if any.
Returns:
`Variant`. | Below is the the instruction that describes the task:
### Input:
Create a variant given its handle (or serialized dict equivalent)
Args:
variant_handle (`ResourceHandle` or dict): Resource handle, or
equivalent serialized dict representation from
ResourceHandle.to_dict
context (`ResolvedContext`): The context this variant is associated
with, if any.
Returns:
`Variant`.
### Response:
def get_variant(variant_handle, context=None):
"""Create a variant given its handle (or serialized dict equivalent)
Args:
variant_handle (`ResourceHandle` or dict): Resource handle, or
equivalent serialized dict representation from
ResourceHandle.to_dict
context (`ResolvedContext`): The context this variant is associated
with, if any.
Returns:
`Variant`.
"""
if isinstance(variant_handle, dict):
variant_handle = ResourceHandle.from_dict(variant_handle)
variant_resource = package_repository_manager.get_resource_from_handle(variant_handle)
variant = Variant(variant_resource, context=context)
return variant |
def replace_event_annotations(event, newanns):
"""Replace event annotations with the provided ones."""
_humilis = event.get("_humilis", {})
if not _humilis:
event["_humilis"] = {"annotation": newanns}
else:
event["_humilis"]["annotation"] = newanns | Replace event annotations with the provided ones. | Below is the the instruction that describes the task:
### Input:
Replace event annotations with the provided ones.
### Response:
def replace_event_annotations(event, newanns):
"""Replace event annotations with the provided ones."""
_humilis = event.get("_humilis", {})
if not _humilis:
event["_humilis"] = {"annotation": newanns}
else:
event["_humilis"]["annotation"] = newanns |
def list_networks(kwargs=None, call=None):
'''
List all the standard networks for this VMware environment
CLI Example:
.. code-block:: bash
salt-cloud -f list_networks my-vmware-config
'''
if call != 'function':
raise SaltCloudSystemExit(
'The list_networks function must be called with '
'-f or --function.'
)
return {'Networks': salt.utils.vmware.list_networks(_get_si())} | List all the standard networks for this VMware environment
CLI Example:
.. code-block:: bash
salt-cloud -f list_networks my-vmware-config | Below is the the instruction that describes the task:
### Input:
List all the standard networks for this VMware environment
CLI Example:
.. code-block:: bash
salt-cloud -f list_networks my-vmware-config
### Response:
def list_networks(kwargs=None, call=None):
'''
List all the standard networks for this VMware environment
CLI Example:
.. code-block:: bash
salt-cloud -f list_networks my-vmware-config
'''
if call != 'function':
raise SaltCloudSystemExit(
'The list_networks function must be called with '
'-f or --function.'
)
return {'Networks': salt.utils.vmware.list_networks(_get_si())} |
def _string_to_record_type(string):
'''
Return a string representation of a DNS record type to a
libcloud RecordType ENUM.
:param string: A record type, e.g. A, TXT, NS
:type string: ``str``
:rtype: :class:`RecordType`
'''
string = string.upper()
record_type = getattr(RecordType, string)
return record_type | Return a string representation of a DNS record type to a
libcloud RecordType ENUM.
:param string: A record type, e.g. A, TXT, NS
:type string: ``str``
:rtype: :class:`RecordType` | Below is the the instruction that describes the task:
### Input:
Return a string representation of a DNS record type to a
libcloud RecordType ENUM.
:param string: A record type, e.g. A, TXT, NS
:type string: ``str``
:rtype: :class:`RecordType`
### Response:
def _string_to_record_type(string):
'''
Return a string representation of a DNS record type to a
libcloud RecordType ENUM.
:param string: A record type, e.g. A, TXT, NS
:type string: ``str``
:rtype: :class:`RecordType`
'''
string = string.upper()
record_type = getattr(RecordType, string)
return record_type |
def extract_flac (archive, compression, cmd, verbosity, interactive, outdir):
"""Decompress a FLAC archive to a WAV file."""
outfile = util.get_single_outfile(outdir, archive, extension=".wav")
cmdlist = [cmd, '--decode', archive, '--output-name', outfile]
return cmdlist | Decompress a FLAC archive to a WAV file. | Below is the the instruction that describes the task:
### Input:
Decompress a FLAC archive to a WAV file.
### Response:
def extract_flac (archive, compression, cmd, verbosity, interactive, outdir):
"""Decompress a FLAC archive to a WAV file."""
outfile = util.get_single_outfile(outdir, archive, extension=".wav")
cmdlist = [cmd, '--decode', archive, '--output-name', outfile]
return cmdlist |
def relabel(self, i):
'''
API: relabel(self, i)
Description:
Used by max_flow_preflowpush() method for relabelling node i.
Input:
i: Node that is being relabelled.
Post:
'distance' attribute of node i is updated.
'''
min_distance = 2*len(self.get_node_list()) + 1
for j in self.get_neighbors(i):
if (self.get_node_attr(j, 'distance') < min_distance and
(self.get_edge_attr(i, j, 'flow') <
self.get_edge_attr(i, j, 'capacity'))):
min_distance = self.get_node_attr(j, 'distance')
for j in self.get_in_neighbors(i):
if (self.get_node_attr(j, 'distance') < min_distance and
self.get_edge_attr(j, i, 'flow') > 0):
min_distance = self.get_node_attr(j, 'distance')
self.set_node_attr(i, 'distance', min_distance + 1) | API: relabel(self, i)
Description:
Used by max_flow_preflowpush() method for relabelling node i.
Input:
i: Node that is being relabelled.
Post:
'distance' attribute of node i is updated. | Below is the the instruction that describes the task:
### Input:
API: relabel(self, i)
Description:
Used by max_flow_preflowpush() method for relabelling node i.
Input:
i: Node that is being relabelled.
Post:
'distance' attribute of node i is updated.
### Response:
def relabel(self, i):
'''
API: relabel(self, i)
Description:
Used by max_flow_preflowpush() method for relabelling node i.
Input:
i: Node that is being relabelled.
Post:
'distance' attribute of node i is updated.
'''
min_distance = 2*len(self.get_node_list()) + 1
for j in self.get_neighbors(i):
if (self.get_node_attr(j, 'distance') < min_distance and
(self.get_edge_attr(i, j, 'flow') <
self.get_edge_attr(i, j, 'capacity'))):
min_distance = self.get_node_attr(j, 'distance')
for j in self.get_in_neighbors(i):
if (self.get_node_attr(j, 'distance') < min_distance and
self.get_edge_attr(j, i, 'flow') > 0):
min_distance = self.get_node_attr(j, 'distance')
self.set_node_attr(i, 'distance', min_distance + 1) |
def _get(self, *rules):
# type: (Iterable[Type[Rule]]) -> Generator[Type[Rule]]
"""
Get rules representing parameters.
The return rules can be different from parameters, in case parameter define multiple rules in one class.
:param rules: For which rules get the representation.
:return: List of rules representing parameters.
:raise NotRuleException: If the parameter doesn't inherit from Rule.
:raise RuleException: If the syntax of the rule is invalid.
"""
for rule in rules:
if not inspect.isclass(rule) or not issubclass(rule, Rule):
raise NotRuleException(rule)
for r in self._split_rules(rule):
yield self._find_rule(r) | Get rules representing parameters.
The return rules can be different from parameters, in case parameter define multiple rules in one class.
:param rules: For which rules get the representation.
:return: List of rules representing parameters.
:raise NotRuleException: If the parameter doesn't inherit from Rule.
:raise RuleException: If the syntax of the rule is invalid. | Below is the the instruction that describes the task:
### Input:
Get rules representing parameters.
The return rules can be different from parameters, in case parameter define multiple rules in one class.
:param rules: For which rules get the representation.
:return: List of rules representing parameters.
:raise NotRuleException: If the parameter doesn't inherit from Rule.
:raise RuleException: If the syntax of the rule is invalid.
### Response:
def _get(self, *rules):
# type: (Iterable[Type[Rule]]) -> Generator[Type[Rule]]
"""
Get rules representing parameters.
The return rules can be different from parameters, in case parameter define multiple rules in one class.
:param rules: For which rules get the representation.
:return: List of rules representing parameters.
:raise NotRuleException: If the parameter doesn't inherit from Rule.
:raise RuleException: If the syntax of the rule is invalid.
"""
for rule in rules:
if not inspect.isclass(rule) or not issubclass(rule, Rule):
raise NotRuleException(rule)
for r in self._split_rules(rule):
yield self._find_rule(r) |
def set_packet_headers(self, headers):
""" Set packet header.
The method will try to set ps_headerprotocol to inform the Xena GUI and tester how to interpret the packet
header byte sequence specified with PS_PACKETHEADER.
This is mainly for information purposes, and the stream will transmit the packet header bytes even if no
protocol segments are specified.
If the method fails to set some segment it will log a warning and skip setup.
:param headers: current packet headers
:type headers: pypacker.layer12.ethernet.Ethernet
"""
bin_headers = '0x' + binascii.hexlify(headers.bin()).decode('utf-8')
self.set_attributes(ps_packetheader=bin_headers)
body_handler = headers
ps_headerprotocol = []
while body_handler:
segment = pypacker_2_xena.get(str(body_handler).split('(')[0].lower(), None)
if not segment:
self.logger.warning('pypacker header {} not in conversion list'.format(segment))
return
ps_headerprotocol.append(segment)
if type(body_handler) is Ethernet and body_handler.vlan:
ps_headerprotocol.append('vlan')
body_handler = body_handler.body_handler
self.set_attributes(ps_headerprotocol=' '.join(ps_headerprotocol)) | Set packet header.
The method will try to set ps_headerprotocol to inform the Xena GUI and tester how to interpret the packet
header byte sequence specified with PS_PACKETHEADER.
This is mainly for information purposes, and the stream will transmit the packet header bytes even if no
protocol segments are specified.
If the method fails to set some segment it will log a warning and skip setup.
:param headers: current packet headers
:type headers: pypacker.layer12.ethernet.Ethernet | Below is the the instruction that describes the task:
### Input:
Set packet header.
The method will try to set ps_headerprotocol to inform the Xena GUI and tester how to interpret the packet
header byte sequence specified with PS_PACKETHEADER.
This is mainly for information purposes, and the stream will transmit the packet header bytes even if no
protocol segments are specified.
If the method fails to set some segment it will log a warning and skip setup.
:param headers: current packet headers
:type headers: pypacker.layer12.ethernet.Ethernet
### Response:
def set_packet_headers(self, headers):
""" Set packet header.
The method will try to set ps_headerprotocol to inform the Xena GUI and tester how to interpret the packet
header byte sequence specified with PS_PACKETHEADER.
This is mainly for information purposes, and the stream will transmit the packet header bytes even if no
protocol segments are specified.
If the method fails to set some segment it will log a warning and skip setup.
:param headers: current packet headers
:type headers: pypacker.layer12.ethernet.Ethernet
"""
bin_headers = '0x' + binascii.hexlify(headers.bin()).decode('utf-8')
self.set_attributes(ps_packetheader=bin_headers)
body_handler = headers
ps_headerprotocol = []
while body_handler:
segment = pypacker_2_xena.get(str(body_handler).split('(')[0].lower(), None)
if not segment:
self.logger.warning('pypacker header {} not in conversion list'.format(segment))
return
ps_headerprotocol.append(segment)
if type(body_handler) is Ethernet and body_handler.vlan:
ps_headerprotocol.append('vlan')
body_handler = body_handler.body_handler
self.set_attributes(ps_headerprotocol=' '.join(ps_headerprotocol)) |
def validate(name, # type: str
value, # type: Any
enforce_not_none=True, # type: bool
equals=None, # type: Any
instance_of=None, # type: Union[Type, Tuple[Type]]
subclass_of=None, # type: Union[Type, Tuple[Type]]
is_in=None, # type: Container
subset_of=None, # type: Set
contains = None, # type: Union[Any, Iterable]
superset_of=None, # type: Set
min_value=None, # type: Any
min_strict=False, # type: bool
max_value=None, # type: Any
max_strict=False, # type: bool
length=None, # type: int
min_len=None, # type: int
min_len_strict=False, # type: bool
max_len=None, # type: int
max_len_strict=False, # type: bool
custom=None, # type: Callable[[Any], Any]
error_type=None, # type: Type[ValidationError]
help_msg=None, # type: str
**kw_context_args):
"""
A validation function for quick inline validation of `value`, with minimal capabilities:
* None handling: reject None (enforce_not_none=True, default), or accept None silently (enforce_not_none=False)
* Type validation: `value` should be an instance of any of `var_types` if provided
* Value validation:
* if `allowed_values` is provided, `value` should be in that set
* if `min_value` (resp. `max_value`) is provided, `value` should be greater than it. Comparison is not strict by
default and can be set to strict by setting `min_strict`, resp. `max_strict`, to `True`
* if `min_len` (resp. `max_len`) is provided, `len(value)` should be greater than it. Comparison is not strict by
default and can be set to strict by setting `min_len_strict`, resp. `max_len_strict`, to `True`
:param name: the applicative name of the checked value, that will be used in error messages
:param value: the value to check
:param enforce_not_none: boolean, default True. Whether to enforce that `value` is not None.
:param equals: an optional value to enforce.
:param instance_of: optional type(s) to enforce. If a tuple of types is provided it is considered alternate types: one
match is enough to succeed. If None, type will not be enforced
:param subclass_of: optional type(s) to enforce. If a tuple of types is provided it is considered alternate types: one
match is enough to succeed. If None, type will not be enforced
:param is_in: an optional set of allowed values.
:param subset_of: an optional superset for the variable
:param contains: an optional value that the variable should contain (value in variable == True)
:param superset_of: an optional subset for the variable
:param min_value: an optional minimum value
:param min_strict: if True, only values strictly greater than `min_value` will be accepted
:param max_value: an optional maximum value
:param max_strict: if True, only values strictly lesser than `max_value` will be accepted
:param length: an optional strict length
:param min_len: an optional minimum length
:param min_len_strict: if True, only values with length strictly greater than `min_len` will be accepted
:param max_len: an optional maximum length
:param max_len_strict: if True, only values with length strictly lesser than `max_len` will be accepted
:param custom: a custom base validation function or list of base validation functions to use. This is the same
syntax than for valid8 decorators. A callable, a tuple(callable, help_msg_str), a tuple(callable, failure_type),
or a list of several such elements. Nested lists are supported and indicate an implicit `and_`. Tuples indicate
an implicit `_failure_raiser`. [mini_lambda](https://smarie.github.io/python-mini-lambda/) expressions can be
used instead of callables, they will be transformed to functions automatically.
:param error_type: a subclass of `ValidationError` to raise in case of validation failure. By default a
`ValidationError` will be raised with the provided `help_msg`
:param help_msg: an optional help message to be used in the raised error in case of validation failure.
:param kw_context_args: optional contextual information to store in the exception, and that may be also used
to format the help message
:return: nothing in case of success. Otherwise, raises a ValidationError
"""
# backwards compatibility
instance_of = instance_of or (kw_context_args.pop('allowed_types') if 'allowed_types' in kw_context_args else None)
is_in = is_in or (kw_context_args.pop('allowed_values') if 'allowed_values' in kw_context_args else None)
try:
# the following corresponds to an inline version of
# - _none_rejecter in base.py
# - gt/lt in comparables.py
# - is_in/contains/subset_of/superset_of/has_length/minlen/maxlen/is_in in collections.py
# - instance_of/subclass_of in types.py
# try (https://github.com/orf/inliner) to perform the inlining below automatically without code duplication ?
# > maybe not because quite dangerous (AST mod) and below we skip the "return True" everywhere for performance
#
# Another alternative: easy Cython compiling https://github.com/AlanCristhian/statically
# > but this is not py2 compliant
if value is None:
# inlined version of _none_rejecter in base.py
if enforce_not_none:
raise ValueIsNone(wrong_value=value)
# raise MissingMandatoryParameterException('Error, ' + name + '" is mandatory, it should be non-None')
# else do nothing and return
else:
if equals is not None:
if value != equals:
raise NotEqual(wrong_value=value, ref_value=equals)
if instance_of is not None:
assert_instance_of(value, instance_of)
if subclass_of is not None:
assert_subclass_of(value, subclass_of)
if is_in is not None:
# inlined version of is_in(allowed_values=allowed_values)(value) without 'return True'
if value not in is_in:
raise NotInAllowedValues(wrong_value=value, allowed_values=is_in)
if contains is not None:
# inlined version of contains(ref_value=contains)(value) without 'return True'
if contains not in value:
raise DoesNotContainValue(wrong_value=value, ref_value=contains)
if subset_of is not None:
# inlined version of is_subset(reference_set=subset_of)(value)
missing = value - subset_of
if len(missing) != 0:
raise NotSubset(wrong_value=value, reference_set=subset_of, unsupported=missing)
if superset_of is not None:
# inlined version of is_superset(reference_set=superset_of)(value)
missing = superset_of - value
if len(missing) != 0:
raise NotSuperset(wrong_value=value, reference_set=superset_of, missing=missing)
if min_value is not None:
# inlined version of gt(min_value=min_value, strict=min_strict)(value) without 'return True'
if min_strict:
if not value > min_value:
raise TooSmall(wrong_value=value, min_value=min_value, strict=True)
else:
if not value >= min_value:
raise TooSmall(wrong_value=value, min_value=min_value, strict=False)
if max_value is not None:
# inlined version of lt(max_value=max_value, strict=max_strict)(value) without 'return True'
if max_strict:
if not value < max_value:
raise TooBig(wrong_value=value, max_value=max_value, strict=True)
else:
if not value <= max_value:
raise TooBig(wrong_value=value, max_value=max_value, strict=False)
if length is not None:
# inlined version of has_length() without 'return True'
if len(value) != length:
raise WrongLength(wrong_value=value, ref_length=length)
if min_len is not None:
# inlined version of minlen(min_length=min_len, strict=min_len_strict)(value) without 'return True'
if min_len_strict:
if not len(value) > min_len:
raise TooShort(wrong_value=value, min_length=min_len, strict=True)
else:
if not len(value) >= min_len:
raise TooShort(wrong_value=value, min_length=min_len, strict=False)
if max_len is not None:
# inlined version of maxlen(max_length=max_len, strict=max_len_strict)(value) without 'return True'
if max_len_strict:
if not len(value) < max_len:
raise TooLong(wrong_value=value, max_length=max_len, strict=True)
else:
if not len(value) <= max_len:
raise TooLong(wrong_value=value, max_length=max_len, strict=False)
except Exception as e:
err = _QUICK_VALIDATOR._create_validation_error(name, value, validation_outcome=e, error_type=error_type,
help_msg=help_msg, **kw_context_args)
raise_(err)
if custom is not None:
# traditional custom validator
assert_valid(name, value, custom, error_type=error_type, help_msg=help_msg, **kw_context_args)
else:
# basic (and not enough) check to verify that there was no typo leading an argument to be put in kw_context_args
if error_type is None and help_msg is None and len(kw_context_args) > 0:
raise ValueError("Keyword context arguments have been provided but help_msg and error_type are not: {}"
"".format(kw_context_args)) | A validation function for quick inline validation of `value`, with minimal capabilities:
* None handling: reject None (enforce_not_none=True, default), or accept None silently (enforce_not_none=False)
* Type validation: `value` should be an instance of any of `var_types` if provided
* Value validation:
* if `allowed_values` is provided, `value` should be in that set
* if `min_value` (resp. `max_value`) is provided, `value` should be greater than it. Comparison is not strict by
default and can be set to strict by setting `min_strict`, resp. `max_strict`, to `True`
* if `min_len` (resp. `max_len`) is provided, `len(value)` should be greater than it. Comparison is not strict by
default and can be set to strict by setting `min_len_strict`, resp. `max_len_strict`, to `True`
:param name: the applicative name of the checked value, that will be used in error messages
:param value: the value to check
:param enforce_not_none: boolean, default True. Whether to enforce that `value` is not None.
:param equals: an optional value to enforce.
:param instance_of: optional type(s) to enforce. If a tuple of types is provided it is considered alternate types: one
match is enough to succeed. If None, type will not be enforced
:param subclass_of: optional type(s) to enforce. If a tuple of types is provided it is considered alternate types: one
match is enough to succeed. If None, type will not be enforced
:param is_in: an optional set of allowed values.
:param subset_of: an optional superset for the variable
:param contains: an optional value that the variable should contain (value in variable == True)
:param superset_of: an optional subset for the variable
:param min_value: an optional minimum value
:param min_strict: if True, only values strictly greater than `min_value` will be accepted
:param max_value: an optional maximum value
:param max_strict: if True, only values strictly lesser than `max_value` will be accepted
:param length: an optional strict length
:param min_len: an optional minimum length
:param min_len_strict: if True, only values with length strictly greater than `min_len` will be accepted
:param max_len: an optional maximum length
:param max_len_strict: if True, only values with length strictly lesser than `max_len` will be accepted
:param custom: a custom base validation function or list of base validation functions to use. This is the same
syntax than for valid8 decorators. A callable, a tuple(callable, help_msg_str), a tuple(callable, failure_type),
or a list of several such elements. Nested lists are supported and indicate an implicit `and_`. Tuples indicate
an implicit `_failure_raiser`. [mini_lambda](https://smarie.github.io/python-mini-lambda/) expressions can be
used instead of callables, they will be transformed to functions automatically.
:param error_type: a subclass of `ValidationError` to raise in case of validation failure. By default a
`ValidationError` will be raised with the provided `help_msg`
:param help_msg: an optional help message to be used in the raised error in case of validation failure.
:param kw_context_args: optional contextual information to store in the exception, and that may be also used
to format the help message
:return: nothing in case of success. Otherwise, raises a ValidationError | Below is the the instruction that describes the task:
### Input:
A validation function for quick inline validation of `value`, with minimal capabilities:
* None handling: reject None (enforce_not_none=True, default), or accept None silently (enforce_not_none=False)
* Type validation: `value` should be an instance of any of `var_types` if provided
* Value validation:
* if `allowed_values` is provided, `value` should be in that set
* if `min_value` (resp. `max_value`) is provided, `value` should be greater than it. Comparison is not strict by
default and can be set to strict by setting `min_strict`, resp. `max_strict`, to `True`
* if `min_len` (resp. `max_len`) is provided, `len(value)` should be greater than it. Comparison is not strict by
default and can be set to strict by setting `min_len_strict`, resp. `max_len_strict`, to `True`
:param name: the applicative name of the checked value, that will be used in error messages
:param value: the value to check
:param enforce_not_none: boolean, default True. Whether to enforce that `value` is not None.
:param equals: an optional value to enforce.
:param instance_of: optional type(s) to enforce. If a tuple of types is provided it is considered alternate types: one
match is enough to succeed. If None, type will not be enforced
:param subclass_of: optional type(s) to enforce. If a tuple of types is provided it is considered alternate types: one
match is enough to succeed. If None, type will not be enforced
:param is_in: an optional set of allowed values.
:param subset_of: an optional superset for the variable
:param contains: an optional value that the variable should contain (value in variable == True)
:param superset_of: an optional subset for the variable
:param min_value: an optional minimum value
:param min_strict: if True, only values strictly greater than `min_value` will be accepted
:param max_value: an optional maximum value
:param max_strict: if True, only values strictly lesser than `max_value` will be accepted
:param length: an optional strict length
:param min_len: an optional minimum length
:param min_len_strict: if True, only values with length strictly greater than `min_len` will be accepted
:param max_len: an optional maximum length
:param max_len_strict: if True, only values with length strictly lesser than `max_len` will be accepted
:param custom: a custom base validation function or list of base validation functions to use. This is the same
syntax than for valid8 decorators. A callable, a tuple(callable, help_msg_str), a tuple(callable, failure_type),
or a list of several such elements. Nested lists are supported and indicate an implicit `and_`. Tuples indicate
an implicit `_failure_raiser`. [mini_lambda](https://smarie.github.io/python-mini-lambda/) expressions can be
used instead of callables, they will be transformed to functions automatically.
:param error_type: a subclass of `ValidationError` to raise in case of validation failure. By default a
`ValidationError` will be raised with the provided `help_msg`
:param help_msg: an optional help message to be used in the raised error in case of validation failure.
:param kw_context_args: optional contextual information to store in the exception, and that may be also used
to format the help message
:return: nothing in case of success. Otherwise, raises a ValidationError
### Response:
def validate(name, # type: str
value, # type: Any
enforce_not_none=True, # type: bool
equals=None, # type: Any
instance_of=None, # type: Union[Type, Tuple[Type]]
subclass_of=None, # type: Union[Type, Tuple[Type]]
is_in=None, # type: Container
subset_of=None, # type: Set
contains = None, # type: Union[Any, Iterable]
superset_of=None, # type: Set
min_value=None, # type: Any
min_strict=False, # type: bool
max_value=None, # type: Any
max_strict=False, # type: bool
length=None, # type: int
min_len=None, # type: int
min_len_strict=False, # type: bool
max_len=None, # type: int
max_len_strict=False, # type: bool
custom=None, # type: Callable[[Any], Any]
error_type=None, # type: Type[ValidationError]
help_msg=None, # type: str
**kw_context_args):
"""
A validation function for quick inline validation of `value`, with minimal capabilities:
* None handling: reject None (enforce_not_none=True, default), or accept None silently (enforce_not_none=False)
* Type validation: `value` should be an instance of any of `var_types` if provided
* Value validation:
* if `allowed_values` is provided, `value` should be in that set
* if `min_value` (resp. `max_value`) is provided, `value` should be greater than it. Comparison is not strict by
default and can be set to strict by setting `min_strict`, resp. `max_strict`, to `True`
* if `min_len` (resp. `max_len`) is provided, `len(value)` should be greater than it. Comparison is not strict by
default and can be set to strict by setting `min_len_strict`, resp. `max_len_strict`, to `True`
:param name: the applicative name of the checked value, that will be used in error messages
:param value: the value to check
:param enforce_not_none: boolean, default True. Whether to enforce that `value` is not None.
:param equals: an optional value to enforce.
:param instance_of: optional type(s) to enforce. If a tuple of types is provided it is considered alternate types: one
match is enough to succeed. If None, type will not be enforced
:param subclass_of: optional type(s) to enforce. If a tuple of types is provided it is considered alternate types: one
match is enough to succeed. If None, type will not be enforced
:param is_in: an optional set of allowed values.
:param subset_of: an optional superset for the variable
:param contains: an optional value that the variable should contain (value in variable == True)
:param superset_of: an optional subset for the variable
:param min_value: an optional minimum value
:param min_strict: if True, only values strictly greater than `min_value` will be accepted
:param max_value: an optional maximum value
:param max_strict: if True, only values strictly lesser than `max_value` will be accepted
:param length: an optional strict length
:param min_len: an optional minimum length
:param min_len_strict: if True, only values with length strictly greater than `min_len` will be accepted
:param max_len: an optional maximum length
:param max_len_strict: if True, only values with length strictly lesser than `max_len` will be accepted
:param custom: a custom base validation function or list of base validation functions to use. This is the same
syntax than for valid8 decorators. A callable, a tuple(callable, help_msg_str), a tuple(callable, failure_type),
or a list of several such elements. Nested lists are supported and indicate an implicit `and_`. Tuples indicate
an implicit `_failure_raiser`. [mini_lambda](https://smarie.github.io/python-mini-lambda/) expressions can be
used instead of callables, they will be transformed to functions automatically.
:param error_type: a subclass of `ValidationError` to raise in case of validation failure. By default a
`ValidationError` will be raised with the provided `help_msg`
:param help_msg: an optional help message to be used in the raised error in case of validation failure.
:param kw_context_args: optional contextual information to store in the exception, and that may be also used
to format the help message
:return: nothing in case of success. Otherwise, raises a ValidationError
"""
# backwards compatibility
instance_of = instance_of or (kw_context_args.pop('allowed_types') if 'allowed_types' in kw_context_args else None)
is_in = is_in or (kw_context_args.pop('allowed_values') if 'allowed_values' in kw_context_args else None)
try:
# the following corresponds to an inline version of
# - _none_rejecter in base.py
# - gt/lt in comparables.py
# - is_in/contains/subset_of/superset_of/has_length/minlen/maxlen/is_in in collections.py
# - instance_of/subclass_of in types.py
# try (https://github.com/orf/inliner) to perform the inlining below automatically without code duplication ?
# > maybe not because quite dangerous (AST mod) and below we skip the "return True" everywhere for performance
#
# Another alternative: easy Cython compiling https://github.com/AlanCristhian/statically
# > but this is not py2 compliant
if value is None:
# inlined version of _none_rejecter in base.py
if enforce_not_none:
raise ValueIsNone(wrong_value=value)
# raise MissingMandatoryParameterException('Error, ' + name + '" is mandatory, it should be non-None')
# else do nothing and return
else:
if equals is not None:
if value != equals:
raise NotEqual(wrong_value=value, ref_value=equals)
if instance_of is not None:
assert_instance_of(value, instance_of)
if subclass_of is not None:
assert_subclass_of(value, subclass_of)
if is_in is not None:
# inlined version of is_in(allowed_values=allowed_values)(value) without 'return True'
if value not in is_in:
raise NotInAllowedValues(wrong_value=value, allowed_values=is_in)
if contains is not None:
# inlined version of contains(ref_value=contains)(value) without 'return True'
if contains not in value:
raise DoesNotContainValue(wrong_value=value, ref_value=contains)
if subset_of is not None:
# inlined version of is_subset(reference_set=subset_of)(value)
missing = value - subset_of
if len(missing) != 0:
raise NotSubset(wrong_value=value, reference_set=subset_of, unsupported=missing)
if superset_of is not None:
# inlined version of is_superset(reference_set=superset_of)(value)
missing = superset_of - value
if len(missing) != 0:
raise NotSuperset(wrong_value=value, reference_set=superset_of, missing=missing)
if min_value is not None:
# inlined version of gt(min_value=min_value, strict=min_strict)(value) without 'return True'
if min_strict:
if not value > min_value:
raise TooSmall(wrong_value=value, min_value=min_value, strict=True)
else:
if not value >= min_value:
raise TooSmall(wrong_value=value, min_value=min_value, strict=False)
if max_value is not None:
# inlined version of lt(max_value=max_value, strict=max_strict)(value) without 'return True'
if max_strict:
if not value < max_value:
raise TooBig(wrong_value=value, max_value=max_value, strict=True)
else:
if not value <= max_value:
raise TooBig(wrong_value=value, max_value=max_value, strict=False)
if length is not None:
# inlined version of has_length() without 'return True'
if len(value) != length:
raise WrongLength(wrong_value=value, ref_length=length)
if min_len is not None:
# inlined version of minlen(min_length=min_len, strict=min_len_strict)(value) without 'return True'
if min_len_strict:
if not len(value) > min_len:
raise TooShort(wrong_value=value, min_length=min_len, strict=True)
else:
if not len(value) >= min_len:
raise TooShort(wrong_value=value, min_length=min_len, strict=False)
if max_len is not None:
# inlined version of maxlen(max_length=max_len, strict=max_len_strict)(value) without 'return True'
if max_len_strict:
if not len(value) < max_len:
raise TooLong(wrong_value=value, max_length=max_len, strict=True)
else:
if not len(value) <= max_len:
raise TooLong(wrong_value=value, max_length=max_len, strict=False)
except Exception as e:
err = _QUICK_VALIDATOR._create_validation_error(name, value, validation_outcome=e, error_type=error_type,
help_msg=help_msg, **kw_context_args)
raise_(err)
if custom is not None:
# traditional custom validator
assert_valid(name, value, custom, error_type=error_type, help_msg=help_msg, **kw_context_args)
else:
# basic (and not enough) check to verify that there was no typo leading an argument to be put in kw_context_args
if error_type is None and help_msg is None and len(kw_context_args) > 0:
raise ValueError("Keyword context arguments have been provided but help_msg and error_type are not: {}"
"".format(kw_context_args)) |
def AuthorizeGroup(self, group, subject):
"""Allow given group access to a given subject."""
# Add the subject to the dict if is isn't present, so it will get checked in
# CheckPermissions
self.authorized_users.setdefault(subject, set())
self.group_access_manager.AuthorizeGroup(group, subject) | Allow given group access to a given subject. | Below is the the instruction that describes the task:
### Input:
Allow given group access to a given subject.
### Response:
def AuthorizeGroup(self, group, subject):
"""Allow given group access to a given subject."""
# Add the subject to the dict if is isn't present, so it will get checked in
# CheckPermissions
self.authorized_users.setdefault(subject, set())
self.group_access_manager.AuthorizeGroup(group, subject) |
def _logging_callback(level, domain, message, data):
""" Callback that outputs libgphoto2's logging message via
Python's standard logging facilities.
:param level: libgphoto2 logging level
:param domain: component the message originates from
:param message: logging message
:param data: Other data in the logging record (unused)
"""
domain = ffi.string(domain).decode()
message = ffi.string(message).decode()
logger = LOGGER.getChild(domain)
if level not in LOG_LEVELS:
return
logger.log(LOG_LEVELS[level], message) | Callback that outputs libgphoto2's logging message via
Python's standard logging facilities.
:param level: libgphoto2 logging level
:param domain: component the message originates from
:param message: logging message
:param data: Other data in the logging record (unused) | Below is the the instruction that describes the task:
### Input:
Callback that outputs libgphoto2's logging message via
Python's standard logging facilities.
:param level: libgphoto2 logging level
:param domain: component the message originates from
:param message: logging message
:param data: Other data in the logging record (unused)
### Response:
def _logging_callback(level, domain, message, data):
""" Callback that outputs libgphoto2's logging message via
Python's standard logging facilities.
:param level: libgphoto2 logging level
:param domain: component the message originates from
:param message: logging message
:param data: Other data in the logging record (unused)
"""
domain = ffi.string(domain).decode()
message = ffi.string(message).decode()
logger = LOGGER.getChild(domain)
if level not in LOG_LEVELS:
return
logger.log(LOG_LEVELS[level], message) |
def setMimeTypeByName(self, name):
" Guess the mime type "
mimetype = mimetypes.guess_type(name)[0]
if mimetype is not None:
self.mimetype = mimetypes.guess_type(name)[0].split(";")[0] | Guess the mime type | Below is the the instruction that describes the task:
### Input:
Guess the mime type
### Response:
def setMimeTypeByName(self, name):
" Guess the mime type "
mimetype = mimetypes.guess_type(name)[0]
if mimetype is not None:
self.mimetype = mimetypes.guess_type(name)[0].split(";")[0] |
def write_pickle(self, path, compress=False):
"""Serialize the current `GOParser` object and store it in a pickle file.
Parameters
----------
path: str
Path of the output file.
compress: bool, optional
Whether to compress the file using gzip.
Returns
-------
None
Notes
-----
Compression with gzip is significantly slower than storing the file
in uncompressed form.
"""
logger.info('Writing pickle to "%s"...', path)
if compress:
with gzip.open(path, 'wb') as ofh:
pickle.dump(self, ofh, pickle.HIGHEST_PROTOCOL)
else:
with open(path, 'wb') as ofh:
pickle.dump(self, ofh, pickle.HIGHEST_PROTOCOL) | Serialize the current `GOParser` object and store it in a pickle file.
Parameters
----------
path: str
Path of the output file.
compress: bool, optional
Whether to compress the file using gzip.
Returns
-------
None
Notes
-----
Compression with gzip is significantly slower than storing the file
in uncompressed form. | Below is the the instruction that describes the task:
### Input:
Serialize the current `GOParser` object and store it in a pickle file.
Parameters
----------
path: str
Path of the output file.
compress: bool, optional
Whether to compress the file using gzip.
Returns
-------
None
Notes
-----
Compression with gzip is significantly slower than storing the file
in uncompressed form.
### Response:
def write_pickle(self, path, compress=False):
"""Serialize the current `GOParser` object and store it in a pickle file.
Parameters
----------
path: str
Path of the output file.
compress: bool, optional
Whether to compress the file using gzip.
Returns
-------
None
Notes
-----
Compression with gzip is significantly slower than storing the file
in uncompressed form.
"""
logger.info('Writing pickle to "%s"...', path)
if compress:
with gzip.open(path, 'wb') as ofh:
pickle.dump(self, ofh, pickle.HIGHEST_PROTOCOL)
else:
with open(path, 'wb') as ofh:
pickle.dump(self, ofh, pickle.HIGHEST_PROTOCOL) |
def extract_replacements(self, trajectory):
"""Extracts the wildcards and file replacements from the `trajectory`"""
self.env_name = trajectory.v_environment_name
self.traj_name = trajectory.v_name
self.set_name = trajectory.f_wildcard('$set')
self.run_name = trajectory.f_wildcard('$') | Extracts the wildcards and file replacements from the `trajectory` | Below is the the instruction that describes the task:
### Input:
Extracts the wildcards and file replacements from the `trajectory`
### Response:
def extract_replacements(self, trajectory):
"""Extracts the wildcards and file replacements from the `trajectory`"""
self.env_name = trajectory.v_environment_name
self.traj_name = trajectory.v_name
self.set_name = trajectory.f_wildcard('$set')
self.run_name = trajectory.f_wildcard('$') |
def main(argv):
"""This function sets up a command-line option parser and then calls
to do all of the real work.
"""
import argparse
import codecs
# have to be ready to deal with utf-8 names
out = codecs.getwriter('utf-8')(sys.stdout)
description = '''Takes a series of at least 2 OTT ids and reports the OTT of their least inclusive taxonomic ancestor and that taxon's ancestors.'''
parser = argparse.ArgumentParser(prog='ot-taxo-mrca-to-root', description=description)
parser.add_argument('ids', nargs='+', type=int, help='OTT IDs')
args = parser.parse_args(argv)
id_list = args.ids
last_id = id_list.pop()
anc_list = get_taxonomic_ancestor_ids(last_id)
common_anc = set(anc_list)
for curr_id in id_list:
curr_anc_set = set(get_taxonomic_ancestor_ids(curr_id))
common_anc &= curr_anc_set
if not common_anc:
break
for anc_id in anc_list:
if anc_id in common_anc:
out.write('{}\n'.format(anc_id)) | This function sets up a command-line option parser and then calls
to do all of the real work. | Below is the the instruction that describes the task:
### Input:
This function sets up a command-line option parser and then calls
to do all of the real work.
### Response:
def main(argv):
"""This function sets up a command-line option parser and then calls
to do all of the real work.
"""
import argparse
import codecs
# have to be ready to deal with utf-8 names
out = codecs.getwriter('utf-8')(sys.stdout)
description = '''Takes a series of at least 2 OTT ids and reports the OTT of their least inclusive taxonomic ancestor and that taxon's ancestors.'''
parser = argparse.ArgumentParser(prog='ot-taxo-mrca-to-root', description=description)
parser.add_argument('ids', nargs='+', type=int, help='OTT IDs')
args = parser.parse_args(argv)
id_list = args.ids
last_id = id_list.pop()
anc_list = get_taxonomic_ancestor_ids(last_id)
common_anc = set(anc_list)
for curr_id in id_list:
curr_anc_set = set(get_taxonomic_ancestor_ids(curr_id))
common_anc &= curr_anc_set
if not common_anc:
break
for anc_id in anc_list:
if anc_id in common_anc:
out.write('{}\n'.format(anc_id)) |
def apache_config(config, outputfile):
'''
Generate a valid Apache configuration file, based on the given settings.
'''
if os.path.exists(outputfile):
os.rename(outputfile, outputfile + ".old")
print("Renamed existing Apache config file to " + outputfile + ".old")
from django.conf import settings
f = open(outputfile, 'w')
print("Generating Apache configuration in " + outputfile)
subdir = (len(settings.HOST_DIR) > 0)
text = """
# OpenSubmit Configuration for Apache 2.4
# These directives are expected to live in some <VirtualHost> block
"""
if subdir:
text += "Alias /%s/static/ %s\n" % (settings.HOST_DIR,
settings.STATIC_ROOT)
text += " WSGIScriptAlias /%s %s/wsgi.py\n" % (
settings.HOST_DIR, settings.SCRIPT_ROOT)
else:
text += "Alias /static/ %s\n" % (settings.STATIC_ROOT)
text += " WSGIScriptAlias / %s/wsgi.py" % (settings.SCRIPT_ROOT)
text += """
WSGIPassAuthorization On
<Directory {static_path}>
Require all granted
</Directory>
<Directory {install_path}>
<Files wsgi.py>
Require all granted
</Files>
</Directory>
""".format(static_path=settings.STATIC_ROOT, install_path=settings.SCRIPT_ROOT)
f.write(text)
f.close() | Generate a valid Apache configuration file, based on the given settings. | Below is the the instruction that describes the task:
### Input:
Generate a valid Apache configuration file, based on the given settings.
### Response:
def apache_config(config, outputfile):
'''
Generate a valid Apache configuration file, based on the given settings.
'''
if os.path.exists(outputfile):
os.rename(outputfile, outputfile + ".old")
print("Renamed existing Apache config file to " + outputfile + ".old")
from django.conf import settings
f = open(outputfile, 'w')
print("Generating Apache configuration in " + outputfile)
subdir = (len(settings.HOST_DIR) > 0)
text = """
# OpenSubmit Configuration for Apache 2.4
# These directives are expected to live in some <VirtualHost> block
"""
if subdir:
text += "Alias /%s/static/ %s\n" % (settings.HOST_DIR,
settings.STATIC_ROOT)
text += " WSGIScriptAlias /%s %s/wsgi.py\n" % (
settings.HOST_DIR, settings.SCRIPT_ROOT)
else:
text += "Alias /static/ %s\n" % (settings.STATIC_ROOT)
text += " WSGIScriptAlias / %s/wsgi.py" % (settings.SCRIPT_ROOT)
text += """
WSGIPassAuthorization On
<Directory {static_path}>
Require all granted
</Directory>
<Directory {install_path}>
<Files wsgi.py>
Require all granted
</Files>
</Directory>
""".format(static_path=settings.STATIC_ROOT, install_path=settings.SCRIPT_ROOT)
f.write(text)
f.close() |
def internal_error(exception, template_path, is_admin, db=None):
"""
Render an "internal error" page. The following variables will be populated when rendering the
template:
title: The page title
message: The body of the error message to display to the user
preformat: Boolean stating whether to wrap the error message in a pre
As well as rendering the error message to the user, this will also log the exception
:param exception: The exception that was caught
:param template_path: The template to render (i.e. "main/error.html")
:param is_admin: Can the logged in user always view detailed error reports?
:param db: The Flask-SQLAlchemy instance
:return: Flask Response
"""
if db:
try:
db.session.rollback()
except: # noqa: E722
pass
title = str(exception)
message = traceback.format_exc()
preformat = True
log.error('Exception caught: {}\n{}'.format(title, message))
if current_app.config.get('TEST_MODE'):
show_detailed_error = True
message = 'Note: You are seeing this error message because the server is in test mode.\n\n{}'.format(message)
elif is_admin:
show_detailed_error = True
message = 'Note: You are seeing this error message because you are a member of staff.\n\n{}'.format(message)
else:
title = '500 Internal Server Error'
message = 'Something went wrong while processing your request.'
preformat = False
show_detailed_error = False
try:
return render_template(template_path, title=title, message=message, preformat=preformat,
exception=exception, is_admin=is_admin,
show_detailed_error=show_detailed_error), 500
except: # noqa: E722
log.exception('Error rendering error page!')
return '500 Internal Server Error', 500 | Render an "internal error" page. The following variables will be populated when rendering the
template:
title: The page title
message: The body of the error message to display to the user
preformat: Boolean stating whether to wrap the error message in a pre
As well as rendering the error message to the user, this will also log the exception
:param exception: The exception that was caught
:param template_path: The template to render (i.e. "main/error.html")
:param is_admin: Can the logged in user always view detailed error reports?
:param db: The Flask-SQLAlchemy instance
:return: Flask Response | Below is the the instruction that describes the task:
### Input:
Render an "internal error" page. The following variables will be populated when rendering the
template:
title: The page title
message: The body of the error message to display to the user
preformat: Boolean stating whether to wrap the error message in a pre
As well as rendering the error message to the user, this will also log the exception
:param exception: The exception that was caught
:param template_path: The template to render (i.e. "main/error.html")
:param is_admin: Can the logged in user always view detailed error reports?
:param db: The Flask-SQLAlchemy instance
:return: Flask Response
### Response:
def internal_error(exception, template_path, is_admin, db=None):
"""
Render an "internal error" page. The following variables will be populated when rendering the
template:
title: The page title
message: The body of the error message to display to the user
preformat: Boolean stating whether to wrap the error message in a pre
As well as rendering the error message to the user, this will also log the exception
:param exception: The exception that was caught
:param template_path: The template to render (i.e. "main/error.html")
:param is_admin: Can the logged in user always view detailed error reports?
:param db: The Flask-SQLAlchemy instance
:return: Flask Response
"""
if db:
try:
db.session.rollback()
except: # noqa: E722
pass
title = str(exception)
message = traceback.format_exc()
preformat = True
log.error('Exception caught: {}\n{}'.format(title, message))
if current_app.config.get('TEST_MODE'):
show_detailed_error = True
message = 'Note: You are seeing this error message because the server is in test mode.\n\n{}'.format(message)
elif is_admin:
show_detailed_error = True
message = 'Note: You are seeing this error message because you are a member of staff.\n\n{}'.format(message)
else:
title = '500 Internal Server Error'
message = 'Something went wrong while processing your request.'
preformat = False
show_detailed_error = False
try:
return render_template(template_path, title=title, message=message, preformat=preformat,
exception=exception, is_admin=is_admin,
show_detailed_error=show_detailed_error), 500
except: # noqa: E722
log.exception('Error rendering error page!')
return '500 Internal Server Error', 500 |
def additionalAssignment(MobileAllocation_presence=0,
StartingTime_presence=0):
"""ADDITIONAL ASSIGNMENT Section 9.1.1"""
# Mandatory
a = TpPd(pd=0x6)
b = MessageType(mesType=0x3B) # 00111011
c = ChannelDescription()
packet = a / b / c
# Not Mandatory
if MobileAllocation_presence is 1:
d = MobileAllocationHdr(ieiMA=0x72, eightBitMA=0x0)
packet = packet / d
if StartingTime_presence is 1:
e = StartingTimeHdr(ieiST=0x7C, eightBitST=0x0)
packet = packet / e
return packet | ADDITIONAL ASSIGNMENT Section 9.1.1 | Below is the the instruction that describes the task:
### Input:
ADDITIONAL ASSIGNMENT Section 9.1.1
### Response:
def additionalAssignment(MobileAllocation_presence=0,
StartingTime_presence=0):
"""ADDITIONAL ASSIGNMENT Section 9.1.1"""
# Mandatory
a = TpPd(pd=0x6)
b = MessageType(mesType=0x3B) # 00111011
c = ChannelDescription()
packet = a / b / c
# Not Mandatory
if MobileAllocation_presence is 1:
d = MobileAllocationHdr(ieiMA=0x72, eightBitMA=0x0)
packet = packet / d
if StartingTime_presence is 1:
e = StartingTimeHdr(ieiST=0x7C, eightBitST=0x0)
packet = packet / e
return packet |
def get_member_profile(self, member_id):
''' a method to retrieve member profile details
:param member_id: integer with member id from member profile
:return: dictionary with member profile details inside [json] key
profile_details = self.objects.profile.schema
'''
# https://www.meetup.com/meetup_api/docs/members/:member_id/#get
title = '%s.get_member_profile' % self.__class__.__name__
# validate inputs
input_fields = {
'member_id': member_id
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# construct member id
if not member_id:
raise IndexError('%s requires member id argument.' % title)
# compose request fields
url = '%s/members/%s' % (self.endpoint, str(member_id))
params = {
'fields': 'gender,birthday,last_event,messaging_pref,next_event,other_services,privacy,self,stats'
}
# send requests
profile_details = self._get_request(url, params=params)
# construct method output
if profile_details['json']:
profile_details['json'] = self._reconstruct_member(profile_details['json'])
return profile_details | a method to retrieve member profile details
:param member_id: integer with member id from member profile
:return: dictionary with member profile details inside [json] key
profile_details = self.objects.profile.schema | Below is the the instruction that describes the task:
### Input:
a method to retrieve member profile details
:param member_id: integer with member id from member profile
:return: dictionary with member profile details inside [json] key
profile_details = self.objects.profile.schema
### Response:
def get_member_profile(self, member_id):
''' a method to retrieve member profile details
:param member_id: integer with member id from member profile
:return: dictionary with member profile details inside [json] key
profile_details = self.objects.profile.schema
'''
# https://www.meetup.com/meetup_api/docs/members/:member_id/#get
title = '%s.get_member_profile' % self.__class__.__name__
# validate inputs
input_fields = {
'member_id': member_id
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# construct member id
if not member_id:
raise IndexError('%s requires member id argument.' % title)
# compose request fields
url = '%s/members/%s' % (self.endpoint, str(member_id))
params = {
'fields': 'gender,birthday,last_event,messaging_pref,next_event,other_services,privacy,self,stats'
}
# send requests
profile_details = self._get_request(url, params=params)
# construct method output
if profile_details['json']:
profile_details['json'] = self._reconstruct_member(profile_details['json'])
return profile_details |
def encodeValue(self, value, toBeAdded=True):
"""Value is encoded as a sdr using the encoding parameters of the Field"""
encodedValue = np.array(self.encoder.encode(value), dtype=realDType)
if toBeAdded:
self.encodings.append(encodedValue)
self.numEncodings+=1
return encodedValue | Value is encoded as a sdr using the encoding parameters of the Field | Below is the the instruction that describes the task:
### Input:
Value is encoded as a sdr using the encoding parameters of the Field
### Response:
def encodeValue(self, value, toBeAdded=True):
"""Value is encoded as a sdr using the encoding parameters of the Field"""
encodedValue = np.array(self.encoder.encode(value), dtype=realDType)
if toBeAdded:
self.encodings.append(encodedValue)
self.numEncodings+=1
return encodedValue |
def resolve(label):
'''
composite mapping
given f(x) and g(x) here: GLOBALTT & LOCALTT respectivly
in order of preference
return g(f(x))|f(x)|g(x) | x
TODO consider returning x on fall through
: return label's mapping
'''
term_id = label
if label is not None and label in LOCALTT:
term_id = LOCALTT[label]
if term_id in GLOBALTT:
term_id = GLOBALTT[term_id]
else:
LOG.warning(
'Local translation but do not have a global term_id for %s', label)
elif label is not None and label in GLOBALTT:
term_id = GLOBALTT[label]
else:
LOG.error('Do not have any mapping for label: ' + label)
return term_id | composite mapping
given f(x) and g(x) here: GLOBALTT & LOCALTT respectivly
in order of preference
return g(f(x))|f(x)|g(x) | x
TODO consider returning x on fall through
: return label's mapping | Below is the the instruction that describes the task:
### Input:
composite mapping
given f(x) and g(x) here: GLOBALTT & LOCALTT respectivly
in order of preference
return g(f(x))|f(x)|g(x) | x
TODO consider returning x on fall through
: return label's mapping
### Response:
def resolve(label):
'''
composite mapping
given f(x) and g(x) here: GLOBALTT & LOCALTT respectivly
in order of preference
return g(f(x))|f(x)|g(x) | x
TODO consider returning x on fall through
: return label's mapping
'''
term_id = label
if label is not None and label in LOCALTT:
term_id = LOCALTT[label]
if term_id in GLOBALTT:
term_id = GLOBALTT[term_id]
else:
LOG.warning(
'Local translation but do not have a global term_id for %s', label)
elif label is not None and label in GLOBALTT:
term_id = GLOBALTT[label]
else:
LOG.error('Do not have any mapping for label: ' + label)
return term_id |
def compare_digest(a, b):
"""
PyJWT expects hmac.compare_digest to exist for all Python 3.x, however it was added in Python > 3.3
It has a fallback for Python 2.x but not for Pythons between 2.x and 3.3
Copied from: https://github.com/python/cpython/commit/6cea65555caf2716b4633827715004ab0291a282#diff-c49659257ec1b129707ce47a98adc96eL16
Returns the equivalent of 'a == b', but avoids content based short
circuiting to reduce the vulnerability to timing attacks.
"""
# Consistent timing matters more here than data type flexibility
if not (isinstance(a, bytes) and isinstance(b, bytes)):
raise TypeError("inputs must be bytes instances")
# We assume the length of the expected digest is public knowledge,
# thus this early return isn't leaking anything an attacker wouldn't
# already know
if len(a) != len(b):
return False
# We assume that integers in the bytes range are all cached,
# thus timing shouldn't vary much due to integer object creation
result = 0
for x, y in zip(a, b):
result |= x ^ y
return result == 0 | PyJWT expects hmac.compare_digest to exist for all Python 3.x, however it was added in Python > 3.3
It has a fallback for Python 2.x but not for Pythons between 2.x and 3.3
Copied from: https://github.com/python/cpython/commit/6cea65555caf2716b4633827715004ab0291a282#diff-c49659257ec1b129707ce47a98adc96eL16
Returns the equivalent of 'a == b', but avoids content based short
circuiting to reduce the vulnerability to timing attacks. | Below is the the instruction that describes the task:
### Input:
PyJWT expects hmac.compare_digest to exist for all Python 3.x, however it was added in Python > 3.3
It has a fallback for Python 2.x but not for Pythons between 2.x and 3.3
Copied from: https://github.com/python/cpython/commit/6cea65555caf2716b4633827715004ab0291a282#diff-c49659257ec1b129707ce47a98adc96eL16
Returns the equivalent of 'a == b', but avoids content based short
circuiting to reduce the vulnerability to timing attacks.
### Response:
def compare_digest(a, b):
"""
PyJWT expects hmac.compare_digest to exist for all Python 3.x, however it was added in Python > 3.3
It has a fallback for Python 2.x but not for Pythons between 2.x and 3.3
Copied from: https://github.com/python/cpython/commit/6cea65555caf2716b4633827715004ab0291a282#diff-c49659257ec1b129707ce47a98adc96eL16
Returns the equivalent of 'a == b', but avoids content based short
circuiting to reduce the vulnerability to timing attacks.
"""
# Consistent timing matters more here than data type flexibility
if not (isinstance(a, bytes) and isinstance(b, bytes)):
raise TypeError("inputs must be bytes instances")
# We assume the length of the expected digest is public knowledge,
# thus this early return isn't leaking anything an attacker wouldn't
# already know
if len(a) != len(b):
return False
# We assume that integers in the bytes range are all cached,
# thus timing shouldn't vary much due to integer object creation
result = 0
for x, y in zip(a, b):
result |= x ^ y
return result == 0 |
def _handle_status(self, key, value):
"""Parse a status code from the attached GnuPG process.
:raises: :exc:`~exceptions.ValueError` if the status message is unknown.
"""
if key in ("DELETE_PROBLEM", "KEY_CONSIDERED"):
self.status = self.problem_reason.get(value, "Unknown error: %r"
% value)
else:
raise ValueError("Unknown status message: %r" % key) | Parse a status code from the attached GnuPG process.
:raises: :exc:`~exceptions.ValueError` if the status message is unknown. | Below is the the instruction that describes the task:
### Input:
Parse a status code from the attached GnuPG process.
:raises: :exc:`~exceptions.ValueError` if the status message is unknown.
### Response:
def _handle_status(self, key, value):
"""Parse a status code from the attached GnuPG process.
:raises: :exc:`~exceptions.ValueError` if the status message is unknown.
"""
if key in ("DELETE_PROBLEM", "KEY_CONSIDERED"):
self.status = self.problem_reason.get(value, "Unknown error: %r"
% value)
else:
raise ValueError("Unknown status message: %r" % key) |
def delete_mapping(self, index, doc_type):
"""
Delete a typed JSON document type from a specific index.
(See :ref:`es-guide-reference-api-admin-indices-delete-mapping`)
"""
path = make_path(index, doc_type)
return self.conn._send_request('DELETE', path) | Delete a typed JSON document type from a specific index.
(See :ref:`es-guide-reference-api-admin-indices-delete-mapping`) | Below is the the instruction that describes the task:
### Input:
Delete a typed JSON document type from a specific index.
(See :ref:`es-guide-reference-api-admin-indices-delete-mapping`)
### Response:
def delete_mapping(self, index, doc_type):
"""
Delete a typed JSON document type from a specific index.
(See :ref:`es-guide-reference-api-admin-indices-delete-mapping`)
"""
path = make_path(index, doc_type)
return self.conn._send_request('DELETE', path) |
def run_check(self, template_name=None, service_dir=None):
" Run checking scripts. "
print_header('Check requirements', sep='-')
map(lambda cmd: call("bash %s" % cmd), self._gen_scripts(
'check', template_name=template_name, service_dir=service_dir))
return True | Run checking scripts. | Below is the the instruction that describes the task:
### Input:
Run checking scripts.
### Response:
def run_check(self, template_name=None, service_dir=None):
" Run checking scripts. "
print_header('Check requirements', sep='-')
map(lambda cmd: call("bash %s" % cmd), self._gen_scripts(
'check', template_name=template_name, service_dir=service_dir))
return True |
def __update(self, task_source):
""" Recheck next start of tasks from the given one only
:param task_source: source to check
:return: None
"""
next_start = task_source.next_start()
if next_start is not None:
if next_start.tzinfo is None or next_start.tzinfo != timezone.utc:
raise ValueError('Invalid timezone information')
if self.__next_start is None or next_start < self.__next_start:
self.__next_start = next_start
self.__next_sources = [task_source]
elif next_start == self.__next_start:
self.__next_sources.append(task_source) | Recheck next start of tasks from the given one only
:param task_source: source to check
:return: None | Below is the the instruction that describes the task:
### Input:
Recheck next start of tasks from the given one only
:param task_source: source to check
:return: None
### Response:
def __update(self, task_source):
""" Recheck next start of tasks from the given one only
:param task_source: source to check
:return: None
"""
next_start = task_source.next_start()
if next_start is not None:
if next_start.tzinfo is None or next_start.tzinfo != timezone.utc:
raise ValueError('Invalid timezone information')
if self.__next_start is None or next_start < self.__next_start:
self.__next_start = next_start
self.__next_sources = [task_source]
elif next_start == self.__next_start:
self.__next_sources.append(task_source) |
def calculate_new_length(gene_split, gene_results, hit):
''' Function for calcualting new length if the gene is split on several
contigs
'''
# Looping over splitted hits and calculate new length
first = 1
for split in gene_split[hit['sbjct_header']]:
new_start = int(gene_results[split]['sbjct_start'])
new_end = int(gene_results[split]['sbjct_end'])
# Get the frist HSP
if first == 1:
new_length = int(gene_results[split]['HSP_length'])
old_start = new_start
old_end = new_end
first = 0
continue
if new_start < old_start:
new_length = new_length + (old_start - new_start)
old_start = new_start
if new_end > old_end:
new_length = new_length + (new_end - old_end)
old_end = new_end
return(new_length) | Function for calcualting new length if the gene is split on several
contigs | Below is the the instruction that describes the task:
### Input:
Function for calcualting new length if the gene is split on several
contigs
### Response:
def calculate_new_length(gene_split, gene_results, hit):
''' Function for calcualting new length if the gene is split on several
contigs
'''
# Looping over splitted hits and calculate new length
first = 1
for split in gene_split[hit['sbjct_header']]:
new_start = int(gene_results[split]['sbjct_start'])
new_end = int(gene_results[split]['sbjct_end'])
# Get the frist HSP
if first == 1:
new_length = int(gene_results[split]['HSP_length'])
old_start = new_start
old_end = new_end
first = 0
continue
if new_start < old_start:
new_length = new_length + (old_start - new_start)
old_start = new_start
if new_end > old_end:
new_length = new_length + (new_end - old_end)
old_end = new_end
return(new_length) |
def set_start(self,time,pass_to_command_line=True):
"""
Set the GPS start time of the analysis node by setting a --gps-start-time
option to the node when it is executed.
@param time: GPS start time of job.
@bool pass_to_command_line: add gps-start-time as variable option.
"""
if pass_to_command_line:
self.add_var_opt('gps-start-time',time)
self.__start = time
self.__data_start = time | Set the GPS start time of the analysis node by setting a --gps-start-time
option to the node when it is executed.
@param time: GPS start time of job.
@bool pass_to_command_line: add gps-start-time as variable option. | Below is the the instruction that describes the task:
### Input:
Set the GPS start time of the analysis node by setting a --gps-start-time
option to the node when it is executed.
@param time: GPS start time of job.
@bool pass_to_command_line: add gps-start-time as variable option.
### Response:
def set_start(self,time,pass_to_command_line=True):
"""
Set the GPS start time of the analysis node by setting a --gps-start-time
option to the node when it is executed.
@param time: GPS start time of job.
@bool pass_to_command_line: add gps-start-time as variable option.
"""
if pass_to_command_line:
self.add_var_opt('gps-start-time',time)
self.__start = time
self.__data_start = time |
def query_text():
"""Get a list of DDOs that match with the given text.
---
tags:
- ddo
parameters:
- name: text
in: query
description: ID of the asset.
required: true
type: string
- name: sort
in: query
type: object
description: Key or list of keys to sort the result
example: {"value":1}
- name: offset
in: query
type: int
description: Number of records per page
example: 100
- name: page
in: query
type: int
description: Page showed
example: 1
responses:
200:
description: successful action
"""
data = request.args
assert isinstance(data, dict), 'invalid `args` type, should already formatted into a dict.'
search_model = FullTextModel(text=data.get('text', None),
sort=None if data.get('sort', None) is None else json.loads(
data.get('sort', None)),
offset=int(data.get('offset', 100)),
page=int(data.get('page', 1)))
query_result = dao.query(search_model)
for i in query_result[0]:
_sanitize_record(i)
response = _make_paginate_response(query_result, search_model)
return Response(json.dumps(response, default=_my_converter), 200,
content_type='application/json') | Get a list of DDOs that match with the given text.
---
tags:
- ddo
parameters:
- name: text
in: query
description: ID of the asset.
required: true
type: string
- name: sort
in: query
type: object
description: Key or list of keys to sort the result
example: {"value":1}
- name: offset
in: query
type: int
description: Number of records per page
example: 100
- name: page
in: query
type: int
description: Page showed
example: 1
responses:
200:
description: successful action | Below is the the instruction that describes the task:
### Input:
Get a list of DDOs that match with the given text.
---
tags:
- ddo
parameters:
- name: text
in: query
description: ID of the asset.
required: true
type: string
- name: sort
in: query
type: object
description: Key or list of keys to sort the result
example: {"value":1}
- name: offset
in: query
type: int
description: Number of records per page
example: 100
- name: page
in: query
type: int
description: Page showed
example: 1
responses:
200:
description: successful action
### Response:
def query_text():
"""Get a list of DDOs that match with the given text.
---
tags:
- ddo
parameters:
- name: text
in: query
description: ID of the asset.
required: true
type: string
- name: sort
in: query
type: object
description: Key or list of keys to sort the result
example: {"value":1}
- name: offset
in: query
type: int
description: Number of records per page
example: 100
- name: page
in: query
type: int
description: Page showed
example: 1
responses:
200:
description: successful action
"""
data = request.args
assert isinstance(data, dict), 'invalid `args` type, should already formatted into a dict.'
search_model = FullTextModel(text=data.get('text', None),
sort=None if data.get('sort', None) is None else json.loads(
data.get('sort', None)),
offset=int(data.get('offset', 100)),
page=int(data.get('page', 1)))
query_result = dao.query(search_model)
for i in query_result[0]:
_sanitize_record(i)
response = _make_paginate_response(query_result, search_model)
return Response(json.dumps(response, default=_my_converter), 200,
content_type='application/json') |
def geo_max_distance(left, right):
"""Returns the 2-dimensional maximum distance between two geometries in
projected units. If g1 and g2 is the same geometry the function will
return the distance between the two vertices most far from each other
in that geometry
Parameters
----------
left : geometry
right : geometry
Returns
-------
MaxDistance : double scalar
"""
op = ops.GeoMaxDistance(left, right)
return op.to_expr() | Returns the 2-dimensional maximum distance between two geometries in
projected units. If g1 and g2 is the same geometry the function will
return the distance between the two vertices most far from each other
in that geometry
Parameters
----------
left : geometry
right : geometry
Returns
-------
MaxDistance : double scalar | Below is the the instruction that describes the task:
### Input:
Returns the 2-dimensional maximum distance between two geometries in
projected units. If g1 and g2 is the same geometry the function will
return the distance between the two vertices most far from each other
in that geometry
Parameters
----------
left : geometry
right : geometry
Returns
-------
MaxDistance : double scalar
### Response:
def geo_max_distance(left, right):
"""Returns the 2-dimensional maximum distance between two geometries in
projected units. If g1 and g2 is the same geometry the function will
return the distance between the two vertices most far from each other
in that geometry
Parameters
----------
left : geometry
right : geometry
Returns
-------
MaxDistance : double scalar
"""
op = ops.GeoMaxDistance(left, right)
return op.to_expr() |
def register(self, event, callable, priority=10):
"""Register interest in an event.
event: name of the event (str)
callable: the callable to be used as a callback function
Returns an EventReceiver object. To unregister interest, simply
delete the object."""
logger.debug('registered: ' + event + ': ' + repr(callable) + ' [' + repr(self) + ']')
return EventReceiver(event, callable, manager=self, priority=priority) | Register interest in an event.
event: name of the event (str)
callable: the callable to be used as a callback function
Returns an EventReceiver object. To unregister interest, simply
delete the object. | Below is the the instruction that describes the task:
### Input:
Register interest in an event.
event: name of the event (str)
callable: the callable to be used as a callback function
Returns an EventReceiver object. To unregister interest, simply
delete the object.
### Response:
def register(self, event, callable, priority=10):
"""Register interest in an event.
event: name of the event (str)
callable: the callable to be used as a callback function
Returns an EventReceiver object. To unregister interest, simply
delete the object."""
logger.debug('registered: ' + event + ': ' + repr(callable) + ' [' + repr(self) + ']')
return EventReceiver(event, callable, manager=self, priority=priority) |
async def on_raw_notice(self, message):
""" Modify NOTICE to redirect CTCP messages. """
nick, metadata = self._parse_user(message.source)
target, msg = message.params
if is_ctcp(msg):
self._sync_user(nick, metadata)
type, response = parse_ctcp(msg)
# Find dedicated handler if it exists.
attr = 'on_ctcp_' + pydle.protocol.identifierify(type) + '_reply'
if hasattr(self, attr):
await getattr(self, attr)(user, target, response)
# Invoke global handler.
await self.on_ctcp_reply(user, target, type, response)
else:
await super().on_raw_notice(message) | Modify NOTICE to redirect CTCP messages. | Below is the the instruction that describes the task:
### Input:
Modify NOTICE to redirect CTCP messages.
### Response:
async def on_raw_notice(self, message):
""" Modify NOTICE to redirect CTCP messages. """
nick, metadata = self._parse_user(message.source)
target, msg = message.params
if is_ctcp(msg):
self._sync_user(nick, metadata)
type, response = parse_ctcp(msg)
# Find dedicated handler if it exists.
attr = 'on_ctcp_' + pydle.protocol.identifierify(type) + '_reply'
if hasattr(self, attr):
await getattr(self, attr)(user, target, response)
# Invoke global handler.
await self.on_ctcp_reply(user, target, type, response)
else:
await super().on_raw_notice(message) |
def remove_service_checks(self, service_id):
"""
Remove all checks from a service.
"""
from hypermap.aggregator.models import Service
service = Service.objects.get(id=service_id)
service.check_set.all().delete()
layer_to_process = service.layer_set.all()
for layer in layer_to_process:
layer.check_set.all().delete() | Remove all checks from a service. | Below is the the instruction that describes the task:
### Input:
Remove all checks from a service.
### Response:
def remove_service_checks(self, service_id):
"""
Remove all checks from a service.
"""
from hypermap.aggregator.models import Service
service = Service.objects.get(id=service_id)
service.check_set.all().delete()
layer_to_process = service.layer_set.all()
for layer in layer_to_process:
layer.check_set.all().delete() |
def validate(cls, partial=True, **kwargs):
"""
Validate kwargs before setting attributes on the model
"""
data = kwargs
if not partial:
data = dict(**kwargs, **{col.name: None for col in cls.__table__.c
if col.name not in kwargs})
errors = defaultdict(list)
for name, value in data.items():
for validator in cls._get_validators(name):
try:
validator(value)
except ValidationError as e:
e.model = cls
e.column = name
errors[name].append(str(e))
if errors:
raise ValidationErrors(errors) | Validate kwargs before setting attributes on the model | Below is the the instruction that describes the task:
### Input:
Validate kwargs before setting attributes on the model
### Response:
def validate(cls, partial=True, **kwargs):
"""
Validate kwargs before setting attributes on the model
"""
data = kwargs
if not partial:
data = dict(**kwargs, **{col.name: None for col in cls.__table__.c
if col.name not in kwargs})
errors = defaultdict(list)
for name, value in data.items():
for validator in cls._get_validators(name):
try:
validator(value)
except ValidationError as e:
e.model = cls
e.column = name
errors[name].append(str(e))
if errors:
raise ValidationErrors(errors) |
def write_user(user):
"""Write an user ot the database.
:param User user: User to write
"""
udata = {}
for f in USER_FIELDS:
udata[f] = getattr(user, f)
for p in PERMISSIONS:
udata[p] = '1' if getattr(user, p) else '0'
db.hmset('user:{0}'.format(user.uid), udata) | Write an user ot the database.
:param User user: User to write | Below is the the instruction that describes the task:
### Input:
Write an user ot the database.
:param User user: User to write
### Response:
def write_user(user):
"""Write an user ot the database.
:param User user: User to write
"""
udata = {}
for f in USER_FIELDS:
udata[f] = getattr(user, f)
for p in PERMISSIONS:
udata[p] = '1' if getattr(user, p) else '0'
db.hmset('user:{0}'.format(user.uid), udata) |
def copy(self, items=None):
"""Return a new NGram object with the same settings, and
referencing the same items. Copy is shallow in that
each item is not recursively copied. Optionally specify
alternate items to populate the copy.
>>> from ngram import NGram
>>> from copy import deepcopy
>>> n = NGram(['eggs', 'spam'])
>>> m = n.copy()
>>> m.add('ham')
>>> sorted(list(n))
['eggs', 'spam']
>>> sorted(list(m))
['eggs', 'ham', 'spam']
>>> p = n.copy(['foo', 'bar'])
>>> sorted(list(p))
['bar', 'foo']
"""
return NGram(items if items is not None else self,
self.threshold, self.warp, self._key,
self.N, self._pad_len, self._pad_char) | Return a new NGram object with the same settings, and
referencing the same items. Copy is shallow in that
each item is not recursively copied. Optionally specify
alternate items to populate the copy.
>>> from ngram import NGram
>>> from copy import deepcopy
>>> n = NGram(['eggs', 'spam'])
>>> m = n.copy()
>>> m.add('ham')
>>> sorted(list(n))
['eggs', 'spam']
>>> sorted(list(m))
['eggs', 'ham', 'spam']
>>> p = n.copy(['foo', 'bar'])
>>> sorted(list(p))
['bar', 'foo'] | Below is the the instruction that describes the task:
### Input:
Return a new NGram object with the same settings, and
referencing the same items. Copy is shallow in that
each item is not recursively copied. Optionally specify
alternate items to populate the copy.
>>> from ngram import NGram
>>> from copy import deepcopy
>>> n = NGram(['eggs', 'spam'])
>>> m = n.copy()
>>> m.add('ham')
>>> sorted(list(n))
['eggs', 'spam']
>>> sorted(list(m))
['eggs', 'ham', 'spam']
>>> p = n.copy(['foo', 'bar'])
>>> sorted(list(p))
['bar', 'foo']
### Response:
def copy(self, items=None):
"""Return a new NGram object with the same settings, and
referencing the same items. Copy is shallow in that
each item is not recursively copied. Optionally specify
alternate items to populate the copy.
>>> from ngram import NGram
>>> from copy import deepcopy
>>> n = NGram(['eggs', 'spam'])
>>> m = n.copy()
>>> m.add('ham')
>>> sorted(list(n))
['eggs', 'spam']
>>> sorted(list(m))
['eggs', 'ham', 'spam']
>>> p = n.copy(['foo', 'bar'])
>>> sorted(list(p))
['bar', 'foo']
"""
return NGram(items if items is not None else self,
self.threshold, self.warp, self._key,
self.N, self._pad_len, self._pad_char) |
def _generic_mixer(slice1, slice2, mixer_name, **kwargs):
"""
Generic mixer to process two slices with appropriate mixer
and return the composite to be displayed.
"""
mixer_name = mixer_name.lower()
if mixer_name in ['color_mix', 'rgb']:
mixed = _mix_color(slice1, slice2, **kwargs)
cmap = None # data is already RGB-ed
elif mixer_name in ['checkerboard', 'checker', 'cb', 'checker_board']:
checkers = _get_checkers(slice1.shape, **kwargs)
mixed = _checker_mixer(slice1, slice2, checkers)
cmap = 'gray'
elif mixer_name in ['diff', 'voxelwise_diff', 'vdiff']:
mixed, cmap = _diff_image(slice1, slice2, **kwargs)
# if kwargs['overlay_image'] is True:
# diff_cmap = diff_colormap()
# plt.imshow(slice1, alpha=kwargs['overlay_alpha'], **display_params)
# plt.hold(True)
# plt.imshow(mixed,
# cmap=diff_cmap,
# vmin=min_value, vmax=max_value,
# **display_params)
# else:
# plt.imshow(mixed, cmap=cmap,
# vmin=min_value, vmax=max_value,
# **display_params)
else:
raise ValueError('Invalid mixer name chosen.')
disp_params = dict(cmap=cmap)
return mixed, disp_params | Generic mixer to process two slices with appropriate mixer
and return the composite to be displayed. | Below is the the instruction that describes the task:
### Input:
Generic mixer to process two slices with appropriate mixer
and return the composite to be displayed.
### Response:
def _generic_mixer(slice1, slice2, mixer_name, **kwargs):
"""
Generic mixer to process two slices with appropriate mixer
and return the composite to be displayed.
"""
mixer_name = mixer_name.lower()
if mixer_name in ['color_mix', 'rgb']:
mixed = _mix_color(slice1, slice2, **kwargs)
cmap = None # data is already RGB-ed
elif mixer_name in ['checkerboard', 'checker', 'cb', 'checker_board']:
checkers = _get_checkers(slice1.shape, **kwargs)
mixed = _checker_mixer(slice1, slice2, checkers)
cmap = 'gray'
elif mixer_name in ['diff', 'voxelwise_diff', 'vdiff']:
mixed, cmap = _diff_image(slice1, slice2, **kwargs)
# if kwargs['overlay_image'] is True:
# diff_cmap = diff_colormap()
# plt.imshow(slice1, alpha=kwargs['overlay_alpha'], **display_params)
# plt.hold(True)
# plt.imshow(mixed,
# cmap=diff_cmap,
# vmin=min_value, vmax=max_value,
# **display_params)
# else:
# plt.imshow(mixed, cmap=cmap,
# vmin=min_value, vmax=max_value,
# **display_params)
else:
raise ValueError('Invalid mixer name chosen.')
disp_params = dict(cmap=cmap)
return mixed, disp_params |
def create_interface_method_ref(self, class_: str, if_method: str,
descriptor: str) -> InterfaceMethodRef:
"""
Creates a new :class:`ConstantInterfaceMethodRef`, adding it to the
pool and returning it.
:param class_: The name of the class to which `if_method` belongs.
:param if_method: The name of the interface method.
:param descriptor: The descriptor for `if_method`.
"""
self.append((
11,
self.create_class(class_).index,
self.create_name_and_type(if_method, descriptor).index
))
return self.get(self.raw_count - 1) | Creates a new :class:`ConstantInterfaceMethodRef`, adding it to the
pool and returning it.
:param class_: The name of the class to which `if_method` belongs.
:param if_method: The name of the interface method.
:param descriptor: The descriptor for `if_method`. | Below is the the instruction that describes the task:
### Input:
Creates a new :class:`ConstantInterfaceMethodRef`, adding it to the
pool and returning it.
:param class_: The name of the class to which `if_method` belongs.
:param if_method: The name of the interface method.
:param descriptor: The descriptor for `if_method`.
### Response:
def create_interface_method_ref(self, class_: str, if_method: str,
descriptor: str) -> InterfaceMethodRef:
"""
Creates a new :class:`ConstantInterfaceMethodRef`, adding it to the
pool and returning it.
:param class_: The name of the class to which `if_method` belongs.
:param if_method: The name of the interface method.
:param descriptor: The descriptor for `if_method`.
"""
self.append((
11,
self.create_class(class_).index,
self.create_name_and_type(if_method, descriptor).index
))
return self.get(self.raw_count - 1) |
def create_authz_decision_query_using_assertion(self, destination,
assertion, action=None,
resource=None,
subject=None, message_id=0,
consent=None,
extensions=None,
sign=False, nsprefix=None):
""" Makes an authz decision query based on a previously received
Assertion.
:param destination: The IdP endpoint to send the request to
:param assertion: An Assertion instance
:param action: The action you want to perform (has to be at least one)
:param resource: The resource you want to perform the action on
:param subject: Who wants to do the thing
:param message_id: Message identifier
:param consent: If the principal gave her consent to this request
:param extensions: Possible request extensions
:param sign: Whether the request should be signed or not.
:return: AuthzDecisionQuery instance
"""
if action:
if isinstance(action, six.string_types):
_action = [saml.Action(text=action)]
else:
_action = [saml.Action(text=a) for a in action]
else:
_action = None
return self.create_authz_decision_query(
destination, _action, saml.Evidence(assertion=assertion),
resource, subject, message_id=message_id, consent=consent,
extensions=extensions, sign=sign, nsprefix=nsprefix) | Makes an authz decision query based on a previously received
Assertion.
:param destination: The IdP endpoint to send the request to
:param assertion: An Assertion instance
:param action: The action you want to perform (has to be at least one)
:param resource: The resource you want to perform the action on
:param subject: Who wants to do the thing
:param message_id: Message identifier
:param consent: If the principal gave her consent to this request
:param extensions: Possible request extensions
:param sign: Whether the request should be signed or not.
:return: AuthzDecisionQuery instance | Below is the the instruction that describes the task:
### Input:
Makes an authz decision query based on a previously received
Assertion.
:param destination: The IdP endpoint to send the request to
:param assertion: An Assertion instance
:param action: The action you want to perform (has to be at least one)
:param resource: The resource you want to perform the action on
:param subject: Who wants to do the thing
:param message_id: Message identifier
:param consent: If the principal gave her consent to this request
:param extensions: Possible request extensions
:param sign: Whether the request should be signed or not.
:return: AuthzDecisionQuery instance
### Response:
def create_authz_decision_query_using_assertion(self, destination,
assertion, action=None,
resource=None,
subject=None, message_id=0,
consent=None,
extensions=None,
sign=False, nsprefix=None):
""" Makes an authz decision query based on a previously received
Assertion.
:param destination: The IdP endpoint to send the request to
:param assertion: An Assertion instance
:param action: The action you want to perform (has to be at least one)
:param resource: The resource you want to perform the action on
:param subject: Who wants to do the thing
:param message_id: Message identifier
:param consent: If the principal gave her consent to this request
:param extensions: Possible request extensions
:param sign: Whether the request should be signed or not.
:return: AuthzDecisionQuery instance
"""
if action:
if isinstance(action, six.string_types):
_action = [saml.Action(text=action)]
else:
_action = [saml.Action(text=a) for a in action]
else:
_action = None
return self.create_authz_decision_query(
destination, _action, saml.Evidence(assertion=assertion),
resource, subject, message_id=message_id, consent=consent,
extensions=extensions, sign=sign, nsprefix=nsprefix) |
def find_frametype(channel, gpstime=None, frametype_match=None,
host=None, port=None, return_all=False, allow_tape=False,
connection=None, on_gaps='error'):
"""Find the frametype(s) that hold data for a given channel
Parameters
----------
channel : `str`, `~gwpy.detector.Channel`
the channel to be found
gpstime : `int`, optional
target GPS time at which to find correct type
frametype_match : `str`, optional
regular expression to use for frametype `str` matching
host : `str`, optional
name of datafind host to use
port : `int`, optional
port on datafind host to use
return_all : `bool`, optional, default: `False`
return all found types, default is to return to 'best' match
allow_tape : `bool`, optional, default: `False`
do not test types whose frame files are stored on tape (not on
spinning disk)
on_gaps : `str`, optional
action to take when gaps are discovered in datafind coverage,
default: ``'error'``, i.e. don't match frametypes with gaps.
Select ``'ignore'`` to ignore gaps, or ``'warn'`` to display
warnings when gaps are found in a datafind `find_urls` query
Returns
-------
If a single name is given, and `return_all=False` (default):
frametype : `str`
name of best match frame type
If a single name is given, and `return_all=True`:
types : `list` of `str`
the list of all matching frame types
If multiple names are given, the above return types are wrapped into a
`dict` of `(channel, type_or_list)` pairs.
Examples
--------
>>> from gwpy.io import datafind as io_datafind
>>> io_datafind.find_frametype('H1:IMC-PWR_IN_OUTPUT', gpstime=1126259462)
'H1_R'
>>> io_datafind.find_frametype('H1:IMC-PWR_IN_OUTPUT', gpstime=1126259462,
... return_all=True)
['H1_R', 'H1_C']
>>> io_datafind.find_frametype(
... ('H1:IMC-PWR_IN_OUTPUT', 'H1:OMC-DCPD_SUM_OUTPUT',
... 'H1:GDS-CALIB_STRAIN'),
... gpstime=1126259462, return_all=True))"
{'H1:GDS-CALIB_STRAIN': ['H1_HOFT_C00'],
'H1:OMC-DCPD_SUM_OUTPUT': ['H1_R', 'H1_C'],
'H1:IMC-PWR_IN_OUTPUT': ['H1_R', 'H1_C']}
"""
# this function is now horrendously complicated to support a large
# number of different use cases, hopefully the comments are sufficient
from ..detector import Channel
# format channel names as list
if isinstance(channel, (list, tuple)):
channels = channel
else:
channels = [channel]
# create set() of GWF channel names, and dict map back to user names
# this allows users to use nds-style names in this query, e.g.
# 'X1:TEST.mean,m-trend', and still get results
chans = {Channel(c).name: c for c in channels}
names = set(chans.keys())
# format GPS time(s)
if isinstance(gpstime, (list, tuple)):
from ..segments import Segment
gpssegment = Segment(*gpstime)
gpstime = gpssegment[0]
else:
gpssegment = None
if gpstime is not None:
gpstime = int(to_gps(gpstime))
# if use gaps post-S5 GPStime, forcibly skip _GRBYYMMDD frametypes at CIT
if frametype_match is None and gpstime is not None and gpstime > 875232014:
frametype_match = GRB_TYPE
# -- go
match = {}
ifos = set() # record IFOs we have queried to prevent duplication
# loop over set of names, which should get smaller as names are searched
while names:
# parse first channel name (to get IFO and channel type)
try:
name = next(iter(names))
except KeyError:
break
else:
chan = Channel(chans[name])
# parse IFO ID
try:
ifo = chan.ifo[0]
except TypeError: # chan.ifo is None
raise ValueError("Cannot parse interferometer prefix from channel "
"name %r, cannot proceed with find()" % str(chan))
# if we've already gone through the types for this IFO, skip
if ifo in ifos:
names.pop()
continue
ifos.add(ifo)
types = find_types(ifo, match=frametype_match, trend=chan.type,
connection=connection)
# loop over types testing each in turn
for ftype in types:
# find instance of this frametype
try:
path = find_latest(ifo, ftype, gpstime=gpstime,
allow_tape=allow_tape,
connection=connection, on_missing='ignore')
except (RuntimeError, IOError, IndexError): # something went wrong
continue
# check for gaps in the record for this type
if gpssegment is None:
gaps = 0
else:
cache = find_urls(ifo, ftype, *gpssegment, on_gaps=on_gaps,
connection=connection)
csegs = cache_segments(cache)
gaps = abs(gpssegment) - abs(csegs)
# search the TOC for one frame file and match any channels
i = 0
nchan = len(names)
for n in iter_channel_names(path):
if n in names:
i += 1
c = chans[n] # map back to user-given channel name
try:
match[c].append((ftype, path, -gaps))
except KeyError:
match[c] = [(ftype, path, -gaps)]
if not return_all: # match once only
names.remove(n)
if not names or n == nchan: # break out of TOC loop
break
if not names: # break out of ftype loop if all names matched
break
try:
names.pop()
except KeyError: # done
break
# raise exception if nothing found
missing = set(channels) - set(match.keys())
if missing:
msg = "Cannot locate the following channel(s) in any known frametype"
if gpstime:
msg += " at GPS=%d" % gpstime
msg += ":\n {}".format('\n '.join(missing))
if not allow_tape:
msg += ("\n[files on tape have not been checked, use "
"allow_tape=True for a complete search]")
raise ValueError(msg)
# if matching all types, rank based on coverage, tape, and TOC size
if return_all:
paths = set(p[1] for key in match for p in match[key])
rank = {path: (on_tape(path), num_channels(path)) for path in paths}
# deprioritise types on tape and those with lots of channels
for key in match:
match[key].sort(key=lambda x: (x[2],) + rank[x[1]])
# remove instance paths (just leave channel and list of frametypes)
ftypes = {key: list(list(zip(*match[key]))[0]) for key in match}
else:
ftypes = {key: match[key][0][0] for key in match}
# if given a list of channels, return a dict
if isinstance(channel, (list, tuple)):
return ftypes
# otherwise just return a list for this type
return ftypes[str(channel)] | Find the frametype(s) that hold data for a given channel
Parameters
----------
channel : `str`, `~gwpy.detector.Channel`
the channel to be found
gpstime : `int`, optional
target GPS time at which to find correct type
frametype_match : `str`, optional
regular expression to use for frametype `str` matching
host : `str`, optional
name of datafind host to use
port : `int`, optional
port on datafind host to use
return_all : `bool`, optional, default: `False`
return all found types, default is to return to 'best' match
allow_tape : `bool`, optional, default: `False`
do not test types whose frame files are stored on tape (not on
spinning disk)
on_gaps : `str`, optional
action to take when gaps are discovered in datafind coverage,
default: ``'error'``, i.e. don't match frametypes with gaps.
Select ``'ignore'`` to ignore gaps, or ``'warn'`` to display
warnings when gaps are found in a datafind `find_urls` query
Returns
-------
If a single name is given, and `return_all=False` (default):
frametype : `str`
name of best match frame type
If a single name is given, and `return_all=True`:
types : `list` of `str`
the list of all matching frame types
If multiple names are given, the above return types are wrapped into a
`dict` of `(channel, type_or_list)` pairs.
Examples
--------
>>> from gwpy.io import datafind as io_datafind
>>> io_datafind.find_frametype('H1:IMC-PWR_IN_OUTPUT', gpstime=1126259462)
'H1_R'
>>> io_datafind.find_frametype('H1:IMC-PWR_IN_OUTPUT', gpstime=1126259462,
... return_all=True)
['H1_R', 'H1_C']
>>> io_datafind.find_frametype(
... ('H1:IMC-PWR_IN_OUTPUT', 'H1:OMC-DCPD_SUM_OUTPUT',
... 'H1:GDS-CALIB_STRAIN'),
... gpstime=1126259462, return_all=True))"
{'H1:GDS-CALIB_STRAIN': ['H1_HOFT_C00'],
'H1:OMC-DCPD_SUM_OUTPUT': ['H1_R', 'H1_C'],
'H1:IMC-PWR_IN_OUTPUT': ['H1_R', 'H1_C']} | Below is the the instruction that describes the task:
### Input:
Find the frametype(s) that hold data for a given channel
Parameters
----------
channel : `str`, `~gwpy.detector.Channel`
the channel to be found
gpstime : `int`, optional
target GPS time at which to find correct type
frametype_match : `str`, optional
regular expression to use for frametype `str` matching
host : `str`, optional
name of datafind host to use
port : `int`, optional
port on datafind host to use
return_all : `bool`, optional, default: `False`
return all found types, default is to return to 'best' match
allow_tape : `bool`, optional, default: `False`
do not test types whose frame files are stored on tape (not on
spinning disk)
on_gaps : `str`, optional
action to take when gaps are discovered in datafind coverage,
default: ``'error'``, i.e. don't match frametypes with gaps.
Select ``'ignore'`` to ignore gaps, or ``'warn'`` to display
warnings when gaps are found in a datafind `find_urls` query
Returns
-------
If a single name is given, and `return_all=False` (default):
frametype : `str`
name of best match frame type
If a single name is given, and `return_all=True`:
types : `list` of `str`
the list of all matching frame types
If multiple names are given, the above return types are wrapped into a
`dict` of `(channel, type_or_list)` pairs.
Examples
--------
>>> from gwpy.io import datafind as io_datafind
>>> io_datafind.find_frametype('H1:IMC-PWR_IN_OUTPUT', gpstime=1126259462)
'H1_R'
>>> io_datafind.find_frametype('H1:IMC-PWR_IN_OUTPUT', gpstime=1126259462,
... return_all=True)
['H1_R', 'H1_C']
>>> io_datafind.find_frametype(
... ('H1:IMC-PWR_IN_OUTPUT', 'H1:OMC-DCPD_SUM_OUTPUT',
... 'H1:GDS-CALIB_STRAIN'),
... gpstime=1126259462, return_all=True))"
{'H1:GDS-CALIB_STRAIN': ['H1_HOFT_C00'],
'H1:OMC-DCPD_SUM_OUTPUT': ['H1_R', 'H1_C'],
'H1:IMC-PWR_IN_OUTPUT': ['H1_R', 'H1_C']}
### Response:
def find_frametype(channel, gpstime=None, frametype_match=None,
host=None, port=None, return_all=False, allow_tape=False,
connection=None, on_gaps='error'):
"""Find the frametype(s) that hold data for a given channel
Parameters
----------
channel : `str`, `~gwpy.detector.Channel`
the channel to be found
gpstime : `int`, optional
target GPS time at which to find correct type
frametype_match : `str`, optional
regular expression to use for frametype `str` matching
host : `str`, optional
name of datafind host to use
port : `int`, optional
port on datafind host to use
return_all : `bool`, optional, default: `False`
return all found types, default is to return to 'best' match
allow_tape : `bool`, optional, default: `False`
do not test types whose frame files are stored on tape (not on
spinning disk)
on_gaps : `str`, optional
action to take when gaps are discovered in datafind coverage,
default: ``'error'``, i.e. don't match frametypes with gaps.
Select ``'ignore'`` to ignore gaps, or ``'warn'`` to display
warnings when gaps are found in a datafind `find_urls` query
Returns
-------
If a single name is given, and `return_all=False` (default):
frametype : `str`
name of best match frame type
If a single name is given, and `return_all=True`:
types : `list` of `str`
the list of all matching frame types
If multiple names are given, the above return types are wrapped into a
`dict` of `(channel, type_or_list)` pairs.
Examples
--------
>>> from gwpy.io import datafind as io_datafind
>>> io_datafind.find_frametype('H1:IMC-PWR_IN_OUTPUT', gpstime=1126259462)
'H1_R'
>>> io_datafind.find_frametype('H1:IMC-PWR_IN_OUTPUT', gpstime=1126259462,
... return_all=True)
['H1_R', 'H1_C']
>>> io_datafind.find_frametype(
... ('H1:IMC-PWR_IN_OUTPUT', 'H1:OMC-DCPD_SUM_OUTPUT',
... 'H1:GDS-CALIB_STRAIN'),
... gpstime=1126259462, return_all=True))"
{'H1:GDS-CALIB_STRAIN': ['H1_HOFT_C00'],
'H1:OMC-DCPD_SUM_OUTPUT': ['H1_R', 'H1_C'],
'H1:IMC-PWR_IN_OUTPUT': ['H1_R', 'H1_C']}
"""
# this function is now horrendously complicated to support a large
# number of different use cases, hopefully the comments are sufficient
from ..detector import Channel
# format channel names as list
if isinstance(channel, (list, tuple)):
channels = channel
else:
channels = [channel]
# create set() of GWF channel names, and dict map back to user names
# this allows users to use nds-style names in this query, e.g.
# 'X1:TEST.mean,m-trend', and still get results
chans = {Channel(c).name: c for c in channels}
names = set(chans.keys())
# format GPS time(s)
if isinstance(gpstime, (list, tuple)):
from ..segments import Segment
gpssegment = Segment(*gpstime)
gpstime = gpssegment[0]
else:
gpssegment = None
if gpstime is not None:
gpstime = int(to_gps(gpstime))
# if use gaps post-S5 GPStime, forcibly skip _GRBYYMMDD frametypes at CIT
if frametype_match is None and gpstime is not None and gpstime > 875232014:
frametype_match = GRB_TYPE
# -- go
match = {}
ifos = set() # record IFOs we have queried to prevent duplication
# loop over set of names, which should get smaller as names are searched
while names:
# parse first channel name (to get IFO and channel type)
try:
name = next(iter(names))
except KeyError:
break
else:
chan = Channel(chans[name])
# parse IFO ID
try:
ifo = chan.ifo[0]
except TypeError: # chan.ifo is None
raise ValueError("Cannot parse interferometer prefix from channel "
"name %r, cannot proceed with find()" % str(chan))
# if we've already gone through the types for this IFO, skip
if ifo in ifos:
names.pop()
continue
ifos.add(ifo)
types = find_types(ifo, match=frametype_match, trend=chan.type,
connection=connection)
# loop over types testing each in turn
for ftype in types:
# find instance of this frametype
try:
path = find_latest(ifo, ftype, gpstime=gpstime,
allow_tape=allow_tape,
connection=connection, on_missing='ignore')
except (RuntimeError, IOError, IndexError): # something went wrong
continue
# check for gaps in the record for this type
if gpssegment is None:
gaps = 0
else:
cache = find_urls(ifo, ftype, *gpssegment, on_gaps=on_gaps,
connection=connection)
csegs = cache_segments(cache)
gaps = abs(gpssegment) - abs(csegs)
# search the TOC for one frame file and match any channels
i = 0
nchan = len(names)
for n in iter_channel_names(path):
if n in names:
i += 1
c = chans[n] # map back to user-given channel name
try:
match[c].append((ftype, path, -gaps))
except KeyError:
match[c] = [(ftype, path, -gaps)]
if not return_all: # match once only
names.remove(n)
if not names or n == nchan: # break out of TOC loop
break
if not names: # break out of ftype loop if all names matched
break
try:
names.pop()
except KeyError: # done
break
# raise exception if nothing found
missing = set(channels) - set(match.keys())
if missing:
msg = "Cannot locate the following channel(s) in any known frametype"
if gpstime:
msg += " at GPS=%d" % gpstime
msg += ":\n {}".format('\n '.join(missing))
if not allow_tape:
msg += ("\n[files on tape have not been checked, use "
"allow_tape=True for a complete search]")
raise ValueError(msg)
# if matching all types, rank based on coverage, tape, and TOC size
if return_all:
paths = set(p[1] for key in match for p in match[key])
rank = {path: (on_tape(path), num_channels(path)) for path in paths}
# deprioritise types on tape and those with lots of channels
for key in match:
match[key].sort(key=lambda x: (x[2],) + rank[x[1]])
# remove instance paths (just leave channel and list of frametypes)
ftypes = {key: list(list(zip(*match[key]))[0]) for key in match}
else:
ftypes = {key: match[key][0][0] for key in match}
# if given a list of channels, return a dict
if isinstance(channel, (list, tuple)):
return ftypes
# otherwise just return a list for this type
return ftypes[str(channel)] |
def apply(self, spectrum, plot=False):
"""
Apply the filter to the given [W, F], or [W, F, E] spectrum
Parameters
----------
spectrum: array-like
The wavelength [um] and flux of the spectrum
to apply the filter to
plot: bool
Plot the original and filtered spectrum
Returns
-------
np.ndarray
The filtered spectrum and error
"""
# Convert to filter units if possible
f_units = 1.
if hasattr(spectrum[0], 'unit'):
spectrum[0] = spectrum[0].to(self.wave_units)
if hasattr(spectrum[1], 'unit'):
spectrum[1] = spectrum[1].to(self.flux_units)
f_units = self.flux_units
if len(spectrum) >= 3 and hasattr(spectrum[2], 'unit'):
spectrum[2] = spectrum[2].to(self.flux_units)
# Make into iterable arrays
wav, flx, *err = [np.asarray(i) for i in spectrum]
# Check for error array
if len(err) == 0:
err = np.ones_like(flx)*np.nan
unc = False
else:
err = err[0]
unc = True
# Make flux 2D
if len(flx.shape) == 1:
flx = np.expand_dims(flx, axis=0)
err = np.expand_dims(err, axis=0)
# Make throughput 3D
rsr = np.copy(self.rsr)
# Make empty filtered arrays
filtered_flx = np.zeros((rsr.shape[0], flx.shape[0], rsr.shape[2]))
filtered_err = np.zeros_like(filtered_flx)
# Rebin the input spectra to the filter wavelength array
# and apply the RSR curve to the spectrum
for i, bn in enumerate(rsr):
for j, (f, e) in enumerate(zip(flx, err)):
filtered_flx[i][j] = np.interp(bn[0], wav, f, left=np.nan, right=np.nan)*bn[1]
filtered_err[i][j] = np.interp(bn[0], wav, e, left=np.nan, right=np.nan)*bn[1]
# Propagate the filter systematic uncertainties
if unc:
filtered_err += filtered_flx*self.systematics
if plot:
# Make the figure
COLORS = color_gen('Category10')
xlab = 'Wavelength [{}]'.format(self.wave_units)
ylab = 'Flux Density [{}]'.format(self.flux_units)
fig = figure(title=self.filterID, x_axis_label=xlab, y_axis_label=ylab)
# Plot the unfiltered spectrum
fig.line(wav, flx[0], legend='Input spectrum', color='black')
# Plot the uncertainties
if unc:
band_x = np.append(wav, wav[::-1])
band_y = np.append(flx-err, (flx+err)[::-1])
fig.patch(band_x, band_y, color='black', fill_alpha=0.1, line_alpha=0)
# Plot each spectrum bin
for wav, bn, bne in zip(self.wave, filtered_flx, filtered_err):
color = next(COLORS)
fig.line(wav, bn[0], color=color)
# Plot the uncertainties
if unc:
band_x = np.append(wav, wav[::-1])
band_y = np.append(bn[0]-bne[0], (bn[0]+bne[0])[::-1])
fig.patch(band_x, band_y, color=color, fill_alpha=0.1, line_alpha=0)
show(fig)
return filtered_flx.squeeze()*f_units, filtered_err.squeeze()*f_units | Apply the filter to the given [W, F], or [W, F, E] spectrum
Parameters
----------
spectrum: array-like
The wavelength [um] and flux of the spectrum
to apply the filter to
plot: bool
Plot the original and filtered spectrum
Returns
-------
np.ndarray
The filtered spectrum and error | Below is the the instruction that describes the task:
### Input:
Apply the filter to the given [W, F], or [W, F, E] spectrum
Parameters
----------
spectrum: array-like
The wavelength [um] and flux of the spectrum
to apply the filter to
plot: bool
Plot the original and filtered spectrum
Returns
-------
np.ndarray
The filtered spectrum and error
### Response:
def apply(self, spectrum, plot=False):
"""
Apply the filter to the given [W, F], or [W, F, E] spectrum
Parameters
----------
spectrum: array-like
The wavelength [um] and flux of the spectrum
to apply the filter to
plot: bool
Plot the original and filtered spectrum
Returns
-------
np.ndarray
The filtered spectrum and error
"""
# Convert to filter units if possible
f_units = 1.
if hasattr(spectrum[0], 'unit'):
spectrum[0] = spectrum[0].to(self.wave_units)
if hasattr(spectrum[1], 'unit'):
spectrum[1] = spectrum[1].to(self.flux_units)
f_units = self.flux_units
if len(spectrum) >= 3 and hasattr(spectrum[2], 'unit'):
spectrum[2] = spectrum[2].to(self.flux_units)
# Make into iterable arrays
wav, flx, *err = [np.asarray(i) for i in spectrum]
# Check for error array
if len(err) == 0:
err = np.ones_like(flx)*np.nan
unc = False
else:
err = err[0]
unc = True
# Make flux 2D
if len(flx.shape) == 1:
flx = np.expand_dims(flx, axis=0)
err = np.expand_dims(err, axis=0)
# Make throughput 3D
rsr = np.copy(self.rsr)
# Make empty filtered arrays
filtered_flx = np.zeros((rsr.shape[0], flx.shape[0], rsr.shape[2]))
filtered_err = np.zeros_like(filtered_flx)
# Rebin the input spectra to the filter wavelength array
# and apply the RSR curve to the spectrum
for i, bn in enumerate(rsr):
for j, (f, e) in enumerate(zip(flx, err)):
filtered_flx[i][j] = np.interp(bn[0], wav, f, left=np.nan, right=np.nan)*bn[1]
filtered_err[i][j] = np.interp(bn[0], wav, e, left=np.nan, right=np.nan)*bn[1]
# Propagate the filter systematic uncertainties
if unc:
filtered_err += filtered_flx*self.systematics
if plot:
# Make the figure
COLORS = color_gen('Category10')
xlab = 'Wavelength [{}]'.format(self.wave_units)
ylab = 'Flux Density [{}]'.format(self.flux_units)
fig = figure(title=self.filterID, x_axis_label=xlab, y_axis_label=ylab)
# Plot the unfiltered spectrum
fig.line(wav, flx[0], legend='Input spectrum', color='black')
# Plot the uncertainties
if unc:
band_x = np.append(wav, wav[::-1])
band_y = np.append(flx-err, (flx+err)[::-1])
fig.patch(band_x, band_y, color='black', fill_alpha=0.1, line_alpha=0)
# Plot each spectrum bin
for wav, bn, bne in zip(self.wave, filtered_flx, filtered_err):
color = next(COLORS)
fig.line(wav, bn[0], color=color)
# Plot the uncertainties
if unc:
band_x = np.append(wav, wav[::-1])
band_y = np.append(bn[0]-bne[0], (bn[0]+bne[0])[::-1])
fig.patch(band_x, band_y, color=color, fill_alpha=0.1, line_alpha=0)
show(fig)
return filtered_flx.squeeze()*f_units, filtered_err.squeeze()*f_units |
def create_engine(url, con=None, header=True, show_progress=5.0, clear_progress=True):
'''Create a handler for query engine based on a URL.
The following environment variables are used for default connection:
TD_API_KEY API key
TD_API_SERVER API server (default: api.treasuredata.com)
HTTP_PROXY HTTP proxy (optional)
Parameters
----------
url : string
Engine descriptor in the form "type://apikey@host/database?params..."
Use shorthand notation "type:database?params..." for the default connection.
con : Connection, optional
Handler returned by connect. If not given, default connection is used.
header : string or boolean, default True
Prepend comment strings, in the form "-- comment", as a header of queries.
Set False to disable header.
show_progress : double or boolean, default 5.0
Number of seconds to wait before printing progress.
Set False to disable progress entirely.
clear_progress : boolean, default True
If True, clear progress when query completed.
Returns
-------
QueryEngine
'''
url = urlparse(url)
engine_type = url.scheme if url.scheme else 'presto'
if con is None:
if url.netloc:
# create connection
apikey, host = url.netloc.split('@')
con = Connection(apikey=apikey, endpoint="https://{0}/".format(host))
else:
# default connection
con = Connection()
database = url.path[1:] if url.path.startswith('/') else url.path
params = {
'type': engine_type,
}
params.update(parse_qsl(url.query))
return QueryEngine(con, database, params,
header=header,
show_progress=show_progress,
clear_progress=clear_progress) | Create a handler for query engine based on a URL.
The following environment variables are used for default connection:
TD_API_KEY API key
TD_API_SERVER API server (default: api.treasuredata.com)
HTTP_PROXY HTTP proxy (optional)
Parameters
----------
url : string
Engine descriptor in the form "type://apikey@host/database?params..."
Use shorthand notation "type:database?params..." for the default connection.
con : Connection, optional
Handler returned by connect. If not given, default connection is used.
header : string or boolean, default True
Prepend comment strings, in the form "-- comment", as a header of queries.
Set False to disable header.
show_progress : double or boolean, default 5.0
Number of seconds to wait before printing progress.
Set False to disable progress entirely.
clear_progress : boolean, default True
If True, clear progress when query completed.
Returns
-------
QueryEngine | Below is the the instruction that describes the task:
### Input:
Create a handler for query engine based on a URL.
The following environment variables are used for default connection:
TD_API_KEY API key
TD_API_SERVER API server (default: api.treasuredata.com)
HTTP_PROXY HTTP proxy (optional)
Parameters
----------
url : string
Engine descriptor in the form "type://apikey@host/database?params..."
Use shorthand notation "type:database?params..." for the default connection.
con : Connection, optional
Handler returned by connect. If not given, default connection is used.
header : string or boolean, default True
Prepend comment strings, in the form "-- comment", as a header of queries.
Set False to disable header.
show_progress : double or boolean, default 5.0
Number of seconds to wait before printing progress.
Set False to disable progress entirely.
clear_progress : boolean, default True
If True, clear progress when query completed.
Returns
-------
QueryEngine
### Response:
def create_engine(url, con=None, header=True, show_progress=5.0, clear_progress=True):
'''Create a handler for query engine based on a URL.
The following environment variables are used for default connection:
TD_API_KEY API key
TD_API_SERVER API server (default: api.treasuredata.com)
HTTP_PROXY HTTP proxy (optional)
Parameters
----------
url : string
Engine descriptor in the form "type://apikey@host/database?params..."
Use shorthand notation "type:database?params..." for the default connection.
con : Connection, optional
Handler returned by connect. If not given, default connection is used.
header : string or boolean, default True
Prepend comment strings, in the form "-- comment", as a header of queries.
Set False to disable header.
show_progress : double or boolean, default 5.0
Number of seconds to wait before printing progress.
Set False to disable progress entirely.
clear_progress : boolean, default True
If True, clear progress when query completed.
Returns
-------
QueryEngine
'''
url = urlparse(url)
engine_type = url.scheme if url.scheme else 'presto'
if con is None:
if url.netloc:
# create connection
apikey, host = url.netloc.split('@')
con = Connection(apikey=apikey, endpoint="https://{0}/".format(host))
else:
# default connection
con = Connection()
database = url.path[1:] if url.path.startswith('/') else url.path
params = {
'type': engine_type,
}
params.update(parse_qsl(url.query))
return QueryEngine(con, database, params,
header=header,
show_progress=show_progress,
clear_progress=clear_progress) |
def get_cg_volumes(self, group_id):
""" return all non snapshots volumes in cg """
for volume in self.xcli_client.cmd.vol_list(cg=group_id):
if volume.snapshot_of == '':
yield volume.name | return all non snapshots volumes in cg | Below is the the instruction that describes the task:
### Input:
return all non snapshots volumes in cg
### Response:
def get_cg_volumes(self, group_id):
""" return all non snapshots volumes in cg """
for volume in self.xcli_client.cmd.vol_list(cg=group_id):
if volume.snapshot_of == '':
yield volume.name |
def _index(self, name):
'''Returns index transforms for ``name``.
:type name: unicode
:rtype: ``{ create |--> function, transform |--> function }``
'''
name = name.decode('utf-8')
try:
return self._indexes[name]
except KeyError:
raise KeyError('Index "%s" has not been registered with '
'this FC store.' % name) | Returns index transforms for ``name``.
:type name: unicode
:rtype: ``{ create |--> function, transform |--> function }`` | Below is the the instruction that describes the task:
### Input:
Returns index transforms for ``name``.
:type name: unicode
:rtype: ``{ create |--> function, transform |--> function }``
### Response:
def _index(self, name):
'''Returns index transforms for ``name``.
:type name: unicode
:rtype: ``{ create |--> function, transform |--> function }``
'''
name = name.decode('utf-8')
try:
return self._indexes[name]
except KeyError:
raise KeyError('Index "%s" has not been registered with '
'this FC store.' % name) |
def daemonize_if(opts):
'''
Daemonize a module function process if multiprocessing is True and the
process is not being called by salt-call
'''
if 'salt-call' in sys.argv[0]:
return
if not opts.get('multiprocessing', True):
return
if sys.platform.startswith('win'):
return
daemonize(False) | Daemonize a module function process if multiprocessing is True and the
process is not being called by salt-call | Below is the the instruction that describes the task:
### Input:
Daemonize a module function process if multiprocessing is True and the
process is not being called by salt-call
### Response:
def daemonize_if(opts):
'''
Daemonize a module function process if multiprocessing is True and the
process is not being called by salt-call
'''
if 'salt-call' in sys.argv[0]:
return
if not opts.get('multiprocessing', True):
return
if sys.platform.startswith('win'):
return
daemonize(False) |
def add(queue_name, payload=None, content_type=None, source=None, task_id=None,
build_id=None, release_id=None, run_id=None):
"""Adds a work item to a queue.
Args:
queue_name: Name of the queue to add the work item to.
payload: Optional. Payload that describes the work to do as a string.
If not a string and content_type is not provided, then this
function assumes the payload is a JSON-able Python object.
content_type: Optional. Content type of the payload.
source: Optional. Who or what originally created the task.
task_id: Optional. When supplied, only enqueue this task if a task
with this ID does not already exist. If a task with this ID already
exists, then this function will do nothing.
build_id: Build ID to associate with this task. May be None.
release_id: Release ID to associate with this task. May be None.
run_id: Run ID to associate with this task. May be None.
Returns:
ID of the task that was added.
"""
if task_id:
task = WorkQueue.query.filter_by(task_id=task_id).first()
if task:
return task.task_id
else:
task_id = uuid.uuid4().hex
if payload and not content_type and not isinstance(payload, basestring):
payload = json.dumps(payload)
content_type = 'application/json'
now = datetime.datetime.utcnow()
task = WorkQueue(
task_id=task_id,
queue_name=queue_name,
eta=now,
source=source,
build_id=build_id,
release_id=release_id,
run_id=run_id,
payload=payload,
content_type=content_type)
db.session.add(task)
return task.task_id | Adds a work item to a queue.
Args:
queue_name: Name of the queue to add the work item to.
payload: Optional. Payload that describes the work to do as a string.
If not a string and content_type is not provided, then this
function assumes the payload is a JSON-able Python object.
content_type: Optional. Content type of the payload.
source: Optional. Who or what originally created the task.
task_id: Optional. When supplied, only enqueue this task if a task
with this ID does not already exist. If a task with this ID already
exists, then this function will do nothing.
build_id: Build ID to associate with this task. May be None.
release_id: Release ID to associate with this task. May be None.
run_id: Run ID to associate with this task. May be None.
Returns:
ID of the task that was added. | Below is the the instruction that describes the task:
### Input:
Adds a work item to a queue.
Args:
queue_name: Name of the queue to add the work item to.
payload: Optional. Payload that describes the work to do as a string.
If not a string and content_type is not provided, then this
function assumes the payload is a JSON-able Python object.
content_type: Optional. Content type of the payload.
source: Optional. Who or what originally created the task.
task_id: Optional. When supplied, only enqueue this task if a task
with this ID does not already exist. If a task with this ID already
exists, then this function will do nothing.
build_id: Build ID to associate with this task. May be None.
release_id: Release ID to associate with this task. May be None.
run_id: Run ID to associate with this task. May be None.
Returns:
ID of the task that was added.
### Response:
def add(queue_name, payload=None, content_type=None, source=None, task_id=None,
build_id=None, release_id=None, run_id=None):
"""Adds a work item to a queue.
Args:
queue_name: Name of the queue to add the work item to.
payload: Optional. Payload that describes the work to do as a string.
If not a string and content_type is not provided, then this
function assumes the payload is a JSON-able Python object.
content_type: Optional. Content type of the payload.
source: Optional. Who or what originally created the task.
task_id: Optional. When supplied, only enqueue this task if a task
with this ID does not already exist. If a task with this ID already
exists, then this function will do nothing.
build_id: Build ID to associate with this task. May be None.
release_id: Release ID to associate with this task. May be None.
run_id: Run ID to associate with this task. May be None.
Returns:
ID of the task that was added.
"""
if task_id:
task = WorkQueue.query.filter_by(task_id=task_id).first()
if task:
return task.task_id
else:
task_id = uuid.uuid4().hex
if payload and not content_type and not isinstance(payload, basestring):
payload = json.dumps(payload)
content_type = 'application/json'
now = datetime.datetime.utcnow()
task = WorkQueue(
task_id=task_id,
queue_name=queue_name,
eta=now,
source=source,
build_id=build_id,
release_id=release_id,
run_id=run_id,
payload=payload,
content_type=content_type)
db.session.add(task)
return task.task_id |
def issues(self, **kwargs):
"""List issues related to this milestone.
Args:
all (bool): If True, return all the items, without pagination
per_page (int): Number of items to retrieve per request
page (int): ID of the page to return (starts with page 1)
as_list (bool): If set to False and no pagination option is
defined, return a generator instead of a list
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabListError: If the list could not be retrieved
Returns:
RESTObjectList: The list of issues
"""
path = '%s/%s/issues' % (self.manager.path, self.get_id())
data_list = self.manager.gitlab.http_list(path, as_list=False,
**kwargs)
manager = GroupIssueManager(self.manager.gitlab,
parent=self.manager._parent)
# FIXME(gpocentek): the computed manager path is not correct
return RESTObjectList(manager, GroupIssue, data_list) | List issues related to this milestone.
Args:
all (bool): If True, return all the items, without pagination
per_page (int): Number of items to retrieve per request
page (int): ID of the page to return (starts with page 1)
as_list (bool): If set to False and no pagination option is
defined, return a generator instead of a list
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabListError: If the list could not be retrieved
Returns:
RESTObjectList: The list of issues | Below is the the instruction that describes the task:
### Input:
List issues related to this milestone.
Args:
all (bool): If True, return all the items, without pagination
per_page (int): Number of items to retrieve per request
page (int): ID of the page to return (starts with page 1)
as_list (bool): If set to False and no pagination option is
defined, return a generator instead of a list
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabListError: If the list could not be retrieved
Returns:
RESTObjectList: The list of issues
### Response:
def issues(self, **kwargs):
"""List issues related to this milestone.
Args:
all (bool): If True, return all the items, without pagination
per_page (int): Number of items to retrieve per request
page (int): ID of the page to return (starts with page 1)
as_list (bool): If set to False and no pagination option is
defined, return a generator instead of a list
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabListError: If the list could not be retrieved
Returns:
RESTObjectList: The list of issues
"""
path = '%s/%s/issues' % (self.manager.path, self.get_id())
data_list = self.manager.gitlab.http_list(path, as_list=False,
**kwargs)
manager = GroupIssueManager(self.manager.gitlab,
parent=self.manager._parent)
# FIXME(gpocentek): the computed manager path is not correct
return RESTObjectList(manager, GroupIssue, data_list) |
def _query(self, query_str, query_args=None, **query_options):
"""
**query_options -- dict
ignore_result -- boolean -- true to not attempt to fetch results
fetchone -- boolean -- true to only fetch one result
count_result -- boolean -- true to return the int count of rows affected
"""
ret = True
# http://stackoverflow.com/questions/6739355/dictcursor-doesnt-seem-to-work-under-psycopg2
connection = query_options.get('connection', None)
with self.connection(connection) as connection:
cur = connection.cursor()
ignore_result = query_options.get('ignore_result', False)
count_result = query_options.get('count_result', False)
one_result = query_options.get('fetchone', query_options.get('one_result', False))
cursor_result = query_options.get('cursor_result', False)
try:
if query_args:
self.log("{}{}{}", query_str, os.linesep, query_args)
cur.execute(query_str, query_args)
else:
self.log(query_str)
cur.execute(query_str)
if cursor_result:
ret = cur
elif not ignore_result:
if one_result:
ret = self._normalize_result_dict(cur.fetchone())
elif count_result:
ret = cur.rowcount
else:
ret = self._normalize_result_list(cur.fetchall())
except Exception as e:
self.log(e)
raise
return ret | **query_options -- dict
ignore_result -- boolean -- true to not attempt to fetch results
fetchone -- boolean -- true to only fetch one result
count_result -- boolean -- true to return the int count of rows affected | Below is the the instruction that describes the task:
### Input:
**query_options -- dict
ignore_result -- boolean -- true to not attempt to fetch results
fetchone -- boolean -- true to only fetch one result
count_result -- boolean -- true to return the int count of rows affected
### Response:
def _query(self, query_str, query_args=None, **query_options):
"""
**query_options -- dict
ignore_result -- boolean -- true to not attempt to fetch results
fetchone -- boolean -- true to only fetch one result
count_result -- boolean -- true to return the int count of rows affected
"""
ret = True
# http://stackoverflow.com/questions/6739355/dictcursor-doesnt-seem-to-work-under-psycopg2
connection = query_options.get('connection', None)
with self.connection(connection) as connection:
cur = connection.cursor()
ignore_result = query_options.get('ignore_result', False)
count_result = query_options.get('count_result', False)
one_result = query_options.get('fetchone', query_options.get('one_result', False))
cursor_result = query_options.get('cursor_result', False)
try:
if query_args:
self.log("{}{}{}", query_str, os.linesep, query_args)
cur.execute(query_str, query_args)
else:
self.log(query_str)
cur.execute(query_str)
if cursor_result:
ret = cur
elif not ignore_result:
if one_result:
ret = self._normalize_result_dict(cur.fetchone())
elif count_result:
ret = cur.rowcount
else:
ret = self._normalize_result_list(cur.fetchall())
except Exception as e:
self.log(e)
raise
return ret |
def connect(
creator, maxusage=None, setsession=None,
failures=None, ping=1, closeable=True, *args, **kwargs):
"""A tough version of the connection constructor of a DB-API 2 module.
creator: either an arbitrary function returning new DB-API 2 compliant
connection objects or a DB-API 2 compliant database module
maxusage: maximum usage limit for the underlying DB-API 2 connection
(number of database operations, 0 or None means unlimited usage)
callproc(), execute() and executemany() count as one operation.
When the limit is reached, the connection is automatically reset.
setsession: an optional list of SQL commands that may serve to prepare
the session, e.g. ["set datestyle to german", "set time zone mez"]
failures: an optional exception class or a tuple of exception classes
for which the failover mechanism shall be applied, if the default
(OperationalError, InternalError) is not adequate
ping: determines when the connection should be checked with ping()
(0 = None = never, 1 = default = when _ping_check() is called,
2 = whenever a cursor is created, 4 = when a query is executed,
7 = always, and all other bit combinations of these values)
closeable: if this is set to false, then closing the connection will
be silently ignored, but by default the connection can be closed
args, kwargs: the parameters that shall be passed to the creator
function or the connection constructor of the DB-API 2 module
"""
return SteadyDBConnection(
creator, maxusage, setsession,
failures, ping, closeable, *args, **kwargs) | A tough version of the connection constructor of a DB-API 2 module.
creator: either an arbitrary function returning new DB-API 2 compliant
connection objects or a DB-API 2 compliant database module
maxusage: maximum usage limit for the underlying DB-API 2 connection
(number of database operations, 0 or None means unlimited usage)
callproc(), execute() and executemany() count as one operation.
When the limit is reached, the connection is automatically reset.
setsession: an optional list of SQL commands that may serve to prepare
the session, e.g. ["set datestyle to german", "set time zone mez"]
failures: an optional exception class or a tuple of exception classes
for which the failover mechanism shall be applied, if the default
(OperationalError, InternalError) is not adequate
ping: determines when the connection should be checked with ping()
(0 = None = never, 1 = default = when _ping_check() is called,
2 = whenever a cursor is created, 4 = when a query is executed,
7 = always, and all other bit combinations of these values)
closeable: if this is set to false, then closing the connection will
be silently ignored, but by default the connection can be closed
args, kwargs: the parameters that shall be passed to the creator
function or the connection constructor of the DB-API 2 module | Below is the the instruction that describes the task:
### Input:
A tough version of the connection constructor of a DB-API 2 module.
creator: either an arbitrary function returning new DB-API 2 compliant
connection objects or a DB-API 2 compliant database module
maxusage: maximum usage limit for the underlying DB-API 2 connection
(number of database operations, 0 or None means unlimited usage)
callproc(), execute() and executemany() count as one operation.
When the limit is reached, the connection is automatically reset.
setsession: an optional list of SQL commands that may serve to prepare
the session, e.g. ["set datestyle to german", "set time zone mez"]
failures: an optional exception class or a tuple of exception classes
for which the failover mechanism shall be applied, if the default
(OperationalError, InternalError) is not adequate
ping: determines when the connection should be checked with ping()
(0 = None = never, 1 = default = when _ping_check() is called,
2 = whenever a cursor is created, 4 = when a query is executed,
7 = always, and all other bit combinations of these values)
closeable: if this is set to false, then closing the connection will
be silently ignored, but by default the connection can be closed
args, kwargs: the parameters that shall be passed to the creator
function or the connection constructor of the DB-API 2 module
### Response:
def connect(
creator, maxusage=None, setsession=None,
failures=None, ping=1, closeable=True, *args, **kwargs):
"""A tough version of the connection constructor of a DB-API 2 module.
creator: either an arbitrary function returning new DB-API 2 compliant
connection objects or a DB-API 2 compliant database module
maxusage: maximum usage limit for the underlying DB-API 2 connection
(number of database operations, 0 or None means unlimited usage)
callproc(), execute() and executemany() count as one operation.
When the limit is reached, the connection is automatically reset.
setsession: an optional list of SQL commands that may serve to prepare
the session, e.g. ["set datestyle to german", "set time zone mez"]
failures: an optional exception class or a tuple of exception classes
for which the failover mechanism shall be applied, if the default
(OperationalError, InternalError) is not adequate
ping: determines when the connection should be checked with ping()
(0 = None = never, 1 = default = when _ping_check() is called,
2 = whenever a cursor is created, 4 = when a query is executed,
7 = always, and all other bit combinations of these values)
closeable: if this is set to false, then closing the connection will
be silently ignored, but by default the connection can be closed
args, kwargs: the parameters that shall be passed to the creator
function or the connection constructor of the DB-API 2 module
"""
return SteadyDBConnection(
creator, maxusage, setsession,
failures, ping, closeable, *args, **kwargs) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.