code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def is_descendant_of_book(self, id_, book_id):
"""Tests if an ``Id`` is a descendant of a book.
arg: id (osid.id.Id): an ``Id``
arg: book_id (osid.id.Id): the ``Id`` of a book
return: (boolean) - ``true`` if the ``id`` is a descendant of
the ``book_id,`` ``false`` otherwise
raise: NotFound - ``book_id`` is not found
raise: NullArgument - ``id`` or ``book_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` is not found return ``false``.
"""
# Implemented from template for
# osid.resource.BinHierarchySession.is_descendant_of_bin
if self._catalog_session is not None:
return self._catalog_session.is_descendant_of_catalog(id_=id_, catalog_id=book_id)
return self._hierarchy_session.is_descendant(id_=id_, descendant_id=book_id) | Tests if an ``Id`` is a descendant of a book.
arg: id (osid.id.Id): an ``Id``
arg: book_id (osid.id.Id): the ``Id`` of a book
return: (boolean) - ``true`` if the ``id`` is a descendant of
the ``book_id,`` ``false`` otherwise
raise: NotFound - ``book_id`` is not found
raise: NullArgument - ``id`` or ``book_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` is not found return ``false``. | Below is the the instruction that describes the task:
### Input:
Tests if an ``Id`` is a descendant of a book.
arg: id (osid.id.Id): an ``Id``
arg: book_id (osid.id.Id): the ``Id`` of a book
return: (boolean) - ``true`` if the ``id`` is a descendant of
the ``book_id,`` ``false`` otherwise
raise: NotFound - ``book_id`` is not found
raise: NullArgument - ``id`` or ``book_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` is not found return ``false``.
### Response:
def is_descendant_of_book(self, id_, book_id):
"""Tests if an ``Id`` is a descendant of a book.
arg: id (osid.id.Id): an ``Id``
arg: book_id (osid.id.Id): the ``Id`` of a book
return: (boolean) - ``true`` if the ``id`` is a descendant of
the ``book_id,`` ``false`` otherwise
raise: NotFound - ``book_id`` is not found
raise: NullArgument - ``id`` or ``book_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` is not found return ``false``.
"""
# Implemented from template for
# osid.resource.BinHierarchySession.is_descendant_of_bin
if self._catalog_session is not None:
return self._catalog_session.is_descendant_of_catalog(id_=id_, catalog_id=book_id)
return self._hierarchy_session.is_descendant(id_=id_, descendant_id=book_id) |
def _doSelectInThread(self, timeout):
"""Run one iteration of the I/O monitor loop.
This will run all selectables who had input or output readiness
waiting for them.
"""
reads = self.reads
writes = self.writes
while 1:
try:
r, w, ignored = _select(reads.keys(),
writes.keys(),
[], timeout)
break
except ValueError, ve:
# Possibly a file descriptor has gone negative?
log.err()
self._preenDescriptorsInThread()
except TypeError, te:
# Something *totally* invalid (object w/o fileno, non-integral
# result) was passed
log.err()
self._preenDescriptorsInThread()
except (select.error, IOError), se:
# select(2) encountered an error
if se.args[0] in (0, 2):
# windows does this if it got an empty list
if (not reads) and (not writes):
return
else:
raise
elif se.args[0] == EINTR:
return
elif se.args[0] == EBADF:
self._preenDescriptorsInThread()
else:
# OK, I really don't know what's going on. Blow up.
raise
self._sendToMain('Notify', r, w) | Run one iteration of the I/O monitor loop.
This will run all selectables who had input or output readiness
waiting for them. | Below is the the instruction that describes the task:
### Input:
Run one iteration of the I/O monitor loop.
This will run all selectables who had input or output readiness
waiting for them.
### Response:
def _doSelectInThread(self, timeout):
"""Run one iteration of the I/O monitor loop.
This will run all selectables who had input or output readiness
waiting for them.
"""
reads = self.reads
writes = self.writes
while 1:
try:
r, w, ignored = _select(reads.keys(),
writes.keys(),
[], timeout)
break
except ValueError, ve:
# Possibly a file descriptor has gone negative?
log.err()
self._preenDescriptorsInThread()
except TypeError, te:
# Something *totally* invalid (object w/o fileno, non-integral
# result) was passed
log.err()
self._preenDescriptorsInThread()
except (select.error, IOError), se:
# select(2) encountered an error
if se.args[0] in (0, 2):
# windows does this if it got an empty list
if (not reads) and (not writes):
return
else:
raise
elif se.args[0] == EINTR:
return
elif se.args[0] == EBADF:
self._preenDescriptorsInThread()
else:
# OK, I really don't know what's going on. Blow up.
raise
self._sendToMain('Notify', r, w) |
def render_template_file(file_name, context):
""" Renders and overrides Jinja2 template files """
with open(file_name, 'r+') as f:
template = Template(f.read())
output = template.render(context)
f.seek(0)
f.write(output)
f.truncate() | Renders and overrides Jinja2 template files | Below is the the instruction that describes the task:
### Input:
Renders and overrides Jinja2 template files
### Response:
def render_template_file(file_name, context):
""" Renders and overrides Jinja2 template files """
with open(file_name, 'r+') as f:
template = Template(f.read())
output = template.render(context)
f.seek(0)
f.write(output)
f.truncate() |
def _build_array_declarations(self, with_init=True):
"""
Generate declaration statements for arrays.
Also transforming multi-dim to 1d arrays and initializing with malloc.
:param with_init: ommit malloc initialization
:return: list of declarations nodes, dictionary of array names and original dimensions
"""
# copy array declarations from from kernel ast
array_declarations = deepcopy(self.get_array_declarations())
array_dict = []
for d in array_declarations:
# We need to transform
array_dict.append(transform_multidim_to_1d_decl(d))
transform_array_decl_to_malloc(d, with_init=with_init)
return array_declarations, dict(array_dict) | Generate declaration statements for arrays.
Also transforming multi-dim to 1d arrays and initializing with malloc.
:param with_init: ommit malloc initialization
:return: list of declarations nodes, dictionary of array names and original dimensions | Below is the the instruction that describes the task:
### Input:
Generate declaration statements for arrays.
Also transforming multi-dim to 1d arrays and initializing with malloc.
:param with_init: ommit malloc initialization
:return: list of declarations nodes, dictionary of array names and original dimensions
### Response:
def _build_array_declarations(self, with_init=True):
"""
Generate declaration statements for arrays.
Also transforming multi-dim to 1d arrays and initializing with malloc.
:param with_init: ommit malloc initialization
:return: list of declarations nodes, dictionary of array names and original dimensions
"""
# copy array declarations from from kernel ast
array_declarations = deepcopy(self.get_array_declarations())
array_dict = []
for d in array_declarations:
# We need to transform
array_dict.append(transform_multidim_to_1d_decl(d))
transform_array_decl_to_malloc(d, with_init=with_init)
return array_declarations, dict(array_dict) |
def _elements(self):
"""
The cached list of elements.
"""
if not self.evaluated:
setattr(self, '_elements_cached', list(self._select()))
return self._elements_cached | The cached list of elements. | Below is the the instruction that describes the task:
### Input:
The cached list of elements.
### Response:
def _elements(self):
"""
The cached list of elements.
"""
if not self.evaluated:
setattr(self, '_elements_cached', list(self._select()))
return self._elements_cached |
def install_hwpack(url, replace_existing=False):
"""install hwpackrary from web or local files system.
:param url: web address or file path
:param replace_existing: bool
:rtype: None
"""
d = tmpdir(tmpdir())
f = download(url)
Archive(f).extractall(d)
clean_dir(d)
src_dhwpack = find_hwpack_dir(d)
targ_dhwpack = hwpack_dir() / src_dhwpack.name
doaction = 0
if targ_dhwpack.exists():
log.debug('hwpack already exists: %s', targ_dhwpack)
if replace_existing:
log.debug('remove %s', targ_dhwpack)
targ_dhwpack.rmtree()
doaction = 1
else:
doaction = 1
if doaction:
log.debug('move %s -> %s', src_dhwpack, targ_dhwpack)
src_dhwpack.move(targ_dhwpack)
hwpack_dir().copymode(targ_dhwpack)
for x in targ_dhwpack.walk():
hwpack_dir().copymode(x) | install hwpackrary from web or local files system.
:param url: web address or file path
:param replace_existing: bool
:rtype: None | Below is the the instruction that describes the task:
### Input:
install hwpackrary from web or local files system.
:param url: web address or file path
:param replace_existing: bool
:rtype: None
### Response:
def install_hwpack(url, replace_existing=False):
"""install hwpackrary from web or local files system.
:param url: web address or file path
:param replace_existing: bool
:rtype: None
"""
d = tmpdir(tmpdir())
f = download(url)
Archive(f).extractall(d)
clean_dir(d)
src_dhwpack = find_hwpack_dir(d)
targ_dhwpack = hwpack_dir() / src_dhwpack.name
doaction = 0
if targ_dhwpack.exists():
log.debug('hwpack already exists: %s', targ_dhwpack)
if replace_existing:
log.debug('remove %s', targ_dhwpack)
targ_dhwpack.rmtree()
doaction = 1
else:
doaction = 1
if doaction:
log.debug('move %s -> %s', src_dhwpack, targ_dhwpack)
src_dhwpack.move(targ_dhwpack)
hwpack_dir().copymode(targ_dhwpack)
for x in targ_dhwpack.walk():
hwpack_dir().copymode(x) |
def patch(patchfile,
options='',
saltenv='base',
source_hash=None,
show_changes=True,
source='running',
path=None,
test=False,
commit=True,
debug=False,
replace=True):
'''
.. versionadded:: 2019.2.0
Apply a patch to the configuration source, and load the result into the
running config of the device.
patchfile
A patch file to apply to the configuration source.
options
Options to pass to patch.
source_hash
If the patch file (specified via the ``patchfile`` argument) is an
HTTP(S) or FTP URL and the file exists in the minion's file cache, this
option can be passed to keep the minion from re-downloading the file if
the cached copy matches the specified hash.
show_changes: ``True``
Controls how changes are presented. If ``True``, this function will
return the of the changes made.
If ``False``, then it will return a boolean (``True`` if any changes
were made, otherwise False).
source: ``running``
The configuration source. Choose from: ``running``, ``candidate``, or
``startup``. Default: ``running``.
path: ``None``
Save the temporary configuration to a specific path, then read from
there. This argument is optional, can the user prefers a particular
location of the temporary file.
test: ``False``
Dry run? If set as ``True``, will apply the config, discard and return
the changes. Default: ``False`` and will commit the changes on the
device.
commit: ``True``
Commit the configuration changes? Default: ``True``.
debug: ``False``
Debug mode. Will insert a new key in the output dictionary, as
``loaded_config`` containing the raw configuration loaded on the device.
replace: ``True``
Load and replace the configuration. Default: ``True``.
CLI Example:
.. code-block:: bash
salt '*' net.patch https://example.com/running_config.patch
'''
config_saved = save_config(source=source, path=path)
if not config_saved or not config_saved['result']:
return config_saved
path = config_saved['out']
patchfile_cache = __salt__['cp.cache_file'](patchfile)
if patchfile_cache is False:
return {
'out': None,
'result': False,
'comment': 'The file "{}" does not exist.'.format(patchfile)
}
replace_pattern = __salt__['file.patch'](path,
patchfile_cache,
options=options)
with salt.utils.files.fopen(path, 'r') as fh_:
updated_config = fh_.read()
return __salt__['net.load_config'](text=updated_config,
test=test,
debug=debug,
replace=replace,
commit=commit) | .. versionadded:: 2019.2.0
Apply a patch to the configuration source, and load the result into the
running config of the device.
patchfile
A patch file to apply to the configuration source.
options
Options to pass to patch.
source_hash
If the patch file (specified via the ``patchfile`` argument) is an
HTTP(S) or FTP URL and the file exists in the minion's file cache, this
option can be passed to keep the minion from re-downloading the file if
the cached copy matches the specified hash.
show_changes: ``True``
Controls how changes are presented. If ``True``, this function will
return the of the changes made.
If ``False``, then it will return a boolean (``True`` if any changes
were made, otherwise False).
source: ``running``
The configuration source. Choose from: ``running``, ``candidate``, or
``startup``. Default: ``running``.
path: ``None``
Save the temporary configuration to a specific path, then read from
there. This argument is optional, can the user prefers a particular
location of the temporary file.
test: ``False``
Dry run? If set as ``True``, will apply the config, discard and return
the changes. Default: ``False`` and will commit the changes on the
device.
commit: ``True``
Commit the configuration changes? Default: ``True``.
debug: ``False``
Debug mode. Will insert a new key in the output dictionary, as
``loaded_config`` containing the raw configuration loaded on the device.
replace: ``True``
Load and replace the configuration. Default: ``True``.
CLI Example:
.. code-block:: bash
salt '*' net.patch https://example.com/running_config.patch | Below is the the instruction that describes the task:
### Input:
.. versionadded:: 2019.2.0
Apply a patch to the configuration source, and load the result into the
running config of the device.
patchfile
A patch file to apply to the configuration source.
options
Options to pass to patch.
source_hash
If the patch file (specified via the ``patchfile`` argument) is an
HTTP(S) or FTP URL and the file exists in the minion's file cache, this
option can be passed to keep the minion from re-downloading the file if
the cached copy matches the specified hash.
show_changes: ``True``
Controls how changes are presented. If ``True``, this function will
return the of the changes made.
If ``False``, then it will return a boolean (``True`` if any changes
were made, otherwise False).
source: ``running``
The configuration source. Choose from: ``running``, ``candidate``, or
``startup``. Default: ``running``.
path: ``None``
Save the temporary configuration to a specific path, then read from
there. This argument is optional, can the user prefers a particular
location of the temporary file.
test: ``False``
Dry run? If set as ``True``, will apply the config, discard and return
the changes. Default: ``False`` and will commit the changes on the
device.
commit: ``True``
Commit the configuration changes? Default: ``True``.
debug: ``False``
Debug mode. Will insert a new key in the output dictionary, as
``loaded_config`` containing the raw configuration loaded on the device.
replace: ``True``
Load and replace the configuration. Default: ``True``.
CLI Example:
.. code-block:: bash
salt '*' net.patch https://example.com/running_config.patch
### Response:
def patch(patchfile,
options='',
saltenv='base',
source_hash=None,
show_changes=True,
source='running',
path=None,
test=False,
commit=True,
debug=False,
replace=True):
'''
.. versionadded:: 2019.2.0
Apply a patch to the configuration source, and load the result into the
running config of the device.
patchfile
A patch file to apply to the configuration source.
options
Options to pass to patch.
source_hash
If the patch file (specified via the ``patchfile`` argument) is an
HTTP(S) or FTP URL and the file exists in the minion's file cache, this
option can be passed to keep the minion from re-downloading the file if
the cached copy matches the specified hash.
show_changes: ``True``
Controls how changes are presented. If ``True``, this function will
return the of the changes made.
If ``False``, then it will return a boolean (``True`` if any changes
were made, otherwise False).
source: ``running``
The configuration source. Choose from: ``running``, ``candidate``, or
``startup``. Default: ``running``.
path: ``None``
Save the temporary configuration to a specific path, then read from
there. This argument is optional, can the user prefers a particular
location of the temporary file.
test: ``False``
Dry run? If set as ``True``, will apply the config, discard and return
the changes. Default: ``False`` and will commit the changes on the
device.
commit: ``True``
Commit the configuration changes? Default: ``True``.
debug: ``False``
Debug mode. Will insert a new key in the output dictionary, as
``loaded_config`` containing the raw configuration loaded on the device.
replace: ``True``
Load and replace the configuration. Default: ``True``.
CLI Example:
.. code-block:: bash
salt '*' net.patch https://example.com/running_config.patch
'''
config_saved = save_config(source=source, path=path)
if not config_saved or not config_saved['result']:
return config_saved
path = config_saved['out']
patchfile_cache = __salt__['cp.cache_file'](patchfile)
if patchfile_cache is False:
return {
'out': None,
'result': False,
'comment': 'The file "{}" does not exist.'.format(patchfile)
}
replace_pattern = __salt__['file.patch'](path,
patchfile_cache,
options=options)
with salt.utils.files.fopen(path, 'r') as fh_:
updated_config = fh_.read()
return __salt__['net.load_config'](text=updated_config,
test=test,
debug=debug,
replace=replace,
commit=commit) |
def can_persist_fixtures():
"""Returns True if it's possible to persist fixtures across tests.
Flask-Fixtures uses the setUpClass and tearDownClass methods to persist
fixtures across tests. These methods were added to unittest.TestCase in
python 2.7. So, we can only persist fixtures when using python 2.7.
However, the nose and py.test libraries add support for these methods
regardless of what version of python we're running, so if we're running
with either of those libraries, return True to persist fixtures.
"""
# If we're running python 2.7 or greater, we're fine
if sys.hexversion >= 0x02070000:
return True
# Otherwise, nose and py.test support the setUpClass and tearDownClass
# methods, so if we're using either of those, go ahead and run the tests
filename = inspect.stack()[-1][1]
executable = os.path.split(filename)[1]
return executable in ('py.test', 'nosetests') | Returns True if it's possible to persist fixtures across tests.
Flask-Fixtures uses the setUpClass and tearDownClass methods to persist
fixtures across tests. These methods were added to unittest.TestCase in
python 2.7. So, we can only persist fixtures when using python 2.7.
However, the nose and py.test libraries add support for these methods
regardless of what version of python we're running, so if we're running
with either of those libraries, return True to persist fixtures. | Below is the the instruction that describes the task:
### Input:
Returns True if it's possible to persist fixtures across tests.
Flask-Fixtures uses the setUpClass and tearDownClass methods to persist
fixtures across tests. These methods were added to unittest.TestCase in
python 2.7. So, we can only persist fixtures when using python 2.7.
However, the nose and py.test libraries add support for these methods
regardless of what version of python we're running, so if we're running
with either of those libraries, return True to persist fixtures.
### Response:
def can_persist_fixtures():
"""Returns True if it's possible to persist fixtures across tests.
Flask-Fixtures uses the setUpClass and tearDownClass methods to persist
fixtures across tests. These methods were added to unittest.TestCase in
python 2.7. So, we can only persist fixtures when using python 2.7.
However, the nose and py.test libraries add support for these methods
regardless of what version of python we're running, so if we're running
with either of those libraries, return True to persist fixtures.
"""
# If we're running python 2.7 or greater, we're fine
if sys.hexversion >= 0x02070000:
return True
# Otherwise, nose and py.test support the setUpClass and tearDownClass
# methods, so if we're using either of those, go ahead and run the tests
filename = inspect.stack()[-1][1]
executable = os.path.split(filename)[1]
return executable in ('py.test', 'nosetests') |
def _post_init(self):
"""Call the find devices method for the relevant platform."""
if WIN:
self._find_devices_win()
elif MAC:
self._find_devices_mac()
else:
self._find_devices()
self._update_all_devices()
if NIX:
self._find_leds() | Call the find devices method for the relevant platform. | Below is the the instruction that describes the task:
### Input:
Call the find devices method for the relevant platform.
### Response:
def _post_init(self):
"""Call the find devices method for the relevant platform."""
if WIN:
self._find_devices_win()
elif MAC:
self._find_devices_mac()
else:
self._find_devices()
self._update_all_devices()
if NIX:
self._find_leds() |
def init_module(self, run_object):
"""Initializes profiler with a module."""
self.profile = self.profile_module
self._run_object, _, self._run_args = run_object.partition(' ')
self._object_name = '%s (module)' % self._run_object
self._globs = {
'__file__': self._run_object,
'__name__': '__main__',
'__package__': None,
}
program_path = os.path.dirname(self._run_object)
if sys.path[0] != program_path:
sys.path.insert(0, program_path)
self._replace_sysargs() | Initializes profiler with a module. | Below is the the instruction that describes the task:
### Input:
Initializes profiler with a module.
### Response:
def init_module(self, run_object):
"""Initializes profiler with a module."""
self.profile = self.profile_module
self._run_object, _, self._run_args = run_object.partition(' ')
self._object_name = '%s (module)' % self._run_object
self._globs = {
'__file__': self._run_object,
'__name__': '__main__',
'__package__': None,
}
program_path = os.path.dirname(self._run_object)
if sys.path[0] != program_path:
sys.path.insert(0, program_path)
self._replace_sysargs() |
def decrypt_from(self, f, mac_bytes=10):
""" Decrypts a message from f. """
ctx = DecryptionContext(self.curve, f, self, mac_bytes)
yield ctx
ctx.read() | Decrypts a message from f. | Below is the the instruction that describes the task:
### Input:
Decrypts a message from f.
### Response:
def decrypt_from(self, f, mac_bytes=10):
""" Decrypts a message from f. """
ctx = DecryptionContext(self.curve, f, self, mac_bytes)
yield ctx
ctx.read() |
def _deep_merge_dict(a, b):
"""Additively merge right side dict into left side dict."""
for k, v in b.items():
if k in a and isinstance(a[k], dict) and isinstance(v, dict):
_deep_merge_dict(a[k], v)
else:
a[k] = v | Additively merge right side dict into left side dict. | Below is the the instruction that describes the task:
### Input:
Additively merge right side dict into left side dict.
### Response:
def _deep_merge_dict(a, b):
"""Additively merge right side dict into left side dict."""
for k, v in b.items():
if k in a and isinstance(a[k], dict) and isinstance(v, dict):
_deep_merge_dict(a[k], v)
else:
a[k] = v |
def normalized_messages(self, no_field_name='_entity'):
"""Return all the error messages as a dictionary"""
if isinstance(self.messages, dict):
return self.messages
if not self.field_names:
return {no_field_name: self.messages}
return dict((name, self.messages) for name in self.field_names) | Return all the error messages as a dictionary | Below is the the instruction that describes the task:
### Input:
Return all the error messages as a dictionary
### Response:
def normalized_messages(self, no_field_name='_entity'):
"""Return all the error messages as a dictionary"""
if isinstance(self.messages, dict):
return self.messages
if not self.field_names:
return {no_field_name: self.messages}
return dict((name, self.messages) for name in self.field_names) |
def maf_permutation(context_counts,
context_to_mut,
seq_context,
gene_seq,
num_permutations=10000,
drop_silent=False):
"""Performs null-permutations across all genes and records the results in
a format like a MAF file. This could be useful for examining the null
permutations because the alternative approaches always summarize the results.
With the simulated null-permutations, novel metrics can be applied to create
an empirical null-distribution.
Parameters
----------
context_counts : pd.Series
number of mutations for each context
context_to_mut : dict
dictionary mapping nucleotide context to a list of observed
somatic base changes.
seq_context : SequenceContext
Sequence context for the entire gene sequence (regardless
of where mutations occur). The nucleotide contexts are
identified at positions along the gene.
gene_seq : GeneSequence
Sequence of gene of interest
num_permutations : int, default: 10000
number of permutations to create for null
drop_silent : bool, default=False
Flage on whether to drop all silent mutations. Some data sources
do not report silent mutations, and the simulations should match this.
Returns
-------
maf_list : list of tuples
list of null mutations with mutation info in a MAF like format
"""
mycontexts = context_counts.index.tolist()
somatic_base, base_context = zip(*[(base, one_context)
for one_context in mycontexts
for base in context_to_mut[one_context]])
# get random positions determined by sequence context
tmp_contxt_pos = seq_context.random_pos(context_counts.iteritems(),
num_permutations)
tmp_mut_pos = np.hstack(pos_array for base, pos_array in tmp_contxt_pos)
# info about gene
gene_name = gene_seq.bed.gene_name
strand = gene_seq.bed.strand
chrom = gene_seq.bed.chrom
gene_seq.bed.init_genome_coordinates() # map seq pos to genome
# determine result of random positions
maf_list = []
for row in tmp_mut_pos:
# get genome coordinate
pos2genome = np.vectorize(lambda x: gene_seq.bed.seqpos2genome[x]+1)
genome_coord = pos2genome(row)
# get info about mutations
tmp_mut_info = mc.get_aa_mut_info(row,
somatic_base,
gene_seq)
# get string describing variant
var_class = cutils.get_variant_classification(tmp_mut_info['Reference AA'],
tmp_mut_info['Somatic AA'],
tmp_mut_info['Codon Pos'])
# prepare output
for k, mysomatic_base in enumerate(somatic_base):
# format DNA change
ref_nuc = tmp_mut_info['Reference Nuc'][k]
nuc_pos = row[k]
dna_change = 'c.{0}{1}>{2}'.format(ref_nuc, nuc_pos, mysomatic_base)
# format protein change
ref_aa = tmp_mut_info['Reference AA'][k]
somatic_aa = tmp_mut_info['Somatic AA'][k]
codon_pos = tmp_mut_info['Codon Pos'][k]
protein_change = 'p.{0}{1}{2}'.format(ref_aa, codon_pos, somatic_aa)
# reverse complement if on negative strand
if strand == '-':
ref_nuc = utils.rev_comp(ref_nuc)
mysomatic_base = utils.rev_comp(mysomatic_base)
# append results
if drop_silent and var_class[k].decode() == 'Silent': continue
maf_line = [gene_name, strand, chrom, genome_coord[k], genome_coord[k],
ref_nuc, mysomatic_base, base_context[k], dna_change,
protein_change, var_class[k].decode()]
maf_list.append(maf_line)
return maf_list | Performs null-permutations across all genes and records the results in
a format like a MAF file. This could be useful for examining the null
permutations because the alternative approaches always summarize the results.
With the simulated null-permutations, novel metrics can be applied to create
an empirical null-distribution.
Parameters
----------
context_counts : pd.Series
number of mutations for each context
context_to_mut : dict
dictionary mapping nucleotide context to a list of observed
somatic base changes.
seq_context : SequenceContext
Sequence context for the entire gene sequence (regardless
of where mutations occur). The nucleotide contexts are
identified at positions along the gene.
gene_seq : GeneSequence
Sequence of gene of interest
num_permutations : int, default: 10000
number of permutations to create for null
drop_silent : bool, default=False
Flage on whether to drop all silent mutations. Some data sources
do not report silent mutations, and the simulations should match this.
Returns
-------
maf_list : list of tuples
list of null mutations with mutation info in a MAF like format | Below is the the instruction that describes the task:
### Input:
Performs null-permutations across all genes and records the results in
a format like a MAF file. This could be useful for examining the null
permutations because the alternative approaches always summarize the results.
With the simulated null-permutations, novel metrics can be applied to create
an empirical null-distribution.
Parameters
----------
context_counts : pd.Series
number of mutations for each context
context_to_mut : dict
dictionary mapping nucleotide context to a list of observed
somatic base changes.
seq_context : SequenceContext
Sequence context for the entire gene sequence (regardless
of where mutations occur). The nucleotide contexts are
identified at positions along the gene.
gene_seq : GeneSequence
Sequence of gene of interest
num_permutations : int, default: 10000
number of permutations to create for null
drop_silent : bool, default=False
Flage on whether to drop all silent mutations. Some data sources
do not report silent mutations, and the simulations should match this.
Returns
-------
maf_list : list of tuples
list of null mutations with mutation info in a MAF like format
### Response:
def maf_permutation(context_counts,
context_to_mut,
seq_context,
gene_seq,
num_permutations=10000,
drop_silent=False):
"""Performs null-permutations across all genes and records the results in
a format like a MAF file. This could be useful for examining the null
permutations because the alternative approaches always summarize the results.
With the simulated null-permutations, novel metrics can be applied to create
an empirical null-distribution.
Parameters
----------
context_counts : pd.Series
number of mutations for each context
context_to_mut : dict
dictionary mapping nucleotide context to a list of observed
somatic base changes.
seq_context : SequenceContext
Sequence context for the entire gene sequence (regardless
of where mutations occur). The nucleotide contexts are
identified at positions along the gene.
gene_seq : GeneSequence
Sequence of gene of interest
num_permutations : int, default: 10000
number of permutations to create for null
drop_silent : bool, default=False
Flage on whether to drop all silent mutations. Some data sources
do not report silent mutations, and the simulations should match this.
Returns
-------
maf_list : list of tuples
list of null mutations with mutation info in a MAF like format
"""
mycontexts = context_counts.index.tolist()
somatic_base, base_context = zip(*[(base, one_context)
for one_context in mycontexts
for base in context_to_mut[one_context]])
# get random positions determined by sequence context
tmp_contxt_pos = seq_context.random_pos(context_counts.iteritems(),
num_permutations)
tmp_mut_pos = np.hstack(pos_array for base, pos_array in tmp_contxt_pos)
# info about gene
gene_name = gene_seq.bed.gene_name
strand = gene_seq.bed.strand
chrom = gene_seq.bed.chrom
gene_seq.bed.init_genome_coordinates() # map seq pos to genome
# determine result of random positions
maf_list = []
for row in tmp_mut_pos:
# get genome coordinate
pos2genome = np.vectorize(lambda x: gene_seq.bed.seqpos2genome[x]+1)
genome_coord = pos2genome(row)
# get info about mutations
tmp_mut_info = mc.get_aa_mut_info(row,
somatic_base,
gene_seq)
# get string describing variant
var_class = cutils.get_variant_classification(tmp_mut_info['Reference AA'],
tmp_mut_info['Somatic AA'],
tmp_mut_info['Codon Pos'])
# prepare output
for k, mysomatic_base in enumerate(somatic_base):
# format DNA change
ref_nuc = tmp_mut_info['Reference Nuc'][k]
nuc_pos = row[k]
dna_change = 'c.{0}{1}>{2}'.format(ref_nuc, nuc_pos, mysomatic_base)
# format protein change
ref_aa = tmp_mut_info['Reference AA'][k]
somatic_aa = tmp_mut_info['Somatic AA'][k]
codon_pos = tmp_mut_info['Codon Pos'][k]
protein_change = 'p.{0}{1}{2}'.format(ref_aa, codon_pos, somatic_aa)
# reverse complement if on negative strand
if strand == '-':
ref_nuc = utils.rev_comp(ref_nuc)
mysomatic_base = utils.rev_comp(mysomatic_base)
# append results
if drop_silent and var_class[k].decode() == 'Silent': continue
maf_line = [gene_name, strand, chrom, genome_coord[k], genome_coord[k],
ref_nuc, mysomatic_base, base_context[k], dna_change,
protein_change, var_class[k].decode()]
maf_list.append(maf_line)
return maf_list |
def expand_expression(self, pattern, hosts, services, hostgroups, servicegroups, running=False):
        # pylint: disable=too-many-locals
        """Expand a host or service expression into a dependency node tree
        using (host|service)group membership, regex, or labels as item selector.
        :param pattern: pattern to parse
        :type pattern: str
        :param hosts: hosts list, used to find a specific host
        :type hosts: alignak.objects.host.Host
        :param services: services list, used to find a specific service
        :type services: alignak.objects.service.Service
        :param hostgroups: hostgroups list, used by the filters to resolve group-based selectors
        :type hostgroups: alignak.objects.hostgroup.Hostgroups
        :param servicegroups: servicegroups list, used by the filters to resolve group-based selectors
        :type servicegroups: alignak.objects.servicegroup.Servicegroups
        :param running: rules are evaluated at run time and parsing. True means runtime
        :type running: bool
        :return: root node of parsed tree
        :rtype: alignak.dependencynode.DependencyNode
        """
        error = None
        # All matched items are attached under a single AND node.
        node = DependencyNode()
        node.operand = '&'
        elts = [e.strip() for e in pattern.split(',')]
        # If host_name is empty, use the host_name the business rule is bound to
        if not elts[0]:
            elts[0] = self.bound_item.host_name
        filters = []
        # Looks for hosts/services using appropriate filters
        try:
            all_items = {
                "hosts": hosts,
                "hostgroups": hostgroups,
                "servicegroups": servicegroups
            }
            if len(elts) > 1:
                # We got a service expression: "host_part,service_part"
                host_expr, service_expr = elts
                filters.extend(self.get_srv_host_filters(host_expr))
                filters.extend(self.get_srv_service_filters(service_expr))
                items = services.find_by_filter(filters, all_items)
            else:
                # We got a host expression
                host_expr = elts[0]
                filters.extend(self.get_host_filters(host_expr))
                items = hosts.find_by_filter(filters, all_items)
        except re.error as regerr:
            # A filter contained an invalid regular expression.
            error = "Business rule uses invalid regex %s: %s" % (pattern, regerr)
        else:
            if not items:
                error = "Business rule got an empty result for pattern %s" % pattern
        # Checks if we got result
        if error:
            if running is False:
                # Parse time: record the problem as a configuration error.
                node.configuration_errors.append(error)
            else:
                # As business rules are re-evaluated at run time on
                # each scheduling loop, if the rule becomes invalid
                # because of a badly written macro modulation, it
                # should be notified upper for the error to be
                # displayed in the check output.
                raise Exception(error)
            return node
        # Creates dependency node subtree
        # here we have Alignak SchedulingItem object (Host/Service)
        for item in items:
            # Creates a host/service node
            son = DependencyNode()
            son.operand = item.__class__.my_type
            son.sons.append(item.uuid)  # Only store the uuid, not the full object.
            # Appends it to wrapping node
            node.sons.append(son)
        node.switch_zeros_of_values()
        return node | Expand a host or service expression into a dependency node tree
using (host|service)group membership, regex, or labels as item selector.
:param pattern: pattern to parse
:type pattern: str
:param hosts: hosts list, used to find a specific host
:type hosts: alignak.objects.host.Host
:param services: services list, used to find a specific service
:type services: alignak.objects.service.Service
:param running: rules are evaluated at run time and parsing. True means runtime
:type running: bool
:return: root node of parsed tree
    :rtype: alignak.dependencynode.DependencyNode | Below is the instruction that describes the task:
### Input:
Expand a host or service expression into a dependency node tree
using (host|service)group membership, regex, or labels as item selector.
:param pattern: pattern to parse
:type pattern: str
:param hosts: hosts list, used to find a specific host
:type hosts: alignak.objects.host.Host
:param services: services list, used to find a specific service
:type services: alignak.objects.service.Service
:param running: rules are evaluated at run time and parsing. True means runtime
:type running: bool
:return: root node of parsed tree
:rtype: alignak.dependencynode.DependencyNode
### Response:
def expand_expression(self, pattern, hosts, services, hostgroups, servicegroups, running=False):
# pylint: disable=too-many-locals
"""Expand a host or service expression into a dependency node tree
using (host|service)group membership, regex, or labels as item selector.
:param pattern: pattern to parse
:type pattern: str
:param hosts: hosts list, used to find a specific host
:type hosts: alignak.objects.host.Host
:param services: services list, used to find a specific service
:type services: alignak.objects.service.Service
:param running: rules are evaluated at run time and parsing. True means runtime
:type running: bool
:return: root node of parsed tree
:rtype: alignak.dependencynode.DependencyNode
"""
error = None
node = DependencyNode()
node.operand = '&'
elts = [e.strip() for e in pattern.split(',')]
# If host_name is empty, use the host_name the business rule is bound to
if not elts[0]:
elts[0] = self.bound_item.host_name
filters = []
# Looks for hosts/services using appropriate filters
try:
all_items = {
"hosts": hosts,
"hostgroups": hostgroups,
"servicegroups": servicegroups
}
if len(elts) > 1:
# We got a service expression
host_expr, service_expr = elts
filters.extend(self.get_srv_host_filters(host_expr))
filters.extend(self.get_srv_service_filters(service_expr))
items = services.find_by_filter(filters, all_items)
else:
# We got a host expression
host_expr = elts[0]
filters.extend(self.get_host_filters(host_expr))
items = hosts.find_by_filter(filters, all_items)
except re.error as regerr:
error = "Business rule uses invalid regex %s: %s" % (pattern, regerr)
else:
if not items:
error = "Business rule got an empty result for pattern %s" % pattern
# Checks if we got result
if error:
if running is False:
node.configuration_errors.append(error)
else:
# As business rules are re-evaluated at run time on
# each scheduling loop, if the rule becomes invalid
# because of a badly written macro modulation, it
# should be notified upper for the error to be
# displayed in the check output.
raise Exception(error)
return node
# Creates dependency node subtree
# here we have Alignak SchedulingItem object (Host/Service)
for item in items:
# Creates a host/service node
son = DependencyNode()
son.operand = item.__class__.my_type
son.sons.append(item.uuid) # Only store the uuid, not the full object.
# Appends it to wrapping node
node.sons.append(son)
node.switch_zeros_of_values()
return node |
def contrast(self, value):
    """
    Set the LED intensity to the desired level.

    :param value: Desired contrast level in the range of 0-255.
    :type value: int
    :raises AssertionError: if ``value`` is outside the 0-255 range.
    """
    # Fix: the docstring previously documented a parameter named ``level``
    # that does not exist; the actual parameter is ``value``.
    assert 0x00 <= value <= 0xFF
    # The device only honours the upper nibble (16 brightness steps).
    self._brightness = value >> 4
    # Re-render the last frame so the new brightness takes effect immediately.
    if self._last_image is not None:
        self.display(self._last_image)
:param level: Desired contrast level in the range of 0-255.
        :type level: int | Below is the instruction that describes the task:
### Input:
Sets the LED intensity to the desired level, in the range 0-255.
:param level: Desired contrast level in the range of 0-255.
:type level: int
### Response:
def contrast(self, value):
"""
Sets the LED intensity to the desired level, in the range 0-255.
:param level: Desired contrast level in the range of 0-255.
:type level: int
"""
assert(0x00 <= value <= 0xFF)
self._brightness = value >> 4
if self._last_image is not None:
self.display(self._last_image) |
def Chunks(l, n, all=False):
    '''
    Yield consecutive `n`-sized chunks of list `l`.

    The final chunk absorbs any remainder, so it may contain between `n`
    and `2 * n - 1` elements; a short trailing chunk is never yielded.
    (The previous docstring claimed strictly `n`-sized chunks.)

    If `all` is `True`, additionally scan from every starting offset
    `0 .. n - 2`, yielding (overlapping) `n`-sized chunks; in that mode no
    remainder chunk is yielded and each scan stops as soon as fewer than
    `n` elements would remain past the current chunk.

    :param l: sequence to split
    :param n: chunk size (assumed to be a positive integer)
    :param all: when True, slide the starting offset to produce all scans
    '''
    # Starting offsets to scan from; just 0 unless `all` is requested.
    # (`all` shadows the builtin but is kept for interface compatibility.)
    offsets = range(0, n - 1) if all else [0]
    for j in offsets:
        for i in range(j, len(l), n):
            if i + 2 * n <= len(l):
                # At least n elements remain beyond this chunk: yield as-is.
                yield l[i:i + n]
            else:
                # Fewer than n elements would remain: fold them into the
                # final chunk (only when yielding remainders is allowed).
                if not all:
                    yield l[i:]
                break
If `all` is `True`, returns **all** `n`-sized chunks in `l`
    by iterating over the starting point. | Below is the instruction that describes the task:
### Input:
Returns a generator of consecutive `n`-sized chunks of list `l`.
If `all` is `True`, returns **all** `n`-sized chunks in `l`
by iterating over the starting point.
### Response:
def Chunks(l, n, all=False):
'''
Returns a generator of consecutive `n`-sized chunks of list `l`.
If `all` is `True`, returns **all** `n`-sized chunks in `l`
by iterating over the starting point.
'''
if all:
jarr = range(0, n - 1)
else:
jarr = [0]
for j in jarr:
for i in range(j, len(l), n):
if i + 2 * n <= len(l):
yield l[i:i + n]
else:
if not all:
yield l[i:]
break |
def _match_literal(self, a, b=None):
    """Match name ``a`` against literal ``b``.

    When ``self.case_sensitive`` is false, only ``a`` is lowercased before
    the comparison; ``b`` is compared as-is and is therefore expected to
    already be lowercase in that mode (presumably pre-normalized by the
    caller -- TODO confirm).
    """
    return a.lower() == b if not self.case_sensitive else a == b | Match two names. | Below is the the instruction that describes the task:
### Input:
Match two names.
### Response:
def _match_literal(self, a, b=None):
"""Match two names."""
return a.lower() == b if not self.case_sensitive else a == b |
def setup_zone(self):
        """
        Look up and cache the zone for the current domain.

        Also fetches the DNS records of that zone and caches them in
        ``self.dns_records``.

        :raises ZoneNotFound: if no zone whose name matches the domain is
            returned by the API.
        :return: None
        """
        # Initialize current zone
        zones_content = self.request(self.api_url, 'get')
        try:
            # NOTE(review): a 3-label name is assumed to be a subdomain and is
            # stripped to its parent; names under multi-label TLDs (e.g.
            # example.co.uk) would be mis-handled here -- TODO confirm.
            if len(self.domain.split('.')) == 3:
                domain = self.domain.split('.', 1)[1]
            else:
                domain = self.domain
            # First zone whose name matches exactly; IndexError means no match.
            zone = [zone for zone in zones_content['result'] if zone['name'] == domain][0]
        except IndexError:
            raise ZoneNotFound('Cannot find zone information for the domain {domain}.'
                               .format(domain=self.domain))
        self.zone = zone
        # Initialize dns_records of current zone
        dns_content = self.request(self.api_url + zone['id'] + '/dns_records', 'get')
        self.dns_records = dns_content['result'] | Setup zone for current domain.
It will also setup the dns records of the zone
    :return: | Below is the instruction that describes the task:
### Input:
Setup zone for current domain.
It will also setup the dns records of the zone
:return:
### Response:
def setup_zone(self):
"""
Setup zone for current domain.
It will also setup the dns records of the zone
:return:
"""
# Initialize current zone
zones_content = self.request(self.api_url, 'get')
try:
if len(self.domain.split('.')) == 3:
domain = self.domain.split('.', 1)[1]
else:
domain = self.domain
zone = [zone for zone in zones_content['result'] if zone['name'] == domain][0]
except IndexError:
raise ZoneNotFound('Cannot find zone information for the domain {domain}.'
.format(domain=self.domain))
self.zone = zone
# Initialize dns_records of current zone
dns_content = self.request(self.api_url + zone['id'] + '/dns_records', 'get')
self.dns_records = dns_content['result'] |
def parse_graminit_h(self, filename):
    """Parse the .h file written by pgen. (Internal)

    This file is a sequence of #define statements defining the
    nonterminals of the grammar as numbers.  We build two tables
    mapping the numbers to names and back.

    :param filename: path to the pgen-generated graminit.h file
    :return: True on success, False if the file could not be opened
    """
    try:
        f = open(filename)
    except IOError as err:
        # `except ... as err` (not the Python-2-only `except IOError, err`)
        # keeps this valid on Python 3 while still working on Python >= 2.6.
        print("Can't open %s: %s" % (filename, err))
        return False
    self.symbol2number = {}
    self.number2symbol = {}
    lineno = 0
    # `with` guarantees the handle is closed (the original leaked it).
    with f:
        for line in f:
            lineno += 1
            mo = re.match(r"^#define\s+(\w+)\s+(\d+)$", line)
            if mo:
                symbol, number = mo.groups()
                number = int(number)
                # Symbols and numbers must each be unique across the file.
                assert symbol not in self.symbol2number
                assert number not in self.number2symbol
                self.symbol2number[symbol] = number
                self.number2symbol[number] = symbol
            elif line.strip():
                # Non-blank, non-#define line: report it and keep going.
                # (The original crashed on blank lines: they fell through
                # to `mo.groups()` with mo == None.)
                print("%s(%s): can't parse %s" % (filename, lineno,
                                                  line.strip()))
    return True
This file is a sequence of #define statements defining the
nonterminals of the grammar as numbers. We build two tables
    mapping the numbers to names and back. | Below is the instruction that describes the task:
### Input:
Parse the .h file written by pgen. (Internal)
This file is a sequence of #define statements defining the
nonterminals of the grammar as numbers. We build two tables
mapping the numbers to names and back.
### Response:
def parse_graminit_h(self, filename):
"""Parse the .h file written by pgen. (Internal)
This file is a sequence of #define statements defining the
nonterminals of the grammar as numbers. We build two tables
mapping the numbers to names and back.
"""
try:
f = open(filename)
except IOError, err:
print "Can't open %s: %s" % (filename, err)
return False
self.symbol2number = {}
self.number2symbol = {}
lineno = 0
for line in f:
lineno += 1
mo = re.match(r"^#define\s+(\w+)\s+(\d+)$", line)
if not mo and line.strip():
print "%s(%s): can't parse %s" % (filename, lineno,
line.strip())
else:
symbol, number = mo.groups()
number = int(number)
assert symbol not in self.symbol2number
assert number not in self.number2symbol
self.symbol2number[symbol] = number
self.number2symbol[number] = symbol
return True |
def form_valid(self, forms):
    """
    If every form is valid, save each associated model.

    Each saved instance is stored on the view as ``<key>_object`` (where
    ``key`` is the form's key in the ``forms`` mapping) before delegating
    to the parent class's ``form_valid``.

    :param forms: mapping of form name -> bound, validated form
    :return: the response from the parent ``form_valid``
    """
    for key, form in forms.items():
        setattr(self, '{}_object'.format(key), form.save())
    return super(MultipleModelFormMixin, self).form_valid(forms) | If the form is valid, save the associated model. | Below is the the instruction that describes the task:
### Input:
If the form is valid, save the associated model.
### Response:
def form_valid(self, forms):
"""
If the form is valid, save the associated model.
"""
for key, form in forms.items():
setattr(self, '{}_object'.format(key), form.save())
return super(MultipleModelFormMixin, self).form_valid(forms) |
def submit_tar(cl_args, unknown_args, tmp_dir):
  '''
  Extract and execute the java files inside the tar and then add the topology
  definition file created by running submitTopology.

  We use the packer to make a package for the tar and dump it to a well-known
  location. We then run the main method of the topology class with the
  specified arguments, passing extra arguments through the HERON_OPTIONS
  environment variable. The submitter inside writes the topology definition
  file to the location we specify (``tmp_dir``); the topology is then
  launched from that definition.

  :param cl_args: parsed command line arguments
  :param unknown_args: extra arguments forwarded to the topology's main class
  :param tmp_dir: scratch directory where the topology definition is written
  :return: result object describing success or failure
  '''
  # execute main of the topology to create the topology definition
  topology_file = cl_args['topology-file-name']
  java_defines = cl_args['topology_main_jvm_property']
  main_class = cl_args['topology-class-name']
  res = execute.heron_tar(
      main_class,
      topology_file,
      tuple(unknown_args),
      tmp_dir,
      java_defines)
  result.render(res)
  if not result.is_successful(res):
    err_context = ("Failed to create topology definition " \
                   "file when executing class '%s' of file '%s'") % (main_class, topology_file)
    res.add_context(err_context)
    return res
  # Definition file was produced; hand off to the launcher.
  return launch_topologies(cl_args, topology_file, tmp_dir) | Extract and execute the java files inside the tar and then add topology
definition file created by running submitTopology
We use the packer to make a package for the tar and dump it
to a well-known location. We then run the main method of class
with the specified arguments. We pass arguments as an environment variable HERON_OPTIONS.
This will run the jar file with the topology class name.
The submitter inside will write out the topology defn file to a location
that we specify. Then we write the topology defn file to a well known
packer location. We then write to appropriate places in zookeeper
and launch the aurora jobs
:param cl_args:
:param unknown_args:
:param tmp_dir:
:return: | Below is the the instruction that describes the task:
### Input:
Extract and execute the java files inside the tar and then add topology
definition file created by running submitTopology
We use the packer to make a package for the tar and dump it
to a well-known location. We then run the main method of class
with the specified arguments. We pass arguments as an environment variable HERON_OPTIONS.
This will run the jar file with the topology class name.
The submitter inside will write out the topology defn file to a location
that we specify. Then we write the topology defn file to a well known
packer location. We then write to appropriate places in zookeeper
and launch the aurora jobs
:param cl_args:
:param unknown_args:
:param tmp_dir:
:return:
### Response:
def submit_tar(cl_args, unknown_args, tmp_dir):
'''
Extract and execute the java files inside the tar and then add topology
definition file created by running submitTopology
We use the packer to make a package for the tar and dump it
to a well-known location. We then run the main method of class
with the specified arguments. We pass arguments as an environment variable HERON_OPTIONS.
This will run the jar file with the topology class name.
The submitter inside will write out the topology defn file to a location
that we specify. Then we write the topology defn file to a well known
packer location. We then write to appropriate places in zookeeper
and launch the aurora jobs
:param cl_args:
:param unknown_args:
:param tmp_dir:
:return:
'''
# execute main of the topology to create the topology definition
topology_file = cl_args['topology-file-name']
java_defines = cl_args['topology_main_jvm_property']
main_class = cl_args['topology-class-name']
res = execute.heron_tar(
main_class,
topology_file,
tuple(unknown_args),
tmp_dir,
java_defines)
result.render(res)
if not result.is_successful(res):
err_context = ("Failed to create topology definition " \
"file when executing class '%s' of file '%s'") % (main_class, topology_file)
res.add_context(err_context)
return res
return launch_topologies(cl_args, topology_file, tmp_dir) |
def _get_upserts(queryset, model_objs_updated, model_objs_created, unique_fields):
    """
    Given a list of model objects that were updated and model objects that were
    created, return the list of all model objects upserted (updated first, then
    created). Doing this requires fetching all of the models created with bulk
    create (since django can't return bulk_create pks).
    """
    # _get_upserts_distinct does the re-fetch of bulk-created rows.
    updated, created = _get_upserts_distinct(queryset, model_objs_updated, model_objs_created, unique_fields)
    return updated + created | Given a list of model objects that were updated and model objects that were created,
return the list of all model objects upserted. Doing this requires fetching all of
    the models created with bulk create (since django can't return bulk_create pks) | Below is the instruction that describes the task:
### Input:
Given a list of model objects that were updated and model objects that were created,
return the list of all model objects upserted. Doing this requires fetching all of
the models created with bulk create (since django can't return bulk_create pks)
### Response:
def _get_upserts(queryset, model_objs_updated, model_objs_created, unique_fields):
"""
Given a list of model objects that were updated and model objects that were created,
return the list of all model objects upserted. Doing this requires fetching all of
the models created with bulk create (since django can't return bulk_create pks)
"""
updated, created = _get_upserts_distinct(queryset, model_objs_updated, model_objs_created, unique_fields)
return updated + created |
def add_flags(self, flag_name, flags, target_name=None, configuration_name=None):
        """
        Adds the given flags to the flag_name section of the target on the configurations
        :param flag_name: name of the flag to be added the values to
        :param flags: A string or array of strings
        :param target_name: Target name or list of target names to add the flag to or None for every target
        :param configuration_name: Configuration name to add the flag to or None for every configuration
        :return: void
        """
        # Delegate per-configuration: each matching build configuration stores
        # its own copy of the flag values.
        for configuration in self.objects.get_configurations_on_targets(target_name, configuration_name):
            configuration.add_flags(flag_name, flags)
:param flag_name: name of the flag to be added the values to
:param flags: A string or array of strings
:param target_name: Target name or list of target names to add the flag to or None for every target
:param configuration_name: Configuration name to add the flag to or None for every configuration
    :return: void | Below is the instruction that describes the task:
### Input:
Adds the given flags to the flag_name section of the target on the configurations
:param flag_name: name of the flag to be added the values to
:param flags: A string or array of strings
:param target_name: Target name or list of target names to add the flag to or None for every target
:param configuration_name: Configuration name to add the flag to or None for every configuration
:return: void
### Response:
def add_flags(self, flag_name, flags, target_name=None, configuration_name=None):
"""
Adds the given flags to the flag_name section of the target on the configurations
:param flag_name: name of the flag to be added the values to
:param flags: A string or array of strings
:param target_name: Target name or list of target names to add the flag to or None for every target
:param configuration_name: Configuration name to add the flag to or None for every configuration
:return: void
"""
for configuration in self.objects.get_configurations_on_targets(target_name, configuration_name):
configuration.add_flags(flag_name, flags) |
def aggregate(self, out_file: str = None, select: np.ndarray = None, group_by: Union[str, np.ndarray] = "Clusters", aggr_by: str = "mean", aggr_ca_by: Dict[str, str] = None) -> np.ndarray:
        """
        Aggregate the Loom file by applying aggregation functions to the main matrix as well as to the column attributes
        Args:
            out_file        The name of the output Loom file (will be appended to if it exists)
            select          Deprecated; must be None
            group_by        The column attribute to group by, or an np.ndarray of integer group labels
            aggr_by         The aggregation function for the main matrix
            aggr_ca_by      A dictionary of aggregation functions for the column attributes (or None to skip)
        Returns:
            m               Aggregated main matrix (genes x groups)
        Remarks:
            aggr_by gives the aggregation function for the main matrix
            aggr_ca_by is a dictionary with column attributes as keys and aggregation functions as values
            Aggregation functions can be any valid aggregation function from here: https://github.com/ml31415/numpy-groupies
            In addition, you can specify:
                "tally" to count the number of occurrences of each value of a categorical attribute
        """
        ca = {}  # type: Dict[str, np.ndarray]
        if select is not None:
            raise ValueError("The 'select' argument is deprecated")
        if isinstance(group_by, np.ndarray):
            labels = group_by
        else:
            labels = (self.ca[group_by]).astype('int')
        # return_inverse remaps arbitrary labels to dense 0-based group ids,
        # so npg.aggregate output has no holes even if labels are sparse.
        _, zero_strt_sort_noholes_lbls = np.unique(labels, return_inverse=True)
        n_groups = len(set(labels))
        if aggr_ca_by is not None:
            for key in self.ca.keys():
                if key not in aggr_ca_by:
                    continue
                func = aggr_ca_by[key]
                if func == "tally":
                    # One output attribute per distinct value, holding counts
                    # per group. NOTE(review): non-string values are silently
                    # skipped by the issubdtype guard -- TODO confirm intended.
                    for val in set(self.ca[key]):
                        if np.issubdtype(type(val), np.str_):
                            valnew = val.replace("/", "-")  # Slashes are not allowed in attribute names
                            valnew = valnew.replace(".", "_")  # Nor are periods
                            ca[key + "_" + str(valnew)] = npg.aggregate(zero_strt_sort_noholes_lbls, (self.ca[key] == val).astype('int'), func="sum", fill_value=0)
                elif func == "mode":
                    # Most frequent value per group, stored as strings.
                    def mode(x):  # type: ignore
                        return scipy.stats.mode(x)[0][0]
                    ca[key] = npg.aggregate(zero_strt_sort_noholes_lbls, self.ca[key], func=mode, fill_value=0).astype('str')
                elif func == "mean":
                    ca[key] = npg.aggregate(zero_strt_sort_noholes_lbls, self.ca[key], func=func, fill_value=0)
                elif func == "first":
                    ca[key] = npg.aggregate(zero_strt_sort_noholes_lbls, self.ca[key], func=func, fill_value=self.ca[key][0])
        m = np.empty((self.shape[0], n_groups))
        # Scan row-wise so the whole matrix never has to fit in memory at once.
        for (_, selection, view) in self.scan(axis=0, layers=[""]):
            vals_aggr = npg.aggregate(zero_strt_sort_noholes_lbls, view[:, :], func=aggr_by, axis=1, fill_value=0)
            m[selection, :] = vals_aggr
        if out_file is not None:
            loompy.create(out_file, m, self.ra, ca)
        return m | Aggregate the Loom file by applying aggregation functions to the main matrix as well as to the column attributes
Args:
out_file The name of the output Loom file (will be appended to if it exists)
select Bool array giving the columns to include (or None, to include all)
group_by The column attribute to group by, or an np.ndarray of integer group labels
aggr_by The aggregation function for the main matrix
aggr_ca_by A dictionary of aggregation functions for the column attributes (or None to skip)
Returns:
m Aggregated main matrix
Remarks:
aggr_by gives the aggregation function for the main matrix
aggr_ca_by is a dictionary with column attributes as keys and aggregation functionas as values
Aggregation functions can be any valid aggregation function from here: https://github.com/ml31415/numpy-groupies
In addition, you can specify:
"tally" to count the number of occurences of each value of a categorical attribute | Below is the the instruction that describes the task:
### Input:
Aggregate the Loom file by applying aggregation functions to the main matrix as well as to the column attributes
Args:
out_file The name of the output Loom file (will be appended to if it exists)
select Bool array giving the columns to include (or None, to include all)
group_by The column attribute to group by, or an np.ndarray of integer group labels
aggr_by The aggregation function for the main matrix
aggr_ca_by A dictionary of aggregation functions for the column attributes (or None to skip)
Returns:
m Aggregated main matrix
Remarks:
aggr_by gives the aggregation function for the main matrix
aggr_ca_by is a dictionary with column attributes as keys and aggregation functionas as values
Aggregation functions can be any valid aggregation function from here: https://github.com/ml31415/numpy-groupies
In addition, you can specify:
"tally" to count the number of occurences of each value of a categorical attribute
### Response:
def aggregate(self, out_file: str = None, select: np.ndarray = None, group_by: Union[str, np.ndarray] = "Clusters", aggr_by: str = "mean", aggr_ca_by: Dict[str, str] = None) -> np.ndarray:
"""
Aggregate the Loom file by applying aggregation functions to the main matrix as well as to the column attributes
Args:
out_file The name of the output Loom file (will be appended to if it exists)
select Bool array giving the columns to include (or None, to include all)
group_by The column attribute to group by, or an np.ndarray of integer group labels
aggr_by The aggregation function for the main matrix
aggr_ca_by A dictionary of aggregation functions for the column attributes (or None to skip)
Returns:
m Aggregated main matrix
Remarks:
aggr_by gives the aggregation function for the main matrix
aggr_ca_by is a dictionary with column attributes as keys and aggregation functionas as values
Aggregation functions can be any valid aggregation function from here: https://github.com/ml31415/numpy-groupies
In addition, you can specify:
"tally" to count the number of occurences of each value of a categorical attribute
"""
ca = {} # type: Dict[str, np.ndarray]
if select is not None:
raise ValueError("The 'select' argument is deprecated")
if isinstance(group_by, np.ndarray):
labels = group_by
else:
labels = (self.ca[group_by]).astype('int')
_, zero_strt_sort_noholes_lbls = np.unique(labels, return_inverse=True)
n_groups = len(set(labels))
if aggr_ca_by is not None:
for key in self.ca.keys():
if key not in aggr_ca_by:
continue
func = aggr_ca_by[key]
if func == "tally":
for val in set(self.ca[key]):
if np.issubdtype(type(val), np.str_):
valnew = val.replace("/", "-") # Slashes are not allowed in attribute names
valnew = valnew.replace(".", "_") # Nor are periods
ca[key + "_" + str(valnew)] = npg.aggregate(zero_strt_sort_noholes_lbls, (self.ca[key] == val).astype('int'), func="sum", fill_value=0)
elif func == "mode":
def mode(x): # type: ignore
return scipy.stats.mode(x)[0][0]
ca[key] = npg.aggregate(zero_strt_sort_noholes_lbls, self.ca[key], func=mode, fill_value=0).astype('str')
elif func == "mean":
ca[key] = npg.aggregate(zero_strt_sort_noholes_lbls, self.ca[key], func=func, fill_value=0)
elif func == "first":
ca[key] = npg.aggregate(zero_strt_sort_noholes_lbls, self.ca[key], func=func, fill_value=self.ca[key][0])
m = np.empty((self.shape[0], n_groups))
for (_, selection, view) in self.scan(axis=0, layers=[""]):
vals_aggr = npg.aggregate(zero_strt_sort_noholes_lbls, view[:, :], func=aggr_by, axis=1, fill_value=0)
m[selection, :] = vals_aggr
if out_file is not None:
loompy.create(out_file, m, self.ra, ca)
return m |
def deduplicate_sequences(records, out_file):
    """
    Remove any duplicate records with identical sequences: keep the first
    instance seen and discard additional occurrences.

    This is a generator; note that the duplicate summary is only written to
    ``out_file`` after the generator has been fully consumed.

    :param records: iterable of sequence records (objects with .seq and .id)
    :param out_file: writable file handle for the duplicate-id summary, or
        None to skip writing it
    """
    logging.info('Applying _deduplicate_sequences generator: '
                 'removing any duplicate records with identical sequences.')
    checksum_sequences = collections.defaultdict(list)
    for record in records:
        # seguid gives a stable checksum of the sequence content.
        checksum = seguid(record.seq)
        sequences = checksum_sequences[checksum]
        if not sequences:
            # First time this sequence is seen: pass the record through.
            yield record
        # Track every id that shares this sequence (first one included).
        sequences.append(record.id)
    if out_file is not None:
        with out_file:
            # One line per distinct sequence, listing all ids that shared it.
            for sequences in checksum_sequences.values():
                out_file.write('%s\n' % (' '.join(sequences),)) | Remove any duplicate records with identical sequences, keep the first
    instance seen and discard additional occurrences. | Below is the instruction that describes the task:
### Input:
Remove any duplicate records with identical sequences, keep the first
instance seen and discard additional occurences.
### Response:
def deduplicate_sequences(records, out_file):
"""
Remove any duplicate records with identical sequences, keep the first
instance seen and discard additional occurences.
"""
logging.info('Applying _deduplicate_sequences generator: '
'removing any duplicate records with identical sequences.')
checksum_sequences = collections.defaultdict(list)
for record in records:
checksum = seguid(record.seq)
sequences = checksum_sequences[checksum]
if not sequences:
yield record
sequences.append(record.id)
if out_file is not None:
with out_file:
for sequences in checksum_sequences.values():
out_file.write('%s\n' % (' '.join(sequences),)) |
def log_delete(sender, instance, **kwargs):
    """
    Signal receiver that creates a log entry when a model instance is deleted
    from the database.

    Direct use is discouraged, connect your model through
    :py:func:`auditlog.registry.register` instead.

    :param sender: the model class sending the signal (unused)
    :param instance: the model instance being deleted
    """
    # Unsaved instances (pk is None) were never in the database, so there is
    # nothing to log for them.
    if instance.pk is not None:
        changes = model_instance_diff(instance, None)
        # Fix: the returned LogEntry was bound to an unused local; log_create
        # already persists it, so the assignment is dropped.
        LogEntry.objects.log_create(
            instance,
            action=LogEntry.Action.DELETE,
            changes=json.dumps(changes),
        )
    Direct use is discouraged, connect your model through :py:func:`auditlog.registry.register` instead. | Below is the instruction that describes the task:
### Input:
Signal receiver that creates a log entry when a model instance is deleted from the database.
Direct use is discouraged, connect your model through :py:func:`auditlog.registry.register` instead.
### Response:
def log_delete(sender, instance, **kwargs):
"""
Signal receiver that creates a log entry when a model instance is deleted from the database.
Direct use is discouraged, connect your model through :py:func:`auditlog.registry.register` instead.
"""
if instance.pk is not None:
changes = model_instance_diff(instance, None)
log_entry = LogEntry.objects.log_create(
instance,
action=LogEntry.Action.DELETE,
changes=json.dumps(changes),
) |
def unwrap(data_type):
    """
    Convenience method to unwrap all Aliases and Nullables from around a
    DataType. This checks for nullable wrapping aliases, as well as aliases
    wrapping nullables.

    Args:
        data_type (DataType): The target to unwrap.
    Return:
        Tuple[DataType, bool, bool]: The underlying data type; a bool that is
            set if a nullable was present; a bool that is set if an alias was
            present.
    """
    unwrapped_nullable = False
    unwrapped_alias = False
    # Peel one wrapper per iteration until the innermost data type is reached,
    # remembering which wrapper kinds were encountered along the way.
    while is_alias(data_type) or is_nullable_type(data_type):
        if is_nullable_type(data_type):
            unwrapped_nullable = True
        if is_alias(data_type):
            unwrapped_alias = True
        data_type = data_type.data_type
    return data_type, unwrapped_nullable, unwrapped_alias | Convenience method to unwrap all Aliases and Nullables from around a
DataType. This checks for nullable wrapping aliases, as well as aliases
wrapping nullables.
Args:
data_type (DataType): The target to unwrap.
Return:
Tuple[DataType, bool, bool]: The underlying data type; a bool that is
set if a nullable was present; a bool that is set if an alias was
present. | Below is the the instruction that describes the task:
### Input:
Convenience method to unwrap all Aliases and Nullables from around a
DataType. This checks for nullable wrapping aliases, as well as aliases
wrapping nullables.
Args:
data_type (DataType): The target to unwrap.
Return:
Tuple[DataType, bool, bool]: The underlying data type; a bool that is
set if a nullable was present; a bool that is set if an alias was
present.
### Response:
def unwrap(data_type):
    """
    Strip every Alias and Nullable wrapper from around a DataType.

    Handles arbitrary interleavings, i.e. nullables wrapping aliases as
    well as aliases wrapping nullables.
    Args:
        data_type (DataType): The (possibly wrapped) type to unwrap.
    Return:
        Tuple[DataType, bool, bool]: The innermost data type; whether a
        nullable wrapper was encountered; whether an alias wrapper was
        encountered.
    """
    saw_nullable = False
    saw_alias = False
    current = data_type
    while is_alias(current) or is_nullable_type(current):
        # Both checks run on every pass so interleaved wrapper kinds are
        # all accounted for.
        saw_nullable = saw_nullable or is_nullable_type(current)
        saw_alias = saw_alias or is_alias(current)
        current = current.data_type
    return current, saw_nullable, saw_alias
def is_underlined(r):
"""
The function will return True if the r tag passed in is considered
underlined.
"""
w_namespace = get_namespace(r, 'w')
rpr = r.find('%srPr' % w_namespace)
if rpr is None:
return False
underline = rpr.find('%su' % w_namespace)
return style_is_false(underline) | The function will return True if the r tag passed in is considered
    underlined. | Below is the instruction that describes the task:
### Input:
The function will return True if the r tag passed in is considered
underlined.
### Response:
def is_underlined(r):
    """
    Report whether the given ``r`` (run) tag carries underline formatting.
    """
    w_ns = get_namespace(r, 'w')
    run_props = r.find('{}rPr'.format(w_ns))
    if run_props is None:
        # No run-properties element means no direct formatting at all.
        return False
    underline_tag = run_props.find('{}u'.format(w_ns))
    return style_is_false(underline_tag)
def piptrack(y=None, sr=22050, S=None, n_fft=2048, hop_length=None,
fmin=150.0, fmax=4000.0, threshold=0.1,
win_length=None, window='hann', center=True, pad_mode='reflect',
ref=None):
'''Pitch tracking on thresholded parabolically-interpolated STFT.
This implementation uses the parabolic interpolation method described by [1]_.
.. [1] https://ccrma.stanford.edu/~jos/sasp/Sinusoidal_Peak_Interpolation.html
Parameters
----------
y: np.ndarray [shape=(n,)] or None
audio signal
sr : number > 0 [scalar]
audio sampling rate of `y`
S: np.ndarray [shape=(d, t)] or None
magnitude or power spectrogram
n_fft : int > 0 [scalar] or None
number of FFT bins to use, if `y` is provided.
hop_length : int > 0 [scalar] or None
number of samples to hop
threshold : float in `(0, 1)`
A bin in spectrum `S` is considered a pitch when it is greater than
`threshold*ref(S)`.
By default, `ref(S)` is taken to be `max(S, axis=0)` (the maximum value in
each column).
fmin : float > 0 [scalar]
lower frequency cutoff.
fmax : float > 0 [scalar]
upper frequency cutoff.
win_length : int <= n_fft [scalar]
Each frame of audio is windowed by `window()`.
The window will be of length `win_length` and then padded
with zeros to match `n_fft`.
If unspecified, defaults to ``win_length = n_fft``.
window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
- a window specification (string, tuple, or number);
see `scipy.signal.get_window`
- a window function, such as `scipy.signal.hanning`
- a vector or array of length `n_fft`
.. see also:: `filters.get_window`
center : boolean
- If `True`, the signal `y` is padded so that frame
`t` is centered at `y[t * hop_length]`.
- If `False`, then frame `t` begins at `y[t * hop_length]`
pad_mode : string
If `center=True`, the padding mode to use at the edges of the signal.
By default, STFT uses reflection padding.
ref : scalar or callable [default=np.max]
If scalar, the reference value against which `S` is compared for determining
pitches.
If callable, the reference value is computed as `ref(S, axis=0)`.
.. note::
One of `S` or `y` must be provided.
If `S` is not given, it is computed from `y` using
the default parameters of `librosa.core.stft`.
Returns
-------
pitches : np.ndarray [shape=(d, t)]
magnitudes : np.ndarray [shape=(d,t)]
Where `d` is the subset of FFT bins within `fmin` and `fmax`.
`pitches[f, t]` contains instantaneous frequency at bin
`f`, time `t`
`magnitudes[f, t]` contains the corresponding magnitudes.
Both `pitches` and `magnitudes` take value 0 at bins
of non-maximal magnitude.
Notes
-----
This function caches at level 30.
Examples
--------
Computing pitches from a waveform input
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> pitches, magnitudes = librosa.piptrack(y=y, sr=sr)
Or from a spectrogram input
>>> S = np.abs(librosa.stft(y))
>>> pitches, magnitudes = librosa.piptrack(S=S, sr=sr)
Or with an alternate reference value for pitch detection, where
values above the mean spectral energy in each frame are counted as pitches
>>> pitches, magnitudes = librosa.piptrack(S=S, sr=sr, threshold=1,
... ref=np.mean)
'''
# Check that we received an audio time series or STFT
S, n_fft = _spectrogram(y=y, S=S, n_fft=n_fft, hop_length=hop_length,
win_length=win_length, window=window,
center=center, pad_mode=pad_mode)
# Make sure we're dealing with magnitudes
S = np.abs(S)
# Truncate to feasible region
fmin = np.maximum(fmin, 0)
fmax = np.minimum(fmax, float(sr) / 2)
fft_freqs = time_frequency.fft_frequencies(sr=sr, n_fft=n_fft)
# Do the parabolic interpolation everywhere,
# then figure out where the peaks are
# then restrict to the feasible range (fmin:fmax)
avg = 0.5 * (S[2:] - S[:-2])
shift = 2 * S[1:-1] - S[2:] - S[:-2]
# Suppress divide-by-zeros.
# Points where shift == 0 will never be selected by localmax anyway
shift = avg / (shift + (np.abs(shift) < util.tiny(shift)))
# Pad back up to the same shape as S
avg = np.pad(avg, ([1, 1], [0, 0]), mode='constant')
shift = np.pad(shift, ([1, 1], [0, 0]), mode='constant')
dskew = 0.5 * avg * shift
# Pre-allocate output
pitches = np.zeros_like(S)
mags = np.zeros_like(S)
# Clip to the viable frequency range
freq_mask = ((fmin <= fft_freqs) & (fft_freqs < fmax)).reshape((-1, 1))
# Compute the column-wise local max of S after thresholding
# Find the argmax coordinates
if ref is None:
ref = np.max
if six.callable(ref):
ref_value = threshold * ref(S, axis=0)
else:
ref_value = np.abs(ref)
idx = np.argwhere(freq_mask & util.localmax(S * (S > ref_value)))
# Store pitch and magnitude
pitches[idx[:, 0], idx[:, 1]] = ((idx[:, 0] + shift[idx[:, 0], idx[:, 1]])
* float(sr) / n_fft)
mags[idx[:, 0], idx[:, 1]] = (S[idx[:, 0], idx[:, 1]]
+ dskew[idx[:, 0], idx[:, 1]])
return pitches, mags | Pitch tracking on thresholded parabolically-interpolated STFT.
This implementation uses the parabolic interpolation method described by [1]_.
.. [1] https://ccrma.stanford.edu/~jos/sasp/Sinusoidal_Peak_Interpolation.html
Parameters
----------
y: np.ndarray [shape=(n,)] or None
audio signal
sr : number > 0 [scalar]
audio sampling rate of `y`
S: np.ndarray [shape=(d, t)] or None
magnitude or power spectrogram
n_fft : int > 0 [scalar] or None
number of FFT bins to use, if `y` is provided.
hop_length : int > 0 [scalar] or None
number of samples to hop
threshold : float in `(0, 1)`
A bin in spectrum `S` is considered a pitch when it is greater than
`threshold*ref(S)`.
By default, `ref(S)` is taken to be `max(S, axis=0)` (the maximum value in
each column).
fmin : float > 0 [scalar]
lower frequency cutoff.
fmax : float > 0 [scalar]
upper frequency cutoff.
win_length : int <= n_fft [scalar]
Each frame of audio is windowed by `window()`.
The window will be of length `win_length` and then padded
with zeros to match `n_fft`.
If unspecified, defaults to ``win_length = n_fft``.
window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
- a window specification (string, tuple, or number);
see `scipy.signal.get_window`
- a window function, such as `scipy.signal.hanning`
- a vector or array of length `n_fft`
.. see also:: `filters.get_window`
center : boolean
- If `True`, the signal `y` is padded so that frame
`t` is centered at `y[t * hop_length]`.
- If `False`, then frame `t` begins at `y[t * hop_length]`
pad_mode : string
If `center=True`, the padding mode to use at the edges of the signal.
By default, STFT uses reflection padding.
ref : scalar or callable [default=np.max]
If scalar, the reference value against which `S` is compared for determining
pitches.
If callable, the reference value is computed as `ref(S, axis=0)`.
.. note::
One of `S` or `y` must be provided.
If `S` is not given, it is computed from `y` using
the default parameters of `librosa.core.stft`.
Returns
-------
pitches : np.ndarray [shape=(d, t)]
magnitudes : np.ndarray [shape=(d,t)]
Where `d` is the subset of FFT bins within `fmin` and `fmax`.
`pitches[f, t]` contains instantaneous frequency at bin
`f`, time `t`
`magnitudes[f, t]` contains the corresponding magnitudes.
Both `pitches` and `magnitudes` take value 0 at bins
of non-maximal magnitude.
Notes
-----
This function caches at level 30.
Examples
--------
Computing pitches from a waveform input
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> pitches, magnitudes = librosa.piptrack(y=y, sr=sr)
Or from a spectrogram input
>>> S = np.abs(librosa.stft(y))
>>> pitches, magnitudes = librosa.piptrack(S=S, sr=sr)
Or with an alternate reference value for pitch detection, where
values above the mean spectral energy in each frame are counted as pitches
>>> pitches, magnitudes = librosa.piptrack(S=S, sr=sr, threshold=1,
... ref=np.mean) | Below is the the instruction that describes the task:
### Input:
Pitch tracking on thresholded parabolically-interpolated STFT.
This implementation uses the parabolic interpolation method described by [1]_.
.. [1] https://ccrma.stanford.edu/~jos/sasp/Sinusoidal_Peak_Interpolation.html
Parameters
----------
y: np.ndarray [shape=(n,)] or None
audio signal
sr : number > 0 [scalar]
audio sampling rate of `y`
S: np.ndarray [shape=(d, t)] or None
magnitude or power spectrogram
n_fft : int > 0 [scalar] or None
number of FFT bins to use, if `y` is provided.
hop_length : int > 0 [scalar] or None
number of samples to hop
threshold : float in `(0, 1)`
A bin in spectrum `S` is considered a pitch when it is greater than
`threshold*ref(S)`.
By default, `ref(S)` is taken to be `max(S, axis=0)` (the maximum value in
each column).
fmin : float > 0 [scalar]
lower frequency cutoff.
fmax : float > 0 [scalar]
upper frequency cutoff.
win_length : int <= n_fft [scalar]
Each frame of audio is windowed by `window()`.
The window will be of length `win_length` and then padded
with zeros to match `n_fft`.
If unspecified, defaults to ``win_length = n_fft``.
window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
- a window specification (string, tuple, or number);
see `scipy.signal.get_window`
- a window function, such as `scipy.signal.hanning`
- a vector or array of length `n_fft`
.. see also:: `filters.get_window`
center : boolean
- If `True`, the signal `y` is padded so that frame
`t` is centered at `y[t * hop_length]`.
- If `False`, then frame `t` begins at `y[t * hop_length]`
pad_mode : string
If `center=True`, the padding mode to use at the edges of the signal.
By default, STFT uses reflection padding.
ref : scalar or callable [default=np.max]
If scalar, the reference value against which `S` is compared for determining
pitches.
If callable, the reference value is computed as `ref(S, axis=0)`.
.. note::
One of `S` or `y` must be provided.
If `S` is not given, it is computed from `y` using
the default parameters of `librosa.core.stft`.
Returns
-------
pitches : np.ndarray [shape=(d, t)]
magnitudes : np.ndarray [shape=(d,t)]
Where `d` is the subset of FFT bins within `fmin` and `fmax`.
`pitches[f, t]` contains instantaneous frequency at bin
`f`, time `t`
`magnitudes[f, t]` contains the corresponding magnitudes.
Both `pitches` and `magnitudes` take value 0 at bins
of non-maximal magnitude.
Notes
-----
This function caches at level 30.
Examples
--------
Computing pitches from a waveform input
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> pitches, magnitudes = librosa.piptrack(y=y, sr=sr)
Or from a spectrogram input
>>> S = np.abs(librosa.stft(y))
>>> pitches, magnitudes = librosa.piptrack(S=S, sr=sr)
Or with an alternate reference value for pitch detection, where
values above the mean spectral energy in each frame are counted as pitches
>>> pitches, magnitudes = librosa.piptrack(S=S, sr=sr, threshold=1,
... ref=np.mean)
### Response:
def piptrack(y=None, sr=22050, S=None, n_fft=2048, hop_length=None,
             fmin=150.0, fmax=4000.0, threshold=0.1,
             win_length=None, window='hann', center=True, pad_mode='reflect',
             ref=None):
    '''Pitch tracking on thresholded parabolically-interpolated STFT.
    This implementation uses the parabolic interpolation method described by [1]_.
    .. [1] https://ccrma.stanford.edu/~jos/sasp/Sinusoidal_Peak_Interpolation.html
    Parameters
    ----------
    y: np.ndarray [shape=(n,)] or None
        audio signal
    sr : number > 0 [scalar]
        audio sampling rate of `y`
    S: np.ndarray [shape=(d, t)] or None
        magnitude or power spectrogram
    n_fft : int > 0 [scalar] or None
        number of FFT bins to use, if `y` is provided.
    hop_length : int > 0 [scalar] or None
        number of samples to hop
    threshold : float in `(0, 1)`
        A bin in spectrum `S` is considered a pitch when it is greater than
        `threshold*ref(S)`.
        By default, `ref(S)` is taken to be `max(S, axis=0)` (the maximum value in
        each column).
    fmin : float > 0 [scalar]
        lower frequency cutoff.
    fmax : float > 0 [scalar]
        upper frequency cutoff.
    win_length : int <= n_fft [scalar]
        Each frame of audio is windowed by `window()`.
        The window will be of length `win_length` and then padded
        with zeros to match `n_fft`.
        If unspecified, defaults to ``win_length = n_fft``.
    window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
        - a window specification (string, tuple, or number);
          see `scipy.signal.get_window`
        - a window function, such as `scipy.signal.hanning`
        - a vector or array of length `n_fft`
        .. see also:: `filters.get_window`
    center : boolean
        - If `True`, the signal `y` is padded so that frame
          `t` is centered at `y[t * hop_length]`.
        - If `False`, then frame `t` begins at `y[t * hop_length]`
    pad_mode : string
        If `center=True`, the padding mode to use at the edges of the signal.
        By default, STFT uses reflection padding.
    ref : scalar or callable [default=np.max]
        If scalar, the reference value against which `S` is compared for determining
        pitches.
        If callable, the reference value is computed as `ref(S, axis=0)`.
    .. note::
        One of `S` or `y` must be provided.
        If `S` is not given, it is computed from `y` using
        the default parameters of `librosa.core.stft`.
    Returns
    -------
    pitches : np.ndarray [shape=(d, t)]
    magnitudes : np.ndarray [shape=(d,t)]
        Where `d` is the subset of FFT bins within `fmin` and `fmax`.
        `pitches[f, t]` contains instantaneous frequency at bin
        `f`, time `t`
        `magnitudes[f, t]` contains the corresponding magnitudes.
        Both `pitches` and `magnitudes` take value 0 at bins
        of non-maximal magnitude.
    Notes
    -----
    This function caches at level 30.
    Examples
    --------
    Computing pitches from a waveform input
    >>> y, sr = librosa.load(librosa.util.example_audio_file())
    >>> pitches, magnitudes = librosa.piptrack(y=y, sr=sr)
    Or from a spectrogram input
    >>> S = np.abs(librosa.stft(y))
    >>> pitches, magnitudes = librosa.piptrack(S=S, sr=sr)
    Or with an alternate reference value for pitch detection, where
    values above the mean spectral energy in each frame are counted as pitches
    >>> pitches, magnitudes = librosa.piptrack(S=S, sr=sr, threshold=1,
    ...                                        ref=np.mean)
    '''
    # Check that we received an audio time series or STFT
    S, n_fft = _spectrogram(y=y, S=S, n_fft=n_fft, hop_length=hop_length,
                            win_length=win_length, window=window,
                            center=center, pad_mode=pad_mode)
    # Make sure we're dealing with magnitudes
    S = np.abs(S)
    # Truncate to feasible region
    fmin = np.maximum(fmin, 0)
    fmax = np.minimum(fmax, float(sr) / 2)
    fft_freqs = time_frequency.fft_frequencies(sr=sr, n_fft=n_fft)
    # Do the parabolic interpolation everywhere,
    # then figure out where the peaks are
    # then restrict to the feasible range (fmin:fmax)
    # avg is a central-difference estimate of dS/df; shift starts out as
    # the negated second difference of S along the frequency axis.
    avg = 0.5 * (S[2:] - S[:-2])
    shift = 2 * S[1:-1] - S[2:] - S[:-2]
    # Suppress divide-by-zeros.
    # Points where shift == 0 will never be selected by localmax anyway
    shift = avg / (shift + (np.abs(shift) < util.tiny(shift)))
    # Pad back up to the same shape as S
    avg = np.pad(avg, ([1, 1], [0, 0]), mode='constant')
    shift = np.pad(shift, ([1, 1], [0, 0]), mode='constant')
    # Magnitude correction at the interpolated (sub-bin) peak position.
    dskew = 0.5 * avg * shift
    # Pre-allocate output
    pitches = np.zeros_like(S)
    mags = np.zeros_like(S)
    # Clip to the viable frequency range
    # (reshaped to a column vector so the mask broadcasts over frames)
    freq_mask = ((fmin <= fft_freqs) & (fft_freqs < fmax)).reshape((-1, 1))
    # Compute the column-wise local max of S after thresholding
    # Find the argmax coordinates
    if ref is None:
        ref = np.max
    if six.callable(ref):
        # Per-frame reference (e.g. the frame maximum) scaled by threshold.
        ref_value = threshold * ref(S, axis=0)
    else:
        # Scalar reference is used as-is; threshold does not apply here.
        ref_value = np.abs(ref)
    idx = np.argwhere(freq_mask & util.localmax(S * (S > ref_value)))
    # Store pitch and magnitude
    # Fractional bin index = integer bin + parabolic shift, converted to Hz.
    pitches[idx[:, 0], idx[:, 1]] = ((idx[:, 0] + shift[idx[:, 0], idx[:, 1]])
                                     * float(sr) / n_fft)
    mags[idx[:, 0], idx[:, 1]] = (S[idx[:, 0], idx[:, 1]]
                                  + dskew[idx[:, 0], idx[:, 1]])
    return pitches, mags
def open_url(self, url, headers):
""" open url with python libraries
"""
data = self.cache.get_cached_data(url)
if data is not None:
return data, 200, headers
self.rate_limit_ensembl_requests()
req = request.Request(url, headers=headers)
try:
handler = request.urlopen(req)
except HTTPError as error:
# if we get a http error, we still process the status code, since a
# later step deals with different status codes differently.
handler = error
except (URLError, ConnectionResetError, TimeoutError):
# if we get a ConnectionResetError, assume something has gone wrong
# with the server. Later code will wait before retrying.
return '', 500, headers
status_code = handler.getcode()
response = handler.read()
if IS_PYTHON3:
response = response.decode("utf-8")
# parse the headers into a key, value dictionary
headers = dict(zip(map(str.lower, handler.headers.keys()), handler.headers.values()))
now = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
logging.warning("{}\t{}\t{}".format(now, status_code, url))
return response, status_code, headers | open url with python libraries | Below is the the instruction that describes the task:
### Input:
open url with python libraries
### Response:
def open_url(self, url, headers):
    """ open url with python libraries

    Returns a tuple of (response text, HTTP status code, response headers).
    """
    cached = self.cache.get_cached_data(url)
    if cached is not None:
        # Cache hits skip the network entirely and report as 200 OK.
        return cached, 200, headers
    self.rate_limit_ensembl_requests()
    req = request.Request(url, headers=headers)
    try:
        handler = request.urlopen(req)
    except HTTPError as error:
        # An HTTP error still carries a usable status code, which later
        # code inspects, so keep the error object as the response handle.
        handler = error
    except (URLError, ConnectionResetError, TimeoutError):
        # Connection-level failure: assume a server-side problem and
        # report a 500 so the caller can wait before retrying.
        return '', 500, headers
    status_code = handler.getcode()
    response = handler.read()
    if IS_PYTHON3:
        response = response.decode("utf-8")
    # Normalise header names to lower case for case-insensitive lookups.
    headers = {key.lower(): value
               for key, value in zip(handler.headers.keys(),
                                     handler.headers.values())}
    now = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
    logging.warning("{}\t{}\t{}".format(now, status_code, url))
    return response, status_code, headers
def convert_attrs_to_uppercase(obj: Any, attrs: Iterable[str]) -> None:
"""
Converts the specified attributes of an object to upper case, modifying
the object in place.
"""
for a in attrs:
value = getattr(obj, a)
if value is None:
continue
setattr(obj, a, value.upper()) | Converts the specified attributes of an object to upper case, modifying
    the object in place. | Below is the instruction that describes the task:
### Input:
Converts the specified attributes of an object to upper case, modifying
the object in place.
### Response:
def convert_attrs_to_uppercase(obj: Any, attrs: Iterable[str]) -> None:
    """
    Upper-case the named string attributes of *obj*, modifying it in place.

    Attributes whose current value is ``None`` are left untouched.
    """
    for attr_name in attrs:
        current = getattr(obj, attr_name)
        if current is not None:
            setattr(obj, attr_name, current.upper())
def add_speaker(self, **kwargs):
"""
Creates a new BGPSpeaker instance.
Usage:
======= ================
Method URI
======= ================
POST /vtep/speakers
======= ================
Request parameters:
========== ============================================
Attribute Description
========== ============================================
dpid ID of Datapath binding to speaker. (e.g. 1)
as_number AS number. (e.g. 65000)
router_id Router ID. (e.g. "172.17.0.1")
========== ============================================
Example::
$ curl -X POST -d '{
"dpid": 1,
"as_number": 65000,
"router_id": "172.17.0.1"
}' http://localhost:8080/vtep/speakers | python -m json.tool
::
{
"172.17.0.1": {
"EvpnSpeaker": {
"as_number": 65000,
"dpid": 1,
"neighbors": {},
"router_id": "172.17.0.1"
}
}
}
"""
try:
body = self.vtep_app.add_speaker(**kwargs)
except DatapathNotFound as e:
return e.to_response(status=404)
return Response(content_type='application/json',
body=json.dumps(body)) | Creates a new BGPSpeaker instance.
Usage:
======= ================
Method URI
======= ================
POST /vtep/speakers
======= ================
Request parameters:
========== ============================================
Attribute Description
========== ============================================
dpid ID of Datapath binding to speaker. (e.g. 1)
as_number AS number. (e.g. 65000)
router_id Router ID. (e.g. "172.17.0.1")
========== ============================================
Example::
$ curl -X POST -d '{
"dpid": 1,
"as_number": 65000,
"router_id": "172.17.0.1"
}' http://localhost:8080/vtep/speakers | python -m json.tool
::
{
"172.17.0.1": {
"EvpnSpeaker": {
"as_number": 65000,
"dpid": 1,
"neighbors": {},
"router_id": "172.17.0.1"
}
}
    } | Below is the instruction that describes the task:
### Input:
Creates a new BGPSpeaker instance.
Usage:
======= ================
Method URI
======= ================
POST /vtep/speakers
======= ================
Request parameters:
========== ============================================
Attribute Description
========== ============================================
dpid ID of Datapath binding to speaker. (e.g. 1)
as_number AS number. (e.g. 65000)
router_id Router ID. (e.g. "172.17.0.1")
========== ============================================
Example::
$ curl -X POST -d '{
"dpid": 1,
"as_number": 65000,
"router_id": "172.17.0.1"
}' http://localhost:8080/vtep/speakers | python -m json.tool
::
{
"172.17.0.1": {
"EvpnSpeaker": {
"as_number": 65000,
"dpid": 1,
"neighbors": {},
"router_id": "172.17.0.1"
}
}
}
### Response:
def add_speaker(self, **kwargs):
    """
    Creates a new BGPSpeaker instance.
    Usage:
    ======= ================
    Method URI
    ======= ================
    POST /vtep/speakers
    ======= ================
    Request parameters:
    ========== ============================================
    Attribute Description
    ========== ============================================
    dpid ID of Datapath binding to speaker. (e.g. 1)
    as_number AS number. (e.g. 65000)
    router_id Router ID. (e.g. "172.17.0.1")
    ========== ============================================
    Example::
    $ curl -X POST -d '{
    "dpid": 1,
    "as_number": 65000,
    "router_id": "172.17.0.1"
    }' http://localhost:8080/vtep/speakers | python -m json.tool
    ::
    {
    "172.17.0.1": {
    "EvpnSpeaker": {
    "as_number": 65000,
    "dpid": 1,
    "neighbors": {},
    "router_id": "172.17.0.1"
    }
    }
    }
    """
    try:
        body = self.vtep_app.add_speaker(**kwargs)
    except DatapathNotFound as e:
        # The datapath named by ``dpid`` is not connected to this
        # controller; surface that to the client as a 404.
        return e.to_response(status=404)
    return Response(content_type='application/json',
                    body=json.dumps(body))
def group_delay(wave):
r"""
Return the group delay of a waveform.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.group_delay
:raises: RuntimeError (Argument \`wave\` is not valid)
.. [[[end]]]
"""
ret = -derivative(phase(wave, unwrap=True) / (2 * math.pi))
ret.dep_name = "group_delay({0})".format(wave.dep_name)
ret.dep_units = "sec"
return ret | r"""
Return the group delay of a waveform.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.group_delay
:raises: RuntimeError (Argument \`wave\` is not valid)
.. [[[end]]] | Below is the the instruction that describes the task:
### Input:
r"""
Return the group delay of a waveform.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.group_delay
:raises: RuntimeError (Argument \`wave\` is not valid)
.. [[[end]]]
### Response:
def group_delay(wave):
    r"""
    Return the group delay of a waveform.
    :param wave: Waveform
    :type wave: :py:class:`peng.eng.Waveform`
    :rtype: :py:class:`peng.eng.Waveform`
    .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
    .. Auto-generated exceptions documentation for
    .. peng.wave_functions.group_delay
    :raises: RuntimeError (Argument \`wave\` is not valid)
    .. [[[end]]]
    """
    # Group delay is -d(phi)/df with the phase unwrapped and expressed
    # in cycles rather than radians.
    phase_in_cycles = phase(wave, unwrap=True) / (2 * math.pi)
    result = -derivative(phase_in_cycles)
    result.dep_name = "group_delay({0})".format(wave.dep_name)
    result.dep_units = "sec"
    return result
def unset(config, section, opt=None):
"""Unsets specified option and/or section in the config.
Args:
config (configobj.ConfigObj): config to work on.
section (str): section name.
opt (str): optional option name.
"""
if section not in config.keys():
raise ConfigError("section '{}' doesn't exist".format(section))
if opt is None:
del config[section]
return
if opt not in config[section].keys():
raise ConfigError(
"option '{}.{}' doesn't exist".format(section, opt)
)
del config[section][opt]
if not config[section]:
del config[section] | Unsets specified option and/or section in the config.
Args:
config (configobj.ConfigObj): config to work on.
section (str): section name.
        opt (str): optional option name. | Below is the instruction that describes the task:
### Input:
Unsets specified option and/or section in the config.
Args:
config (configobj.ConfigObj): config to work on.
section (str): section name.
opt (str): optional option name.
### Response:
def unset(config, section, opt=None):
    """Unsets specified option and/or section in the config.

    Args:
        config (configobj.ConfigObj): config to work on.
        section (str): section name.
        opt (str): optional option name.

    Raises:
        ConfigError: if the section (or the option within it) does not exist.
    """
    # Membership tests go directly against the mapping; calling .keys()
    # just to test membership is redundant.
    if section not in config:
        raise ConfigError("section '{}' doesn't exist".format(section))
    if opt is None:
        del config[section]
        return
    if opt not in config[section]:
        raise ConfigError(
            "option '{}.{}' doesn't exist".format(section, opt)
        )
    del config[section][opt]
    # Drop the section entirely once its last option has been removed.
    if not config[section]:
        del config[section]
def process_ndex_network(network_id, username=None, password=None,
require_grounding=True):
"""Process an NDEx network into Statements.
Parameters
----------
network_id : str
NDEx network ID.
username : str
NDEx username.
password : str
NDEx password.
require_grounding: bool
Whether network nodes lacking grounding information should be included
among the extracted Statements (default is True).
Returns
-------
NdexCxProcessor
Processor containing Statements. Returns None if there if the HTTP
status code indicates an unsuccessful request.
"""
nd = ndex2.client.Ndex2(username=username, password=password)
res = nd.get_network_as_cx_stream(network_id)
if res.status_code != 200:
logger.error('Problem downloading network: status code %s' %
res.status_code)
logger.error('Response: %s' % res.text)
return None
json_list = res.json()
summary = nd.get_network_summary(network_id)
return process_cx(json_list, summary=summary,
require_grounding=require_grounding) | Process an NDEx network into Statements.
Parameters
----------
network_id : str
NDEx network ID.
username : str
NDEx username.
password : str
NDEx password.
require_grounding: bool
Whether network nodes lacking grounding information should be included
among the extracted Statements (default is True).
Returns
-------
NdexCxProcessor
Processor containing Statements. Returns None if there if the HTTP
        status code indicates an unsuccessful request. | Below is the instruction that describes the task:
### Input:
Process an NDEx network into Statements.
Parameters
----------
network_id : str
NDEx network ID.
username : str
NDEx username.
password : str
NDEx password.
require_grounding: bool
Whether network nodes lacking grounding information should be included
among the extracted Statements (default is True).
Returns
-------
NdexCxProcessor
Processor containing Statements. Returns None if there if the HTTP
status code indicates an unsuccessful request.
### Response:
def process_ndex_network(network_id, username=None, password=None,
                         require_grounding=True):
    """Process an NDEx network into Statements.

    Parameters
    ----------
    network_id : str
        NDEx network ID.
    username : str
        NDEx username.
    password : str
        NDEx password.
    require_grounding: bool
        Whether network nodes lacking grounding information should be included
        among the extracted Statements (default is True).

    Returns
    -------
    NdexCxProcessor
        Processor containing Statements. Returns None if there if the HTTP
        status code indicates an unsuccessful request.
    """
    client = ndex2.client.Ndex2(username=username, password=password)
    res = client.get_network_as_cx_stream(network_id)
    if res.status_code != 200:
        # Log both the code and the body so failures are diagnosable.
        logger.error('Problem downloading network: status code %s' %
                     res.status_code)
        logger.error('Response: %s' % res.text)
        return None
    cx_json = res.json()
    summary = client.get_network_summary(network_id)
    return process_cx(cx_json, summary=summary,
                      require_grounding=require_grounding)
def get_models():
"""Finds all models, returning a list of model number and names
sorted increasing.
Returns: [(13, 000013-modelname), (17, 000017-modelname), ...etc]
"""
all_models = gfile.Glob(os.path.join(models_dir(), '*.meta'))
model_filenames = [os.path.basename(m) for m in all_models]
model_numbers_names = sorted([
(shipname.detect_model_num(m), shipname.detect_model_name(m))
for m in model_filenames])
return model_numbers_names | Finds all models, returning a list of model number and names
sorted increasing.
    Returns: [(13, 000013-modelname), (17, 000017-modelname), ...etc] | Below is the instruction that describes the task:
### Input:
Finds all models, returning a list of model number and names
sorted increasing.
Returns: [(13, 000013-modelname), (17, 000017-modelname), ...etc]
### Response:
def get_models():
    """Finds all models, returning a list of model number and names
    sorted increasing.

    Returns: [(13, 000013-modelname), (17, 000017-modelname), ...etc]
    """
    meta_paths = gfile.Glob(os.path.join(models_dir(), '*.meta'))
    basenames = (os.path.basename(path) for path in meta_paths)
    # Sorting the (number, name) pairs orders models by number ascending.
    return sorted(
        (shipname.detect_model_num(name), shipname.detect_model_name(name))
        for name in basenames)
def serialize(self):
"""
Serializes into a bytestring.
:returns: An object of type Bytes.
"""
d = self.__getstate__()
return pickle.dumps({
'name': d['name'],
'seg': pickle.dumps(d['seg'], protocol=-1),
}, protocol=-1) | Serializes into a bytestring.
    :returns: An object of type Bytes. | Below is the instruction that describes the task:
### Input:
Serializes into a bytestring.
:returns: An object of type Bytes.
### Response:
def serialize(self):
    """
    Serializes into a bytestring.

    :returns: An object of type Bytes.
    """
    state = self.__getstate__()
    # The segment is pickled separately so it can be restored lazily.
    payload = {
        'name': state['name'],
        'seg': pickle.dumps(state['seg'], protocol=-1),
    }
    return pickle.dumps(payload, protocol=-1)
def printState(self, aState):
"""
Print an integer array that is the same shape as activeState.
:param aState: TODO: document
"""
def formatRow(var, i):
s = ''
for c in range(self.numberOfCols):
if c > 0 and c % 10 == 0:
s += ' '
s += str(var[c, i])
s += ' '
return s
for i in xrange(self.cellsPerColumn):
print formatRow(aState, i) | Print an integer array that is the same shape as activeState.
    :param aState: TODO: document | Below is the instruction that describes the task:
### Input:
Print an integer array that is the same shape as activeState.
:param aState: TODO: document
### Response:
def printState(self, aState):
"""
Print an integer array that is the same shape as activeState.
:param aState: TODO: document
"""
def formatRow(var, i):
s = ''
for c in range(self.numberOfCols):
if c > 0 and c % 10 == 0:
s += ' '
s += str(var[c, i])
s += ' '
return s
for i in xrange(self.cellsPerColumn):
print formatRow(aState, i) |
def write_table(mboxfile, mailTable):
"""
Takes a list and extends it with lists of data, which is
extracted from mbox messages.
:param mboxfile: Mbox file name/path
:param mailTable: A list (of lists)
:return: An extended list of lists
"""
mail_box_contents = mailbox.mbox(mboxfile)
m_pbar = tqdm.tqdm(range(0,len(mail_box_contents)))
m_pbar.set_description('Extracting mbox messages...')
count = 0
update_interval = min(50,len(mail_box_contents))
for message in mail_box_contents:
count += 1
if count % update_interval == 0:
m_pbar.update(update_interval)
clean_from = clean_address(message['From'])
clean_to = clean_addresses(message['To'])
clean_cc = clean_addresses(message['Cc'])
try:
clean_date = email.parsedate_to_datetime(message['Date'])
except:
clean_date = None
mailTable.append([
clean_from,
clean_to,
clean_cc,
clean_date,
message['Subject'],
get_body(message)
]) | Takes a list and extends it with lists of data, which is
extracted from mbox messages.
:param mboxfile: Mbox file name/path
:param mailTable: A list (of lists)
:return: An extended list of lists | Below is the the instruction that describes the task:
### Input:
Takes a list and extends it with lists of data, which is
extracted from mbox messages.
:param mboxfile: Mbox file name/path
:param mailTable: A list (of lists)
:return: An extended list of lists
### Response:
def write_table(mboxfile, mailTable):
"""
Takes a list and extends it with lists of data, which is
extracted from mbox messages.
:param mboxfile: Mbox file name/path
:param mailTable: A list (of lists)
:return: An extended list of lists
"""
mail_box_contents = mailbox.mbox(mboxfile)
m_pbar = tqdm.tqdm(range(0,len(mail_box_contents)))
m_pbar.set_description('Extracting mbox messages...')
count = 0
update_interval = min(50,len(mail_box_contents))
for message in mail_box_contents:
count += 1
if count % update_interval == 0:
m_pbar.update(update_interval)
clean_from = clean_address(message['From'])
clean_to = clean_addresses(message['To'])
clean_cc = clean_addresses(message['Cc'])
try:
clean_date = email.parsedate_to_datetime(message['Date'])
except:
clean_date = None
mailTable.append([
clean_from,
clean_to,
clean_cc,
clean_date,
message['Subject'],
get_body(message)
]) |
def _get_class_path(self, klass_or_instance):
"""Return class path for a given class.
:param klass_or_instance: Class or instance of given class
:return: String containing the class path
"""
if inspect.isclass(klass_or_instance):
klass = '{}.{}'.format(klass_or_instance.__module__, klass_or_instance.__name__)
elif not isinstance(klass_or_instance, str):
klass = klass_or_instance.__class__
klass = '{}.{}'.format(klass.__module__, klass.__name__)
else:
klass = klass_or_instance
return klass | Return class path for a given class.
:param klass_or_instance: Class or instance of given class
:return: String containing the class path | Below is the the instruction that describes the task:
### Input:
Return class path for a given class.
:param klass_or_instance: Class or instance of given class
:return: String containing the class path
### Response:
def _get_class_path(self, klass_or_instance):
"""Return class path for a given class.
:param klass_or_instance: Class or instance of given class
:return: String containing the class path
"""
if inspect.isclass(klass_or_instance):
klass = '{}.{}'.format(klass_or_instance.__module__, klass_or_instance.__name__)
elif not isinstance(klass_or_instance, str):
klass = klass_or_instance.__class__
klass = '{}.{}'.format(klass.__module__, klass.__name__)
else:
klass = klass_or_instance
return klass |
def set_attribute(self, app, key, value):
"""Sets an application attribute
:param app: application id
:param key: key of the attribute to set
:param value: value to set
:returns: True if the operation succeeded, False otherwise
:raises: HTTPResponseError in case an HTTP error status was returned
"""
path = 'setattribute/' + parse.quote(app, '') + '/' + parse.quote(
self._encode_string(key), '')
res = self._make_ocs_request(
'POST',
self.OCS_SERVICE_PRIVATEDATA,
path,
data={'value': self._encode_string(value)}
)
if res.status_code == 200:
tree = ET.fromstring(res.content)
self._check_ocs_status(tree)
return True
raise HTTPResponseError(res) | Sets an application attribute
:param app: application id
:param key: key of the attribute to set
:param value: value to set
:returns: True if the operation succeeded, False otherwise
:raises: HTTPResponseError in case an HTTP error status was returned | Below is the the instruction that describes the task:
### Input:
Sets an application attribute
:param app: application id
:param key: key of the attribute to set
:param value: value to set
:returns: True if the operation succeeded, False otherwise
:raises: HTTPResponseError in case an HTTP error status was returned
### Response:
def set_attribute(self, app, key, value):
"""Sets an application attribute
:param app: application id
:param key: key of the attribute to set
:param value: value to set
:returns: True if the operation succeeded, False otherwise
:raises: HTTPResponseError in case an HTTP error status was returned
"""
path = 'setattribute/' + parse.quote(app, '') + '/' + parse.quote(
self._encode_string(key), '')
res = self._make_ocs_request(
'POST',
self.OCS_SERVICE_PRIVATEDATA,
path,
data={'value': self._encode_string(value)}
)
if res.status_code == 200:
tree = ET.fromstring(res.content)
self._check_ocs_status(tree)
return True
raise HTTPResponseError(res) |
def reset(self):
"""
Resets the environment to its initialization state. This method needs to be called to start a
new episode after the last episode ended.
:return: initial state
"""
self.level.reset() # optional: episode=-1, seed=None
return self.level.observations()[self.state_attribute] | Resets the environment to its initialization state. This method needs to be called to start a
new episode after the last episode ended.
:return: initial state | Below is the the instruction that describes the task:
### Input:
Resets the environment to its initialization state. This method needs to be called to start a
new episode after the last episode ended.
:return: initial state
### Response:
def reset(self):
"""
Resets the environment to its initialization state. This method needs to be called to start a
new episode after the last episode ended.
:return: initial state
"""
self.level.reset() # optional: episode=-1, seed=None
return self.level.observations()[self.state_attribute] |
def _validate_tag_key(self, tag_key, exception_param='tags.X.member.key'):
"""Validates the tag key.
:param all_tags: Dict to check if there is a duplicate tag.
:param tag_key: The tag key to check against.
:param exception_param: The exception parameter to send over to help format the message. This is to reflect
the difference between the tag and untag APIs.
:return:
"""
# Validate that the key length is correct:
if len(tag_key) > 128:
raise TagKeyTooBig(tag_key, param=exception_param)
# Validate that the tag key fits the proper Regex:
# [\w\s_.:/=+\-@]+ SHOULD be the same as the Java regex on the AWS documentation: [\p{L}\p{Z}\p{N}_.:/=+\-@]+
match = re.findall(r'[\w\s_.:/=+\-@]+', tag_key)
# Kudos if you can come up with a better way of doing a global search :)
if not len(match) or len(match[0]) < len(tag_key):
raise InvalidTagCharacters(tag_key, param=exception_param) | Validates the tag key.
:param all_tags: Dict to check if there is a duplicate tag.
:param tag_key: The tag key to check against.
:param exception_param: The exception parameter to send over to help format the message. This is to reflect
the difference between the tag and untag APIs.
:return: | Below is the the instruction that describes the task:
### Input:
Validates the tag key.
:param all_tags: Dict to check if there is a duplicate tag.
:param tag_key: The tag key to check against.
:param exception_param: The exception parameter to send over to help format the message. This is to reflect
the difference between the tag and untag APIs.
:return:
### Response:
def _validate_tag_key(self, tag_key, exception_param='tags.X.member.key'):
"""Validates the tag key.
:param all_tags: Dict to check if there is a duplicate tag.
:param tag_key: The tag key to check against.
:param exception_param: The exception parameter to send over to help format the message. This is to reflect
the difference between the tag and untag APIs.
:return:
"""
# Validate that the key length is correct:
if len(tag_key) > 128:
raise TagKeyTooBig(tag_key, param=exception_param)
# Validate that the tag key fits the proper Regex:
# [\w\s_.:/=+\-@]+ SHOULD be the same as the Java regex on the AWS documentation: [\p{L}\p{Z}\p{N}_.:/=+\-@]+
match = re.findall(r'[\w\s_.:/=+\-@]+', tag_key)
# Kudos if you can come up with a better way of doing a global search :)
if not len(match) or len(match[0]) < len(tag_key):
raise InvalidTagCharacters(tag_key, param=exception_param) |
def checkConditions(self,verbose=False,public_call=True):
'''
This method checks whether the instance's type satisfies the growth impatience condition
(GIC), return impatience condition (RIC), absolute impatience condition (AIC), weak return
impatience condition (WRIC), finite human wealth condition (FHWC) and finite value of
autarky condition (FVAC). These are the conditions that are sufficient for nondegenerate
solutions under infinite horizon with a 1 period cycle. Depending on the model at hand, a
different combination of these conditions must be satisfied. (For an exposition of the
conditions, see http://econ.jhu.edu/people/ccarroll/papers/BufferStockTheory/)
Parameters
----------
verbose : boolean
Specifies different levels of verbosity of feedback. When False, it only reports whether the
instance's type fails to satisfy a particular condition. When True, it reports all results, i.e.
the factor values for all conditions.
Returns
-------
None
'''
violated = PerfForesightConsumerType.checkConditions(self, verbose=verbose, verbose_reference=False)
if self.cycles!=0 or self.T_cycle > 1:
return
EPermShkInv=np.dot(self.PermShkDstn[0][0],1/self.PermShkDstn[0][1])
PermGroFacAdj=self.PermGroFac[0]*EPermShkInv
Thorn=self.LivPrb[0]*(self.Rfree*self.DiscFac)**(1/self.CRRA)
GIF=Thorn/PermGroFacAdj
#Evaluate and report on the growth impatience condition
if GIF<1:
if public_call:
print('The growth impatience factor value for the supplied parameter values satisfies the growth impatience condition.')
else:
violated = True
print('The given parameter values violate the growth impatience condition for this consumer type; the GIF is: %2.4f' % (GIF))
if verbose:
print(' Therefore, a target level of wealth does not exist.')
#Evaluate and report on the weak return impatience condition
WRIF=(self.LivPrb[0]*(self.UnempPrb**(1/self.CRRA))*(self.Rfree*self.DiscFac)**(1/self.CRRA))/self.Rfree
if WRIF<1:
if public_call:
print('The weak return impatience factor value for the supplied parameter values satisfies the weak return impatience condition.')
else:
violated = True
print('The given type violates the weak return impatience condition with the supplied parameter values. The WRIF is: %2.4f' % (WRIF))
if verbose:
print(' Therefore, a nondegenerate solution is not available.')
#Evaluate and report on the finite value of autarky condition
EPermShkValFunc=np.dot(self.PermShkDstn[0][0],self.PermShkDstn[0][1]**(1-self.CRRA))
FVAF=self.LivPrb[0]*self.DiscFac*EPermShkValFunc*(self.PermGroFac[0]**(1-self.CRRA))
if FVAF<1:
if public_call:
print('The finite value of autarky factor value for the supplied parameter values satisfies the finite value of autarky condition.')
else:
print('The given type violates the finite value of autarky condition with the supplied parameter values. The FVAF is %2.4f' %(FVAF))
violated = True
if verbose:
print(' Therefore, a nondegenerate solution is not available.')
if verbose and violated:
print('\n[!] For more information on the conditions, see Table 3 in "Theoretical Foundations of Buffer Stock Saving" at http://econ.jhu.edu/people/ccarroll/papers/BufferStockTheory/') | This method checks whether the instance's type satisfies the growth impatience condition
(GIC), return impatience condition (RIC), absolute impatience condition (AIC), weak return
impatience condition (WRIC), finite human wealth condition (FHWC) and finite value of
autarky condition (FVAC). These are the conditions that are sufficient for nondegenerate
solutions under infinite horizon with a 1 period cycle. Depending on the model at hand, a
different combination of these conditions must be satisfied. (For an exposition of the
conditions, see http://econ.jhu.edu/people/ccarroll/papers/BufferStockTheory/)
Parameters
----------
verbose : boolean
Specifies different levels of verbosity of feedback. When False, it only reports whether the
instance's type fails to satisfy a particular condition. When True, it reports all results, i.e.
the factor values for all conditions.
Returns
-------
None | Below is the the instruction that describes the task:
### Input:
This method checks whether the instance's type satisfies the growth impatience condition
(GIC), return impatience condition (RIC), absolute impatience condition (AIC), weak return
impatience condition (WRIC), finite human wealth condition (FHWC) and finite value of
autarky condition (FVAC). These are the conditions that are sufficient for nondegenerate
solutions under infinite horizon with a 1 period cycle. Depending on the model at hand, a
different combination of these conditions must be satisfied. (For an exposition of the
conditions, see http://econ.jhu.edu/people/ccarroll/papers/BufferStockTheory/)
Parameters
----------
verbose : boolean
Specifies different levels of verbosity of feedback. When False, it only reports whether the
instance's type fails to satisfy a particular condition. When True, it reports all results, i.e.
the factor values for all conditions.
Returns
-------
None
### Response:
def checkConditions(self,verbose=False,public_call=True):
'''
This method checks whether the instance's type satisfies the growth impatience condition
(GIC), return impatience condition (RIC), absolute impatience condition (AIC), weak return
impatience condition (WRIC), finite human wealth condition (FHWC) and finite value of
autarky condition (FVAC). These are the conditions that are sufficient for nondegenerate
solutions under infinite horizon with a 1 period cycle. Depending on the model at hand, a
different combination of these conditions must be satisfied. (For an exposition of the
conditions, see http://econ.jhu.edu/people/ccarroll/papers/BufferStockTheory/)
Parameters
----------
verbose : boolean
Specifies different levels of verbosity of feedback. When False, it only reports whether the
instance's type fails to satisfy a particular condition. When True, it reports all results, i.e.
the factor values for all conditions.
Returns
-------
None
'''
violated = PerfForesightConsumerType.checkConditions(self, verbose=verbose, verbose_reference=False)
if self.cycles!=0 or self.T_cycle > 1:
return
EPermShkInv=np.dot(self.PermShkDstn[0][0],1/self.PermShkDstn[0][1])
PermGroFacAdj=self.PermGroFac[0]*EPermShkInv
Thorn=self.LivPrb[0]*(self.Rfree*self.DiscFac)**(1/self.CRRA)
GIF=Thorn/PermGroFacAdj
#Evaluate and report on the growth impatience condition
if GIF<1:
if public_call:
print('The growth impatience factor value for the supplied parameter values satisfies the growth impatience condition.')
else:
violated = True
print('The given parameter values violate the growth impatience condition for this consumer type; the GIF is: %2.4f' % (GIF))
if verbose:
print(' Therefore, a target level of wealth does not exist.')
#Evaluate and report on the weak return impatience condition
WRIF=(self.LivPrb[0]*(self.UnempPrb**(1/self.CRRA))*(self.Rfree*self.DiscFac)**(1/self.CRRA))/self.Rfree
if WRIF<1:
if public_call:
print('The weak return impatience factor value for the supplied parameter values satisfies the weak return impatience condition.')
else:
violated = True
print('The given type violates the weak return impatience condition with the supplied parameter values. The WRIF is: %2.4f' % (WRIF))
if verbose:
print(' Therefore, a nondegenerate solution is not available.')
#Evaluate and report on the finite value of autarky condition
EPermShkValFunc=np.dot(self.PermShkDstn[0][0],self.PermShkDstn[0][1]**(1-self.CRRA))
FVAF=self.LivPrb[0]*self.DiscFac*EPermShkValFunc*(self.PermGroFac[0]**(1-self.CRRA))
if FVAF<1:
if public_call:
print('The finite value of autarky factor value for the supplied parameter values satisfies the finite value of autarky condition.')
else:
print('The given type violates the finite value of autarky condition with the supplied parameter values. The FVAF is %2.4f' %(FVAF))
violated = True
if verbose:
print(' Therefore, a nondegenerate solution is not available.')
if verbose and violated:
print('\n[!] For more information on the conditions, see Table 3 in "Theoretical Foundations of Buffer Stock Saving" at http://econ.jhu.edu/people/ccarroll/papers/BufferStockTheory/') |
def absolute(parser, token):
'''
Returns a full absolute URL based on the request host.
This template tag takes exactly the same paramters as url template tag.
'''
node = url(parser, token)
return AbsoluteUrlNode(
view_name=node.view_name,
args=node.args,
kwargs=node.kwargs,
asvar=node.asvar
) | Returns a full absolute URL based on the request host.
This template tag takes exactly the same paramters as url template tag. | Below is the the instruction that describes the task:
### Input:
Returns a full absolute URL based on the request host.
This template tag takes exactly the same paramters as url template tag.
### Response:
def absolute(parser, token):
'''
Returns a full absolute URL based on the request host.
This template tag takes exactly the same paramters as url template tag.
'''
node = url(parser, token)
return AbsoluteUrlNode(
view_name=node.view_name,
args=node.args,
kwargs=node.kwargs,
asvar=node.asvar
) |
def preprocessRequest(self, service_request, *args, **kwargs):
"""
Preprocesses a request.
"""
processor = self.getPreprocessor(service_request)
if processor is None:
return
args = (service_request,) + args
if hasattr(processor, '_pyamf_expose_request'):
http_request = kwargs.get('http_request', None)
args = (http_request,) + args
return processor(*args) | Preprocesses a request. | Below is the the instruction that describes the task:
### Input:
Preprocesses a request.
### Response:
def preprocessRequest(self, service_request, *args, **kwargs):
"""
Preprocesses a request.
"""
processor = self.getPreprocessor(service_request)
if processor is None:
return
args = (service_request,) + args
if hasattr(processor, '_pyamf_expose_request'):
http_request = kwargs.get('http_request', None)
args = (http_request,) + args
return processor(*args) |
def searchString( self, instring, maxMatches=_MAX_INT ):
"""
Another extension to :class:`scanString`, simplifying the access to the tokens found
to match the given parse expression. May be called with optional
``maxMatches`` argument, to clip searching after 'n' matches are found.
Example::
# a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
cap_word = Word(alphas.upper(), alphas.lower())
print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))
# the sum() builtin can be used to merge results into a single ParseResults object
print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")))
prints::
[['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]
['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']
"""
try:
return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc | Another extension to :class:`scanString`, simplifying the access to the tokens found
to match the given parse expression. May be called with optional
``maxMatches`` argument, to clip searching after 'n' matches are found.
Example::
# a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
cap_word = Word(alphas.upper(), alphas.lower())
print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))
# the sum() builtin can be used to merge results into a single ParseResults object
print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")))
prints::
[['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]
['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity'] | Below is the the instruction that describes the task:
### Input:
Another extension to :class:`scanString`, simplifying the access to the tokens found
to match the given parse expression. May be called with optional
``maxMatches`` argument, to clip searching after 'n' matches are found.
Example::
# a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
cap_word = Word(alphas.upper(), alphas.lower())
print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))
# the sum() builtin can be used to merge results into a single ParseResults object
print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")))
prints::
[['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]
['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']
### Response:
def searchString( self, instring, maxMatches=_MAX_INT ):
"""
Another extension to :class:`scanString`, simplifying the access to the tokens found
to match the given parse expression. May be called with optional
``maxMatches`` argument, to clip searching after 'n' matches are found.
Example::
# a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
cap_word = Word(alphas.upper(), alphas.lower())
print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))
# the sum() builtin can be used to merge results into a single ParseResults object
print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")))
prints::
[['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]
['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']
"""
try:
return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc |
def _get_acquisition(self, model, space):
"""
Imports the acquisition
"""
from copy import deepcopy
acqOpt_config = deepcopy(self.config['acquisition']['optimizer'])
acqOpt_name = acqOpt_config['name']
del acqOpt_config['name']
from ..optimization import AcquisitionOptimizer
acqOpt = AcquisitionOptimizer(space, acqOpt_name, **acqOpt_config)
from ..acquisitions import select_acquisition
return select_acquisition(self.config['acquisition']['type']).fromConfig(model, space, acqOpt, None, self.config['acquisition']) | Imports the acquisition | Below is the the instruction that describes the task:
### Input:
Imports the acquisition
### Response:
def _get_acquisition(self, model, space):
"""
Imports the acquisition
"""
from copy import deepcopy
acqOpt_config = deepcopy(self.config['acquisition']['optimizer'])
acqOpt_name = acqOpt_config['name']
del acqOpt_config['name']
from ..optimization import AcquisitionOptimizer
acqOpt = AcquisitionOptimizer(space, acqOpt_name, **acqOpt_config)
from ..acquisitions import select_acquisition
return select_acquisition(self.config['acquisition']['type']).fromConfig(model, space, acqOpt, None, self.config['acquisition']) |
def default(self, line):
'''
Implement short-cut commands: any unique command prefix should
work.'''
cmdargs = line.split()
remain = ' '.join(cmdargs[1:])
if 'show'.startswith(cmdargs[0]):
self.do_show(remain)
elif 'set'.startswith(cmdargs[0]):
self.do_set(remain)
elif 'sendeth'.startswith(cmdargs[0]):
self.do_sendeth(remain)
elif 'load'.startswith(cmdargs[0]):
self.do_load(remain)
elif 'save'.startswith(cmdargs[0]):
self.do_save(remain)
elif 'monitor'.startswith(cmdargs[0]):
self.do_monitor(remain)
elif 'unmonitor'.startswith(cmdargs[0]):
self.do_unmonitor(remain)
elif 'exec'.startswith(cmdargs[0]):
self.do_exec(remain)
elif 'add'.startswith(cmdargs[0]):
self.do_add(remain)
elif 'remove'.startswith(cmdargs[0]):
self.do_remove(remain)
elif 'replay'.startswith(cmdargs[0]):
self.do_replay(remain)
else:
print ("Unrecognized command '{}'".format(line)) | Implement short-cut commands: any unique command prefix should
work. | Below is the the instruction that describes the task:
### Input:
Implement short-cut commands: any unique command prefix should
work.
### Response:
def default(self, line):
'''
Implement short-cut commands: any unique command prefix should
work.'''
cmdargs = line.split()
remain = ' '.join(cmdargs[1:])
if 'show'.startswith(cmdargs[0]):
self.do_show(remain)
elif 'set'.startswith(cmdargs[0]):
self.do_set(remain)
elif 'sendeth'.startswith(cmdargs[0]):
self.do_sendeth(remain)
elif 'load'.startswith(cmdargs[0]):
self.do_load(remain)
elif 'save'.startswith(cmdargs[0]):
self.do_save(remain)
elif 'monitor'.startswith(cmdargs[0]):
self.do_monitor(remain)
elif 'unmonitor'.startswith(cmdargs[0]):
self.do_unmonitor(remain)
elif 'exec'.startswith(cmdargs[0]):
self.do_exec(remain)
elif 'add'.startswith(cmdargs[0]):
self.do_add(remain)
elif 'remove'.startswith(cmdargs[0]):
self.do_remove(remain)
elif 'replay'.startswith(cmdargs[0]):
self.do_replay(remain)
else:
print ("Unrecognized command '{}'".format(line)) |
def acceptRecord(self, item):
"""
Closes the tree popup and sets the current record.
:param record | <orb.Table>
"""
record = item.record()
self.treePopupWidget().close()
self.setCurrentRecord(record) | Closes the tree popup and sets the current record.
:param record | <orb.Table> | Below is the the instruction that describes the task:
### Input:
Closes the tree popup and sets the current record.
:param record | <orb.Table>
### Response:
def acceptRecord(self, item):
"""
Closes the tree popup and sets the current record.
:param record | <orb.Table>
"""
record = item.record()
self.treePopupWidget().close()
self.setCurrentRecord(record) |
async def get(self, *, encoding=None, decoder=None):
"""Wait for and return pub/sub message from one of channels.
Return value is either:
* tuple of two elements: channel & message;
* tuple of three elements: pattern channel, (target channel & message);
* or None in case Receiver is not active or has just been stopped.
:raises aioredis.ChannelClosedError: If listener is stopped
and all messages have been received.
"""
# TODO: add note about raised exception and end marker.
# Flow before ClosableQueue:
# - ch.get() -> message
# - ch.close() -> ch.put(None)
# - ch.get() -> None
# - ch.get() -> ChannelClosedError
# Current flow:
# - ch.get() -> message
# - ch.close() -> ch._closed = True
# - ch.get() -> ChannelClosedError
assert decoder is None or callable(decoder), decoder
if self._queue.exhausted:
raise ChannelClosedError()
obj = await self._queue.get()
if obj is EndOfStream:
return
ch, msg = obj
if ch.is_pattern:
dest_ch, msg = msg
if encoding is not None:
msg = msg.decode(encoding)
if decoder is not None:
msg = decoder(msg)
if ch.is_pattern:
return ch, (dest_ch, msg)
return ch, msg | Wait for and return pub/sub message from one of channels.
Return value is either:
* tuple of two elements: channel & message;
* tuple of three elements: pattern channel, (target channel & message);
* or None in case Receiver is not active or has just been stopped.
:raises aioredis.ChannelClosedError: If listener is stopped
and all messages have been received. | Below is the the instruction that describes the task:
### Input:
Wait for and return pub/sub message from one of channels.
Return value is either:
* tuple of two elements: channel & message;
* tuple of three elements: pattern channel, (target channel & message);
* or None in case Receiver is not active or has just been stopped.
:raises aioredis.ChannelClosedError: If listener is stopped
and all messages have been received.
### Response:
async def get(self, *, encoding=None, decoder=None):
"""Wait for and return pub/sub message from one of channels.
Return value is either:
* tuple of two elements: channel & message;
* tuple of three elements: pattern channel, (target channel & message);
* or None in case Receiver is not active or has just been stopped.
:raises aioredis.ChannelClosedError: If listener is stopped
and all messages have been received.
"""
# TODO: add note about raised exception and end marker.
# Flow before ClosableQueue:
# - ch.get() -> message
# - ch.close() -> ch.put(None)
# - ch.get() -> None
# - ch.get() -> ChannelClosedError
# Current flow:
# - ch.get() -> message
# - ch.close() -> ch._closed = True
# - ch.get() -> ChannelClosedError
assert decoder is None or callable(decoder), decoder
if self._queue.exhausted:
raise ChannelClosedError()
obj = await self._queue.get()
if obj is EndOfStream:
return
ch, msg = obj
if ch.is_pattern:
dest_ch, msg = msg
if encoding is not None:
msg = msg.decode(encoding)
if decoder is not None:
msg = decoder(msg)
if ch.is_pattern:
return ch, (dest_ch, msg)
return ch, msg |
def _frozen_pool_single_run(kwargs):
"""Single run wrapper for the frozen pool, makes a single run and passes kwargs"""
idx = kwargs.pop('idx')
frozen_kwargs = _frozen_pool_single_run.kwargs
frozen_kwargs.update(kwargs) # in case of `run_map`
# we need to update job's args and kwargs
traj = frozen_kwargs['traj']
traj.f_set_crun(idx)
return _sigint_handling_single_run(frozen_kwargs) | Single run wrapper for the frozen pool, makes a single run and passes kwargs | Below is the the instruction that describes the task:
### Input:
Single run wrapper for the frozen pool, makes a single run and passes kwargs
### Response:
def _frozen_pool_single_run(kwargs):
"""Single run wrapper for the frozen pool, makes a single run and passes kwargs"""
idx = kwargs.pop('idx')
frozen_kwargs = _frozen_pool_single_run.kwargs
frozen_kwargs.update(kwargs) # in case of `run_map`
# we need to update job's args and kwargs
traj = frozen_kwargs['traj']
traj.f_set_crun(idx)
return _sigint_handling_single_run(frozen_kwargs) |
async def login(self, request, id_):
"""Login an user by ID."""
session = await self.load(request)
session['id'] = id_ | Login an user by ID. | Below is the the instruction that describes the task:
### Input:
Login an user by ID.
### Response:
async def login(self, request, id_):
"""Login an user by ID."""
session = await self.load(request)
session['id'] = id_ |
def xhtml(self, outfn=None, ext='.xhtml', css=None, **params):
"""convert the Schema to XHTML with the given output filename or with the given extension."""
from markdown import markdown
from copy import deepcopy
from bl.file import File
from .xslt import XSLT
from .rng import RNG
from . import XML, PATH, etree
rncfn = os.path.splitext(self.fn)[0] + '.rnc'
rngfn = os.path.splitext(self.fn)[0] + '.rng'
htmlfn = os.path.splitext(self.fn)[0] + '.html'
if self.fn==rncfn or os.path.exists(rncfn):
rngfn = Schema(rncfn).trang(ext='.rng')
assert os.path.exists(rngfn)
# convert all <r:define> elements into a <a:definition> blocks containing a compact syntax alternative
rng = RNG(fn=rngfn)
for define in rng.xpath(rng.root, "//r:define"):
log.debug("%s %r" % (rng.tag_name(define), define.attrib))
tempdefine = deepcopy(define)
tempgrammar = deepcopy(rng.root); tempgrammar.text = '\n'
for ch in tempgrammar.getchildren(): rng.remove(ch)
tempgrammar.insert(0, tempdefine)
for adoc in rng.xpath(tempdefine, ".//a:documentation | .//a:definition"):
rng.remove(adoc)
with tempfile.TemporaryDirectory() as tempdir:
x = XML(fn=os.path.join(tempdir, 'define.rng'), root=tempgrammar)
x.write()
newfn = Schema(x.fn).trang(ext='.rnc')
txt = open(newfn, 'rb').read().decode('utf-8')
if '\n\n' in txt:
txt = txt[txt.index('\n\n')+1:].strip()
adef = etree.Element("{%(a)s}definition" % RNG.NS)
adef.text = txt
adef.tail = '\n\t\t'
log.debug(adef.text)
annotations = rng.xpath(define, "a:*")
if len(annotations) > 0:
index = define.index(annotations[-1])+1
else:
index = 0
define.insert(index, adef)
rng.write()
xslt = XSLT(fn=os.path.join(PATH, 'xslts', 'rng2md.xslt'))
md = xslt.saxon9(rng.root, **params).strip()
html_body = markdown(md,
output_format="xhtml5",
extensions=[ # see https://python-markdown.github.io/extensions/
'markdown.extensions.extra',
'markdown.extensions.admonition',
'markdown.extensions.headerid',
'markdown.extensions.sane_lists',
'markdown.extensions.toc']).strip()
html_text = """<html><head><meta charset="UTF-8"/><style type="text/css">
body {font-family:sans-serif;line-height:1.3}
h1,h2,h3 {margin:1em 0 .25em 0}
h1 {font-size:2rem;font-weight:normal;}
h2 {font-size:1.2rem;font-weight:bold;}
h3 {font-size:1.15rem;font-weight:normal;font-style:italic;}
p {margin:0 0 .5rem 0;}
p.subtitle {font-size:1.2rem;font-family:sans-serif;margin-bottom:1em}
p.code {font-family:monospace;font-size:.6rem;color:#666;line-height:1.1}
pre {font-family:monospace;font-size:.6rem;color:#666;line-height:1.1;margin-left:1.5rem;}
hr {border:0;border-top:1px solid #999;margin:1rem 0;}
</style></head><body>\n""" + html_body + """\n</body></html>"""
html = XML(fn=htmlfn, root=html_text)
return html | convert the Schema to XHTML with the given output filename or with the given extension. | Below is the the instruction that describes the task:
### Input:
convert the Schema to XHTML with the given output filename or with the given extension.
### Response:
def xhtml(self, outfn=None, ext='.xhtml', css=None, **params):
    """convert the Schema to XHTML with the given output filename or with the given extension."""
    # NOTE(review): outfn, ext, and css are accepted but never used below --
    # output is always written next to self.fn as .html; confirm whether
    # these parameters are vestigial or should be honored.
    from markdown import markdown
    from copy import deepcopy
    from bl.file import File
    from .xslt import XSLT
    from .rng import RNG
    from . import XML, PATH, etree
    # Derive sibling filenames from the schema's own path.
    rncfn = os.path.splitext(self.fn)[0] + '.rnc'
    rngfn = os.path.splitext(self.fn)[0] + '.rng'
    htmlfn = os.path.splitext(self.fn)[0] + '.html'
    # If a compact-syntax (.rnc) source exists, convert it to XML syntax
    # (.rng) with trang first -- the rest of the pipeline works on XML.
    if self.fn==rncfn or os.path.exists(rncfn):
        rngfn = Schema(rncfn).trang(ext='.rng')
    assert os.path.exists(rngfn)
    # convert all <r:define> elements into a <a:definition> blocks containing a compact syntax alternative
    rng = RNG(fn=rngfn)
    for define in rng.xpath(rng.root, "//r:define"):
        log.debug("%s %r" % (rng.tag_name(define), define.attrib))
        # Build a throwaway grammar holding just this define (with its
        # annotation children stripped) so trang can render it alone.
        tempdefine = deepcopy(define)
        tempgrammar = deepcopy(rng.root); tempgrammar.text = '\n'
        for ch in tempgrammar.getchildren(): rng.remove(ch)
        tempgrammar.insert(0, tempdefine)
        for adoc in rng.xpath(tempdefine, ".//a:documentation | .//a:definition"):
            rng.remove(adoc)
        # Round-trip through trang to obtain the compact (.rnc) rendering
        # of this single definition.
        with tempfile.TemporaryDirectory() as tempdir:
            x = XML(fn=os.path.join(tempdir, 'define.rng'), root=tempgrammar)
            x.write()
            newfn = Schema(x.fn).trang(ext='.rnc')
            txt = open(newfn, 'rb').read().decode('utf-8')
        # Drop the rnc header block (everything up to the first blank line).
        if '\n\n' in txt:
            txt = txt[txt.index('\n\n')+1:].strip()
        # Attach the compact text as an <a:definition> annotation, inserted
        # right after any existing a:* annotation elements.
        adef = etree.Element("{%(a)s}definition" % RNG.NS)
        adef.text = txt
        adef.tail = '\n\t\t'
        log.debug(adef.text)
        annotations = rng.xpath(define, "a:*")
        if len(annotations) > 0:
            index = define.index(annotations[-1])+1
        else:
            index = 0
        define.insert(index, adef)
    rng.write()
    # Render the annotated grammar to Markdown via XSLT, then to XHTML.
    xslt = XSLT(fn=os.path.join(PATH, 'xslts', 'rng2md.xslt'))
    md = xslt.saxon9(rng.root, **params).strip()
    html_body = markdown(md,
        output_format="xhtml5",
        extensions=[ # see https://python-markdown.github.io/extensions/
            'markdown.extensions.extra',
            'markdown.extensions.admonition',
            'markdown.extensions.headerid',
            'markdown.extensions.sane_lists',
            'markdown.extensions.toc']).strip()
    html_text = """<html><head><meta charset="UTF-8"/><style type="text/css">
body {font-family:sans-serif;line-height:1.3}
h1,h2,h3 {margin:1em 0 .25em 0}
h1 {font-size:2rem;font-weight:normal;}
h2 {font-size:1.2rem;font-weight:bold;}
h3 {font-size:1.15rem;font-weight:normal;font-style:italic;}
p {margin:0 0 .5rem 0;}
p.subtitle {font-size:1.2rem;font-family:sans-serif;margin-bottom:1em}
p.code {font-family:monospace;font-size:.6rem;color:#666;line-height:1.1}
pre {font-family:monospace;font-size:.6rem;color:#666;line-height:1.1;margin-left:1.5rem;}
hr {border:0;border-top:1px solid #999;margin:1rem 0;}
</style></head><body>\n""" + html_body + """\n</body></html>"""
    html = XML(fn=htmlfn, root=html_text)
    return html
def generate_bangbang_output(self):
"""Generates the latest on or off pulse in
the string of on (True) or off (False) pulses
according to the desired heat_level setting. Successive calls
to this function will return the next value in the
on/off array series. Call this at control loop rate to
obtain the necessary on/off pulse train.
This system will not work if the caller expects to be able
to specify a new heat_level at every control loop iteration.
Only the value set at every number_of_segments iterations
will be picked up for output! Call about_to_rollover to determine
if it's time to set a new heat_level, if a new level is desired."""
if self._current_index >= self._num_segments:
# we're due to switch over to the next
# commanded heat_level
self._heat_level_now = self._heat_level
# reset array index
self._current_index = 0
# return output
out = self._output_array[self._heat_level_now][self._current_index]
self._current_index += 1
return out | Generates the latest on or off pulse in
the string of on (True) or off (False) pulses
according to the desired heat_level setting. Successive calls
to this function will return the next value in the
on/off array series. Call this at control loop rate to
obtain the necessary on/off pulse train.
This system will not work if the caller expects to be able
to specify a new heat_level at every control loop iteration.
Only the value set at every number_of_segments iterations
will be picked up for output! Call about_to_rollover to determine
if it's time to set a new heat_level, if a new level is desired. | Below is the the instruction that describes the task:
### Input:
Generates the latest on or off pulse in
the string of on (True) or off (False) pulses
according to the desired heat_level setting. Successive calls
to this function will return the next value in the
on/off array series. Call this at control loop rate to
obtain the necessary on/off pulse train.
This system will not work if the caller expects to be able
to specify a new heat_level at every control loop iteration.
Only the value set at every number_of_segments iterations
will be picked up for output! Call about_to_rollover to determine
if it's time to set a new heat_level, if a new level is desired.
### Response:
def generate_bangbang_output(self):
    """Return the next on (True) / off (False) pulse of the bang-bang train.

    Pulses come from a precomputed per-level on/off array.  A newly
    commanded heat_level is only latched when the current segment sequence
    rolls over (every number_of_segments calls) -- intermediate settings
    are ignored.  Call this at the control loop rate; use
    about_to_rollover to learn when a new level may be set.
    """
    rolled_over = self._current_index >= self._num_segments
    if rolled_over:
        # Latch the most recently commanded level and restart the
        # segment sequence from the beginning.
        self._heat_level_now = self._heat_level
        self._current_index = 0
    segment = self._current_index
    pulse = self._output_array[self._heat_level_now][segment]
    self._current_index = segment + 1
    return pulse
def GetEntries(dataList, title="Select", msg=""):
"""
Get entries of the list
title: Window name
mag: Label of the check button
return data dictionary like:
{'y': '5.0', 'x': '100', 'z': 'save'}
"""
root = tkinter.Tk()
root.title(title)
label = tkinter.Label(root, text=msg)
label.pack()
entries = []
for item in dataList:
tkinter.Label(root, text=item).pack()
entry = tkinter.Entry(root)
entry.pack()
entries.append(entry)
# print entries
tkinter.Button(root, text="OK", fg="black", command=root.quit).pack()
root.mainloop()
result = {}
for (entry, data) in zip(entries, dataList):
result[data] = entry.get()
root.destroy()
print(result)
return result | Get entries of the list
title: Window name
mag: Label of the check button
return data dictionary like:
{'y': '5.0', 'x': '100', 'z': 'save'} | Below is the the instruction that describes the task:
### Input:
Get entries of the list
title: Window name
msg: Label of the check button
return data dictionary like:
{'y': '5.0', 'x': '100', 'z': 'save'}
### Response:
def GetEntries(dataList, title="Select", msg=""):
    """
    Open a small Tk dialog with one text entry per item of dataList.
    title: window title
    msg: label text shown at the top of the window
    Returns a dict mapping each item of dataList to the text typed into
    its entry, e.g.:
    {'y': '5.0', 'x': '100', 'z': 'save'}
    """
    window = tkinter.Tk()
    window.title(title)
    tkinter.Label(window, text=msg).pack()
    fields = []
    for name in dataList:
        tkinter.Label(window, text=name).pack()
        field = tkinter.Entry(window)
        field.pack()
        fields.append(field)
    tkinter.Button(window, text="OK", fg="black", command=window.quit).pack()
    window.mainloop()
    # Read the entry widgets before destroying the window.
    result = {name: field.get() for (field, name) in zip(fields, dataList)}
    window.destroy()
    print(result)
    return result
def infos(self, type=None, failed=False):
"""
Get infos in the network.
type specifies the type of info (defaults to Info). failed { False,
True, "all" } specifies the failed state of the infos. To get infos
from a specific node, see the infos() method in class
:class:`~dallinger.models.Node`.
"""
if type is None:
type = Info
if failed not in ["all", False, True]:
raise ValueError("{} is not a valid failed".format(failed))
if failed == "all":
return type.query.filter_by(network_id=self.id).all()
else:
return type.query.filter_by(network_id=self.id, failed=failed).all() | Get infos in the network.
type specifies the type of info (defaults to Info). failed { False,
True, "all" } specifies the failed state of the infos. To get infos
from a specific node, see the infos() method in class
:class:`~dallinger.models.Node`. | Below is the the instruction that describes the task:
### Input:
Get infos in the network.
type specifies the type of info (defaults to Info). failed { False,
True, "all" } specifies the failed state of the infos. To get infos
from a specific node, see the infos() method in class
:class:`~dallinger.models.Node`.
### Response:
def infos(self, type=None, failed=False):
    """
    Get infos in the network.
    type specifies the type of info (defaults to Info). failed { False,
    True, "all" } specifies the failed state of the infos. To get infos
    from a specific node, see the infos() method in class
    :class:`~dallinger.models.Node`.
    """
    if failed not in ["all", False, True]:
        raise ValueError("{} is not a valid failed".format(failed))
    info_type = Info if type is None else type
    if failed == "all":
        return info_type.query.filter_by(network_id=self.id).all()
    return info_type.query.filter_by(network_id=self.id, failed=failed).all()
def control_valve_choke_P_g(xT, gamma, P1=None, P2=None):
r'''Calculates either the upstream or downstream pressure at which choked
flow though a gas control valve occurs, given either a set upstream or
downstream pressure. Implements an analytical solution of
the needed equations from the full function
:py:func:`~.size_control_valve_g`. A singularity arises as `xT` goes to 1
and `gamma` goes to 1.4.
.. math::
P_1 = - \frac{7 P_{2}}{5 \gamma x_T - 7}
.. math::
P_2 = \frac{P_{1}}{7} \left(- 5 \gamma x_T + 7\right)
Parameters
----------
xT : float, optional
Pressure difference ratio factor of a valve without fittings at choked
flow [-]
gamma : float
Specific heat capacity ratio [-]
P1 : float, optional
Absolute pressure upstream of the valve [Pa]
P2 : float, optional
Absolute pressure downstream of the valve [Pa]
Returns
-------
P_choke : float
Pressure at which a choke occurs in the gas valve [Pa]
Notes
-----
Extremely cheap to compute.
Examples
--------
>>> control_valve_choke_P_g(1, 1.3, 1E5)
7142.857142857143
>>> control_valve_choke_P_g(1, 1.3, P2=7142.857142857143)
100000.0
'''
if P2 is None:
ans = P2 = P1*(-5.0*gamma*xT + 7.0)/7.0
elif P1 is None:
ans = P1 = -7.0*P2/(5.0*gamma*xT - 7.0)
else:
raise Exception('Either P1 or P2 needs to be specified')
return ans | r'''Calculates either the upstream or downstream pressure at which choked
flow though a gas control valve occurs, given either a set upstream or
downstream pressure. Implements an analytical solution of
the needed equations from the full function
:py:func:`~.size_control_valve_g`. A singularity arises as `xT` goes to 1
and `gamma` goes to 1.4.
.. math::
P_1 = - \frac{7 P_{2}}{5 \gamma x_T - 7}
.. math::
P_2 = \frac{P_{1}}{7} \left(- 5 \gamma x_T + 7\right)
Parameters
----------
xT : float, optional
Pressure difference ratio factor of a valve without fittings at choked
flow [-]
gamma : float
Specific heat capacity ratio [-]
P1 : float, optional
Absolute pressure upstream of the valve [Pa]
P2 : float, optional
Absolute pressure downstream of the valve [Pa]
Returns
-------
P_choke : float
Pressure at which a choke occurs in the gas valve [Pa]
Notes
-----
Extremely cheap to compute.
Examples
--------
>>> control_valve_choke_P_g(1, 1.3, 1E5)
7142.857142857143
>>> control_valve_choke_P_g(1, 1.3, P2=7142.857142857143)
100000.0 | Below is the the instruction that describes the task:
### Input:
r'''Calculates either the upstream or downstream pressure at which choked
flow though a gas control valve occurs, given either a set upstream or
downstream pressure. Implements an analytical solution of
the needed equations from the full function
:py:func:`~.size_control_valve_g`. A singularity arises as `xT` goes to 1
and `gamma` goes to 1.4.
.. math::
P_1 = - \frac{7 P_{2}}{5 \gamma x_T - 7}
.. math::
P_2 = \frac{P_{1}}{7} \left(- 5 \gamma x_T + 7\right)
Parameters
----------
xT : float, optional
Pressure difference ratio factor of a valve without fittings at choked
flow [-]
gamma : float
Specific heat capacity ratio [-]
P1 : float, optional
Absolute pressure upstream of the valve [Pa]
P2 : float, optional
Absolute pressure downstream of the valve [Pa]
Returns
-------
P_choke : float
Pressure at which a choke occurs in the gas valve [Pa]
Notes
-----
Extremely cheap to compute.
Examples
--------
>>> control_valve_choke_P_g(1, 1.3, 1E5)
7142.857142857143
>>> control_valve_choke_P_g(1, 1.3, P2=7142.857142857143)
100000.0
### Response:
def control_valve_choke_P_g(xT, gamma, P1=None, P2=None):
    r'''Calculates either the upstream or downstream pressure at which choked
    flow though a gas control valve occurs, given either a set upstream or
    downstream pressure. Implements an analytical solution of
    the needed equations from the full function
    :py:func:`~.size_control_valve_g`. A singularity arises as `xT` goes to 1
    and `gamma` goes to 1.4.
    .. math::
        P_1 = - \frac{7 P_{2}}{5 \gamma x_T - 7}
    .. math::
        P_2 = \frac{P_{1}}{7} \left(- 5 \gamma x_T + 7\right)
    Parameters
    ----------
    xT : float, optional
        Pressure difference ratio factor of a valve without fittings at choked
        flow [-]
    gamma : float
        Specific heat capacity ratio [-]
    P1 : float, optional
        Absolute pressure upstream of the valve [Pa]
    P2 : float, optional
        Absolute pressure downstream of the valve [Pa]
    Returns
    -------
    P_choke : float
        Pressure at which a choke occurs in the gas valve [Pa]
    Raises
    ------
    ValueError
        If both, or neither, of `P1` and `P2` are specified.
    Notes
    -----
    Extremely cheap to compute.
    Examples
    --------
    >>> control_valve_choke_P_g(1, 1.3, 1E5)
    7142.857142857143
    >>> control_valve_choke_P_g(1, 1.3, P2=7142.857142857143)
    100000.0
    '''
    # Exactly one of P1/P2 must be given: the function solves for the other.
    # (Previously, both-given raised a generic Exception with a misleading
    # "needs to be specified" message, and neither-given crashed with a
    # TypeError from arithmetic on None.)
    if P1 is not None and P2 is not None:
        raise ValueError('Only one of P1 or P2 can be specified')
    if P2 is None:
        if P1 is None:
            raise ValueError('Either P1 or P2 needs to be specified')
        ans = P1*(-5.0*gamma*xT + 7.0)/7.0
    else:
        ans = -7.0*P2/(5.0*gamma*xT - 7.0)
    return ans
def search(**kw):
"""Search the catalog adapter
:returns: Catalog search results
:rtype: iterable
"""
portal = get_portal()
catalog = ICatalog(portal)
catalog_query = ICatalogQuery(catalog)
query = catalog_query.make_query(**kw)
return catalog(query) | Search the catalog adapter
:returns: Catalog search results
:rtype: iterable | Below is the the instruction that describes the task:
### Input:
Search the catalog adapter
:returns: Catalog search results
:rtype: iterable
### Response:
def search(**kw):
    """Search the catalog adapter
    :returns: Catalog search results
    :rtype: iterable
    """
    site = get_portal()
    adapter = ICatalog(site)
    # Translate the keyword arguments into a catalog query and run it.
    query = ICatalogQuery(adapter).make_query(**kw)
    return adapter(query)
def _api_url(self, path, **context):
""" Build the full url to the API endpoint """
if self.host == 'github.com':
baseurl = "https://api.github.com"
else:
baseurl = "https://{}/api/v3".format(self.host)
return baseurl + path.format(**context) | Build the full url to the API endpoint | Below is the the instruction that describes the task:
### Input:
Build the full url to the API endpoint
### Response:
def _api_url(self, path, **context):
""" Build the full url to the API endpoint """
if self.host == 'github.com':
baseurl = "https://api.github.com"
else:
baseurl = "https://{}/api/v3".format(self.host)
return baseurl + path.format(**context) |
def extract_emails(text):
"""Return a list of email addresses extracted from the string."""
text = text.replace(u'\u2024', '.')
emails = []
for m in EMAIL_RE.findall(text):
emails.append(m[0])
return emails | Return a list of email addresses extracted from the string. | Below is the the instruction that describes the task:
### Input:
Return a list of email addresses extracted from the string.
### Response:
def extract_emails(text):
    """Return a list of email addresses extracted from the string."""
    # Normalize "one dot leader" (U+2024), a common dot substitute, to '.'.
    normalized = text.replace(u'\u2024', '.')
    return [match[0] for match in EMAIL_RE.findall(normalized)]
def tail(f, window=20):
"""
Returns the last `window` lines of file `f` as a list.
@param window: the number of lines.
"""
if window == 0:
return []
BUFSIZ = 1024
f.seek(0, 2)
bytes = f.tell()
size = window + 1
block = -1
data = []
while size > 0 and bytes > 0:
if bytes - BUFSIZ > 0:
# Seek back one whole BUFSIZ
f.seek(block * BUFSIZ, 2)
# read BUFFER
data.insert(0, f.read(BUFSIZ).decode('utf-8', errors='ignore'))
else:
# file too small, start from begining
f.seek(0,0)
# only read what was not read
data.insert(0, f.read(bytes).decode('utf-8', errors='ignore'))
linesFound = data[0].count('\n')
size -= linesFound
bytes -= BUFSIZ
block -= 1
return ''.join(data).splitlines()[-window:] | Returns the last `window` lines of file `f` as a list.
@param window: the number of lines. | Below is the the instruction that describes the task:
### Input:
Returns the last `window` lines of file `f` as a list.
@param window: the number of lines.
### Response:
def tail(f, window=20):
    """
    Returns the last `window` lines of file `f` as a list.
    `f` must be a binary file object open for reading; its contents are
    decoded as UTF-8 with undecodable bytes ignored.
    @param window: the number of lines.
    """
    if window == 0:
        return []
    BUFSIZ = 1024
    f.seek(0, 2)  # jump to EOF to learn the file size
    # Renamed from `bytes`: never shadow the builtin.
    remaining = f.tell()
    # Read one extra line so the window is complete even when the last
    # chunk starts mid-line.
    lines_needed = window + 1
    block = -1
    data = []
    while lines_needed > 0 and remaining > 0:
        if remaining - BUFSIZ > 0:
            # Seek back one whole BUFSIZ from the end
            f.seek(block * BUFSIZ, 2)
            # read BUFFER
            data.insert(0, f.read(BUFSIZ).decode('utf-8', errors='ignore'))
        else:
            # file too small, start from beginning
            f.seek(0, 0)
            # only read what was not read
            data.insert(0, f.read(remaining).decode('utf-8', errors='ignore'))
        lines_needed -= data[0].count('\n')
        remaining -= BUFSIZ
        block -= 1
    return ''.join(data).splitlines()[-window:]
def contains(self, data):
"""
Checks if the provided data is stored in at least one node of the list.
:param data: the seeked data
:type data: object
:returns: a boolean
"""
for item in self:
if item.data() == data:
return True
return False | Checks if the provided data is stored in at least one node of the list.
:param data: the seeked data
:type data: object
:returns: a boolean | Below is the the instruction that describes the task:
### Input:
Checks if the provided data is stored in at least one node of the list.
:param data: the seeked data
:type data: object
:returns: a boolean
### Response:
def contains(self, data):
    """
    Checks if the provided data is stored in at least one node of the list.
    :param data: the seeked data
    :type data: object
    :returns: a boolean
    """
    # Short-circuits on the first node whose payload matches.
    return any(node.data() == data for node in self)
def safe_join(directory, *pathnames):
"""Safely join `directory` and one or more untrusted `pathnames`. If this
cannot be done, this function returns ``None``.
:param directory: the base directory.
:param pathnames: the untrusted pathnames relative to that directory.
"""
parts = [directory]
for filename in pathnames:
if filename != "":
filename = posixpath.normpath(filename)
for sep in _os_alt_seps:
if sep in filename:
return None
if os.path.isabs(filename) or filename == ".." or filename.startswith("../"):
return None
parts.append(filename)
return posixpath.join(*parts) | Safely join `directory` and one or more untrusted `pathnames`. If this
cannot be done, this function returns ``None``.
:param directory: the base directory.
:param pathnames: the untrusted pathnames relative to that directory. | Below is the the instruction that describes the task:
### Input:
Safely join `directory` and one or more untrusted `pathnames`. If this
cannot be done, this function returns ``None``.
:param directory: the base directory.
:param pathnames: the untrusted pathnames relative to that directory.
### Response:
def safe_join(directory, *pathnames):
    """Safely join `directory` and one or more untrusted `pathnames`. If this
    cannot be done, this function returns ``None``.
    :param directory: the base directory.
    :param pathnames: the untrusted pathnames relative to that directory.
    """
    parts = [directory]
    for filename in pathnames:
        if filename != "":
            # Collapse '.' segments and redundant separators first so the
            # traversal checks below see a canonical form.
            filename = posixpath.normpath(filename)
        # Reject any platform-specific alternate separator (e.g. '\\' on
        # Windows) that normpath would not have handled.
        # NOTE(review): indentation was lost in this copy; nesting
        # reconstructed so that only the normpath call is conditional on a
        # non-empty filename, matching upstream Flask -- confirm against the
        # original file.
        for sep in _os_alt_seps:
            if sep in filename:
                return None
        # Refuse absolute paths and anything escaping the base directory.
        if os.path.isabs(filename) or filename == ".." or filename.startswith("../"):
            return None
        parts.append(filename)
    return posixpath.join(*parts)
def obo(self):
"""str: the synonym serialized in obo format.
"""
return 'synonym: "{}" {} [{}]'.format(
self.desc,
' '.join([self.scope, self.syn_type.name])\
if self.syn_type else self.scope,
', '.join(self.xref)
) | str: the synonym serialized in obo format. | Below is the the instruction that describes the task:
### Input:
str: the synonym serialized in obo format.
### Response:
def obo(self):
    """str: the synonym serialized in obo format.
    """
    # The scope is optionally followed by the synonym type's name.
    if self.syn_type:
        scope_field = ' '.join([self.scope, self.syn_type.name])
    else:
        scope_field = self.scope
    xref_field = ', '.join(self.xref)
    return 'synonym: "{}" {} [{}]'.format(self.desc, scope_field, xref_field)
def cmd_ssh(options):
"""Connect to the specified instance via ssh.
Finds instances that match the user specified args that are also
in the 'running' state. The target instance is determined, the
required connection information is retreived (IP, key and ssh
user-name), then an 'ssh' connection is made to the instance.
Args:
options (object): contains args and data from parser
"""
import os
import subprocess
from os.path import expanduser
options.inst_state = "running"
(i_info, param_str) = gather_data(options)
(tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command)
home_dir = expanduser("~")
if options.user is None:
tar_aminame = awsc.get_one_aminame(i_info[tar_idx]['ami'])
options.user = cmd_ssh_user(tar_aminame,
i_info[tar_idx]['tag']['Name'])
else:
debg.dprint("LoginUser set by user: ", options.user)
os_spec = {"nt": ["powershell plink", "\\", "ppk"]}
c_itm = os_spec.get(os.name, ["ssh", "/", "pem"])
cmd_ssh_run = c_itm[0]
if not options.nopem:
cmd_ssh_run += (" -i {0}{1}.aws{1}{2}.{3}".
format(home_dir, c_itm[1], i_info[tar_idx]['ssh_key'],
c_itm[2]))
else:
debg.dprint("Connect string: ", "ssh {}@{}".
format(options.user, i_info[tar_idx]['pub_dns_name']))
cmd_ssh_run += " {0}@{1}".format(options.user,
i_info[tar_idx]['pub_dns_name'])
print(cmd_ssh_run)
subprocess.call(cmd_ssh_run, shell=True) | Connect to the specified instance via ssh.
Finds instances that match the user specified args that are also
in the 'running' state. The target instance is determined, the
required connection information is retreived (IP, key and ssh
user-name), then an 'ssh' connection is made to the instance.
Args:
options (object): contains args and data from parser | Below is the the instruction that describes the task:
### Input:
Connect to the specified instance via ssh.
Finds instances that match the user specified args that are also
in the 'running' state. The target instance is determined, the
required connection information is retreived (IP, key and ssh
user-name), then an 'ssh' connection is made to the instance.
Args:
options (object): contains args and data from parser
### Response:
def cmd_ssh(options):
    """Connect to the specified instance via ssh.
    Finds instances that match the user specified args that are also
    in the 'running' state. The target instance is determined, the
    required connection information is retreived (IP, key and ssh
    user-name), then an 'ssh' connection is made to the instance.
    Args:
        options (object): contains args and data from parser
    """
    import os
    import subprocess
    from os.path import expanduser
    # Only running instances can be connected to.
    options.inst_state = "running"
    (i_info, param_str) = gather_data(options)
    (tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command)
    home_dir = expanduser("~")
    if options.user is None:
        # Guess the login user from the AMI name when not given explicitly.
        tar_aminame = awsc.get_one_aminame(i_info[tar_idx]['ami'])
        options.user = cmd_ssh_user(tar_aminame,
                                    i_info[tar_idx]['tag']['Name'])
    else:
        debg.dprint("LoginUser set by user: ", options.user)
    # Per-platform [ssh client, path separator, key-file extension];
    # Windows ("nt") uses plink with .ppk keys, everything else plain ssh.
    os_spec = {"nt": ["powershell plink", "\\", "ppk"]}
    c_itm = os_spec.get(os.name, ["ssh", "/", "pem"])
    cmd_ssh_run = c_itm[0]
    if not options.nopem:
        # Point the client at the instance's key file under ~/.aws/.
        cmd_ssh_run += (" -i {0}{1}.aws{1}{2}.{3}".
                        format(home_dir, c_itm[1], i_info[tar_idx]['ssh_key'],
                               c_itm[2]))
    else:
        debg.dprint("Connect string: ", "ssh {}@{}".
                    format(options.user, i_info[tar_idx]['pub_dns_name']))
    cmd_ssh_run += " {0}@{1}".format(options.user,
                                     i_info[tar_idx]['pub_dns_name'])
    print(cmd_ssh_run)
    # NOTE(review): shell=True with an interpolated command string -- inputs
    # come from AWS metadata/CLI args, but worth confirming none are
    # attacker-controlled.
    subprocess.call(cmd_ssh_run, shell=True)
def _repr_html_(self):
"""Generates the HTML representation.
"""
parts = []
if self._class:
parts.append('<div id="hh_%s" class="%s">%s</div>' % (self._id, self._class, self._markup))
else:
parts.append('<div id="hh_%s">%s</div>' % (self._id, self._markup))
if len(self._script) != 0:
parts.append('<script>')
parts.append('require([')
parts.append(','.join(['"%s"' % d[0] for d in self._dependencies]))
parts.append('], function(')
parts.append(','.join([d[1] for d in self._dependencies]))
parts.append(') {')
parts.append(self._script)
parts.append('});')
parts.append('</script>')
return ''.join(parts) | Generates the HTML representation. | Below is the the instruction that describes the task:
### Input:
Generates the HTML representation.
### Response:
def _repr_html_(self):
"""Generates the HTML representation.
"""
parts = []
if self._class:
parts.append('<div id="hh_%s" class="%s">%s</div>' % (self._id, self._class, self._markup))
else:
parts.append('<div id="hh_%s">%s</div>' % (self._id, self._markup))
if len(self._script) != 0:
parts.append('<script>')
parts.append('require([')
parts.append(','.join(['"%s"' % d[0] for d in self._dependencies]))
parts.append('], function(')
parts.append(','.join([d[1] for d in self._dependencies]))
parts.append(') {')
parts.append(self._script)
parts.append('});')
parts.append('</script>')
return ''.join(parts) |
def etag(self, href):
"""
ETag can be None if a subset of element json is using
this container, such as the case with Routing.
"""
if self and self._etag is None:
self._etag = LoadElement(href, only_etag=True)
return self._etag | ETag can be None if a subset of element json is using
this container, such as the case with Routing. | Below is the the instruction that describes the task:
### Input:
ETag can be None if a subset of element json is using
this container, such as the case with Routing.
### Response:
def etag(self, href):
    """
    Lazily fetch and cache this element's ETag from *href*.
    ETag can be None if a subset of element json is using
    this container, such as the case with Routing.
    """
    needs_fetch = bool(self) and self._etag is None
    if needs_fetch:
        self._etag = LoadElement(href, only_etag=True)
    return self._etag
def down(self, path, link, repo):
"""Download files
"""
filename = link.split("/")[-1]
if not os.path.isfile(path + filename):
Download(path, link.split(), repo).start() | Download files | Below is the the instruction that describes the task:
### Input:
Download files
### Response:
def down(self, path, link, repo):
    """Download the file named by *link* into *path* unless it already exists."""
    filename = link.split("/")[-1]
    target = path + filename
    if not os.path.isfile(target):
        Download(path, link.split(), repo).start()
def update_company(self, company):
"""Update company settings
:param company: updated settings
:type company: ``dict``
:rtype: ``bool``
"""
if not isinstance(company, dict):
raise AttributeError('company must be a <dict>')
method, url = get_URL('company_update')
payload = {
'apikey': self.config.get('apikey'),
'logintoken': self.session.cookies.get('logintoken')
}
payload.update(company)
res = getattr(self.session, method)(url, params=payload)
if res.status_code == 200:
return True
hellraiser(res) | Update company settings
:param company: updated settings
:type company: ``dict``
:rtype: ``bool`` | Below is the the instruction that describes the task:
### Input:
Update company settings
:param company: updated settings
:type company: ``dict``
:rtype: ``bool``
### Response:
def update_company(self, company):
    """Update company settings
    :param company: updated settings
    :type company: ``dict``
    :rtype: ``bool``
    """
    if not isinstance(company, dict):
        raise AttributeError('company must be a <dict>')
    method, url = get_URL('company_update')
    # Authentication fields first, then the caller's settings on top.
    payload = {
        'apikey': self.config.get('apikey'),
        'logintoken': self.session.cookies.get('logintoken'),
    }
    payload.update(company)
    response = getattr(self.session, method)(url, params=payload)
    if response.status_code == 200:
        return True
    hellraiser(response)
def str_variants(store, institute_obj, case_obj, variants_query, page=1, per_page=50):
"""Pre-process list of STR variants."""
# Nothing unique to STRs on this level. Inheritance?
return variants(store, institute_obj, case_obj, variants_query, page, per_page) | Pre-process list of STR variants. | Below is the the instruction that describes the task:
### Input:
Pre-process list of STR variants.
### Response:
def str_variants(store, institute_obj, case_obj, variants_query, page=1, per_page=50):
    """Pre-process list of STR variants."""
    # STR variants currently need nothing beyond the generic variant
    # pipeline (no STR-specific inheritance handling yet), so delegate
    # straight to variants().
    return variants(store, institute_obj, case_obj, variants_query, page, per_page)
def assume_role(credentials, account, role):
"""Use FAWS provided credentials to assume defined role."""
sts = boto3.client(
'sts',
aws_access_key_id=credentials['accessKeyId'],
aws_secret_access_key=credentials['secretAccessKey'],
aws_session_token=credentials['sessionToken'],
)
resp = sts.assume_role(
RoleArn='arn:aws:sts::{}:role/{}'.format(account, role),
RoleSessionName='fleece_assumed_role'
)
return {
'accessKeyId': resp['Credentials']['AccessKeyId'],
'secretAccessKey': resp['Credentials']['SecretAccessKey'],
'sessionToken': resp['Credentials']['SessionToken'],
} | Use FAWS provided credentials to assume defined role. | Below is the the instruction that describes the task:
### Input:
Use FAWS provided credentials to assume defined role.
### Response:
def assume_role(credentials, account, role):
"""Use FAWS provided credentials to assume defined role."""
sts = boto3.client(
'sts',
aws_access_key_id=credentials['accessKeyId'],
aws_secret_access_key=credentials['secretAccessKey'],
aws_session_token=credentials['sessionToken'],
)
resp = sts.assume_role(
RoleArn='arn:aws:sts::{}:role/{}'.format(account, role),
RoleSessionName='fleece_assumed_role'
)
return {
'accessKeyId': resp['Credentials']['AccessKeyId'],
'secretAccessKey': resp['Credentials']['SecretAccessKey'],
'sessionToken': resp['Credentials']['SessionToken'],
} |
def save(self, directory=None, append_timestep=True):
"""
Save TensorFlow model. If no checkpoint directory is given, the model's default saver
directory is used. Optionally appends current timestep to prevent overwriting previous
checkpoint files. Turn off to be able to load model from the same given path argument as
given here.
Args:
directory: Optional checkpoint directory.
append_timestep: Appends the current timestep to the checkpoint file if true.
Returns:
Checkpoint path where the model was saved.
"""
if self.flush_summarizer is not None:
self.monitored_session.run(fetches=self.flush_summarizer)
return self.saver.save(
sess=self.session,
save_path=(self.saver_directory if directory is None else directory),
global_step=(self.global_timestep if append_timestep else None),
# latest_filename=None, # Defaults to 'checkpoint'.
meta_graph_suffix='meta',
write_meta_graph=True,
write_state=True
) | Save TensorFlow model. If no checkpoint directory is given, the model's default saver
directory is used. Optionally appends current timestep to prevent overwriting previous
checkpoint files. Turn off to be able to load model from the same given path argument as
given here.
Args:
directory: Optional checkpoint directory.
append_timestep: Appends the current timestep to the checkpoint file if true.
Returns:
        Checkpoint path where the model was saved. | Below is the instruction that describes the task:
### Input:
Save TensorFlow model. If no checkpoint directory is given, the model's default saver
directory is used. Optionally appends current timestep to prevent overwriting previous
checkpoint files. Turn off to be able to load model from the same given path argument as
given here.
Args:
directory: Optional checkpoint directory.
append_timestep: Appends the current timestep to the checkpoint file if true.
Returns:
Checkpoint path where the model was saved.
### Response:
def save(self, directory=None, append_timestep=True):
"""
Save TensorFlow model. If no checkpoint directory is given, the model's default saver
directory is used. Optionally appends current timestep to prevent overwriting previous
checkpoint files. Turn off to be able to load model from the same given path argument as
given here.
Args:
directory: Optional checkpoint directory.
append_timestep: Appends the current timestep to the checkpoint file if true.
Returns:
Checkpoint path where the model was saved.
"""
if self.flush_summarizer is not None:
self.monitored_session.run(fetches=self.flush_summarizer)
return self.saver.save(
sess=self.session,
save_path=(self.saver_directory if directory is None else directory),
global_step=(self.global_timestep if append_timestep else None),
# latest_filename=None, # Defaults to 'checkpoint'.
meta_graph_suffix='meta',
write_meta_graph=True,
write_state=True
) |
def _validate_ip_address(family, address):
"""Check if `address` is valid IP address and return it, in a normalized
form.
:Parameters:
- `family`: ``socket.AF_INET`` or ``socket.AF_INET6``
- `address`: the IP address to validate
"""
try:
info = socket.getaddrinfo(address, 0, family, socket.SOCK_STREAM, 0,
socket.AI_NUMERICHOST)
except socket.gaierror, err:
logger.debug("gaierror: {0} for {1!r}".format(err, address))
raise ValueError("Bad IP address")
if not info:
logger.debug("getaddrinfo result empty")
raise ValueError("Bad IP address")
addr = info[0][4]
logger.debug(" got address: {0!r}".format(addr))
try:
return socket.getnameinfo(addr, socket.NI_NUMERICHOST)[0]
except socket.gaierror, err:
logger.debug("gaierror: {0} for {1!r}".format(err, addr))
raise ValueError("Bad IP address") | Check if `address` is valid IP address and return it, in a normalized
form.
:Parameters:
- `family`: ``socket.AF_INET`` or ``socket.AF_INET6``
    - `address`: the IP address to validate | Below is the instruction that describes the task:
### Input:
Check if `address` is valid IP address and return it, in a normalized
form.
:Parameters:
- `family`: ``socket.AF_INET`` or ``socket.AF_INET6``
- `address`: the IP address to validate
### Response:
def _validate_ip_address(family, address):
"""Check if `address` is valid IP address and return it, in a normalized
form.
:Parameters:
- `family`: ``socket.AF_INET`` or ``socket.AF_INET6``
- `address`: the IP address to validate
"""
try:
info = socket.getaddrinfo(address, 0, family, socket.SOCK_STREAM, 0,
socket.AI_NUMERICHOST)
except socket.gaierror, err:
logger.debug("gaierror: {0} for {1!r}".format(err, address))
raise ValueError("Bad IP address")
if not info:
logger.debug("getaddrinfo result empty")
raise ValueError("Bad IP address")
addr = info[0][4]
logger.debug(" got address: {0!r}".format(addr))
try:
return socket.getnameinfo(addr, socket.NI_NUMERICHOST)[0]
except socket.gaierror, err:
logger.debug("gaierror: {0} for {1!r}".format(err, addr))
raise ValueError("Bad IP address") |
def get_git_branch(git_path='git'):
"""Returns the name of the current git branch
"""
branch_match = call((git_path, 'rev-parse', '--symbolic-full-name', 'HEAD'))
if branch_match == "HEAD":
return None
else:
        return os.path.basename(branch_match) | Returns the name of the current git branch | Below is the instruction that describes the task:
### Input:
Returns the name of the current git branch
### Response:
def get_git_branch(git_path='git'):
"""Returns the name of the current git branch
"""
branch_match = call((git_path, 'rev-parse', '--symbolic-full-name', 'HEAD'))
if branch_match == "HEAD":
return None
else:
return os.path.basename(branch_match) |
def timeslot_offset_options(
interval=swingtime_settings.TIMESLOT_INTERVAL,
start_time=swingtime_settings.TIMESLOT_START_TIME,
end_delta=swingtime_settings.TIMESLOT_END_TIME_DURATION,
fmt=swingtime_settings.TIMESLOT_TIME_FORMAT
):
'''
Create a list of time slot options for use in swingtime forms.
The list is comprised of 2-tuples containing the number of seconds since the
start of the day and a 12-hour temporal representation of that offset.
'''
dt = datetime.combine(date.today(), time(0))
dtstart = datetime.combine(dt.date(), start_time)
dtend = dtstart + end_delta
options = []
delta = utils.time_delta_total_seconds(dtstart - dt)
seconds = utils.time_delta_total_seconds(interval)
while dtstart <= dtend:
options.append((delta, dtstart.strftime(fmt)))
dtstart += interval
delta += seconds
return options | Create a list of time slot options for use in swingtime forms.
The list is comprised of 2-tuples containing the number of seconds since the
    start of the day and a 12-hour temporal representation of that offset. | Below is the instruction that describes the task:
### Input:
Create a list of time slot options for use in swingtime forms.
The list is comprised of 2-tuples containing the number of seconds since the
start of the day and a 12-hour temporal representation of that offset.
### Response:
def timeslot_offset_options(
interval=swingtime_settings.TIMESLOT_INTERVAL,
start_time=swingtime_settings.TIMESLOT_START_TIME,
end_delta=swingtime_settings.TIMESLOT_END_TIME_DURATION,
fmt=swingtime_settings.TIMESLOT_TIME_FORMAT
):
'''
Create a list of time slot options for use in swingtime forms.
The list is comprised of 2-tuples containing the number of seconds since the
start of the day and a 12-hour temporal representation of that offset.
'''
dt = datetime.combine(date.today(), time(0))
dtstart = datetime.combine(dt.date(), start_time)
dtend = dtstart + end_delta
options = []
delta = utils.time_delta_total_seconds(dtstart - dt)
seconds = utils.time_delta_total_seconds(interval)
while dtstart <= dtend:
options.append((delta, dtstart.strftime(fmt)))
dtstart += interval
delta += seconds
return options |
def utcoffset(self, dt):
"""datetime -> minutes east of UTC (negative for west of UTC)."""
tt = _localtime(_mktime((dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second, dt.weekday(), 0, -1)))
if tt.tm_isdst > 0: return _dstoffset
    return _stdoffset | datetime -> minutes east of UTC (negative for west of UTC). | Below is the instruction that describes the task:
### Input:
datetime -> minutes east of UTC (negative for west of UTC).
### Response:
def utcoffset(self, dt):
"""datetime -> minutes east of UTC (negative for west of UTC)."""
tt = _localtime(_mktime((dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second, dt.weekday(), 0, -1)))
if tt.tm_isdst > 0: return _dstoffset
return _stdoffset |
def accept(self, arg1: JavaObject, arg2: JavaObject) -> None:
"""
>>> c = BiConsumer(lambda x,y : print(x + y))
>>> c.accept("foo", "bar")
foobar
"""
self.lambda_function(arg1, arg2) | >>> c = BiConsumer(lambda x,y : print(x + y))
>>> c.accept("foo", "bar")
    foobar | Below is the instruction that describes the task:
### Input:
>>> c = BiConsumer(lambda x,y : print(x + y))
>>> c.accept("foo", "bar")
foobar
### Response:
def accept(self, arg1: JavaObject, arg2: JavaObject) -> None:
"""
>>> c = BiConsumer(lambda x,y : print(x + y))
>>> c.accept("foo", "bar")
foobar
"""
self.lambda_function(arg1, arg2) |
def render_document(template_name, data_name, output_name):
"""
Combines a MarkDown template file from the aide_document package with a local associated YAML data file, then outputs the rendered combination to a local MarkDown output file.
Parameters
==========
template_name : String
Exact name of the MarkDown template file from the aide_document/templates folder. Do not use the file path.
data_name : String
Relative file path from where this method is called to the location of the YAML data file to be used.
output_name : String
Relative file path from where this method is called to the location to which the output file is written.
Examples
========
Suppose we have template.md in aide_document and a directory as follows:
data/
params.yaml
To render the document:
>>> from aide_document import combine
>>> combine.render_document('template.md', 'data/params.yaml', 'data/output.md')
This will then combine the data and template files and write to a new output file within data/.
"""
# Set up environment and load templates from pip package
env = Environment(loader=PackageLoader('aide_document')) #TODO: add custom path to templates
# Create output file, open template and data files, then combine
with open(output_name, 'w') as output_file:
output = env.get_template(template_name).render(yaml.load(open(data_name)))
output_file.write(output) | Combines a MarkDown template file from the aide_document package with a local associated YAML data file, then outputs the rendered combination to a local MarkDown output file.
Parameters
==========
template_name : String
Exact name of the MarkDown template file from the aide_document/templates folder. Do not use the file path.
data_name : String
Relative file path from where this method is called to the location of the YAML data file to be used.
output_name : String
Relative file path from where this method is called to the location to which the output file is written.
Examples
========
Suppose we have template.md in aide_document and a directory as follows:
data/
params.yaml
To render the document:
>>> from aide_document import combine
>>> combine.render_document('template.md', 'data/params.yaml', 'data/output.md')
    This will then combine the data and template files and write to a new output file within data/. | Below is the instruction that describes the task:
### Input:
Combines a MarkDown template file from the aide_document package with a local associated YAML data file, then outputs the rendered combination to a local MarkDown output file.
Parameters
==========
template_name : String
Exact name of the MarkDown template file from the aide_document/templates folder. Do not use the file path.
data_name : String
Relative file path from where this method is called to the location of the YAML data file to be used.
output_name : String
Relative file path from where this method is called to the location to which the output file is written.
Examples
========
Suppose we have template.md in aide_document and a directory as follows:
data/
params.yaml
To render the document:
>>> from aide_document import combine
>>> combine.render_document('template.md', 'data/params.yaml', 'data/output.md')
This will then combine the data and template files and write to a new output file within data/.
### Response:
def render_document(template_name, data_name, output_name):
"""
Combines a MarkDown template file from the aide_document package with a local associated YAML data file, then outputs the rendered combination to a local MarkDown output file.
Parameters
==========
template_name : String
Exact name of the MarkDown template file from the aide_document/templates folder. Do not use the file path.
data_name : String
Relative file path from where this method is called to the location of the YAML data file to be used.
output_name : String
Relative file path from where this method is called to the location to which the output file is written.
Examples
========
Suppose we have template.md in aide_document and a directory as follows:
data/
params.yaml
To render the document:
>>> from aide_document import combine
>>> combine.render_document('template.md', 'data/params.yaml', 'data/output.md')
This will then combine the data and template files and write to a new output file within data/.
"""
# Set up environment and load templates from pip package
env = Environment(loader=PackageLoader('aide_document')) #TODO: add custom path to templates
# Create output file, open template and data files, then combine
with open(output_name, 'w') as output_file:
output = env.get_template(template_name).render(yaml.load(open(data_name)))
output_file.write(output) |
def _yield_bundle_contents(self, data):
"""Yield bundle contents from the given dict.
Each item yielded will be either a string representing a file path
or a bundle."""
if isinstance(data, list):
contents = data
else:
contents = data.get('contents', [])
if isinstance(contents, six.string_types):
contents = contents,
for content in contents:
if isinstance(content, dict):
content = self._create_bundle(content)
yield content | Yield bundle contents from the given dict.
Each item yielded will be either a string representing a file path
    or a bundle. | Below is the instruction that describes the task:
### Input:
Yield bundle contents from the given dict.
Each item yielded will be either a string representing a file path
or a bundle.
### Response:
def _yield_bundle_contents(self, data):
"""Yield bundle contents from the given dict.
Each item yielded will be either a string representing a file path
or a bundle."""
if isinstance(data, list):
contents = data
else:
contents = data.get('contents', [])
if isinstance(contents, six.string_types):
contents = contents,
for content in contents:
if isinstance(content, dict):
content = self._create_bundle(content)
yield content |
def evolved_transformer_big_tpu():
"""Big parameters for Evolved Transformer model on TPU."""
hparams = add_evolved_transformer_hparams(transformer.transformer_big_tpu())
hparams.learning_rate_constant = 1 / hparams.learning_rate_warmup_steps ** 0.5
hparams.learning_rate_schedule = (
"constant*single_cycle_cos_decay")
  return hparams | Big parameters for Evolved Transformer model on TPU. | Below is the instruction that describes the task:
### Input:
Big parameters for Evolved Transformer model on TPU.
### Response:
def evolved_transformer_big_tpu():
"""Big parameters for Evolved Transformer model on TPU."""
hparams = add_evolved_transformer_hparams(transformer.transformer_big_tpu())
hparams.learning_rate_constant = 1 / hparams.learning_rate_warmup_steps ** 0.5
hparams.learning_rate_schedule = (
"constant*single_cycle_cos_decay")
return hparams |
def _create_client(provider_metadata, client_metadata, verify_ssl=True):
"""
Create a pyoidc client instance.
:param provider_metadata: provider configuration information
:type provider_metadata: Mapping[str, Union[str, Sequence[str]]]
:param client_metadata: client metadata
:type client_metadata: Mapping[str, Union[str, Sequence[str]]]
:return: client instance to use for communicating with the configured provider
:rtype: oic.oic.Client
"""
client = oic.Client(
client_authn_method=CLIENT_AUTHN_METHOD, verify_ssl=verify_ssl
)
# Provider configuration information
if "authorization_endpoint" in provider_metadata:
# no dynamic discovery necessary
client.handle_provider_config(ProviderConfigurationResponse(**provider_metadata),
provider_metadata["issuer"])
else:
# do dynamic discovery
client.provider_config(provider_metadata["issuer"])
# Client information
if "client_id" in client_metadata:
# static client info provided
client.store_registration_info(RegistrationRequest(**client_metadata))
else:
# do dynamic registration
client.register(client.provider_info['registration_endpoint'],
**client_metadata)
client.subject_type = (client.registration_response.get("subject_type") or
client.provider_info["subject_types_supported"][0])
return client | Create a pyoidc client instance.
:param provider_metadata: provider configuration information
:type provider_metadata: Mapping[str, Union[str, Sequence[str]]]
:param client_metadata: client metadata
:type client_metadata: Mapping[str, Union[str, Sequence[str]]]
:return: client instance to use for communicating with the configured provider
    :rtype: oic.oic.Client | Below is the instruction that describes the task:
### Input:
Create a pyoidc client instance.
:param provider_metadata: provider configuration information
:type provider_metadata: Mapping[str, Union[str, Sequence[str]]]
:param client_metadata: client metadata
:type client_metadata: Mapping[str, Union[str, Sequence[str]]]
:return: client instance to use for communicating with the configured provider
:rtype: oic.oic.Client
### Response:
def _create_client(provider_metadata, client_metadata, verify_ssl=True):
"""
Create a pyoidc client instance.
:param provider_metadata: provider configuration information
:type provider_metadata: Mapping[str, Union[str, Sequence[str]]]
:param client_metadata: client metadata
:type client_metadata: Mapping[str, Union[str, Sequence[str]]]
:return: client instance to use for communicating with the configured provider
:rtype: oic.oic.Client
"""
client = oic.Client(
client_authn_method=CLIENT_AUTHN_METHOD, verify_ssl=verify_ssl
)
# Provider configuration information
if "authorization_endpoint" in provider_metadata:
# no dynamic discovery necessary
client.handle_provider_config(ProviderConfigurationResponse(**provider_metadata),
provider_metadata["issuer"])
else:
# do dynamic discovery
client.provider_config(provider_metadata["issuer"])
# Client information
if "client_id" in client_metadata:
# static client info provided
client.store_registration_info(RegistrationRequest(**client_metadata))
else:
# do dynamic registration
client.register(client.provider_info['registration_endpoint'],
**client_metadata)
client.subject_type = (client.registration_response.get("subject_type") or
client.provider_info["subject_types_supported"][0])
return client |
def delete(self, name):
"""Delete object on remote"""
obj = self._get_object(name)
if obj:
        return self.driver.delete_object(obj) | Delete object on remote | Below is the instruction that describes the task:
### Input:
Delete object on remote
### Response:
def delete(self, name):
"""Delete object on remote"""
obj = self._get_object(name)
if obj:
return self.driver.delete_object(obj) |
def validate_totalflux(totalflux):
"""Check integrated flux for invalid values.
Parameters
----------
totalflux : float
Integrated flux.
Raises
------
synphot.exceptions.SynphotError
Input is zero, negative, or not a number.
"""
if totalflux <= 0.0:
raise exceptions.SynphotError('Integrated flux is <= 0')
elif np.isnan(totalflux):
raise exceptions.SynphotError('Integrated flux is NaN')
elif np.isinf(totalflux):
raise exceptions.SynphotError('Integrated flux is infinite') | Check integrated flux for invalid values.
Parameters
----------
totalflux : float
Integrated flux.
Raises
------
synphot.exceptions.SynphotError
        Input is zero, negative, or not a number. | Below is the instruction that describes the task:
### Input:
Check integrated flux for invalid values.
Parameters
----------
totalflux : float
Integrated flux.
Raises
------
synphot.exceptions.SynphotError
Input is zero, negative, or not a number.
### Response:
def validate_totalflux(totalflux):
"""Check integrated flux for invalid values.
Parameters
----------
totalflux : float
Integrated flux.
Raises
------
synphot.exceptions.SynphotError
Input is zero, negative, or not a number.
"""
if totalflux <= 0.0:
raise exceptions.SynphotError('Integrated flux is <= 0')
elif np.isnan(totalflux):
raise exceptions.SynphotError('Integrated flux is NaN')
elif np.isinf(totalflux):
raise exceptions.SynphotError('Integrated flux is infinite') |
def message(self, msg='', level=1, tab=0):
'''Print a message to the console'''
if self.verbosity >= level:
        self.stdout.write('{}{}'.format(' ' * tab, msg)) | Print a message to the console | Below is the instruction that describes the task:
### Input:
Print a message to the console
### Response:
def message(self, msg='', level=1, tab=0):
'''Print a message to the console'''
if self.verbosity >= level:
self.stdout.write('{}{}'.format(' ' * tab, msg)) |
def save(self):
"""
Creates a new user and account. Returns the newly created user.
"""
username, email, password = (self.cleaned_data['username'],
self.cleaned_data['email'],
self.cleaned_data['password1'])
user = get_user_model().objects.create_user(username, email, password,
not defaults.ACCOUNTS_ACTIVATION_REQUIRED,
defaults.ACCOUNTS_ACTIVATION_REQUIRED)
    return user | Creates a new user and account. Returns the newly created user. | Below is the instruction that describes the task:
### Input:
Creates a new user and account. Returns the newly created user.
### Response:
def save(self):
"""
Creates a new user and account. Returns the newly created user.
"""
username, email, password = (self.cleaned_data['username'],
self.cleaned_data['email'],
self.cleaned_data['password1'])
user = get_user_model().objects.create_user(username, email, password,
not defaults.ACCOUNTS_ACTIVATION_REQUIRED,
defaults.ACCOUNTS_ACTIVATION_REQUIRED)
return user |
def remove(self, addon, dev=False):
"""Remove a dependency and uninstall it."""
dependencies = self.get_dependency_manager(dev=dev)
other_dependencies = self.get_dependency_manager(dev=not dev)
self.stdout.write(style.format_command('Removing', addon))
removed = dependencies.remove(addon, warn=False)
if not removed:
removed = other_dependencies.remove(addon, warn=False)
if removed:
self.build()
else:
exception = '%s is not installed.' % Dependency(addon).to_stdout()
        self.stdout.write(style.red(exception)) | Remove a dependency and uninstall it. | Below is the instruction that describes the task:
### Input:
Remove a dependency and uninstall it.
### Response:
def remove(self, addon, dev=False):
"""Remove a dependency and uninstall it."""
dependencies = self.get_dependency_manager(dev=dev)
other_dependencies = self.get_dependency_manager(dev=not dev)
self.stdout.write(style.format_command('Removing', addon))
removed = dependencies.remove(addon, warn=False)
if not removed:
removed = other_dependencies.remove(addon, warn=False)
if removed:
self.build()
else:
exception = '%s is not installed.' % Dependency(addon).to_stdout()
self.stdout.write(style.red(exception)) |
def get_objanno(fin_anno, anno_type=None, **kws):
"""Read annotations in GAF, GPAD, Entrez gene2go, or text format."""
# kws get_objanno: taxids hdr_only prt allow_missing_symbol
anno_type = get_anno_desc(fin_anno, anno_type)
if anno_type is not None:
if anno_type == 'gene2go':
# kws: taxid taxids
return Gene2GoReader(fin_anno, **kws)
if anno_type == 'gaf':
return GafReader(fin_anno,
hdr_only=kws.get('hdr_only', False),
prt=kws.get('prt', sys.stdout),
allow_missing_symbol=kws.get('allow_missing_symbol', False))
if anno_type == 'gpad':
hdr_only = kws.get('hdr_only', False)
return GpadReader(fin_anno, hdr_only)
if anno_type == 'id2gos':
return IdToGosReader(fin_anno)
raise RuntimeError('UNEXPECTED ANNOTATION FILE FORMAT: {F} {D}'.format(
        F=fin_anno, D=anno_type)) | Read annotations in GAF, GPAD, Entrez gene2go, or text format. | Below is the instruction that describes the task:
### Input:
Read annotations in GAF, GPAD, Entrez gene2go, or text format.
### Response:
def get_objanno(fin_anno, anno_type=None, **kws):
"""Read annotations in GAF, GPAD, Entrez gene2go, or text format."""
# kws get_objanno: taxids hdr_only prt allow_missing_symbol
anno_type = get_anno_desc(fin_anno, anno_type)
if anno_type is not None:
if anno_type == 'gene2go':
# kws: taxid taxids
return Gene2GoReader(fin_anno, **kws)
if anno_type == 'gaf':
return GafReader(fin_anno,
hdr_only=kws.get('hdr_only', False),
prt=kws.get('prt', sys.stdout),
allow_missing_symbol=kws.get('allow_missing_symbol', False))
if anno_type == 'gpad':
hdr_only = kws.get('hdr_only', False)
return GpadReader(fin_anno, hdr_only)
if anno_type == 'id2gos':
return IdToGosReader(fin_anno)
raise RuntimeError('UNEXPECTED ANNOTATION FILE FORMAT: {F} {D}'.format(
F=fin_anno, D=anno_type)) |
def list_policies(zap_helper, policy_ids):
"""
Get a list of policies and whether or not they are enabled.
"""
policies = filter_by_ids(zap_helper.zap.ascan.policies(), policy_ids)
click.echo(tabulate([[p['id'], p['name'], p['enabled'], p['attackStrength'], p['alertThreshold']]
for p in policies],
headers=['ID', 'Name', 'Enabled', 'Strength', 'Threshold'],
                        tablefmt='grid')) | Get a list of policies and whether or not they are enabled. | Below is the instruction that describes the task:
### Input:
Get a list of policies and whether or not they are enabled.
### Response:
def list_policies(zap_helper, policy_ids):
"""
Get a list of policies and whether or not they are enabled.
"""
policies = filter_by_ids(zap_helper.zap.ascan.policies(), policy_ids)
click.echo(tabulate([[p['id'], p['name'], p['enabled'], p['attackStrength'], p['alertThreshold']]
for p in policies],
headers=['ID', 'Name', 'Enabled', 'Strength', 'Threshold'],
tablefmt='grid')) |
def append_result(self, results, num_matches):
"""Real-time update of search results"""
filename, lineno, colno, match_end, line = results
if filename not in self.files:
file_item = FileMatchItem(self, filename, self.sorting,
self.text_color)
file_item.setExpanded(True)
self.files[filename] = file_item
self.num_files += 1
search_text = self.search_text
title = "'%s' - " % search_text
nb_files = self.num_files
if nb_files == 0:
text = _('String not found')
else:
text_matches = _('matches in')
text_files = _('file')
if nb_files > 1:
text_files += 's'
text = "%d %s %d %s" % (num_matches, text_matches,
nb_files, text_files)
self.set_title(title + text)
file_item = self.files[filename]
line = self.truncate_result(line, colno, match_end)
item = LineMatchItem(file_item, lineno, colno, line, self.text_color)
        self.data[id(item)] = (filename, lineno, colno) | Real-time update of search results | Below is the instruction that describes the task:
### Input:
Real-time update of search results
### Response:
def append_result(self, results, num_matches):
"""Real-time update of search results"""
filename, lineno, colno, match_end, line = results
if filename not in self.files:
file_item = FileMatchItem(self, filename, self.sorting,
self.text_color)
file_item.setExpanded(True)
self.files[filename] = file_item
self.num_files += 1
search_text = self.search_text
title = "'%s' - " % search_text
nb_files = self.num_files
if nb_files == 0:
text = _('String not found')
else:
text_matches = _('matches in')
text_files = _('file')
if nb_files > 1:
text_files += 's'
text = "%d %s %d %s" % (num_matches, text_matches,
nb_files, text_files)
self.set_title(title + text)
file_item = self.files[filename]
line = self.truncate_result(line, colno, match_end)
item = LineMatchItem(file_item, lineno, colno, line, self.text_color)
self.data[id(item)] = (filename, lineno, colno) |
def save_callback(operation, graphdef):
'''callback from save thread'''
if operation == 'test':
for e in graphdef.expressions:
if expression_ok(e):
graphdef.expression = e
display_graph(graphdef)
return
mestate.console.writeln('Invalid graph expressions', fg='red')
return
if operation == 'save':
        save_graph(graphdef) | callback from save thread | Below is the instruction that describes the task:
### Input:
callback from save thread
### Response:
def save_callback(operation, graphdef):
'''callback from save thread'''
if operation == 'test':
for e in graphdef.expressions:
if expression_ok(e):
graphdef.expression = e
display_graph(graphdef)
return
mestate.console.writeln('Invalid graph expressions', fg='red')
return
if operation == 'save':
save_graph(graphdef) |
def rupture_to_element(rup, parent=None):
"""
Convert a rupture object into an Element object.
:param rup:
must have attributes .rupid, .events_by_ses and .seed
:param parent:
parent of the returned element, or None
"""
if parent is None:
parent = et.Element('root')
rup_elem = et.SubElement(parent, rup.typology)
elem = et.SubElement(rup_elem, 'stochasticEventSets')
n = 0
for ses in rup.events_by_ses:
eids = rup.events_by_ses[ses]['eid']
n += len(eids)
ses_elem = et.SubElement(elem, 'SES', id=ses)
ses_elem.text = ' '.join(str(eid) for eid in eids)
rup_elem.set('id', rup.rupid)
rup_elem.set('multiplicity', str(n))
sub_elems(rup_elem, rup, 'magnitude', 'strike', 'dip', 'rake')
h = rup.hypocenter
et.SubElement(rup_elem, 'hypocenter', dict(lon=h.x, lat=h.y, depth=h.z))
if rup.is_from_fault_source:
# rup is from a simple or complex fault source
# the rup geometry is represented by a mesh of 3D
# points
mesh_elem = et.SubElement(rup_elem, 'mesh')
# we assume the mesh components (lons, lats, depths)
# are of uniform shape
for i, row in enumerate(rup.lons):
for j, col in enumerate(row):
node_elem = et.SubElement(mesh_elem, 'node')
node_elem.set('row', str(i))
node_elem.set('col', str(j))
node_elem.set('lon', str(rup.lons[i][j]))
node_elem.set('lat', str(rup.lats[i][j]))
node_elem.set('depth', str(rup.depths[i][j]))
# if we never entered the loop above, it's possible
# that i and j will be undefined
mesh_elem.set('rows', str(i + 1))
mesh_elem.set('cols', str(j + 1))
elif rup.is_gridded_surface:
# the rup geometry is represented by a mesh of (1, N) points
mesh_elem = et.SubElement(rup_elem, 'mesh')
for j, _ in enumerate(rup.lons):
node_elem = et.SubElement(mesh_elem, 'node')
node_elem.set('row', '0')
node_elem.set('col', str(j))
node_elem.set('lon', str(rup.lons[j]))
node_elem.set('lat', str(rup.lats[j]))
node_elem.set('depth', str(rup.depths[j]))
else:
# rupture is from a multi surface fault source
if rup.is_multi_surface:
# the arrays lons, lats and depths contain 4*N elements,
# where N is the number of planar surfaces contained in the
# multisurface; each planar surface if characterised by 4
# vertices top_left, top_right, bottom_left, bottom_right
assert len(rup.lons) % 4 == 0
assert len(rup.lons) == len(rup.lats) == len(rup.depths)
for offset in range(len(rup.lons) // 4):
# looping on the coordinates of the sub surfaces, one
# planar surface at the time
start = offset * 4
end = offset * 4 + 4
lons = rup.lons[start:end] # 4 lons of the current surface
lats = rup.lats[start:end] # 4 lats of the current surface
depths = rup.depths[start:end] # 4 depths
ps_elem = et.SubElement(
rup_elem, 'planarSurface')
top_left, top_right, bottom_left, bottom_right = \
zip(lons, lats, depths)
for el_name, corner in (
('topLeft', top_left),
('topRight', top_right),
('bottomLeft', bottom_left),
('bottomRight', bottom_right)):
corner_elem = et.SubElement(ps_elem, el_name)
corner_elem.set('lon', '%.7f' % corner[0])
corner_elem.set('lat', '%.7f' % corner[1])
corner_elem.set('depth', '%.7f' % corner[2])
else:
# rupture is from a point or area source
# the rupture geometry is represented by four 3D
# corner points
ps_elem = et.SubElement(rup_elem, 'planarSurface')
# create the corner point elements, in the order of:
# * top left
# * top right
# * bottom left
# * bottom right
for el_name, corner in (
('topLeft', rup.top_left_corner),
('topRight', rup.top_right_corner),
('bottomLeft', rup.bottom_left_corner),
('bottomRight', rup.bottom_right_corner)):
corner_elem = et.SubElement(ps_elem, el_name)
corner_elem.set('lon', '%.7f' % corner[0])
corner_elem.set('lat', '%.7f' % corner[1])
corner_elem.set('depth', '%.7f' % corner[2])
return parent | Convert a rupture object into an Element object.
:param rup:
must have attributes .rupid, .events_by_ses and .seed
:param parent:
        parent of the returned element, or None | Below is the instruction that describes the task:
### Input:
Convert a rupture object into an Element object.
:param rup:
must have attributes .rupid, .events_by_ses and .seed
:param parent:
parent of the returned element, or None
### Response:
def rupture_to_element(rup, parent=None):
"""
Convert a rupture object into an Element object.
:param rup:
must have attributes .rupid, .events_by_ses and .seed
:param parent:
parent of the returned element, or None
"""
# NOTE(review): the docstring mentions rup.seed but it is never used below.
if parent is None:
parent = et.Element('root')
rup_elem = et.SubElement(parent, rup.typology)
elem = et.SubElement(rup_elem, 'stochasticEventSets')
# n accumulates the total number of events across all SESs; it becomes the
# rupture "multiplicity" attribute below.
n = 0
for ses in rup.events_by_ses:
eids = rup.events_by_ses[ses]['eid']
n += len(eids)
ses_elem = et.SubElement(elem, 'SES', id=ses)
ses_elem.text = ' '.join(str(eid) for eid in eids)
# NOTE(review): ElementTree requires string attribute values; rup.rupid is
# passed unconverted here -- confirm it is always a str at the call sites.
rup_elem.set('id', rup.rupid)
rup_elem.set('multiplicity', str(n))
sub_elems(rup_elem, rup, 'magnitude', 'strike', 'dip', 'rake')
h = rup.hypocenter
# NOTE(review): h.x/h.y/h.z are not str()-converted, unlike the node and
# corner coordinates below -- confirm they are already strings.
et.SubElement(rup_elem, 'hypocenter', dict(lon=h.x, lat=h.y, depth=h.z))
if rup.is_from_fault_source:
# rup is from a simple or complex fault source
# the rup geometry is represented by a mesh of 3D
# points
mesh_elem = et.SubElement(rup_elem, 'mesh')
# we assume the mesh components (lons, lats, depths)
# are of uniform shape
for i, row in enumerate(rup.lons):
for j, col in enumerate(row):
node_elem = et.SubElement(mesh_elem, 'node')
node_elem.set('row', str(i))
node_elem.set('col', str(j))
node_elem.set('lon', str(rup.lons[i][j]))
node_elem.set('lat', str(rup.lats[i][j]))
node_elem.set('depth', str(rup.depths[i][j]))
# if we never entered the loop above, it's possible
# that i and j will be undefined
mesh_elem.set('rows', str(i + 1))
mesh_elem.set('cols', str(j + 1))
elif rup.is_gridded_surface:
# the rup geometry is represented by a mesh of (1, N) points
mesh_elem = et.SubElement(rup_elem, 'mesh')
for j, _ in enumerate(rup.lons):
node_elem = et.SubElement(mesh_elem, 'node')
node_elem.set('row', '0')
node_elem.set('col', str(j))
node_elem.set('lon', str(rup.lons[j]))
node_elem.set('lat', str(rup.lats[j]))
node_elem.set('depth', str(rup.depths[j]))
else:
# rupture is from a multi surface fault source
if rup.is_multi_surface:
# the arrays lons, lats and depths contain 4*N elements,
# where N is the number of planar surfaces contained in the
# multisurface; each planar surface if characterised by 4
# vertices top_left, top_right, bottom_left, bottom_right
assert len(rup.lons) % 4 == 0
assert len(rup.lons) == len(rup.lats) == len(rup.depths)
for offset in range(len(rup.lons) // 4):
# looping on the coordinates of the sub surfaces, one
# planar surface at the time
start = offset * 4
end = offset * 4 + 4
lons = rup.lons[start:end] # 4 lons of the current surface
lats = rup.lats[start:end] # 4 lats of the current surface
depths = rup.depths[start:end] # 4 depths
ps_elem = et.SubElement(
rup_elem, 'planarSurface')
# transpose the three 4-element coordinate arrays into the
# four (lon, lat, depth) corner tuples
top_left, top_right, bottom_left, bottom_right = \
zip(lons, lats, depths)
for el_name, corner in (
('topLeft', top_left),
('topRight', top_right),
('bottomLeft', bottom_left),
('bottomRight', bottom_right)):
corner_elem = et.SubElement(ps_elem, el_name)
corner_elem.set('lon', '%.7f' % corner[0])
corner_elem.set('lat', '%.7f' % corner[1])
corner_elem.set('depth', '%.7f' % corner[2])
else:
# rupture is from a point or area source
# the rupture geometry is represented by four 3D
# corner points
ps_elem = et.SubElement(rup_elem, 'planarSurface')
# create the corner point elements, in the order of:
# * top left
# * top right
# * bottom left
# * bottom right
for el_name, corner in (
('topLeft', rup.top_left_corner),
('topRight', rup.top_right_corner),
('bottomLeft', rup.bottom_left_corner),
('bottomRight', rup.bottom_right_corner)):
corner_elem = et.SubElement(ps_elem, el_name)
corner_elem.set('lon', '%.7f' % corner[0])
corner_elem.set('lat', '%.7f' % corner[1])
corner_elem.set('depth', '%.7f' % corner[2])
return parent |
def got_arbiter_module_type_defined(self, module_type):
"""Check if a module type is defined in one of the arbiters
Also check the module name
:param module_type: module type to search for
:type module_type: str
:return: True if mod_type is found else False
:rtype: bool
TODO: Factorize it with got_broker_module_type_defined:
"""
for arbiter in self.arbiters:
# Do like the linkify will do after....
for module in getattr(arbiter, 'modules', []):
# So look at what the arbiter try to call as module
module_name = module.get_name()
# Ok, now look in modules...
for mod in self.modules:
# try to see if this module is the good type
if getattr(mod, 'python_name', '').strip() == module_type.strip():
# if so, the good name?
# NOTE(review): mod.name is stripped but module_name is not, so
# an arbiter-side name with surrounding whitespace can never
# match -- confirm this asymmetry is intended.
if getattr(mod, 'name', '').strip() == module_name:
return True
# No arbiter references a module of this type under a known module name.
return False | Check if a module type is defined in one of the arbiters
Also check the module name
:param module_type: module type to search for
:type module_type: str
:return: True if mod_type is found else False
:rtype: bool
TODO: Factorize it with got_broker_module_type_defined: | Below is the instruction that describes the task:
### Input:
Check if a module type is defined in one of the arbiters
Also check the module name
:param module_type: module type to search for
:type module_type: str
:return: True if mod_type is found else False
:rtype: bool
TODO: Factorize it with got_broker_module_type_defined:
### Response:
def got_arbiter_module_type_defined(self, module_type):
"""Check if a module type is defined in one of the arbiters
Also check the module name
:param module_type: module type to search for
:type module_type: str
:return: True if mod_type is found else False
:rtype: bool
TODO: Factorize it with got_broker_module_type_defined:
"""
for arbiter in self.arbiters:
# Do like the linkify will do after....
for module in getattr(arbiter, 'modules', []):
# So look at what the arbiter try to call as module
module_name = module.get_name()
# Ok, now look in modules...
for mod in self.modules:
# try to see if this module is the good type
if getattr(mod, 'python_name', '').strip() == module_type.strip():
# if so, the good name?
# NOTE(review): mod.name is stripped but module_name is not, so
# an arbiter-side name with surrounding whitespace can never
# match -- confirm this asymmetry is intended.
if getattr(mod, 'name', '').strip() == module_name:
return True
# No arbiter references a module of this type under a known module name.
return False |
def filer(filelist):
"""
Helper script that creates a dictionary of the strain name: /sequencepath/strain_name.extension)
:param filelist: list of files to parse
:return filedict: dictionary of strain name: /sequencepath/strain_name.extension
"""
# Initialise the dictionary
filedict = dict()
for seqfile in filelist:
# Split off the file extension and remove the path from the name
strainname = os.path.splitext(os.path.basename(seqfile))[0]
# Populate the dictionary
# NOTE(review): files sharing a basename in different directories silently
# overwrite earlier entries -- confirm that is acceptable.
filedict[strainname] = seqfile
return filedict | Helper script that creates a dictionary of the stain name: /sequencepath/strain_name.extension)
:param filelist: list of files to parse
:return filedict: dictionary of strain name: /sequencepath/strain_name.extension | Below is the instruction that describes the task:
### Input:
Helper script that creates a dictionary of the strain name: /sequencepath/strain_name.extension)
:param filelist: list of files to parse
:return filedict: dictionary of strain name: /sequencepath/strain_name.extension
### Response:
def filer(filelist):
"""
Helper script that creates a dictionary of the strain name: /sequencepath/strain_name.extension)
:param filelist: list of files to parse
:return filedict: dictionary of strain name: /sequencepath/strain_name.extension
"""
# Initialise the dictionary
filedict = dict()
for seqfile in filelist:
# Split off the file extension and remove the path from the name
strainname = os.path.splitext(os.path.basename(seqfile))[0]
# Populate the dictionary
# NOTE(review): files sharing a basename in different directories silently
# overwrite earlier entries -- confirm that is acceptable.
filedict[strainname] = seqfile
return filedict |
def add_to_buffer(self, content, read_position):
"""Add additional bytes content as read from the read_position.
Args:
content (bytes): data to be added to buffer working BufferWorkSpac.
read_position (int): where in the file pointer the data was read from.
"""
# Record the file offset this chunk came from; presumably the lowest
# offset covered by the buffer so far -- TODO confirm against the caller.
self.read_position = read_position
if self.read_buffer is None:
self.read_buffer = content
else:
# PREPEND new content: chunks appear to arrive from progressively
# earlier file positions (reading the file backwards in blocks).
self.read_buffer = content + self.read_buffer | Add additional bytes content as read from the read_position.
Args:
content (bytes): data to be added to buffer working BufferWorkSpac.
read_position (int): where in the file pointer the data was read from. | Below is the instruction that describes the task:
### Input:
Add additional bytes content as read from the read_position.
Args:
content (bytes): data to be added to buffer working BufferWorkSpac.
read_position (int): where in the file pointer the data was read from.
### Response:
def add_to_buffer(self, content, read_position):
"""Add additional bytes content as read from the read_position.
Args:
content (bytes): data to be added to buffer working BufferWorkSpac.
read_position (int): where in the file pointer the data was read from.
"""
# Record the file offset this chunk came from; presumably the lowest
# offset covered by the buffer so far -- TODO confirm against the caller.
self.read_position = read_position
if self.read_buffer is None:
self.read_buffer = content
else:
# PREPEND new content: chunks appear to arrive from progressively
# earlier file positions (reading the file backwards in blocks).
self.read_buffer = content + self.read_buffer |
def _find_conda():
"""Find the conda executable robustly across conda versions.
Returns
-------
conda : str
Path to the conda executable.
Raises
------
IOError
If the executable cannot be found in either the CONDA_EXE environment
variable or in the PATH.
Notes
-----
In POSIX platforms in conda >= 4.4, conda can be set up as a bash function
rather than an executable. (This is to enable the syntax
``conda activate env-name``.) In this case, the environment variable
``CONDA_EXE`` contains the path to the conda executable. In other cases,
we use standard search for the appropriate name in the PATH.
See https://github.com/airspeed-velocity/asv/issues/645 for more details.
"""
# Prefer the explicit path exported by conda >= 4.4, which is set even when
# "conda" itself is only a shell function and thus not on the PATH.
if 'CONDA_EXE' in os.environ:
conda = os.environ['CONDA_EXE']
else:
# NOTE(review): the IOError documented above is presumably raised here
# by util.which when conda is not on the PATH -- confirm.
conda = util.which('conda')
return conda | Find the conda executable robustly across conda versions.
Returns
-------
conda : str
Path to the conda executable.
Raises
------
IOError
If the executable cannot be found in either the CONDA_EXE environment
variable or in the PATH.
Notes
-----
In POSIX platforms in conda >= 4.4, conda can be set up as a bash function
rather than an executable. (This is to enable the syntax
``conda activate env-name``.) In this case, the environment variable
``CONDA_EXE`` contains the path to the conda executable. In other cases,
we use standard search for the appropriate name in the PATH.
See https://github.com/airspeed-velocity/asv/issues/645 for more details. | Below is the instruction that describes the task:
### Input:
Find the conda executable robustly across conda versions.
Returns
-------
conda : str
Path to the conda executable.
Raises
------
IOError
If the executable cannot be found in either the CONDA_EXE environment
variable or in the PATH.
Notes
-----
In POSIX platforms in conda >= 4.4, conda can be set up as a bash function
rather than an executable. (This is to enable the syntax
``conda activate env-name``.) In this case, the environment variable
``CONDA_EXE`` contains the path to the conda executable. In other cases,
we use standard search for the appropriate name in the PATH.
See https://github.com/airspeed-velocity/asv/issues/645 for more details.
### Response:
def _find_conda():
"""Find the conda executable robustly across conda versions.
Returns
-------
conda : str
Path to the conda executable.
Raises
------
IOError
If the executable cannot be found in either the CONDA_EXE environment
variable or in the PATH.
Notes
-----
In POSIX platforms in conda >= 4.4, conda can be set up as a bash function
rather than an executable. (This is to enable the syntax
``conda activate env-name``.) In this case, the environment variable
``CONDA_EXE`` contains the path to the conda executable. In other cases,
we use standard search for the appropriate name in the PATH.
See https://github.com/airspeed-velocity/asv/issues/645 for more details.
"""
# Prefer the explicit path exported by conda >= 4.4, which is set even when
# "conda" itself is only a shell function and thus not on the PATH.
if 'CONDA_EXE' in os.environ:
conda = os.environ['CONDA_EXE']
else:
# NOTE(review): the IOError documented above is presumably raised here
# by util.which when conda is not on the PATH -- confirm.
conda = util.which('conda')
return conda |
def get_format(self):
""" Returns a QTextCharFormat that encodes the current style attributes.
"""
# NOTE(review): the local name "format" shadows the builtin format();
# harmless here but worth renaming.
format = QtGui.QTextCharFormat()
# Set foreground color
# get_color presumably returns None for "use the default"; in that case
# the corresponding role is left unset on the format -- TODO confirm.
qcolor = self.get_color(self.foreground_color, self.intensity)
if qcolor is not None:
format.setForeground(qcolor)
# Set background color
qcolor = self.get_color(self.background_color, self.intensity)
if qcolor is not None:
format.setBackground(qcolor)
# Set font weight/style options
if self.bold:
format.setFontWeight(QtGui.QFont.Bold)
else:
format.setFontWeight(QtGui.QFont.Normal)
format.setFontItalic(self.italic)
format.setFontUnderline(self.underline)
return format | Returns a QTextCharFormat that encodes the current style attributes. | Below is the the instruction that describes the task:
### Input:
Returns a QTextCharFormat that encodes the current style attributes.
### Response:
def get_format(self):
""" Returns a QTextCharFormat that encodes the current style attributes.
"""
# NOTE(review): the local name "format" shadows the builtin format();
# harmless here but worth renaming.
format = QtGui.QTextCharFormat()
# Set foreground color
# get_color presumably returns None for "use the default"; in that case
# the corresponding role is left unset on the format -- TODO confirm.
qcolor = self.get_color(self.foreground_color, self.intensity)
if qcolor is not None:
format.setForeground(qcolor)
# Set background color
qcolor = self.get_color(self.background_color, self.intensity)
if qcolor is not None:
format.setBackground(qcolor)
# Set font weight/style options
if self.bold:
format.setFontWeight(QtGui.QFont.Bold)
else:
format.setFontWeight(QtGui.QFont.Normal)
format.setFontItalic(self.italic)
format.setFontUnderline(self.underline)
return format |
def run_excel_to_html():
"""
Run the excel_to_html function from the
command-line.
Args:
-p path to file
-s name of the sheet to convert
-css classes to apply
-m attempt to combine merged cells
-c caption for accessibility
-su summary for accessibility
-d details for accessibility
Example use:
excel_to_html -p myfile.xlsx -s SheetName -css diablo-python -m true
"""
# NOTE(review): the docstring documents -su, but no such flag is defined
# below; the summary is carried inside -d instead.
# Capture commandline arguments. prog='' argument must
# match the command name in setup.py entry_points
parser = argparse.ArgumentParser(prog='excel_to_html')
parser.add_argument('-p', nargs='?', help='Path to an excel file for conversion.')
parser.add_argument(
'-s',
nargs='?',
help='The name of a sheet in our excel file. Defaults to "Sheet1".',
)
parser.add_argument(
'-css', nargs='?', help='Space separated css classes to append to the table.'
)
parser.add_argument(
'-m', action='store_true', help='Merge, attempt to combine merged cells.'
)
parser.add_argument(
'-c', nargs='?', help='Caption for creating an accessible table.'
)
parser.add_argument(
'-d',
nargs='?',
help='Two strings separated by a | character. The first string \
is for the html "summary" attribute and the second string is for the html "details" attribute. \
both values must be provided and nothing more.',
)
parser.add_argument(
'-r', action='store_true', help='Row headers. Does the table have row headers?'
)
args = parser.parse_args()
# NOTE(review): this dict only mirrors args; args.p etc. could be used
# directly, removing the indirection below.
inputs = {
'p': args.p,
's': args.s,
'css': args.css,
'm': args.m,
'c': args.c,
'd': args.d,
'r': args.r,
}
p = inputs['p']
s = inputs['s'] if inputs['s'] else 'Sheet1'
css = inputs['css'] if inputs['css'] else ''
# -m and -r are store_true flags, so these "else False" fallbacks are
# redundant (the values are already booleans).
m = inputs['m'] if inputs['m'] else False
c = inputs['c'] if inputs['c'] else ''
d = inputs['d'].split('|') if inputs['d'] else []
r = inputs['r'] if inputs['r'] else False
# NOTE(review): fp is a module-level import not visible in this chunk.
html = fp.excel_to_html(
p, sheetname=s, css_classes=css, caption=c, details=d, row_headers=r, merge=m
)
print(html) | Run the excel_to_html function from the
command-line.
Args:
-p path to file
-s name of the sheet to convert
-css classes to apply
-m attempt to combine merged cells
-c caption for accessibility
-su summary for accessibility
-d details for accessibility
Example use:
excel_to_html -p myfile.xlsx -s SheetName -css diablo-python -m true | Below is the instruction that describes the task:
### Input:
Run the excel_to_html function from the
command-line.
Args:
-p path to file
-s name of the sheet to convert
-css classes to apply
-m attempt to combine merged cells
-c caption for accessibility
-su summary for accessibility
-d details for accessibility
Example use:
excel_to_html -p myfile.xlsx -s SheetName -css diablo-python -m true
### Response:
def run_excel_to_html():
"""
Run the excel_to_html function from the
command-line.
Args:
-p path to file
-s name of the sheet to convert
-css classes to apply
-m attempt to combine merged cells
-c caption for accessibility
-su summary for accessibility
-d details for accessibility
Example use:
excel_to_html -p myfile.xlsx -s SheetName -css diablo-python -m true
"""
# NOTE(review): the docstring documents -su, but no such flag is defined
# below; the summary is carried inside -d instead.
# Capture commandline arguments. prog='' argument must
# match the command name in setup.py entry_points
parser = argparse.ArgumentParser(prog='excel_to_html')
parser.add_argument('-p', nargs='?', help='Path to an excel file for conversion.')
parser.add_argument(
'-s',
nargs='?',
help='The name of a sheet in our excel file. Defaults to "Sheet1".',
)
parser.add_argument(
'-css', nargs='?', help='Space separated css classes to append to the table.'
)
parser.add_argument(
'-m', action='store_true', help='Merge, attempt to combine merged cells.'
)
parser.add_argument(
'-c', nargs='?', help='Caption for creating an accessible table.'
)
parser.add_argument(
'-d',
nargs='?',
help='Two strings separated by a | character. The first string \
is for the html "summary" attribute and the second string is for the html "details" attribute. \
both values must be provided and nothing more.',
)
parser.add_argument(
'-r', action='store_true', help='Row headers. Does the table have row headers?'
)
args = parser.parse_args()
# NOTE(review): this dict only mirrors args; args.p etc. could be used
# directly, removing the indirection below.
inputs = {
'p': args.p,
's': args.s,
'css': args.css,
'm': args.m,
'c': args.c,
'd': args.d,
'r': args.r,
}
p = inputs['p']
s = inputs['s'] if inputs['s'] else 'Sheet1'
css = inputs['css'] if inputs['css'] else ''
# -m and -r are store_true flags, so these "else False" fallbacks are
# redundant (the values are already booleans).
m = inputs['m'] if inputs['m'] else False
c = inputs['c'] if inputs['c'] else ''
d = inputs['d'].split('|') if inputs['d'] else []
r = inputs['r'] if inputs['r'] else False
# NOTE(review): fp is a module-level import not visible in this chunk.
html = fp.excel_to_html(
p, sheetname=s, css_classes=css, caption=c, details=d, row_headers=r, merge=m
)
print(html) |
def read_file(name):
"""Read file name (without extension) to string."""
# Resolve candidates relative to this module's directory, not the CWD.
cur_path = os.path.dirname(__file__)
# Extensions are tried in order, so <name>.txt wins over <name>.rst.
exts = ('txt', 'rst')
for ext in exts:
path = os.path.join(cur_path, '.'.join((name, ext)))
if os.path.exists(path):
# NOTE(review): no encoding given -- uses the platform default.
with open(path, 'rt') as file_obj:
return file_obj.read()
# No candidate found; callers cannot distinguish this from an empty file.
return '' | Read file name (without extension) to string. | Below is the the instruction that describes the task:
### Input:
Read file name (without extension) to string.
### Response:
def read_file(name):
"""Read file name (without extension) to string."""
# Resolve candidates relative to this module's directory, not the CWD.
cur_path = os.path.dirname(__file__)
# Extensions are tried in order, so <name>.txt wins over <name>.rst.
exts = ('txt', 'rst')
for ext in exts:
path = os.path.join(cur_path, '.'.join((name, ext)))
if os.path.exists(path):
# NOTE(review): no encoding given -- uses the platform default.
with open(path, 'rt') as file_obj:
return file_obj.read()
# No candidate found; callers cannot distinguish this from an empty file.
return '' |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.