code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def _get_dump_item_context(self, index, name, opts):
"""
Return a formated dict context
"""
c = {
'item_no': index,
'label': name,
'name': name,
'models': ' '.join(opts['models']),
'natural_key': '',
}
if opts.get('use_natural_key', False):
c['natural_key'] = ' -n'
c.update(self.get_global_context())
return c | Return a formated dict context | Below is the the instruction that describes the task:
### Input:
Return a formated dict context
### Response:
def _get_dump_item_context(self, index, name, opts):
"""
Return a formated dict context
"""
c = {
'item_no': index,
'label': name,
'name': name,
'models': ' '.join(opts['models']),
'natural_key': '',
}
if opts.get('use_natural_key', False):
c['natural_key'] = ' -n'
c.update(self.get_global_context())
return c |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'query_id') and self.query_id is not None:
_dict['query_id'] = self.query_id
if hasattr(self, 'natural_language_query'
) and self.natural_language_query is not None:
_dict['natural_language_query'] = self.natural_language_query
if hasattr(self, 'filter') and self.filter is not None:
_dict['filter'] = self.filter
if hasattr(self, 'examples') and self.examples is not None:
_dict['examples'] = [x._to_dict() for x in self.examples]
return _dict | Return a json dictionary representing this model. | Below is the the instruction that describes the task:
### Input:
Return a json dictionary representing this model.
### Response:
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'query_id') and self.query_id is not None:
_dict['query_id'] = self.query_id
if hasattr(self, 'natural_language_query'
) and self.natural_language_query is not None:
_dict['natural_language_query'] = self.natural_language_query
if hasattr(self, 'filter') and self.filter is not None:
_dict['filter'] = self.filter
if hasattr(self, 'examples') and self.examples is not None:
_dict['examples'] = [x._to_dict() for x in self.examples]
return _dict |
def write(self, stream):
"""Writes the topology to a stream or file."""
topology = self.createTopology()
def write_it(stream):
transportOut = TMemoryBuffer()
protocolOut = TBinaryProtocol.TBinaryProtocol(transportOut)
topology.write(protocolOut)
bytes = transportOut.getvalue()
stream.write(bytes)
if isinstance(stream, six.string_types):
with open(stream, 'wb') as f:
write_it(f)
else:
write_it(stream)
return topology | Writes the topology to a stream or file. | Below is the the instruction that describes the task:
### Input:
Writes the topology to a stream or file.
### Response:
def write(self, stream):
"""Writes the topology to a stream or file."""
topology = self.createTopology()
def write_it(stream):
transportOut = TMemoryBuffer()
protocolOut = TBinaryProtocol.TBinaryProtocol(transportOut)
topology.write(protocolOut)
bytes = transportOut.getvalue()
stream.write(bytes)
if isinstance(stream, six.string_types):
with open(stream, 'wb') as f:
write_it(f)
else:
write_it(stream)
return topology |
def can_attend_meetings(intervals):
"""
:type intervals: List[Interval]
:rtype: bool
"""
intervals = sorted(intervals, key=lambda x: x.start)
for i in range(1, len(intervals)):
if intervals[i].start < intervals[i - 1].end:
return False
return True | :type intervals: List[Interval]
:rtype: bool | Below is the the instruction that describes the task:
### Input:
:type intervals: List[Interval]
:rtype: bool
### Response:
def can_attend_meetings(intervals):
"""
:type intervals: List[Interval]
:rtype: bool
"""
intervals = sorted(intervals, key=lambda x: x.start)
for i in range(1, len(intervals)):
if intervals[i].start < intervals[i - 1].end:
return False
return True |
def manage_file(name,
sfn,
ret,
source,
source_sum,
user,
group,
mode,
attrs,
saltenv,
backup,
makedirs=False,
template=None, # pylint: disable=W0613
show_changes=True,
contents=None,
dir_mode=None,
follow_symlinks=True,
skip_verify=False,
keep_mode=False,
encoding=None,
encoding_errors='strict',
seuser=None,
serole=None,
setype=None,
serange=None,
**kwargs):
'''
Checks the destination against what was retrieved with get_managed and
makes the appropriate modifications (if necessary).
name
location to place the file
sfn
location of cached file on the minion
This is the path to the file stored on the minion. This file is placed
on the minion using cp.cache_file. If the hash sum of that file
matches the source_sum, we do not transfer the file to the minion
again.
This file is then grabbed and if it has template set, it renders the
file to be placed into the correct place on the system using
salt.files.utils.copyfile()
ret
The initial state return data structure. Pass in ``None`` to use the
default structure.
source
file reference on the master
source_sum
sum hash for source
user
user owner
group
group owner
backup
backup_mode
attrs
attributes to be set on file: '' means remove all of them
.. versionadded:: 2018.3.0
makedirs
make directories if they do not exist
template
format of templating
show_changes
Include diff in state return
contents:
contents to be placed in the file
dir_mode
mode for directories created with makedirs
skip_verify : False
If ``True``, hash verification of remote file sources (``http://``,
``https://``, ``ftp://``) will be skipped, and the ``source_hash``
argument will be ignored.
.. versionadded:: 2016.3.0
keep_mode : False
If ``True``, and the ``source`` is a file from the Salt fileserver (or
a local file on the minion), the mode of the destination file will be
set to the mode of the source file.
.. note:: keep_mode does not work with salt-ssh.
As a consequence of how the files are transferred to the minion, and
the inability to connect back to the master with salt-ssh, salt is
unable to stat the file as it exists on the fileserver and thus
cannot mirror the mode on the salt-ssh minion
encoding
If specified, then the specified encoding will be used. Otherwise, the
file will be encoded using the system locale (usually UTF-8). See
https://docs.python.org/3/library/codecs.html#standard-encodings for
the list of available encodings.
.. versionadded:: 2017.7.0
encoding_errors : 'strict'
Default is ```'strict'```.
See https://docs.python.org/2/library/codecs.html#codec-base-classes
for the error handling schemes.
.. versionadded:: 2017.7.0
seuser
selinux user attribute
.. versionadded:: Neon
serange
selinux range attribute
.. versionadded:: Neon
setype
selinux type attribute
.. versionadded:: Neon
serange
selinux range attribute
.. versionadded:: Neon
CLI Example:
.. code-block:: bash
salt '*' file.manage_file /etc/httpd/conf.d/httpd.conf '' '{}' salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' root root '755' '' base ''
.. versionchanged:: 2014.7.0
``follow_symlinks`` option added
'''
name = os.path.expanduser(name)
if not ret:
ret = {'name': name,
'changes': {},
'comment': '',
'result': True}
# Ensure that user-provided hash string is lowercase
if source_sum and ('hsum' in source_sum):
source_sum['hsum'] = source_sum['hsum'].lower()
if source:
if not sfn:
# File is not present, cache it
sfn = __salt__['cp.cache_file'](source, saltenv)
if not sfn:
return _error(
ret, 'Source file \'{0}\' not found'.format(source))
htype = source_sum.get('hash_type', __opts__['hash_type'])
# Recalculate source sum now that file has been cached
source_sum = {
'hash_type': htype,
'hsum': get_hash(sfn, form=htype)
}
if keep_mode:
if _urlparse(source).scheme in ('salt', 'file', ''):
try:
mode = __salt__['cp.stat_file'](source, saltenv=saltenv, octal=True)
except Exception as exc:
log.warning('Unable to stat %s: %s', sfn, exc)
# Check changes if the target file exists
if os.path.isfile(name) or os.path.islink(name):
if os.path.islink(name) and follow_symlinks:
real_name = os.path.realpath(name)
else:
real_name = name
# Only test the checksums on files with managed contents
if source and not (not follow_symlinks and os.path.islink(real_name)):
name_sum = get_hash(real_name, source_sum.get('hash_type', __opts__['hash_type']))
else:
name_sum = None
# Check if file needs to be replaced
if source and (name_sum is None or source_sum.get('hsum', __opts__['hash_type']) != name_sum):
if not sfn:
sfn = __salt__['cp.cache_file'](source, saltenv)
if not sfn:
return _error(
ret, 'Source file \'{0}\' not found'.format(source))
# If the downloaded file came from a non salt server or local
# source, and we are not skipping checksum verification, then
# verify that it matches the specified checksum.
if not skip_verify \
and _urlparse(source).scheme != 'salt':
dl_sum = get_hash(sfn, source_sum['hash_type'])
if dl_sum != source_sum['hsum']:
ret['comment'] = (
'Specified {0} checksum for {1} ({2}) does not match '
'actual checksum ({3}). If the \'source_hash\' value '
'refers to a remote file with multiple possible '
'matches, then it may be necessary to set '
'\'source_hash_name\'.'.format(
source_sum['hash_type'],
source,
source_sum['hsum'],
dl_sum
)
)
ret['result'] = False
return ret
# Print a diff equivalent to diff -u old new
if __salt__['config.option']('obfuscate_templates'):
ret['changes']['diff'] = '<Obfuscated Template>'
elif not show_changes:
ret['changes']['diff'] = '<show_changes=False>'
else:
try:
ret['changes']['diff'] = get_diff(
real_name, sfn, show_filenames=False)
except CommandExecutionError as exc:
ret['changes']['diff'] = exc.strerror
# Pre requisites are met, and the file needs to be replaced, do it
try:
salt.utils.files.copyfile(sfn,
real_name,
__salt__['config.backup_mode'](backup),
__opts__['cachedir'])
except IOError as io_error:
__clean_tmp(sfn)
return _error(
ret, 'Failed to commit change: {0}'.format(io_error))
if contents is not None:
# Write the static contents to a temporary file
tmp = salt.utils.files.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX,
text=True)
if salt.utils.platform.is_windows():
contents = os.linesep.join(
_splitlines_preserving_trailing_newline(contents))
with salt.utils.files.fopen(tmp, 'wb') as tmp_:
if encoding:
log.debug('File will be encoded with %s', encoding)
tmp_.write(contents.encode(encoding=encoding, errors=encoding_errors))
else:
tmp_.write(salt.utils.stringutils.to_bytes(contents))
try:
differences = get_diff(
real_name, tmp, show_filenames=False,
show_changes=show_changes, template=True)
except CommandExecutionError as exc:
ret.setdefault('warnings', []).append(
'Failed to detect changes to file: {0}'.format(exc.strerror)
)
differences = ''
if differences:
ret['changes']['diff'] = differences
# Pre requisites are met, the file needs to be replaced, do it
try:
salt.utils.files.copyfile(tmp,
real_name,
__salt__['config.backup_mode'](backup),
__opts__['cachedir'])
except IOError as io_error:
__clean_tmp(tmp)
return _error(
ret, 'Failed to commit change: {0}'.format(io_error))
__clean_tmp(tmp)
# Check for changing symlink to regular file here
if os.path.islink(name) and not follow_symlinks:
if not sfn:
sfn = __salt__['cp.cache_file'](source, saltenv)
if not sfn:
return _error(
ret, 'Source file \'{0}\' not found'.format(source))
# If the downloaded file came from a non salt server source verify
# that it matches the intended sum value
if not skip_verify and _urlparse(source).scheme != 'salt':
dl_sum = get_hash(sfn, source_sum['hash_type'])
if dl_sum != source_sum['hsum']:
ret['comment'] = (
'Specified {0} checksum for {1} ({2}) does not match '
'actual checksum ({3})'.format(
source_sum['hash_type'],
name,
source_sum['hsum'],
dl_sum
)
)
ret['result'] = False
return ret
try:
salt.utils.files.copyfile(sfn,
name,
__salt__['config.backup_mode'](backup),
__opts__['cachedir'])
except IOError as io_error:
__clean_tmp(sfn)
return _error(
ret, 'Failed to commit change: {0}'.format(io_error))
ret['changes']['diff'] = \
'Replace symbolic link with regular file'
if salt.utils.platform.is_windows():
# This function resides in win_file.py and will be available
# on Windows. The local function will be overridden
# pylint: disable=E1120,E1121,E1123
ret = check_perms(
path=name,
ret=ret,
owner=kwargs.get('win_owner'),
grant_perms=kwargs.get('win_perms'),
deny_perms=kwargs.get('win_deny_perms'),
inheritance=kwargs.get('win_inheritance', True),
reset=kwargs.get('win_perms_reset', False))
# pylint: enable=E1120,E1121,E1123
else:
ret, _ = check_perms(name, ret, user, group, mode, attrs, follow_symlinks,
seuser=seuser, serole=serole, setype=setype, serange=serange)
if ret['changes']:
ret['comment'] = 'File {0} updated'.format(
salt.utils.data.decode(name)
)
elif not ret['changes'] and ret['result']:
ret['comment'] = 'File {0} is in the correct state'.format(
salt.utils.data.decode(name)
)
if sfn:
__clean_tmp(sfn)
return ret
else: # target file does not exist
contain_dir = os.path.dirname(name)
def _set_mode_and_make_dirs(name, dir_mode, mode, user, group):
# check for existence of windows drive letter
if salt.utils.platform.is_windows():
drive, _ = os.path.splitdrive(name)
if drive and not os.path.exists(drive):
__clean_tmp(sfn)
return _error(ret,
'{0} drive not present'.format(drive))
if dir_mode is None and mode is not None:
# Add execute bit to each nonzero digit in the mode, if
# dir_mode was not specified. Otherwise, any
# directories created with makedirs_() below can't be
# listed via a shell.
mode_list = [x for x in six.text_type(mode)][-3:]
for idx in range(len(mode_list)):
if mode_list[idx] != '0':
mode_list[idx] = six.text_type(int(mode_list[idx]) | 1)
dir_mode = ''.join(mode_list)
if salt.utils.platform.is_windows():
# This function resides in win_file.py and will be available
# on Windows. The local function will be overridden
# pylint: disable=E1120,E1121,E1123
makedirs_(
path=name,
owner=kwargs.get('win_owner'),
grant_perms=kwargs.get('win_perms'),
deny_perms=kwargs.get('win_deny_perms'),
inheritance=kwargs.get('win_inheritance', True),
reset=kwargs.get('win_perms_reset', False))
# pylint: enable=E1120,E1121,E1123
else:
makedirs_(name, user=user, group=group, mode=dir_mode)
if source:
# Apply the new file
if not sfn:
sfn = __salt__['cp.cache_file'](source, saltenv)
if not sfn:
return _error(
ret, 'Source file \'{0}\' not found'.format(source))
# If the downloaded file came from a non salt server source verify
# that it matches the intended sum value
if not skip_verify \
and _urlparse(source).scheme != 'salt':
dl_sum = get_hash(sfn, source_sum['hash_type'])
if dl_sum != source_sum['hsum']:
ret['comment'] = (
'Specified {0} checksum for {1} ({2}) does not match '
'actual checksum ({3})'.format(
source_sum['hash_type'],
name,
source_sum['hsum'],
dl_sum
)
)
ret['result'] = False
return ret
# It is a new file, set the diff accordingly
ret['changes']['diff'] = 'New file'
if not os.path.isdir(contain_dir):
if makedirs:
_set_mode_and_make_dirs(name, dir_mode, mode, user, group)
else:
__clean_tmp(sfn)
# No changes actually made
ret['changes'].pop('diff', None)
return _error(ret, 'Parent directory not present')
else: # source != True
if not os.path.isdir(contain_dir):
if makedirs:
_set_mode_and_make_dirs(name, dir_mode, mode, user, group)
else:
__clean_tmp(sfn)
# No changes actually made
ret['changes'].pop('diff', None)
return _error(ret, 'Parent directory not present')
# Create the file, user rw-only if mode will be set to prevent
# a small security race problem before the permissions are set
with salt.utils.files.set_umask(0o077 if mode else None):
# Create a new file when test is False and source is None
if contents is None:
if not __opts__['test']:
if touch(name):
ret['changes']['new'] = 'file {0} created'.format(name)
ret['comment'] = 'Empty file'
else:
return _error(
ret, 'Empty file {0} not created'.format(name)
)
else:
if not __opts__['test']:
if touch(name):
ret['changes']['diff'] = 'New file'
else:
return _error(
ret, 'File {0} not created'.format(name)
)
if contents is not None:
# Write the static contents to a temporary file
tmp = salt.utils.files.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX,
text=True)
with salt.utils.files.fopen(tmp, 'wb') as tmp_:
if encoding:
if salt.utils.platform.is_windows():
contents = os.linesep.join(
_splitlines_preserving_trailing_newline(contents))
log.debug('File will be encoded with %s', encoding)
tmp_.write(contents.encode(encoding=encoding, errors=encoding_errors))
else:
tmp_.write(salt.utils.stringutils.to_bytes(contents))
# Copy into place
salt.utils.files.copyfile(tmp,
name,
__salt__['config.backup_mode'](backup),
__opts__['cachedir'])
__clean_tmp(tmp)
# Now copy the file contents if there is a source file
elif sfn:
salt.utils.files.copyfile(sfn,
name,
__salt__['config.backup_mode'](backup),
__opts__['cachedir'])
__clean_tmp(sfn)
# This is a new file, if no mode specified, use the umask to figure
# out what mode to use for the new file.
if mode is None and not salt.utils.platform.is_windows():
# Get current umask
mask = salt.utils.files.get_umask()
# Calculate the mode value that results from the umask
mode = oct((0o777 ^ mask) & 0o666)
if salt.utils.platform.is_windows():
# This function resides in win_file.py and will be available
# on Windows. The local function will be overridden
# pylint: disable=E1120,E1121,E1123
ret = check_perms(
path=name,
ret=ret,
owner=kwargs.get('win_owner'),
grant_perms=kwargs.get('win_perms'),
deny_perms=kwargs.get('win_deny_perms'),
inheritance=kwargs.get('win_inheritance', True),
reset=kwargs.get('win_perms_reset', False))
# pylint: enable=E1120,E1121,E1123
else:
ret, _ = check_perms(name, ret, user, group, mode, attrs,
seuser=seuser, serole=serole, setype=setype, serange=serange)
if not ret['comment']:
ret['comment'] = 'File ' + name + ' updated'
if __opts__['test']:
ret['comment'] = 'File ' + name + ' not updated'
elif not ret['changes'] and ret['result']:
ret['comment'] = 'File ' + name + ' is in the correct state'
if sfn:
__clean_tmp(sfn)
return ret | Checks the destination against what was retrieved with get_managed and
makes the appropriate modifications (if necessary).
name
location to place the file
sfn
location of cached file on the minion
This is the path to the file stored on the minion. This file is placed
on the minion using cp.cache_file. If the hash sum of that file
matches the source_sum, we do not transfer the file to the minion
again.
This file is then grabbed and if it has template set, it renders the
file to be placed into the correct place on the system using
salt.files.utils.copyfile()
ret
The initial state return data structure. Pass in ``None`` to use the
default structure.
source
file reference on the master
source_sum
sum hash for source
user
user owner
group
group owner
backup
backup_mode
attrs
attributes to be set on file: '' means remove all of them
.. versionadded:: 2018.3.0
makedirs
make directories if they do not exist
template
format of templating
show_changes
Include diff in state return
contents:
contents to be placed in the file
dir_mode
mode for directories created with makedirs
skip_verify : False
If ``True``, hash verification of remote file sources (``http://``,
``https://``, ``ftp://``) will be skipped, and the ``source_hash``
argument will be ignored.
.. versionadded:: 2016.3.0
keep_mode : False
If ``True``, and the ``source`` is a file from the Salt fileserver (or
a local file on the minion), the mode of the destination file will be
set to the mode of the source file.
.. note:: keep_mode does not work with salt-ssh.
As a consequence of how the files are transferred to the minion, and
the inability to connect back to the master with salt-ssh, salt is
unable to stat the file as it exists on the fileserver and thus
cannot mirror the mode on the salt-ssh minion
encoding
If specified, then the specified encoding will be used. Otherwise, the
file will be encoded using the system locale (usually UTF-8). See
https://docs.python.org/3/library/codecs.html#standard-encodings for
the list of available encodings.
.. versionadded:: 2017.7.0
encoding_errors : 'strict'
Default is ```'strict'```.
See https://docs.python.org/2/library/codecs.html#codec-base-classes
for the error handling schemes.
.. versionadded:: 2017.7.0
seuser
selinux user attribute
.. versionadded:: Neon
serange
selinux range attribute
.. versionadded:: Neon
setype
selinux type attribute
.. versionadded:: Neon
serange
selinux range attribute
.. versionadded:: Neon
CLI Example:
.. code-block:: bash
salt '*' file.manage_file /etc/httpd/conf.d/httpd.conf '' '{}' salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' root root '755' '' base ''
.. versionchanged:: 2014.7.0
``follow_symlinks`` option added | Below is the the instruction that describes the task:
### Input:
Checks the destination against what was retrieved with get_managed and
makes the appropriate modifications (if necessary).
name
location to place the file
sfn
location of cached file on the minion
This is the path to the file stored on the minion. This file is placed
on the minion using cp.cache_file. If the hash sum of that file
matches the source_sum, we do not transfer the file to the minion
again.
This file is then grabbed and if it has template set, it renders the
file to be placed into the correct place on the system using
salt.files.utils.copyfile()
ret
The initial state return data structure. Pass in ``None`` to use the
default structure.
source
file reference on the master
source_sum
sum hash for source
user
user owner
group
group owner
backup
backup_mode
attrs
attributes to be set on file: '' means remove all of them
.. versionadded:: 2018.3.0
makedirs
make directories if they do not exist
template
format of templating
show_changes
Include diff in state return
contents:
contents to be placed in the file
dir_mode
mode for directories created with makedirs
skip_verify : False
If ``True``, hash verification of remote file sources (``http://``,
``https://``, ``ftp://``) will be skipped, and the ``source_hash``
argument will be ignored.
.. versionadded:: 2016.3.0
keep_mode : False
If ``True``, and the ``source`` is a file from the Salt fileserver (or
a local file on the minion), the mode of the destination file will be
set to the mode of the source file.
.. note:: keep_mode does not work with salt-ssh.
As a consequence of how the files are transferred to the minion, and
the inability to connect back to the master with salt-ssh, salt is
unable to stat the file as it exists on the fileserver and thus
cannot mirror the mode on the salt-ssh minion
encoding
If specified, then the specified encoding will be used. Otherwise, the
file will be encoded using the system locale (usually UTF-8). See
https://docs.python.org/3/library/codecs.html#standard-encodings for
the list of available encodings.
.. versionadded:: 2017.7.0
encoding_errors : 'strict'
Default is ```'strict'```.
See https://docs.python.org/2/library/codecs.html#codec-base-classes
for the error handling schemes.
.. versionadded:: 2017.7.0
seuser
selinux user attribute
.. versionadded:: Neon
serange
selinux range attribute
.. versionadded:: Neon
setype
selinux type attribute
.. versionadded:: Neon
serange
selinux range attribute
.. versionadded:: Neon
CLI Example:
.. code-block:: bash
salt '*' file.manage_file /etc/httpd/conf.d/httpd.conf '' '{}' salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' root root '755' '' base ''
.. versionchanged:: 2014.7.0
``follow_symlinks`` option added
### Response:
def manage_file(name,
sfn,
ret,
source,
source_sum,
user,
group,
mode,
attrs,
saltenv,
backup,
makedirs=False,
template=None, # pylint: disable=W0613
show_changes=True,
contents=None,
dir_mode=None,
follow_symlinks=True,
skip_verify=False,
keep_mode=False,
encoding=None,
encoding_errors='strict',
seuser=None,
serole=None,
setype=None,
serange=None,
**kwargs):
'''
Checks the destination against what was retrieved with get_managed and
makes the appropriate modifications (if necessary).
name
location to place the file
sfn
location of cached file on the minion
This is the path to the file stored on the minion. This file is placed
on the minion using cp.cache_file. If the hash sum of that file
matches the source_sum, we do not transfer the file to the minion
again.
This file is then grabbed and if it has template set, it renders the
file to be placed into the correct place on the system using
salt.files.utils.copyfile()
ret
The initial state return data structure. Pass in ``None`` to use the
default structure.
source
file reference on the master
source_sum
sum hash for source
user
user owner
group
group owner
backup
backup_mode
attrs
attributes to be set on file: '' means remove all of them
.. versionadded:: 2018.3.0
makedirs
make directories if they do not exist
template
format of templating
show_changes
Include diff in state return
contents:
contents to be placed in the file
dir_mode
mode for directories created with makedirs
skip_verify : False
If ``True``, hash verification of remote file sources (``http://``,
``https://``, ``ftp://``) will be skipped, and the ``source_hash``
argument will be ignored.
.. versionadded:: 2016.3.0
keep_mode : False
If ``True``, and the ``source`` is a file from the Salt fileserver (or
a local file on the minion), the mode of the destination file will be
set to the mode of the source file.
.. note:: keep_mode does not work with salt-ssh.
As a consequence of how the files are transferred to the minion, and
the inability to connect back to the master with salt-ssh, salt is
unable to stat the file as it exists on the fileserver and thus
cannot mirror the mode on the salt-ssh minion
encoding
If specified, then the specified encoding will be used. Otherwise, the
file will be encoded using the system locale (usually UTF-8). See
https://docs.python.org/3/library/codecs.html#standard-encodings for
the list of available encodings.
.. versionadded:: 2017.7.0
encoding_errors : 'strict'
Default is ```'strict'```.
See https://docs.python.org/2/library/codecs.html#codec-base-classes
for the error handling schemes.
.. versionadded:: 2017.7.0
seuser
selinux user attribute
.. versionadded:: Neon
serange
selinux range attribute
.. versionadded:: Neon
setype
selinux type attribute
.. versionadded:: Neon
serange
selinux range attribute
.. versionadded:: Neon
CLI Example:
.. code-block:: bash
salt '*' file.manage_file /etc/httpd/conf.d/httpd.conf '' '{}' salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' root root '755' '' base ''
.. versionchanged:: 2014.7.0
``follow_symlinks`` option added
'''
name = os.path.expanduser(name)
if not ret:
ret = {'name': name,
'changes': {},
'comment': '',
'result': True}
# Ensure that user-provided hash string is lowercase
if source_sum and ('hsum' in source_sum):
source_sum['hsum'] = source_sum['hsum'].lower()
if source:
if not sfn:
# File is not present, cache it
sfn = __salt__['cp.cache_file'](source, saltenv)
if not sfn:
return _error(
ret, 'Source file \'{0}\' not found'.format(source))
htype = source_sum.get('hash_type', __opts__['hash_type'])
# Recalculate source sum now that file has been cached
source_sum = {
'hash_type': htype,
'hsum': get_hash(sfn, form=htype)
}
if keep_mode:
if _urlparse(source).scheme in ('salt', 'file', ''):
try:
mode = __salt__['cp.stat_file'](source, saltenv=saltenv, octal=True)
except Exception as exc:
log.warning('Unable to stat %s: %s', sfn, exc)
# Check changes if the target file exists
if os.path.isfile(name) or os.path.islink(name):
if os.path.islink(name) and follow_symlinks:
real_name = os.path.realpath(name)
else:
real_name = name
# Only test the checksums on files with managed contents
if source and not (not follow_symlinks and os.path.islink(real_name)):
name_sum = get_hash(real_name, source_sum.get('hash_type', __opts__['hash_type']))
else:
name_sum = None
# Check if file needs to be replaced
if source and (name_sum is None or source_sum.get('hsum', __opts__['hash_type']) != name_sum):
if not sfn:
sfn = __salt__['cp.cache_file'](source, saltenv)
if not sfn:
return _error(
ret, 'Source file \'{0}\' not found'.format(source))
# If the downloaded file came from a non salt server or local
# source, and we are not skipping checksum verification, then
# verify that it matches the specified checksum.
if not skip_verify \
and _urlparse(source).scheme != 'salt':
dl_sum = get_hash(sfn, source_sum['hash_type'])
if dl_sum != source_sum['hsum']:
ret['comment'] = (
'Specified {0} checksum for {1} ({2}) does not match '
'actual checksum ({3}). If the \'source_hash\' value '
'refers to a remote file with multiple possible '
'matches, then it may be necessary to set '
'\'source_hash_name\'.'.format(
source_sum['hash_type'],
source,
source_sum['hsum'],
dl_sum
)
)
ret['result'] = False
return ret
# Print a diff equivalent to diff -u old new
if __salt__['config.option']('obfuscate_templates'):
ret['changes']['diff'] = '<Obfuscated Template>'
elif not show_changes:
ret['changes']['diff'] = '<show_changes=False>'
else:
try:
ret['changes']['diff'] = get_diff(
real_name, sfn, show_filenames=False)
except CommandExecutionError as exc:
ret['changes']['diff'] = exc.strerror
# Pre requisites are met, and the file needs to be replaced, do it
try:
salt.utils.files.copyfile(sfn,
real_name,
__salt__['config.backup_mode'](backup),
__opts__['cachedir'])
except IOError as io_error:
__clean_tmp(sfn)
return _error(
ret, 'Failed to commit change: {0}'.format(io_error))
if contents is not None:
# Write the static contents to a temporary file
tmp = salt.utils.files.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX,
text=True)
if salt.utils.platform.is_windows():
contents = os.linesep.join(
_splitlines_preserving_trailing_newline(contents))
with salt.utils.files.fopen(tmp, 'wb') as tmp_:
if encoding:
log.debug('File will be encoded with %s', encoding)
tmp_.write(contents.encode(encoding=encoding, errors=encoding_errors))
else:
tmp_.write(salt.utils.stringutils.to_bytes(contents))
try:
differences = get_diff(
real_name, tmp, show_filenames=False,
show_changes=show_changes, template=True)
except CommandExecutionError as exc:
ret.setdefault('warnings', []).append(
'Failed to detect changes to file: {0}'.format(exc.strerror)
)
differences = ''
if differences:
ret['changes']['diff'] = differences
# Pre requisites are met, the file needs to be replaced, do it
try:
salt.utils.files.copyfile(tmp,
real_name,
__salt__['config.backup_mode'](backup),
__opts__['cachedir'])
except IOError as io_error:
__clean_tmp(tmp)
return _error(
ret, 'Failed to commit change: {0}'.format(io_error))
__clean_tmp(tmp)
# Check for changing symlink to regular file here
if os.path.islink(name) and not follow_symlinks:
if not sfn:
sfn = __salt__['cp.cache_file'](source, saltenv)
if not sfn:
return _error(
ret, 'Source file \'{0}\' not found'.format(source))
# If the downloaded file came from a non salt server source verify
# that it matches the intended sum value
if not skip_verify and _urlparse(source).scheme != 'salt':
dl_sum = get_hash(sfn, source_sum['hash_type'])
if dl_sum != source_sum['hsum']:
ret['comment'] = (
'Specified {0} checksum for {1} ({2}) does not match '
'actual checksum ({3})'.format(
source_sum['hash_type'],
name,
source_sum['hsum'],
dl_sum
)
)
ret['result'] = False
return ret
try:
salt.utils.files.copyfile(sfn,
name,
__salt__['config.backup_mode'](backup),
__opts__['cachedir'])
except IOError as io_error:
__clean_tmp(sfn)
return _error(
ret, 'Failed to commit change: {0}'.format(io_error))
ret['changes']['diff'] = \
'Replace symbolic link with regular file'
if salt.utils.platform.is_windows():
# This function resides in win_file.py and will be available
# on Windows. The local function will be overridden
# pylint: disable=E1120,E1121,E1123
ret = check_perms(
path=name,
ret=ret,
owner=kwargs.get('win_owner'),
grant_perms=kwargs.get('win_perms'),
deny_perms=kwargs.get('win_deny_perms'),
inheritance=kwargs.get('win_inheritance', True),
reset=kwargs.get('win_perms_reset', False))
# pylint: enable=E1120,E1121,E1123
else:
ret, _ = check_perms(name, ret, user, group, mode, attrs, follow_symlinks,
seuser=seuser, serole=serole, setype=setype, serange=serange)
if ret['changes']:
ret['comment'] = 'File {0} updated'.format(
salt.utils.data.decode(name)
)
elif not ret['changes'] and ret['result']:
ret['comment'] = 'File {0} is in the correct state'.format(
salt.utils.data.decode(name)
)
if sfn:
__clean_tmp(sfn)
return ret
else: # target file does not exist
contain_dir = os.path.dirname(name)
def _set_mode_and_make_dirs(name, dir_mode, mode, user, group):
# check for existence of windows drive letter
if salt.utils.platform.is_windows():
drive, _ = os.path.splitdrive(name)
if drive and not os.path.exists(drive):
__clean_tmp(sfn)
return _error(ret,
'{0} drive not present'.format(drive))
if dir_mode is None and mode is not None:
# Add execute bit to each nonzero digit in the mode, if
# dir_mode was not specified. Otherwise, any
# directories created with makedirs_() below can't be
# listed via a shell.
mode_list = [x for x in six.text_type(mode)][-3:]
for idx in range(len(mode_list)):
if mode_list[idx] != '0':
mode_list[idx] = six.text_type(int(mode_list[idx]) | 1)
dir_mode = ''.join(mode_list)
if salt.utils.platform.is_windows():
# This function resides in win_file.py and will be available
# on Windows. The local function will be overridden
# pylint: disable=E1120,E1121,E1123
makedirs_(
path=name,
owner=kwargs.get('win_owner'),
grant_perms=kwargs.get('win_perms'),
deny_perms=kwargs.get('win_deny_perms'),
inheritance=kwargs.get('win_inheritance', True),
reset=kwargs.get('win_perms_reset', False))
# pylint: enable=E1120,E1121,E1123
else:
makedirs_(name, user=user, group=group, mode=dir_mode)
if source:
# Apply the new file
if not sfn:
sfn = __salt__['cp.cache_file'](source, saltenv)
if not sfn:
return _error(
ret, 'Source file \'{0}\' not found'.format(source))
# If the downloaded file came from a non salt server source verify
# that it matches the intended sum value
if not skip_verify \
and _urlparse(source).scheme != 'salt':
dl_sum = get_hash(sfn, source_sum['hash_type'])
if dl_sum != source_sum['hsum']:
ret['comment'] = (
'Specified {0} checksum for {1} ({2}) does not match '
'actual checksum ({3})'.format(
source_sum['hash_type'],
name,
source_sum['hsum'],
dl_sum
)
)
ret['result'] = False
return ret
# It is a new file, set the diff accordingly
ret['changes']['diff'] = 'New file'
if not os.path.isdir(contain_dir):
if makedirs:
_set_mode_and_make_dirs(name, dir_mode, mode, user, group)
else:
__clean_tmp(sfn)
# No changes actually made
ret['changes'].pop('diff', None)
return _error(ret, 'Parent directory not present')
else: # source != True
if not os.path.isdir(contain_dir):
if makedirs:
_set_mode_and_make_dirs(name, dir_mode, mode, user, group)
else:
__clean_tmp(sfn)
# No changes actually made
ret['changes'].pop('diff', None)
return _error(ret, 'Parent directory not present')
# Create the file, user rw-only if mode will be set to prevent
# a small security race problem before the permissions are set
with salt.utils.files.set_umask(0o077 if mode else None):
# Create a new file when test is False and source is None
if contents is None:
if not __opts__['test']:
if touch(name):
ret['changes']['new'] = 'file {0} created'.format(name)
ret['comment'] = 'Empty file'
else:
return _error(
ret, 'Empty file {0} not created'.format(name)
)
else:
if not __opts__['test']:
if touch(name):
ret['changes']['diff'] = 'New file'
else:
return _error(
ret, 'File {0} not created'.format(name)
)
if contents is not None:
# Write the static contents to a temporary file
tmp = salt.utils.files.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX,
text=True)
with salt.utils.files.fopen(tmp, 'wb') as tmp_:
if encoding:
if salt.utils.platform.is_windows():
contents = os.linesep.join(
_splitlines_preserving_trailing_newline(contents))
log.debug('File will be encoded with %s', encoding)
tmp_.write(contents.encode(encoding=encoding, errors=encoding_errors))
else:
tmp_.write(salt.utils.stringutils.to_bytes(contents))
# Copy into place
salt.utils.files.copyfile(tmp,
name,
__salt__['config.backup_mode'](backup),
__opts__['cachedir'])
__clean_tmp(tmp)
# Now copy the file contents if there is a source file
elif sfn:
salt.utils.files.copyfile(sfn,
name,
__salt__['config.backup_mode'](backup),
__opts__['cachedir'])
__clean_tmp(sfn)
# This is a new file, if no mode specified, use the umask to figure
# out what mode to use for the new file.
if mode is None and not salt.utils.platform.is_windows():
# Get current umask
mask = salt.utils.files.get_umask()
# Calculate the mode value that results from the umask
mode = oct((0o777 ^ mask) & 0o666)
if salt.utils.platform.is_windows():
# This function resides in win_file.py and will be available
# on Windows. The local function will be overridden
# pylint: disable=E1120,E1121,E1123
ret = check_perms(
path=name,
ret=ret,
owner=kwargs.get('win_owner'),
grant_perms=kwargs.get('win_perms'),
deny_perms=kwargs.get('win_deny_perms'),
inheritance=kwargs.get('win_inheritance', True),
reset=kwargs.get('win_perms_reset', False))
# pylint: enable=E1120,E1121,E1123
else:
ret, _ = check_perms(name, ret, user, group, mode, attrs,
seuser=seuser, serole=serole, setype=setype, serange=serange)
if not ret['comment']:
ret['comment'] = 'File ' + name + ' updated'
if __opts__['test']:
ret['comment'] = 'File ' + name + ' not updated'
elif not ret['changes'] and ret['result']:
ret['comment'] = 'File ' + name + ' is in the correct state'
if sfn:
__clean_tmp(sfn)
return ret |
def get_data(self, datatype, data):
""" Look for an IP address or an email address in the spammer database.
:param datatype: Which type of data is to be looked up.
Allowed values are 'ip' or 'mail'.
:param data: The value to be looked up through the API.
:type datatype: str
:type data: str
:return: Data relative to the looked up artifact.
:rtype: dict
"""
result = {}
params = StopforumspamClient._set_payload(datatype, data)
response = self.client.get(
'https://api.stopforumspam.org/api',
params=params, proxies=self.proxies)
response.raise_for_status()
report = response.json()
if report['success']:
data = report[StopforumspamClient._type_conversion[datatype]]
result = self._data_conversion(data)
else:
pass
return result | Look for an IP address or an email address in the spammer database.
:param datatype: Which type of data is to be looked up.
Allowed values are 'ip' or 'mail'.
:param data: The value to be looked up through the API.
:type datatype: str
:type data: str
:return: Data relative to the looked up artifact.
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Look for an IP address or an email address in the spammer database.
:param datatype: Which type of data is to be looked up.
Allowed values are 'ip' or 'mail'.
:param data: The value to be looked up through the API.
:type datatype: str
:type data: str
:return: Data relative to the looked up artifact.
:rtype: dict
### Response:
def get_data(self, datatype, data):
    """ Look for an IP address or an email address in the spammer database.

    :param datatype: Which type of data is to be looked up.
                     Allowed values are 'ip' or 'mail'.
    :param data: The value to be looked up through the API.
    :type datatype: str
    :type data: str
    :return: Data relative to the looked up artifact; empty dict when the
             API reports no success.
    :rtype: dict
    :raises requests.HTTPError: if the API answers with an error status.
    """
    result = {}
    params = StopforumspamClient._set_payload(datatype, data)
    response = self.client.get(
        'https://api.stopforumspam.org/api',
        params=params, proxies=self.proxies)
    # Fail loudly on HTTP-level errors before trying to parse the body.
    response.raise_for_status()
    report = response.json()
    if report['success']:
        # The API nests the payload under a type-specific key; do not
        # rebind the `data` parameter while extracting it.
        payload = report[StopforumspamClient._type_conversion[datatype]]
        result = self._data_conversion(payload)
    return result
def _load(self, **kwargs):
"""wrapped with load, override that in a subclass to customize"""
if 'uri' in self._meta_data:
error = "There was an attempt to assign a new uri to this "\
"resource, the _meta_data['uri'] is %s and it should"\
" not be changed." % (self._meta_data['uri'])
raise URICreationCollision(error)
requests_params = self._handle_requests_params(kwargs)
self._check_load_parameters(**kwargs)
kwargs['uri_as_parts'] = True
refresh_session = self._meta_data['bigip']._meta_data['icr_session']
base_uri = self._meta_data['container']._meta_data['uri']
kwargs.update(requests_params)
for key1, key2 in self._meta_data['reduction_forcing_pairs']:
kwargs = self._reduce_boolean_pair(kwargs, key1, key2)
kwargs = self._check_for_python_keywords(kwargs)
response = refresh_session.get(base_uri, **kwargs)
# Make new instance of self
return self._produce_instance(response) | wrapped with load, override that in a subclass to customize | Below is the the instruction that describes the task:
### Input:
wrapped with load, override that in a subclass to customize
### Response:
def _load(self, **kwargs):
    """wrapped with load, override that in a subclass to customize

    Fetches the resource's current state from the REST endpoint of its
    container and returns a fresh instance built from the response.

    :param kwargs: query parameters identifying the resource, plus
        optional ``requests_params`` passed through to the HTTP layer.
    :raises URICreationCollision: if this object already carries a URI,
        i.e. it was previously loaded/created and must not be re-targeted.
    """
    # Guard: a loaded/created resource is pinned to its URI forever.
    if 'uri' in self._meta_data:
        error = "There was an attempt to assign a new uri to this "\
                "resource, the _meta_data['uri'] is %s and it should"\
                " not be changed." % (self._meta_data['uri'])
        raise URICreationCollision(error)
    # Split transport-level options (timeouts etc.) away from query params.
    requests_params = self._handle_requests_params(kwargs)
    self._check_load_parameters(**kwargs)
    # uri_as_parts=True: identifying kwargs become URI path components.
    kwargs['uri_as_parts'] = True
    refresh_session = self._meta_data['bigip']._meta_data['icr_session']
    base_uri = self._meta_data['container']._meta_data['uri']
    kwargs.update(requests_params)
    # Collapse paired boolean flags into the single form the REST API wants.
    for key1, key2 in self._meta_data['reduction_forcing_pairs']:
        kwargs = self._reduce_boolean_pair(kwargs, key1, key2)
    kwargs = self._check_for_python_keywords(kwargs)
    response = refresh_session.get(base_uri, **kwargs)
    # Make new instance of self
    return self._produce_instance(response)
def scanned(self):
"""Number of items that DynamoDB evaluated, before any filter was applied."""
if self.request["Select"] == "COUNT":
while not self.exhausted:
next(self, None)
return self._scanned | Number of items that DynamoDB evaluated, before any filter was applied. | Below is the the instruction that describes the task:
### Input:
Number of items that DynamoDB evaluated, before any filter was applied.
### Response:
def scanned(self):
    """Number of items that DynamoDB evaluated, before any filter was applied."""
    is_count_query = self.request["Select"] == "COUNT"
    if is_count_query:
        # The tally is only final once the paginator has been drained.
        while True:
            if self.exhausted:
                break
            next(self, None)
    return self._scanned
def find_chunk (phrase, np):
"""
leverage noun phrase chunking
"""
for i in iter(range(0, len(phrase))):
parsed_np = find_chunk_sub(phrase, np, i)
if parsed_np:
return parsed_np | leverage noun phrase chunking | Below is the the instruction that describes the task:
### Input:
leverage noun phrase chunking
### Response:
def find_chunk(phrase, np):
    """
    Leverage noun phrase chunking: return the first parsed NP produced by
    ``find_chunk_sub`` at any offset within *phrase*, or None when no
    offset yields a chunk.

    :param phrase: sequence of tokens to scan
    :param np: noun-phrase pattern forwarded to ``find_chunk_sub``
    """
    # `iter(range(...))` was redundant -- range is already iterable.
    for offset in range(len(phrase)):
        parsed_np = find_chunk_sub(phrase, np, offset)
        if parsed_np:
            return parsed_np
    # Explicit None instead of falling off the end of the function.
    return None
def _update_repo(repo_config, store, tags_only):
"""Updates a repository to the tip of `master`. If the repository cannot
be updated because a hook that is configured does not exist in `master`,
this raises a RepositoryCannotBeUpdatedError
Args:
repo_config - A config for a repository
"""
repo_path = store.clone(repo_config['repo'], repo_config['rev'])
cmd_output('git', 'fetch', cwd=repo_path)
tag_cmd = ('git', 'describe', 'origin/master', '--tags')
if tags_only:
tag_cmd += ('--abbrev=0',)
else:
tag_cmd += ('--exact',)
try:
rev = cmd_output(*tag_cmd, cwd=repo_path)[1].strip()
except CalledProcessError:
tag_cmd = ('git', 'rev-parse', 'origin/master')
rev = cmd_output(*tag_cmd, cwd=repo_path)[1].strip()
# Don't bother trying to update if our rev is the same
if rev == repo_config['rev']:
return repo_config
try:
path = store.clone(repo_config['repo'], rev)
manifest = load_manifest(os.path.join(path, C.MANIFEST_FILE))
except InvalidManifestError as e:
raise RepositoryCannotBeUpdatedError(six.text_type(e))
# See if any of our hooks were deleted with the new commits
hooks = {hook['id'] for hook in repo_config['hooks']}
hooks_missing = hooks - {hook['id'] for hook in manifest}
if hooks_missing:
raise RepositoryCannotBeUpdatedError(
'Cannot update because the tip of master is missing these hooks:\n'
'{}'.format(', '.join(sorted(hooks_missing))),
)
# Construct a new config with the head rev
new_config = repo_config.copy()
new_config['rev'] = rev
return new_config | Updates a repository to the tip of `master`. If the repository cannot
be updated because a hook that is configured does not exist in `master`,
this raises a RepositoryCannotBeUpdatedError
Args:
repo_config - A config for a repository | Below is the instruction that describes the task:
### Input:
Updates a repository to the tip of `master`. If the repository cannot
be updated because a hook that is configured does not exist in `master`,
this raises a RepositoryCannotBeUpdatedError
Args:
repo_config - A config for a repository
### Response:
def _update_repo(repo_config, store, tags_only):
    """Updates a repository to the tip of `master`. If the repository cannot
    be updated because a hook that is configured does not exist in `master`,
    this raises a RepositoryCannotBeUpdatedError
    Args:
        repo_config - A config for a repository
        store - clone store used to fetch and cache repository checkouts
        tags_only - when True, resolve the newest tag (``--abbrev=0``)
            instead of requiring an exact tag on origin/master
    """
    repo_path = store.clone(repo_config['repo'], repo_config['rev'])
    cmd_output('git', 'fetch', cwd=repo_path)
    tag_cmd = ('git', 'describe', 'origin/master', '--tags')
    if tags_only:
        tag_cmd += ('--abbrev=0',)
    else:
        tag_cmd += ('--exact',)
    # Resolve the new rev: prefer a tag describing origin/master, fall
    # back to the bare SHA when no suitable tag exists.
    try:
        rev = cmd_output(*tag_cmd, cwd=repo_path)[1].strip()
    except CalledProcessError:
        tag_cmd = ('git', 'rev-parse', 'origin/master')
        rev = cmd_output(*tag_cmd, cwd=repo_path)[1].strip()
    # Don't bother trying to update if our rev is the same
    if rev == repo_config['rev']:
        return repo_config
    try:
        path = store.clone(repo_config['repo'], rev)
        manifest = load_manifest(os.path.join(path, C.MANIFEST_FILE))
    except InvalidManifestError as e:
        raise RepositoryCannotBeUpdatedError(six.text_type(e))
    # See if any of our hooks were deleted with the new commits
    hooks = {hook['id'] for hook in repo_config['hooks']}
    hooks_missing = hooks - {hook['id'] for hook in manifest}
    if hooks_missing:
        raise RepositoryCannotBeUpdatedError(
            'Cannot update because the tip of master is missing these hooks:\n'
            '{}'.format(', '.join(sorted(hooks_missing))),
        )
    # Construct a new config with the head rev
    new_config = repo_config.copy()
    new_config['rev'] = rev
    return new_config
def is_defined(self, obj, force_import=False):
"""Return True if object is defined in current namespace"""
from spyder_kernels.utils.dochelpers import isdefined
ns = self._get_current_namespace(with_magics=True)
return isdefined(obj, force_import=force_import, namespace=ns) | Return True if object is defined in current namespace | Below is the the instruction that describes the task:
### Input:
Return True if object is defined in current namespace
### Response:
def is_defined(self, obj, force_import=False):
    """Return True if object is defined in current namespace

    :param obj: name of the object to look up (resolution semantics are
        delegated to ``spyder_kernels``' ``isdefined`` helper)
    :param force_import: forwarded to ``isdefined``; presumably attempts
        an import before testing definedness -- TODO confirm in dochelpers
    """
    from spyder_kernels.utils.dochelpers import isdefined
    # with_magics=True: magic commands are part of the searched namespace.
    ns = self._get_current_namespace(with_magics=True)
    return isdefined(obj, force_import=force_import, namespace=ns)
def timetree_likelihood(self):
'''
Return the likelihood of the data given the current branch length in the tree
'''
LH = 0
for node in self.tree.find_clades(order='preorder'): # sum the likelihood contributions of all branches
if node.up is None: # root node
continue
LH -= node.branch_length_interpolator(node.branch_length)
# add the root sequence LH and return
if self.aln:
LH += self.gtr.sequence_logLH(self.tree.root.cseq, pattern_multiplicity=self.multiplicity)
return LH | Return the likelihood of the data given the current branch length in the tree | Below is the the instruction that describes the task:
### Input:
Return the likelihood of the data given the current branch length in the tree
### Response:
def timetree_likelihood(self):
    '''
    Return the likelihood of the data given the current branch length in the tree

    Sums, over every non-root node, the value of its
    ``branch_length_interpolator`` at the current branch length, and --
    when an alignment is attached -- adds the log-likelihood of the root
    sequence under the GTR model.
    '''
    LH = 0
    for node in self.tree.find_clades(order='preorder'): # sum the likelihood contributions of all branches
        if node.up is None: # root node
            continue
        # NOTE(review): the interpolator appears to store a negated
        # log-likelihood, hence the subtraction -- confirm sign convention.
        LH -= node.branch_length_interpolator(node.branch_length)
    # add the root sequence LH and return
    if self.aln:
        LH += self.gtr.sequence_logLH(self.tree.root.cseq, pattern_multiplicity=self.multiplicity)
    return LH
def _match_device(self):
"""If the LED is connected to an input device,
associate the objects."""
for device in self.manager.all_devices:
if (device.get_char_device_path() ==
self._character_device_path):
self.device = device
device.leds.append(self)
break | If the LED is connected to an input device,
associate the objects. | Below is the instruction that describes the task:
### Input:
If the LED is connected to an input device,
associate the objects.
### Response:
def _match_device(self):
"""If the LED is connected to an input device,
associate the objects."""
for device in self.manager.all_devices:
if (device.get_char_device_path() ==
self._character_device_path):
self.device = device
device.leds.append(self)
break |
def comb_jit(N, k):
"""
Numba jitted function that computes N choose k. Return `0` if the
outcome exceeds the maximum value of `np.intp` or if N < 0, k < 0,
or k > N.
Parameters
----------
N : scalar(int)
k : scalar(int)
Returns
-------
val : scalar(int)
"""
# From scipy.special._comb_int_long
# github.com/scipy/scipy/blob/v1.0.0/scipy/special/_comb.pyx
INTP_MAX = np.iinfo(np.intp).max
if N < 0 or k < 0 or k > N:
return 0
if k == 0:
return 1
if k == 1:
return N
if N == INTP_MAX:
return 0
M = N + 1
nterms = min(k, N - k)
val = 1
for j in range(1, nterms + 1):
# Overflow check
if val > INTP_MAX // (M - j):
return 0
val *= M - j
val //= j
return val | Numba jitted function that computes N choose k. Return `0` if the
outcome exceeds the maximum value of `np.intp` or if N < 0, k < 0,
or k > N.
Parameters
----------
N : scalar(int)
k : scalar(int)
Returns
-------
val : scalar(int) | Below is the instruction that describes the task:
### Input:
Numba jitted function that computes N choose k. Return `0` if the
outcome exceeds the maximum value of `np.intp` or if N < 0, k < 0,
or k > N.
Parameters
----------
N : scalar(int)
k : scalar(int)
Returns
-------
val : scalar(int)
### Response:
def comb_jit(N, k):
    """
    Numba-jittable binomial coefficient: N choose k.

    Returns 0 when the arguments are out of range (N < 0, k < 0, k > N)
    or when the result would overflow ``np.intp``.

    Parameters
    ----------
    N : scalar(int)
    k : scalar(int)

    Returns
    -------
    val : scalar(int)
    """
    # Mirrors scipy.special._comb_int_long:
    # github.com/scipy/scipy/blob/v1.0.0/scipy/special/_comb.pyx
    limit = np.iinfo(np.intp).max
    if N < 0 or k < 0 or k > N:
        return 0
    if k == 0:
        return 1
    if k == 1:
        return N
    if N == limit:
        return 0
    # Use the smaller of k and N-k: C(N, k) == C(N, N-k).
    terms = k if k < N - k else N - k
    total = 1
    for step in range(1, terms + 1):
        factor = N + 1 - step
        # Bail out with 0 if the next multiply would exceed np.intp.
        if total > limit // factor:
            return 0
        total = (total * factor) // step
    return total
def tail(file_path, lines=10, encoding="utf-8",
printed=True, errors='strict'):
"""
A really silly way to get the last N lines, defaults to 10.
:param file_path: Path to file to read
:param lines: Number of lines to read in
:param encoding: defaults to utf-8 to decode as, will fail on binary
:param printed: Automatically print the lines instead of returning it
:param errors: Decoding errors: 'strict', 'ignore' or 'replace'
:return: if printed is false, the lines are returned as a list
"""
data = deque()
with open(file_path, "rb") as f:
for line in f:
if python_version >= (2, 7):
data.append(line.decode(encoding, errors=errors))
else:
data.append(line.decode(encoding))
if len(data) > lines:
data.popleft()
if printed:
print("".join(data))
else:
return data | A really silly way to get the last N lines, defaults to 10.
:param file_path: Path to file to read
:param lines: Number of lines to read in
:param encoding: defaults to utf-8 to decode as, will fail on binary
:param printed: Automatically print the lines instead of returning it
:param errors: Decoding errors: 'strict', 'ignore' or 'replace'
:return: if printed is false, the lines are returned as a list | Below is the the instruction that describes the task:
### Input:
A really silly way to get the last N lines, defaults to 10.
:param file_path: Path to file to read
:param lines: Number of lines to read in
:param encoding: defaults to utf-8 to decode as, will fail on binary
:param printed: Automatically print the lines instead of returning it
:param errors: Decoding errors: 'strict', 'ignore' or 'replace'
:return: if printed is false, the lines are returned as a list
### Response:
def tail(file_path, lines=10, encoding="utf-8",
         printed=True, errors='strict'):
    """
    Return (or print) the last N lines of a file, tail(1)-style; defaults to 10.

    :param file_path: Path to file to read
    :param lines: Number of trailing lines to keep
    :param encoding: defaults to utf-8 to decode as, will fail on binary
    :param printed: Automatically print the lines instead of returning them
    :param errors: Decoding errors: 'strict', 'ignore' or 'replace'
    :return: when ``printed`` is False, a deque of the trailing lines
             (iterable and indexable like a list); otherwise None
    """
    # A bounded deque keeps only the newest `lines` entries, replacing the
    # manual append/popleft dance and never holding the whole file at once.
    data = deque(maxlen=lines)
    with open(file_path, "rb") as f:
        for raw_line in f:
            # Positional decode args work on every supported interpreter,
            # so the old `python_version` branch (which silently dropped
            # the `errors` policy on ancient Pythons) is gone.
            data.append(raw_line.decode(encoding, errors))
    if printed:
        print("".join(data))
    else:
        return data
def create_manage_py(self, apps):
"""Creates manage.py file, with a given list of installed apps.
:param list apps:
"""
self.logger.debug('Creating manage.py ...')
with open(self._get_manage_py_path(), mode='w') as f:
south_migration_modules = []
for app in apps:
south_migration_modules.append("'%(app)s': '%(app)s.south_migrations'" % {'app': app})
f.write(MANAGE_PY % {
'apps_available': "', '".join(apps),
'apps_path': self.apps_path,
'south_migration_modules': ", ".join(south_migration_modules)
}) | Creates manage.py file, with a given list of installed apps.
:param list apps: | Below is the the instruction that describes the task:
### Input:
Creates manage.py file, with a given list of installed apps.
:param list apps:
### Response:
def create_manage_py(self, apps):
    """Creates manage.py file, with a given list of installed apps.

    Renders the module-level ``MANAGE_PY`` template, filling in the
    quoted app list, the apps path, and one South migration-module
    mapping entry per app, then writes the result to the manage.py path.

    :param list apps: names of apps to install into the template
    """
    self.logger.debug('Creating manage.py ...')
    with open(self._get_manage_py_path(), mode='w') as f:
        south_migration_modules = []
        for app in apps:
            # Each app maps to its `<app>.south_migrations` package.
            south_migration_modules.append("'%(app)s': '%(app)s.south_migrations'" % {'app': app})
        f.write(MANAGE_PY % {
            'apps_available': "', '".join(apps),
            'apps_path': self.apps_path,
            'south_migration_modules': ", ".join(south_migration_modules)
        })
def get_sdc_by_guid(self, guid):
"""
Get ScaleIO SDC object by its id
:param name: guid of SDC
:return: ScaleIO SDC object
:raise KeyError: No SDC with specified id found
:rtype: SDC object
"""
for sdc in self.sdc:
if sdc.guid == guid:
return sdc
raise KeyError("SDC with that GUID not found") | Get ScaleIO SDC object by its id
:param name: guid of SDC
:return: ScaleIO SDC object
:raise KeyError: No SDC with specified id found
:rtype: SDC object | Below is the the instruction that describes the task:
### Input:
Get ScaleIO SDC object by its id
:param name: guid of SDC
:return: ScaleIO SDC object
:raise KeyError: No SDC with specified id found
:rtype: SDC object
### Response:
def get_sdc_by_guid(self, guid):
    """
    Get ScaleIO SDC object by its GUID
    :param guid: GUID of the SDC to look up
    :return: ScaleIO SDC object
    :raise KeyError: No SDC with specified GUID found
    :rtype: SDC object
    """
    # Linear scan over the cluster's known SDCs; GUIDs are unique, so the
    # first match is the only match.
    for sdc in self.sdc:
        if sdc.guid == guid:
            return sdc
    raise KeyError("SDC with that GUID not found")
def get_curie_prefix(self, uri):
''' Return the CURIE's prefix:'''
for key, value in self.uri_map.items():
if uri.startswith(key):
return value
return None | Return the CURIE's prefix: | Below is the the instruction that describes the task:
### Input:
Return the CURIE's prefix:
### Response:
def get_curie_prefix(self, uri):
    ''' Return the CURIE's prefix:'''
    # First registered namespace that the URI falls under wins; None when
    # no namespace matches.
    return next(
        (prefix for namespace, prefix in self.uri_map.items()
         if uri.startswith(namespace)),
        None)
def run_python_script_in_terminal(fname, wdir, args, interact,
debug, python_args, executable=None):
"""
Run Python script in an external system terminal.
:str wdir: working directory, may be empty.
"""
if executable is None:
executable = get_python_executable()
# If fname or python_exe contains spaces, it can't be ran on Windows, so we
# have to enclose them in quotes. Also wdir can come with / as os.sep, so
# we need to take care of it.
if os.name == 'nt':
fname = '"' + fname + '"'
wdir = wdir.replace('/', '\\')
executable = '"' + executable + '"'
p_args = [executable]
p_args += get_python_args(fname, python_args, interact, debug, args)
if os.name == 'nt':
cmd = 'start cmd.exe /c "cd %s && ' % wdir + ' '.join(p_args) + '"'
# Command line and cwd have to be converted to the filesystem
# encoding before passing them to subprocess, but only for
# Python 2.
# See https://bugs.python.org/issue1759845#msg74142 and Issue 1856
if PY2:
cmd = encoding.to_fs_from_unicode(cmd)
wdir = encoding.to_fs_from_unicode(wdir)
try:
run_shell_command(cmd, cwd=wdir)
except WindowsError:
from qtpy.QtWidgets import QMessageBox
from spyder.config.base import _
QMessageBox.critical(None, _('Run'),
_("It was not possible to run this file in "
"an external terminal"),
QMessageBox.Ok)
elif os.name == 'posix':
programs = [{'cmd': 'gnome-terminal',
'wdir-option': '--working-directory',
'execute-option': '-x'},
{'cmd': 'konsole',
'wdir-option': '--workdir',
'execute-option': '-e'},
{'cmd': 'xfce4-terminal',
'wdir-option': '--working-directory',
'execute-option': '-x'},
{'cmd': 'xterm',
'wdir-option': None,
'execute-option': '-e'},]
for program in programs:
if is_program_installed(program['cmd']):
arglist = []
if program['wdir-option'] and wdir:
arglist += [program['wdir-option'], wdir]
arglist.append(program['execute-option'])
arglist += p_args
if wdir:
run_program(program['cmd'], arglist, cwd=wdir)
else:
run_program(program['cmd'], arglist)
return
# TODO: Add a fallback to OSX
else:
raise NotImplementedError | Run Python script in an external system terminal.
:str wdir: working directory, may be empty. | Below is the the instruction that describes the task:
### Input:
Run Python script in an external system terminal.
:str wdir: working directory, may be empty.
### Response:
def run_python_script_in_terminal(fname, wdir, args, interact,
                                  debug, python_args, executable=None):
    """
    Run Python script in an external system terminal.

    :str wdir: working directory, may be empty.

    On Windows the command is launched through ``start cmd.exe``; on POSIX
    the first installed emulator from a known list (gnome-terminal,
    konsole, xfce4-terminal, xterm) is used; other platforms raise
    NotImplementedError.

    :param fname: path of the script to run
    :param args: command-line arguments for the script (forwarded to
        ``get_python_args``)
    :param interact: forwarded to ``get_python_args``
    :param debug: forwarded to ``get_python_args``
    :param python_args: extra arguments for the interpreter itself
    :param executable: Python executable; defaults to the detected one
    """
    if executable is None:
        executable = get_python_executable()
    # If fname or python_exe contains spaces, it can't be ran on Windows, so we
    # have to enclose them in quotes. Also wdir can come with / as os.sep, so
    # we need to take care of it.
    if os.name == 'nt':
        fname = '"' + fname + '"'
        wdir = wdir.replace('/', '\\')
        executable = '"' + executable + '"'
    p_args = [executable]
    p_args += get_python_args(fname, python_args, interact, debug, args)
    if os.name == 'nt':
        cmd = 'start cmd.exe /c "cd %s && ' % wdir + ' '.join(p_args) + '"'
        # Command line and cwd have to be converted to the filesystem
        # encoding before passing them to subprocess, but only for
        # Python 2.
        # See https://bugs.python.org/issue1759845#msg74142 and Issue 1856
        if PY2:
            cmd = encoding.to_fs_from_unicode(cmd)
            wdir = encoding.to_fs_from_unicode(wdir)
        try:
            run_shell_command(cmd, cwd=wdir)
        except WindowsError:
            from qtpy.QtWidgets import QMessageBox
            from spyder.config.base import _
            QMessageBox.critical(None, _('Run'),
                                 _("It was not possible to run this file in "
                                   "an external terminal"),
                                 QMessageBox.Ok)
    elif os.name == 'posix':
        # Each entry: terminal binary, its working-directory flag (None if
        # unsupported), and the flag that introduces the command to execute.
        programs = [{'cmd': 'gnome-terminal',
                     'wdir-option': '--working-directory',
                     'execute-option': '-x'},
                    {'cmd': 'konsole',
                     'wdir-option': '--workdir',
                     'execute-option': '-e'},
                    {'cmd': 'xfce4-terminal',
                     'wdir-option': '--working-directory',
                     'execute-option': '-x'},
                    {'cmd': 'xterm',
                     'wdir-option': None,
                     'execute-option': '-e'},]
        for program in programs:
            if is_program_installed(program['cmd']):
                arglist = []
                if program['wdir-option'] and wdir:
                    arglist += [program['wdir-option'], wdir]
                arglist.append(program['execute-option'])
                arglist += p_args
                if wdir:
                    run_program(program['cmd'], arglist, cwd=wdir)
                else:
                    run_program(program['cmd'], arglist)
                return
        # TODO: Add a fallback to OSX
    else:
        raise NotImplementedError
def prepend_www(url):
"""Changes google.com to www.google.com"""
parsed = urlparse(url)
if parsed.netloc.split(".")[0] != "www":
return parsed.scheme + "://www." + parsed.netloc + parsed.path
else:
return url | Changes google.com to www.google.com | Below is the the instruction that describes the task:
### Input:
Changes google.com to www.google.com
### Response:
def prepend_www(url):
    """Changes google.com to www.google.com

    Rebuilds the URL via ``ParseResult._replace``/``geturl`` so the query
    string, fragment and params survive (the previous version rebuilt the
    URL from scheme+netloc+path only, silently dropping them). Expects a
    scheme-qualified URL; returns the input unchanged when the host
    already starts with ``www``.
    """
    parsed = urlparse(url)
    if parsed.netloc.split(".")[0] != "www":
        return parsed._replace(netloc="www." + parsed.netloc).geturl()
    else:
        return url
def get_ip_scope_hosts( scopeId, auth, url):
"""
Function requires input of scope ID and returns list of allocated IP address for the specified scope
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:param scopeId: Interger of teh desired scope id
:return: list of dictionary objects where each element of the list represents a single host assigned to the IP scope
:rtype: list
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.termaccess import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> scope_id = get_scope_id('10.50.0.0/24', auth.creds, auth.url)
>>> ip_scope_hosts = get_ip_scope_hosts(scope_id, auth.creds, auth.url)
>>> assert type(ip_scope_hosts) is list
>>> assert 'name' in ip_scope_hosts[0]
>>> assert 'description' in ip_scope_hosts[0]
>>> assert 'ip' in ip_scope_hosts[0]
>>> assert 'id' in ip_scope_hosts[0]
"""
get_ip_scope_url = "/imcrs/res/access/assignedIpScope/ip?size=10000&ipScopeId="+str(scopeId)
f_url = url + get_ip_scope_url
r = requests.get(f_url, auth=auth, headers=HEADERS) # creates the URL using the payload variable as the contents
try:
if r.status_code == 200:
ipscopelist = (json.loads(r.text))
if ipscopelist == {}:
return ipscopelist
else: ipscopelist = ipscopelist['assignedIpInfo']
if type(ipscopelist) is dict:
ipscope = []
ipscope.append(ipscopelist)
return ipscope
return ipscopelist
except requests.exceptions.RequestException as e:
return "Error:\n" + str(e) + " get_ip_scope: An Error has occured" | Function requires input of scope ID and returns list of allocated IP address for the specified scope
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:param scopeId: Interger of teh desired scope id
:return: list of dictionary objects where each element of the list represents a single host assigned to the IP scope
:rtype: list
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.termaccess import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> scope_id = get_scope_id('10.50.0.0/24', auth.creds, auth.url)
>>> ip_scope_hosts = get_ip_scope_hosts(scope_id, auth.creds, auth.url)
>>> assert type(ip_scope_hosts) is list
>>> assert 'name' in ip_scope_hosts[0]
>>> assert 'description' in ip_scope_hosts[0]
>>> assert 'ip' in ip_scope_hosts[0]
>>> assert 'id' in ip_scope_hosts[0] | Below is the the instruction that describes the task:
### Input:
Function requires input of scope ID and returns list of allocated IP address for the specified scope
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:param scopeId: Interger of teh desired scope id
:return: list of dictionary objects where each element of the list represents a single host assigned to the IP scope
:rtype: list
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.termaccess import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> scope_id = get_scope_id('10.50.0.0/24', auth.creds, auth.url)
>>> ip_scope_hosts = get_ip_scope_hosts(scope_id, auth.creds, auth.url)
>>> assert type(ip_scope_hosts) is list
>>> assert 'name' in ip_scope_hosts[0]
>>> assert 'description' in ip_scope_hosts[0]
>>> assert 'ip' in ip_scope_hosts[0]
>>> assert 'id' in ip_scope_hosts[0]
### Response:
def get_ip_scope_hosts(scopeId, auth, url):
    """
    Function requires input of scope ID and returns list of allocated IP address for the specified scope

    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    :param scopeId: Integer of the desired scope id
    :return: list of dictionary objects where each element of the list represents a single host
        assigned to the IP scope (an empty dict is returned when the scope has no hosts)
    :rtype: list

    >>> from pyhpeimc.auth import *
    >>> from pyhpeimc.plat.termaccess import *
    >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
    >>> scope_id = get_scope_id('10.50.0.0/24', auth.creds, auth.url)
    >>> ip_scope_hosts = get_ip_scope_hosts(scope_id, auth.creds, auth.url)
    >>> assert type(ip_scope_hosts) is list
    >>> assert 'name' in ip_scope_hosts[0]
    >>> assert 'description' in ip_scope_hosts[0]
    >>> assert 'ip' in ip_scope_hosts[0]
    >>> assert 'id' in ip_scope_hosts[0]
    """
    f_url = url + "/imcrs/res/access/assignedIpScope/ip?size=10000&ipScopeId=" + str(scopeId)
    try:
        # The HTTP request must be inside the try block: requests.get() is the
        # only call here that raises requests.exceptions.RequestException, yet
        # the original issued it before entering the try, so the handler below
        # could never fire.
        r = requests.get(f_url, auth=auth, headers=HEADERS)
        if r.status_code == 200:
            ipscopelist = json.loads(r.text)
            if ipscopelist == {}:
                # Scope exists but has no assigned hosts.
                return ipscopelist
            ipscopelist = ipscopelist['assignedIpInfo']
            if type(ipscopelist) is dict:
                # A single assigned host is returned by IMC as a bare dict;
                # normalise to a one-element list for a consistent return type.
                return [ipscopelist]
            return ipscopelist
        # NOTE(review): non-200 responses fall through and return None, as in
        # the original -- confirm callers tolerate this before changing it.
    except requests.exceptions.RequestException as e:
        return "Error:\n" + str(e) + " get_ip_scope: An Error has occured"
def reset_for_retry(self, output_writer):
"""Reset self for shard retry.
Args:
output_writer: new output writer that contains new output files.
"""
self.input_reader = self.initial_input_reader
self.slice_id = 0
self.retries += 1
self.output_writer = output_writer
self.handler = self.mapreduce_spec.mapper.handler | Reset self for shard retry.
Args:
output_writer: new output writer that contains new output files. | Below is the the instruction that describes the task:
### Input:
Reset self for shard retry.
Args:
output_writer: new output writer that contains new output files.
### Response:
def reset_for_retry(self, output_writer):
    """Reset shard state so the shard can be retried from scratch.

    Rewinds the input reader to its initial snapshot, clears slice progress,
    bumps the retry counter, and installs the fresh output writer and the
    mapper handler from the mapreduce spec.

    Args:
      output_writer: new output writer that contains new output files.
    """
    self.retries += 1
    self.slice_id = 0
    self.input_reader = self.initial_input_reader
    self.output_writer = output_writer
    self.handler = self.mapreduce_spec.mapper.handler
def compute_master(context):
"""Computes master key from transformed key and master seed.
Used in payload decryption."""
# combine the transformed key with the header master seed to find the master_key
master_key = hashlib.sha256(
context._.header.value.dynamic_header.master_seed.data +
context.transformed_key).digest()
return master_key | Computes master key from transformed key and master seed.
Used in payload decryption. | Below is the the instruction that describes the task:
### Input:
Computes master key from transformed key and master seed.
Used in payload decryption.
### Response:
def compute_master(context):
    """Derive the master key used for payload decryption.

    The header's master seed is concatenated with the already transformed
    key; the SHA-256 digest of that concatenation is the master key.
    """
    master_seed = context._.header.value.dynamic_header.master_seed.data
    hasher = hashlib.sha256(master_seed + context.transformed_key)
    return hasher.digest()
def Pad(px=None, percent=None, pad_mode="constant", pad_cval=0, keep_size=True, sample_independently=True,
name=None, deterministic=False, random_state=None):
"""
Augmenter that pads images, i.e. adds columns/rows to them.
dtype support::
See ``imgaug.augmenters.size.CropAndPad``.
Parameters
----------
px : None or int or imgaug.parameters.StochasticParameter or tuple, optional
The number of pixels to pad on each side of the image.
Either this or the parameter `percent` may be set, not both at the same
time.
* If None, then pixel-based padding will not be used.
* If int, then that exact number of pixels will always be padded.
* If StochasticParameter, then that parameter will be used for each
image. Four samples will be drawn per image (top, right, bottom,
left).
* If a tuple of two ints with values a and b, then each side will
be padded by a random amount in the range ``a <= x <= b``.
``x`` is sampled per image side.
* If a tuple of four entries, then the entries represent top, right,
bottom, left. Each entry may be a single integer (always pad by
exactly that value), a tuple of two ints ``a`` and ``b`` (pad by
an amount ``a <= x <= b``), a list of ints (pad by a random value
that is contained in the list) or a StochasticParameter (sample
the amount to pad from that parameter).
percent : None or int or float or imgaug.parameters.StochasticParameter \
or tuple, optional
The number of pixels to pad on each side of the image given
*in percent* of the image height/width.
E.g. if this is set to 0.1, the augmenter will always add 10 percent
of the image's height to the top, 10 percent of the width to the right,
10 percent of the height at the bottom and 10 percent of the width to
the left. Either this or the parameter `px` may be set, not both at the
same time.
* If None, then percent-based padding will not be used.
* If int, then expected to be 0 (no padding).
* If float, then that percentage will always be padded.
* If StochasticParameter, then that parameter will be used for each
image. Four samples will be drawn per image (top, right, bottom,
left).
* If a tuple of two floats with values a and b, then each side will
be padded by a random percentage in the range ``a <= x <= b``.
``x`` is sampled per image side.
* If a tuple of four entries, then the entries represent top, right,
bottom, left. Each entry may be a single float (always pad by
exactly that percent value), a tuple of two floats ``a`` and ``b``
(pad by a percentage ``a <= x <= b``), a list of floats (pad by a
random value that is contained in the list) or a
StochasticParameter (sample the percentage to pad from that
parameter).
pad_mode : imgaug.ALL or str or list of str or \
imgaug.parameters.StochasticParameter, optional
Padding mode to use. The available modes match the numpy padding modes,
i.e. ``constant``, ``edge``, ``linear_ramp``, ``maximum``, ``median``,
``minimum``, ``reflect``, ``symmetric``, ``wrap``. The modes
``constant`` and ``linear_ramp`` use extra values, which are provided
by ``pad_cval`` when necessary. See :func:`imgaug.imgaug.pad` for
more details.
* If ``imgaug.ALL``, then a random mode from all available modes
will be sampled per image.
* If a string, it will be used as the pad mode for all images.
* If a list of strings, a random one of these will be sampled per
image and used as the mode.
* If StochasticParameter, a random mode will be sampled from this
parameter per image.
pad_cval : number or tuple of number list of number or \
imgaug.parameters.StochasticParameter, optional
The constant value to use if the pad mode is ``constant`` or the end
value to use if the mode is ``linear_ramp``.
See :func:`imgaug.imgaug.pad` for more details.
* If number, then that value will be used.
* If a tuple of two numbers and at least one of them is a float,
then a random number will be sampled from the continuous range
``a <= x <= b`` and used as the value. If both numbers are
integers, the range is discrete.
* If a list of number, then a random value will be chosen from the
elements of the list and used as the value.
* If StochasticParameter, a random value will be sampled from that
parameter per image.
keep_size : bool, optional
After padding, the result image will usually have a different
height/width compared to the original input image. If this parameter is
set to True, then the padded image will be resized to the input image's
size, i.e. the augmenter's output shape is always identical to the
input shape.
sample_independently : bool, optional
If False AND the values for `px`/`percent` result in exactly one
probability distribution for the amount to pad, only one single value
will be sampled from that probability distribution and used for all
sides. I.e. the pad amount then is the same for all sides.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.Pad(px=(0, 10))
pads each side by a random value from the range 0px to 10px (the value
is sampled per side). The added rows/columns are filled with black pixels.
>>> aug = iaa.Pad(px=(0, 10), sample_independently=False)
samples one value v from the discrete range ``[0..10]`` and pads all sides
by ``v`` pixels.
>>> aug = iaa.Pad(px=(0, 10), keep_size=False)
pads each side by a random value from the range 0px to 10px (the value
is sampled per side). After padding, the images are NOT resized to their
original size (i.e. the images may end up having different heights/widths).
>>> aug = iaa.Pad(px=((0, 10), (0, 5), (0, 10), (0, 5)))
pads the top and bottom by a random value from the range 0px to 10px
and the left and right by a random value in the range 0px to 5px.
>>> aug = iaa.Pad(percent=(0, 0.1))
pads each side by a random value from the range 0 percent to
10 percent. (Percent with respect to the side's size, e.g. for the
top side it uses the image's height.)
>>> aug = iaa.Pad(percent=([0.05, 0.1], [0.05, 0.1], [0.05, 0.1], [0.05, 0.1]))
pads each side by either 5 percent or 10 percent.
>>> aug = iaa.Pad(px=(0, 10), pad_mode="edge")
pads each side by a random value from the range 0px to 10px (the values
are sampled per side). The padding uses the ``edge`` mode from numpy's
pad function.
>>> aug = iaa.Pad(px=(0, 10), pad_mode=["constant", "edge"])
pads each side by a random value from the range 0px to 10px (the values
are sampled per side). The padding uses randomly either the ``constant``
or ``edge`` mode from numpy's pad function.
>>> aug = iaa.Pad(px=(0, 10), pad_mode=ia.ALL, pad_cval=(0, 255))
pads each side by a random value from the range 0px to 10px (the values
are sampled per side). It uses a random mode for numpy's pad function.
If the mode is ``constant`` or ``linear_ramp``, it samples a random value
``v`` from the range ``[0, 255]`` and uses that as the constant
value (``mode=constant``) or end value (``mode=linear_ramp``).
"""
def recursive_validate(v):
if v is None:
return v
elif ia.is_single_number(v):
ia.do_assert(v >= 0)
return v
elif isinstance(v, iap.StochasticParameter):
return v
elif isinstance(v, tuple):
return tuple([recursive_validate(v_) for v_ in v])
elif isinstance(v, list):
return [recursive_validate(v_) for v_ in v]
else:
raise Exception("Expected None or int or float or StochasticParameter or list or tuple, got %s." % (
type(v),))
px = recursive_validate(px)
percent = recursive_validate(percent)
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
aug = CropAndPad(
px=px, percent=percent,
pad_mode=pad_mode, pad_cval=pad_cval,
keep_size=keep_size, sample_independently=sample_independently,
name=name, deterministic=deterministic, random_state=random_state
)
return aug | Augmenter that pads images, i.e. adds columns/rows to them.
dtype support::
See ``imgaug.augmenters.size.CropAndPad``.
Parameters
----------
px : None or int or imgaug.parameters.StochasticParameter or tuple, optional
The number of pixels to pad on each side of the image.
Either this or the parameter `percent` may be set, not both at the same
time.
* If None, then pixel-based padding will not be used.
* If int, then that exact number of pixels will always be padded.
* If StochasticParameter, then that parameter will be used for each
image. Four samples will be drawn per image (top, right, bottom,
left).
* If a tuple of two ints with values a and b, then each side will
be padded by a random amount in the range ``a <= x <= b``.
``x`` is sampled per image side.
* If a tuple of four entries, then the entries represent top, right,
bottom, left. Each entry may be a single integer (always pad by
exactly that value), a tuple of two ints ``a`` and ``b`` (pad by
an amount ``a <= x <= b``), a list of ints (pad by a random value
that is contained in the list) or a StochasticParameter (sample
the amount to pad from that parameter).
percent : None or int or float or imgaug.parameters.StochasticParameter \
or tuple, optional
The number of pixels to pad on each side of the image given
*in percent* of the image height/width.
E.g. if this is set to 0.1, the augmenter will always add 10 percent
of the image's height to the top, 10 percent of the width to the right,
10 percent of the height at the bottom and 10 percent of the width to
the left. Either this or the parameter `px` may be set, not both at the
same time.
* If None, then percent-based padding will not be used.
* If int, then expected to be 0 (no padding).
* If float, then that percentage will always be padded.
* If StochasticParameter, then that parameter will be used for each
image. Four samples will be drawn per image (top, right, bottom,
left).
* If a tuple of two floats with values a and b, then each side will
be padded by a random percentage in the range ``a <= x <= b``.
``x`` is sampled per image side.
* If a tuple of four entries, then the entries represent top, right,
bottom, left. Each entry may be a single float (always pad by
exactly that percent value), a tuple of two floats ``a`` and ``b``
(pad by a percentage ``a <= x <= b``), a list of floats (pad by a
random value that is contained in the list) or a
StochasticParameter (sample the percentage to pad from that
parameter).
pad_mode : imgaug.ALL or str or list of str or \
imgaug.parameters.StochasticParameter, optional
Padding mode to use. The available modes match the numpy padding modes,
i.e. ``constant``, ``edge``, ``linear_ramp``, ``maximum``, ``median``,
``minimum``, ``reflect``, ``symmetric``, ``wrap``. The modes
``constant`` and ``linear_ramp`` use extra values, which are provided
by ``pad_cval`` when necessary. See :func:`imgaug.imgaug.pad` for
more details.
* If ``imgaug.ALL``, then a random mode from all available modes
will be sampled per image.
* If a string, it will be used as the pad mode for all images.
* If a list of strings, a random one of these will be sampled per
image and used as the mode.
* If StochasticParameter, a random mode will be sampled from this
parameter per image.
pad_cval : number or tuple of number list of number or \
imgaug.parameters.StochasticParameter, optional
The constant value to use if the pad mode is ``constant`` or the end
value to use if the mode is ``linear_ramp``.
See :func:`imgaug.imgaug.pad` for more details.
* If number, then that value will be used.
* If a tuple of two numbers and at least one of them is a float,
then a random number will be sampled from the continuous range
``a <= x <= b`` and used as the value. If both numbers are
integers, the range is discrete.
* If a list of number, then a random value will be chosen from the
elements of the list and used as the value.
* If StochasticParameter, a random value will be sampled from that
parameter per image.
keep_size : bool, optional
After padding, the result image will usually have a different
height/width compared to the original input image. If this parameter is
set to True, then the padded image will be resized to the input image's
size, i.e. the augmenter's output shape is always identical to the
input shape.
sample_independently : bool, optional
If False AND the values for `px`/`percent` result in exactly one
probability distribution for the amount to pad, only one single value
will be sampled from that probability distribution and used for all
sides. I.e. the pad amount then is the same for all sides.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.Pad(px=(0, 10))
pads each side by a random value from the range 0px to 10px (the value
is sampled per side). The added rows/columns are filled with black pixels.
>>> aug = iaa.Pad(px=(0, 10), sample_independently=False)
samples one value v from the discrete range ``[0..10]`` and pads all sides
by ``v`` pixels.
>>> aug = iaa.Pad(px=(0, 10), keep_size=False)
pads each side by a random value from the range 0px to 10px (the value
is sampled per side). After padding, the images are NOT resized to their
original size (i.e. the images may end up having different heights/widths).
>>> aug = iaa.Pad(px=((0, 10), (0, 5), (0, 10), (0, 5)))
pads the top and bottom by a random value from the range 0px to 10px
and the left and right by a random value in the range 0px to 5px.
>>> aug = iaa.Pad(percent=(0, 0.1))
pads each side by a random value from the range 0 percent to
10 percent. (Percent with respect to the side's size, e.g. for the
top side it uses the image's height.)
>>> aug = iaa.Pad(percent=([0.05, 0.1], [0.05, 0.1], [0.05, 0.1], [0.05, 0.1]))
pads each side by either 5 percent or 10 percent.
>>> aug = iaa.Pad(px=(0, 10), pad_mode="edge")
pads each side by a random value from the range 0px to 10px (the values
are sampled per side). The padding uses the ``edge`` mode from numpy's
pad function.
>>> aug = iaa.Pad(px=(0, 10), pad_mode=["constant", "edge"])
pads each side by a random value from the range 0px to 10px (the values
are sampled per side). The padding uses randomly either the ``constant``
or ``edge`` mode from numpy's pad function.
>>> aug = iaa.Pad(px=(0, 10), pad_mode=ia.ALL, pad_cval=(0, 255))
pads each side by a random value from the range 0px to 10px (the values
are sampled per side). It uses a random mode for numpy's pad function.
If the mode is ``constant`` or ``linear_ramp``, it samples a random value
``v`` from the range ``[0, 255]`` and uses that as the constant
value (``mode=constant``) or end value (``mode=linear_ramp``). | Below is the the instruction that describes the task:
### Input:
Augmenter that pads images, i.e. adds columns/rows to them.
dtype support::
See ``imgaug.augmenters.size.CropAndPad``.
Parameters
----------
px : None or int or imgaug.parameters.StochasticParameter or tuple, optional
The number of pixels to pad on each side of the image.
Either this or the parameter `percent` may be set, not both at the same
time.
* If None, then pixel-based padding will not be used.
* If int, then that exact number of pixels will always be padded.
* If StochasticParameter, then that parameter will be used for each
image. Four samples will be drawn per image (top, right, bottom,
left).
* If a tuple of two ints with values a and b, then each side will
be padded by a random amount in the range ``a <= x <= b``.
``x`` is sampled per image side.
* If a tuple of four entries, then the entries represent top, right,
bottom, left. Each entry may be a single integer (always pad by
exactly that value), a tuple of two ints ``a`` and ``b`` (pad by
an amount ``a <= x <= b``), a list of ints (pad by a random value
that is contained in the list) or a StochasticParameter (sample
the amount to pad from that parameter).
percent : None or int or float or imgaug.parameters.StochasticParameter \
or tuple, optional
The number of pixels to pad on each side of the image given
*in percent* of the image height/width.
E.g. if this is set to 0.1, the augmenter will always add 10 percent
of the image's height to the top, 10 percent of the width to the right,
10 percent of the height at the bottom and 10 percent of the width to
the left. Either this or the parameter `px` may be set, not both at the
same time.
* If None, then percent-based padding will not be used.
* If int, then expected to be 0 (no padding).
* If float, then that percentage will always be padded.
* If StochasticParameter, then that parameter will be used for each
image. Four samples will be drawn per image (top, right, bottom,
left).
* If a tuple of two floats with values a and b, then each side will
be padded by a random percentage in the range ``a <= x <= b``.
``x`` is sampled per image side.
* If a tuple of four entries, then the entries represent top, right,
bottom, left. Each entry may be a single float (always pad by
exactly that percent value), a tuple of two floats ``a`` and ``b``
(pad by a percentage ``a <= x <= b``), a list of floats (pad by a
random value that is contained in the list) or a
StochasticParameter (sample the percentage to pad from that
parameter).
pad_mode : imgaug.ALL or str or list of str or \
imgaug.parameters.StochasticParameter, optional
Padding mode to use. The available modes match the numpy padding modes,
i.e. ``constant``, ``edge``, ``linear_ramp``, ``maximum``, ``median``,
``minimum``, ``reflect``, ``symmetric``, ``wrap``. The modes
``constant`` and ``linear_ramp`` use extra values, which are provided
by ``pad_cval`` when necessary. See :func:`imgaug.imgaug.pad` for
more details.
* If ``imgaug.ALL``, then a random mode from all available modes
will be sampled per image.
* If a string, it will be used as the pad mode for all images.
* If a list of strings, a random one of these will be sampled per
image and used as the mode.
* If StochasticParameter, a random mode will be sampled from this
parameter per image.
pad_cval : number or tuple of number list of number or \
imgaug.parameters.StochasticParameter, optional
The constant value to use if the pad mode is ``constant`` or the end
value to use if the mode is ``linear_ramp``.
See :func:`imgaug.imgaug.pad` for more details.
* If number, then that value will be used.
* If a tuple of two numbers and at least one of them is a float,
then a random number will be sampled from the continuous range
``a <= x <= b`` and used as the value. If both numbers are
integers, the range is discrete.
* If a list of number, then a random value will be chosen from the
elements of the list and used as the value.
* If StochasticParameter, a random value will be sampled from that
parameter per image.
keep_size : bool, optional
After padding, the result image will usually have a different
height/width compared to the original input image. If this parameter is
set to True, then the padded image will be resized to the input image's
size, i.e. the augmenter's output shape is always identical to the
input shape.
sample_independently : bool, optional
If False AND the values for `px`/`percent` result in exactly one
probability distribution for the amount to pad, only one single value
will be sampled from that probability distribution and used for all
sides. I.e. the pad amount then is the same for all sides.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.Pad(px=(0, 10))
pads each side by a random value from the range 0px to 10px (the value
is sampled per side). The added rows/columns are filled with black pixels.
>>> aug = iaa.Pad(px=(0, 10), sample_independently=False)
samples one value v from the discrete range ``[0..10]`` and pads all sides
by ``v`` pixels.
>>> aug = iaa.Pad(px=(0, 10), keep_size=False)
pads each side by a random value from the range 0px to 10px (the value
is sampled per side). After padding, the images are NOT resized to their
original size (i.e. the images may end up having different heights/widths).
>>> aug = iaa.Pad(px=((0, 10), (0, 5), (0, 10), (0, 5)))
pads the top and bottom by a random value from the range 0px to 10px
and the left and right by a random value in the range 0px to 5px.
>>> aug = iaa.Pad(percent=(0, 0.1))
pads each side by a random value from the range 0 percent to
10 percent. (Percent with respect to the side's size, e.g. for the
top side it uses the image's height.)
>>> aug = iaa.Pad(percent=([0.05, 0.1], [0.05, 0.1], [0.05, 0.1], [0.05, 0.1]))
pads each side by either 5 percent or 10 percent.
>>> aug = iaa.Pad(px=(0, 10), pad_mode="edge")
pads each side by a random value from the range 0px to 10px (the values
are sampled per side). The padding uses the ``edge`` mode from numpy's
pad function.
>>> aug = iaa.Pad(px=(0, 10), pad_mode=["constant", "edge"])
pads each side by a random value from the range 0px to 10px (the values
are sampled per side). The padding uses randomly either the ``constant``
or ``edge`` mode from numpy's pad function.
>>> aug = iaa.Pad(px=(0, 10), pad_mode=ia.ALL, pad_cval=(0, 255))
pads each side by a random value from the range 0px to 10px (the values
are sampled per side). It uses a random mode for numpy's pad function.
If the mode is ``constant`` or ``linear_ramp``, it samples a random value
``v`` from the range ``[0, 255]`` and uses that as the constant
value (``mode=constant``) or end value (``mode=linear_ramp``).
### Response:
def Pad(px=None, percent=None, pad_mode="constant", pad_cval=0, keep_size=True, sample_independently=True,
        name=None, deterministic=False, random_state=None):
    """
    Augmenter that pads images, i.e. adds columns/rows to them.

    dtype support::

        See ``imgaug.augmenters.size.CropAndPad``.

    Parameters
    ----------
    px : None or int or imgaug.parameters.StochasticParameter or tuple, optional
        The number of pixels to pad on each side of the image.
        Either this or the parameter `percent` may be set, not both at the
        same time.

            * If None, then pixel-based padding will not be used.
            * If int, then that exact number of pixels will always be padded.
            * If StochasticParameter, then that parameter will be used for
              each image. Four samples will be drawn per image (top, right,
              bottom, left).
            * If a tuple of two ints with values a and b, then each side will
              be padded by a random amount in the range ``a <= x <= b``.
              ``x`` is sampled per image side.
            * If a tuple of four entries, then the entries represent top,
              right, bottom, left. Each entry may be a single integer (always
              pad by exactly that value), a tuple of two ints ``a`` and ``b``
              (pad by an amount ``a <= x <= b``), a list of ints (pad by a
              random value that is contained in the list) or a
              StochasticParameter (sample the amount to pad from that
              parameter).

    percent : None or int or float or imgaug.parameters.StochasticParameter or tuple, optional
        The number of pixels to pad on each side of the image given
        *in percent* of the image height/width.
        E.g. if this is set to 0.1, the augmenter will always add 10 percent
        of the image's height to the top, 10 percent of the width to the
        right, 10 percent of the height at the bottom and 10 percent of the
        width to the left. Either this or the parameter `px` may be set, not
        both at the same time.

            * If None, then percent-based padding will not be used.
            * If int, then expected to be 0 (no padding).
            * If float, then that percentage will always be padded.
            * If StochasticParameter, then that parameter will be used for
              each image. Four samples will be drawn per image (top, right,
              bottom, left).
            * If a tuple of two floats with values a and b, then each side
              will be padded by a random percentage in the range
              ``a <= x <= b``. ``x`` is sampled per image side.
            * If a tuple of four entries, then the entries represent top,
              right, bottom, left. Each entry may be a single float (always
              pad by exactly that percent value), a tuple of two floats ``a``
              and ``b`` (pad by a percentage ``a <= x <= b``), a list of
              floats (pad by a random value that is contained in the list) or
              a StochasticParameter (sample the percentage to pad from that
              parameter).

    pad_mode : imgaug.ALL or str or list of str or imgaug.parameters.StochasticParameter, optional
        Padding mode to use. The available modes match the numpy padding
        modes, i.e. ``constant``, ``edge``, ``linear_ramp``, ``maximum``,
        ``median``, ``minimum``, ``reflect``, ``symmetric``, ``wrap``. The
        modes ``constant`` and ``linear_ramp`` use extra values, which are
        provided by ``pad_cval`` when necessary. See :func:`imgaug.imgaug.pad`
        for more details.

            * If ``imgaug.ALL``, then a random mode from all available modes
              will be sampled per image.
            * If a string, it will be used as the pad mode for all images.
            * If a list of strings, a random one of these will be sampled per
              image and used as the mode.
            * If StochasticParameter, a random mode will be sampled from this
              parameter per image.

    pad_cval : number or tuple of number list of number or imgaug.parameters.StochasticParameter, optional
        The constant value to use if the pad mode is ``constant`` or the end
        value to use if the mode is ``linear_ramp``.
        See :func:`imgaug.imgaug.pad` for more details.

            * If number, then that value will be used.
            * If a tuple of two numbers and at least one of them is a float,
              then a random number will be sampled from the continuous range
              ``a <= x <= b`` and used as the value. If both numbers are
              integers, the range is discrete.
            * If a list of number, then a random value will be chosen from
              the elements of the list and used as the value.
            * If StochasticParameter, a random value will be sampled from
              that parameter per image.

    keep_size : bool, optional
        After padding, the result image will usually have a different
        height/width compared to the original input image. If this parameter
        is set to True, then the padded image will be resized to the input
        image's size, i.e. the augmenter's output shape is always identical
        to the input shape.

    sample_independently : bool, optional
        If False AND the values for `px`/`percent` result in exactly one
        probability distribution for the amount to pad, only one single value
        will be sampled from that probability distribution and used for all
        sides. I.e. the pad amount then is the same for all sides.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Examples
    --------
    >>> aug = iaa.Pad(px=(0, 10))

    pads each side by a random value from the range 0px to 10px (the value
    is sampled per side). The added rows/columns are filled with black pixels.

    >>> aug = iaa.Pad(px=(0, 10), sample_independently=False)

    samples one value v from the discrete range ``[0..10]`` and pads all
    sides by ``v`` pixels.

    >>> aug = iaa.Pad(px=(0, 10), keep_size=False)

    pads each side by a random value from the range 0px to 10px (the value
    is sampled per side). After padding, the images are NOT resized to their
    original size (i.e. the images may end up having different
    heights/widths).

    >>> aug = iaa.Pad(px=((0, 10), (0, 5), (0, 10), (0, 5)))

    pads the top and bottom by a random value from the range 0px to 10px
    and the left and right by a random value in the range 0px to 5px.

    >>> aug = iaa.Pad(percent=(0, 0.1))

    pads each side by a random value from the range 0 percent to
    10 percent. (Percent with respect to the side's size, e.g. for the
    top side it uses the image's height.)

    >>> aug = iaa.Pad(percent=([0.05, 0.1], [0.05, 0.1], [0.05, 0.1], [0.05, 0.1]))

    pads each side by either 5 percent or 10 percent.

    >>> aug = iaa.Pad(px=(0, 10), pad_mode="edge")

    pads each side by a random value from the range 0px to 10px (the values
    are sampled per side). The padding uses the ``edge`` mode from numpy's
    pad function.

    >>> aug = iaa.Pad(px=(0, 10), pad_mode=["constant", "edge"])

    pads each side by a random value from the range 0px to 10px (the values
    are sampled per side). The padding uses randomly either the ``constant``
    or ``edge`` mode from numpy's pad function.

    >>> aug = iaa.Pad(px=(0, 10), pad_mode=ia.ALL, pad_cval=(0, 255))

    pads each side by a random value from the range 0px to 10px (the values
    are sampled per side). It uses a random mode for numpy's pad function.
    If the mode is ``constant`` or ``linear_ramp``, it samples a random value
    ``v`` from the range ``[0, 255]`` and uses that as the constant
    value (``mode=constant``) or end value (``mode=linear_ramp``).

    """
    def recursive_validate(v):
        # Accept None, non-negative numbers and StochasticParameter as-is;
        # tuples and lists are validated element-wise (so four-tuples of
        # ranges etc. are checked all the way down).
        if v is None:
            return v
        elif ia.is_single_number(v):
            ia.do_assert(v >= 0)
            return v
        elif isinstance(v, iap.StochasticParameter):
            return v
        elif isinstance(v, tuple):
            return tuple(recursive_validate(v_) for v_ in v)
        elif isinstance(v, list):
            return [recursive_validate(v_) for v_ in v]
        else:
            # TypeError is the precise category for a bad argument type and,
            # being a subclass of Exception, stays compatible with callers
            # that caught the bare Exception raised previously.
            raise TypeError(
                "Expected None or int or float or StochasticParameter or list or tuple, got %s." % (
                    type(v),))

    px = recursive_validate(px)
    percent = recursive_validate(percent)

    if name is None:
        name = "Unnamed%s" % (ia.caller_name(),)

    # Pad is a convenience wrapper: CropAndPad interprets the (validated,
    # non-negative) amounts as padding.
    aug = CropAndPad(
        px=px, percent=percent,
        pad_mode=pad_mode, pad_cval=pad_cval,
        keep_size=keep_size, sample_independently=sample_independently,
        name=name, deterministic=deterministic, random_state=random_state
    )
    return aug
def tomof(self, indent=0, maxline=MAX_MOF_LINE):
"""
Return a MOF string with the declaration of this CIM method for use in
a CIM class declaration.
The order of parameters and qualifiers is preserved.
Parameters:
indent (:term:`integer`): Number of spaces to indent each line of
the returned string, counted in the line with the method name.
Returns:
:term:`unicode string`: MOF string.
"""
mof = []
if self.qualifiers:
mof.append(_qualifiers_tomof(self.qualifiers, indent + MOF_INDENT,
maxline))
mof.append(_indent_str(indent))
# return_type is ensured not to be None or reference
mof.append(moftype(self.return_type, None))
mof.append(u' ')
mof.append(self.name)
if self.parameters.values():
mof.append(u'(\n')
mof_parms = []
for p in self.parameters.itervalues():
mof_parms.append(p.tomof(indent + MOF_INDENT, maxline))
mof.append(u',\n'.join(mof_parms))
mof.append(u');\n')
else:
mof.append(u'();\n')
return u''.join(mof) | Return a MOF string with the declaration of this CIM method for use in
a CIM class declaration.
The order of parameters and qualifiers is preserved.
Parameters:
indent (:term:`integer`): Number of spaces to indent each line of
the returned string, counted in the line with the method name.
Returns:
:term:`unicode string`: MOF string. | Below is the the instruction that describes the task:
### Input:
Return a MOF string with the declaration of this CIM method for use in
a CIM class declaration.
The order of parameters and qualifiers is preserved.
Parameters:
indent (:term:`integer`): Number of spaces to indent each line of
the returned string, counted in the line with the method name.
Returns:
:term:`unicode string`: MOF string.
### Response:
def tomof(self, indent=0, maxline=MAX_MOF_LINE):
"""
Return a MOF string with the declaration of this CIM method for use in
a CIM class declaration.
The order of parameters and qualifiers is preserved.
Parameters:
indent (:term:`integer`): Number of spaces to indent each line of
the returned string, counted in the line with the method name.
Returns:
:term:`unicode string`: MOF string.
"""
mof = []
if self.qualifiers:
mof.append(_qualifiers_tomof(self.qualifiers, indent + MOF_INDENT,
maxline))
mof.append(_indent_str(indent))
# return_type is ensured not to be None or reference
mof.append(moftype(self.return_type, None))
mof.append(u' ')
mof.append(self.name)
if self.parameters.values():
mof.append(u'(\n')
mof_parms = []
for p in self.parameters.itervalues():
mof_parms.append(p.tomof(indent + MOF_INDENT, maxline))
mof.append(u',\n'.join(mof_parms))
mof.append(u');\n')
else:
mof.append(u'();\n')
return u''.join(mof) |
def get_file(self, file_route, file_name=''):
''' a method to retrieve data for a file housed on telegram api
:param file_route: string with route to file endpoint on telegram api
:return: byte data stream with file data
'''
title = '%s.get_file' % self.__class__.__name__
# construct file url
file_url = '%s%s' % (self.file_endpoint, file_route)
# send request for file data
data_buffer = self._get_data(file_url, file_name, method_title=title)
return data_buffer | a method to retrieve data for a file housed on telegram api
:param file_route: string with route to file endpoint on telegram api
:return: byte data stream with file data | Below is the the instruction that describes the task:
### Input:
a method to retrieve data for a file housed on telegram api
:param file_route: string with route to file endpoint on telegram api
:return: byte data stream with file data
### Response:
def get_file(self, file_route, file_name=''):
''' a method to retrieve data for a file housed on telegram api
:param file_route: string with route to file endpoint on telegram api
:return: byte data stream with file data
'''
title = '%s.get_file' % self.__class__.__name__
# construct file url
file_url = '%s%s' % (self.file_endpoint, file_route)
# send request for file data
data_buffer = self._get_data(file_url, file_name, method_title=title)
return data_buffer |
def set_digit_raw(self, pos, bitmask):
"""Set digit at position to raw bitmask value. Position should be a value
of 0 to 3 with 0 being the left most digit on the display."""
if pos < 0 or pos > 3:
# Ignore out of bounds digits.
return
# Set the digit bitmask value at the appropriate position.
# Also set bit 7 (decimal point) if decimal is True.
self.buffer[pos*2] = bitmask & 0xFF
self.buffer[pos*2+1] = (bitmask >> 8) & 0xFF | Set digit at position to raw bitmask value. Position should be a value
of 0 to 3 with 0 being the left most digit on the display. | Below is the the instruction that describes the task:
### Input:
Set digit at position to raw bitmask value. Position should be a value
of 0 to 3 with 0 being the left most digit on the display.
### Response:
def set_digit_raw(self, pos, bitmask):
"""Set digit at position to raw bitmask value. Position should be a value
of 0 to 3 with 0 being the left most digit on the display."""
if pos < 0 or pos > 3:
# Ignore out of bounds digits.
return
# Set the digit bitmask value at the appropriate position.
# Also set bit 7 (decimal point) if decimal is True.
self.buffer[pos*2] = bitmask & 0xFF
self.buffer[pos*2+1] = (bitmask >> 8) & 0xFF |
def xml(self):
"""
the xml string representation.
:return: the xml string
:rtype: str
"""
xml = '<inasafe_provenance>\n'
for step in self.steps:
xml += step.xml
xml += '</inasafe_provenance>\n'
return xml | the xml string representation.
:return: the xml string
:rtype: str | Below is the the instruction that describes the task:
### Input:
the xml string representation.
:return: the xml string
:rtype: str
### Response:
def xml(self):
"""
the xml string representation.
:return: the xml string
:rtype: str
"""
xml = '<inasafe_provenance>\n'
for step in self.steps:
xml += step.xml
xml += '</inasafe_provenance>\n'
return xml |
def _request(self, url, **kwargs):
'''Makes request to :func:`Interface.request` and caches it.
:param url: endpoint url
:params \*\*kwargs: kwargs to pass to :func:`requests.request`
'''
response = super(Bounces, self)._request(url, **kwargs)
self._last_response = response
return response | Makes request to :func:`Interface.request` and caches it.
:param url: endpoint url
:params \*\*kwargs: kwargs to pass to :func:`requests.request` | Below is the the instruction that describes the task:
### Input:
Makes request to :func:`Interface.request` and caches it.
:param url: endpoint url
:params \*\*kwargs: kwargs to pass to :func:`requests.request`
### Response:
def _request(self, url, **kwargs):
'''Makes request to :func:`Interface.request` and caches it.
:param url: endpoint url
:params \*\*kwargs: kwargs to pass to :func:`requests.request`
'''
response = super(Bounces, self)._request(url, **kwargs)
self._last_response = response
return response |
def spawn(self, context=None):
"""
context may be a callable or a dict.
"""
if context is None:
context = self.default_context
if isinstance(context, collections.Callable):
context = context()
if not isinstance(context, collections.Mapping):
raise PatchboardError('Cannot determine a valid context')
return Client(self, context, self.api, self.endpoint_classes) | context may be a callable or a dict. | Below is the the instruction that describes the task:
### Input:
context may be a callable or a dict.
### Response:
def spawn(self, context=None):
"""
context may be a callable or a dict.
"""
if context is None:
context = self.default_context
if isinstance(context, collections.Callable):
context = context()
if not isinstance(context, collections.Mapping):
raise PatchboardError('Cannot determine a valid context')
return Client(self, context, self.api, self.endpoint_classes) |
def parse(line):
"""
Parses a log line using the regexices above.
:param line: A log line to be parsed.
:return: If no match is found, None, otherwise a dict containing the values parsed from the log line.
"""
values = None
matches = re.search(regex, line)
if matches:
# Standard values
values = {
'date_time': matches.group(DATE_TIME),
'log_level': matches.group(LOG_LEVEL),
'process_id': matches.group(PROCESS_ID),
'thread_name': matches.group(THREAD_NAME),
'logger_name': matches.group(LOGGER_NAME),
'log_message': matches.group(LOG_MESSAGE)
}
# Optional transaction tracking information
if matches.group(TRANSACTION):
values["transaction"] = {
"app": matches.group(TRANSACTION_APP),
"id": matches.group(TRANSACTION_ID),
"span": matches.group(TRANSACTION_SPAN),
"exported": matches.group(TRANSACTION_EXPORTED)
}
return values | Parses a log line using the regexices above.
:param line: A log line to be parsed.
:return: If no match is found, None, otherwise a dict containing the values parsed from the log line. | Below is the the instruction that describes the task:
### Input:
Parses a log line using the regexices above.
:param line: A log line to be parsed.
:return: If no match is found, None, otherwise a dict containing the values parsed from the log line.
### Response:
def parse(line):
"""
Parses a log line using the regexices above.
:param line: A log line to be parsed.
:return: If no match is found, None, otherwise a dict containing the values parsed from the log line.
"""
values = None
matches = re.search(regex, line)
if matches:
# Standard values
values = {
'date_time': matches.group(DATE_TIME),
'log_level': matches.group(LOG_LEVEL),
'process_id': matches.group(PROCESS_ID),
'thread_name': matches.group(THREAD_NAME),
'logger_name': matches.group(LOGGER_NAME),
'log_message': matches.group(LOG_MESSAGE)
}
# Optional transaction tracking information
if matches.group(TRANSACTION):
values["transaction"] = {
"app": matches.group(TRANSACTION_APP),
"id": matches.group(TRANSACTION_ID),
"span": matches.group(TRANSACTION_SPAN),
"exported": matches.group(TRANSACTION_EXPORTED)
}
return values |
def delete_case(self, case_id=None, institute_id=None, display_name=None):
"""Delete a single case from database
Args:
institute_id(str)
case_id(str)
Returns:
case_obj(dict): The case that was deleted
"""
query = {}
if case_id:
query['_id'] = case_id
LOG.info("Deleting case %s", case_id)
else:
if not (institute_id and display_name):
raise ValueError("Have to provide both institute_id and display_name")
LOG.info("Deleting case %s institute %s", display_name, institute_id)
query['owner'] = institute_id
query['display_name'] = display_name
result = self.case_collection.delete_one(query)
return result | Delete a single case from database
Args:
institute_id(str)
case_id(str)
Returns:
case_obj(dict): The case that was deleted | Below is the the instruction that describes the task:
### Input:
Delete a single case from database
Args:
institute_id(str)
case_id(str)
Returns:
case_obj(dict): The case that was deleted
### Response:
def delete_case(self, case_id=None, institute_id=None, display_name=None):
"""Delete a single case from database
Args:
institute_id(str)
case_id(str)
Returns:
case_obj(dict): The case that was deleted
"""
query = {}
if case_id:
query['_id'] = case_id
LOG.info("Deleting case %s", case_id)
else:
if not (institute_id and display_name):
raise ValueError("Have to provide both institute_id and display_name")
LOG.info("Deleting case %s institute %s", display_name, institute_id)
query['owner'] = institute_id
query['display_name'] = display_name
result = self.case_collection.delete_one(query)
return result |
def delete_notification_rule(self, id, **kwargs):
"""Get a notification rule for this user."""
endpoint = '{0}/{1}/notification_rules/{2}'.format(
self.endpoint,
self['id'],
id,
)
return self.request('DELETE', endpoint=endpoint, query_params=kwargs) | Get a notification rule for this user. | Below is the the instruction that describes the task:
### Input:
Get a notification rule for this user.
### Response:
def delete_notification_rule(self, id, **kwargs):
"""Get a notification rule for this user."""
endpoint = '{0}/{1}/notification_rules/{2}'.format(
self.endpoint,
self['id'],
id,
)
return self.request('DELETE', endpoint=endpoint, query_params=kwargs) |
def _serve_file(self, path):
"""Call Paste's FileApp (a WSGI application) to serve the file
at the specified path
"""
fapp = paste.fileapp.FileApp(path)
return fapp(request.environ, self.start_response) | Call Paste's FileApp (a WSGI application) to serve the file
at the specified path | Below is the the instruction that describes the task:
### Input:
Call Paste's FileApp (a WSGI application) to serve the file
at the specified path
### Response:
def _serve_file(self, path):
"""Call Paste's FileApp (a WSGI application) to serve the file
at the specified path
"""
fapp = paste.fileapp.FileApp(path)
return fapp(request.environ, self.start_response) |
def capped(value, minimum=None, maximum=None):
"""
Args:
value: Value to cap
minimum: If specified, value should not be lower than this minimum
maximum: If specified, value should not be higher than this maximum
Returns:
`value` capped to `minimum` and `maximum` (if it is outside of those bounds)
"""
if minimum is not None and value < minimum:
return minimum
if maximum is not None and value > maximum:
return maximum
return value | Args:
value: Value to cap
minimum: If specified, value should not be lower than this minimum
maximum: If specified, value should not be higher than this maximum
Returns:
`value` capped to `minimum` and `maximum` (if it is outside of those bounds) | Below is the the instruction that describes the task:
### Input:
Args:
value: Value to cap
minimum: If specified, value should not be lower than this minimum
maximum: If specified, value should not be higher than this maximum
Returns:
`value` capped to `minimum` and `maximum` (if it is outside of those bounds)
### Response:
def capped(value, minimum=None, maximum=None):
"""
Args:
value: Value to cap
minimum: If specified, value should not be lower than this minimum
maximum: If specified, value should not be higher than this maximum
Returns:
`value` capped to `minimum` and `maximum` (if it is outside of those bounds)
"""
if minimum is not None and value < minimum:
return minimum
if maximum is not None and value > maximum:
return maximum
return value |
def get_milestone(self, title):
"""
given the title as str, looks for an existing milestone or create a new one,
and return the object
"""
if not title:
return GithubObject.NotSet
if not hasattr(self, '_milestones'):
self._milestones = {m.title: m for m in self.repo.get_milestones()}
milestone = self._milestones.get(title)
if not milestone:
milestone = self.repo.create_milestone(title=title)
return milestone | given the title as str, looks for an existing milestone or create a new one,
and return the object | Below is the the instruction that describes the task:
### Input:
given the title as str, looks for an existing milestone or create a new one,
and return the object
### Response:
def get_milestone(self, title):
"""
given the title as str, looks for an existing milestone or create a new one,
and return the object
"""
if not title:
return GithubObject.NotSet
if not hasattr(self, '_milestones'):
self._milestones = {m.title: m for m in self.repo.get_milestones()}
milestone = self._milestones.get(title)
if not milestone:
milestone = self.repo.create_milestone(title=title)
return milestone |
def _textOutput(self, gaObjects):
"""
Prints out the specified Variant objects in a VCF-like form.
"""
for variantAnnotation in gaObjects:
print(
variantAnnotation.id, variantAnnotation.variant_id,
variantAnnotation.variant_annotation_set_id,
variantAnnotation.created, sep="\t", end="\t")
for effect in variantAnnotation.transcript_effects:
print(effect.alternate_bases, sep="|", end="|")
for so in effect.effects:
print(so.term, sep="&", end="|")
print(so.term_id, sep="&", end="|")
print(effect.hgvs_annotation.transcript,
effect.hgvs_annotation.protein, sep="|", end="\t")
print() | Prints out the specified Variant objects in a VCF-like form. | Below is the the instruction that describes the task:
### Input:
Prints out the specified Variant objects in a VCF-like form.
### Response:
def _textOutput(self, gaObjects):
"""
Prints out the specified Variant objects in a VCF-like form.
"""
for variantAnnotation in gaObjects:
print(
variantAnnotation.id, variantAnnotation.variant_id,
variantAnnotation.variant_annotation_set_id,
variantAnnotation.created, sep="\t", end="\t")
for effect in variantAnnotation.transcript_effects:
print(effect.alternate_bases, sep="|", end="|")
for so in effect.effects:
print(so.term, sep="&", end="|")
print(so.term_id, sep="&", end="|")
print(effect.hgvs_annotation.transcript,
effect.hgvs_annotation.protein, sep="|", end="\t")
print() |
def permissions(self, course_id, permissions=None):
"""
Permissions.
Returns permission information for provided course & current_user
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# OPTIONAL - permissions
"""List of permissions to check against authenticated user"""
if permissions is not None:
params["permissions"] = permissions
self.logger.debug("GET /api/v1/courses/{course_id}/permissions with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/permissions".format(**path), data=data, params=params, no_data=True) | Permissions.
Returns permission information for provided course & current_user | Below is the the instruction that describes the task:
### Input:
Permissions.
Returns permission information for provided course & current_user
### Response:
def permissions(self, course_id, permissions=None):
"""
Permissions.
Returns permission information for provided course & current_user
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# OPTIONAL - permissions
"""List of permissions to check against authenticated user"""
if permissions is not None:
params["permissions"] = permissions
self.logger.debug("GET /api/v1/courses/{course_id}/permissions with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/permissions".format(**path), data=data, params=params, no_data=True) |
def application(environ, start_response):
"""
The main WSGI application. Dispatch the current request to
the functions from above.
If nothing matches, call the `not_found` function.
:param environ: The HTTP application environment
:param start_response: The application to run when the handling of the
request is done
:return: The response as a list of lines
"""
path = environ.get("PATH_INFO", "").lstrip("/")
logger.debug("<application> PATH: '%s'", path)
if path == "metadata":
return metadata(environ, start_response)
logger.debug("Finding callback to run")
try:
for regex, spec in urls:
match = re.search(regex, path)
if match is not None:
if isinstance(spec, tuple):
callback, func_name, _sp = spec
cls = callback(_sp, environ, start_response, cache=CACHE)
func = getattr(cls, func_name)
return func()
else:
return spec(environ, start_response, SP)
if re.match(".*static/.*", path):
return handle_static(environ, start_response, path)
return not_found(environ, start_response)
except StatusError as err:
logging.error("StatusError: %s" % err)
resp = BadRequest("%s" % err)
return resp(environ, start_response)
except Exception as err:
# _err = exception_trace("RUN", err)
# logging.error(exception_trace("RUN", _err))
print(err, file=sys.stderr)
resp = ServiceError("%s" % err)
return resp(environ, start_response) | The main WSGI application. Dispatch the current request to
the functions from above.
If nothing matches, call the `not_found` function.
:param environ: The HTTP application environment
:param start_response: The application to run when the handling of the
request is done
:return: The response as a list of lines | Below is the the instruction that describes the task:
### Input:
The main WSGI application. Dispatch the current request to
the functions from above.
If nothing matches, call the `not_found` function.
:param environ: The HTTP application environment
:param start_response: The application to run when the handling of the
request is done
:return: The response as a list of lines
### Response:
def application(environ, start_response):
"""
The main WSGI application. Dispatch the current request to
the functions from above.
If nothing matches, call the `not_found` function.
:param environ: The HTTP application environment
:param start_response: The application to run when the handling of the
request is done
:return: The response as a list of lines
"""
path = environ.get("PATH_INFO", "").lstrip("/")
logger.debug("<application> PATH: '%s'", path)
if path == "metadata":
return metadata(environ, start_response)
logger.debug("Finding callback to run")
try:
for regex, spec in urls:
match = re.search(regex, path)
if match is not None:
if isinstance(spec, tuple):
callback, func_name, _sp = spec
cls = callback(_sp, environ, start_response, cache=CACHE)
func = getattr(cls, func_name)
return func()
else:
return spec(environ, start_response, SP)
if re.match(".*static/.*", path):
return handle_static(environ, start_response, path)
return not_found(environ, start_response)
except StatusError as err:
logging.error("StatusError: %s" % err)
resp = BadRequest("%s" % err)
return resp(environ, start_response)
except Exception as err:
# _err = exception_trace("RUN", err)
# logging.error(exception_trace("RUN", _err))
print(err, file=sys.stderr)
resp = ServiceError("%s" % err)
return resp(environ, start_response) |
def post_document(self, data):
"""
Create and analyze a new document
data -- A Dictionary representing the new document
"""
data.update({ 'client' : CLIENT })
return self._call('POST', self._generate_url_path('documents'), data) | Create and analyze a new document
data -- A Dictionary representing the new document | Below is the the instruction that describes the task:
### Input:
Create and analyze a new document
data -- A Dictionary representing the new document
### Response:
def post_document(self, data):
"""
Create and analyze a new document
data -- A Dictionary representing the new document
"""
data.update({ 'client' : CLIENT })
return self._call('POST', self._generate_url_path('documents'), data) |
def clone(self, snapshot_name_or_id=None,
mode=library.CloneMode.machine_state,
options=None, name=None,
uuid=None, groups=None, basefolder='', register=True):
"""Clone this Machine
Options:
snapshot_name_or_id - value can be either ISnapshot, name, or id
mode - set the CloneMode value
options - define the CloneOptions options
name - define a name of the new VM
uuid - set the uuid of the new VM
groups - specify which groups the new VM will exist under
basefolder - specify which folder to set the VM up under
register - register this VM with the server
Note: Default values create a linked clone from the current machine
state
Return a IMachine object for the newly cloned vm
"""
if options is None:
options = [library.CloneOptions.link]
if groups is None:
groups = []
vbox = virtualbox.VirtualBox()
if snapshot_name_or_id is not None:
if isinstance(snapshot_name_or_id, basestring):
snapshot = self.find_snapshot(snapshot_name_or_id)
else:
snapshot = snapshot_name_or_id
vm = snapshot.machine
else:
# linked clone can only be created from a snapshot...
# try grabbing the current_snapshot
if library.CloneOptions.link in options:
vm = self.current_snapshot.machine
else:
vm = self
if name is None:
name = "%s Clone" % vm.name
# Build the settings file
create_flags = ''
if uuid is not None:
create_flags = "UUID=%s" % uuid
primary_group = ''
if groups:
primary_group = groups[0]
# Make sure this settings file does not already exist
test_name = name
settings_file = ''
for i in range(1, 1000):
settings_file = vbox.compose_machine_filename(test_name,
primary_group,
create_flags,
basefolder)
if not os.path.exists(os.path.dirname(settings_file)):
break
test_name = "%s (%s)" % (name, i)
name = test_name
# Create the new machine and clone it!
vm_clone = vbox.create_machine(settings_file, name, groups, '', create_flags)
progress = vm.clone_to(vm_clone, mode, options)
progress.wait_for_completion(-1)
if register:
vbox.register_machine(vm_clone)
return vm_clone | Clone this Machine
Options:
snapshot_name_or_id - value can be either ISnapshot, name, or id
mode - set the CloneMode value
options - define the CloneOptions options
name - define a name of the new VM
uuid - set the uuid of the new VM
groups - specify which groups the new VM will exist under
basefolder - specify which folder to set the VM up under
register - register this VM with the server
Note: Default values create a linked clone from the current machine
state
Return a IMachine object for the newly cloned vm | Below is the the instruction that describes the task:
### Input:
Clone this Machine
Options:
snapshot_name_or_id - value can be either ISnapshot, name, or id
mode - set the CloneMode value
options - define the CloneOptions options
name - define a name of the new VM
uuid - set the uuid of the new VM
groups - specify which groups the new VM will exist under
basefolder - specify which folder to set the VM up under
register - register this VM with the server
Note: Default values create a linked clone from the current machine
state
Return a IMachine object for the newly cloned vm
### Response:
def clone(self, snapshot_name_or_id=None,
mode=library.CloneMode.machine_state,
options=None, name=None,
uuid=None, groups=None, basefolder='', register=True):
"""Clone this Machine
Options:
snapshot_name_or_id - value can be either ISnapshot, name, or id
mode - set the CloneMode value
options - define the CloneOptions options
name - define a name of the new VM
uuid - set the uuid of the new VM
groups - specify which groups the new VM will exist under
basefolder - specify which folder to set the VM up under
register - register this VM with the server
Note: Default values create a linked clone from the current machine
state
Return a IMachine object for the newly cloned vm
"""
if options is None:
options = [library.CloneOptions.link]
if groups is None:
groups = []
vbox = virtualbox.VirtualBox()
if snapshot_name_or_id is not None:
if isinstance(snapshot_name_or_id, basestring):
snapshot = self.find_snapshot(snapshot_name_or_id)
else:
snapshot = snapshot_name_or_id
vm = snapshot.machine
else:
# linked clone can only be created from a snapshot...
# try grabbing the current_snapshot
if library.CloneOptions.link in options:
vm = self.current_snapshot.machine
else:
vm = self
if name is None:
name = "%s Clone" % vm.name
# Build the settings file
create_flags = ''
if uuid is not None:
create_flags = "UUID=%s" % uuid
primary_group = ''
if groups:
primary_group = groups[0]
# Make sure this settings file does not already exist
test_name = name
settings_file = ''
for i in range(1, 1000):
settings_file = vbox.compose_machine_filename(test_name,
primary_group,
create_flags,
basefolder)
if not os.path.exists(os.path.dirname(settings_file)):
break
test_name = "%s (%s)" % (name, i)
name = test_name
# Create the new machine and clone it!
vm_clone = vbox.create_machine(settings_file, name, groups, '', create_flags)
progress = vm.clone_to(vm_clone, mode, options)
progress.wait_for_completion(-1)
if register:
vbox.register_machine(vm_clone)
return vm_clone |
def help_dataframe_memory(self):
""" Help for making a DataFrame with Workbench CLI """
help = '%sMaking a DataFrame: %s how to make a dataframe from memory_forensics sample' % (color.Yellow, color.Green)
help += '\n\n%sMemory Images Example:' % (color.Green)
help += '\n\t%s> load_sample /path/to/pcap/exemplar4.vmem [\'bad\', \'aptz13\']' % (color.LightBlue)
help += '\n\t%s> view # view is your friend use it often' % (color.LightBlue)
help += '\n\t%s> <<< TODO :) >>> %s' % (color.LightBlue, color.Normal)
return help | Help for making a DataFrame with Workbench CLI | Below is the the instruction that describes the task:
### Input:
Help for making a DataFrame with Workbench CLI
### Response:
def help_dataframe_memory(self):
""" Help for making a DataFrame with Workbench CLI """
help = '%sMaking a DataFrame: %s how to make a dataframe from memory_forensics sample' % (color.Yellow, color.Green)
help += '\n\n%sMemory Images Example:' % (color.Green)
help += '\n\t%s> load_sample /path/to/pcap/exemplar4.vmem [\'bad\', \'aptz13\']' % (color.LightBlue)
help += '\n\t%s> view # view is your friend use it often' % (color.LightBlue)
help += '\n\t%s> <<< TODO :) >>> %s' % (color.LightBlue, color.Normal)
return help |
def iscomet(self):
"""`True` if `targetname` appears to be a comet. """
# treat this object as comet if there is a prefix/number
if self.comet is not None:
return self.comet
elif self.asteroid is not None:
return not self.asteroid
else:
return (self.parse_comet()[0] is not None or
self.parse_comet()[1] is not None) | `True` if `targetname` appears to be a comet. | Below is the the instruction that describes the task:
### Input:
`True` if `targetname` appears to be a comet.
### Response:
def iscomet(self):
"""`True` if `targetname` appears to be a comet. """
# treat this object as comet if there is a prefix/number
if self.comet is not None:
return self.comet
elif self.asteroid is not None:
return not self.asteroid
else:
return (self.parse_comet()[0] is not None or
self.parse_comet()[1] is not None) |
def create_csv_fig_from_df(self, data_frames=[], filename=None, headers=[], index_label=None,
fig_type=None, title=None, xlabel=None, ylabel=None, xfont=10,
yfont=10, titlefont=15, fig_size=(8, 10), image_type="eps"):
"""
Joins all the datafarames horizontally and creates a CSV and an image file from
those dataframes.
:param data_frames: a list of dataframes containing timeseries data from various metrics
:param filename: the name of the csv and image file
:param headers: a list of headers to be applied to columns of the dataframes
:param index_label: name of the index column
:param fig_type: figure type. Currently we support 'bar' graphs
default: normal graph
:param title: display title of the figure
:param filename: file name to save the figure as
:param xlabel: label for x axis
:param ylabel: label for y axis
:param xfont: font size of x axis label
:param yfont: font size of y axis label
:param titlefont: font size of title of the figure
:param fig_size: tuple describing size of the figure (in centimeters) (W x H)
:param image_type: the image type to save the image as: jpg, png, etc
default: png
:returns: creates a csv having name as "filename".csv and an image file
having the name as "filename"."image_type"
"""
if not data_frames:
logger.error("No dataframes provided to create CSV")
sys.exit(1)
assert(len(data_frames) == len(headers))
dataframes = []
for index, df in enumerate(data_frames):
df = df.rename(columns={"value": headers[index].replace("_", "")})
dataframes.append(df)
res_df = pd.concat(dataframes, axis=1)
if "unixtime" in res_df:
del res_df['unixtime']
if not index_label:
index_label = "Date"
# Create the CSV file:
csv_name = filename + ".csv"
res_df.to_csv(csv_name, index_label=index_label)
logger.debug("file: {} was created.".format(csv_name))
# Create the Image:
image_name = filename + "." + image_type
title = title.replace("_", "")
figure(figsize=fig_size)
plt.subplot(111)
if fig_type == "bar":
ax = res_df.plot.bar(figsize=fig_size)
ticklabels = res_df.index
ax.xaxis.set_major_formatter(matplotlib.ticker.FixedFormatter(ticklabels))
else:
plt.plot(res_df)
if not ylabel:
ylabel = "num " + " & ".join(headers)
if not xlabel:
xlabel = index_label
plt.title(title, fontsize=titlefont)
plt.ylabel(ylabel, fontsize=yfont)
plt.xlabel(xlabel, fontsize=xfont)
plt.grid(True)
plt.savefig(image_name)
logger.debug("Figure {} was generated.".format(image_name)) | Joins all the datafarames horizontally and creates a CSV and an image file from
those dataframes.
:param data_frames: a list of dataframes containing timeseries data from various metrics
:param filename: the name of the csv and image file
:param headers: a list of headers to be applied to columns of the dataframes
:param index_label: name of the index column
:param fig_type: figure type. Currently we support 'bar' graphs
default: normal graph
:param title: display title of the figure
:param filename: file name to save the figure as
:param xlabel: label for x axis
:param ylabel: label for y axis
:param xfont: font size of x axis label
:param yfont: font size of y axis label
:param titlefont: font size of title of the figure
:param fig_size: tuple describing size of the figure (in centimeters) (W x H)
:param image_type: the image type to save the image as: jpg, png, etc
default: png
:returns: creates a csv having name as "filename".csv and an image file
having the name as "filename"."image_type" | Below is the the instruction that describes the task:
### Input:
Joins all the datafarames horizontally and creates a CSV and an image file from
those dataframes.
:param data_frames: a list of dataframes containing timeseries data from various metrics
:param filename: the name of the csv and image file
:param headers: a list of headers to be applied to columns of the dataframes
:param index_label: name of the index column
:param fig_type: figure type. Currently we support 'bar' graphs
default: normal graph
:param title: display title of the figure
:param filename: file name to save the figure as
:param xlabel: label for x axis
:param ylabel: label for y axis
:param xfont: font size of x axis label
:param yfont: font size of y axis label
:param titlefont: font size of title of the figure
:param fig_size: tuple describing size of the figure (in centimeters) (W x H)
:param image_type: the image type to save the image as: jpg, png, etc
default: png
:returns: creates a csv having name as "filename".csv and an image file
having the name as "filename"."image_type"
### Response:
def create_csv_fig_from_df(self, data_frames=[], filename=None, headers=[], index_label=None,
fig_type=None, title=None, xlabel=None, ylabel=None, xfont=10,
yfont=10, titlefont=15, fig_size=(8, 10), image_type="eps"):
"""
Joins all the datafarames horizontally and creates a CSV and an image file from
those dataframes.
:param data_frames: a list of dataframes containing timeseries data from various metrics
:param filename: the name of the csv and image file
:param headers: a list of headers to be applied to columns of the dataframes
:param index_label: name of the index column
:param fig_type: figure type. Currently we support 'bar' graphs
default: normal graph
:param title: display title of the figure
:param filename: file name to save the figure as
:param xlabel: label for x axis
:param ylabel: label for y axis
:param xfont: font size of x axis label
:param yfont: font size of y axis label
:param titlefont: font size of title of the figure
:param fig_size: tuple describing size of the figure (in centimeters) (W x H)
:param image_type: the image type to save the image as: jpg, png, etc
default: png
:returns: creates a csv having name as "filename".csv and an image file
having the name as "filename"."image_type"
"""
if not data_frames:
logger.error("No dataframes provided to create CSV")
sys.exit(1)
assert(len(data_frames) == len(headers))
dataframes = []
for index, df in enumerate(data_frames):
df = df.rename(columns={"value": headers[index].replace("_", "")})
dataframes.append(df)
res_df = pd.concat(dataframes, axis=1)
if "unixtime" in res_df:
del res_df['unixtime']
if not index_label:
index_label = "Date"
# Create the CSV file:
csv_name = filename + ".csv"
res_df.to_csv(csv_name, index_label=index_label)
logger.debug("file: {} was created.".format(csv_name))
# Create the Image:
image_name = filename + "." + image_type
title = title.replace("_", "")
figure(figsize=fig_size)
plt.subplot(111)
if fig_type == "bar":
ax = res_df.plot.bar(figsize=fig_size)
ticklabels = res_df.index
ax.xaxis.set_major_formatter(matplotlib.ticker.FixedFormatter(ticklabels))
else:
plt.plot(res_df)
if not ylabel:
ylabel = "num " + " & ".join(headers)
if not xlabel:
xlabel = index_label
plt.title(title, fontsize=titlefont)
plt.ylabel(ylabel, fontsize=yfont)
plt.xlabel(xlabel, fontsize=xfont)
plt.grid(True)
plt.savefig(image_name)
logger.debug("Figure {} was generated.".format(image_name)) |
def log_entry_generator(log_instance):
"""
:yield: The next LogEntry from the REST API
:raise: StopIteration when there are no more log entries to show, please
note that if you call this again at a later time the REST API
could have different results and more data could be returned
"""
current_page_num = 0
while True:
has_results = False
for log_entry in log_instance.get_page(current_page_num):
has_results = True
yield log_entry
if not has_results:
break
current_page_num += 1 | :yield: The next LogEntry from the REST API
:raise: StopIteration when there are no more log entries to show, please
note that if you call this again at a later time the REST API
could have different results and more data could be returned | Below is the the instruction that describes the task:
### Input:
:yield: The next LogEntry from the REST API
:raise: StopIteration when there are no more log entries to show, please
note that if you call this again at a later time the REST API
could have different results and more data could be returned
### Response:
def log_entry_generator(log_instance):
"""
:yield: The next LogEntry from the REST API
:raise: StopIteration when there are no more log entries to show, please
note that if you call this again at a later time the REST API
could have different results and more data could be returned
"""
current_page_num = 0
while True:
has_results = False
for log_entry in log_instance.get_page(current_page_num):
has_results = True
yield log_entry
if not has_results:
break
current_page_num += 1 |
def retrieve_import_alias_mapping(names_list):
"""Creates a dictionary mapping aliases to their respective name.
import_alias_names is used in module_definitions.py and visit_Call"""
import_alias_names = dict()
for alias in names_list:
if alias.asname:
import_alias_names[alias.asname] = alias.name
return import_alias_names | Creates a dictionary mapping aliases to their respective name.
import_alias_names is used in module_definitions.py and visit_Call | Below is the the instruction that describes the task:
### Input:
Creates a dictionary mapping aliases to their respective name.
import_alias_names is used in module_definitions.py and visit_Call
### Response:
def retrieve_import_alias_mapping(names_list):
"""Creates a dictionary mapping aliases to their respective name.
import_alias_names is used in module_definitions.py and visit_Call"""
import_alias_names = dict()
for alias in names_list:
if alias.asname:
import_alias_names[alias.asname] = alias.name
return import_alias_names |
def VerifyStructure(self, parser_mediator, line):
"""Verify that this file is a XChat log file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
line (str): line from a text file.
Returns:
bool: True if the line is in the expected format, False if not.
"""
try:
structure = self._HEADER.parseString(line)
except pyparsing.ParseException:
logger.debug('Not a XChat log file')
return False
_, month, day, hours, minutes, seconds, year = structure.date_time
month = timelib.MONTH_DICT.get(month.lower(), 0)
time_elements_tuple = (year, month, day, hours, minutes, seconds)
try:
dfdatetime_time_elements.TimeElements(
time_elements_tuple=time_elements_tuple)
except ValueError:
logger.debug('Not a XChat log file, invalid date and time: {0!s}'.format(
structure.date_time))
return False
return True | Verify that this file is a XChat log file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
line (str): line from a text file.
Returns:
bool: True if the line is in the expected format, False if not. | Below is the the instruction that describes the task:
### Input:
Verify that this file is a XChat log file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
line (str): line from a text file.
Returns:
bool: True if the line is in the expected format, False if not.
### Response:
def VerifyStructure(self, parser_mediator, line):
"""Verify that this file is a XChat log file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
line (str): line from a text file.
Returns:
bool: True if the line is in the expected format, False if not.
"""
try:
structure = self._HEADER.parseString(line)
except pyparsing.ParseException:
logger.debug('Not a XChat log file')
return False
_, month, day, hours, minutes, seconds, year = structure.date_time
month = timelib.MONTH_DICT.get(month.lower(), 0)
time_elements_tuple = (year, month, day, hours, minutes, seconds)
try:
dfdatetime_time_elements.TimeElements(
time_elements_tuple=time_elements_tuple)
except ValueError:
logger.debug('Not a XChat log file, invalid date and time: {0!s}'.format(
structure.date_time))
return False
return True |
def _extract_from_toolkit(args):
"""
Look at all the modules in opt/python/streams (opt/python/streams/*.py)
and extract any spl decorated function as an operator.
"""
extractor = _Extractor(args)
if extractor._cmd_args.verbose:
print("spl-python-extract:", __version__)
print("Topology toolkit location:", _topology_tk_dir())
tk_dir = extractor._tk_dir
tk_streams = os.path.join(tk_dir, 'opt', 'python', 'streams')
if not os.path.isdir(tk_streams) or not fnmatch.filter(os.listdir(tk_streams), '*.py'):
# Nothing to do for Python extraction
extractor._make_toolkit()
return
lf = os.path.join(tk_streams, '.lockfile')
with open(lf, 'w') as lfno:
fcntl.flock(lfno, fcntl.LOCK_EX)
tk_idx = os.path.join(tk_dir, 'toolkit.xml')
tk_time = os.path.getmtime(tk_idx) if os.path.exists(tk_idx) else None
changed = False if tk_time else True
if tk_time:
for mf in glob.glob(os.path.join(tk_streams, '*.py')):
if os.path.getmtime(mf) >= tk_time:
changed = True
break
if changed:
path_items = _setup_path(tk_dir, tk_streams)
for mf in glob.glob(os.path.join(tk_streams, '*.py')):
print('Checking ', mf, 'for operators')
name = inspect.getmodulename(mf)
dynm = imp.load_source(name, mf)
streams_python_file = inspect.getsourcefile(dynm)
extractor._process_operators(dynm, name, streams_python_file, inspect.getmembers(dynm, inspect.isfunction))
extractor._process_operators(dynm, name, streams_python_file, inspect.getmembers(dynm, inspect.isclass))
langList = extractor._copy_globalization_resources()
if extractor._cmd_args.verbose:
print("Available languages for TopologySplpy resource:", langList)
extractor._setup_info_xml(langList)
extractor._make_toolkit()
_reset_path(path_items)
fcntl.flock(lfno, fcntl.LOCK_UN) | Look at all the modules in opt/python/streams (opt/python/streams/*.py)
and extract any spl decorated function as an operator. | Below is the the instruction that describes the task:
### Input:
Look at all the modules in opt/python/streams (opt/python/streams/*.py)
and extract any spl decorated function as an operator.
### Response:
def _extract_from_toolkit(args):
"""
Look at all the modules in opt/python/streams (opt/python/streams/*.py)
and extract any spl decorated function as an operator.
"""
extractor = _Extractor(args)
if extractor._cmd_args.verbose:
print("spl-python-extract:", __version__)
print("Topology toolkit location:", _topology_tk_dir())
tk_dir = extractor._tk_dir
tk_streams = os.path.join(tk_dir, 'opt', 'python', 'streams')
if not os.path.isdir(tk_streams) or not fnmatch.filter(os.listdir(tk_streams), '*.py'):
# Nothing to do for Python extraction
extractor._make_toolkit()
return
lf = os.path.join(tk_streams, '.lockfile')
with open(lf, 'w') as lfno:
fcntl.flock(lfno, fcntl.LOCK_EX)
tk_idx = os.path.join(tk_dir, 'toolkit.xml')
tk_time = os.path.getmtime(tk_idx) if os.path.exists(tk_idx) else None
changed = False if tk_time else True
if tk_time:
for mf in glob.glob(os.path.join(tk_streams, '*.py')):
if os.path.getmtime(mf) >= tk_time:
changed = True
break
if changed:
path_items = _setup_path(tk_dir, tk_streams)
for mf in glob.glob(os.path.join(tk_streams, '*.py')):
print('Checking ', mf, 'for operators')
name = inspect.getmodulename(mf)
dynm = imp.load_source(name, mf)
streams_python_file = inspect.getsourcefile(dynm)
extractor._process_operators(dynm, name, streams_python_file, inspect.getmembers(dynm, inspect.isfunction))
extractor._process_operators(dynm, name, streams_python_file, inspect.getmembers(dynm, inspect.isclass))
langList = extractor._copy_globalization_resources()
if extractor._cmd_args.verbose:
print("Available languages for TopologySplpy resource:", langList)
extractor._setup_info_xml(langList)
extractor._make_toolkit()
_reset_path(path_items)
fcntl.flock(lfno, fcntl.LOCK_UN) |
def get(self, status_code):
"""
Returns the requested status.
:param int status_code: the status code to return
:queryparam str reason: optional reason phrase
"""
status_code = int(status_code)
if status_code >= 400:
kwargs = {'status_code': status_code}
if self.get_query_argument('reason', None):
kwargs['reason'] = self.get_query_argument('reason')
if self.get_query_argument('log_message', None):
kwargs['log_message'] = self.get_query_argument('log_message')
self.send_error(**kwargs)
else:
self.set_status(status_code) | Returns the requested status.
:param int status_code: the status code to return
:queryparam str reason: optional reason phrase | Below is the the instruction that describes the task:
### Input:
Returns the requested status.
:param int status_code: the status code to return
:queryparam str reason: optional reason phrase
### Response:
def get(self, status_code):
"""
Returns the requested status.
:param int status_code: the status code to return
:queryparam str reason: optional reason phrase
"""
status_code = int(status_code)
if status_code >= 400:
kwargs = {'status_code': status_code}
if self.get_query_argument('reason', None):
kwargs['reason'] = self.get_query_argument('reason')
if self.get_query_argument('log_message', None):
kwargs['log_message'] = self.get_query_argument('log_message')
self.send_error(**kwargs)
else:
self.set_status(status_code) |
def clean(self, value, model_instance):
"""
Convert the value's type and run validation. Validation errors
from to_python and validate are propagated. The correct value is
returned if no error is raised.
"""
#: return constant's name instead of constant itself
value = self.to_python(value).name
self.validate(value, model_instance)
self.run_validators(value)
return value | Convert the value's type and run validation. Validation errors
from to_python and validate are propagated. The correct value is
returned if no error is raised. | Below is the the instruction that describes the task:
### Input:
Convert the value's type and run validation. Validation errors
from to_python and validate are propagated. The correct value is
returned if no error is raised.
### Response:
def clean(self, value, model_instance):
"""
Convert the value's type and run validation. Validation errors
from to_python and validate are propagated. The correct value is
returned if no error is raised.
"""
#: return constant's name instead of constant itself
value = self.to_python(value).name
self.validate(value, model_instance)
self.run_validators(value)
return value |
def call(self, operation, data):
"""Make some network operations."""
print('API call [{0}:{1}], method - {2}, data - {3}'.format(
self.host, self.api_key, operation, repr(data))) | Make some network operations. | Below is the the instruction that describes the task:
### Input:
Make some network operations.
### Response:
def call(self, operation, data):
"""Make some network operations."""
print('API call [{0}:{1}], method - {2}, data - {3}'.format(
self.host, self.api_key, operation, repr(data))) |
def ls(path, load_path=None): # pylint: disable=C0103
'''
List the direct children of a node
CLI Example:
.. code-block:: bash
salt '*' augeas.ls /files/etc/passwd
path
The path to list
.. versionadded:: 2016.3.0
load_path
A colon-spearated list of directories that modules should be searched
in. This is in addition to the standard load path and the directories
in AUGEAS_LENS_LIB.
'''
def _match(path):
''' Internal match function '''
try:
matches = aug.match(salt.utils.stringutils.to_str(path))
except RuntimeError:
return {}
ret = {}
for _ma in matches:
ret[_ma] = aug.get(_ma)
return ret
load_path = _check_load_paths(load_path)
aug = _Augeas(loadpath=load_path)
path = path.rstrip('/') + '/'
match_path = path + '*'
matches = _match(match_path)
ret = {}
for key, value in six.iteritems(matches):
name = _lstrip_word(key, path)
if _match(key + '/*'):
ret[name + '/'] = value # has sub nodes, e.g. directory
else:
ret[name] = value
return ret | List the direct children of a node
CLI Example:
.. code-block:: bash
salt '*' augeas.ls /files/etc/passwd
path
The path to list
.. versionadded:: 2016.3.0
load_path
A colon-spearated list of directories that modules should be searched
in. This is in addition to the standard load path and the directories
in AUGEAS_LENS_LIB. | Below is the the instruction that describes the task:
### Input:
List the direct children of a node
CLI Example:
.. code-block:: bash
salt '*' augeas.ls /files/etc/passwd
path
The path to list
.. versionadded:: 2016.3.0
load_path
A colon-spearated list of directories that modules should be searched
in. This is in addition to the standard load path and the directories
in AUGEAS_LENS_LIB.
### Response:
def ls(path, load_path=None): # pylint: disable=C0103
'''
List the direct children of a node
CLI Example:
.. code-block:: bash
salt '*' augeas.ls /files/etc/passwd
path
The path to list
.. versionadded:: 2016.3.0
load_path
A colon-spearated list of directories that modules should be searched
in. This is in addition to the standard load path and the directories
in AUGEAS_LENS_LIB.
'''
def _match(path):
''' Internal match function '''
try:
matches = aug.match(salt.utils.stringutils.to_str(path))
except RuntimeError:
return {}
ret = {}
for _ma in matches:
ret[_ma] = aug.get(_ma)
return ret
load_path = _check_load_paths(load_path)
aug = _Augeas(loadpath=load_path)
path = path.rstrip('/') + '/'
match_path = path + '*'
matches = _match(match_path)
ret = {}
for key, value in six.iteritems(matches):
name = _lstrip_word(key, path)
if _match(key + '/*'):
ret[name + '/'] = value # has sub nodes, e.g. directory
else:
ret[name] = value
return ret |
def global_variable_action(self, text, loc, var):
"""Code executed after recognising a global variable"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("GLOBAL_VAR:",var)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
index = self.symtab.insert_global_var(var.name, var.type)
self.codegen.global_var(var.name)
return index | Code executed after recognising a global variable | Below is the the instruction that describes the task:
### Input:
Code executed after recognising a global variable
### Response:
def global_variable_action(self, text, loc, var):
"""Code executed after recognising a global variable"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("GLOBAL_VAR:",var)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
index = self.symtab.insert_global_var(var.name, var.type)
self.codegen.global_var(var.name)
return index |
def _get_matching_dist_in_location(dist, location):
"""
Check if `locations` contain only the one intended dist.
Return the dist with metadata in the new location.
"""
# Getting the dist from the environment causes the
# distribution meta data to be read. Cloning isn't
# good enough.
import pkg_resources
env = pkg_resources.Environment([location])
dists = [ d for project_name in env for d in env[project_name] ]
dist_infos = [ (d.project_name, d.version) for d in dists ]
if dist_infos == [(dist.project_name, dist.version)]:
return dists.pop()
if dist_infos == [(dist.project_name.lower(), dist.version)]:
return dists.pop() | Check if `locations` contain only the one intended dist.
Return the dist with metadata in the new location. | Below is the the instruction that describes the task:
### Input:
Check if `locations` contain only the one intended dist.
Return the dist with metadata in the new location.
### Response:
def _get_matching_dist_in_location(dist, location):
"""
Check if `locations` contain only the one intended dist.
Return the dist with metadata in the new location.
"""
# Getting the dist from the environment causes the
# distribution meta data to be read. Cloning isn't
# good enough.
import pkg_resources
env = pkg_resources.Environment([location])
dists = [ d for project_name in env for d in env[project_name] ]
dist_infos = [ (d.project_name, d.version) for d in dists ]
if dist_infos == [(dist.project_name, dist.version)]:
return dists.pop()
if dist_infos == [(dist.project_name.lower(), dist.version)]:
return dists.pop() |
def find_previous_siblings(self, *args, **kwargs):
"""
Like :meth:`find_all`, but searches through :attr:`previous_siblings`
"""
op = operator.methodcaller('find_previous_siblings', *args, **kwargs)
return self._wrap_multi(op) | Like :meth:`find_all`, but searches through :attr:`previous_siblings` | Below is the the instruction that describes the task:
### Input:
Like :meth:`find_all`, but searches through :attr:`previous_siblings`
### Response:
def find_previous_siblings(self, *args, **kwargs):
"""
Like :meth:`find_all`, but searches through :attr:`previous_siblings`
"""
op = operator.methodcaller('find_previous_siblings', *args, **kwargs)
return self._wrap_multi(op) |
def UploadImageAsset(client, url):
"""Uploads the image from the specified url.
Args:
client: An AdWordsClient instance.
url: The image URL.
Returns:
The ID of the uploaded image.
"""
# Initialize appropriate service.
asset_service = client.GetService('AssetService', version='v201809')
# Download the image.
image_request = requests.get(url)
# Create the image asset.
image_asset = {
'xsi_type': 'ImageAsset',
'imageData': image_request.content,
# This field is optional, and if provided should be unique.
# 'assetName': 'Image asset ' + str(uuid.uuid4()),
}
# Create the operation.
operation = {
'operator': 'ADD',
'operand': image_asset
}
# Create the asset and return the ID.
result = asset_service.mutate([operation])
return result['value'][0]['assetId'] | Uploads the image from the specified url.
Args:
client: An AdWordsClient instance.
url: The image URL.
Returns:
The ID of the uploaded image. | Below is the the instruction that describes the task:
### Input:
Uploads the image from the specified url.
Args:
client: An AdWordsClient instance.
url: The image URL.
Returns:
The ID of the uploaded image.
### Response:
def UploadImageAsset(client, url):
"""Uploads the image from the specified url.
Args:
client: An AdWordsClient instance.
url: The image URL.
Returns:
The ID of the uploaded image.
"""
# Initialize appropriate service.
asset_service = client.GetService('AssetService', version='v201809')
# Download the image.
image_request = requests.get(url)
# Create the image asset.
image_asset = {
'xsi_type': 'ImageAsset',
'imageData': image_request.content,
# This field is optional, and if provided should be unique.
# 'assetName': 'Image asset ' + str(uuid.uuid4()),
}
# Create the operation.
operation = {
'operator': 'ADD',
'operand': image_asset
}
# Create the asset and return the ID.
result = asset_service.mutate([operation])
return result['value'][0]['assetId'] |
def _inject_selenium(self, test):
"""
Injects a selenium instance into the method.
"""
from django.conf import settings
test_case = get_test_case_class(test)
test_case.selenium_plugin_started = True
# Provide some reasonable default values
sel = selenium(
getattr(settings, "SELENIUM_HOST", "localhost"),
int(getattr(settings, "SELENIUM_PORT", 4444)),
getattr(settings, "SELENIUM_BROWSER_COMMAND", "*chrome"),
getattr(settings, "SELENIUM_URL_ROOT", "http://127.0.0.1:8000/"))
try:
sel.start()
except socket.error:
if getattr(settings, "FORCE_SELENIUM_TESTS", False):
raise
else:
raise SkipTest("Selenium server not available.")
else:
test_case.selenium_started = True
# Only works on method test cases, because we obviously need
# self.
if isinstance(test.test, nose.case.MethodTestCase):
test.test.test.im_self.selenium = sel
elif isinstance(test.test, TestCase):
test.test.run.im_self.selenium = sel
else:
raise SkipTest("Test skipped because it's not a method.") | Injects a selenium instance into the method. | Below is the the instruction that describes the task:
### Input:
Injects a selenium instance into the method.
### Response:
def _inject_selenium(self, test):
"""
Injects a selenium instance into the method.
"""
from django.conf import settings
test_case = get_test_case_class(test)
test_case.selenium_plugin_started = True
# Provide some reasonable default values
sel = selenium(
getattr(settings, "SELENIUM_HOST", "localhost"),
int(getattr(settings, "SELENIUM_PORT", 4444)),
getattr(settings, "SELENIUM_BROWSER_COMMAND", "*chrome"),
getattr(settings, "SELENIUM_URL_ROOT", "http://127.0.0.1:8000/"))
try:
sel.start()
except socket.error:
if getattr(settings, "FORCE_SELENIUM_TESTS", False):
raise
else:
raise SkipTest("Selenium server not available.")
else:
test_case.selenium_started = True
# Only works on method test cases, because we obviously need
# self.
if isinstance(test.test, nose.case.MethodTestCase):
test.test.test.im_self.selenium = sel
elif isinstance(test.test, TestCase):
test.test.run.im_self.selenium = sel
else:
raise SkipTest("Test skipped because it's not a method.") |
def _bss_image_crit(s_true, e_spat, e_interf, e_artif):
"""Measurement of the separation quality for a given image in terms of
filtered true source, spatial error, interference and artifacts.
"""
# energy ratios
sdr = _safe_db(np.sum(s_true**2), np.sum((e_spat+e_interf+e_artif)**2))
isr = _safe_db(np.sum(s_true**2), np.sum(e_spat**2))
sir = _safe_db(np.sum((s_true+e_spat)**2), np.sum(e_interf**2))
sar = _safe_db(np.sum((s_true+e_spat+e_interf)**2), np.sum(e_artif**2))
return (sdr, isr, sir, sar) | Measurement of the separation quality for a given image in terms of
filtered true source, spatial error, interference and artifacts. | Below is the the instruction that describes the task:
### Input:
Measurement of the separation quality for a given image in terms of
filtered true source, spatial error, interference and artifacts.
### Response:
def _bss_image_crit(s_true, e_spat, e_interf, e_artif):
"""Measurement of the separation quality for a given image in terms of
filtered true source, spatial error, interference and artifacts.
"""
# energy ratios
sdr = _safe_db(np.sum(s_true**2), np.sum((e_spat+e_interf+e_artif)**2))
isr = _safe_db(np.sum(s_true**2), np.sum(e_spat**2))
sir = _safe_db(np.sum((s_true+e_spat)**2), np.sum(e_interf**2))
sar = _safe_db(np.sum((s_true+e_spat+e_interf)**2), np.sum(e_artif**2))
return (sdr, isr, sir, sar) |
def report_version(self, data):
"""
This method processes the report version message, sent asynchronously by Firmata when it starts up
or after refresh_report_version() is called
Use the api method api_get_version to retrieve this information
:param data: Message data from Firmata
:return: No return value.
"""
self.firmata_version.append(data[0]) # add major
self.firmata_version.append(data[1]) | This method processes the report version message, sent asynchronously by Firmata when it starts up
or after refresh_report_version() is called
Use the api method api_get_version to retrieve this information
:param data: Message data from Firmata
:return: No return value. | Below is the the instruction that describes the task:
### Input:
This method processes the report version message, sent asynchronously by Firmata when it starts up
or after refresh_report_version() is called
Use the api method api_get_version to retrieve this information
:param data: Message data from Firmata
:return: No return value.
### Response:
def report_version(self, data):
"""
This method processes the report version message, sent asynchronously by Firmata when it starts up
or after refresh_report_version() is called
Use the api method api_get_version to retrieve this information
:param data: Message data from Firmata
:return: No return value.
"""
self.firmata_version.append(data[0]) # add major
self.firmata_version.append(data[1]) |
def _apply_credentials(auto_refresh=True, credentials=None,
headers=None):
"""Update Authorization header.
Update request headers with latest `access_token`. Perform token
`refresh` if token is ``None``.
Args:
auto_refresh (bool): Perform token refresh if access_token is ``None`` or expired. Defaults to ``True``.
credentials (class): Read-only credentials.
headers (class): Requests `CaseInsensitiveDict`.
"""
token = credentials.get_credentials().access_token
if auto_refresh is True:
if token is None:
token = credentials.refresh(
access_token=None, timeout=10)
elif credentials.jwt_is_expired():
token = credentials.refresh(timeout=10)
headers.update(
{'Authorization': "Bearer {}".format(token)}
) | Update Authorization header.
Update request headers with latest `access_token`. Perform token
`refresh` if token is ``None``.
Args:
auto_refresh (bool): Perform token refresh if access_token is ``None`` or expired. Defaults to ``True``.
credentials (class): Read-only credentials.
headers (class): Requests `CaseInsensitiveDict`. | Below is the the instruction that describes the task:
### Input:
Update Authorization header.
Update request headers with latest `access_token`. Perform token
`refresh` if token is ``None``.
Args:
auto_refresh (bool): Perform token refresh if access_token is ``None`` or expired. Defaults to ``True``.
credentials (class): Read-only credentials.
headers (class): Requests `CaseInsensitiveDict`.
### Response:
def _apply_credentials(auto_refresh=True, credentials=None,
headers=None):
"""Update Authorization header.
Update request headers with latest `access_token`. Perform token
`refresh` if token is ``None``.
Args:
auto_refresh (bool): Perform token refresh if access_token is ``None`` or expired. Defaults to ``True``.
credentials (class): Read-only credentials.
headers (class): Requests `CaseInsensitiveDict`.
"""
token = credentials.get_credentials().access_token
if auto_refresh is True:
if token is None:
token = credentials.refresh(
access_token=None, timeout=10)
elif credentials.jwt_is_expired():
token = credentials.refresh(timeout=10)
headers.update(
{'Authorization': "Bearer {}".format(token)}
) |
def get_for_content_type(self, ct):
"""Return the schema for the model of the given ContentType object"""
try:
return json.loads(self.state)[ct.app_label][ct.model]
except KeyError:
return None | Return the schema for the model of the given ContentType object | Below is the the instruction that describes the task:
### Input:
Return the schema for the model of the given ContentType object
### Response:
def get_for_content_type(self, ct):
"""Return the schema for the model of the given ContentType object"""
try:
return json.loads(self.state)[ct.app_label][ct.model]
except KeyError:
return None |
def download_issuetypes(jira_connection, project_ids):
'''
For Jira next-gen projects, issue types can be scoped to projects.
For issue types that are scoped to projects, only extract the ones
in the extracted projects.
'''
print('downloading jira issue types... ', end='', flush=True)
result = []
for it in jira_connection.issue_types():
if 'scope' in it.raw and it.raw['scope']['type'] == 'PROJECT':
if it.raw['scope']['project']['id'] in project_ids:
result.append(it.raw)
else:
result.append(it.raw)
print('✓')
return result | For Jira next-gen projects, issue types can be scoped to projects.
For issue types that are scoped to projects, only extract the ones
in the extracted projects. | Below is the the instruction that describes the task:
### Input:
For Jira next-gen projects, issue types can be scoped to projects.
For issue types that are scoped to projects, only extract the ones
in the extracted projects.
### Response:
def download_issuetypes(jira_connection, project_ids):
'''
For Jira next-gen projects, issue types can be scoped to projects.
For issue types that are scoped to projects, only extract the ones
in the extracted projects.
'''
print('downloading jira issue types... ', end='', flush=True)
result = []
for it in jira_connection.issue_types():
if 'scope' in it.raw and it.raw['scope']['type'] == 'PROJECT':
if it.raw['scope']['project']['id'] in project_ids:
result.append(it.raw)
else:
result.append(it.raw)
print('✓')
return result |
def register_entity(self, entity_value, entity_type, alias_of=None, domain=0):
"""
Register an entity to be tagged in potential parse results.
Args:
entity_value(str): the value/proper name of an entity instance
(Ex: "The Big Bang Theory")
entity_type(str): the type/tag of an entity instance (Ex: "Television Show")
domain(str): a string representing the domain you wish to add the entity to
"""
if domain not in self.domains:
self.register_domain(domain=domain)
self.domains[domain].register_entity(entity_value=entity_value,
entity_type=entity_type,
alias_of=alias_of) | Register an entity to be tagged in potential parse results.
Args:
entity_value(str): the value/proper name of an entity instance
(Ex: "The Big Bang Theory")
entity_type(str): the type/tag of an entity instance (Ex: "Television Show")
domain(str): a string representing the domain you wish to add the entity to | Below is the the instruction that describes the task:
### Input:
Register an entity to be tagged in potential parse results.
Args:
entity_value(str): the value/proper name of an entity instance
(Ex: "The Big Bang Theory")
entity_type(str): the type/tag of an entity instance (Ex: "Television Show")
domain(str): a string representing the domain you wish to add the entity to
### Response:
def register_entity(self, entity_value, entity_type, alias_of=None, domain=0):
"""
Register an entity to be tagged in potential parse results.
Args:
entity_value(str): the value/proper name of an entity instance
(Ex: "The Big Bang Theory")
entity_type(str): the type/tag of an entity instance (Ex: "Television Show")
domain(str): a string representing the domain you wish to add the entity to
"""
if domain not in self.domains:
self.register_domain(domain=domain)
self.domains[domain].register_entity(entity_value=entity_value,
entity_type=entity_type,
alias_of=alias_of) |
def read_json (self, mode='rt', **kwargs):
"""Use the :mod:`json` module to read in this file as a JSON-formatted data
structure. Keyword arguments are passed to :func:`json.load`. Returns the
read-in data structure.
"""
import json
with self.open (mode=mode) as f:
return json.load (f, **kwargs) | Use the :mod:`json` module to read in this file as a JSON-formatted data
structure. Keyword arguments are passed to :func:`json.load`. Returns the
read-in data structure. | Below is the the instruction that describes the task:
### Input:
Use the :mod:`json` module to read in this file as a JSON-formatted data
structure. Keyword arguments are passed to :func:`json.load`. Returns the
read-in data structure.
### Response:
def read_json (self, mode='rt', **kwargs):
"""Use the :mod:`json` module to read in this file as a JSON-formatted data
structure. Keyword arguments are passed to :func:`json.load`. Returns the
read-in data structure.
"""
import json
with self.open (mode=mode) as f:
return json.load (f, **kwargs) |
def from_type_name(cls, typ, name):
"""Build the object from (type, name)."""
# Try aliases first.
for k, nt in cls.defined_aliases.items():
if typ is not None and typ != nt.type: continue
#print(name, nt.name)
if name == nt.name:
if len(k) == 1: return cls(xc=k)
if len(k) == 2: return cls(x=k[0], c=k[1])
raise ValueError("Wrong key: %s" % k)
# At this point, we should have something in the form
# name="GGA_X_PBE+GGA_C_PBE" or name=""LDA_XC_TETER93"
if "+" in name:
#if typ is not None: raise ValueError("typ: `%s` but name: `%s`" % (typ, name))
x, c = (s.strip() for s in name.split("+"))
x, c = LibxcFunc[x], LibxcFunc[c]
return cls(x=x, c=c)
else:
#if typ is not None: raise ValueError("typ: `%s` but name: `%s`" % (typ, name))
xc = LibxcFunc[name]
return cls(xc=xc)
if typ is None:
raise ValueError("Cannot find name=%s in defined_aliases" % name)
else:
raise ValueError("Cannot find type=%s, name=%s in defined_aliases" % (typ, name)) | Build the object from (type, name). | Below is the the instruction that describes the task:
### Input:
Build the object from (type, name).
### Response:
def from_type_name(cls, typ, name):
"""Build the object from (type, name)."""
# Try aliases first.
for k, nt in cls.defined_aliases.items():
if typ is not None and typ != nt.type: continue
#print(name, nt.name)
if name == nt.name:
if len(k) == 1: return cls(xc=k)
if len(k) == 2: return cls(x=k[0], c=k[1])
raise ValueError("Wrong key: %s" % k)
# At this point, we should have something in the form
# name="GGA_X_PBE+GGA_C_PBE" or name=""LDA_XC_TETER93"
if "+" in name:
#if typ is not None: raise ValueError("typ: `%s` but name: `%s`" % (typ, name))
x, c = (s.strip() for s in name.split("+"))
x, c = LibxcFunc[x], LibxcFunc[c]
return cls(x=x, c=c)
else:
#if typ is not None: raise ValueError("typ: `%s` but name: `%s`" % (typ, name))
xc = LibxcFunc[name]
return cls(xc=xc)
if typ is None:
raise ValueError("Cannot find name=%s in defined_aliases" % name)
else:
raise ValueError("Cannot find type=%s, name=%s in defined_aliases" % (typ, name)) |
def edit(filename, connection=None):
"""Checks out a file into the default changelist
:param filename: File to check out
:type filename: str
:param connection: Connection object to use
:type connection: :py:class:`Connection`
"""
c = connection or connect()
rev = c.ls(filename)
if rev:
rev[0].edit() | Checks out a file into the default changelist
:param filename: File to check out
:type filename: str
:param connection: Connection object to use
:type connection: :py:class:`Connection` | Below is the the instruction that describes the task:
### Input:
Checks out a file into the default changelist
:param filename: File to check out
:type filename: str
:param connection: Connection object to use
:type connection: :py:class:`Connection`
### Response:
def edit(filename, connection=None):
"""Checks out a file into the default changelist
:param filename: File to check out
:type filename: str
:param connection: Connection object to use
:type connection: :py:class:`Connection`
"""
c = connection or connect()
rev = c.ls(filename)
if rev:
rev[0].edit() |
def stats_for_satellite_image(self, metaimage):
"""
Retrieves statistics for the satellite image described by the provided metadata.
This is currently only supported 'EVI' and 'NDVI' presets
:param metaimage: the satellite image's metadata, in the form of a `MetaImage` subtype instance
:type metaimage: a `pyowm.agroapi10.imagery.MetaImage` subtype
:return: dict
"""
if metaimage.preset != PresetEnum.EVI and metaimage.preset != PresetEnum.NDVI:
raise ValueError("Unsupported image preset: should be EVI or NDVI")
if metaimage.stats_url is None:
raise ValueError("URL for image statistics is not defined")
status, data = self.http_client.get_json(metaimage.stats_url, params={})
return data | Retrieves statistics for the satellite image described by the provided metadata.
This is currently only supported 'EVI' and 'NDVI' presets
:param metaimage: the satellite image's metadata, in the form of a `MetaImage` subtype instance
:type metaimage: a `pyowm.agroapi10.imagery.MetaImage` subtype
:return: dict | Below is the the instruction that describes the task:
### Input:
Retrieves statistics for the satellite image described by the provided metadata.
This is currently only supported 'EVI' and 'NDVI' presets
:param metaimage: the satellite image's metadata, in the form of a `MetaImage` subtype instance
:type metaimage: a `pyowm.agroapi10.imagery.MetaImage` subtype
:return: dict
### Response:
def stats_for_satellite_image(self, metaimage):
"""
Retrieves statistics for the satellite image described by the provided metadata.
This is currently only supported 'EVI' and 'NDVI' presets
:param metaimage: the satellite image's metadata, in the form of a `MetaImage` subtype instance
:type metaimage: a `pyowm.agroapi10.imagery.MetaImage` subtype
:return: dict
"""
if metaimage.preset != PresetEnum.EVI and metaimage.preset != PresetEnum.NDVI:
raise ValueError("Unsupported image preset: should be EVI or NDVI")
if metaimage.stats_url is None:
raise ValueError("URL for image statistics is not defined")
status, data = self.http_client.get_json(metaimage.stats_url, params={})
return data |
def _deltasummation(term, ranges, i_range):
"""Partially execute a summation for `term` with a Kronecker Delta for one
of the summation indices.
This implements the solution to the core sub-problem in
:func:`indexed_sum_over_kronecker`
Args:
term (QuantumExpression): term of the sum
ranges (list): list of all summation index ranges
(class:`IndexRangeBase` instances)
i_range (int): list-index of element in `ranges` which should be
eliminated
Returns:
``(result, flag)`` where `result` is a list
of ``(new_term, new_ranges)`` tuples and `flag` is an integer.
There are three possible cases, indicated by the returned `flag`. Consider
the following setup::
>>> i, j, k = symbols('i, j, k', cls=IdxSym)
>>> i_range = IndexOverList(i, (0, 1))
>>> j_range = IndexOverList(j, (0, 1))
>>> ranges = [i_range, j_range]
>>> def A(i, j):
... from sympy import IndexedBase
... return OperatorSymbol(StrLabel(IndexedBase('A')[i, j]), hs=0)
1. If executing the sum produces a single non-zero term, result will be
``[(new_term, new_ranges)]`` where `new_ranges` contains the input `ranges`
without the eliminated range specified by `i_range`. This should be the
most common case for calls to:func:`_deltasummation`::
>>> term = KroneckerDelta(i, j) * A(i, j)
>>> result, flag = _deltasummation(term, [i_range, j_range], 1)
>>> assert result == [(A(i, i), [i_range])]
>>> assert flag == 1
2. If executing the sum for the index symbol specified via `index_range`
does not reduce the sum, the result will be the list ``[(term, ranges)]``
with unchanged `term` and `ranges`::
>>> term = KroneckerDelta(j, k) * A(i, j)
>>> result, flag = _deltasummation(term, [i_range, j_range], 0)
>>> assert result == [(term, [i_range, j_range])]
>>> assert flag == 2
This case also covers if there is no Kroncker delta in the term::
>>> term = A(i, j)
>>> result, flag = _deltasummation(term, [i_range, j_range], 0)
>>> assert result == [(term, [i_range, j_range])]
>>> assert flag == 2
3. If `term` does not contain a Kronecker delta as a factor, but in a
sum that can be expanded, the result will be a list of
``[(summand1, ranges), (summand2, ranges), ...]`` for the summands of that
expansion. In this case, `:func:`_deltasummation` should be called again
for every tuple in the list, with the same `i_range`::
>>> term = (KroneckerDelta(i, j) + 1) * A(i, j)
>>> result, flag = _deltasummation(term, [i_range, j_range], 1)
>>> assert result == [
... (A(i, j), [i_range, j_range]),
... (KroneckerDelta(i,j) * A(i, j), [i_range, j_range])]
>>> assert flag == 3
"""
from qnet.algebra.core.abstract_quantum_algebra import QuantumExpression
idx = ranges[i_range].index_symbol
summands = _expand_delta(term, idx)
if len(summands) > 1:
return [(summand, ranges) for summand in summands], 3
else:
delta, expr = _extract_delta(summands[0], idx)
if not delta:
return [(term, ranges)], 2
solns = sympy.solve(delta.args[0] - delta.args[1], idx)
assert len(solns) > 0 # I can't think of an example that might cause this
# if len(solns) == 0:
# return [(term._zero, [])], 4
if len(solns) != 1:
return [(term, ranges)], 2
value = solns[0]
new_term = expr.substitute({idx: value})
if _RESOLVE_KRONECKER_WITH_PIECEWISE:
new_term *= ranges[i_range].piecewise_one(value)
assert isinstance(new_term, QuantumExpression)
return [(new_term, ranges[:i_range] + ranges[i_range+1:])], 1 | Partially execute a summation for `term` with a Kronecker Delta for one
of the summation indices.
This implements the solution to the core sub-problem in
:func:`indexed_sum_over_kronecker`
Args:
term (QuantumExpression): term of the sum
ranges (list): list of all summation index ranges
(class:`IndexRangeBase` instances)
i_range (int): list-index of element in `ranges` which should be
eliminated
Returns:
``(result, flag)`` where `result` is a list
of ``(new_term, new_ranges)`` tuples and `flag` is an integer.
There are three possible cases, indicated by the returned `flag`. Consider
the following setup::
>>> i, j, k = symbols('i, j, k', cls=IdxSym)
>>> i_range = IndexOverList(i, (0, 1))
>>> j_range = IndexOverList(j, (0, 1))
>>> ranges = [i_range, j_range]
>>> def A(i, j):
... from sympy import IndexedBase
... return OperatorSymbol(StrLabel(IndexedBase('A')[i, j]), hs=0)
1. If executing the sum produces a single non-zero term, result will be
``[(new_term, new_ranges)]`` where `new_ranges` contains the input `ranges`
without the eliminated range specified by `i_range`. This should be the
most common case for calls to:func:`_deltasummation`::
>>> term = KroneckerDelta(i, j) * A(i, j)
>>> result, flag = _deltasummation(term, [i_range, j_range], 1)
>>> assert result == [(A(i, i), [i_range])]
>>> assert flag == 1
2. If executing the sum for the index symbol specified via `index_range`
does not reduce the sum, the result will be the list ``[(term, ranges)]``
with unchanged `term` and `ranges`::
>>> term = KroneckerDelta(j, k) * A(i, j)
>>> result, flag = _deltasummation(term, [i_range, j_range], 0)
>>> assert result == [(term, [i_range, j_range])]
>>> assert flag == 2
This case also covers if there is no Kroncker delta in the term::
>>> term = A(i, j)
>>> result, flag = _deltasummation(term, [i_range, j_range], 0)
>>> assert result == [(term, [i_range, j_range])]
>>> assert flag == 2
3. If `term` does not contain a Kronecker delta as a factor, but in a
sum that can be expanded, the result will be a list of
``[(summand1, ranges), (summand2, ranges), ...]`` for the summands of that
expansion. In this case, `:func:`_deltasummation` should be called again
for every tuple in the list, with the same `i_range`::
>>> term = (KroneckerDelta(i, j) + 1) * A(i, j)
>>> result, flag = _deltasummation(term, [i_range, j_range], 1)
>>> assert result == [
... (A(i, j), [i_range, j_range]),
... (KroneckerDelta(i,j) * A(i, j), [i_range, j_range])]
>>> assert flag == 3 | Below is the the instruction that describes the task:
### Input:
Partially execute a summation for `term` with a Kronecker Delta for one
of the summation indices.
This implements the solution to the core sub-problem in
:func:`indexed_sum_over_kronecker`
Args:
term (QuantumExpression): term of the sum
ranges (list): list of all summation index ranges
(class:`IndexRangeBase` instances)
i_range (int): list-index of element in `ranges` which should be
eliminated
Returns:
``(result, flag)`` where `result` is a list
of ``(new_term, new_ranges)`` tuples and `flag` is an integer.
There are three possible cases, indicated by the returned `flag`. Consider
the following setup::
>>> i, j, k = symbols('i, j, k', cls=IdxSym)
>>> i_range = IndexOverList(i, (0, 1))
>>> j_range = IndexOverList(j, (0, 1))
>>> ranges = [i_range, j_range]
>>> def A(i, j):
... from sympy import IndexedBase
... return OperatorSymbol(StrLabel(IndexedBase('A')[i, j]), hs=0)
1. If executing the sum produces a single non-zero term, result will be
``[(new_term, new_ranges)]`` where `new_ranges` contains the input `ranges`
without the eliminated range specified by `i_range`. This should be the
most common case for calls to:func:`_deltasummation`::
>>> term = KroneckerDelta(i, j) * A(i, j)
>>> result, flag = _deltasummation(term, [i_range, j_range], 1)
>>> assert result == [(A(i, i), [i_range])]
>>> assert flag == 1
2. If executing the sum for the index symbol specified via `index_range`
does not reduce the sum, the result will be the list ``[(term, ranges)]``
with unchanged `term` and `ranges`::
>>> term = KroneckerDelta(j, k) * A(i, j)
>>> result, flag = _deltasummation(term, [i_range, j_range], 0)
>>> assert result == [(term, [i_range, j_range])]
>>> assert flag == 2
This case also covers if there is no Kroncker delta in the term::
>>> term = A(i, j)
>>> result, flag = _deltasummation(term, [i_range, j_range], 0)
>>> assert result == [(term, [i_range, j_range])]
>>> assert flag == 2
3. If `term` does not contain a Kronecker delta as a factor, but in a
sum that can be expanded, the result will be a list of
``[(summand1, ranges), (summand2, ranges), ...]`` for the summands of that
expansion. In this case, `:func:`_deltasummation` should be called again
for every tuple in the list, with the same `i_range`::
>>> term = (KroneckerDelta(i, j) + 1) * A(i, j)
>>> result, flag = _deltasummation(term, [i_range, j_range], 1)
>>> assert result == [
... (A(i, j), [i_range, j_range]),
... (KroneckerDelta(i,j) * A(i, j), [i_range, j_range])]
>>> assert flag == 3
### Response:
def _deltasummation(term, ranges, i_range):
"""Partially execute a summation for `term` with a Kronecker Delta for one
of the summation indices.
This implements the solution to the core sub-problem in
:func:`indexed_sum_over_kronecker`
Args:
term (QuantumExpression): term of the sum
ranges (list): list of all summation index ranges
(class:`IndexRangeBase` instances)
i_range (int): list-index of element in `ranges` which should be
eliminated
Returns:
``(result, flag)`` where `result` is a list
of ``(new_term, new_ranges)`` tuples and `flag` is an integer.
There are three possible cases, indicated by the returned `flag`. Consider
the following setup::
>>> i, j, k = symbols('i, j, k', cls=IdxSym)
>>> i_range = IndexOverList(i, (0, 1))
>>> j_range = IndexOverList(j, (0, 1))
>>> ranges = [i_range, j_range]
>>> def A(i, j):
... from sympy import IndexedBase
... return OperatorSymbol(StrLabel(IndexedBase('A')[i, j]), hs=0)
1. If executing the sum produces a single non-zero term, result will be
``[(new_term, new_ranges)]`` where `new_ranges` contains the input `ranges`
without the eliminated range specified by `i_range`. This should be the
most common case for calls to:func:`_deltasummation`::
>>> term = KroneckerDelta(i, j) * A(i, j)
>>> result, flag = _deltasummation(term, [i_range, j_range], 1)
>>> assert result == [(A(i, i), [i_range])]
>>> assert flag == 1
2. If executing the sum for the index symbol specified via `index_range`
does not reduce the sum, the result will be the list ``[(term, ranges)]``
with unchanged `term` and `ranges`::
>>> term = KroneckerDelta(j, k) * A(i, j)
>>> result, flag = _deltasummation(term, [i_range, j_range], 0)
>>> assert result == [(term, [i_range, j_range])]
>>> assert flag == 2
This case also covers if there is no Kroncker delta in the term::
>>> term = A(i, j)
>>> result, flag = _deltasummation(term, [i_range, j_range], 0)
>>> assert result == [(term, [i_range, j_range])]
>>> assert flag == 2
3. If `term` does not contain a Kronecker delta as a factor, but in a
sum that can be expanded, the result will be a list of
``[(summand1, ranges), (summand2, ranges), ...]`` for the summands of that
expansion. In this case, `:func:`_deltasummation` should be called again
for every tuple in the list, with the same `i_range`::
>>> term = (KroneckerDelta(i, j) + 1) * A(i, j)
>>> result, flag = _deltasummation(term, [i_range, j_range], 1)
>>> assert result == [
... (A(i, j), [i_range, j_range]),
... (KroneckerDelta(i,j) * A(i, j), [i_range, j_range])]
>>> assert flag == 3
"""
from qnet.algebra.core.abstract_quantum_algebra import QuantumExpression
idx = ranges[i_range].index_symbol
summands = _expand_delta(term, idx)
if len(summands) > 1:
return [(summand, ranges) for summand in summands], 3
else:
delta, expr = _extract_delta(summands[0], idx)
if not delta:
return [(term, ranges)], 2
solns = sympy.solve(delta.args[0] - delta.args[1], idx)
assert len(solns) > 0 # I can't think of an example that might cause this
# if len(solns) == 0:
# return [(term._zero, [])], 4
if len(solns) != 1:
return [(term, ranges)], 2
value = solns[0]
new_term = expr.substitute({idx: value})
if _RESOLVE_KRONECKER_WITH_PIECEWISE:
new_term *= ranges[i_range].piecewise_one(value)
assert isinstance(new_term, QuantumExpression)
return [(new_term, ranges[:i_range] + ranges[i_range+1:])], 1 |
def generate_payload(self, config, context):
"""
Generate payload by checking Django request object.
:param context: current context.
:param config: honeybadger configuration.
:return: a dict with the generated payload.
"""
request = current_request()
payload = {
'url': request.build_absolute_uri(),
'component': request.resolver_match.app_name,
'action': request.resolver_match.func.__name__,
'params': {},
'session': {},
'cgi_data': dict(request.META),
'context': context
}
if hasattr(request, 'session'):
payload['session'] = filter_dict(dict(request.session), config.params_filters)
payload['params'] = filter_dict(dict(getattr(request, request.method)), config.params_filters)
return payload | Generate payload by checking Django request object.
:param context: current context.
:param config: honeybadger configuration.
:return: a dict with the generated payload. | Below is the the instruction that describes the task:
### Input:
Generate payload by checking Django request object.
:param context: current context.
:param config: honeybadger configuration.
:return: a dict with the generated payload.
### Response:
def generate_payload(self, config, context):
"""
Generate payload by checking Django request object.
:param context: current context.
:param config: honeybadger configuration.
:return: a dict with the generated payload.
"""
request = current_request()
payload = {
'url': request.build_absolute_uri(),
'component': request.resolver_match.app_name,
'action': request.resolver_match.func.__name__,
'params': {},
'session': {},
'cgi_data': dict(request.META),
'context': context
}
if hasattr(request, 'session'):
payload['session'] = filter_dict(dict(request.session), config.params_filters)
payload['params'] = filter_dict(dict(getattr(request, request.method)), config.params_filters)
return payload |
def bg_compensate(img, sigma, splinepoints, scale):
'''Reads file, subtracts background. Returns [compensated image, background].'''
from PIL import Image
import pylab
from matplotlib.image import pil_to_array
from centrosome.filter import canny
import matplotlib
img = Image.open(img)
if img.mode=='I;16':
# 16-bit image
# deal with the endianness explicitly... I'm not sure
# why PIL doesn't get this right.
imgdata = np.fromstring(img.tostring(),np.uint8)
imgdata.shape=(int(imgdata.shape[0]/2),2)
imgdata = imgdata.astype(np.uint16)
hi,lo = (0,1) if img.tag.prefix == 'MM' else (1,0)
imgdata = imgdata[:,hi]*256 + imgdata[:,lo]
img_size = list(img.size)
img_size.reverse()
new_img = imgdata.reshape(img_size)
# The magic # for maximum sample value is 281
if 281 in img.tag:
img = new_img.astype(np.float32) / img.tag[281][0]
elif np.max(new_img) < 4096:
img = new_img.astype(np.float32) / 4095.
else:
img = new_img.astype(np.float32) / 65535.
else:
img = pil_to_array(img)
pylab.subplot(1,3,1).imshow(img, cmap=matplotlib.cm.Greys_r)
pylab.show()
if len(img.shape)>2:
raise ValueError('Image must be grayscale')
## Create mask that will fix problem when image has black areas outside of well
edges = canny(img, np.ones(img.shape, bool), 2, .1, .3)
ci = np.cumsum(edges, 0)
cj = np.cumsum(edges, 1)
i,j = np.mgrid[0:img.shape[0], 0:img.shape[1]]
mask = ci > 0
mask = mask & (cj > 0)
mask[1:,:] &= (ci[0:-1,:] < ci[-1,j[0:-1,:]])
mask[:,1:] &= (cj[:,0:-1] < cj[i[:,0:-1],-1])
import time
t0 = time.clock()
bg = backgr(img, mask, MODE_AUTO, sigma, splinepoints=splinepoints, scale=scale)
print("Executed in %f sec" % (time.clock() - t0))
bg[~mask] = img[~mask]
pylab.subplot(1,3,2).imshow(img - bg, cmap=matplotlib.cm.Greys_r)
pylab.subplot(1,3,3).imshow(bg, cmap=matplotlib.cm.Greys_r)
pylab.show() | Reads file, subtracts background. Returns [compensated image, background]. | Below is the the instruction that describes the task:
### Input:
Reads file, subtracts background. Returns [compensated image, background].
### Response:
def bg_compensate(img, sigma, splinepoints, scale):
'''Reads file, subtracts background. Returns [compensated image, background].'''
from PIL import Image
import pylab
from matplotlib.image import pil_to_array
from centrosome.filter import canny
import matplotlib
img = Image.open(img)
if img.mode=='I;16':
# 16-bit image
# deal with the endianness explicitly... I'm not sure
# why PIL doesn't get this right.
imgdata = np.fromstring(img.tostring(),np.uint8)
imgdata.shape=(int(imgdata.shape[0]/2),2)
imgdata = imgdata.astype(np.uint16)
hi,lo = (0,1) if img.tag.prefix == 'MM' else (1,0)
imgdata = imgdata[:,hi]*256 + imgdata[:,lo]
img_size = list(img.size)
img_size.reverse()
new_img = imgdata.reshape(img_size)
# The magic # for maximum sample value is 281
if 281 in img.tag:
img = new_img.astype(np.float32) / img.tag[281][0]
elif np.max(new_img) < 4096:
img = new_img.astype(np.float32) / 4095.
else:
img = new_img.astype(np.float32) / 65535.
else:
img = pil_to_array(img)
pylab.subplot(1,3,1).imshow(img, cmap=matplotlib.cm.Greys_r)
pylab.show()
if len(img.shape)>2:
raise ValueError('Image must be grayscale')
## Create mask that will fix problem when image has black areas outside of well
edges = canny(img, np.ones(img.shape, bool), 2, .1, .3)
ci = np.cumsum(edges, 0)
cj = np.cumsum(edges, 1)
i,j = np.mgrid[0:img.shape[0], 0:img.shape[1]]
mask = ci > 0
mask = mask & (cj > 0)
mask[1:,:] &= (ci[0:-1,:] < ci[-1,j[0:-1,:]])
mask[:,1:] &= (cj[:,0:-1] < cj[i[:,0:-1],-1])
import time
t0 = time.clock()
bg = backgr(img, mask, MODE_AUTO, sigma, splinepoints=splinepoints, scale=scale)
print("Executed in %f sec" % (time.clock() - t0))
bg[~mask] = img[~mask]
pylab.subplot(1,3,2).imshow(img - bg, cmap=matplotlib.cm.Greys_r)
pylab.subplot(1,3,3).imshow(bg, cmap=matplotlib.cm.Greys_r)
pylab.show() |
def indication(self, *args, **kwargs):
"""Downstream packet, send to current terminal."""
if not self.current_terminal:
raise RuntimeError("no active terminal")
if not isinstance(self.current_terminal, Server):
raise RuntimeError("current terminal not a server")
self.current_terminal.indication(*args, **kwargs) | Downstream packet, send to current terminal. | Below is the the instruction that describes the task:
### Input:
Downstream packet, send to current terminal.
### Response:
def indication(self, *args, **kwargs):
"""Downstream packet, send to current terminal."""
if not self.current_terminal:
raise RuntimeError("no active terminal")
if not isinstance(self.current_terminal, Server):
raise RuntimeError("current terminal not a server")
self.current_terminal.indication(*args, **kwargs) |
def get_private_room_history(self, room_id, oldest=None, **kwargs):
"""
Get various history of specific private group in this case private
:param room_id:
:param kwargs:
:return:
"""
return GetPrivateRoomHistory(settings=self.settings, **kwargs).call(
room_id=room_id,
oldest=oldest,
**kwargs
) | Get various history of specific private group in this case private
:param room_id:
:param kwargs:
:return: | Below is the the instruction that describes the task:
### Input:
Get various history of specific private group in this case private
:param room_id:
:param kwargs:
:return:
### Response:
def get_private_room_history(self, room_id, oldest=None, **kwargs):
"""
Get various history of specific private group in this case private
:param room_id:
:param kwargs:
:return:
"""
return GetPrivateRoomHistory(settings=self.settings, **kwargs).call(
room_id=room_id,
oldest=oldest,
**kwargs
) |
def buckingham_input(self, structure, keywords, library=None,
uc=True, valence_dict=None):
"""
Gets a GULP input for an oxide structure and buckingham potential
from library.
Args:
structure: pymatgen.core.structure.Structure
keywords: GULP first line keywords.
library (Default=None): File containing the species and potential.
uc (Default=True): Unit Cell Flag.
valence_dict: {El: valence}
"""
gin = self.keyword_line(*keywords)
gin += self.structure_lines(structure, symm_flg=not uc)
if not library:
gin += self.buckingham_potential(structure, valence_dict)
else:
gin += self.library_line(library)
return gin | Gets a GULP input for an oxide structure and buckingham potential
from library.
Args:
structure: pymatgen.core.structure.Structure
keywords: GULP first line keywords.
library (Default=None): File containing the species and potential.
uc (Default=True): Unit Cell Flag.
valence_dict: {El: valence} | Below is the the instruction that describes the task:
### Input:
Gets a GULP input for an oxide structure and buckingham potential
from library.
Args:
structure: pymatgen.core.structure.Structure
keywords: GULP first line keywords.
library (Default=None): File containing the species and potential.
uc (Default=True): Unit Cell Flag.
valence_dict: {El: valence}
### Response:
def buckingham_input(self, structure, keywords, library=None,
uc=True, valence_dict=None):
"""
Gets a GULP input for an oxide structure and buckingham potential
from library.
Args:
structure: pymatgen.core.structure.Structure
keywords: GULP first line keywords.
library (Default=None): File containing the species and potential.
uc (Default=True): Unit Cell Flag.
valence_dict: {El: valence}
"""
gin = self.keyword_line(*keywords)
gin += self.structure_lines(structure, symm_flg=not uc)
if not library:
gin += self.buckingham_potential(structure, valence_dict)
else:
gin += self.library_line(library)
return gin |
async def message_fetcher_coroutine(self, loop):
"""
Register callback for message fetcher coroutines
"""
Global.LOGGER.debug('registering callbacks for message fetcher coroutine')
self.isrunning = True
while self.isrunning:
loop.call_soon(self._fetch_messages)
loop.call_soon(self._perform_system_check)
await asyncio.sleep(Global.CONFIG_MANAGER.message_fetcher_sleep_interval)
    Global.LOGGER.debug('message fetcher stopped') | Register callback for message fetcher coroutines | Below is the instruction that describes the task:
### Input:
Register callback for message fetcher coroutines
### Response:
async def message_fetcher_coroutine(self, loop):
    """Schedule message-fetch and system-check callbacks on *loop*
    until ``self.isrunning`` is cleared by another component.
    """
    Global.LOGGER.debug('registering callbacks for message fetcher coroutine')
    self.isrunning = True
    # Re-arm both callbacks once per configured sleep interval; the flag
    # is polled at the top of every cycle, so a shutdown request takes
    # effect after at most one interval.
    while True:
        if not self.isrunning:
            break
        loop.call_soon(self._fetch_messages)
        loop.call_soon(self._perform_system_check)
        await asyncio.sleep(Global.CONFIG_MANAGER.message_fetcher_sleep_interval)
    Global.LOGGER.debug('message fetcher stopped')
def update(input,refdir="jref$",local=None,interactive=False,wcsupdate=True):
"""
Updates headers of files given as input to point to the new reference files
NPOLFILE and D2IMFILE required with the new C version of MultiDrizzle.
Parameters
-----------
input : string or list
Name of input file or files acceptable forms:
- single filename with or without directory
- @-file
- association table
- python list of filenames
- wildcard specification of filenames
refdir : string
Path to directory containing new reference files, either
environment variable or full path.
local : boolean
Specifies whether or not to copy new reference files to local
directory for use with the input files.
interactive : boolean
Specifies whether or not to interactively ask the user for the
exact names of the new reference files instead of automatically
searching a directory for them.
updatewcs : boolean
Specifies whether or not to update the WCS information in this
file to use the new reference files.
Examples
--------
1. A set of associated images specified by an ASN file can be updated to use
the NPOLFILEs and D2IMFILE found in the local directory defined using
the `myjref$` environment variable under PyRAF using::
>>> import updatenpol
>>> updatenpol.update('j8bt06010_asn.fits', 'myref$')
2. Another use under Python would be to feed it a specific list of files
to be updated using::
>>> updatenpol.update(['file1_flt.fits','file2_flt.fits'],'myjref$')
3. Files in another directory can also be processed using::
>>> updatenpol.update('data$*flt.fits','../new/ref/')
Notes
-----
.. warning::
This program requires access to the `jref$` directory in order
to evaluate the DGEOFILE specified in the input image header.
This evaluation allows the program to get the information it
needs to identify the correct NPOLFILE.
The use of this program now requires that a directory be set up with
all the new NPOLFILE and D2IMFILE reference files for ACS (a single
directory for all files for all ACS detectors will be fine, much like
jref). Currently, all the files generated by the ACS team has initially
been made available at::
/grp/hst/acs/lucas/new-npl/
The one known limitation to how this program works comes from
confusion if more than 1 file could possibly be used as the new
reference file. This would only happen when NPOLFILE reference files
have been checked into CDBS multiple times, and there are several
versions that apply to the same detector/filter combination. However,
that can be sorted out later if we get into that situation at all.
"""
print('UPDATENPOL Version',__version__+'('+__version_date__+')')
# expand (as needed) the list of input files
files,fcol = parseinput.parseinput(input)
if not interactive:
# expand reference directory name (if necessary) to
# interpret IRAF or environment variable names
rdir = fu.osfn(refdir)
ngeofiles,ngcol = parseinput.parseinput(os.path.join(rdir,'*npl.fits'))
# Find D2IMFILE in refdir for updating input file header as well
d2ifiles,d2col = parseinput.parseinput(os.path.join(rdir,"*d2i.fits"))
# Now, build a matched list of input files and DGEOFILE reference files
# to use for selecting the appropriate new reference file from the
# refdir directory.
for f in files:
print('Updating: ',f)
fdir = os.path.split(f)[0]
# Open each file...
fimg = fits.open(f, mode='update', memmap=False)
phdr = fimg['PRIMARY'].header
fdet = phdr['detector']
# get header of DGEOFILE
dfile = phdr.get('DGEOFILE','')
if dfile in ['N/A','',' ',None]:
npolname = ''
else:
dhdr = fits.getheader(fu.osfn(dfile), memmap=False)
if not interactive:
# search all new NPOLFILEs for one that matches current DGEOFILE config
npol = find_npolfile(ngeofiles,fdet,[phdr['filter1'],phdr['filter2']])
else:
if sys.version_info[0] >= 3:
npol = input("Enter name of NPOLFILE for %s:"%f)
else:
npol = raw_input("Enter name of NPOLFILE for %s:"%f)
if npol == "": npol = None
if npol is None:
errstr = "No valid NPOLFILE found in "+rdir+" for detector="+fdet+"\n"
errstr += " filters = "+phdr['filter1']+","+phdr['filter2']
raise ValueError(errstr)
npolname = os.path.split(npol)[1]
if local:
npolname = os.path.join(fdir,npolname)
# clobber any previous copies of this reference file
if os.path.exists(npolname): os.remove(npolname)
shutil.copy(npol,npolname)
else:
if '$' in refdir:
npolname = refdir+npolname
else:
npolname = os.path.join(refdir,npolname)
phdr.set('NPOLFILE', value=npolname,
comment="Non-polynomial corrections in Paper IV LUT",
after='DGEOFILE')
# Now find correct D2IFILE
if not interactive:
d2i = find_d2ifile(d2ifiles,fdet)
else:
if sys.version_info[0] >= 3:
d2i = input("Enter name of D2IMFILE for %s:"%f)
else:
d2i = raw_input("Enter name of D2IMFILE for %s:"%f)
if d2i == "": d2i = None
if d2i is None:
print('=============\nWARNING:')
print(" No valid D2IMFILE found in "+rdir+" for detector ="+fdet)
print(" D2IMFILE correction will not be applied.")
print('=============\n')
d2iname = ""
else:
d2iname = os.path.split(d2i)[1]
if local:
# Copy D2IMFILE to local data directory alongside input file as well
d2iname = os.path.join(fdir,d2iname)
# clobber any previous copies of this reference file
if os.path.exists(d2iname): os.remove(d2iname)
shutil.copy(d2i,d2iname)
else:
if '$' in refdir:
d2iname = refdir+d2iname
else:
d2iname = os.path.join(refdir,d2iname)
phdr.set('D2IMFILE', value=d2iname,
comment="Column correction table",
after='DGEOFILE')
# Close this input file header and go on to the next
fimg.close()
if wcsupdate:
updatewcs.updatewcs(f) | Updates headers of files given as input to point to the new reference files
NPOLFILE and D2IMFILE required with the new C version of MultiDrizzle.
Parameters
-----------
input : string or list
Name of input file or files acceptable forms:
- single filename with or without directory
- @-file
- association table
- python list of filenames
- wildcard specification of filenames
refdir : string
Path to directory containing new reference files, either
environment variable or full path.
local : boolean
Specifies whether or not to copy new reference files to local
directory for use with the input files.
interactive : boolean
Specifies whether or not to interactively ask the user for the
exact names of the new reference files instead of automatically
searching a directory for them.
updatewcs : boolean
Specifies whether or not to update the WCS information in this
file to use the new reference files.
Examples
--------
1. A set of associated images specified by an ASN file can be updated to use
the NPOLFILEs and D2IMFILE found in the local directory defined using
the `myjref$` environment variable under PyRAF using::
>>> import updatenpol
>>> updatenpol.update('j8bt06010_asn.fits', 'myref$')
2. Another use under Python would be to feed it a specific list of files
to be updated using::
>>> updatenpol.update(['file1_flt.fits','file2_flt.fits'],'myjref$')
3. Files in another directory can also be processed using::
>>> updatenpol.update('data$*flt.fits','../new/ref/')
Notes
-----
.. warning::
This program requires access to the `jref$` directory in order
to evaluate the DGEOFILE specified in the input image header.
This evaluation allows the program to get the information it
needs to identify the correct NPOLFILE.
The use of this program now requires that a directory be set up with
all the new NPOLFILE and D2IMFILE reference files for ACS (a single
directory for all files for all ACS detectors will be fine, much like
jref). Currently, all the files generated by the ACS team has initially
been made available at::
/grp/hst/acs/lucas/new-npl/
The one known limitation to how this program works comes from
confusion if more than 1 file could possibly be used as the new
reference file. This would only happen when NPOLFILE reference files
have been checked into CDBS multiple times, and there are several
versions that apply to the same detector/filter combination. However,
    that can be sorted out later if we get into that situation at all. | Below is the instruction that describes the task:
### Input:
Updates headers of files given as input to point to the new reference files
NPOLFILE and D2IMFILE required with the new C version of MultiDrizzle.
Parameters
-----------
input : string or list
Name of input file or files acceptable forms:
- single filename with or without directory
- @-file
- association table
- python list of filenames
- wildcard specification of filenames
refdir : string
Path to directory containing new reference files, either
environment variable or full path.
local : boolean
Specifies whether or not to copy new reference files to local
directory for use with the input files.
interactive : boolean
Specifies whether or not to interactively ask the user for the
exact names of the new reference files instead of automatically
searching a directory for them.
updatewcs : boolean
Specifies whether or not to update the WCS information in this
file to use the new reference files.
Examples
--------
1. A set of associated images specified by an ASN file can be updated to use
the NPOLFILEs and D2IMFILE found in the local directory defined using
the `myjref$` environment variable under PyRAF using::
>>> import updatenpol
>>> updatenpol.update('j8bt06010_asn.fits', 'myref$')
2. Another use under Python would be to feed it a specific list of files
to be updated using::
>>> updatenpol.update(['file1_flt.fits','file2_flt.fits'],'myjref$')
3. Files in another directory can also be processed using::
>>> updatenpol.update('data$*flt.fits','../new/ref/')
Notes
-----
.. warning::
This program requires access to the `jref$` directory in order
to evaluate the DGEOFILE specified in the input image header.
This evaluation allows the program to get the information it
needs to identify the correct NPOLFILE.
The use of this program now requires that a directory be set up with
all the new NPOLFILE and D2IMFILE reference files for ACS (a single
directory for all files for all ACS detectors will be fine, much like
jref). Currently, all the files generated by the ACS team has initially
been made available at::
/grp/hst/acs/lucas/new-npl/
The one known limitation to how this program works comes from
confusion if more than 1 file could possibly be used as the new
reference file. This would only happen when NPOLFILE reference files
have been checked into CDBS multiple times, and there are several
versions that apply to the same detector/filter combination. However,
that can be sorted out later if we get into that situation at all.
### Response:
def update(input,refdir="jref$",local=None,interactive=False,wcsupdate=True):
    """
    Updates headers of files given as input to point to the new reference files
    NPOLFILE and D2IMFILE required with the new C version of MultiDrizzle.

    Parameters
    -----------
    input : string or list
        Name of input file or files acceptable forms:
          - single filename with or without directory
          - @-file
          - association table
          - python list of filenames
          - wildcard specification of filenames
    refdir : string
        Path to directory containing new reference files, either
        environment variable or full path.
    local : boolean
        Specifies whether or not to copy new reference files to local
        directory for use with the input files.
    interactive : boolean
        Specifies whether or not to interactively ask the user for the
        exact names of the new reference files instead of automatically
        searching a directory for them.
    updatewcs : boolean
        Specifies whether or not to update the WCS information in this
        file to use the new reference files.

    Examples
    --------
    1. A set of associated images specified by an ASN file can be updated to use
       the NPOLFILEs and D2IMFILE found in the local directory defined using
       the `myjref$` environment variable under PyRAF using::

           >>> import updatenpol
           >>> updatenpol.update('j8bt06010_asn.fits', 'myref$')

    2. Another use under Python would be to feed it a specific list of files
       to be updated using::

           >>> updatenpol.update(['file1_flt.fits','file2_flt.fits'],'myjref$')

    3. Files in another directory can also be processed using::

           >>> updatenpol.update('data$*flt.fits','../new/ref/')

    Notes
    -----
    .. warning::
        This program requires access to the `jref$` directory in order
        to evaluate the DGEOFILE specified in the input image header.
        This evaluation allows the program to get the information it
        needs to identify the correct NPOLFILE.

    The use of this program now requires that a directory be set up with
    all the new NPOLFILE and D2IMFILE reference files for ACS (a single
    directory for all files for all ACS detectors will be fine, much like
    jref). Currently, all the files generated by the ACS team has initially
    been made available at::

        /grp/hst/acs/lucas/new-npl/

    The one known limitation to how this program works comes from
    confusion if more than 1 file could possibly be used as the new
    reference file. This would only happen when NPOLFILE reference files
    have been checked into CDBS multiple times, and there are several
    versions that apply to the same detector/filter combination. However,
    that can be sorted out later if we get into that situation at all.
    """
    print('UPDATENPOL Version',__version__+'('+__version_date__+')')
    # expand (as needed) the list of input files
    files,fcol = parseinput.parseinput(input)
    # Expand the reference directory name (IRAF or environment variable
    # syntax) unconditionally.
    # [fix] Previously this was computed only when interactive=False, so
    # the error/warning messages below that reference ``rdir`` raised
    # NameError in interactive mode instead of reporting the real problem.
    rdir = fu.osfn(refdir)
    if not interactive:
        ngeofiles,ngcol = parseinput.parseinput(os.path.join(rdir,'*npl.fits'))
        # Find D2IMFILE in refdir for updating input file header as well
        d2ifiles,d2col = parseinput.parseinput(os.path.join(rdir,"*d2i.fits"))
    # Now, build a matched list of input files and DGEOFILE reference files
    # to use for selecting the appropriate new reference file from the
    # refdir directory.
    for f in files:
        print('Updating: ',f)
        fdir = os.path.split(f)[0]
        # Open each file...
        fimg = fits.open(f, mode='update', memmap=False)
        phdr = fimg['PRIMARY'].header
        fdet = phdr['detector']
        # get header of DGEOFILE
        dfile = phdr.get('DGEOFILE','')
        if dfile in ['N/A','',' ',None]:
            # No DGEOFILE: record an empty NPOLFILE keyword value below.
            npolname = ''
        else:
            # Read the DGEOFILE header; this also verifies the file is
            # reachable before any keywords are rewritten.
            dhdr = fits.getheader(fu.osfn(dfile), memmap=False)
            if not interactive:
                # search all new NPOLFILEs for one that matches current DGEOFILE config
                npol = find_npolfile(ngeofiles,fdet,[phdr['filter1'],phdr['filter2']])
            else:
                if sys.version_info[0] >= 3:
                    # [fix] The ``input`` parameter shadows the builtin
                    # input() function here; calling input(...) would have
                    # invoked the filename argument and raised TypeError.
                    # Go through the builtins module explicitly instead.
                    import builtins
                    npol = builtins.input("Enter name of NPOLFILE for %s:"%f)
                else:
                    npol = raw_input("Enter name of NPOLFILE for %s:"%f)
                if npol == "": npol = None
            if npol is None:
                errstr = "No valid NPOLFILE found in "+rdir+" for detector="+fdet+"\n"
                errstr += " filters = "+phdr['filter1']+","+phdr['filter2']
                raise ValueError(errstr)
            npolname = os.path.split(npol)[1]
            if local:
                npolname = os.path.join(fdir,npolname)
                # clobber any previous copies of this reference file
                if os.path.exists(npolname): os.remove(npolname)
                shutil.copy(npol,npolname)
            else:
                if '$' in refdir:
                    npolname = refdir+npolname
                else:
                    npolname = os.path.join(refdir,npolname)
        phdr.set('NPOLFILE', value=npolname,
                 comment="Non-polynomial corrections in Paper IV LUT",
                 after='DGEOFILE')
        # Now find correct D2IFILE
        if not interactive:
            d2i = find_d2ifile(d2ifiles,fdet)
        else:
            if sys.version_info[0] >= 3:
                # [fix] Same builtin-shadowing problem as for NPOLFILE.
                import builtins
                d2i = builtins.input("Enter name of D2IMFILE for %s:"%f)
            else:
                d2i = raw_input("Enter name of D2IMFILE for %s:"%f)
            if d2i == "": d2i = None
        if d2i is None:
            print('=============\nWARNING:')
            print(" No valid D2IMFILE found in "+rdir+" for detector ="+fdet)
            print(" D2IMFILE correction will not be applied.")
            print('=============\n')
            d2iname = ""
        else:
            d2iname = os.path.split(d2i)[1]
            if local:
                # Copy D2IMFILE to local data directory alongside input file as well
                d2iname = os.path.join(fdir,d2iname)
                # clobber any previous copies of this reference file
                if os.path.exists(d2iname): os.remove(d2iname)
                shutil.copy(d2i,d2iname)
            else:
                if '$' in refdir:
                    d2iname = refdir+d2iname
                else:
                    d2iname = os.path.join(refdir,d2iname)
        phdr.set('D2IMFILE', value=d2iname,
                 comment="Column correction table",
                 after='DGEOFILE')
        # Close this input file header and go on to the next
        fimg.close()
        if wcsupdate:
            updatewcs.updatewcs(f)
def bond_sample_states(
    perc_graph, num_nodes, num_edges, seed, spanning_cluster=True,
    auxiliary_node_attributes=None, auxiliary_edge_attributes=None,
    spanning_sides=None,
    **kwargs
):
    '''
    Generate successive sample states of the bond percolation model
    This is a :ref:`generator function <python:tut-generators>` to successively
    add one edge at a time from the graph to the percolation model.
    At each iteration, it calculates and returns the cluster statistics.
    CAUTION: it returns a reference to the internal array, not a copy.
    Parameters
    ----------
    perc_graph : networkx.Graph
        The substrate graph on which percolation is to take place
    num_nodes : int
        Number ``N`` of sites in the graph
    num_edges : int
        Number ``M`` of bonds in the graph
    seed : {None, int, array_like}
        Random seed initializing the pseudo-random number generator.
        Piped through to `numpy.random.RandomState` constructor.
    spanning_cluster : bool, optional
        Whether to detect a spanning cluster or not.
        Defaults to ``True``.
    auxiliary_node_attributes : optional
        Return value of ``networkx.get_node_attributes(graph, 'span')``
    auxiliary_edge_attributes : optional
        Return value of ``networkx.get_edge_attributes(graph, 'span')``
    spanning_sides : list, optional
        List of keys (attribute values) of the two sides of the auxiliary
        nodes.
        Return value of ``list(set(auxiliary_node_attributes.values()))``
    Yields
    ------
    ret : ndarray
        Structured array with dtype ``dtype=[('has_spanning_cluster', 'bool'),
        ('max_cluster_size', 'uint32'), ('moments', 'int64', 5)]``
    ret['n'] : ndarray of int
        The number of bonds added at the particular iteration
    ret['edge'] : ndarray of int
        The index of the edge added at the particular iteration
        Note that in the first step, when ``ret['n'] == 0``, this value is
        undefined!
    ret['has_spanning_cluster'] : ndarray of bool
        ``True`` if there is a spanning cluster, ``False`` otherwise.
        Only exists if `spanning_cluster` argument is set to ``True``.
    ret['max_cluster_size'] : int
        Size of the largest cluster (absolute number of sites)
    ret['moments'] : 1-D :py:class:`numpy.ndarray` of int
        Array of size ``5``.
        The ``k``-th entry is the ``k``-th raw moment of the (absolute) cluster
        size distribution, with ``k`` ranging from ``0`` to ``4``.
    Raises
    ------
    ValueError
        If `spanning_cluster` is ``True``, but `graph` does not contain any
        auxiliary nodes to detect spanning clusters.
    See also
    --------
    numpy.random.RandomState
    microcanonical_statistics_dtype
    Notes
    -----
    Iterating through this generator is a single run of the Newman-Ziff
    algorithm. [12]_
    The first iteration yields the trivial state with :math:`n = 0` occupied
    bonds.
    Spanning cluster
    In order to detect a spanning cluster, `graph` needs to contain
    auxiliary nodes and edges, cf. Reference [12]_, Figure 6.
    The auxiliary nodes and edges have the ``'span'`` `attribute
    <http://networkx.github.io/documentation/latest/tutorial/tutorial.html#node-attributes>`_.
    The value is either ``0`` or ``1``, distinguishing the two sides of the
    graph to span.
    Raw moments of the cluster size distribution
    The :math:`k`-th raw moment of the (absolute) cluster size distribution
    is :math:`\sum_s' s^k N_s`, where :math:`s` is the cluster size and
    :math:`N_s` is the number of clusters of size :math:`s`. [13]_
    The primed sum :math:`\sum'` signifies that the largest cluster is
    excluded from the sum. [14]_
    References
    ----------
    .. [12] Newman, M. E. J. & Ziff, R. M. Fast monte carlo algorithm for site
       or bond percolation. Physical Review E 64, 016706+ (2001),
       `doi:10.1103/physreve.64.016706 <http://dx.doi.org/10.1103/physreve.64.016706>`_.
    .. [13] Stauffer, D. & Aharony, A. Introduction to Percolation Theory (Taylor &
       Francis, London, 1994), second edn.
    .. [14] Binder, K. & Heermann, D. W. Monte Carlo Simulation in Statistical
       Physics (Springer, Berlin, Heidelberg, 2010),
       `doi:10.1007/978-3-642-03163-2 <http://dx.doi.org/10.1007/978-3-642-03163-2>`_.
    '''
    # construct random number generator
    rng = np.random.RandomState(seed=seed)
    if spanning_cluster:
        if len(spanning_sides) != 2:
            raise ValueError(
                'Spanning cluster is to be detected, but auxiliary nodes '
                'of less or more than 2 types (sides) given.'
            )
    # get a list of edges for easy access in later iterations
    # NOTE(review): indexable .edges() and .nodes_iter() below are
    # NetworkX 1.x API; NetworkX >= 2 would need list(...) / .nodes --
    # confirm the pinned networkx version.
    perc_edges = perc_graph.edges()
    # One permutation of the bond indices == one Newman-Ziff run.
    perm_edges = rng.permutation(num_edges)
    # initial iteration: no edges added yet (n == 0)
    # NOTE(review): moments are accumulated with dtype 'uint64' below even
    # though the docstring lists 'int64' -- confirm against
    # microcanonical_statistics_dtype.
    ret = np.empty(
        1, dtype=microcanonical_statistics_dtype(spanning_cluster)
    )
    ret['n'] = 0
    ret['max_cluster_size'] = 1
    # Every site starts as its own singleton cluster; the (implicit)
    # largest cluster is excluded, leaving num_nodes - 1 clusters of size
    # 1, so every raw moment equals num_nodes - 1.
    ret['moments'] = np.ones(5, dtype='uint64') * (num_nodes - 1)
    if spanning_cluster:
        ret['has_spanning_cluster'] = False
    # yield cluster statistics for n == 0
    yield ret
    # set up disjoint set (union-find) data structure
    ds = nx.utils.union_find.UnionFind()
    if spanning_cluster:
        # Separate union-find that also contains the auxiliary nodes, used
        # purely for spanning detection.
        ds_spanning = nx.utils.union_find.UnionFind()
        # merge all auxiliary nodes for each side
        side_roots = dict()
        for side in spanning_sides:
            nodes = [
                node for (node, node_side) in auxiliary_node_attributes.items()
                if node_side is side
            ]
            ds_spanning.union(*nodes)
            side_roots[side] = ds_spanning[nodes[0]]
        for (edge, edge_side) in auxiliary_edge_attributes.items():
            ds_spanning.union(side_roots[edge_side], *edge)
        side_roots = [
            ds_spanning[side_root] for side_root in side_roots.values()
        ]
    # get first node
    max_cluster_root = next(perc_graph.nodes_iter())
    # loop over all edges (n == 1..M)
    for n in range(num_edges):
        ret['n'] += 1
        # draw new edge from permutation
        edge_index = perm_edges[n]
        edge = perc_edges[edge_index]
        ret['edge'] = edge_index
        # find roots and weights
        roots = [
            ds[node] for node in edge
        ]
        weights = [
            ds.weights[root] for root in roots
        ]
        if roots[0] is not roots[1]:
            # not same cluster: union!
            ds.union(*roots)
            if spanning_cluster:
                ds_spanning.union(*roots)
                # Spanning iff both sides' auxiliary roots have merged.
                ret['has_spanning_cluster'] = (
                    ds_spanning[side_roots[0]] == ds_spanning[side_roots[1]]
                )
            # find new root and weight
            root = ds[edge[0]]
            weight = ds.weights[root]
            # moments and maximum cluster size
            # deduct the previous sub-maximum clusters from moments
            for i in [0, 1]:
                if roots[i] is max_cluster_root:
                    continue
                ret['moments'] -= weights[i] ** np.arange(5, dtype='uint64')
            if max_cluster_root in roots:
                # merged with maximum cluster
                max_cluster_root = root
                ret['max_cluster_size'] = weight
            else:
                # merged previously sub-maximum clusters
                if ret['max_cluster_size'] >= weight:
                    # previously largest cluster remains largest cluster
                    # add merged cluster to moments
                    ret['moments'] += weight ** np.arange(5, dtype='uint64')
                else:
                    # merged cluster overtook previously largest cluster
                    # add previously largest cluster to moments
                    max_cluster_root = root
                    ret['moments'] += ret['max_cluster_size'] ** np.arange(
                        5, dtype='uint64'
                    )
                    ret['max_cluster_size'] = weight
        yield ret | Generate successive sample states of the bond percolation model
This is a :ref:`generator function <python:tut-generators>` to successively
add one edge at a time from the graph to the percolation model.
At each iteration, it calculates and returns the cluster statistics.
CAUTION: it returns a reference to the internal array, not a copy.
Parameters
----------
perc_graph : networkx.Graph
The substrate graph on which percolation is to take place
num_nodes : int
Number ``N`` of sites in the graph
num_edges : int
Number ``M`` of bonds in the graph
seed : {None, int, array_like}
Random seed initializing the pseudo-random number generator.
Piped through to `numpy.random.RandomState` constructor.
spanning_cluster : bool, optional
Whether to detect a spanning cluster or not.
Defaults to ``True``.
auxiliary_node_attributes : optional
Return value of ``networkx.get_node_attributes(graph, 'span')``
auxiliary_edge_attributes : optional
Return value of ``networkx.get_edge_attributes(graph, 'span')``
spanning_sides : list, optional
List of keys (attribute values) of the two sides of the auxiliary
nodes.
Return value of ``list(set(auxiliary_node_attributes.values()))``
Yields
------
ret : ndarray
Structured array with dtype ``dtype=[('has_spanning_cluster', 'bool'),
('max_cluster_size', 'uint32'), ('moments', 'int64', 5)]``
ret['n'] : ndarray of int
The number of bonds added at the particular iteration
ret['edge'] : ndarray of int
The index of the edge added at the particular iteration
Note that in the first step, when ``ret['n'] == 0``, this value is
undefined!
ret['has_spanning_cluster'] : ndarray of bool
``True`` if there is a spanning cluster, ``False`` otherwise.
Only exists if `spanning_cluster` argument is set to ``True``.
ret['max_cluster_size'] : int
Size of the largest cluster (absolute number of sites)
ret['moments'] : 1-D :py:class:`numpy.ndarray` of int
Array of size ``5``.
The ``k``-th entry is the ``k``-th raw moment of the (absolute) cluster
size distribution, with ``k`` ranging from ``0`` to ``4``.
Raises
------
ValueError
If `spanning_cluster` is ``True``, but `graph` does not contain any
auxiliary nodes to detect spanning clusters.
See also
--------
numpy.random.RandomState
microcanonical_statistics_dtype
Notes
-----
Iterating through this generator is a single run of the Newman-Ziff
algorithm. [12]_
The first iteration yields the trivial state with :math:`n = 0` occupied
bonds.
Spanning cluster
In order to detect a spanning cluster, `graph` needs to contain
auxiliary nodes and edges, cf. Reference [12]_, Figure 6.
The auxiliary nodes and edges have the ``'span'`` `attribute
<http://networkx.github.io/documentation/latest/tutorial/tutorial.html#node-attributes>`_.
The value is either ``0`` or ``1``, distinguishing the two sides of the
graph to span.
Raw moments of the cluster size distribution
The :math:`k`-th raw moment of the (absolute) cluster size distribution
is :math:`\sum_s' s^k N_s`, where :math:`s` is the cluster size and
:math:`N_s` is the number of clusters of size :math:`s`. [13]_
The primed sum :math:`\sum'` signifies that the largest cluster is
excluded from the sum. [14]_
References
----------
.. [12] Newman, M. E. J. & Ziff, R. M. Fast monte carlo algorithm for site
or bond percolation. Physical Review E 64, 016706+ (2001),
`doi:10.1103/physreve.64.016706 <http://dx.doi.org/10.1103/physreve.64.016706>`_.
.. [13] Stauffer, D. & Aharony, A. Introduction to Percolation Theory (Taylor &
Francis, London, 1994), second edn.
.. [14] Binder, K. & Heermann, D. W. Monte Carlo Simulation in Statistical
Physics (Springer, Berlin, Heidelberg, 2010),
        `doi:10.1007/978-3-642-03163-2 <http://dx.doi.org/10.1007/978-3-642-03163-2>`_. | Below is the instruction that describes the task:
### Input:
Generate successive sample states of the bond percolation model
This is a :ref:`generator function <python:tut-generators>` to successively
add one edge at a time from the graph to the percolation model.
At each iteration, it calculates and returns the cluster statistics.
CAUTION: it returns a reference to the internal array, not a copy.
Parameters
----------
perc_graph : networkx.Graph
The substrate graph on which percolation is to take place
num_nodes : int
Number ``N`` of sites in the graph
num_edges : int
Number ``M`` of bonds in the graph
seed : {None, int, array_like}
Random seed initializing the pseudo-random number generator.
Piped through to `numpy.random.RandomState` constructor.
spanning_cluster : bool, optional
Whether to detect a spanning cluster or not.
Defaults to ``True``.
auxiliary_node_attributes : optional
Return value of ``networkx.get_node_attributes(graph, 'span')``
auxiliary_edge_attributes : optional
Return value of ``networkx.get_edge_attributes(graph, 'span')``
spanning_sides : list, optional
List of keys (attribute values) of the two sides of the auxiliary
nodes.
Return value of ``list(set(auxiliary_node_attributes.values()))``
Yields
------
ret : ndarray
Structured array with dtype ``dtype=[('has_spanning_cluster', 'bool'),
('max_cluster_size', 'uint32'), ('moments', 'int64', 5)]``
ret['n'] : ndarray of int
The number of bonds added at the particular iteration
ret['edge'] : ndarray of int
The index of the edge added at the particular iteration
Note that in the first step, when ``ret['n'] == 0``, this value is
undefined!
ret['has_spanning_cluster'] : ndarray of bool
``True`` if there is a spanning cluster, ``False`` otherwise.
Only exists if `spanning_cluster` argument is set to ``True``.
ret['max_cluster_size'] : int
Size of the largest cluster (absolute number of sites)
ret['moments'] : 1-D :py:class:`numpy.ndarray` of int
Array of size ``5``.
The ``k``-th entry is the ``k``-th raw moment of the (absolute) cluster
size distribution, with ``k`` ranging from ``0`` to ``4``.
Raises
------
ValueError
If `spanning_cluster` is ``True``, but `graph` does not contain any
auxiliary nodes to detect spanning clusters.
See also
--------
numpy.random.RandomState
microcanonical_statistics_dtype
Notes
-----
Iterating through this generator is a single run of the Newman-Ziff
algorithm. [12]_
The first iteration yields the trivial state with :math:`n = 0` occupied
bonds.
Spanning cluster
In order to detect a spanning cluster, `graph` needs to contain
auxiliary nodes and edges, cf. Reference [12]_, Figure 6.
The auxiliary nodes and edges have the ``'span'`` `attribute
<http://networkx.github.io/documentation/latest/tutorial/tutorial.html#node-attributes>`_.
The value is either ``0`` or ``1``, distinguishing the two sides of the
graph to span.
Raw moments of the cluster size distribution
The :math:`k`-th raw moment of the (absolute) cluster size distribution
is :math:`\sum_s' s^k N_s`, where :math:`s` is the cluster size and
:math:`N_s` is the number of clusters of size :math:`s`. [13]_
The primed sum :math:`\sum'` signifies that the largest cluster is
excluded from the sum. [14]_
References
----------
.. [12] Newman, M. E. J. & Ziff, R. M. Fast monte carlo algorithm for site
or bond percolation. Physical Review E 64, 016706+ (2001),
`doi:10.1103/physreve.64.016706 <http://dx.doi.org/10.1103/physreve.64.016706>`_.
.. [13] Stauffer, D. & Aharony, A. Introduction to Percolation Theory (Taylor &
Francis, London, 1994), second edn.
.. [14] Binder, K. & Heermann, D. W. Monte Carlo Simulation in Statistical
Physics (Springer, Berlin, Heidelberg, 2010),
`doi:10.1007/978-3-642-03163-2 <http://dx.doi.org/10.1007/978-3-642-03163-2>`_.
### Response:
def bond_sample_states(
    perc_graph, num_nodes, num_edges, seed, spanning_cluster=True,
    auxiliary_node_attributes=None, auxiliary_edge_attributes=None,
    spanning_sides=None,
    **kwargs
):
    '''
    Generate successive sample states of the bond percolation model

    This is a :ref:`generator function <python:tut-generators>` to successively
    add one edge at a time from the graph to the percolation model.
    At each iteration, it calculates and returns the cluster statistics.

    CAUTION: it returns a reference to the internal array, not a copy.

    Parameters
    ----------
    perc_graph : networkx.Graph
        The substrate graph on which percolation is to take place

    num_nodes : int
        Number ``N`` of sites in the graph

    num_edges : int
        Number ``M`` of bonds in the graph

    seed : {None, int, array_like}
        Random seed initializing the pseudo-random number generator.
        Piped through to `numpy.random.RandomState` constructor.

    spanning_cluster : bool, optional
        Whether to detect a spanning cluster or not.
        Defaults to ``True``.

    auxiliary_node_attributes : optional
        Return value of ``networkx.get_node_attributes(graph, 'span')``

    auxiliary_edge_attributes : optional
        Return value of ``networkx.get_edge_attributes(graph, 'span')``

    spanning_sides : list, optional
        List of keys (attribute values) of the two sides of the auxiliary
        nodes.
        Return value of ``list(set(auxiliary_node_attributes.values()))``

    Yields
    ------
    ret : ndarray
        Structured array with dtype ``dtype=[('has_spanning_cluster', 'bool'),
        ('max_cluster_size', 'uint32'), ('moments', 'int64', 5)]``

    ret['n'] : ndarray of int
        The number of bonds added at the particular iteration

    ret['edge'] : ndarray of int
        The index of the edge added at the particular iteration
        Note that in the first step, when ``ret['n'] == 0``, this value is
        undefined!

    ret['has_spanning_cluster'] : ndarray of bool
        ``True`` if there is a spanning cluster, ``False`` otherwise.
        Only exists if `spanning_cluster` argument is set to ``True``.

    ret['max_cluster_size'] : int
        Size of the largest cluster (absolute number of sites)

    ret['moments'] : 1-D :py:class:`numpy.ndarray` of int
        Array of size ``5``.
        The ``k``-th entry is the ``k``-th raw moment of the (absolute) cluster
        size distribution, with ``k`` ranging from ``0`` to ``4``.

    Raises
    ------
    ValueError
        If `spanning_cluster` is ``True``, but `graph` does not contain any
        auxiliary nodes to detect spanning clusters.

    See also
    --------
    numpy.random.RandomState
    microcanonical_statistics_dtype

    Notes
    -----
    Iterating through this generator is a single run of the Newman-Ziff
    algorithm. [12]_
    The first iteration yields the trivial state with :math:`n = 0` occupied
    bonds.

    Spanning cluster
        In order to detect a spanning cluster, `graph` needs to contain
        auxiliary nodes and edges, cf. Reference [12]_, Figure 6.
        The auxiliary nodes and edges have the ``'span'`` `attribute
        <http://networkx.github.io/documentation/latest/tutorial/tutorial.html#node-attributes>`_.
        The value is either ``0`` or ``1``, distinguishing the two sides of the
        graph to span.

    Raw moments of the cluster size distribution
        The :math:`k`-th raw moment of the (absolute) cluster size distribution
        is :math:`\sum_s' s^k N_s`, where :math:`s` is the cluster size and
        :math:`N_s` is the number of clusters of size :math:`s`. [13]_
        The primed sum :math:`\sum'` signifies that the largest cluster is
        excluded from the sum. [14]_

    References
    ----------
    .. [12] Newman, M. E. J. & Ziff, R. M. Fast monte carlo algorithm for site
        or bond percolation. Physical Review E 64, 016706+ (2001),
        `doi:10.1103/physreve.64.016706 <http://dx.doi.org/10.1103/physreve.64.016706>`_.

    .. [13] Stauffer, D. & Aharony, A. Introduction to Percolation Theory (Taylor &
        Francis, London, 1994), second edn.

    .. [14] Binder, K. & Heermann, D. W. Monte Carlo Simulation in Statistical
        Physics (Springer, Berlin, Heidelberg, 2010),
        `doi:10.1007/978-3-642-03163-2 <http://dx.doi.org/10.1007/978-3-642-03163-2>`_.
    '''
    # construct random number generator
    rng = np.random.RandomState(seed=seed)

    if spanning_cluster:
        # spanning is defined between exactly two sides of the graph
        if len(spanning_sides) != 2:
            raise ValueError(
                'Spanning cluster is to be detected, but auxiliary nodes '
                'of less or more than 2 types (sides) given.'
            )

    # get a list of edges for easy access in later iterations
    perc_edges = perc_graph.edges()

    # random order in which the M bonds are added, one per iteration
    perm_edges = rng.permutation(num_edges)

    # initial iteration: no edges added yet (n == 0)
    # NOTE: this single-record structured array is mutated in place and
    # re-yielded every iteration -- callers get a reference, not a copy
    ret = np.empty(
        1, dtype=microcanonical_statistics_dtype(spanning_cluster)
    )

    ret['n'] = 0
    ret['max_cluster_size'] = 1
    # at n == 0 every site is its own cluster of size 1; excluding the
    # (arbitrary) largest cluster leaves N - 1 clusters of size 1, so
    # every raw moment 1^k summed over them starts out at N - 1
    ret['moments'] = np.ones(5, dtype='uint64') * (num_nodes - 1)

    if spanning_cluster:
        ret['has_spanning_cluster'] = False

    # yield cluster statistics for n == 0
    yield ret

    # set up disjoint set (union-find) data structure
    ds = nx.utils.union_find.UnionFind()

    if spanning_cluster:
        # second union-find that additionally tracks the auxiliary nodes,
        # used only to decide whether the two sides are connected
        ds_spanning = nx.utils.union_find.UnionFind()

        # merge all auxiliary nodes for each side
        side_roots = dict()
        for side in spanning_sides:
            nodes = [
                node for (node, node_side) in auxiliary_node_attributes.items()
                if node_side is side
            ]
            ds_spanning.union(*nodes)
            side_roots[side] = ds_spanning[nodes[0]]

        # attach each auxiliary edge's endpoints to its side's root
        for (edge, edge_side) in auxiliary_edge_attributes.items():
            ds_spanning.union(side_roots[edge_side], *edge)

        side_roots = [
            ds_spanning[side_root] for side_root in side_roots.values()
        ]

    # get first node
    # (arbitrary representative of the "largest" cluster -- all clusters
    # have size 1 at this point; nodes_iter() is the networkx 1.x API)
    max_cluster_root = next(perc_graph.nodes_iter())

    # loop over all edges (n == 1..M)
    for n in range(num_edges):
        ret['n'] += 1

        # draw new edge from permutation
        edge_index = perm_edges[n]
        edge = perc_edges[edge_index]
        ret['edge'] = edge_index

        # find roots and weights
        roots = [
            ds[node] for node in edge
        ]
        weights = [
            ds.weights[root] for root in roots
        ]

        if roots[0] is not roots[1]:
            # not same cluster: union!
            ds.union(*roots)
            if spanning_cluster:
                ds_spanning.union(*roots)

                ret['has_spanning_cluster'] = (
                    ds_spanning[side_roots[0]] == ds_spanning[side_roots[1]]
                )

            # find new root and weight
            root = ds[edge[0]]
            weight = ds.weights[root]

            # moments and maximum cluster size

            # deduct the previous sub-maximum clusters from moments
            for i in [0, 1]:
                if roots[i] is max_cluster_root:
                    continue
                ret['moments'] -= weights[i] ** np.arange(5, dtype='uint64')

            if max_cluster_root in roots:
                # merged with maximum cluster
                max_cluster_root = root
                ret['max_cluster_size'] = weight
            else:
                # merged previously sub-maximum clusters
                if ret['max_cluster_size'] >= weight:
                    # previously largest cluster remains largest cluster
                    # add merged cluster to moments
                    ret['moments'] += weight ** np.arange(5, dtype='uint64')
                else:
                    # merged cluster overtook previously largest cluster
                    # add previously largest cluster to moments
                    max_cluster_root = root
                    ret['moments'] += ret['max_cluster_size'] ** np.arange(
                        5, dtype='uint64'
                    )
                    ret['max_cluster_size'] = weight

        yield ret
def addScalarBar3D(
    self,
    pos=(0, 0, 0),
    normal=(0, 0, 1),
    sx=0.1,
    sy=2,
    nlabels=9,
    ncols=256,
    cmap=None,
    c="k",
    alpha=1,
):
    """
    Draw a 3D scalar bar to actor.

    .. hint:: |mesh_coloring| |mesh_coloring.py|_
    """
    # Only record the requested settings here; the actual scalar bar
    # is built later, by Plotter.show().
    settings = [pos, normal, sx, sy, nlabels, ncols, cmap, c, alpha]
    self.scalarbar = settings
    return self
.. hint:: |mesh_coloring| |mesh_coloring.py|_ | Below is the the instruction that describes the task:
### Input:
Draw a 3D scalar bar to actor.
.. hint:: |mesh_coloring| |mesh_coloring.py|_
### Response:
def addScalarBar3D(
    self,
    pos=(0, 0, 0),
    normal=(0, 0, 1),
    sx=0.1,
    sy=2,
    nlabels=9,
    ncols=256,
    cmap=None,
    c="k",
    alpha=1,
):
    """
    Draw a 3D scalar bar to actor.

    .. hint:: |mesh_coloring| |mesh_coloring.py|_
    """
    # Only record the requested settings here; the actual scalar bar
    # is built later, by Plotter.show().
    settings = [pos, normal, sx, sy, nlabels, ncols, cmap, c, alpha]
    self.scalarbar = settings
    return self
def listen_fds(unset_environment=True):
    """Return the list of socket-activated file descriptors.

    Example::

        (in primary window)
        $ systemd-activate -l 2000 python3 -c \\
          'from systemd.daemon import listen_fds; print(listen_fds())'
        (in another window)
        $ telnet localhost 2000
        (in primary window)
        ...
        Execing python3 (...)
        [3]
    """
    count = _listen_fds(unset_environment)
    return [LISTEN_FDS_START + offset for offset in range(count)]
Example::
(in primary window)
$ systemd-activate -l 2000 python3 -c \\
'from systemd.daemon import listen_fds; print(listen_fds())'
(in another window)
$ telnet localhost 2000
(in primary window)
...
Execing python3 (...)
[3] | Below is the the instruction that describes the task:
### Input:
Return a list of socket activated descriptors
Example::
(in primary window)
$ systemd-activate -l 2000 python3 -c \\
'from systemd.daemon import listen_fds; print(listen_fds())'
(in another window)
$ telnet localhost 2000
(in primary window)
...
Execing python3 (...)
[3]
### Response:
def listen_fds(unset_environment=True):
    """Return the list of socket-activated file descriptors.

    Example::

        (in primary window)
        $ systemd-activate -l 2000 python3 -c \\
          'from systemd.daemon import listen_fds; print(listen_fds())'
        (in another window)
        $ telnet localhost 2000
        (in primary window)
        ...
        Execing python3 (...)
        [3]
    """
    count = _listen_fds(unset_environment)
    return [LISTEN_FDS_START + offset for offset in range(count)]
def tpf(args):
    """
    %prog tpf agpfile
    Print out a list of ids, one per line. Also known as the Tiling Path.
    AC225490.9 chr6
    Can optionally output scaffold gaps.
    """
    # NOTE: the docstring doubles as the CLI usage text below.
    p = OptionParser(tpf.__doc__)
    p.add_option("--noversion", default=False, action="store_true",
                 help="Remove trailing accession versions [default: %default]")
    p.add_option("--gaps", default=False, action="store_true",
                 help="Include gaps in the output [default: %default]")
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())

    agpfile, = args
    for entry in AGP(agpfile):
        obj = entry.object
        if entry.is_gap:
            # gap lines are only emitted on request, and only clone gaps
            if opts.gaps and entry.isCloneGap:
                print("\t".join((entry.gap_type, obj, "na")))
            continue
        accession = entry.component_id
        if opts.noversion:
            # strip the trailing ".<version>" from the accession
            accession = accession.rsplit(".", 1)[0]
        print("\t".join((accession, obj, entry.orientation)))
Print out a list of ids, one per line. Also known as the Tiling Path.
AC225490.9 chr6
Can optionally output scaffold gaps. | Below is the the instruction that describes the task:
### Input:
%prog tpf agpfile
Print out a list of ids, one per line. Also known as the Tiling Path.
AC225490.9 chr6
Can optionally output scaffold gaps.
### Response:
def tpf(args):
    """
    %prog tpf agpfile
    Print out a list of ids, one per line. Also known as the Tiling Path.
    AC225490.9 chr6
    Can optionally output scaffold gaps.
    """
    # NOTE: the docstring doubles as the CLI usage text below.
    p = OptionParser(tpf.__doc__)
    p.add_option("--noversion", default=False, action="store_true",
                 help="Remove trailing accession versions [default: %default]")
    p.add_option("--gaps", default=False, action="store_true",
                 help="Include gaps in the output [default: %default]")
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())

    agpfile, = args
    for entry in AGP(agpfile):
        obj = entry.object
        if entry.is_gap:
            # gap lines are only emitted on request, and only clone gaps
            if opts.gaps and entry.isCloneGap:
                print("\t".join((entry.gap_type, obj, "na")))
            continue
        accession = entry.component_id
        if opts.noversion:
            # strip the trailing ".<version>" from the accession
            accession = accession.rsplit(".", 1)[0]
        print("\t".join((accession, obj, entry.orientation)))
def init(self, dir_or_plan=None, backend_config=None,
         reconfigure=IsFlagged, backend=True, **kwargs):
    """
    Run ``terraform init``.

    Refer to https://www.terraform.io/docs/commands/init.html

    By default, this assumes you want to use backend config, and tries to
    init fresh: the flags -reconfigure and -backend=true are default.

    :param dir_or_plan: relative path to the folder want to init
    :param backend_config: a dictionary of backend config options. eg.
        t = Terraform()
        t.init(backend_config={'access_key': 'myaccesskey',
            'secret_key': 'mysecretkey', 'bucket': 'mybucketname'})
    :param reconfigure: whether or not to force reconfiguration of backend
    :param backend: whether or not to use backend settings for init
    :param kwargs: options
    :return: ret_code, stdout, stderr
    """
    # Fold the named arguments into the option dict alongside any extras
    # passed via **kwargs.
    options = dict(
        kwargs,
        backend_config=backend_config,
        reconfigure=reconfigure,
        backend=backend,
    )
    options = self._generate_default_options(options)
    args = self._generate_default_args(dir_or_plan)
    return self.cmd('init', *args, **options)
By default, this assumes you want to use backend config, and tries to
init fresh. The flags -reconfigure and -backend=true are default.
:param dir_or_plan: relative path to the folder want to init
:param backend_config: a dictionary of backend config options. eg.
t = Terraform()
t.init(backend_config={'access_key': 'myaccesskey',
'secret_key': 'mysecretkey', 'bucket': 'mybucketname'})
:param reconfigure: whether or not to force reconfiguration of backend
:param backend: whether or not to use backend settings for init
:param kwargs: options
:return: ret_code, stdout, stderr | Below is the the instruction that describes the task:
### Input:
refer to https://www.terraform.io/docs/commands/init.html
By default, this assumes you want to use backend config, and tries to
init fresh. The flags -reconfigure and -backend=true are default.
:param dir_or_plan: relative path to the folder want to init
:param backend_config: a dictionary of backend config options. eg.
t = Terraform()
t.init(backend_config={'access_key': 'myaccesskey',
'secret_key': 'mysecretkey', 'bucket': 'mybucketname'})
:param reconfigure: whether or not to force reconfiguration of backend
:param backend: whether or not to use backend settings for init
:param kwargs: options
:return: ret_code, stdout, stderr
### Response:
def init(self, dir_or_plan=None, backend_config=None,
         reconfigure=IsFlagged, backend=True, **kwargs):
    """
    Run ``terraform init``.

    Refer to https://www.terraform.io/docs/commands/init.html

    By default, this assumes you want to use backend config, and tries to
    init fresh: the flags -reconfigure and -backend=true are default.

    :param dir_or_plan: relative path to the folder want to init
    :param backend_config: a dictionary of backend config options. eg.
        t = Terraform()
        t.init(backend_config={'access_key': 'myaccesskey',
            'secret_key': 'mysecretkey', 'bucket': 'mybucketname'})
    :param reconfigure: whether or not to force reconfiguration of backend
    :param backend: whether or not to use backend settings for init
    :param kwargs: options
    :return: ret_code, stdout, stderr
    """
    # Fold the named arguments into the option dict alongside any extras
    # passed via **kwargs.
    options = dict(
        kwargs,
        backend_config=backend_config,
        reconfigure=reconfigure,
        backend=backend,
    )
    options = self._generate_default_options(options)
    args = self._generate_default_args(dir_or_plan)
    return self.cmd('init', *args, **options)
def copy(self):
    """Create and return a shallow copy of this instance."""
    # A fresh instance with the same capacity, then mirror the contents.
    duplicate = type(self)(self.capacity)
    duplicate._mapping.update(self._mapping)
    duplicate._queue = deque(self._queue)
    return duplicate
### Input:
Return a shallow copy of the instance.
### Response:
def copy(self):
    """Create and return a shallow copy of this instance."""
    # A fresh instance with the same capacity, then mirror the contents.
    duplicate = type(self)(self.capacity)
    duplicate._mapping.update(self._mapping)
    duplicate._queue = deque(self._queue)
    return duplicate
def reconnect_all(self):
    """
    Re-establish connection to all instances
    """
    # Walk every connection list regardless of role and reconnect each one.
    for connections in self.Instances.values():
        for conn in connections:
            conn.reconnect()
### Input:
Re-establish connection to all instances
### Response:
def reconnect_all(self):
    """
    Re-establish connection to all instances
    """
    # Walk every connection list regardless of role and reconnect each one.
    for connections in self.Instances.values():
        for conn in connections:
            conn.reconnect()
def k15(k15file, dir_path='.', input_dir_path='',
meas_file='measurements.txt', aniso_outfile='specimens.txt',
samp_file="samples.txt", result_file ="rmag_anisotropy.txt",
specnum=0, sample_naming_con='1', location="unknown",
data_model_num=3):
"""
converts .k15 format data to MagIC format.
assumes Jelinek Kappabridge measurement scheme.
Parameters
----------
k15file : str
input file name
dir_path : str
output file directory, default "."
input_dir_path : str
input file directory IF different from dir_path, default ""
meas_file : str
output measurement file name, default "measurements.txt"
aniso_outfile : str
output specimen file name, default "specimens.txt"
samp_file: str
output sample file name, default "samples.txt"
aniso_results_file : str
output result file name, default "rmag_results.txt", data model 2 only
specnum : int
number of characters to designate a specimen, default 0
samp_con : str
sample/site naming convention, default '1', see info below
location : str
location name, default "unknown"
data_model_num : int
MagIC data model [2, 3], default 3
Returns
--------
type - Tuple : (True or False indicating if conversion was sucessful, samp_file name written)
Info
--------
Infile format:
name [az,pl,strike,dip], followed by
3 rows of 5 measurements for each specimen
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
[4-Z] XXXXYYY: YYY is sample designation with Z characters from site XXX
[5] site name same as sample
[6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
NB: all others you will have to customize your self
or e-mail ltauxe@ucsd.edu for help.
"""
#
# initialize some variables
#
input_dir_path, output_dir_path = pmag.fix_directories(input_dir_path, dir_path)
version_num = pmag.get_version()
syn = 0
itilt, igeo, linecnt, key = 0, 0, 0, ""
first_save = 1
k15 = []
citation = 'This study'
data_model_num = int(float(data_model_num))
# set column names for MagIC 3
spec_name_col = 'specimen' #
samp_name_col = 'sample' #
site_name_col = 'site' #
loc_name_col = 'location' #
citation_col = 'citations'
method_col = 'method_codes'
site_description_col = 'description'
expedition_col = 'expedition_name'
instrument_col = 'instrument_codes'
experiment_col = 'experiments'
analyst_col = 'analysts'
quality_col = 'quality'
aniso_quality_col = 'result_quality'
meas_standard_col = 'standard'
meas_description_col = 'description'
aniso_type_col = 'aniso_type'
aniso_unit_col = 'aniso_s_unit'
aniso_n_col = 'aniso_s_n_measurements'
azimuth_col = 'azimuth'
spec_volume_col = 'volume'
samp_dip_col = 'dip'
bed_dip_col = 'bed_dip'
bed_dip_direction_col = 'bed_dip_direction'
chi_vol_col = 'susc_chi_volume'
aniso_sigma_col = 'aniso_s_sigma'
aniso_unit_col = 'aniso_s_unit'
aniso_tilt_corr_col = 'aniso_tilt_correction'
meas_table_name = 'measurements'
spec_table_name = 'specimens'
samp_table_name = 'samples'
site_table_name = 'sites'
meas_name_col = 'measurement'
meas_time_col = 'timestamp'
meas_ac_col = 'meas_field_ac'
meas_temp_col = "meas_temp"
#
software_col = 'software_packages'
description_col = 'description' # sites.description
treat_temp_col = 'treat_temp'
meas_orient_phi_col = "meas_orient_phi"
meas_orient_theta_col = "meas_orient_theta"
aniso_mean_col = 'aniso_s_mean'
result_description_col = "description"
# set defaults correctly for MagIC 2
if data_model_num == 2:
if meas_file == 'measurements.txt':
meas_file = 'magic_measurements.txt'
if samp_file == 'samples.txt':
samp_file = 'er_samples.txt'
if aniso_outfile == 'specimens.txt':
aniso_outfile = 'rmag_anisotropy.txt'
# set column names for MagIC 2
if data_model_num == 2:
spec_name_col = 'er_specimen_name'
samp_name_col = 'er_sample_name'
site_name_col = 'er_site_name'
loc_name_col = 'er_location_name'
citation_col = 'er_citation_names'
method_col = 'magic_method_codes'
site_description_col = 'site_description'
expedition_col = 'er_expedition_name'
instrument_col = 'magic_instrument_codes'
experiment_col = 'magic_experiment_names'
analyst_col = 'er_analyst_mail_names'
quality_col = 'measurement_flag'
aniso_quality_col = 'anisotropy_flag'
meas_standard_col = 'measurement_standard'
meas_description_col = 'measurement_description'
aniso_type_col = 'anisotropy_type'
aniso_unit_col = 'anisotropy_unit'
aniso_n_col = 'anisotropy_n'
azimuth_col = 'sample_azimuth'
spec_volume_col = 'specimen_volume'
samp_dip_col = 'sample_dip'
bed_dip_col = 'sample_bed_dip'
bed_dip_direction_col = 'sample_bed_dip_direction'
chi_vol_col = 'measurement_chi_volume'
aniso_sigma_col = 'anisotropy_sigma'
aniso_unit_col = 'anisotropy_unit'
aniso_tilt_corr_col = 'anisotropy_tilt_correction'
meas_table_name = 'magic_measurements'
spec_table_name = 'er_specimens'
samp_table_name = 'er_samples'
site_table_name = 'er_sites'
meas_name_col = 'measurement_number'
meas_time_col = 'measurement_date'
meas_ac_col = 'measurement_lab_field_ac'
meas_temp_col = "measurement_temp"
#
software_col = 'magic_software_packages'
description_col = 'rmag_result_name'
treat_temp_col = 'treatment_temp'
meas_temp_col = "measurement_temp"
meas_orient_phi_col = "measurement_orient_phi"
meas_orient_theta_col = "measurement_orient_theta"
aniso_mean_col = 'anisotropy_mean'
result_description_col = "result_description"
# pick off stuff from command line
Z = ""
if "4" in sample_naming_con:
if "-" not in sample_naming_con:
print("option [4] must be in form 4-Z where Z is an integer")
return False, "option [4] must be in form 4-Z where Z is an integer"
else:
Z = sample_naming_con.split("-")[1]
sample_naming_con = "4"
if sample_naming_con == '6':
Samps, filetype = pmag.magic_read(
os.path.join(input_dir_path, samp_table_name + ".txt"))
samp_file = pmag.resolve_file_name(samp_file, output_dir_path)
meas_file = pmag.resolve_file_name(meas_file, output_dir_path)
aniso_outfile = pmag.resolve_file_name(aniso_outfile, output_dir_path)
result_file = pmag.resolve_file_name(result_file, output_dir_path)
k15file = pmag.resolve_file_name(k15file, input_dir_path)
if not os.path.exists(k15file):
print(k15file)
return False, "You must provide a valid k15 format file"
try:
SampRecs, filetype = pmag.magic_read(
samp_file) # append new records to existing
samplist = []
for samp in SampRecs:
if samp[samp_name_col] not in samplist:
samplist.append(samp[samp_name_col])
except IOError:
SampRecs = []
# measurement directions for Jelinek 1977 protocol:
Decs = [315, 225, 180, 135, 45, 90, 270, 270, 270, 90, 180, 180, 0, 0, 0]
Incs = [0, 0, 0, 0, 0, -45, -45, 0, 45, 45, 45, -45, -90, -45, 45]
# some defaults to read in .k15 file format
# list of measurements and default number of characters for specimen ID
# some magic default definitions
#
# read in data
with open(k15file, 'r') as finput:
lines = finput.readlines()
MeasRecs, SpecRecs, AnisRecs, ResRecs = [], [], [], []
# read in data
MeasRec, SpecRec, SampRec, SiteRec, AnisRec, ResRec = {}, {}, {}, {}, {}, {}
for line in lines:
linecnt += 1
rec = line.split()
if linecnt == 1:
MeasRec[method_col] = ""
SpecRec[method_col] = ""
SampRec[method_col] = ""
AnisRec[method_col] = ""
SiteRec[method_col] = ""
ResRec[method_col] = ""
MeasRec[software_col] = version_num
SpecRec[software_col] = version_num
SampRec[software_col] = version_num
AnisRec[software_col] = version_num
SiteRec[software_col] = version_num
ResRec[software_col] = version_num
MeasRec[method_col] = "LP-X"
MeasRec[quality_col] = "g"
MeasRec[meas_standard_col] = "u"
MeasRec[citation_col] = "This study"
SpecRec[citation_col] = "This study"
SampRec[citation_col] = "This study"
AnisRec[citation_col] = "This study"
ResRec[citation_col] = "This study"
MeasRec[spec_name_col] = rec[0]
MeasRec[experiment_col] = rec[0] + ":LP-AN-MS"
AnisRec[experiment_col] = rec[0] + ":AMS"
ResRec[experiment_col] = rec[0] + ":AMS"
SpecRec[spec_name_col] = rec[0]
AnisRec[spec_name_col] = rec[0]
SampRec[spec_name_col] = rec[0]
if data_model_num == 2:
ResRec[description_col] = rec[0]
if data_model_num == 3:
ResRec[spec_name_col] = rec[0]
specnum = int(specnum)
if specnum != 0:
MeasRec[samp_name_col] = rec[0][:-specnum]
if specnum == 0:
MeasRec[samp_name_col] = rec[0]
SampRec[samp_name_col] = MeasRec[samp_name_col]
SpecRec[samp_name_col] = MeasRec[samp_name_col]
AnisRec[samp_name_col] = MeasRec[samp_name_col]
if data_model_num == 3:
ResRec[samp_name_col] = MeasRec[samp_name_col]
else:
ResRec[samp_name_col + "s"] = MeasRec[samp_name_col]
if sample_naming_con == "6":
for samp in Samps:
if samp[samp_name_col] == AnisRec[samp_name_col]:
sitename = samp[site_name_col]
location = samp[loc_name_col]
elif sample_naming_con != "":
sitename = pmag.parse_site(
AnisRec[samp_name_col], sample_naming_con, Z)
MeasRec[site_name_col] = sitename
MeasRec[loc_name_col] = location
SampRec[site_name_col] = MeasRec[site_name_col]
SpecRec[site_name_col] = MeasRec[site_name_col]
AnisRec[site_name_col] = MeasRec[site_name_col]
ResRec[loc_name_col] = location
ResRec[site_name_col] = MeasRec[site_name_col]
if data_model_num == 2:
ResRec[site_name_col + "s"] = MeasRec[site_name_col]
SampRec[loc_name_col] = MeasRec[loc_name_col]
SpecRec[loc_name_col] = MeasRec[loc_name_col]
AnisRec[loc_name_col] = MeasRec[loc_name_col]
if data_model_num == 2 :
ResRec[loc_name_col + "s"] = MeasRec[loc_name_col]
if len(rec) >= 3:
SampRec[azimuth_col], SampRec[samp_dip_col] = rec[1], rec[2]
az, pl, igeo = float(rec[1]), float(rec[2]), 1
if len(rec) == 5:
SampRec[bed_dip_direction_col], SampRec[bed_dip_col] = '%7.1f' % (
90. + float(rec[3])), (rec[4])
bed_az, bed_dip, itilt, igeo = 90. + \
float(rec[3]), float(rec[4]), 1, 1
else:
for i in range(5):
# assume measurements in micro SI
k15.append(1e-6 * float(rec[i]))
if linecnt == 4:
sbar, sigma, bulk = pmag.dok15_s(k15)
hpars = pmag.dohext(9, sigma, sbar)
MeasRec[treat_temp_col] = '%8.3e' % (
273) # room temp in kelvin
MeasRec[meas_temp_col] = '%8.3e' % (
273) # room temp in kelvin
for i in range(15):
NewMeas = copy.deepcopy(MeasRec)
NewMeas[meas_orient_phi_col] = '%7.1f' % (Decs[i])
NewMeas[meas_orient_theta_col] = '%7.1f' % (Incs[i])
NewMeas[chi_vol_col] = '%12.10f' % (k15[i])
NewMeas[meas_name_col] = '%i' % (i + 1)
if data_model_num == 2:
NewMeas["magic_experiment_name"] = rec[0] + ":LP-AN-MS"
else:
NewMeas["experiment"] = rec[0] + ":LP-AN-MS"
MeasRecs.append(NewMeas)
if SampRec[samp_name_col] not in samplist:
SampRecs.append(SampRec)
samplist.append(SampRec[samp_name_col])
SpecRecs.append(SpecRec)
AnisRec[aniso_type_col] = "AMS"
ResRec[aniso_type_col] = "AMS"
s1_val = '{:12.10f}'.format(sbar[0])
s2_val = '{:12.10f}'.format(sbar[1])
s3_val = '{:12.10f}'.format(sbar[2])
s4_val = '{:12.10f}'.format(sbar[3])
s5_val = '{:12.10f}'.format(sbar[4])
s6_val = '{:12.10f}'.format(sbar[5])
# MAgIC 2
if data_model_num == 2:
AnisRec["anisotropy_s1"] = s1_val
AnisRec["anisotropy_s2"] = s2_val
AnisRec["anisotropy_s3"] = s3_val
AnisRec["anisotropy_s4"] = s4_val
AnisRec["anisotropy_s5"] = s5_val
AnisRec["anisotropy_s6"] = s6_val
# MagIC 3
else:
vals = [s1_val, s2_val, s3_val, s4_val, s5_val, s6_val]
AnisRec['aniso_s'] = ":".join([str(v).strip() for v in vals])
AnisRec[aniso_mean_col] = '%12.10f' % (bulk)
AnisRec[aniso_sigma_col] = '%12.10f' % (sigma)
AnisRec[aniso_mean_col] = '{:12.10f}'.format(bulk)
AnisRec[aniso_sigma_col] = '{:12.10f}'.format(sigma)
AnisRec[aniso_unit_col] = 'SI'
AnisRec[aniso_n_col] = '15'
AnisRec[aniso_tilt_corr_col] = '-1'
AnisRec[method_col] = 'LP-X:AE-H:LP-AN-MS'
AnisRecs.append(AnisRec)
ResRec[method_col] = 'LP-X:AE-H:LP-AN-MS'
ResRec[aniso_tilt_corr_col] = '-1'
if data_model_num == 3:
aniso_v1 = ':'.join([str(i) for i in (hpars['t1'], hpars['v1_dec'], hpars['v1_inc'], hpars['v2_dec'], hpars['v2_inc'], hpars['e12'], hpars['v3_dec'], hpars['v3_inc'], hpars['e13'])])
aniso_v2 = ':'.join([str(i) for i in (hpars['t2'], hpars['v2_dec'], hpars['v2_inc'], hpars['v1_dec'], hpars['v1_inc'], hpars['e12'], hpars['v3_dec'], hpars['v3_inc'], hpars['e23'])])
aniso_v3 = ':'.join([str(i) for i in (hpars['t3'], hpars['v3_dec'], hpars['v3_inc'], hpars['v1_dec'], hpars['v1_inc'], hpars['e13'], hpars['v2_dec'], hpars['v2_inc'], hpars['e23'])])
ResRec['aniso_v1'] = aniso_v1
ResRec['aniso_v2'] = aniso_v2
ResRec['aniso_v3'] = aniso_v3
else: # data model 2
ResRec["anisotropy_t1"] = '%12.10f' % (hpars['t1'])
ResRec["anisotropy_t2"] = '%12.10f' % (hpars['t2'])
ResRec["anisotropy_t3"] = '%12.10f' % (hpars['t3'])
ResRec["anisotropy_fest"] = '%12.10f' % (hpars['F'])
ResRec["anisotropy_ftest12"] = '%12.10f' % (hpars['F12'])
ResRec["anisotropy_ftest23"] = '%12.10f' % (hpars['F23'])
ResRec["anisotropy_v1_dec"] = '%7.1f' % (hpars['v1_dec'])
ResRec["anisotropy_v2_dec"] = '%7.1f' % (hpars['v2_dec'])
ResRec["anisotropy_v3_dec"] = '%7.1f' % (hpars['v3_dec'])
ResRec["anisotropy_v1_inc"] = '%7.1f' % (hpars['v1_inc'])
ResRec["anisotropy_v2_inc"] = '%7.1f' % (hpars['v2_inc'])
ResRec["anisotropy_v3_inc"] = '%7.1f' % (hpars['v3_inc'])
ResRec['anisotropy_v1_eta_dec'] = ResRec['anisotropy_v2_dec']
ResRec['anisotropy_v1_eta_inc'] = ResRec['anisotropy_v2_inc']
ResRec['anisotropy_v1_zeta_dec'] = ResRec['anisotropy_v3_dec']
ResRec['anisotropy_v1_zeta_inc'] = ResRec['anisotropy_v3_inc']
ResRec['anisotropy_v2_eta_dec'] = ResRec['anisotropy_v1_dec']
ResRec['anisotropy_v2_eta_inc'] = ResRec['anisotropy_v1_inc']
ResRec['anisotropy_v2_zeta_dec'] = ResRec['anisotropy_v3_dec']
ResRec['anisotropy_v2_zeta_inc'] = ResRec['anisotropy_v3_inc']
ResRec['anisotropy_v3_eta_dec'] = ResRec['anisotropy_v1_dec']
ResRec['anisotropy_v3_eta_inc'] = ResRec['anisotropy_v1_inc']
ResRec['anisotropy_v3_zeta_dec'] = ResRec['anisotropy_v2_dec']
ResRec['anisotropy_v3_zeta_inc'] = ResRec['anisotropy_v2_inc']
ResRec["anisotropy_v1_eta_semi_angle"] = '%7.1f' % (
hpars['e12'])
ResRec["anisotropy_v1_zeta_semi_angle"] = '%7.1f' % (
hpars['e13'])
ResRec["anisotropy_v2_eta_semi_angle"] = '%7.1f' % (
hpars['e12'])
ResRec["anisotropy_v2_zeta_semi_angle"] = '%7.1f' % (
hpars['e23'])
ResRec["anisotropy_v3_eta_semi_angle"] = '%7.1f' % (
hpars['e13'])
ResRec["anisotropy_v3_zeta_semi_angle"] = '%7.1f' % (
hpars['e23'])
ResRec[result_description_col] = 'Critical F: ' + hpars["F_crit"] + ';Critical F12/F13: ' + hpars["F12_crit"]
#
ResRecs.append(ResRec)
if igeo == 1:
sbarg = pmag.dosgeo(sbar, az, pl)
hparsg = pmag.dohext(9, sigma, sbarg)
AnisRecG = copy.copy(AnisRec)
ResRecG = copy.copy(ResRec)
if data_model_num == 3:
AnisRecG["aniso_s"] = ":".join('{:12.10f}'.format(i) for i in sbarg)
if data_model_num == 2:
AnisRecG["anisotropy_s1"] = '%12.10f' % (sbarg[0])
AnisRecG["anisotropy_s2"] = '%12.10f' % (sbarg[1])
AnisRecG["anisotropy_s3"] = '%12.10f' % (sbarg[2])
AnisRecG["anisotropy_s4"] = '%12.10f' % (sbarg[3])
AnisRecG["anisotropy_s5"] = '%12.10f' % (sbarg[4])
AnisRecG["anisotropy_s6"] = '%12.10f' % (sbarg[5])
AnisRecG[aniso_tilt_corr_col] = '0'
ResRecG[aniso_tilt_corr_col] = '0'
if data_model_num == 3:
aniso_v1 = ':'.join([str(i) for i in (hparsg['t1'], hparsg['v1_dec'], hparsg['v1_inc'], hparsg['v2_dec'], hparsg['v2_inc'], hparsg['e12'], hparsg['v3_dec'], hparsg['v3_inc'], hparsg['e13'])])
aniso_v2 = ':'.join([str(i) for i in (hparsg['t2'], hparsg['v2_dec'], hparsg['v2_inc'], hparsg['v1_dec'], hparsg['v1_inc'], hparsg['e12'], hparsg['v3_dec'], hparsg['v3_inc'], hparsg['e23'])])
aniso_v3 = ':'.join([str(i) for i in (hparsg['t3'], hparsg['v3_dec'], hparsg['v3_inc'], hparsg['v1_dec'], hparsg['v1_inc'], hparsg['e13'], hparsg['v2_dec'], hparsg['v2_inc'], hparsg['e23'])])
ResRecG['aniso_v1'] = aniso_v1
ResRecG['aniso_v2'] = aniso_v2
ResRecG['aniso_v3'] = aniso_v3
#
if data_model_num == 2:
ResRecG["anisotropy_v1_dec"] = '%7.1f' % (hparsg['v1_dec'])
ResRecG["anisotropy_v2_dec"] = '%7.1f' % (hparsg['v2_dec'])
ResRecG["anisotropy_v3_dec"] = '%7.1f' % (hparsg['v3_dec'])
ResRecG["anisotropy_v1_inc"] = '%7.1f' % (hparsg['v1_inc'])
ResRecG["anisotropy_v2_inc"] = '%7.1f' % (hparsg['v2_inc'])
ResRecG["anisotropy_v3_inc"] = '%7.1f' % (hparsg['v3_inc'])
ResRecG['anisotropy_v1_eta_dec'] = ResRecG['anisotropy_v2_dec']
ResRecG['anisotropy_v1_eta_inc'] = ResRecG['anisotropy_v2_inc']
ResRecG['anisotropy_v1_zeta_dec'] = ResRecG['anisotropy_v3_dec']
ResRecG['anisotropy_v1_zeta_inc'] = ResRecG['anisotropy_v3_inc']
ResRecG['anisotropy_v2_eta_dec'] = ResRecG['anisotropy_v1_dec']
ResRecG['anisotropy_v2_eta_inc'] = ResRecG['anisotropy_v1_inc']
ResRecG['anisotropy_v2_zeta_dec'] = ResRecG['anisotropy_v3_dec']
ResRecG['anisotropy_v2_zeta_inc'] = ResRecG['anisotropy_v3_inc']
ResRecG['anisotropy_v3_eta_dec'] = ResRecG['anisotropy_v1_dec']
ResRecG['anisotropy_v3_eta_inc'] = ResRecG['anisotropy_v1_inc']
ResRecG['anisotropy_v3_zeta_dec'] = ResRecG['anisotropy_v2_dec']
ResRecG['anisotropy_v3_zeta_inc'] = ResRecG['anisotropy_v2_inc']
#
ResRecG[result_description_col] = 'Critical F: ' + \
hpars["F_crit"] + ';Critical F12/F13: ' + \
hpars["F12_crit"]
ResRecs.append(ResRecG)
AnisRecs.append(AnisRecG)
if itilt == 1:
sbart = pmag.dostilt(sbarg, bed_az, bed_dip)
hparst = pmag.dohext(9, sigma, sbart)
AnisRecT = copy.copy(AnisRec)
ResRecT = copy.copy(ResRec)
if data_model_num == 3:
aniso_v1 = ':'.join([str(i) for i in (hparst['t1'], hparst['v1_dec'], hparst['v1_inc'], hparst['v2_dec'], hparst['v2_inc'], hparst['e12'], hparst['v3_dec'], hparst['v3_inc'], hparst['e13'])])
aniso_v2 = ':'.join([str(i) for i in (hparst['t2'], hparst['v2_dec'], hparst['v2_inc'], hparst['v1_dec'], hparst['v1_inc'], hparst['e12'], hparst['v3_dec'], hparst['v3_inc'], hparst['e23'])])
aniso_v3 = ':'.join([str(i) for i in (hparst['t3'], hparst['v3_dec'], hparst['v3_inc'], hparst['v1_dec'], hparst['v1_inc'], hparst['e13'], hparst['v2_dec'], hparst['v2_inc'], hparst['e23'])])
ResRecT['aniso_v1'] = aniso_v1
ResRecT['aniso_v2'] = aniso_v2
ResRecT['aniso_v3'] = aniso_v3
#
if data_model_num == 2:
AnisRecT["anisotropy_s1"] = '%12.10f' % (sbart[0])
AnisRecT["anisotropy_s2"] = '%12.10f' % (sbart[1])
AnisRecT["anisotropy_s3"] = '%12.10f' % (sbart[2])
AnisRecT["anisotropy_s4"] = '%12.10f' % (sbart[3])
AnisRecT["anisotropy_s5"] = '%12.10f' % (sbart[4])
AnisRecT["anisotropy_s6"] = '%12.10f' % (sbart[5])
AnisRecT["anisotropy_tilt_correction"] = '100'
ResRecT["anisotropy_v1_dec"] = '%7.1f' % (hparst['v1_dec'])
ResRecT["anisotropy_v2_dec"] = '%7.1f' % (hparst['v2_dec'])
ResRecT["anisotropy_v3_dec"] = '%7.1f' % (hparst['v3_dec'])
ResRecT["anisotropy_v1_inc"] = '%7.1f' % (hparst['v1_inc'])
ResRecT["anisotropy_v2_inc"] = '%7.1f' % (hparst['v2_inc'])
ResRecT["anisotropy_v3_inc"] = '%7.1f' % (hparst['v3_inc'])
ResRecT['anisotropy_v1_eta_dec'] = ResRecT['anisotropy_v2_dec']
ResRecT['anisotropy_v1_eta_inc'] = ResRecT['anisotropy_v2_inc']
ResRecT['anisotropy_v1_zeta_dec'] = ResRecT['anisotropy_v3_dec']
ResRecT['anisotropy_v1_zeta_inc'] = ResRecT['anisotropy_v3_inc']
ResRecT['anisotropy_v2_eta_dec'] = ResRecT['anisotropy_v1_dec']
ResRecT['anisotropy_v2_eta_inc'] = ResRecT['anisotropy_v1_inc']
ResRecT['anisotropy_v2_zeta_dec'] = ResRecT['anisotropy_v3_dec']
ResRecT['anisotropy_v2_zeta_inc'] = ResRecT['anisotropy_v3_inc']
ResRecT['anisotropy_v3_eta_dec'] = ResRecT['anisotropy_v1_dec']
ResRecT['anisotropy_v3_eta_inc'] = ResRecT['anisotropy_v1_inc']
ResRecT['anisotropy_v3_zeta_dec'] = ResRecT['anisotropy_v2_dec']
ResRecT['anisotropy_v3_zeta_inc'] = ResRecT['anisotropy_v2_inc']
#
ResRecT[aniso_tilt_corr_col] = '100'
ResRecT[result_description_col] = 'Critical F: ' + \
hparst["F_crit"] + ';Critical F12/F13: ' + \
hparst["F12_crit"]
ResRecs.append(ResRecT)
AnisRecs.append(AnisRecT)
k15, linecnt = [], 0
MeasRec, SpecRec, SampRec, SiteRec, AnisRec = {}, {}, {}, {}, {}
# samples
pmag.magic_write(samp_file, SampRecs, samp_table_name)
# specimens / rmag_anisotropy / rmag_results
if data_model_num == 3:
AnisRecs.extend(ResRecs)
SpecRecs = AnisRecs.copy()
SpecRecs, keys = pmag.fillkeys(SpecRecs)
pmag.magic_write(aniso_outfile, SpecRecs, 'specimens')
flist = [meas_file, aniso_outfile, samp_file]
else:
pmag.magic_write(aniso_outfile, AnisRecs, 'rmag_anisotropy') # add to specimens?
pmag.magic_write(result_file, ResRecs, 'rmag_results') # added to specimens (NOT sites)
flist = [meas_file, samp_file, aniso_outfile, result_file]
# measurements
pmag.magic_write(meas_file, MeasRecs, meas_table_name)
print("Data saved to: " + ", ".join(flist))
return True, meas_file | converts .k15 format data to MagIC format.
assumes Jelinek Kappabridge measurement scheme.
Parameters
----------
k15file : str
input file name
dir_path : str
output file directory, default "."
input_dir_path : str
input file directory IF different from dir_path, default ""
meas_file : str
output measurement file name, default "measurements.txt"
aniso_outfile : str
output specimen file name, default "specimens.txt"
samp_file: str
output sample file name, default "samples.txt"
aniso_results_file : str
output result file name, default "rmag_results.txt", data model 2 only
specnum : int
number of characters to designate a specimen, default 0
samp_con : str
sample/site naming convention, default '1', see info below
location : str
location name, default "unknown"
data_model_num : int
MagIC data model [2, 3], default 3
Returns
--------
type - Tuple : (True or False indicating if conversion was successful, meas_file name written)
Info
--------
Infile format:
name [az,pl,strike,dip], followed by
3 rows of 5 measurements for each specimen
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[4-Z] XXXXYYY: YYY is sample designation with Z characters from site XXX
[5] site name same as sample
[6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
NB: all others you will have to customize your self
or e-mail ltauxe@ucsd.edu for help. | Below is the instruction that describes the task:
### Input:
converts .k15 format data to MagIC format.
assumes Jelinek Kappabridge measurement scheme.
Parameters
----------
k15file : str
input file name
dir_path : str
output file directory, default "."
input_dir_path : str
input file directory IF different from dir_path, default ""
meas_file : str
output measurement file name, default "measurements.txt"
aniso_outfile : str
output specimen file name, default "specimens.txt"
samp_file: str
output sample file name, default "samples.txt"
aniso_results_file : str
output result file name, default "rmag_results.txt", data model 2 only
specnum : int
number of characters to designate a specimen, default 0
samp_con : str
sample/site naming convention, default '1', see info below
location : str
location name, default "unknown"
data_model_num : int
MagIC data model [2, 3], default 3
Returns
--------
type - Tuple : (True or False indicating if conversion was successful, meas_file name written)
Info
--------
Infile format:
name [az,pl,strike,dip], followed by
3 rows of 5 measurements for each specimen
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitrary length)
[4-Z] XXXXYYY: YYY is sample designation with Z characters from site XXX
[5] site name same as sample
[6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
NB: all others you will have to customize your self
or e-mail ltauxe@ucsd.edu for help.
### Response:
def k15(k15file, dir_path='.', input_dir_path='',
        meas_file='measurements.txt', aniso_outfile='specimens.txt',
        samp_file="samples.txt", result_file="rmag_anisotropy.txt",
        specnum=0, sample_naming_con='1', location="unknown",
        data_model_num=3):
    """
    Converts .k15 format data to MagIC format.
    Assumes the Jelinek Kappabridge measurement scheme.

    Parameters
    ----------
    k15file : str
        input file name
    dir_path : str
        output file directory, default "."
    input_dir_path : str
        input file directory IF different from dir_path, default ""
    meas_file : str
        output measurement file name, default "measurements.txt"
    aniso_outfile : str
        output specimen file name, default "specimens.txt"
    samp_file : str
        output sample file name, default "samples.txt"
    result_file : str
        output result file name, default "rmag_anisotropy.txt",
        data model 2 only
    specnum : int
        number of characters to designate a specimen, default 0
    sample_naming_con : str
        sample/site naming convention, default '1', see info below
    location : str
        location name, default "unknown"
    data_model_num : int
        MagIC data model [2, 3], default 3

    Returns
    -------
    type - Tuple : (True or False indicating if conversion was successful,
        meas_file name written)

    Info
    ----
    Infile format:
        name [az,pl,strike,dip], followed by
        3 rows of 5 measurements for each specimen

    Sample naming convention:
        [1] XXXXY: where XXXX is an arbitrary length site designation and Y
            is the single character sample designation.  e.g., TG001a is the
            first sample from site TG001.  [default]
        [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitrary length)
        [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitrary length)
        [4-Z] XXXXYYY: YYY is sample designation with Z characters from site XXX
        [5] site name same as sample
        [6] site name entered in site_name column in the orient.txt format
            input file -- NOT CURRENTLY SUPPORTED
        [7-Z] [XXXX]YYY: XXXX is site designation with Z characters with
            sample name XXXXYYYY
        NB: all others you will have to customize your self
            or e-mail ltauxe@ucsd.edu for help.
    """
    #
    # initialize some variables
    #
    input_dir_path, output_dir_path = pmag.fix_directories(input_dir_path, dir_path)
    version_num = pmag.get_version()
    itilt, igeo, linecnt = 0, 0, 0
    k15 = []  # accumulates the 15 susceptibility readings for one specimen
    data_model_num = int(float(data_model_num))
    # set column names for MagIC 3
    spec_name_col = 'specimen'
    samp_name_col = 'sample'
    site_name_col = 'site'
    loc_name_col = 'location'
    citation_col = 'citations'
    method_col = 'method_codes'
    site_description_col = 'description'
    expedition_col = 'expedition_name'
    instrument_col = 'instrument_codes'
    experiment_col = 'experiments'
    analyst_col = 'analysts'
    quality_col = 'quality'
    aniso_quality_col = 'result_quality'
    meas_standard_col = 'standard'
    meas_description_col = 'description'
    aniso_type_col = 'aniso_type'
    aniso_unit_col = 'aniso_s_unit'
    aniso_n_col = 'aniso_s_n_measurements'
    azimuth_col = 'azimuth'
    spec_volume_col = 'volume'
    samp_dip_col = 'dip'
    bed_dip_col = 'bed_dip'
    bed_dip_direction_col = 'bed_dip_direction'
    chi_vol_col = 'susc_chi_volume'
    aniso_sigma_col = 'aniso_s_sigma'
    aniso_tilt_corr_col = 'aniso_tilt_correction'
    meas_table_name = 'measurements'
    spec_table_name = 'specimens'
    samp_table_name = 'samples'
    site_table_name = 'sites'
    meas_name_col = 'measurement'
    meas_time_col = 'timestamp'
    meas_ac_col = 'meas_field_ac'
    meas_temp_col = "meas_temp"
    #
    software_col = 'software_packages'
    description_col = 'description'  # sites.description
    treat_temp_col = 'treat_temp'
    meas_orient_phi_col = "meas_orient_phi"
    meas_orient_theta_col = "meas_orient_theta"
    aniso_mean_col = 'aniso_s_mean'
    result_description_col = "description"
    # set file-name defaults correctly for MagIC 2
    if data_model_num == 2:
        if meas_file == 'measurements.txt':
            meas_file = 'magic_measurements.txt'
        if samp_file == 'samples.txt':
            samp_file = 'er_samples.txt'
        if aniso_outfile == 'specimens.txt':
            aniso_outfile = 'rmag_anisotropy.txt'
    # set column names for MagIC 2
    if data_model_num == 2:
        spec_name_col = 'er_specimen_name'
        samp_name_col = 'er_sample_name'
        site_name_col = 'er_site_name'
        loc_name_col = 'er_location_name'
        citation_col = 'er_citation_names'
        method_col = 'magic_method_codes'
        site_description_col = 'site_description'
        expedition_col = 'er_expedition_name'
        instrument_col = 'magic_instrument_codes'
        experiment_col = 'magic_experiment_names'
        analyst_col = 'er_analyst_mail_names'
        quality_col = 'measurement_flag'
        aniso_quality_col = 'anisotropy_flag'
        meas_standard_col = 'measurement_standard'
        meas_description_col = 'measurement_description'
        aniso_type_col = 'anisotropy_type'
        aniso_unit_col = 'anisotropy_unit'
        aniso_n_col = 'anisotropy_n'
        azimuth_col = 'sample_azimuth'
        spec_volume_col = 'specimen_volume'
        samp_dip_col = 'sample_dip'
        bed_dip_col = 'sample_bed_dip'
        bed_dip_direction_col = 'sample_bed_dip_direction'
        chi_vol_col = 'measurement_chi_volume'
        aniso_sigma_col = 'anisotropy_sigma'
        aniso_tilt_corr_col = 'anisotropy_tilt_correction'
        meas_table_name = 'magic_measurements'
        spec_table_name = 'er_specimens'
        samp_table_name = 'er_samples'
        site_table_name = 'er_sites'
        meas_name_col = 'measurement_number'
        meas_time_col = 'measurement_date'
        meas_ac_col = 'measurement_lab_field_ac'
        meas_temp_col = "measurement_temp"
        #
        software_col = 'magic_software_packages'
        description_col = 'rmag_result_name'
        treat_temp_col = 'treatment_temp'
        meas_orient_phi_col = "measurement_orient_phi"
        meas_orient_theta_col = "measurement_orient_theta"
        aniso_mean_col = 'anisotropy_mean'
        result_description_col = "result_description"
    # parse the sample naming convention
    Z = ""
    if "4" in sample_naming_con:
        if "-" not in sample_naming_con:
            print("option [4] must be in form 4-Z where Z is an integer")
            return False, "option [4] must be in form 4-Z where Z is an integer"
        else:
            Z = sample_naming_con.split("-")[1]
            sample_naming_con = "4"
    if sample_naming_con == '6':
        Samps, filetype = pmag.magic_read(
            os.path.join(input_dir_path, samp_table_name + ".txt"))
    samp_file = pmag.resolve_file_name(samp_file, output_dir_path)
    meas_file = pmag.resolve_file_name(meas_file, output_dir_path)
    aniso_outfile = pmag.resolve_file_name(aniso_outfile, output_dir_path)
    result_file = pmag.resolve_file_name(result_file, output_dir_path)
    k15file = pmag.resolve_file_name(k15file, input_dir_path)
    if not os.path.exists(k15file):
        print(k15file)
        return False, "You must provide a valid k15 format file"
    try:
        # append new records to any pre-existing sample file
        SampRecs, filetype = pmag.magic_read(samp_file)
        samplist = []
        for samp in SampRecs:
            if samp[samp_name_col] not in samplist:
                samplist.append(samp[samp_name_col])
    except IOError:
        # no pre-existing sample file: start from scratch.
        # BUGFIX: samplist must be initialized here as well, otherwise it is
        # undefined (NameError) when the first sample record is appended below.
        SampRecs, samplist = [], []
    # measurement directions for Jelinek 1977 protocol:
    Decs = [315, 225, 180, 135, 45, 90, 270, 270, 270, 90, 180, 180, 0, 0, 0]
    Incs = [0, 0, 0, 0, 0, -45, -45, 0, 45, 45, 45, -45, -90, -45, 45]
    #
    # read in data
    with open(k15file, 'r') as finput:
        lines = finput.readlines()
    MeasRecs, SpecRecs, AnisRecs, ResRecs = [], [], [], []
    MeasRec, SpecRec, SampRec, SiteRec, AnisRec, ResRec = {}, {}, {}, {}, {}, {}
    for line in lines:
        linecnt += 1
        rec = line.split()
        if linecnt == 1:
            # header line for a specimen: name [az, pl, [strike, dip]]
            MeasRec[method_col] = ""
            SpecRec[method_col] = ""
            SampRec[method_col] = ""
            AnisRec[method_col] = ""
            SiteRec[method_col] = ""
            ResRec[method_col] = ""
            MeasRec[software_col] = version_num
            SpecRec[software_col] = version_num
            SampRec[software_col] = version_num
            AnisRec[software_col] = version_num
            SiteRec[software_col] = version_num
            ResRec[software_col] = version_num
            MeasRec[method_col] = "LP-X"
            MeasRec[quality_col] = "g"
            MeasRec[meas_standard_col] = "u"
            MeasRec[citation_col] = "This study"
            SpecRec[citation_col] = "This study"
            SampRec[citation_col] = "This study"
            AnisRec[citation_col] = "This study"
            ResRec[citation_col] = "This study"
            MeasRec[spec_name_col] = rec[0]
            MeasRec[experiment_col] = rec[0] + ":LP-AN-MS"
            AnisRec[experiment_col] = rec[0] + ":AMS"
            ResRec[experiment_col] = rec[0] + ":AMS"
            SpecRec[spec_name_col] = rec[0]
            AnisRec[spec_name_col] = rec[0]
            SampRec[spec_name_col] = rec[0]
            if data_model_num == 2:
                ResRec[description_col] = rec[0]
            if data_model_num == 3:
                ResRec[spec_name_col] = rec[0]
            specnum = int(specnum)
            if specnum != 0:
                MeasRec[samp_name_col] = rec[0][:-specnum]
            if specnum == 0:
                MeasRec[samp_name_col] = rec[0]
            SampRec[samp_name_col] = MeasRec[samp_name_col]
            SpecRec[samp_name_col] = MeasRec[samp_name_col]
            AnisRec[samp_name_col] = MeasRec[samp_name_col]
            if data_model_num == 3:
                ResRec[samp_name_col] = MeasRec[samp_name_col]
            else:
                ResRec[samp_name_col + "s"] = MeasRec[samp_name_col]
            if sample_naming_con == "6":
                for samp in Samps:
                    if samp[samp_name_col] == AnisRec[samp_name_col]:
                        sitename = samp[site_name_col]
                        location = samp[loc_name_col]
            elif sample_naming_con != "":
                sitename = pmag.parse_site(
                    AnisRec[samp_name_col], sample_naming_con, Z)
            MeasRec[site_name_col] = sitename
            MeasRec[loc_name_col] = location
            SampRec[site_name_col] = MeasRec[site_name_col]
            SpecRec[site_name_col] = MeasRec[site_name_col]
            AnisRec[site_name_col] = MeasRec[site_name_col]
            ResRec[loc_name_col] = location
            ResRec[site_name_col] = MeasRec[site_name_col]
            if data_model_num == 2:
                ResRec[site_name_col + "s"] = MeasRec[site_name_col]
            SampRec[loc_name_col] = MeasRec[loc_name_col]
            SpecRec[loc_name_col] = MeasRec[loc_name_col]
            AnisRec[loc_name_col] = MeasRec[loc_name_col]
            if data_model_num == 2:
                ResRec[loc_name_col + "s"] = MeasRec[loc_name_col]
            if len(rec) >= 3:
                SampRec[azimuth_col], SampRec[samp_dip_col] = rec[1], rec[2]
                az, pl, igeo = float(rec[1]), float(rec[2]), 1
            if len(rec) == 5:
                # bed dip direction = strike + 90 (right-hand rule)
                SampRec[bed_dip_direction_col], SampRec[bed_dip_col] = '%7.1f' % (
                    90. + float(rec[3])), (rec[4])
                bed_az, bed_dip, itilt, igeo = 90. + \
                    float(rec[3]), float(rec[4]), 1, 1
        else:
            # data line: 5 readings, assumed to be in micro SI
            for i in range(5):
                k15.append(1e-6 * float(rec[i]))
            if linecnt == 4:
                # all 15 readings collected: compute the tensor and stats
                sbar, sigma, bulk = pmag.dok15_s(k15)
                hpars = pmag.dohext(9, sigma, sbar)
                MeasRec[treat_temp_col] = '%8.3e' % (273)  # room temp in kelvin
                MeasRec[meas_temp_col] = '%8.3e' % (273)  # room temp in kelvin
                for i in range(15):
                    NewMeas = copy.deepcopy(MeasRec)
                    NewMeas[meas_orient_phi_col] = '%7.1f' % (Decs[i])
                    NewMeas[meas_orient_theta_col] = '%7.1f' % (Incs[i])
                    NewMeas[chi_vol_col] = '%12.10f' % (k15[i])
                    NewMeas[meas_name_col] = '%i' % (i + 1)
                    # NOTE(review): rec[0] here is the first value of the last
                    # data row, not the specimen name -- preserved from the
                    # original; verify against intended experiment naming.
                    if data_model_num == 2:
                        NewMeas["magic_experiment_name"] = rec[0] + ":LP-AN-MS"
                    else:
                        NewMeas["experiment"] = rec[0] + ":LP-AN-MS"
                    MeasRecs.append(NewMeas)
                if SampRec[samp_name_col] not in samplist:
                    SampRecs.append(SampRec)
                    samplist.append(SampRec[samp_name_col])
                SpecRecs.append(SpecRec)
                AnisRec[aniso_type_col] = "AMS"
                ResRec[aniso_type_col] = "AMS"
                s1_val = '{:12.10f}'.format(sbar[0])
                s2_val = '{:12.10f}'.format(sbar[1])
                s3_val = '{:12.10f}'.format(sbar[2])
                s4_val = '{:12.10f}'.format(sbar[3])
                s5_val = '{:12.10f}'.format(sbar[4])
                s6_val = '{:12.10f}'.format(sbar[5])
                # MagIC 2
                if data_model_num == 2:
                    AnisRec["anisotropy_s1"] = s1_val
                    AnisRec["anisotropy_s2"] = s2_val
                    AnisRec["anisotropy_s3"] = s3_val
                    AnisRec["anisotropy_s4"] = s4_val
                    AnisRec["anisotropy_s5"] = s5_val
                    AnisRec["anisotropy_s6"] = s6_val
                # MagIC 3
                else:
                    vals = [s1_val, s2_val, s3_val, s4_val, s5_val, s6_val]
                    AnisRec['aniso_s'] = ":".join([str(v).strip() for v in vals])
                # (removed a duplicated pair of %-style assignments that was
                # immediately overwritten by these format() calls)
                AnisRec[aniso_mean_col] = '{:12.10f}'.format(bulk)
                AnisRec[aniso_sigma_col] = '{:12.10f}'.format(sigma)
                AnisRec[aniso_unit_col] = 'SI'
                AnisRec[aniso_n_col] = '15'
                AnisRec[aniso_tilt_corr_col] = '-1'
                AnisRec[method_col] = 'LP-X:AE-H:LP-AN-MS'
                AnisRecs.append(AnisRec)
                ResRec[method_col] = 'LP-X:AE-H:LP-AN-MS'
                ResRec[aniso_tilt_corr_col] = '-1'
                if data_model_num == 3:
                    aniso_v1 = ':'.join([str(i) for i in (hpars['t1'], hpars['v1_dec'], hpars['v1_inc'], hpars['v2_dec'], hpars['v2_inc'], hpars['e12'], hpars['v3_dec'], hpars['v3_inc'], hpars['e13'])])
                    aniso_v2 = ':'.join([str(i) for i in (hpars['t2'], hpars['v2_dec'], hpars['v2_inc'], hpars['v1_dec'], hpars['v1_inc'], hpars['e12'], hpars['v3_dec'], hpars['v3_inc'], hpars['e23'])])
                    aniso_v3 = ':'.join([str(i) for i in (hpars['t3'], hpars['v3_dec'], hpars['v3_inc'], hpars['v1_dec'], hpars['v1_inc'], hpars['e13'], hpars['v2_dec'], hpars['v2_inc'], hpars['e23'])])
                    ResRec['aniso_v1'] = aniso_v1
                    ResRec['aniso_v2'] = aniso_v2
                    ResRec['aniso_v3'] = aniso_v3
                else:  # data model 2
                    ResRec["anisotropy_t1"] = '%12.10f' % (hpars['t1'])
                    ResRec["anisotropy_t2"] = '%12.10f' % (hpars['t2'])
                    ResRec["anisotropy_t3"] = '%12.10f' % (hpars['t3'])
                    ResRec["anisotropy_fest"] = '%12.10f' % (hpars['F'])
                    ResRec["anisotropy_ftest12"] = '%12.10f' % (hpars['F12'])
                    ResRec["anisotropy_ftest23"] = '%12.10f' % (hpars['F23'])
                    ResRec["anisotropy_v1_dec"] = '%7.1f' % (hpars['v1_dec'])
                    ResRec["anisotropy_v2_dec"] = '%7.1f' % (hpars['v2_dec'])
                    ResRec["anisotropy_v3_dec"] = '%7.1f' % (hpars['v3_dec'])
                    ResRec["anisotropy_v1_inc"] = '%7.1f' % (hpars['v1_inc'])
                    ResRec["anisotropy_v2_inc"] = '%7.1f' % (hpars['v2_inc'])
                    ResRec["anisotropy_v3_inc"] = '%7.1f' % (hpars['v3_inc'])
                    ResRec['anisotropy_v1_eta_dec'] = ResRec['anisotropy_v2_dec']
                    ResRec['anisotropy_v1_eta_inc'] = ResRec['anisotropy_v2_inc']
                    ResRec['anisotropy_v1_zeta_dec'] = ResRec['anisotropy_v3_dec']
                    ResRec['anisotropy_v1_zeta_inc'] = ResRec['anisotropy_v3_inc']
                    ResRec['anisotropy_v2_eta_dec'] = ResRec['anisotropy_v1_dec']
                    ResRec['anisotropy_v2_eta_inc'] = ResRec['anisotropy_v1_inc']
                    ResRec['anisotropy_v2_zeta_dec'] = ResRec['anisotropy_v3_dec']
                    ResRec['anisotropy_v2_zeta_inc'] = ResRec['anisotropy_v3_inc']
                    ResRec['anisotropy_v3_eta_dec'] = ResRec['anisotropy_v1_dec']
                    ResRec['anisotropy_v3_eta_inc'] = ResRec['anisotropy_v1_inc']
                    ResRec['anisotropy_v3_zeta_dec'] = ResRec['anisotropy_v2_dec']
                    ResRec['anisotropy_v3_zeta_inc'] = ResRec['anisotropy_v2_inc']
                    ResRec["anisotropy_v1_eta_semi_angle"] = '%7.1f' % (
                        hpars['e12'])
                    ResRec["anisotropy_v1_zeta_semi_angle"] = '%7.1f' % (
                        hpars['e13'])
                    ResRec["anisotropy_v2_eta_semi_angle"] = '%7.1f' % (
                        hpars['e12'])
                    ResRec["anisotropy_v2_zeta_semi_angle"] = '%7.1f' % (
                        hpars['e23'])
                    ResRec["anisotropy_v3_eta_semi_angle"] = '%7.1f' % (
                        hpars['e13'])
                    ResRec["anisotropy_v3_zeta_semi_angle"] = '%7.1f' % (
                        hpars['e23'])
                ResRec[result_description_col] = 'Critical F: ' + hpars["F_crit"] + ';Critical F12/F13: ' + hpars["F12_crit"]
                ResRecs.append(ResRec)
                if igeo == 1:
                    # rotate the tensor into geographic coordinates
                    sbarg = pmag.dosgeo(sbar, az, pl)
                    hparsg = pmag.dohext(9, sigma, sbarg)
                    AnisRecG = copy.copy(AnisRec)
                    ResRecG = copy.copy(ResRec)
                    if data_model_num == 3:
                        AnisRecG["aniso_s"] = ":".join('{:12.10f}'.format(i) for i in sbarg)
                    if data_model_num == 2:
                        AnisRecG["anisotropy_s1"] = '%12.10f' % (sbarg[0])
                        AnisRecG["anisotropy_s2"] = '%12.10f' % (sbarg[1])
                        AnisRecG["anisotropy_s3"] = '%12.10f' % (sbarg[2])
                        AnisRecG["anisotropy_s4"] = '%12.10f' % (sbarg[3])
                        AnisRecG["anisotropy_s5"] = '%12.10f' % (sbarg[4])
                        AnisRecG["anisotropy_s6"] = '%12.10f' % (sbarg[5])
                    AnisRecG[aniso_tilt_corr_col] = '0'
                    ResRecG[aniso_tilt_corr_col] = '0'
                    if data_model_num == 3:
                        aniso_v1 = ':'.join([str(i) for i in (hparsg['t1'], hparsg['v1_dec'], hparsg['v1_inc'], hparsg['v2_dec'], hparsg['v2_inc'], hparsg['e12'], hparsg['v3_dec'], hparsg['v3_inc'], hparsg['e13'])])
                        aniso_v2 = ':'.join([str(i) for i in (hparsg['t2'], hparsg['v2_dec'], hparsg['v2_inc'], hparsg['v1_dec'], hparsg['v1_inc'], hparsg['e12'], hparsg['v3_dec'], hparsg['v3_inc'], hparsg['e23'])])
                        aniso_v3 = ':'.join([str(i) for i in (hparsg['t3'], hparsg['v3_dec'], hparsg['v3_inc'], hparsg['v1_dec'], hparsg['v1_inc'], hparsg['e13'], hparsg['v2_dec'], hparsg['v2_inc'], hparsg['e23'])])
                        ResRecG['aniso_v1'] = aniso_v1
                        ResRecG['aniso_v2'] = aniso_v2
                        ResRecG['aniso_v3'] = aniso_v3
                    if data_model_num == 2:
                        ResRecG["anisotropy_v1_dec"] = '%7.1f' % (hparsg['v1_dec'])
                        ResRecG["anisotropy_v2_dec"] = '%7.1f' % (hparsg['v2_dec'])
                        ResRecG["anisotropy_v3_dec"] = '%7.1f' % (hparsg['v3_dec'])
                        ResRecG["anisotropy_v1_inc"] = '%7.1f' % (hparsg['v1_inc'])
                        ResRecG["anisotropy_v2_inc"] = '%7.1f' % (hparsg['v2_inc'])
                        ResRecG["anisotropy_v3_inc"] = '%7.1f' % (hparsg['v3_inc'])
                        ResRecG['anisotropy_v1_eta_dec'] = ResRecG['anisotropy_v2_dec']
                        ResRecG['anisotropy_v1_eta_inc'] = ResRecG['anisotropy_v2_inc']
                        ResRecG['anisotropy_v1_zeta_dec'] = ResRecG['anisotropy_v3_dec']
                        ResRecG['anisotropy_v1_zeta_inc'] = ResRecG['anisotropy_v3_inc']
                        ResRecG['anisotropy_v2_eta_dec'] = ResRecG['anisotropy_v1_dec']
                        ResRecG['anisotropy_v2_eta_inc'] = ResRecG['anisotropy_v1_inc']
                        ResRecG['anisotropy_v2_zeta_dec'] = ResRecG['anisotropy_v3_dec']
                        ResRecG['anisotropy_v2_zeta_inc'] = ResRecG['anisotropy_v3_inc']
                        ResRecG['anisotropy_v3_eta_dec'] = ResRecG['anisotropy_v1_dec']
                        ResRecG['anisotropy_v3_eta_inc'] = ResRecG['anisotropy_v1_inc']
                        ResRecG['anisotropy_v3_zeta_dec'] = ResRecG['anisotropy_v2_dec']
                        ResRecG['anisotropy_v3_zeta_inc'] = ResRecG['anisotropy_v2_inc']
                    ResRecG[result_description_col] = 'Critical F: ' + \
                        hpars["F_crit"] + ';Critical F12/F13: ' + \
                        hpars["F12_crit"]
                    ResRecs.append(ResRecG)
                    AnisRecs.append(AnisRecG)
                    if itilt == 1:
                        # rotate into tilt-corrected (stratigraphic) coordinates
                        sbart = pmag.dostilt(sbarg, bed_az, bed_dip)
                        hparst = pmag.dohext(9, sigma, sbart)
                        AnisRecT = copy.copy(AnisRec)
                        ResRecT = copy.copy(ResRec)
                        if data_model_num == 3:
                            # BUGFIX: store the tilt-corrected s-values for
                            # MagIC 3 as well (previously only the MagIC 2
                            # branch did, leaving AnisRecT with uncorrected
                            # values).
                            AnisRecT["aniso_s"] = ":".join('{:12.10f}'.format(i) for i in sbart)
                            aniso_v1 = ':'.join([str(i) for i in (hparst['t1'], hparst['v1_dec'], hparst['v1_inc'], hparst['v2_dec'], hparst['v2_inc'], hparst['e12'], hparst['v3_dec'], hparst['v3_inc'], hparst['e13'])])
                            aniso_v2 = ':'.join([str(i) for i in (hparst['t2'], hparst['v2_dec'], hparst['v2_inc'], hparst['v1_dec'], hparst['v1_inc'], hparst['e12'], hparst['v3_dec'], hparst['v3_inc'], hparst['e23'])])
                            aniso_v3 = ':'.join([str(i) for i in (hparst['t3'], hparst['v3_dec'], hparst['v3_inc'], hparst['v1_dec'], hparst['v1_inc'], hparst['e13'], hparst['v2_dec'], hparst['v2_inc'], hparst['e23'])])
                            ResRecT['aniso_v1'] = aniso_v1
                            ResRecT['aniso_v2'] = aniso_v2
                            ResRecT['aniso_v3'] = aniso_v3
                        if data_model_num == 2:
                            AnisRecT["anisotropy_s1"] = '%12.10f' % (sbart[0])
                            AnisRecT["anisotropy_s2"] = '%12.10f' % (sbart[1])
                            AnisRecT["anisotropy_s3"] = '%12.10f' % (sbart[2])
                            AnisRecT["anisotropy_s4"] = '%12.10f' % (sbart[3])
                            AnisRecT["anisotropy_s5"] = '%12.10f' % (sbart[4])
                            AnisRecT["anisotropy_s6"] = '%12.10f' % (sbart[5])
                            ResRecT["anisotropy_v1_dec"] = '%7.1f' % (hparst['v1_dec'])
                            ResRecT["anisotropy_v2_dec"] = '%7.1f' % (hparst['v2_dec'])
                            ResRecT["anisotropy_v3_dec"] = '%7.1f' % (hparst['v3_dec'])
                            ResRecT["anisotropy_v1_inc"] = '%7.1f' % (hparst['v1_inc'])
                            ResRecT["anisotropy_v2_inc"] = '%7.1f' % (hparst['v2_inc'])
                            ResRecT["anisotropy_v3_inc"] = '%7.1f' % (hparst['v3_inc'])
                            ResRecT['anisotropy_v1_eta_dec'] = ResRecT['anisotropy_v2_dec']
                            ResRecT['anisotropy_v1_eta_inc'] = ResRecT['anisotropy_v2_inc']
                            ResRecT['anisotropy_v1_zeta_dec'] = ResRecT['anisotropy_v3_dec']
                            ResRecT['anisotropy_v1_zeta_inc'] = ResRecT['anisotropy_v3_inc']
                            ResRecT['anisotropy_v2_eta_dec'] = ResRecT['anisotropy_v1_dec']
                            ResRecT['anisotropy_v2_eta_inc'] = ResRecT['anisotropy_v1_inc']
                            ResRecT['anisotropy_v2_zeta_dec'] = ResRecT['anisotropy_v3_dec']
                            ResRecT['anisotropy_v2_zeta_inc'] = ResRecT['anisotropy_v3_inc']
                            ResRecT['anisotropy_v3_eta_dec'] = ResRecT['anisotropy_v1_dec']
                            ResRecT['anisotropy_v3_eta_inc'] = ResRecT['anisotropy_v1_inc']
                            ResRecT['anisotropy_v3_zeta_dec'] = ResRecT['anisotropy_v2_dec']
                            ResRecT['anisotropy_v3_zeta_inc'] = ResRecT['anisotropy_v2_inc']
                        # mark both records as tilt corrected (works for both
                        # data models via the column-name variable)
                        AnisRecT[aniso_tilt_corr_col] = '100'
                        ResRecT[aniso_tilt_corr_col] = '100'
                        ResRecT[result_description_col] = 'Critical F: ' + \
                            hparst["F_crit"] + ';Critical F12/F13: ' + \
                            hparst["F12_crit"]
                        ResRecs.append(ResRecT)
                        AnisRecs.append(AnisRecT)
                # reset accumulators for the next specimen.
                # BUGFIX: ResRec is reset as well; previously it carried stale
                # fields from the previous specimen into the next record.
                k15, linecnt = [], 0
                MeasRec, SpecRec, SampRec, SiteRec, AnisRec, ResRec = {}, {}, {}, {}, {}, {}
    # samples
    pmag.magic_write(samp_file, SampRecs, samp_table_name)
    # specimens / rmag_anisotropy / rmag_results
    if data_model_num == 3:
        AnisRecs.extend(ResRecs)
        SpecRecs = AnisRecs.copy()
        SpecRecs, keys = pmag.fillkeys(SpecRecs)
        pmag.magic_write(aniso_outfile, SpecRecs, 'specimens')
        flist = [meas_file, aniso_outfile, samp_file]
    else:
        pmag.magic_write(aniso_outfile, AnisRecs, 'rmag_anisotropy')
        pmag.magic_write(result_file, ResRecs, 'rmag_results')
        flist = [meas_file, samp_file, aniso_outfile, result_file]
    # measurements
    pmag.magic_write(meas_file, MeasRecs, meas_table_name)
    print("Data saved to: " + ", ".join(flist))
    return True, meas_file
def read_pkl_and_pklz(filename):
"""
Try read zipped or not zipped pickle file
"""
fcontent = None
try:
import gzip
f = gzip.open(filename, 'rb')
fcontent = f.read()
f.close()
except IOError as e:
# if the problem is in not gzip file
logger.info("Input gzip exception: " + str(e))
f = open(filename, 'rb')
fcontent = f.read()
f.close()
except Exception as e:
# other problem
import traceback
logger.error("Input gzip exception: " + str(e))
logger.error(traceback.format_exc())
return fcontent | Try read zipped or not zipped pickle file | Below is the instruction that describes the task:
### Input:
Try read zipped or not zipped pickle file
### Response:
def read_pkl_and_pklz(filename):
    """
    Read the raw bytes of a pickle file that may or may not be
    gzip-compressed.

    First tries to read ``filename`` as a gzip archive; on an IOError
    (e.g. the file is not gzip-compressed) it falls back to a plain
    binary read.  Any other exception is logged and ``None`` is
    returned.

    :param filename: path to the (possibly gzipped) pickle file
    :return: file content as bytes, or ``None`` if reading failed
    """
    fcontent = None
    try:
        import gzip
        # context manager guarantees the file handle is closed even when
        # read() raises (the original leaked the handle in that case)
        with gzip.open(filename, 'rb') as f:
            fcontent = f.read()
    except IOError as e:
        # the problem is that it is not a gzip file -- read it plainly
        logger.info("Input gzip exception: " + str(e))
        with open(filename, 'rb') as f:
            fcontent = f.read()
    except Exception as e:
        # other problem: log it and fall through, returning None
        import traceback
        logger.error("Input gzip exception: " + str(e))
        logger.error(traceback.format_exc())
    return fcontent
def check_expected_infos(self, test_method):
"""
This method is called after each test. It will read decorated
informations and check if there are expected infos.
You can set expected infos by decorators :py:func:`.expected_info_messages`
and :py:func:`.allowed_info_messages`.
"""
f = lambda key, default=[]: getattr(test_method, key, default)
expected_info_messages = f(EXPECTED_INFO_MESSAGES)
allowed_info_messages = f(ALLOWED_INFO_MESSAGES)
self.check_infos(expected_info_messages, allowed_info_messages) | This method is called after each test. It will read decorated
informations and check if there are expected infos.
You can set expected infos by decorators :py:func:`.expected_info_messages`
and :py:func:`.allowed_info_messages`. | Below is the instruction that describes the task:
### Input:
This method is called after each test. It will read decorated
informations and check if there are expected infos.
You can set expected infos by decorators :py:func:`.expected_info_messages`
and :py:func:`.allowed_info_messages`.
### Response:
def check_expected_infos(self, test_method):
    """
    This method is called after each test. It will read decorated
    informations and check if there are expected infos.
    You can set expected infos by decorators :py:func:`.expected_info_messages`
    and :py:func:`.allowed_info_messages`.
    """
    # Direct getattr calls instead of the original assigned lambda
    # (PEP 8 E731); this also gives each lookup its own fresh [] default
    # rather than one shared mutable list.
    expected_info_messages = getattr(test_method, EXPECTED_INFO_MESSAGES, [])
    allowed_info_messages = getattr(test_method, ALLOWED_INFO_MESSAGES, [])
    self.check_infos(expected_info_messages, allowed_info_messages)
def set_uid(self, uid, schema=None):
"""Set a unique ID.
If a UID of a given schema already exists in a record it will
be overwritten, otherwise it will be appended to the record.
Args:
uid (string): unique identifier.
schema (Optional[string]): schema of the unique identifier. If
``None``, the schema will be guessed based on the shape of
``uid``.
Raises:
SchemaUIDConflict: it UID and schema are not matching
"""
try:
uid, schema = author_id_normalize_and_schema(uid, schema)
except UnknownUIDSchema:
# Explicit schema wasn't provided, and the UID is too little
# to figure out the schema of it, this however doesn't mean
# the UID is invalid
pass
self._ensure_field('ids', [])
self.obj['ids'] = [id_ for id_ in self.obj['ids'] if id_.get('schema') != schema]
self._add_uid(uid, schema) | Set a unique ID.
If a UID of a given schema already exists in a record it will
be overwritten, otherwise it will be appended to the record.
Args:
uid (string): unique identifier.
schema (Optional[string]): schema of the unique identifier. If
``None``, the schema will be guessed based on the shape of
``uid``.
Raises:
SchemaUIDConflict: if UID and schema are not matching | Below is the instruction that describes the task:
### Input:
Set a unique ID.
If a UID of a given schema already exists in a record it will
be overwritten, otherwise it will be appended to the record.
Args:
uid (string): unique identifier.
schema (Optional[string]): schema of the unique identifier. If
``None``, the schema will be guessed based on the shape of
``uid``.
Raises:
SchemaUIDConflict: it UID and schema are not matching
### Response:
def set_uid(self, uid, schema=None):
"""Set a unique ID.
If a UID of a given schema already exists in a record it will
be overwritten, otherwise it will be appended to the record.
Args:
uid (string): unique identifier.
schema (Optional[string]): schema of the unique identifier. If
``None``, the schema will be guessed based on the shape of
``uid``.
Raises:
SchemaUIDConflict: it UID and schema are not matching
"""
try:
uid, schema = author_id_normalize_and_schema(uid, schema)
except UnknownUIDSchema:
# Explicit schema wasn't provided, and the UID is too little
# to figure out the schema of it, this however doesn't mean
# the UID is invalid
pass
self._ensure_field('ids', [])
self.obj['ids'] = [id_ for id_ in self.obj['ids'] if id_.get('schema') != schema]
self._add_uid(uid, schema) |
def all_project_administrators(self):
"""
Get the list of project administrators
:return:
"""
for project in self.project_list():
log.info('Processing project: {0} - {1}'.format(project.get('key'), project.get('name')))
yield {
'project_key': project.get('key'),
'project_name': project.get('name'),
'project_administrators': [{'email': x['emailAddress'], 'name': x['displayName']}
for x in self.project_users_with_administrator_permissions(project['key'])]} | Get the list of project administrators
:return: | Below is the the instruction that describes the task:
### Input:
Get the list of project administrators
:return:
### Response:
def all_project_administrators(self):
"""
Get the list of project administrators
:return:
"""
for project in self.project_list():
log.info('Processing project: {0} - {1}'.format(project.get('key'), project.get('name')))
yield {
'project_key': project.get('key'),
'project_name': project.get('name'),
'project_administrators': [{'email': x['emailAddress'], 'name': x['displayName']}
for x in self.project_users_with_administrator_permissions(project['key'])]} |
def update(self, img, value_dict):
"""
Accepts an image reference (object or ID) and dictionary of key/value
pairs, where the key is an attribute of the image, and the value is the
desired new value for that image.
NOTE: There is a bug in Glance where the 'add' operation returns a 409
if the property already exists, which conflicts with the spec. So to
get around this a fresh copy of the image must be retrieved, and the
value of 'op' must be determined based on whether this attribute exists
or not.
"""
img = self.get(img)
uri = "/%s/%s" % (self.uri_base, utils.get_id(img))
body = []
for key, val in value_dict.items():
op = "replace" if key in img.__dict__ else "add"
body.append({"op": op,
"path": "/%s" % key,
"value": val})
headers = {"Content-Type":
"application/openstack-images-v2.1-json-patch"}
resp, resp_body = self.api.method_patch(uri, body=body, headers=headers) | Accepts an image reference (object or ID) and dictionary of key/value
pairs, where the key is an attribute of the image, and the value is the
desired new value for that image.
NOTE: There is a bug in Glance where the 'add' operation returns a 409
if the property already exists, which conflicts with the spec. So to
get around this a fresh copy of the image must be retrieved, and the
value of 'op' must be determined based on whether this attribute exists
or not. | Below is the the instruction that describes the task:
### Input:
Accepts an image reference (object or ID) and dictionary of key/value
pairs, where the key is an attribute of the image, and the value is the
desired new value for that image.
NOTE: There is a bug in Glance where the 'add' operation returns a 409
if the property already exists, which conflicts with the spec. So to
get around this a fresh copy of the image must be retrieved, and the
value of 'op' must be determined based on whether this attribute exists
or not.
### Response:
def update(self, img, value_dict):
"""
Accepts an image reference (object or ID) and dictionary of key/value
pairs, where the key is an attribute of the image, and the value is the
desired new value for that image.
NOTE: There is a bug in Glance where the 'add' operation returns a 409
if the property already exists, which conflicts with the spec. So to
get around this a fresh copy of the image must be retrieved, and the
value of 'op' must be determined based on whether this attribute exists
or not.
"""
img = self.get(img)
uri = "/%s/%s" % (self.uri_base, utils.get_id(img))
body = []
for key, val in value_dict.items():
op = "replace" if key in img.__dict__ else "add"
body.append({"op": op,
"path": "/%s" % key,
"value": val})
headers = {"Content-Type":
"application/openstack-images-v2.1-json-patch"}
resp, resp_body = self.api.method_patch(uri, body=body, headers=headers) |
def autoencoder_ordered_text():
"""Ordered discrete autoencoder model for text."""
hparams = autoencoder_ordered_discrete()
hparams.bottleneck_bits = 1024
hparams.bottleneck_shared_bits = 1024-64
hparams.bottleneck_shared_bits_start_warmup = 75000
hparams.bottleneck_shared_bits_stop_warmup = 275000
hparams.num_hidden_layers = 7
hparams.batch_size = 1024
hparams.autoregressive_mode = "conv5"
hparams.max_hidden_size = 1024
hparams.bottom = {
"inputs": modalities.identity_bottom,
"targets": modalities.identity_bottom,
}
hparams.top = {
"targets": modalities.identity_top,
}
hparams.sample_height = 128
hparams.sample_width = 1
return hparams | Ordered discrete autoencoder model for text. | Below is the the instruction that describes the task:
### Input:
Ordered discrete autoencoder model for text.
### Response:
def autoencoder_ordered_text():
"""Ordered discrete autoencoder model for text."""
hparams = autoencoder_ordered_discrete()
hparams.bottleneck_bits = 1024
hparams.bottleneck_shared_bits = 1024-64
hparams.bottleneck_shared_bits_start_warmup = 75000
hparams.bottleneck_shared_bits_stop_warmup = 275000
hparams.num_hidden_layers = 7
hparams.batch_size = 1024
hparams.autoregressive_mode = "conv5"
hparams.max_hidden_size = 1024
hparams.bottom = {
"inputs": modalities.identity_bottom,
"targets": modalities.identity_bottom,
}
hparams.top = {
"targets": modalities.identity_top,
}
hparams.sample_height = 128
hparams.sample_width = 1
return hparams |
def exists(path, **kwargs):
"""Check if file or directory exists"""
import os.path
return os.path.exists(path, **kwargs) | Check if file or directory exists | Below is the the instruction that describes the task:
### Input:
Check if file or directory exists
### Response:
def exists(path, **kwargs):
"""Check if file or directory exists"""
import os.path
return os.path.exists(path, **kwargs) |
def generate_null_snvs(df, snvs, num_null_sets=5):
"""
Generate a set of null SNVs based on an input list of SNVs and categorical
annotations.
Parameters
----------
df : pandas.DataFrame
Pandas dataframe where each column is a categorization of SNPs.
The index should be SNPs of the form chrom:pos.
snvs : list
List of input SNVs in the format chrom:pos. Entries that aren't in
the index of df will be dropped.
num_null_sets : int
Number of sets of null SNVs to generate.
Returns
-------
null_sets : pandas.Dataframe
Pandas dataframe with input SNVs as first column and null SNVs as
following columns.
"""
import numpy as np
import random
random.seed(20151007)
input_snvs = list(set(df.index) & set(snvs))
sig = df.ix[input_snvs]
not_sig = df.ix[set(df.index) - set(snvs)]
sig['group'] = sig.apply(lambda x: '::'.join(x), axis=1)
not_sig['group'] = not_sig.apply(lambda x: '::'.join(x), axis=1)
null_sets = []
vc = sig.group.value_counts()
bins = {c:sorted(list(df[c].value_counts().index)) for c in df.columns}
ordered_inputs = []
for i in vc.index:
ordered_inputs += list(sig[sig.group == i].index)
tdf = not_sig[not_sig.group == i]
count = vc[i]
for n in xrange(num_null_sets):
if tdf.shape[0] == 0:
groups = [i]
while tdf.shape[0] == 0:
# If there are no potential null SNVs in this group, we'll
# expand the group randomly.
g = groups[-1]
# Choose random bin.
cols = list(not_sig.columns)
cols.remove('group')
b = random.choice(cols)
# Get possibilities for that bin.
t = bins[b]
# Get last set of bin values and the value for the bin we
# want to change.
d = dict(zip(not_sig.columns, g.split('::')))
cat = d[b]
# Randomly walk away from bin value.
ind = t.index(cat)
if ind == 0:
ind += 1
elif ind == len(t) - 1:
ind -= 1
else:
ind += random.choice([-1, 1])
d[b] = t[ind]
groups.append('::'.join(pd.Series(d)[not_sig.columns].astype(str)))
tdf = not_sig[not_sig.group.apply(lambda x: x in groups)]
if count <= tdf.shape[0]:
ind = random.sample(tdf.index, count)
else:
ind = list(np.random.choice(tdf.index, size=count, replace=True))
if i == vc.index[0]:
null_sets.append(ind)
else:
null_sets[n] += ind
null_sets = pd.DataFrame(null_sets).T
null_sets.columns = ['null_{}'.format(x) for x in null_sets.columns]
cs = list(null_sets.columns)
null_sets['input'] = ordered_inputs
null_sets = null_sets[['input'] + cs]
return null_sets | Generate a set of null SNVs based on an input list of SNVs and categorical
annotations.
Parameters
----------
df : pandas.DataFrame
Pandas dataframe where each column is a categorization of SNPs.
The index should be SNPs of the form chrom:pos.
snvs : list
List of input SNVs in the format chrom:pos. Entries that aren't in
the index of df will be dropped.
num_null_sets : int
Number of sets of null SNVs to generate.
Returns
-------
null_sets : pandas.Dataframe
Pandas dataframe with input SNVs as first column and null SNVs as
following columns. | Below is the the instruction that describes the task:
### Input:
Generate a set of null SNVs based on an input list of SNVs and categorical
annotations.
Parameters
----------
df : pandas.DataFrame
Pandas dataframe where each column is a categorization of SNPs.
The index should be SNPs of the form chrom:pos.
snvs : list
List of input SNVs in the format chrom:pos. Entries that aren't in
the index of df will be dropped.
num_null_sets : int
Number of sets of null SNVs to generate.
Returns
-------
null_sets : pandas.Dataframe
Pandas dataframe with input SNVs as first column and null SNVs as
following columns.
### Response:
def generate_null_snvs(df, snvs, num_null_sets=5):
"""
Generate a set of null SNVs based on an input list of SNVs and categorical
annotations.
Parameters
----------
df : pandas.DataFrame
Pandas dataframe where each column is a categorization of SNPs.
The index should be SNPs of the form chrom:pos.
snvs : list
List of input SNVs in the format chrom:pos. Entries that aren't in
the index of df will be dropped.
num_null_sets : int
Number of sets of null SNVs to generate.
Returns
-------
null_sets : pandas.Dataframe
Pandas dataframe with input SNVs as first column and null SNVs as
following columns.
"""
import numpy as np
import random
random.seed(20151007)
input_snvs = list(set(df.index) & set(snvs))
sig = df.ix[input_snvs]
not_sig = df.ix[set(df.index) - set(snvs)]
sig['group'] = sig.apply(lambda x: '::'.join(x), axis=1)
not_sig['group'] = not_sig.apply(lambda x: '::'.join(x), axis=1)
null_sets = []
vc = sig.group.value_counts()
bins = {c:sorted(list(df[c].value_counts().index)) for c in df.columns}
ordered_inputs = []
for i in vc.index:
ordered_inputs += list(sig[sig.group == i].index)
tdf = not_sig[not_sig.group == i]
count = vc[i]
for n in xrange(num_null_sets):
if tdf.shape[0] == 0:
groups = [i]
while tdf.shape[0] == 0:
# If there are no potential null SNVs in this group, we'll
# expand the group randomly.
g = groups[-1]
# Choose random bin.
cols = list(not_sig.columns)
cols.remove('group')
b = random.choice(cols)
# Get possibilities for that bin.
t = bins[b]
# Get last set of bin values and the value for the bin we
# want to change.
d = dict(zip(not_sig.columns, g.split('::')))
cat = d[b]
# Randomly walk away from bin value.
ind = t.index(cat)
if ind == 0:
ind += 1
elif ind == len(t) - 1:
ind -= 1
else:
ind += random.choice([-1, 1])
d[b] = t[ind]
groups.append('::'.join(pd.Series(d)[not_sig.columns].astype(str)))
tdf = not_sig[not_sig.group.apply(lambda x: x in groups)]
if count <= tdf.shape[0]:
ind = random.sample(tdf.index, count)
else:
ind = list(np.random.choice(tdf.index, size=count, replace=True))
if i == vc.index[0]:
null_sets.append(ind)
else:
null_sets[n] += ind
null_sets = pd.DataFrame(null_sets).T
null_sets.columns = ['null_{}'.format(x) for x in null_sets.columns]
cs = list(null_sets.columns)
null_sets['input'] = ordered_inputs
null_sets = null_sets[['input'] + cs]
return null_sets |
def get_stores(self, search_term):
'''
Search for dominos pizza stores using a search term.
:param string search: Search term.
:return: A list of nearby stores matching the search term.
:rtype: list
'''
params = {'SearchText': search_term}
response = self.__get('/storefindermap/storesearch', params=params)
return Stores(response.json()) | Search for dominos pizza stores using a search term.
:param string search: Search term.
:return: A list of nearby stores matching the search term.
:rtype: list | Below is the the instruction that describes the task:
### Input:
Search for dominos pizza stores using a search term.
:param string search: Search term.
:return: A list of nearby stores matching the search term.
:rtype: list
### Response:
def get_stores(self, search_term):
'''
Search for dominos pizza stores using a search term.
:param string search: Search term.
:return: A list of nearby stores matching the search term.
:rtype: list
'''
params = {'SearchText': search_term}
response = self.__get('/storefindermap/storesearch', params=params)
return Stores(response.json()) |
def coef(self):
"""
Return the coefficients which can be applied to the non-standardized data.
Note: standardize = True by default, if set to False then coef() return the coefficients which are fit directly.
"""
tbl = self._model_json["output"]["coefficients_table"]
if tbl is None:
return None
return {name: coef for name, coef in zip(tbl["names"], tbl["coefficients"])} | Return the coefficients which can be applied to the non-standardized data.
Note: standardize = True by default, if set to False then coef() return the coefficients which are fit directly. | Below is the the instruction that describes the task:
### Input:
Return the coefficients which can be applied to the non-standardized data.
Note: standardize = True by default, if set to False then coef() return the coefficients which are fit directly.
### Response:
def coef(self):
"""
Return the coefficients which can be applied to the non-standardized data.
Note: standardize = True by default, if set to False then coef() return the coefficients which are fit directly.
"""
tbl = self._model_json["output"]["coefficients_table"]
if tbl is None:
return None
return {name: coef for name, coef in zip(tbl["names"], tbl["coefficients"])} |
def timestamp(self, posix: bool = True, **kwargs) -> Union[str, int]:
"""Generate random timestamp.
:param posix: POSIX time.
:param kwargs: Kwargs for :meth:`~Datetime.datetime()`.
:return: Timestamp.
"""
stamp = self.datetime(**kwargs)
if posix:
return timegm(stamp.utctimetuple())
return stamp.strftime('%Y-%m-%dT%H:%M:%SZ') | Generate random timestamp.
:param posix: POSIX time.
:param kwargs: Kwargs for :meth:`~Datetime.datetime()`.
:return: Timestamp. | Below is the the instruction that describes the task:
### Input:
Generate random timestamp.
:param posix: POSIX time.
:param kwargs: Kwargs for :meth:`~Datetime.datetime()`.
:return: Timestamp.
### Response:
def timestamp(self, posix: bool = True, **kwargs) -> Union[str, int]:
"""Generate random timestamp.
:param posix: POSIX time.
:param kwargs: Kwargs for :meth:`~Datetime.datetime()`.
:return: Timestamp.
"""
stamp = self.datetime(**kwargs)
if posix:
return timegm(stamp.utctimetuple())
return stamp.strftime('%Y-%m-%dT%H:%M:%SZ') |
def get_preferences(self):
""" Gets all the preferences of the current user
:returns: a dict presenting the preferences by name, values are
typed to str/bool/int/float regarding their content.
"""
pref_list = self.request('GetPrefs')['pref']
out = {}
for pref in pref_list:
out[pref['name']] = utils.auto_type(pref['_content'])
return out | Gets all the preferences of the current user
:returns: a dict presenting the preferences by name, values are
typed to str/bool/int/float regarding their content. | Below is the the instruction that describes the task:
### Input:
Gets all the preferences of the current user
:returns: a dict presenting the preferences by name, values are
typed to str/bool/int/float regarding their content.
### Response:
def get_preferences(self):
""" Gets all the preferences of the current user
:returns: a dict presenting the preferences by name, values are
typed to str/bool/int/float regarding their content.
"""
pref_list = self.request('GetPrefs')['pref']
out = {}
for pref in pref_list:
out[pref['name']] = utils.auto_type(pref['_content'])
return out |
def read_bdf(sdmfile, scannum, nskip=0, readints=0, bdfdir=''):
""" Uses sdmpy to read a given range of integrations from sdm of given scan.
readints=0 will read all of bdf (skipping nskip).
"""
assert os.path.exists(sdmfile), 'sdmfile %s does not exist' % sdmfile
sdm = getsdm(sdmfile, bdfdir=bdfdir)
scan = sdm.scan(scannum)
assert scan.bdf.fname, 'bdfstr not defined for scan %d' % scannum
if readints == 0:
readints = scan.bdf.numIntegration - nskip
logger.info('Reading %d ints starting at int %d' % (readints, nskip))
npols = len(sdmpy.scan.sdmarray(sdm['Polarization'][0].corrType))
data = np.empty( (readints, scan.bdf.numBaseline, sum(scan.numchans), npols), dtype='complex64', order='C')
data[:] = scan.bdf.get_data(trange=[nskip, nskip+readints]).reshape(data.shape)
return data | Uses sdmpy to read a given range of integrations from sdm of given scan.
readints=0 will read all of bdf (skipping nskip). | Below is the the instruction that describes the task:
### Input:
Uses sdmpy to read a given range of integrations from sdm of given scan.
readints=0 will read all of bdf (skipping nskip).
### Response:
def read_bdf(sdmfile, scannum, nskip=0, readints=0, bdfdir=''):
""" Uses sdmpy to read a given range of integrations from sdm of given scan.
readints=0 will read all of bdf (skipping nskip).
"""
assert os.path.exists(sdmfile), 'sdmfile %s does not exist' % sdmfile
sdm = getsdm(sdmfile, bdfdir=bdfdir)
scan = sdm.scan(scannum)
assert scan.bdf.fname, 'bdfstr not defined for scan %d' % scannum
if readints == 0:
readints = scan.bdf.numIntegration - nskip
logger.info('Reading %d ints starting at int %d' % (readints, nskip))
npols = len(sdmpy.scan.sdmarray(sdm['Polarization'][0].corrType))
data = np.empty( (readints, scan.bdf.numBaseline, sum(scan.numchans), npols), dtype='complex64', order='C')
data[:] = scan.bdf.get_data(trange=[nskip, nskip+readints]).reshape(data.shape)
return data |
def __get_default_form_data_input(self, elements):
"""Get the default form data {key: value} for the given elements.
Args:
elements list(obj): Soup elements.
Returns:
obj: The {key: value} form data
"""
form_data = OrderedDict()
for element in elements:
default_value = self.__get_default_value_from_element(element)
if default_value is False:
continue
form_data[element["name"]] = default_value
return form_data | Get the default form data {key: value} for the given elements.
Args:
elements list(obj): Soup elements.
Returns:
obj: The {key: value} form data | Below is the the instruction that describes the task:
### Input:
Get the default form data {key: value} for the given elements.
Args:
elements list(obj): Soup elements.
Returns:
obj: The {key: value} form data
### Response:
def __get_default_form_data_input(self, elements):
"""Get the default form data {key: value} for the given elements.
Args:
elements list(obj): Soup elements.
Returns:
obj: The {key: value} form data
"""
form_data = OrderedDict()
for element in elements:
default_value = self.__get_default_value_from_element(element)
if default_value is False:
continue
form_data[element["name"]] = default_value
return form_data |
def get_original_order_unique_ids(id_array):
"""
Get the unique id's of id_array, in their original order of appearance.
Parameters
----------
id_array : 1D ndarray.
Should contain the ids that we want to extract the unique values from.
Returns
-------
original_order_unique_ids : 1D ndarray.
Contains the unique ids from `id_array`, in their original order of
appearance.
"""
assert isinstance(id_array, np.ndarray)
assert len(id_array.shape) == 1
# Get the indices of the unique IDs in their order of appearance
# Note the [1] is because the np.unique() call will return both the sorted
# unique IDs and the indices
original_unique_id_indices =\
np.sort(np.unique(id_array, return_index=True)[1])
# Get the unique ids, in their original order of appearance
original_order_unique_ids = id_array[original_unique_id_indices]
return original_order_unique_ids | Get the unique id's of id_array, in their original order of appearance.
Parameters
----------
id_array : 1D ndarray.
Should contain the ids that we want to extract the unique values from.
Returns
-------
original_order_unique_ids : 1D ndarray.
Contains the unique ids from `id_array`, in their original order of
appearance. | Below is the the instruction that describes the task:
### Input:
Get the unique id's of id_array, in their original order of appearance.
Parameters
----------
id_array : 1D ndarray.
Should contain the ids that we want to extract the unique values from.
Returns
-------
original_order_unique_ids : 1D ndarray.
Contains the unique ids from `id_array`, in their original order of
appearance.
### Response:
def get_original_order_unique_ids(id_array):
"""
Get the unique id's of id_array, in their original order of appearance.
Parameters
----------
id_array : 1D ndarray.
Should contain the ids that we want to extract the unique values from.
Returns
-------
original_order_unique_ids : 1D ndarray.
Contains the unique ids from `id_array`, in their original order of
appearance.
"""
assert isinstance(id_array, np.ndarray)
assert len(id_array.shape) == 1
# Get the indices of the unique IDs in their order of appearance
# Note the [1] is because the np.unique() call will return both the sorted
# unique IDs and the indices
original_unique_id_indices =\
np.sort(np.unique(id_array, return_index=True)[1])
# Get the unique ids, in their original order of appearance
original_order_unique_ids = id_array[original_unique_id_indices]
return original_order_unique_ids |
def import_volumes(source, ignore_conflicts, yes):
'''Import volumes
SOURCE must be a json file and must follow the same structure used in `libreant-db export`.
Pass - to read from standard input.
'''
volumes = json.load(source)
tot = len(volumes)
if not yes:
click.confirm("Are you sure you want to import {} volumes into index '{}'".format(tot, arc._config['ES_INDEXNAME']))
conflicts=0
with click.progressbar(volumes, label='adding volumes') as bar:
for v in bar:
try:
arc.import_volume(v)
except ConflictException as ce:
if not ignore_conflicts:
die(str(ce))
conflicts += 1
except Exception as e:
if conf.get('DEBUG', False):
raise
else:
die(str(e))
if conflicts > 0:
click.echo("{} volumes has been skipped beacause of a conflict".format(conflicts)) | Import volumes
SOURCE must be a json file and must follow the same structure used in `libreant-db export`.
Pass - to read from standard input. | Below is the the instruction that describes the task:
### Input:
Import volumes
SOURCE must be a json file and must follow the same structure used in `libreant-db export`.
Pass - to read from standard input.
### Response:
def import_volumes(source, ignore_conflicts, yes):
'''Import volumes
SOURCE must be a json file and must follow the same structure used in `libreant-db export`.
Pass - to read from standard input.
'''
volumes = json.load(source)
tot = len(volumes)
if not yes:
click.confirm("Are you sure you want to import {} volumes into index '{}'".format(tot, arc._config['ES_INDEXNAME']))
conflicts=0
with click.progressbar(volumes, label='adding volumes') as bar:
for v in bar:
try:
arc.import_volume(v)
except ConflictException as ce:
if not ignore_conflicts:
die(str(ce))
conflicts += 1
except Exception as e:
if conf.get('DEBUG', False):
raise
else:
die(str(e))
if conflicts > 0:
click.echo("{} volumes has been skipped beacause of a conflict".format(conflicts)) |
def _initialize_pop(self, pop_size):
"""Generates the initial population and assigns fitnesses."""
self.initialize_cma_es(pop_size)
self.toolbox.register("individual", self._make_individual)
self.toolbox.register("generate", self._generate,
self.toolbox.individual)
self.toolbox.register("population", tools.initRepeat,
list, self._initial_individual)
self.toolbox.register("update", self.update)
self.population = self.toolbox.population(n=pop_size)
self.assign_fitnesses(self.population)
self._model_count += len(self.population)
return | Generates the initial population and assigns fitnesses. | Below is the the instruction that describes the task:
### Input:
Generates the initial population and assigns fitnesses.
### Response:
def _initialize_pop(self, pop_size):
"""Generates the initial population and assigns fitnesses."""
self.initialize_cma_es(pop_size)
self.toolbox.register("individual", self._make_individual)
self.toolbox.register("generate", self._generate,
self.toolbox.individual)
self.toolbox.register("population", tools.initRepeat,
list, self._initial_individual)
self.toolbox.register("update", self.update)
self.population = self.toolbox.population(n=pop_size)
self.assign_fitnesses(self.population)
self._model_count += len(self.population)
return |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.