code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def trill_links(self):
"""dict: trill link details
"""
xmlns = 'urn:brocade.com:mgmt:brocade-fabric-service'
get_links_info = ET.Element('show-linkinfo', xmlns=xmlns)
results = self._callback(get_links_info, handler='get')
result = []
for item in results.findall('{%s}show-link-info' % xmlns):
src_rbridge_id = item.find('{%s}linkinfo-rbridgeid' % xmlns).text
src_switch_wwn = item.find('{%s}linkinfo-wwn' % xmlns).text
for link in item.findall('{%s}linkinfo-isl' % xmlns):
dest_rbridge_id = link.find(
'{%s}linkinfo-isllink-destdomain' % xmlns).text
src_interface = link.find(
'{%s}linkinfo-isllink-srcport-interface' % xmlns).text
dest_interface = link.find(
'{%s}linkinfo-isllink-destport-interface' % xmlns).text
link_cost = link.find('{%s}linkinfo-isl-linkcost' % xmlns).text
link_cost_count = link.find(
'{%s}linkinfo-isllink-costcount' % xmlns).text
item_results = {'source-rbridgeid': src_rbridge_id,
'source-switch-wwn': src_switch_wwn,
'dest-rbridgeid': dest_rbridge_id,
'source-interface': src_interface,
'dest-interface': dest_interface,
'link-cost': link_cost,
'link-costcount': link_cost_count}
result.append(item_results)
return result | dict: trill link details | Below is the instruction that describes the task:
### Input:
dict: trill link details
### Response:
def trill_links(self):
    """Fetch TRILL/ISL link details from the fabric.

    Note: despite the original "dict" summary, this returns a *list* of
    dicts, one per ISL link, each carrying the source/destination
    rbridge ids, interfaces, link cost and cost count (all as strings,
    taken from the XML ``.text`` fields).
    """
    # Namespace of the Brocade fabric-service schema; every element
    # lookup below must be namespace-qualified with it.
    xmlns = 'urn:brocade.com:mgmt:brocade-fabric-service'
    get_links_info = ET.Element('show-linkinfo', xmlns=xmlns)
    # _callback issues the 'get' RPC and returns the parsed XML reply.
    results = self._callback(get_links_info, handler='get')
    result = []
    for item in results.findall('{%s}show-link-info' % xmlns):
        # Per-switch attributes, shared by all of that switch's ISLs.
        src_rbridge_id = item.find('{%s}linkinfo-rbridgeid' % xmlns).text
        src_switch_wwn = item.find('{%s}linkinfo-wwn' % xmlns).text
        for link in item.findall('{%s}linkinfo-isl' % xmlns):
            dest_rbridge_id = link.find(
                '{%s}linkinfo-isllink-destdomain' % xmlns).text
            src_interface = link.find(
                '{%s}linkinfo-isllink-srcport-interface' % xmlns).text
            dest_interface = link.find(
                '{%s}linkinfo-isllink-destport-interface' % xmlns).text
            link_cost = link.find('{%s}linkinfo-isl-linkcost' % xmlns).text
            link_cost_count = link.find(
                '{%s}linkinfo-isllink-costcount' % xmlns).text
            # One flat record per ISL link.
            item_results = {'source-rbridgeid': src_rbridge_id,
                            'source-switch-wwn': src_switch_wwn,
                            'dest-rbridgeid': dest_rbridge_id,
                            'source-interface': src_interface,
                            'dest-interface': dest_interface,
                            'link-cost': link_cost,
                            'link-costcount': link_cost_count}
            result.append(item_results)
    return result |
def store_output_files(self, dir_path):
"""Store OSLOM output files to a directory."""
if self.last_result:
for entry in os.listdir(self.last_result["output_dir"]):
path = os.path.join(self.last_result["output_dir"], entry)
if os.path.isfile(path):
shutil.copy(path, os.path.join(dir_path, entry))
shutil.copy(
self.get_path(OslomRunner.SEED_FILE),
os.path.join(dir_path, OslomRunner.SEED_FILE))
args_file = os.path.join(dir_path, OslomRunner.ARGS_FILE)
with open(args_file, "w") as writer:
writer.write("{}\n".format(" ".join(self.last_result["args"])))
self.id_remapper.store_mapping(
os.path.join(dir_path, OslomRunner.IDS_MAPPING_FILE)) | Store OSLOM output files to a directory. | Below is the instruction that describes the task:
### Input:
Store OSLOM output files to a directory.
### Response:
def store_output_files(self, dir_path):
    """Store OSLOM output files to a directory.

    Copies every regular file from the last run's output directory into
    *dir_path*, along with the seed file, a one-line record of the
    command-line arguments, and the node-id mapping. No-op when no run
    result is recorded yet.
    """
    if self.last_result:
        # Copy every plain file produced by the last OSLOM run
        # (subdirectories are skipped).
        for entry in os.listdir(self.last_result["output_dir"]):
            path = os.path.join(self.last_result["output_dir"], entry)
            if os.path.isfile(path):
                shutil.copy(path, os.path.join(dir_path, entry))
        # Copy the seed file alongside the outputs.
        shutil.copy(
            self.get_path(OslomRunner.SEED_FILE),
            os.path.join(dir_path, OslomRunner.SEED_FILE))
        # Record the exact arguments of the run, space-separated on one line.
        args_file = os.path.join(dir_path, OslomRunner.ARGS_FILE)
        with open(args_file, "w") as writer:
            writer.write("{}\n".format(" ".join(self.last_result["args"])))
        # Persist the internal <-> external node id mapping.
        self.id_remapper.store_mapping(
            os.path.join(dir_path, OslomRunner.IDS_MAPPING_FILE)) |
def sequence(self, struct, size=1000, tree_depth=1, append_callable=None):
""" Generates random values for sequence-like objects
@struct: the sequence-like structure you want to fill with random
data
@size: #int number of random values to include in each @tree_depth
@tree_depth: #int dict tree dimensions size, i.e.
1=|(value1, value2)|
2=|((value1, value2), (value1, value2))|
@append_callable: #callable method which appends/adds data to your
sequence-like structure - e.g. :meth:list.append
-> random @struct
..
from collections import UserList
from vital.debug import RandData
class MySequence(UserList):
pass
rd = RandData(int)
my_seq = MySequence()
rd.sequence(my_seq, 3, 1, my_seq.append)
# -> [88508293836062443, 49097807561770961, 55043550817099444]
..
"""
if not tree_depth:
return self._map_type()
_struct = struct()
add_struct = _struct.append if not append_callable \
else getattr(_struct, append_callable)
for x in range(size):
add_struct(self.sequence(
struct, size, tree_depth-1, append_callable))
return _struct | Generates random values for sequence-like objects
@struct: the sequence-like structure you want to fill with random
data
@size: #int number of random values to include in each @tree_depth
@tree_depth: #int dict tree dimensions size, i.e.
1=|(value1, value2)|
2=|((value1, value2), (value1, value2))|
@append_callable: #callable method which appends/adds data to your
sequence-like structure - e.g. :meth:list.append
-> random @struct
..
from collections import UserList
from vital.debug import RandData
class MySequence(UserList):
pass
rd = RandData(int)
my_seq = MySequence()
rd.sequence(my_seq, 3, 1, my_seq.append)
# -> [88508293836062443, 49097807561770961, 55043550817099444]
.. | Below is the instruction that describes the task:
### Input:
Generates random values for sequence-like objects
@struct: the sequence-like structure you want to fill with random
data
@size: #int number of random values to include in each @tree_depth
@tree_depth: #int dict tree dimensions size, i.e.
1=|(value1, value2)|
2=|((value1, value2), (value1, value2))|
@append_callable: #callable method which appends/adds data to your
sequence-like structure - e.g. :meth:list.append
-> random @struct
..
from collections import UserList
from vital.debug import RandData
class MySequence(UserList):
pass
rd = RandData(int)
my_seq = MySequence()
rd.sequence(my_seq, 3, 1, my_seq.append)
# -> [88508293836062443, 49097807561770961, 55043550817099444]
..
### Response:
def sequence(self, struct, size=1000, tree_depth=1, append_callable=None):
    """ Generates random values for sequence-like objects

        @struct: the sequence-like structure you want to fill with random
            data
        @size: #int number of random values to include in each @tree_depth
        @tree_depth: #int dict tree dimensions size, i.e.
            1=|(value1, value2)|
            2=|((value1, value2), (value1, value2))|
        @append_callable: #callable method which appends/adds data to your
            sequence-like structure - e.g. :meth:list.append

        -> random @struct
        ..
            from collections import UserList
            from vital.debug import RandData

            class MySequence(UserList):
                pass

            rd = RandData(int)
            my_seq = MySequence()
            rd.sequence(my_seq, 3, 1, my_seq.append)
            # -> [88508293836062443, 49097807561770961, 55043550817099444]
        ..
    """
    # Depth exhausted: produce a single random leaf value.
    if not tree_depth:
        return self._map_type()
    # NOTE(review): the docstring example passes an *instance* (my_seq),
    # but struct() here calls it -- the code expects a class/factory.
    # Confirm which contract is intended.
    _struct = struct()
    # NOTE(review): getattr(_struct, append_callable) expects the method
    # *name* as a string, yet the docstring example passes a bound
    # method (my_seq.append), which would raise TypeError here -- confirm.
    add_struct = _struct.append if not append_callable \
        else getattr(_struct, append_callable)
    # Each of the `size` entries is itself a sequence one level
    # shallower; leaves come from self._map_type().
    for x in range(size):
        add_struct(self.sequence(
            struct, size, tree_depth-1, append_callable))
    return _struct |
def create_node(hostname, username, password, name, address, trans_label=None):
'''
A function to connect to a bigip device and create a node.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the node
address
The address of the node
trans_label
The label of the transaction stored within the grain:
``bigip_f5_trans:<label>``
CLI Example::
salt '*' bigip.create_node bigip admin admin 10.1.1.2
'''
#build session
bigip_session = _build_session(username, password, trans_label)
#construct the payload
payload = {}
payload['name'] = name
payload['address'] = address
#post to REST
try:
response = bigip_session.post(
BIG_IP_URL_BASE.format(host=hostname) + '/ltm/node',
data=salt.utils.json.dumps(payload))
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response) | A function to connect to a bigip device and create a node.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the node
address
The address of the node
trans_label
The label of the transaction stored within the grain:
``bigip_f5_trans:<label>``
CLI Example::
salt '*' bigip.create_node bigip admin admin 10.1.1.2 | Below is the instruction that describes the task:
### Input:
A function to connect to a bigip device and create a node.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the node
address
The address of the node
trans_label
The label of the transaction stored within the grain:
``bigip_f5_trans:<label>``
CLI Example::
salt '*' bigip.create_node bigip admin admin 10.1.1.2
### Response:
def create_node(hostname, username, password, name, address, trans_label=None):
    '''
    A function to connect to a bigip device and create a node.

    hostname
        The host/address of the bigip device
    username
        The iControl REST username
    password
        The iControl REST password
    name
        The name of the node
    address
        The address of the node
    trans_label
        The label of the transaction stored within the grain:
        ``bigip_f5_trans:<label>``

    CLI Example::

        salt '*' bigip.create_node bigip admin admin 10.1.1.2
    '''
    #build session
    bigip_session = _build_session(username, password, trans_label)
    #construct the payload
    payload = {}
    payload['name'] = name
    payload['address'] = address
    #post to REST
    try:
        response = bigip_session.post(
            BIG_IP_URL_BASE.format(host=hostname) + '/ltm/node',
            data=salt.utils.json.dumps(payload))
    except requests.exceptions.ConnectionError as e:
        # Connectivity failures are reported via the helper as a return
        # value rather than raised to the caller.
        return _load_connection_error(hostname, e)
    return _load_response(response) |
def del_permission(self, role, name):
""" revoke authorization of a group """
if not self.has_permission(role, name):
return True
targetGroup = AuthGroup.objects(role=role, creator=self.client).first()
target = AuthPermission.objects(groups=targetGroup, name=name, creator=self.client).first()
if not target:
return True
target.delete()
return True | revoke authorization of a group | Below is the instruction that describes the task:
### Input:
revoke authorization of a group
### Response:
def del_permission(self, role, name):
    """ revoke authorization of a group

    Removes permission *name* from the group identified by *role*
    (scoped to self.client). Always returns True: when the permission
    was never granted, when no matching record is found, and after a
    successful delete.
    """
    # Nothing to do if the role does not currently hold the permission.
    if not self.has_permission(role, name):
        return True
    targetGroup = AuthGroup.objects(role=role, creator=self.client).first()
    target = AuthPermission.objects(groups=targetGroup, name=name, creator=self.client).first()
    # No matching permission record: treat as already revoked.
    if not target:
        return True
    target.delete()
    return True |
def markCollapsed( self, direction, sizes ):
"""
Updates the interface to reflect that the splitter is collapsed.
:param direction | <XSplitterHandle.CollapseDirection>
sizes | [<int>, ..]
"""
self._collapsed = True
self._storedSizes = sizes[:]
if ( direction == XSplitterHandle.CollapseDirection.Before ):
if ( self.orientation() == Qt.Horizontal ):
self._collapseAfter.setArrowType( Qt.RightArrow )
self._collapseBefore.setArrowType( Qt.RightArrow )
else:
self._collapseAfter.setArrowType( Qt.DownArrow )
self._collapseBefore.setArrowType( Qt.DownArrow )
else:
if ( self.orientation() == Qt.Horizontal ):
self._collapseAfter.setArrowType( Qt.LeftArrow )
self._collapseBefore.setArrowType( Qt.LeftArrow )
else:
self._collapseAfter.setArrowType( Qt.UpArrow )
self._collapseAfter.setArrowType( Qt.UpArrow ) | Updates the interface to reflect that the splitter is collapsed.
:param direction | <XSplitterHandle.CollapseDirection>
sizes | [<int>, ..] | Below is the instruction that describes the task:
### Input:
Updates the interface to reflect that the splitter is collapsed.
:param direction | <XSplitterHandle.CollapseDirection>
sizes | [<int>, ..]
### Response:
def markCollapsed( self, direction, sizes ):
    """
    Updates the interface to reflect that the splitter is collapsed.

    :param direction | <XSplitterHandle.CollapseDirection>
           sizes     | [<int>, ..]
    """
    self._collapsed = True
    # Keep a copy of the pre-collapse sizes so they can be restored later.
    self._storedSizes = sizes[:]
    if ( direction == XSplitterHandle.CollapseDirection.Before ):
        # Collapsed toward the leading edge: both arrows point toward
        # the trailing edge (right for horizontal, down for vertical).
        if ( self.orientation() == Qt.Horizontal ):
            self._collapseAfter.setArrowType( Qt.RightArrow )
            self._collapseBefore.setArrowType( Qt.RightArrow )
        else:
            self._collapseAfter.setArrowType( Qt.DownArrow )
            self._collapseBefore.setArrowType( Qt.DownArrow )
    else:
        if ( self.orientation() == Qt.Horizontal ):
            self._collapseAfter.setArrowType( Qt.LeftArrow )
            self._collapseBefore.setArrowType( Qt.LeftArrow )
        else:
            self._collapseAfter.setArrowType( Qt.UpArrow )
            # NOTE(review): this line repeats _collapseAfter; by symmetry
            # with the other three branches it should probably be
            # self._collapseBefore.setArrowType( Qt.UpArrow ) -- confirm.
            self._collapseAfter.setArrowType( Qt.UpArrow ) |
def setSession(self, session_cookies):
"""Loads session cookies
:param session_cookies: A dictionay containing session cookies
:type session_cookies: dict
:return: False if `session_cookies` does not contain proper cookies
:rtype: bool
"""
# Quick check to see if session_cookies is formatted properly
if not session_cookies or "c_user" not in session_cookies:
return False
try:
# Load cookies into current session
self._session.cookies = requests.cookies.merge_cookies(
self._session.cookies, session_cookies
)
self._postLogin()
except Exception as e:
log.exception("Failed loading session")
self._resetValues()
return False
return True | Loads session cookies
:param session_cookies: A dictionary containing session cookies
:type session_cookies: dict
:return: False if `session_cookies` does not contain proper cookies
:rtype: bool | Below is the instruction that describes the task:
### Input:
Loads session cookies
:param session_cookies: A dictionary containing session cookies
:type session_cookies: dict
:return: False if `session_cookies` does not contain proper cookies
:rtype: bool
### Response:
def setSession(self, session_cookies):
    """Loads session cookies

    :param session_cookies: A dictionary containing session cookies
    :type session_cookies: dict
    :return: False if `session_cookies` does not contain proper cookies
    :rtype: bool
    """
    # Quick check to see if session_cookies is formatted properly --
    # "c_user" is the marker cookie that must be present.
    if not session_cookies or "c_user" not in session_cookies:
        return False
    try:
        # Load cookies into current session (merged with any existing ones).
        self._session.cookies = requests.cookies.merge_cookies(
            self._session.cookies, session_cookies
        )
        self._postLogin()
    except Exception as e:
        # Any failure during cookie load / post-login leaves the client
        # reset rather than half-initialized.
        log.exception("Failed loading session")
        self._resetValues()
        return False
    return True |
def get():
"""Return list of Scheduling Blocks Instances known to SDP ."""
LOG.debug('GET list of SBIs.')
# Construct response object.
_url = get_root_url()
response = dict(scheduling_blocks=[],
links=dict(home='{}'.format(_url)))
# Get ordered list of SBI ID's.
block_ids = DB.get_sched_block_instance_ids()
# Loop over SBIs and add summary of each to the list of SBIs in the
# response.
for block in DB.get_block_details(block_ids):
block_id = block['id']
LOG.debug('Adding SBI %s to list', block_id)
LOG.debug(block)
block['num_processing_blocks'] = len(block['processing_block_ids'])
temp = ['OK'] * 10 + ['WAITING'] * 4 + ['FAILED'] * 2
block['status'] = choice(temp)
try:
del block['processing_block_ids']
except KeyError:
pass
block['links'] = {
'detail': '{}/scheduling-block/{}' .format(_url, block_id)
}
response['scheduling_blocks'].append(block)
return response, HTTPStatus.OK | Return list of Scheduling Blocks Instances known to SDP . | Below is the instruction that describes the task:
### Input:
Return list of Scheduling Blocks Instances known to SDP .
### Response:
def get():
    """Return list of Scheduling Blocks Instances known to SDP .

    Returns a ``(response_dict, HTTPStatus.OK)`` tuple; the dict holds a
    summary of each SBI plus navigation links.
    """
    LOG.debug('GET list of SBIs.')
    # Construct response object.
    _url = get_root_url()
    response = dict(scheduling_blocks=[],
                    links=dict(home='{}'.format(_url)))
    # Get ordered list of SBI ID's.
    block_ids = DB.get_sched_block_instance_ids()
    # Loop over SBIs and add summary of each to the list of SBIs in the
    # response.
    for block in DB.get_block_details(block_ids):
        block_id = block['id']
        LOG.debug('Adding SBI %s to list', block_id)
        LOG.debug(block)
        # Summarize processing blocks as a count only.
        block['num_processing_blocks'] = len(block['processing_block_ids'])
        # NOTE(review): the status is picked at random with a 10/4/2
        # OK/WAITING/FAILED weighting -- this looks like placeholder /
        # demo behaviour; confirm before relying on this field.
        temp = ['OK'] * 10 + ['WAITING'] * 4 + ['FAILED'] * 2
        block['status'] = choice(temp)
        # Drop the raw id list; the detail link exposes the full record.
        try:
            del block['processing_block_ids']
        except KeyError:
            pass
        block['links'] = {
            'detail': '{}/scheduling-block/{}' .format(_url, block_id)
        }
        response['scheduling_blocks'].append(block)
    return response, HTTPStatus.OK |
def queryset(self, request, queryset):
"""
Return the filtered queryset based on the value provided in the query string.
source: https://docs.djangoproject.com/en/1.10/ref/contrib/admin/#django.contrib.admin.ModelAdmin.list_filter
"""
if self.value() is None:
return queryset.all()
else:
return queryset.filter(subscriptions__status=self.value()).distinct() | Return the filtered queryset based on the value provided in the query string.
source: https://docs.djangoproject.com/en/1.10/ref/contrib/admin/#django.contrib.admin.ModelAdmin.list_filter | Below is the the instruction that describes the task:
### Input:
Return the filtered queryset based on the value provided in the query string.
source: https://docs.djangoproject.com/en/1.10/ref/contrib/admin/#django.contrib.admin.ModelAdmin.list_filter
### Response:
def queryset(self, request, queryset):
    """
    Return the filtered queryset based on the value provided in the query string.
    source: https://docs.djangoproject.com/en/1.10/ref/contrib/admin/#django.contrib.admin.ModelAdmin.list_filter
    """
    # No filter selected ("All"): return the unfiltered queryset.
    if self.value() is None:
        return queryset.all()
    else:
        # distinct() guards against duplicate rows produced by the join
        # across the related subscriptions.
        return queryset.filter(subscriptions__status=self.value()).distinct() |
def get_subarray_sbi_ids(sub_array_id):
"""Return list of scheduling block Id's associated with the given
sub_array_id
"""
ids = []
for key in sorted(DB.keys(pattern='scheduling_block/*')):
config = json.loads(DB.get(key))
if config['sub_array_id'] == sub_array_id:
ids.append(config['id'])
return ids | Return list of scheduling block Id's associated with the given
sub_array_id | Below is the instruction that describes the task:
### Input:
Return list of scheduling block Id's associated with the given
sub_array_id
### Response:
def get_subarray_sbi_ids(sub_array_id):
    """Return list of scheduling block Id's associated with the given
    sub_array_id
    """
    ids = []
    # Scan every scheduling-block key; sorted() makes the returned ids
    # deterministic (key order).
    for key in sorted(DB.keys(pattern='scheduling_block/*')):
        # Each value is a JSON-encoded config dict.
        config = json.loads(DB.get(key))
        if config['sub_array_id'] == sub_array_id:
            ids.append(config['id'])
    return ids |
def validateUserPars(configObj,input_dict):
""" Compares input parameter names specified by user with those already
recognized by the task.
Any parameters provided by the user that does not match a known
task parameter will be reported and a ValueError exception will be
raised.
"""
# check to see whether any input parameters are unexpected.
# Any unexpected parameters provided on input should be reported and
# the code should stop
plist = getFullParList(configObj)
extra_pars = []
for kw in input_dict:
if kw not in plist:
extra_pars.append(kw)
if len(extra_pars) > 0:
print ('='*40)
print ('The following input parameters were not recognized as valid inputs:')
for p in extra_pars:
print(" %s"%(p))
print('\nPlease check the spelling of the parameter(s) and try again...')
print('='*40)
raise ValueError | Compares input parameter names specified by user with those already
recognized by the task.
Any parameters provided by the user that does not match a known
task parameter will be reported and a ValueError exception will be
raised. | Below is the instruction that describes the task:
### Input:
Compares input parameter names specified by user with those already
recognized by the task.
Any parameters provided by the user that does not match a known
task parameter will be reported and a ValueError exception will be
raised.
### Response:
def validateUserPars(configObj,input_dict):
    """ Compares input parameter names specified by user with those already
        recognized by the task.

        Any parameters provided by the user that does not match a known
        task parameter will be reported and a ValueError exception will be
        raised.
    """
    # check to see whether any input parameters are unexpected.
    # Any unexpected parameters provided on input should be reported and
    # the code should stop
    plist = getFullParList(configObj)
    extra_pars = []
    for kw in input_dict:
        if kw not in plist:
            extra_pars.append(kw)
    if len(extra_pars) > 0:
        print ('='*40)
        print ('The following input parameters were not recognized as valid inputs:')
        for p in extra_pars:
            print(" %s"%(p))
        print('\nPlease check the spelling of the parameter(s) and try again...')
        print('='*40)
        # NOTE(review): raised without a message -- the offending names
        # are only printed to stdout above, not carried by the exception.
        raise ValueError |
def update(pkg, slot=None, fromrepo=None, refresh=False, binhost=None, **kwargs):
'''
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any emerge commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Updates the passed package (emerge --update package)
slot
Restrict the update to a particular slot. It will update to the
latest version within the slot.
fromrepo
Restrict the update to a particular repository. It will update to the
latest version within the repository.
binhost
has two options try and force.
try - tells emerge to try and install the package from a configured binhost.
force - forces emerge to install the package from a binhost otherwise it fails out.
Return a dict containing the new package names and versions::
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
CLI Example:
.. code-block:: bash
salt '*' pkg.update <package name>
'''
if salt.utils.data.is_true(refresh):
refresh_db()
full_atom = pkg
if slot is not None:
full_atom = '{0}:{1}'.format(full_atom, slot)
if fromrepo is not None:
full_atom = '{0}::{1}'.format(full_atom, fromrepo)
if binhost == 'try':
bin_opts = ['-g']
elif binhost == 'force':
bin_opts = ['-G']
else:
bin_opts = []
old = list_pkgs()
cmd = []
if salt.utils.systemd.has_scope(__context__) \
and __salt__['config.get']('systemd.scope', True):
cmd.extend(['systemd-run', '--scope'])
cmd.extend(['emerge',
'--ask', 'n',
'--quiet',
'--update',
'--newuse',
'--oneshot'])
cmd.extend(bin_opts)
cmd.append(full_atom)
call = __salt__['cmd.run_all'](cmd,
output_loglevel='trace',
python_shell=False)
if call['retcode'] != 0:
needed_changes = _process_emerge_err(call['stdout'], call['stderr'])
else:
needed_changes = []
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()
ret = salt.utils.data.compare_dicts(old, new)
if needed_changes:
raise CommandExecutionError(
'Problem encountered updating package(s)',
info={'needed_changes': needed_changes, 'changes': ret}
)
return ret | .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any emerge commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Updates the passed package (emerge --update package)
slot
Restrict the update to a particular slot. It will update to the
latest version within the slot.
fromrepo
Restrict the update to a particular repository. It will update to the
latest version within the repository.
binhost
has two options try and force.
try - tells emerge to try and install the package from a configured binhost.
force - forces emerge to install the package from a binhost otherwise it fails out.
Return a dict containing the new package names and versions::
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
CLI Example:
.. code-block:: bash
salt '*' pkg.update <package name> | Below is the instruction that describes the task:
### Input:
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any emerge commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Updates the passed package (emerge --update package)
slot
Restrict the update to a particular slot. It will update to the
latest version within the slot.
fromrepo
Restrict the update to a particular repository. It will update to the
latest version within the repository.
binhost
has two options try and force.
try - tells emerge to try and install the package from a configured binhost.
force - forces emerge to install the package from a binhost otherwise it fails out.
Return a dict containing the new package names and versions::
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
CLI Example:
.. code-block:: bash
salt '*' pkg.update <package name>
### Response:
def update(pkg, slot=None, fromrepo=None, refresh=False, binhost=None, **kwargs):
    '''
    .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
        On minions running systemd>=205, `systemd-run(1)`_ is now used to
        isolate commands which modify installed packages from the
        ``salt-minion`` daemon's control group. This is done to keep systemd
        from killing any emerge commands spawned by Salt when the
        ``salt-minion`` service is restarted. (see ``KillMode`` in the
        `systemd.kill(5)`_ manpage for more information). If desired, usage of
        `systemd-run(1)`_ can be suppressed by setting a :mod:`config option
        <salt.modules.config.get>` called ``systemd.scope``, with a value of
        ``False`` (no quotes).
    .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
    .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html

    Updates the passed package (emerge --update package)

    slot
        Restrict the update to a particular slot. It will update to the
        latest version within the slot.

    fromrepo
        Restrict the update to a particular repository. It will update to the
        latest version within the repository.

    binhost
        has two options try and force.
        try - tells emerge to try and install the package from a configured binhost.
        force - forces emerge to install the package from a binhost otherwise it fails out.

    Return a dict containing the new package names and versions::

        {'<package>': {'old': '<old-version>',
                       'new': '<new-version>'}}

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.update <package name>
    '''
    if salt.utils.data.is_true(refresh):
        refresh_db()

    # Build the full package atom: name[:slot][::repo].
    full_atom = pkg
    if slot is not None:
        full_atom = '{0}:{1}'.format(full_atom, slot)
    if fromrepo is not None:
        full_atom = '{0}::{1}'.format(full_atom, fromrepo)

    # Map the binhost option onto emerge's binary-package flags
    # (-g: try binpkgs first, -G: binpkgs only).
    if binhost == 'try':
        bin_opts = ['-g']
    elif binhost == 'force':
        bin_opts = ['-G']
    else:
        bin_opts = []

    # Snapshot the installed set so the result can be reported as a diff.
    old = list_pkgs()
    cmd = []
    if salt.utils.systemd.has_scope(__context__) \
            and __salt__['config.get']('systemd.scope', True):
        # Isolate emerge from the salt-minion control group (see docstring).
        cmd.extend(['systemd-run', '--scope'])
    cmd.extend(['emerge',
                '--ask', 'n',
                '--quiet',
                '--update',
                '--newuse',
                '--oneshot'])
    cmd.extend(bin_opts)
    cmd.append(full_atom)

    call = __salt__['cmd.run_all'](cmd,
                                   output_loglevel='trace',
                                   python_shell=False)
    if call['retcode'] != 0:
        # Extract the blocker/error details from emerge's output.
        needed_changes = _process_emerge_err(call['stdout'], call['stderr'])
    else:
        needed_changes = []

    # Invalidate the cached package list, then diff old vs. new.
    __context__.pop('pkg.list_pkgs', None)
    new = list_pkgs()
    ret = salt.utils.data.compare_dicts(old, new)

    if needed_changes:
        # Report the failure but include whatever changes did land.
        raise CommandExecutionError(
            'Problem encountered updating package(s)',
            info={'needed_changes': needed_changes, 'changes': ret}
        )

    return ret |
def place_orders(self, market_id, instructions, customer_ref=None, market_version=None,
customer_strategy_ref=None, async_=None, session=None, lightweight=None):
"""
Place new orders into market.
:param str market_id: The market id these orders are to be placed on
:param list instructions: The number of place instructions
:param str customer_ref: Optional parameter allowing the client to pass a unique string
(up to 32 chars) that is used to de-dupe mistaken re-submissions
:param dict market_version: Optional parameter allowing the client to specify which
version of the market the orders should be placed on, e.g. "{'version': 123456}"
:param str customer_strategy_ref: An optional reference customers can use to specify
which strategy has sent the order
:param bool async_: An optional flag (not setting equates to false) which specifies if
the orders should be placed asynchronously
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: resources.PlaceOrders
"""
params = clean_locals(locals())
method = '%s%s' % (self.URI, 'placeOrders')
(response, elapsed_time) = self.request(method, params, session)
return self.process_response(response, resources.PlaceOrders, elapsed_time, lightweight) | Place new orders into market.
:param str market_id: The market id these orders are to be placed on
:param list instructions: The number of place instructions
:param str customer_ref: Optional parameter allowing the client to pass a unique string
(up to 32 chars) that is used to de-dupe mistaken re-submissions
:param dict market_version: Optional parameter allowing the client to specify which
version of the market the orders should be placed on, e.g. "{'version': 123456}"
:param str customer_strategy_ref: An optional reference customers can use to specify
which strategy has sent the order
:param bool async_: An optional flag (not setting equates to false) which specifies if
the orders should be placed asynchronously
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: resources.PlaceOrders | Below is the instruction that describes the task:
### Input:
Place new orders into market.
:param str market_id: The market id these orders are to be placed on
:param list instructions: The number of place instructions
:param str customer_ref: Optional parameter allowing the client to pass a unique string
(up to 32 chars) that is used to de-dupe mistaken re-submissions
:param dict market_version: Optional parameter allowing the client to specify which
version of the market the orders should be placed on, e.g. "{'version': 123456}"
:param str customer_strategy_ref: An optional reference customers can use to specify
which strategy has sent the order
:param bool async_: An optional flag (not setting equates to false) which specifies if
the orders should be placed asynchronously
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: resources.PlaceOrders
### Response:
def place_orders(self, market_id, instructions, customer_ref=None, market_version=None,
                 customer_strategy_ref=None, async_=None, session=None, lightweight=None):
    """
    Place new orders into market.

    :param str market_id: The market id these orders are to be placed on
    :param list instructions: The number of place instructions
    :param str customer_ref: Optional parameter allowing the client to pass a unique string
        (up to 32 chars) that is used to de-dupe mistaken re-submissions
    :param dict market_version: Optional parameter allowing the client to specify which
        version of the market the orders should be placed on, e.g. "{'version': 123456}"
    :param str customer_strategy_ref: An optional reference customers can use to specify
        which strategy has sent the order
    :param bool async_: An optional flag (not setting equates to false) which specifies if
        the orders should be placed asynchronously
    :param requests.session session: Requests session object
    :param bool lightweight: If True will return dict not a resource

    :rtype: resources.PlaceOrders
    """
    # NOTE(review): clean_locals() snapshots this call's arguments via locals(),
    # presumably dropping self/None entries — so no new local variables may be
    # introduced above this line or they would leak into the request params.
    params = clean_locals(locals())
    # Endpoint is the service URI prefix plus the operation name.
    method = '%s%s' % (self.URI, 'placeOrders')
    (response, elapsed_time) = self.request(method, params, session)
    # lightweight=True short-circuits resource wrapping and returns the raw dict.
    return self.process_response(response, resources.PlaceOrders, elapsed_time, lightweight)
def hex_to_rgb(hex_value):
"""
Convert a hexadecimal color value to a 3-tuple of integers
suitable for use in an ``rgb()`` triplet specifying that color.
"""
hex_value = normalize_hex(hex_value)
hex_value = int(hex_value[1:], 16)
return IntegerRGB(
hex_value >> 16,
hex_value >> 8 & 0xff,
hex_value & 0xff
) | Convert a hexadecimal color value to a 3-tuple of integers
suitable for use in an ``rgb()`` triplet specifying that color. | Below is the instruction that describes the task:
### Input:
Convert a hexadecimal color value to a 3-tuple of integers
suitable for use in an ``rgb()`` triplet specifying that color.
### Response:
def hex_to_rgb(hex_value):
    """
    Convert a hexadecimal color value to a 3-tuple of integers
    suitable for use in an ``rgb()`` triplet specifying that color.
    """
    # Normalize to the canonical '#rrggbb' form, then parse the six hex
    # digits (the leading '#' is skipped) as one 24-bit integer.
    value = int(normalize_hex(hex_value)[1:], 16)
    red = (value >> 16) & 0xff
    green = (value >> 8) & 0xff
    blue = value & 0xff
    return IntegerRGB(red, green, blue)
def import_key(keyid):
"""Import a key, either ASCII armored, or a GPG key id.
@param keyid: the key in ASCII armor format, or a GPG key id.
@raises SystemExit() via sys.exit() on failure.
"""
try:
return fetch_import_key(keyid)
except GPGKeyError as e:
error_out("Could not import key: {}".format(str(e))) | Import a key, either ASCII armored, or a GPG key id.
@param keyid: the key in ASCII armor format, or a GPG key id.
@raises SystemExit() via sys.exit() on failure. | Below is the instruction that describes the task:
### Input:
Import a key, either ASCII armored, or a GPG key id.
@param keyid: the key in ASCII armor format, or a GPG key id.
@raises SystemExit() via sys.exit() on failure.
### Response:
def import_key(keyid):
    """Import a GPG key, supplied either as ASCII armour or as a key id.

    @param keyid: the key in ASCII armor format, or a GPG key id.
    @raises SystemExit() via sys.exit() on failure.
    """
    try:
        imported = fetch_import_key(keyid)
    except GPGKeyError as e:
        # error_out() is expected to terminate the process via sys.exit().
        error_out("Could not import key: {}".format(str(e)))
    else:
        return imported
def definition_to_message(
definition, message=None, table_of_contents=None, heading_level=None):
"""Helper function to render a definition to a message.
:param definition: A definition dictionary (see definitions package).
:type definition: dict
:param message: The message that the definition should be appended to.
:type message: parameters.message.Message
:param table_of_contents: Table of contents that the headings should be
included in.
:type message: parameters.message.Message
:param heading_level: Optional style to apply to the definition
heading. See HEADING_LOOKUPS
:type heading_level: int
:returns: Message
:rtype: str
"""
if message is None:
message = m.Message()
if table_of_contents is None:
table_of_contents = m.Message()
if heading_level:
_create_section_header(
message,
table_of_contents,
definition['name'].replace(' ', '-'),
definition['name'],
heading_level=heading_level)
else:
header = m.Paragraph(m.ImportantText(definition['name']))
message.add(header)
# If the definition has an icon, we put the icon and description side by
# side in a table otherwise just show the description as a paragraph
url = _definition_icon_url(definition)
if url is None:
message.add(m.Paragraph(definition['description']))
if 'citations' in definition:
_citations_to_message(message, definition)
else:
LOGGER.info('Creating mini table for definition description: ' + url)
table = m.Table(style_class='table table-condensed')
row = m.Row()
row.add(m.Cell(m.Image(url, **MEDIUM_ICON_STYLE)))
row.add(m.Cell(definition['description']))
table.add(row)
for citation in definition['citations']:
if citation['text'] in [None, '']:
continue
row = m.Row()
row.add(m.Cell(''))
if citation['link'] in [None, '']:
row.add(m.Cell(citation['text']))
else:
row.add(m.Cell(m.Link(citation['link'], citation['text'])))
table.add(row)
message.add(table)
url = _definition_screenshot_url(definition)
if url:
message.add(m.Paragraph(m.Image(url), style_class='text-center'))
# types contains e.g. hazard_all
if 'types' in definition:
for sub_definition in definition['types']:
definition_to_message(
sub_definition,
message,
table_of_contents,
heading_level=3)
#
# Notes section if available
#
if 'notes' in definition:
# Start a notes details group too since we have an exposure
message.add(m.Heading(
tr('Notes:'), **DETAILS_STYLE))
message.add(m.Heading(
tr('General notes:'), **DETAILS_SUBGROUP_STYLE))
bullets = m.BulletedList()
for note in definition['notes']:
if isinstance(note, dict):
bullets = _add_dict_to_bullets(bullets, note)
elif note:
bullets.add(m.Text(note))
message.add(bullets)
if 'citations' in definition:
_citations_to_message(message, definition)
# This only for EQ
if 'earthquake_fatality_models' in definition:
current_function = current_earthquake_model_name()
paragraph = m.Paragraph(tr(
'The following earthquake fatality models are available in '
'InaSAFE. Note that you need to set one of these as the '
'active model in InaSAFE Options. The currently active model '
'is: '),
m.ImportantText(current_function)
)
message.add(paragraph)
models_definition = definition['earthquake_fatality_models']
for model in models_definition:
message.add(m.Heading(model['name'], **DETAILS_SUBGROUP_STYLE))
if 'description' in model:
paragraph = m.Paragraph(model['description'])
message.add(paragraph)
for note in model['notes']:
paragraph = m.Paragraph(note)
message.add(paragraph)
_citations_to_message(message, model)
for exposure in exposure_all:
extra_exposure_notes = specific_notes(definition, exposure)
if extra_exposure_notes:
title = tr('Notes for exposure : {exposure_name}').format(
exposure_name=exposure['name'])
message.add(m.Heading(title, **DETAILS_SUBGROUP_STYLE))
bullets = m.BulletedList()
for note in extra_exposure_notes:
if isinstance(note, dict):
bullets = _add_dict_to_bullets(bullets, note)
elif note:
bullets.add(m.Text(note))
message.add(bullets)
if 'continuous_notes' in definition:
message.add(m.Heading(
tr('Notes for continuous datasets:'),
**DETAILS_SUBGROUP_STYLE))
bullets = m.BulletedList()
for note in definition['continuous_notes']:
bullets.add(m.Text(note))
message.add(bullets)
if 'classified_notes' in definition:
message.add(m.Heading(
tr('Notes for classified datasets:'),
**DETAILS_SUBGROUP_STYLE))
bullets = m.BulletedList()
for note in definition['classified_notes']:
bullets.add(m.Text(note))
message.add(bullets)
if 'single_event_notes' in definition:
message.add(
m.Heading(tr('Notes for single events'), **DETAILS_STYLE))
if len(definition['single_event_notes']) < 1:
message.add(m.Paragraph(tr('No single event notes defined.')))
else:
bullets = m.BulletedList()
for note in definition['single_event_notes']:
bullets.add(m.Text(note))
message.add(bullets)
if 'multi_event_notes' in definition:
message.add(
m.Heading(
tr('Notes for multi events / scenarios:'),
**DETAILS_STYLE))
if len(definition['multi_event_notes']) < 1:
message.add(m.Paragraph(tr('No multi-event notes defined.')))
else:
bullets = m.BulletedList()
for note in definition['multi_event_notes']:
bullets.add(m.Text(note))
message.add(bullets)
if 'actions' in definition:
message.add(m.Paragraph(m.ImportantText(tr('Actions:'))))
bullets = m.BulletedList()
for note in definition['actions']:
if isinstance(note, dict):
bullets = _add_dict_to_bullets(bullets, note)
elif note:
bullets.add(m.Text(note))
message.add(bullets)
for exposure in exposure_all:
extra_exposure_actions = specific_actions(definition, exposure)
if extra_exposure_actions:
title = tr('Actions for exposure : {exposure_name}').format(
exposure_name=exposure['name'])
message.add(m.Heading(title, **DETAILS_SUBGROUP_STYLE))
bullets = m.BulletedList()
for note in extra_exposure_actions:
if isinstance(note, dict):
bullets = _add_dict_to_bullets(bullets, note)
elif note:
bullets.add(m.Text(note))
message.add(bullets)
if 'continuous_hazard_units' in definition:
message.add(m.Paragraph(m.ImportantText(tr('Units:'))))
table = m.Table(style_class='table table-condensed table-striped')
row = m.Row()
row.add(m.Cell(tr('Name'), header=True))
row.add(m.Cell(tr('Plural'), header=True))
row.add(m.Cell(tr('Abbreviation'), header=True))
row.add(m.Cell(tr('Details'), header=True))
table.add(row)
for unit in definition['continuous_hazard_units']:
row = m.Row()
row.add(m.Cell(unit['name']))
row.add(m.Cell(unit['plural_name']))
row.add(m.Cell(unit['abbreviation']))
row.add(m.Cell(unit['description']))
table.add(row)
message.add(table)
if 'fields' in definition:
message.add(m.Paragraph(m.ImportantText(tr('Fields:'))))
table = _create_fields_table()
if 'extra_fields' in definition:
all_fields = definition['fields'] + definition['extra_fields']
else:
all_fields = definition['fields']
for field in all_fields:
_add_field_to_table(field, table)
message.add(table)
if 'classifications' in definition:
message.add(m.Heading(
tr('Hazard classifications'),
**DETAILS_STYLE))
message.add(m.Paragraph(
definitions.hazard_classification['description']))
for inasafe_class in definition['classifications']:
definition_to_message(
inasafe_class,
message,
table_of_contents,
heading_level=3)
if 'classes' in definition:
message.add(m.Paragraph(m.ImportantText(tr('Classes:'))))
is_hazard = definition['type'] == hazard_classification_type
if is_hazard:
table = _make_defaults_hazard_table()
else:
table = _make_defaults_exposure_table()
for inasafe_class in definition['classes']:
row = m.Row()
if is_hazard:
# name() on QColor returns its hex code
if 'color' in inasafe_class:
colour = inasafe_class['color'].name()
row.add(m.Cell(
'', attributes='style="background: %s;"' % colour))
else:
row.add(m.Cell(' '))
row.add(m.Cell(inasafe_class['name']))
if is_hazard:
if 'affected' in inasafe_class:
row.add(m.Cell(tr(inasafe_class['affected'])))
else:
row.add(m.Cell(tr('unspecified')))
if is_hazard:
if inasafe_class.get('fatality_rate') is None or \
inasafe_class.get('fatality_rate') < 0:
row.add(m.Cell(tr('unspecified')))
elif inasafe_class.get('fatality_rate') > 0:
# we want to show the rate as a scientific notation
rate = html_scientific_notation_rate(
inasafe_class['fatality_rate'])
rate = '%s%%' % rate
row.add(m.Cell(rate))
else: # == 0
row.add(m.Cell('0%'))
if is_hazard:
if 'displacement_rate' in inasafe_class:
rate = inasafe_class['displacement_rate'] * 100
rate = '%.0f%%' % rate
row.add(m.Cell(rate))
else:
row.add(m.Cell(tr('unspecified')))
if 'string_defaults' in inasafe_class:
defaults = None
for default in inasafe_class['string_defaults']:
if defaults:
defaults += ',%s' % default
else:
defaults = default
row.add(m.Cell(defaults))
else:
row.add(m.Cell(tr('unspecified')))
if is_hazard:
# Min may be a single value or a dict of values so we need
# to check type and deal with it accordingly
if 'numeric_default_min' in inasafe_class:
if isinstance(inasafe_class['numeric_default_min'], dict):
bullets = m.BulletedList()
minima = inasafe_class['numeric_default_min']
for key, value in sorted(minima.items()):
bullets.add('%s : %s' % (key, value))
row.add(m.Cell(bullets))
else:
row.add(m.Cell(inasafe_class['numeric_default_min']))
else:
row.add(m.Cell(tr('unspecified')))
if is_hazard:
# Max may be a single value or a dict of values so we need
# to check type and deal with it accordingly
if 'numeric_default_max' in inasafe_class:
if isinstance(inasafe_class['numeric_default_max'], dict):
bullets = m.BulletedList()
maxima = inasafe_class['numeric_default_max']
for key, value in sorted(maxima.items()):
bullets.add('%s : %s' % (key, value))
row.add(m.Cell(bullets))
else:
row.add(m.Cell(inasafe_class['numeric_default_max']))
else:
row.add(m.Cell(tr('unspecified')))
table.add(row)
# Description goes in its own row with spanning
row = m.Row()
row.add(m.Cell(''))
row.add(m.Cell(inasafe_class['description'], span=7))
table.add(row)
# For hazard classes we also add the 'not affected' class manually:
if definition['type'] == definitions.hazard_classification_type:
row = m.Row()
colour = definitions.not_exposed_class['color'].name()
row.add(m.Cell(
'', attributes='style="background: %s;"' % colour))
description = definitions.not_exposed_class['description']
row.add(m.Cell(description, span=7))
table.add(row)
message.add(table)
if 'affected' in definition:
if definition['affected']:
message.add(m.Paragraph(tr(
'Exposure entities in this class ARE considered affected')))
else:
message.add(m.Paragraph(tr(
'Exposure entities in this class are NOT considered '
'affected')))
if 'optional' in definition:
if definition['optional']:
message.add(m.Paragraph(tr(
'This class is NOT required in the hazard keywords.')))
else:
message.add(m.Paragraph(tr(
'This class IS required in the hazard keywords.')))
return message | Helper function to render a definition to a message.
:param definition: A definition dictionary (see definitions package).
:type definition: dict
:param message: The message that the definition should be appended to.
:type message: parameters.message.Message
:param table_of_contents: Table of contents that the headings should be
included in.
:type message: parameters.message.Message
:param heading_level: Optional style to apply to the definition
heading. See HEADING_LOOKUPS
:type heading_level: int
:returns: Message
:rtype: str | Below is the instruction that describes the task:
### Input:
Helper function to render a definition to a message.
:param definition: A definition dictionary (see definitions package).
:type definition: dict
:param message: The message that the definition should be appended to.
:type message: parameters.message.Message
:param table_of_contents: Table of contents that the headings should be
included in.
:type message: parameters.message.Message
:param heading_level: Optional style to apply to the definition
heading. See HEADING_LOOKUPS
:type heading_level: int
:returns: Message
:rtype: str
### Response:
def definition_to_message(
        definition, message=None, table_of_contents=None, heading_level=None):
    """Helper function to render a definition to a message.
    :param definition: A definition dictionary (see definitions package).
    :type definition: dict
    :param message: The message that the definition should be appended to.
    :type message: parameters.message.Message
    :param table_of_contents: Table of contents that the headings should be
        included in.
    :type message: parameters.message.Message
    :param heading_level: Optional style to apply to the definition
        heading. See HEADING_LOOKUPS
    :type heading_level: int
    :returns: Message
    :rtype: str
    """
    # Lazily create the output containers so callers may omit them.
    if message is None:
        message = m.Message()
    if table_of_contents is None:
        table_of_contents = m.Message()
    # With a heading level the name becomes a linkable TOC section header;
    # otherwise it is rendered as a plain emphasised paragraph.
    if heading_level:
        _create_section_header(
            message,
            table_of_contents,
            definition['name'].replace(' ', '-'),
            definition['name'],
            heading_level=heading_level)
    else:
        header = m.Paragraph(m.ImportantText(definition['name']))
        message.add(header)
    # If the definition has an icon, we put the icon and description side by
    # side in a table otherwise just show the description as a paragraph
    url = _definition_icon_url(definition)
    if url is None:
        message.add(m.Paragraph(definition['description']))
        if 'citations' in definition:
            _citations_to_message(message, definition)
    else:
        LOGGER.info('Creating mini table for definition description: ' + url)
        table = m.Table(style_class='table table-condensed')
        row = m.Row()
        row.add(m.Cell(m.Image(url, **MEDIUM_ICON_STYLE)))
        row.add(m.Cell(definition['description']))
        table.add(row)
        # Citations get one row each, with an empty cell under the icon column.
        for citation in definition['citations']:
            if citation['text'] in [None, '']:
                continue
            row = m.Row()
            row.add(m.Cell(''))
            if citation['link'] in [None, '']:
                row.add(m.Cell(citation['text']))
            else:
                row.add(m.Cell(m.Link(citation['link'], citation['text'])))
            table.add(row)
        message.add(table)
    # Optional screenshot is shown below the description.
    url = _definition_screenshot_url(definition)
    if url:
        message.add(m.Paragraph(m.Image(url), style_class='text-center'))
    # types contains e.g. hazard_all — each sub-definition is rendered
    # recursively as a level-3 section.
    if 'types' in definition:
        for sub_definition in definition['types']:
            definition_to_message(
                sub_definition,
                message,
                table_of_contents,
                heading_level=3)
    #
    # Notes section if available
    #
    if 'notes' in definition:
        # Start a notes details group too since we have an exposure
        message.add(m.Heading(
            tr('Notes:'), **DETAILS_STYLE))
        message.add(m.Heading(
            tr('General notes:'), **DETAILS_SUBGROUP_STYLE))
        bullets = m.BulletedList()
        for note in definition['notes']:
            # dict-valued notes carry nested structure handled by the helper.
            if isinstance(note, dict):
                bullets = _add_dict_to_bullets(bullets, note)
            elif note:
                bullets.add(m.Text(note))
        message.add(bullets)
        if 'citations' in definition:
            _citations_to_message(message, definition)
    # This only for EQ
    if 'earthquake_fatality_models' in definition:
        current_function = current_earthquake_model_name()
        paragraph = m.Paragraph(tr(
            'The following earthquake fatality models are available in '
            'InaSAFE. Note that you need to set one of these as the '
            'active model in InaSAFE Options. The currently active model '
            'is: '),
            m.ImportantText(current_function)
        )
        message.add(paragraph)
        models_definition = definition['earthquake_fatality_models']
        for model in models_definition:
            message.add(m.Heading(model['name'], **DETAILS_SUBGROUP_STYLE))
            if 'description' in model:
                paragraph = m.Paragraph(model['description'])
                message.add(paragraph)
            for note in model['notes']:
                paragraph = m.Paragraph(note)
                message.add(paragraph)
            _citations_to_message(message, model)
    # Per-exposure notes (relies on the module-level exposure_all list).
    for exposure in exposure_all:
        extra_exposure_notes = specific_notes(definition, exposure)
        if extra_exposure_notes:
            title = tr('Notes for exposure : {exposure_name}').format(
                exposure_name=exposure['name'])
            message.add(m.Heading(title, **DETAILS_SUBGROUP_STYLE))
            bullets = m.BulletedList()
            for note in extra_exposure_notes:
                if isinstance(note, dict):
                    bullets = _add_dict_to_bullets(bullets, note)
                elif note:
                    bullets.add(m.Text(note))
            message.add(bullets)
    if 'continuous_notes' in definition:
        message.add(m.Heading(
            tr('Notes for continuous datasets:'),
            **DETAILS_SUBGROUP_STYLE))
        bullets = m.BulletedList()
        for note in definition['continuous_notes']:
            bullets.add(m.Text(note))
        message.add(bullets)
    if 'classified_notes' in definition:
        message.add(m.Heading(
            tr('Notes for classified datasets:'),
            **DETAILS_SUBGROUP_STYLE))
        bullets = m.BulletedList()
        for note in definition['classified_notes']:
            bullets.add(m.Text(note))
        message.add(bullets)
    if 'single_event_notes' in definition:
        message.add(
            m.Heading(tr('Notes for single events'), **DETAILS_STYLE))
        if len(definition['single_event_notes']) < 1:
            message.add(m.Paragraph(tr('No single event notes defined.')))
        else:
            bullets = m.BulletedList()
            for note in definition['single_event_notes']:
                bullets.add(m.Text(note))
            message.add(bullets)
    if 'multi_event_notes' in definition:
        message.add(
            m.Heading(
                tr('Notes for multi events / scenarios:'),
                **DETAILS_STYLE))
        if len(definition['multi_event_notes']) < 1:
            message.add(m.Paragraph(tr('No multi-event notes defined.')))
        else:
            bullets = m.BulletedList()
            for note in definition['multi_event_notes']:
                bullets.add(m.Text(note))
            message.add(bullets)
    if 'actions' in definition:
        message.add(m.Paragraph(m.ImportantText(tr('Actions:'))))
        bullets = m.BulletedList()
        for note in definition['actions']:
            if isinstance(note, dict):
                bullets = _add_dict_to_bullets(bullets, note)
            elif note:
                bullets.add(m.Text(note))
        message.add(bullets)
    # Per-exposure actions, mirroring the per-exposure notes loop above.
    for exposure in exposure_all:
        extra_exposure_actions = specific_actions(definition, exposure)
        if extra_exposure_actions:
            title = tr('Actions for exposure : {exposure_name}').format(
                exposure_name=exposure['name'])
            message.add(m.Heading(title, **DETAILS_SUBGROUP_STYLE))
            bullets = m.BulletedList()
            for note in extra_exposure_actions:
                if isinstance(note, dict):
                    bullets = _add_dict_to_bullets(bullets, note)
                elif note:
                    bullets.add(m.Text(note))
            message.add(bullets)
    if 'continuous_hazard_units' in definition:
        message.add(m.Paragraph(m.ImportantText(tr('Units:'))))
        table = m.Table(style_class='table table-condensed table-striped')
        row = m.Row()
        row.add(m.Cell(tr('Name'), header=True))
        row.add(m.Cell(tr('Plural'), header=True))
        row.add(m.Cell(tr('Abbreviation'), header=True))
        row.add(m.Cell(tr('Details'), header=True))
        table.add(row)
        for unit in definition['continuous_hazard_units']:
            row = m.Row()
            row.add(m.Cell(unit['name']))
            row.add(m.Cell(unit['plural_name']))
            row.add(m.Cell(unit['abbreviation']))
            row.add(m.Cell(unit['description']))
            table.add(row)
        message.add(table)
    if 'fields' in definition:
        message.add(m.Paragraph(m.ImportantText(tr('Fields:'))))
        table = _create_fields_table()
        if 'extra_fields' in definition:
            all_fields = definition['fields'] + definition['extra_fields']
        else:
            all_fields = definition['fields']
        for field in all_fields:
            _add_field_to_table(field, table)
        message.add(table)
    if 'classifications' in definition:
        message.add(m.Heading(
            tr('Hazard classifications'),
            **DETAILS_STYLE))
        message.add(m.Paragraph(
            definitions.hazard_classification['description']))
        for inasafe_class in definition['classifications']:
            definition_to_message(
                inasafe_class,
                message,
                table_of_contents,
                heading_level=3)
    if 'classes' in definition:
        message.add(m.Paragraph(m.ImportantText(tr('Classes:'))))
        # Hazard classifications get extra columns (colour, affected,
        # fatality and displacement rates, numeric defaults).
        is_hazard = definition['type'] == hazard_classification_type
        if is_hazard:
            table = _make_defaults_hazard_table()
        else:
            table = _make_defaults_exposure_table()
        for inasafe_class in definition['classes']:
            row = m.Row()
            if is_hazard:
                # name() on QColor returns its hex code
                if 'color' in inasafe_class:
                    colour = inasafe_class['color'].name()
                    row.add(m.Cell(
                        '', attributes='style="background: %s;"' % colour))
                else:
                    row.add(m.Cell(' '))
            row.add(m.Cell(inasafe_class['name']))
            if is_hazard:
                if 'affected' in inasafe_class:
                    row.add(m.Cell(tr(inasafe_class['affected'])))
                else:
                    row.add(m.Cell(tr('unspecified')))
            if is_hazard:
                # Negative or missing rates are treated as "unspecified".
                if inasafe_class.get('fatality_rate') is None or \
                        inasafe_class.get('fatality_rate') < 0:
                    row.add(m.Cell(tr('unspecified')))
                elif inasafe_class.get('fatality_rate') > 0:
                    # we want to show the rate as a scientific notation
                    rate = html_scientific_notation_rate(
                        inasafe_class['fatality_rate'])
                    rate = '%s%%' % rate
                    row.add(m.Cell(rate))
                else:  # == 0
                    row.add(m.Cell('0%'))
            if is_hazard:
                if 'displacement_rate' in inasafe_class:
                    rate = inasafe_class['displacement_rate'] * 100
                    rate = '%.0f%%' % rate
                    row.add(m.Cell(rate))
                else:
                    row.add(m.Cell(tr('unspecified')))
            if 'string_defaults' in inasafe_class:
                defaults = None
                for default in inasafe_class['string_defaults']:
                    if defaults:
                        defaults += ',%s' % default
                    else:
                        defaults = default
                row.add(m.Cell(defaults))
            else:
                row.add(m.Cell(tr('unspecified')))
            if is_hazard:
                # Min may be a single value or a dict of values so we need
                # to check type and deal with it accordingly
                if 'numeric_default_min' in inasafe_class:
                    if isinstance(inasafe_class['numeric_default_min'], dict):
                        bullets = m.BulletedList()
                        minima = inasafe_class['numeric_default_min']
                        for key, value in sorted(minima.items()):
                            bullets.add('%s : %s' % (key, value))
                        row.add(m.Cell(bullets))
                    else:
                        row.add(m.Cell(inasafe_class['numeric_default_min']))
                else:
                    row.add(m.Cell(tr('unspecified')))
            if is_hazard:
                # Max may be a single value or a dict of values so we need
                # to check type and deal with it accordingly
                if 'numeric_default_max' in inasafe_class:
                    if isinstance(inasafe_class['numeric_default_max'], dict):
                        bullets = m.BulletedList()
                        maxima = inasafe_class['numeric_default_max']
                        for key, value in sorted(maxima.items()):
                            bullets.add('%s : %s' % (key, value))
                        row.add(m.Cell(bullets))
                    else:
                        row.add(m.Cell(inasafe_class['numeric_default_max']))
                else:
                    row.add(m.Cell(tr('unspecified')))
            table.add(row)
            # Description goes in its own row with spanning
            # (span=7 appears to match the widest, hazard, table layout —
            # verify against _make_defaults_hazard_table if columns change).
            row = m.Row()
            row.add(m.Cell(''))
            row.add(m.Cell(inasafe_class['description'], span=7))
            table.add(row)
        # For hazard classes we also add the 'not affected' class manually:
        if definition['type'] == definitions.hazard_classification_type:
            row = m.Row()
            colour = definitions.not_exposed_class['color'].name()
            row.add(m.Cell(
                '', attributes='style="background: %s;"' % colour))
            description = definitions.not_exposed_class['description']
            row.add(m.Cell(description, span=7))
            table.add(row)
        message.add(table)
    if 'affected' in definition:
        if definition['affected']:
            message.add(m.Paragraph(tr(
                'Exposure entities in this class ARE considered affected')))
        else:
            message.add(m.Paragraph(tr(
                'Exposure entities in this class are NOT considered '
                'affected')))
    if 'optional' in definition:
        if definition['optional']:
            message.add(m.Paragraph(tr(
                'This class is NOT required in the hazard keywords.')))
        else:
            message.add(m.Paragraph(tr(
                'This class IS required in the hazard keywords.')))
    return message
def get_data(self, df):
"""Returns the chart data"""
chart_data = []
if len(self.groupby) > 0:
groups = df.groupby(self.groupby)
else:
groups = [((), df)]
for keys, data in groups:
chart_data.extend([{
'key': self.labelify(keys, column),
'values': data[column].tolist()}
for column in self.columns])
return chart_data | Returns the chart data | Below is the instruction that describes the task:
### Input:
Returns the chart data
### Response:
def get_data(self, df):
    """Returns the chart data"""
    # Without grouping columns, treat the whole frame as a single
    # anonymous group so the loop below covers both cases.
    if self.groupby:
        grouped = df.groupby(self.groupby)
    else:
        grouped = [((), df)]
    chart_data = []
    for keys, frame in grouped:
        for column in self.columns:
            chart_data.append({
                'key': self.labelify(keys, column),
                'values': frame[column].tolist(),
            })
    return chart_data
def get_uuid(length=32, version=1):
"""
Returns a unique ID of a given length.
User `version=2` for cross-systems uniqueness.
"""
if version == 1:
return uuid.uuid1().hex[:length]
else:
return uuid.uuid4().hex[:length] | Returns a unique ID of a given length.
User `version=2` for cross-systems uniqueness. | Below is the the instruction that describes the task:
### Input:
Returns a unique ID of a given length.
User `version=2` for cross-systems uniqueness.
### Response:
def get_uuid(length=32, version=1):
    """
    Return a unique hexadecimal ID truncated to ``length`` characters.

    Use ``version=2`` (or any value other than 1) for cross-system
    uniqueness; that selects the random UUID4 generator instead of the
    host/time-based UUID1 one.
    """
    generate = uuid.uuid1 if version == 1 else uuid.uuid4
    return generate().hex[:length]
def put_object(
connection, container: str, object_name: str,
contents, content_type: str) -> None:
"""
Put file to objectstore
container == "path/in/store"
object_name = "your_file_name.txt"
contents=thefiledata (fileobject) open('ourfile', 'rb')
content_type='csv' / 'application/json' .. etc
"""
connection.put_object(
container, object_name, contents=contents,
content_type=content_type) | Put file to objectstore
container == "path/in/store"
object_name = "your_file_name.txt"
contents=thefiledata (fileobject) open('ourfile', 'rb')
content_type='csv' / 'application/json' .. etc | Below is the instruction that describes the task:
### Input:
Put file to objectstore
container == "path/in/store"
object_name = "your_file_name.txt"
contents=thefiledata (fileobject) open('ourfile', 'rb')
content_type='csv' / 'application/json' .. etc
### Response:
def put_object(
        connection, container: str, object_name: str,
        contents, content_type: str) -> None:
    """
    Upload one object to the objectstore.

    container == "path/in/store"
    object_name = "your_file_name.txt"
    contents=thefiledata (fileobject) open('ourfile', 'rb')
    content_type='csv' / 'application/json' .. etc
    """
    # Thin delegation; the connection object owns all transport concerns.
    connection.put_object(
        container,
        object_name,
        contents=contents,
        content_type=content_type,
    )
def run_module(self, resource, args=None, stdin=None, stdout=None):
"""Run `resource` module
Returns a `rope.base.oi.doa.PythonFileRunner` object for
controlling the process.
"""
perform_doa = self.project.prefs.get('perform_doi', True)
perform_doa = self.project.prefs.get('perform_doa', perform_doa)
receiver = self.object_info.doa_data_received
if not perform_doa:
receiver = None
runner = rope.base.oi.doa.PythonFileRunner(
self, resource, args, stdin, stdout, receiver)
runner.add_finishing_observer(self.module_cache.forget_all_data)
runner.run()
return runner | Run `resource` module
Returns a `rope.base.oi.doa.PythonFileRunner` object for
controlling the process. | Below is the instruction that describes the task:
### Input:
Run `resource` module
Returns a `rope.base.oi.doa.PythonFileRunner` object for
controlling the process.
### Response:
def run_module(self, resource, args=None, stdin=None, stdout=None):
    """Run `resource` module

    Returns a `rope.base.oi.doa.PythonFileRunner` object for
    controlling the process.
    """
    # 'perform_doa' takes precedence; 'perform_doi' (appearing to be an
    # older spelling of the same preference) supplies its default, which
    # itself defaults to True.
    wants_doa = self.project.prefs.get(
        'perform_doa', self.project.prefs.get('perform_doi', True))
    receiver = self.object_info.doa_data_received if wants_doa else None
    runner = rope.base.oi.doa.PythonFileRunner(
        self, resource, args, stdin, stdout, receiver)
    # Cached module data is stale once the run finishes.
    runner.add_finishing_observer(self.module_cache.forget_all_data)
    runner.run()
    return runner
def add(cls, module_name, msg=''):
'''
Until the guard is dropped again,
disallow imports of the module given by ``module_name``.
If the module is imported while the guard is in place
an ``ImportGuard`` is raised. An additional message on why
the module cannot be imported can optionally be specified
using the parameter ``msg``.
If multiple guards are placed on the same module, all these guards
have to be dropped before the module can be imported again.
'''
if module_name in sys.modules:
raise ImportGuard(
'Module to guard has already been imported: '
+ module_name
)
cls._guards.setdefault(module_name, [])
cls._guards[module_name].append(msg)
cls._num_entries += 1
cls._install() | Until the guard is dropped again,
disallow imports of the module given by ``module_name``.
If the module is imported while the guard is in place
an ``ImportGuard`` is raised. An additional message on why
the module cannot be imported can optionally be specified
using the parameter ``msg``.
If multiple guards are placed on the same module, all these guards
have to be dropped before the module can be imported again. | Below is the instruction that describes the task:
### Input:
Until the guard is dropped again,
disallow imports of the module given by ``module_name``.
If the module is imported while the guard is in place
an ``ImportGuard`` is raised. An additional message on why
the module cannot be imported can optionally be specified
using the parameter ``msg``.
If multiple guards are placed on the same module, all these guards
have to be dropped before the module can be imported again.
### Response:
def add(cls, module_name, msg=''):
    '''
    Until the guard is dropped again,
    disallow imports of the module given by ``module_name``.
    If the module is imported while the guard is in place
    an ``ImportGuard`` is raised. An additional message on why
    the module cannot be imported can optionally be specified
    using the parameter ``msg``.
    If multiple guards are placed on the same module, all these guards
    have to be dropped before the module can be imported again.
    '''
    # Guarding a module that is already loaded would be ineffective.
    if module_name in sys.modules:
        raise ImportGuard(
            'Module to guard has already been imported: '
            + module_name
        )
    # Each guard on the same module stacks; all must be dropped later.
    cls._guards.setdefault(module_name, []).append(msg)
    cls._num_entries += 1
    cls._install()
def ColumnAttr(name='', attr=None, **kwargs):
'Column using getattr/setattr of given attr.'
return Column(name,
expr=attr if attr is not None else name,
getter=lambda col,row: getattrdeep(row, col.expr),
setter=lambda col,row,val: setattrdeep(row, col.expr, val),
**kwargs) | Column using getattr/setattr of given attr. | Below is the the instruction that describes the task:
### Input:
Column using getattr/setattr of given attr.
### Response:
def ColumnAttr(name='', attr=None, **kwargs):
    'Column that reads/writes attribute ``attr`` (default: ``name``) of each row.'
    target = name if attr is None else attr
    return Column(
        name,
        expr=target,
        getter=lambda col, row: getattrdeep(row, col.expr),
        setter=lambda col, row, val: setattrdeep(row, col.expr, val),
        **kwargs,
    )
def reset(self):
'''Restores the starting position.'''
self.piece_bb = [
BB_VOID, # NONE
BB_RANK_C | BB_RANK_G, # PAWN
BB_A1 | BB_I1 | BB_A9 | BB_I9, # LANCE
BB_A2 | BB_A8 | BB_I2 | BB_I8, # KNIGHT
BB_A3 | BB_A7 | BB_I3 | BB_I7, # SILVER
BB_A4 | BB_A6 | BB_I4 | BB_I6, # GOLD
BB_B2 | BB_H8, # BISHOP
BB_B8 | BB_H2, # ROOK
BB_A5 | BB_I5, # KING
BB_VOID, # PROM_PAWN
BB_VOID, # PROM_LANCE
BB_VOID, # PROM_KNIGHT
BB_VOID, # PROM_SILVER
BB_VOID, # PROM_BISHOP
BB_VOID, # PROM_ROOK
]
self.pieces_in_hand = [collections.Counter(), collections.Counter()]
self.occupied = Occupied(BB_RANK_G | BB_H2 | BB_H8 | BB_RANK_I, BB_RANK_A | BB_B2 | BB_B8 | BB_RANK_C)
self.king_squares = [I5, A5]
self.pieces = [NONE for i in SQUARES]
for i in SQUARES:
mask = BB_SQUARES[i]
for piece_type in PIECE_TYPES:
if mask & self.piece_bb[piece_type]:
self.pieces[i] = piece_type
self.turn = BLACK
self.move_number = 1
self.captured_piece_stack = collections.deque()
self.move_stack = collections.deque()
self.incremental_zobrist_hash = self.board_zobrist_hash(DEFAULT_RANDOM_ARRAY)
self.transpositions = collections.Counter((self.zobrist_hash(), )) | Restores the starting position. | Below is the the instruction that describes the task:
### Input:
Restores the starting position.
### Response:
def reset(self):
    '''Restore the board to the standard shogi starting position.

    Rebuilds every piece bitboard, empties both players' hands, resets
    the move counters/stacks and re-seeds the Zobrist hash state.
    '''
    # One bitboard per piece type; index matches the piece-type constants.
    self.piece_bb = [
        BB_VOID, # NONE
        BB_RANK_C | BB_RANK_G, # PAWN
        BB_A1 | BB_I1 | BB_A9 | BB_I9, # LANCE
        BB_A2 | BB_A8 | BB_I2 | BB_I8, # KNIGHT
        BB_A3 | BB_A7 | BB_I3 | BB_I7, # SILVER
        BB_A4 | BB_A6 | BB_I4 | BB_I6, # GOLD
        BB_B2 | BB_H8, # BISHOP
        BB_B8 | BB_H2, # ROOK
        BB_A5 | BB_I5, # KING
        BB_VOID, # PROM_PAWN
        BB_VOID, # PROM_LANCE
        BB_VOID, # PROM_KNIGHT
        BB_VOID, # PROM_SILVER
        BB_VOID, # PROM_BISHOP
        BB_VOID, # PROM_ROOK
    ]
    # No captured pieces in hand for either player at the start.
    self.pieces_in_hand = [collections.Counter(), collections.Counter()]
    # Per-side occupancy masks for the two starting camps
    # (argument order assumed black-then-white -- TODO confirm against Occupied).
    self.occupied = Occupied(BB_RANK_G | BB_H2 | BB_H8 | BB_RANK_I, BB_RANK_A | BB_B2 | BB_B8 | BB_RANK_C)
    self.king_squares = [I5, A5]
    # Dense square -> piece-type lookup mirroring the bitboards above.
    self.pieces = [NONE for i in SQUARES]
    for i in SQUARES:
        mask = BB_SQUARES[i]
        for piece_type in PIECE_TYPES:
            if mask & self.piece_bb[piece_type]:
                self.pieces[i] = piece_type
    self.turn = BLACK
    self.move_number = 1
    self.captured_piece_stack = collections.deque()
    self.move_stack = collections.deque()
    # Seed the incrementally-maintained Zobrist hash and the repetition table.
    self.incremental_zobrist_hash = self.board_zobrist_hash(DEFAULT_RANDOM_ARRAY)
    self.transpositions = collections.Counter((self.zobrist_hash(), ))
def __update_all(self):
""" Recheck next start of records from all the sources
:return: None
"""
self.__next_start = None
self.__next_sources = []
for source in self.__sources:
self.__update(source) | Recheck next start of records from all the sources
    :return: None | Below is the instruction that describes the task:
### Input:
Recheck next start of records from all the sources
:return: None
### Response:
def __update_all(self):
    """Re-evaluate the next record start across every registered source.
    Clears the cached next-start state, then re-checks each source.
    :return: None
    """
    self.__next_start = None
    self.__next_sources = []
    for src in self.__sources:
        self.__update(src)
def get_work_artifact_link_types(self):
"""GetWorkArtifactLinkTypes.
[Preview API] Get the list of work item tracking outbound artifact link types.
:rtype: [WorkArtifactLink]
"""
response = self._send(http_method='GET',
location_id='1a31de40-e318-41cd-a6c6-881077df52e3',
version='5.0-preview.1')
return self._deserialize('[WorkArtifactLink]', self._unwrap_collection(response)) | GetWorkArtifactLinkTypes.
[Preview API] Get the list of work item tracking outbound artifact link types.
    :rtype: [WorkArtifactLink] | Below is the instruction that describes the task:
### Input:
GetWorkArtifactLinkTypes.
[Preview API] Get the list of work item tracking outbound artifact link types.
:rtype: [WorkArtifactLink]
### Response:
def get_work_artifact_link_types(self):
    """GetWorkArtifactLinkTypes.
    [Preview API] Get the list of work item tracking outbound artifact link types.
    :rtype: [WorkArtifactLink]
    """
    raw = self._send(
        http_method='GET',
        location_id='1a31de40-e318-41cd-a6c6-881077df52e3',
        version='5.0-preview.1',
    )
    items = self._unwrap_collection(raw)
    return self._deserialize('[WorkArtifactLink]', items)
def _parse_response(response, post_request=False):
"""Treat the response from ASApi.
The json is dumped before checking the status as even if the response is
not properly formed we are in trouble.
TODO: Streamline error checking.
"""
data = response.json()
if not response.ok:
utils.error_message_and_exit('Push Api Error:', data)
if post_request and not data['success']:
raise Exception('Push Api Error: [%s]' % data['error'])
return data | Treat the response from ASApi.
The json is dumped before checking the status as even if the response is
not properly formed we are in trouble.
TODO: Streamline error checking. | Below is the instruction that describes the task:
### Input:
Treat the response from ASApi.
The json is dumped before checking the status as even if the response is
not properly formed we are in trouble.
TODO: Streamline error checking.
### Response:
def _parse_response(response, post_request=False):
"""Treat the response from ASApi.
The json is dumped before checking the status as even if the response is
not properly formed we are in trouble.
TODO: Streamline error checking.
"""
data = response.json()
if not response.ok:
utils.error_message_and_exit('Push Api Error:', data)
if post_request and not data['success']:
raise Exception('Push Api Error: [%s]' % data['error'])
return data |
def render_to_texture(self, data, texture, offset, size):
"""Render a SDF to a texture at a given offset and size
Parameters
----------
data : array
Must be 2D with type np.ubyte.
texture : instance of Texture2D
The texture to render to.
offset : tuple of int
Offset (x, y) to render to inside the texture.
size : tuple of int
Size (w, h) to render inside the texture.
"""
assert isinstance(texture, Texture2D)
set_state(blend=False, depth_test=False)
# calculate the negative half (within object)
orig_tex = Texture2D(255 - data, format='luminance',
wrapping='clamp_to_edge', interpolation='nearest')
edf_neg_tex = self._render_edf(orig_tex)
# calculate positive half (outside object)
orig_tex[:, :, 0] = data
edf_pos_tex = self._render_edf(orig_tex)
# render final product to output texture
self.program_insert['u_texture'] = orig_tex
self.program_insert['u_pos_texture'] = edf_pos_tex
self.program_insert['u_neg_texture'] = edf_neg_tex
self.fbo_to[-1].color_buffer = texture
with self.fbo_to[-1]:
set_viewport(tuple(offset) + tuple(size))
self.program_insert.draw('triangle_strip') | Render a SDF to a texture at a given offset and size
Parameters
----------
data : array
Must be 2D with type np.ubyte.
texture : instance of Texture2D
The texture to render to.
offset : tuple of int
Offset (x, y) to render to inside the texture.
size : tuple of int
    Size (w, h) to render inside the texture. | Below is the instruction that describes the task:
### Input:
Render a SDF to a texture at a given offset and size
Parameters
----------
data : array
Must be 2D with type np.ubyte.
texture : instance of Texture2D
The texture to render to.
offset : tuple of int
Offset (x, y) to render to inside the texture.
size : tuple of int
Size (w, h) to render inside the texture.
### Response:
def render_to_texture(self, data, texture, offset, size):
    """Render a SDF to a texture at a given offset and size
    Parameters
    ----------
    data : array
        Must be 2D with type np.ubyte.
    texture : instance of Texture2D
        The texture to render to.
    offset : tuple of int
        Offset (x, y) to render to inside the texture.
    size : tuple of int
        Size (w, h) to render inside the texture.
    """
    assert isinstance(texture, Texture2D)
    # Pure image-space passes: no blending or depth testing wanted.
    set_state(blend=False, depth_test=False)
    # calculate the negative half (within object) from the inverted data;
    # _render_edf presumably produces a distance field -- TODO confirm.
    orig_tex = Texture2D(255 - data, format='luminance',
                         wrapping='clamp_to_edge', interpolation='nearest')
    edf_neg_tex = self._render_edf(orig_tex)
    # calculate positive half (outside object) by re-uploading the
    # un-inverted data into the same texture
    orig_tex[:, :, 0] = data
    edf_pos_tex = self._render_edf(orig_tex)
    # combine both halves and render the final product into the target
    self.program_insert['u_texture'] = orig_tex
    self.program_insert['u_pos_texture'] = edf_pos_tex
    self.program_insert['u_neg_texture'] = edf_neg_tex
    self.fbo_to[-1].color_buffer = texture
    with self.fbo_to[-1]:
        # Restrict drawing to the (x, y, w, h) sub-rectangle of `texture`.
        set_viewport(tuple(offset) + tuple(size))
        self.program_insert.draw('triangle_strip')
def start_from_snapshot(self, name):
"""
::
POST /:login/machines/:id/snapshots/:name
:param name: identifier for snapshot
:type name: :py:class:`basestring`
Start the machine from the snapshot with the given 'name'.
"""
_, r = self.datacenter.request('POST', self.path + '/snapshots/' +
str(name))
r.raise_for_status()
return self | ::
POST /:login/machines/:id/snapshots/:name
:param name: identifier for snapshot
:type name: :py:class:`basestring`
    Start the machine from the snapshot with the given 'name'. | Below is the instruction that describes the task:
### Input:
::
POST /:login/machines/:id/snapshots/:name
:param name: identifier for snapshot
:type name: :py:class:`basestring`
Start the machine from the snapshot with the given 'name'.
### Response:
def start_from_snapshot(self, name):
    """
    ::
        POST /:login/machines/:id/snapshots/:name
    :param name: identifier for snapshot
    :type name: :py:class:`basestring`
    Start the machine from the snapshot with the given 'name'.
    """
    endpoint = '{}/snapshots/{}'.format(self.path, name)
    _, resp = self.datacenter.request('POST', endpoint)
    resp.raise_for_status()
    return self
def copy_config(project_path, output_dir):
"""Copy current config file to output directory
"""
project_config = os.path.join(project_path, 'config.json')
saved_config = os.path.join(output_dir, 'config.json')
shutil.copy(project_config, saved_config) | Copy current config file to output directory | Below is the the instruction that describes the task:
### Input:
Copy current config file to output directory
### Response:
def copy_config(project_path, output_dir):
    """Copy the project's ``config.json`` into ``output_dir``."""
    src = os.path.join(project_path, 'config.json')
    dst = os.path.join(output_dir, 'config.json')
    shutil.copy(src, dst)
def get_shared_people(self):
"""Retrieves all people that share their location with this account"""
people = []
output = self._get_data()
self._logger.debug(output)
shared_entries = output[0] or []
for info in shared_entries:
try:
people.append(Person(info))
except InvalidData:
self._logger.debug('Missing location or other info, dropping person with info: %s', info)
return people | Retrieves all people that share their location with this account | Below is the the instruction that describes the task:
### Input:
Retrieves all people that share their location with this account
### Response:
def get_shared_people(self):
    """Return every person currently sharing their location with this account."""
    raw = self._get_data()
    self._logger.debug(raw)
    entries = raw[0] or []
    people = []
    for entry in entries:
        try:
            person = Person(entry)
        except InvalidData:
            # Entry lacks location (or other required fields); skip it.
            self._logger.debug('Missing location or other info, dropping person with info: %s', entry)
        else:
            people.append(person)
    return people
def align_circulation_with_z(self, circulation=None):
"""
If the input orbit is a tube orbit, this function aligns the circulation
axis with the z axis and returns a copy.
Parameters
----------
circulation : array_like (optional)
Array of bits that specify the axis about which the orbit
circulates. If not provided, will compute this using
:meth:`~gala.dynamics.Orbit.circulation`. See that method for more
information.
Returns
-------
orb : :class:`~gala.dynamics.Orbit`
A copy of the original orbit object with circulation aligned with
the z axis.
"""
if circulation is None:
circulation = self.circulation()
circulation = atleast_2d(circulation, insert_axis=1)
cart = self.cartesian
pos = cart.xyz
vel = np.vstack((cart.v_x.value[None],
cart.v_y.value[None],
cart.v_z.value[None])) * cart.v_x.unit
if pos.ndim < 3:
pos = pos[...,np.newaxis]
vel = vel[...,np.newaxis]
if (circulation.shape[0] != self.ndim or
circulation.shape[1] != pos.shape[2]):
raise ValueError("Shape of 'circulation' array should match the "
"shape of the position/velocity (minus the time "
"axis).")
new_pos = pos.copy()
new_vel = vel.copy()
for n in range(pos.shape[2]):
if circulation[2,n] == 1 or np.all(circulation[:,n] == 0):
# already circulating about z or box orbit
continue
if sum(circulation[:,n]) > 1:
logger.warning("Circulation about multiple axes - are you sure "
"the orbit has been integrated for long enough?")
if circulation[0,n] == 1:
circ = 0
elif circulation[1,n] == 1:
circ = 1
else:
raise RuntimeError("Should never get here...")
new_pos[circ,:,n] = pos[2,:,n]
new_pos[2,:,n] = pos[circ,:,n]
new_vel[circ,:,n] = vel[2,:,n]
new_vel[2,:,n] = vel[circ,:,n]
return self.__class__(pos=new_pos.reshape(cart.xyz.shape),
vel=new_vel.reshape(cart.xyz.shape),
t=self.t,
hamiltonian=self.hamiltonian) | If the input orbit is a tube orbit, this function aligns the circulation
axis with the z axis and returns a copy.
Parameters
----------
circulation : array_like (optional)
Array of bits that specify the axis about which the orbit
circulates. If not provided, will compute this using
:meth:`~gala.dynamics.Orbit.circulation`. See that method for more
information.
Returns
-------
orb : :class:`~gala.dynamics.Orbit`
A copy of the original orbit object with circulation aligned with
        the z axis. | Below is the instruction that describes the task:
### Input:
If the input orbit is a tube orbit, this function aligns the circulation
axis with the z axis and returns a copy.
Parameters
----------
circulation : array_like (optional)
Array of bits that specify the axis about which the orbit
circulates. If not provided, will compute this using
:meth:`~gala.dynamics.Orbit.circulation`. See that method for more
information.
Returns
-------
orb : :class:`~gala.dynamics.Orbit`
A copy of the original orbit object with circulation aligned with
the z axis.
### Response:
def align_circulation_with_z(self, circulation=None):
    """
    If the input orbit is a tube orbit, this function aligns the circulation
    axis with the z axis and returns a copy.
    Parameters
    ----------
    circulation : array_like (optional)
        Array of bits that specify the axis about which the orbit
        circulates. If not provided, will compute this using
        :meth:`~gala.dynamics.Orbit.circulation`. See that method for more
        information.
    Returns
    -------
    orb : :class:`~gala.dynamics.Orbit`
        A copy of the original orbit object with circulation aligned with
        the z axis.
    """
    if circulation is None:
        circulation = self.circulation()
    # Force shape (ndim, norbits) so single orbits go through the same path.
    circulation = atleast_2d(circulation, insert_axis=1)
    cart = self.cartesian
    pos = cart.xyz
    vel = np.vstack((cart.v_x.value[None],
                     cart.v_y.value[None],
                     cart.v_z.value[None])) * cart.v_x.unit
    # Promote single-orbit arrays to 3D (axis, time, orbit).
    if pos.ndim < 3:
        pos = pos[...,np.newaxis]
        vel = vel[...,np.newaxis]
    if (circulation.shape[0] != self.ndim or
            circulation.shape[1] != pos.shape[2]):
        raise ValueError("Shape of 'circulation' array should match the "
                         "shape of the position/velocity (minus the time "
                         "axis).")
    new_pos = pos.copy()
    new_vel = vel.copy()
    # Swap the circulating axis into the z slot, orbit by orbit.
    for n in range(pos.shape[2]):
        if circulation[2,n] == 1 or np.all(circulation[:,n] == 0):
            # already circulating about z or box orbit
            continue
        if sum(circulation[:,n]) > 1:
            logger.warning("Circulation about multiple axes - are you sure "
                           "the orbit has been integrated for long enough?")
        if circulation[0,n] == 1:
            circ = 0
        elif circulation[1,n] == 1:
            circ = 1
        else:
            raise RuntimeError("Should never get here...")
        # Exchange the circulating axis with z in both position and velocity.
        new_pos[circ,:,n] = pos[2,:,n]
        new_pos[2,:,n] = pos[circ,:,n]
        new_vel[circ,:,n] = vel[2,:,n]
        new_vel[2,:,n] = vel[circ,:,n]
    return self.__class__(pos=new_pos.reshape(cart.xyz.shape),
                          vel=new_vel.reshape(cart.xyz.shape),
                          t=self.t,
                          hamiltonian=self.hamiltonian)
def register_domain(self, domain=0, tokenizer=None, trie=None):
"""
Register a domain with the intent engine.
Args:
tokenizer(tokenizer): The tokenizer you wish to use.
trie(Trie): the Trie() you wish to use.
domain(str): a string representing the domain you wish to add
"""
self.domains[domain] = IntentDeterminationEngine(
tokenizer=tokenizer, trie=trie) | Register a domain with the intent engine.
Args:
tokenizer(tokenizer): The tokenizer you wish to use.
trie(Trie): the Trie() you wish to use.
    domain(str): a string representing the domain you wish to add | Below is the instruction that describes the task:
### Input:
Register a domain with the intent engine.
Args:
tokenizer(tokenizer): The tokenizer you wish to use.
trie(Trie): the Trie() you wish to use.
domain(str): a string representing the domain you wish to add
### Response:
def register_domain(self, domain=0, tokenizer=None, trie=None):
    """
    Register a domain with the intent engine.
    Args:
        tokenizer(tokenizer): The tokenizer you wish to use.
        trie(Trie): the Trie() you wish to use.
        domain(str): a string representing the domain you wish to add
    """
    engine = IntentDeterminationEngine(tokenizer=tokenizer, trie=trie)
    self.domains[domain] = engine
def dump(self, cache_file=None):
"""Write dump out to file `cache_file`, defaulting to
``self.cache_file``"""
if cache_file is None:
cache_file = self.cache_file
if cache_file is not None:
self.cache_file = cache_file
with open(cache_file, 'wb') as pickle_fh:
pickle.dump(
(self.remote, self.backend.name, self.max_sleep_interval,
self.job_id, self._status, self.epilogue, self.ssh,
self.scp),
pickle_fh) | Write dump out to file `cache_file`, defaulting to
    ``self.cache_file`` | Below is the instruction that describes the task:
### Input:
Write dump out to file `cache_file`, defaulting to
``self.cache_file``
### Response:
def dump(self, cache_file=None):
    """Pickle this object's state out to `cache_file`, defaulting to
    ``self.cache_file`` and remembering the path for later dumps."""
    target = self.cache_file if cache_file is None else cache_file
    if target is not None:
        self.cache_file = target
    state = (self.remote, self.backend.name, self.max_sleep_interval,
             self.job_id, self._status, self.epilogue, self.ssh,
             self.scp)
    with open(target, 'wb') as fh:
        pickle.dump(state, fh)
def extract_date(value):
"""
Convert timestamp to datetime and set everything to zero except a date
"""
dtime = value.to_datetime()
dtime = (dtime - timedelta(hours=dtime.hour) - timedelta(minutes=dtime.minute) -
timedelta(seconds=dtime.second) - timedelta(microseconds=dtime.microsecond))
return dtime | Convert timestamp to datetime and set everything to zero except a date | Below is the the instruction that describes the task:
### Input:
Convert timestamp to datetime and set everything to zero except a date
### Response:
def extract_date(value):
    """Return ``value`` as a datetime truncated to midnight.
    ``value`` must expose ``to_datetime()`` (e.g. a pandas Timestamp);
    every time-of-day component (hour, minute, second, microsecond) is
    zeroed, leaving only the date part.
    """
    dtime = value.to_datetime()
    # Truncate in one step with replace() instead of subtracting each
    # component as a separate timedelta.
    return dtime.replace(hour=0, minute=0, second=0, microsecond=0)
def editpermissions_user_view(self, request, user_id, forum_id=None):
""" Allows to edit user permissions for the considered forum.
The view displays a form to define which permissions are granted for the given user for the
considered forum.
"""
user_model = get_user_model()
user = get_object_or_404(user_model, pk=user_id)
forum = get_object_or_404(Forum, pk=forum_id) if forum_id else None
# Set up the context
context = self.get_forum_perms_base_context(request, forum)
context['forum'] = forum
context['title'] = '{} - {}'.format(_('Forum permissions'), user)
context['form'] = self._get_permissions_form(
request, UserForumPermission, {'forum': forum, 'user': user},
)
return render(request, self.editpermissions_user_view_template_name, context) | Allows to edit user permissions for the considered forum.
The view displays a form to define which permissions are granted for the given user for the
    considered forum. | Below is the instruction that describes the task:
### Input:
Allows to edit user permissions for the considered forum.
The view displays a form to define which permissions are granted for the given user for the
considered forum.
### Response:
def editpermissions_user_view(self, request, user_id, forum_id=None):
    """ Allows to edit user permissions for the considered forum.
    The view displays a form to define which permissions are granted for the given user for the
    considered forum.  When ``forum_id`` is ``None`` no forum is bound to the
    form (presumably global permissions -- confirm in ``_get_permissions_form``).
    """
    user_model = get_user_model()
    # 404 on unknown user / forum rather than rendering an empty form.
    user = get_object_or_404(user_model, pk=user_id)
    forum = get_object_or_404(Forum, pk=forum_id) if forum_id else None
    # Set up the context
    context = self.get_forum_perms_base_context(request, forum)
    context['forum'] = forum
    context['title'] = '{} - {}'.format(_('Forum permissions'), user)
    # Permissions form scoped to this (forum, user) pair.
    context['form'] = self._get_permissions_form(
        request, UserForumPermission, {'forum': forum, 'user': user},
    )
    return render(request, self.editpermissions_user_view_template_name, context)
def _multi_digamma(self, a, p, name="multi_digamma"):
"""Computes the multivariate digamma function; Psi_p(a)."""
with self._name_scope(name):
seq = self._multi_gamma_sequence(a, p)
return tf.reduce_sum(input_tensor=tf.math.digamma(seq), axis=[-1]) | Computes the multivariate digamma function; Psi_p(a). | Below is the the instruction that describes the task:
### Input:
Computes the multivariate digamma function; Psi_p(a).
### Response:
def _multi_digamma(self, a, p, name="multi_digamma"):
    """Computes the multivariate digamma function; Psi_p(a).
    Sums ``tf.math.digamma`` over the sequence produced by
    ``_multi_gamma_sequence(a, p)`` along the last axis.
    """
    with self._name_scope(name):
        seq = self._multi_gamma_sequence(a, p)
        return tf.reduce_sum(input_tensor=tf.math.digamma(seq), axis=[-1])
def status(self):
"""
Collects the instances state and returns a list.
.. important::
Molecule assumes all instances were created successfully by
Ansible, otherwise Ansible would return an error on create. This
may prove to be a bad assumption. However, configuring Molecule's
driver to match the options passed to the playbook may prove
difficult. Especially in cases where the user is provisioning
instances off localhost.
:returns: list
"""
status_list = []
for platform in self._config.platforms.instances:
instance_name = platform['name']
driver_name = self.name
provisioner_name = self._config.provisioner.name
scenario_name = self._config.scenario.name
status_list.append(
Status(
instance_name=instance_name,
driver_name=driver_name,
provisioner_name=provisioner_name,
scenario_name=scenario_name,
created=self._created(),
converged=self._converged(),
))
return status_list | Collects the instances state and returns a list.
.. important::
Molecule assumes all instances were created successfully by
Ansible, otherwise Ansible would return an error on create. This
may prove to be a bad assumption. However, configuring Molecule's
driver to match the options passed to the playbook may prove
difficult. Especially in cases where the user is provisioning
instances off localhost.
    :returns: list | Below is the instruction that describes the task:
### Input:
Collects the instances state and returns a list.
.. important::
Molecule assumes all instances were created successfully by
Ansible, otherwise Ansible would return an error on create. This
may prove to be a bad assumption. However, configuring Molecule's
driver to match the options passed to the playbook may prove
difficult. Especially in cases where the user is provisioning
instances off localhost.
:returns: list
### Response:
def status(self):
    """
    Build one ``Status`` entry per configured platform instance.
    .. important::
        Molecule assumes all instances were created successfully by
        Ansible, otherwise Ansible would return an error on create. This
        may prove to be a bad assumption. However, configuring Molecule's
        driver to match the options passed to the playbook may prove
        difficult. Especially in cases where the user is provisioning
        instances off localhost.
    :returns: list
    """
    return [
        Status(
            instance_name=platform['name'],
            driver_name=self.name,
            provisioner_name=self._config.provisioner.name,
            scenario_name=self._config.scenario.name,
            created=self._created(),
            converged=self._converged(),
        )
        for platform in self._config.platforms.instances
    ]
def get(self, sensor_type='temperature_core'):
"""Get sensors list."""
self.__update__()
if sensor_type == 'temperature_core':
ret = [s for s in self.sensors_list if s['unit'] == SENSOR_TEMP_UNIT]
elif sensor_type == 'fan_speed':
ret = [s for s in self.sensors_list if s['unit'] == SENSOR_FAN_UNIT]
else:
# Unknown type
logger.debug("Unknown sensor type %s" % sensor_type)
ret = []
return ret | Get sensors list. | Below is the the instruction that describes the task:
### Input:
Get sensors list.
### Response:
def get(self, sensor_type='temperature_core'):
    """Refresh and return the sensors matching ``sensor_type``."""
    self.__update__()
    unit_by_type = {'temperature_core': SENSOR_TEMP_UNIT,
                    'fan_speed': SENSOR_FAN_UNIT}
    if sensor_type not in unit_by_type:
        # Unknown type
        logger.debug("Unknown sensor type %s" % sensor_type)
        return []
    wanted_unit = unit_by_type[sensor_type]
    return [s for s in self.sensors_list if s['unit'] == wanted_unit]
def write_stream(self, stream, validate=True):
"""
Write :attr:`metainfo` to a file-like object
Before any data is written, `stream` is truncated if possible.
:param stream: Writable file-like object (e.g. :class:`io.BytesIO`)
:param bool validate: Whether to run :meth:`validate` first
:raises WriteError: if writing to `stream` fails
:raises MetainfoError: if `validate` is `True` and :attr:`metainfo`
contains invalid data
"""
content = self.dump(validate=validate)
try:
# Remove existing data from stream *after* dump() didn't raise
# anything so we don't destroy it prematurely.
if stream.seekable():
stream.seek(0)
stream.truncate(0)
stream.write(content)
except OSError as e:
raise error.WriteError(e.errno) | Write :attr:`metainfo` to a file-like object
Before any data is written, `stream` is truncated if possible.
:param stream: Writable file-like object (e.g. :class:`io.BytesIO`)
:param bool validate: Whether to run :meth:`validate` first
:raises WriteError: if writing to `stream` fails
:raises MetainfoError: if `validate` is `True` and :attr:`metainfo`
        contains invalid data | Below is the instruction that describes the task:
### Input:
Write :attr:`metainfo` to a file-like object
Before any data is written, `stream` is truncated if possible.
:param stream: Writable file-like object (e.g. :class:`io.BytesIO`)
:param bool validate: Whether to run :meth:`validate` first
:raises WriteError: if writing to `stream` fails
:raises MetainfoError: if `validate` is `True` and :attr:`metainfo`
contains invalid data
### Response:
def write_stream(self, stream, validate=True):
    """
    Serialize :attr:`metainfo` into the writable file-like `stream`.
    The stream is truncated first when it supports seeking.
    :param stream: Writable file-like object (e.g. :class:`io.BytesIO`)
    :param bool validate: Whether to run :meth:`validate` first
    :raises WriteError: if writing to `stream` fails
    :raises MetainfoError: if `validate` is `True` and :attr:`metainfo`
        contains invalid data
    """
    # Dump first: if validation raises we must not have touched the stream.
    payload = self.dump(validate=validate)
    try:
        if stream.seekable():
            stream.seek(0)
            stream.truncate(0)
        stream.write(payload)
    except OSError as exc:
        raise error.WriteError(exc.errno)
def get_element_masses(self):
"""
Get the masses of elements in the package.
:returns: [kg] An array of element masses. The sequence of the elements
in the result corresponds with the sequence of elements in the
element list of the material.
"""
result = [0] * len(self.material.elements)
for compound in self.material.compounds:
c = self.get_compound_mass(compound)
f = [c * x for x in emf(compound, self.material.elements)]
result = [v+f[ix] for ix, v in enumerate(result)]
return result | Get the masses of elements in the package.
:returns: [kg] An array of element masses. The sequence of the elements
in the result corresponds with the sequence of elements in the
        element list of the material. | Below is the instruction that describes the task:
### Input:
Get the masses of elements in the package.
:returns: [kg] An array of element masses. The sequence of the elements
in the result corresponds with the sequence of elements in the
element list of the material.
### Response:
def get_element_masses(self):
    """
    Get the masses of elements in the package.
    :returns: [kg] An array of element masses, ordered like the
        material's element list.
    """
    elements = self.material.elements
    totals = [0] * len(elements)
    for compound in self.material.compounds:
        compound_mass = self.get_compound_mass(compound)
        # Element mass fractions of this compound, scaled by its mass.
        parts = [compound_mass * frac for frac in emf(compound, elements)]
        totals = [tot + parts[i] for i, tot in enumerate(totals)]
    return totals
def disconnect(self):
"""
Disconnects from the robot.
"""
if self._driver:
self._driver.disconnect()
self.axis_homed = {
'x': False, 'y': False, 'z': False, 'a': False, 'b': False} | Disconnects from the robot. | Below is the the instruction that describes the task:
### Input:
Disconnects from the robot.
### Response:
def disconnect(self):
    """Drop the robot connection (if any) and mark every axis un-homed."""
    driver = self._driver
    if driver:
        driver.disconnect()
    self.axis_homed = dict.fromkeys(('x', 'y', 'z', 'a', 'b'), False)
def get_hostname_from_dn(dn):
"""
This parses the hostname from a dn designator. They look like this:
topology/pod-1/node-101/sys/phys-[eth1/6]/CDeqptMacsectxpkts5min
"""
pod = get_pod_from_dn(dn)
node = get_node_from_dn(dn)
if pod and node:
return "pod-{}-node-{}".format(pod, node)
else:
return None | This parses the hostname from a dn designator. They look like this:
    topology/pod-1/node-101/sys/phys-[eth1/6]/CDeqptMacsectxpkts5min | Below is the instruction that describes the task:
### Input:
This parses the hostname from a dn designator. They look like this:
topology/pod-1/node-101/sys/phys-[eth1/6]/CDeqptMacsectxpkts5min
### Response:
def get_hostname_from_dn(dn):
    """
    Derive a ``pod-<p>-node-<n>`` hostname from a dn designator like:
    topology/pod-1/node-101/sys/phys-[eth1/6]/CDeqptMacsectxpkts5min
    Returns None when either the pod or the node part cannot be extracted.
    """
    pod, node = get_pod_from_dn(dn), get_node_from_dn(dn)
    if not (pod and node):
        return None
    return "pod-{}-node-{}".format(pod, node)
def prep_fastq_inputs(in_files, data):
"""Prepare bgzipped fastq inputs
"""
if len(in_files) == 1 and _is_bam_input(in_files):
out = _bgzip_from_bam(in_files[0], data["dirs"], data)
elif len(in_files) == 1 and _is_cram_input(in_files):
out = _bgzip_from_cram(in_files[0], data["dirs"], data)
elif len(in_files) in [1, 2] and _ready_gzip_fastq(in_files, data):
out = _symlink_in_files(in_files, data)
else:
if len(in_files) > 2:
fpairs = fastq.combine_pairs(in_files)
pair_types = set([len(xs) for xs in fpairs])
assert len(pair_types) == 1
fpairs.sort(key=lambda x: os.path.basename(x[0]))
organized = [[xs[0] for xs in fpairs]]
if len(fpairs[0]) > 1:
organized.append([xs[1] for xs in fpairs])
in_files = organized
parallel = {"type": "local", "num_jobs": len(in_files),
"cores_per_job": max(1, data["config"]["algorithm"]["num_cores"] // len(in_files))}
inputs = [{"in_file": x, "read_num": i, "dirs": data["dirs"], "config": data["config"],
"is_cwl": "cwl_keys" in data,
"rgnames": data["rgnames"]}
for i, x in enumerate(in_files) if x]
out = run_multicore(_bgzip_from_fastq_parallel, [[d] for d in inputs], data["config"], parallel)
return out | Prepare bgzipped fastq inputs | Below is the the instruction that describes the task:
### Input:
Prepare bgzipped fastq inputs
### Response:
def prep_fastq_inputs(in_files, data):
    """Prepare bgzipped fastq inputs

    Dispatches on the input type: a single BAM or CRAM is converted to
    bgzipped fastq; one or two already-gzipped fastqs are symlinked into
    place; anything else (including more than two files) is grouped into
    read-1/read-2 lists and bgzipped in parallel.
    """
    # Single BAM/CRAM input: extract reads into bgzipped fastq.
    if len(in_files) == 1 and _is_bam_input(in_files):
        out = _bgzip_from_bam(in_files[0], data["dirs"], data)
    elif len(in_files) == 1 and _is_cram_input(in_files):
        out = _bgzip_from_cram(in_files[0], data["dirs"], data)
    # Already usable gzipped fastq(s): just symlink them into place.
    elif len(in_files) in [1, 2] and _ready_gzip_fastq(in_files, data):
        out = _symlink_in_files(in_files, data)
    else:
        if len(in_files) > 2:
            # More than two files: pair them up (read 1 / read 2).
            fpairs = fastq.combine_pairs(in_files)
            # All groups must be consistently single-end or paired-end.
            pair_types = set([len(xs) for xs in fpairs])
            assert len(pair_types) == 1
            fpairs.sort(key=lambda x: os.path.basename(x[0]))
            # Reorganize into [all read-1 files] (+ [all read-2 files]).
            organized = [[xs[0] for xs in fpairs]]
            if len(fpairs[0]) > 1:
                organized.append([xs[1] for xs in fpairs])
            in_files = organized
        # One local job per read set, splitting the configured cores.
        parallel = {"type": "local", "num_jobs": len(in_files),
                    "cores_per_job": max(1, data["config"]["algorithm"]["num_cores"] // len(in_files))}
        inputs = [{"in_file": x, "read_num": i, "dirs": data["dirs"], "config": data["config"],
                   "is_cwl": "cwl_keys" in data,
                   "rgnames": data["rgnames"]}
                  for i, x in enumerate(in_files) if x]
        out = run_multicore(_bgzip_from_fastq_parallel, [[d] for d in inputs], data["config"], parallel)
    return out
def to_dict(self):
"""Get the ARIMA model as a dictionary
Return the dictionary representation of the ARIMA model
Returns
-------
res : dictionary
The ARIMA model as a dictionary.
"""
return {
'pvalues': self.pvalues(),
'resid': self.resid(),
'order': self.order,
'seasonal_order': self.seasonal_order,
'oob': self.oob(),
'aic': self.aic(),
'aicc': self.aicc(),
'bic': self.bic(),
'bse': self.bse(),
'params': self.params()
} | Get the ARIMA model as a dictionary
Return the dictionary representation of the ARIMA model
Returns
-------
res : dictionary
        The ARIMA model as a dictionary. | Below is the instruction that describes the task:
### Input:
Get the ARIMA model as a dictionary
Return the dictionary representation of the ARIMA model
Returns
-------
res : dictionary
The ARIMA model as a dictionary.
### Response:
def to_dict(self):
    """Get the ARIMA model as a dictionary.

    Return the dictionary representation of the ARIMA model.

    Returns
    -------
    res : dictionary
        The ARIMA model as a dictionary.
    """
    # Collect the fitted attributes in a fixed order, then build the dict.
    pairs = (
        ('pvalues', self.pvalues()),
        ('resid', self.resid()),
        ('order', self.order),
        ('seasonal_order', self.seasonal_order),
        ('oob', self.oob()),
        ('aic', self.aic()),
        ('aicc', self.aicc()),
        ('bic', self.bic()),
        ('bse', self.bse()),
        ('params', self.params()),
    )
    return dict(pairs)
def tuning_ranges(self):
"""A dictionary describing the ranges of all tuned hyperparameters.
The keys are the names of the hyperparameter, and the values are the ranges.
"""
out = {}
for _, ranges in self.description()['HyperParameterTuningJobConfig']['ParameterRanges'].items():
for param in ranges:
out[param['Name']] = param
return out | A dictionary describing the ranges of all tuned hyperparameters.
    The keys are the names of the hyperparameter, and the values are the ranges. | Below is the instruction that describes the task:
### Input:
A dictionary describing the ranges of all tuned hyperparameters.
The keys are the names of the hyperparameter, and the values are the ranges.
### Response:
def tuning_ranges(self):
    """A dictionary describing the ranges of all tuned hyperparameters.

    The keys are the hyperparameter names and the values are the
    corresponding range descriptions.
    """
    ranges_by_type = self.description()['HyperParameterTuningJobConfig']['ParameterRanges']
    # Flatten the per-type range lists into one name-keyed mapping.
    return {param['Name']: param
            for ranges in ranges_by_type.values()
            for param in ranges}
def readPlist(pathOrFile):
"""Raises NotBinaryPlistException, InvalidPlistException"""
didOpen = False
result = None
if isinstance(pathOrFile, (bytes, unicode)):
pathOrFile = open(pathOrFile, 'rb')
didOpen = True
try:
reader = PlistReader(pathOrFile)
result = reader.parse()
except NotBinaryPlistException as e:
try:
pathOrFile.seek(0)
result = None
if hasattr(plistlib, 'loads'):
contents = None
if isinstance(pathOrFile, (bytes, unicode)):
with open(pathOrFile, 'rb') as f:
contents = f.read()
else:
contents = pathOrFile.read()
result = plistlib.loads(contents)
else:
result = plistlib.readPlist(pathOrFile)
result = wrapDataObject(result, for_binary=True)
except Exception as e:
raise InvalidPlistException(e)
finally:
if didOpen:
pathOrFile.close()
return result | Raises NotBinaryPlistException, InvalidPlistException | Below is the the instruction that describes the task:
### Input:
Raises NotBinaryPlistException, InvalidPlistException
### Response:
def readPlist(pathOrFile):
    """Parse a plist from a path or file object, preferring binary format.

    Tries the binary-plist reader first and falls back to the standard
    library's XML plist parser when the input is not a binary plist.

    Raises InvalidPlistException when the fallback parse fails.
    """
    didOpen = False
    result = None
    # NOTE(review): `unicode` implies Python 2 support (presumably
    # aliased to `str` elsewhere for Python 3) -- confirm.
    if isinstance(pathOrFile, (bytes, unicode)):
        pathOrFile = open(pathOrFile, 'rb')
        didOpen = True
    try:
        reader = PlistReader(pathOrFile)
        result = reader.parse()
    except NotBinaryPlistException as e:
        # Not a binary plist: rewind and retry with plistlib.
        try:
            pathOrFile.seek(0)
            result = None
            if hasattr(plistlib, 'loads'):
                # Modern (Python 3.4+) plistlib API.
                contents = None
                if isinstance(pathOrFile, (bytes, unicode)):
                    # Defensive: pathOrFile was already replaced by a file
                    # object above when given as a path.
                    with open(pathOrFile, 'rb') as f:
                        contents = f.read()
                else:
                    contents = pathOrFile.read()
                result = plistlib.loads(contents)
            else:
                # Legacy plistlib API.
                result = plistlib.readPlist(pathOrFile)
            result = wrapDataObject(result, for_binary=True)
        except Exception as e:
            # Neither parser could handle the input.
            raise InvalidPlistException(e)
    finally:
        # Only close handles this function opened itself.
        if didOpen:
            pathOrFile.close()
    return result
def rtruediv(self, other, axis="columns", level=None, fill_value=None):
"""Div this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the div against this.
axis: The axis to div over.
level: The Multilevel index level to apply div over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the rdiv applied.
"""
return self._binary_op(
"rtruediv", other, axis=axis, level=level, fill_value=fill_value
) | Div this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the div against this.
axis: The axis to div over.
level: The Multilevel index level to apply div over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the rdiv applied. | Below is the the instruction that describes the task:
### Input:
Div this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the div against this.
axis: The axis to div over.
level: The Multilevel index level to apply div over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the rdiv applied.
### Response:
def rtruediv(self, other, axis="columns", level=None, fill_value=None):
    """Div this DataFrame against another DataFrame/Series/scalar.

    Args:
        other: The object to use to apply the div against this.
        axis: The axis to div over.
        level: The Multilevel index level to apply div over.
        fill_value: The value to fill NaNs with.

    Returns:
        A new DataFrame with the rdiv applied.
    """
    # Delegate to the shared binary-operator dispatcher.
    op_kwargs = dict(axis=axis, level=level, fill_value=fill_value)
    return self._binary_op("rtruediv", other, **op_kwargs)
def _manipulate(self, *args, **kwargs):
"""
This is a semi-private method. It's current use is to
manipulate memory file system objects so that
you can create certain conditions, to provoke
errors that otherwise won't occur.
"""
self.connection._manipulate(self, *args, **kwargs) | This is a semi-private method. It's current use is to
manipulate memory file system objects so that
you can create certain conditions, to provoke
errors that otherwise won't occur. | Below is the the instruction that describes the task:
### Input:
This is a semi-private method. It's current use is to
manipulate memory file system objects so that
you can create certain conditions, to provoke
errors that otherwise won't occur.
### Response:
def _manipulate(self, *args, **kwargs):
"""
This is a semi-private method. It's current use is to
manipulate memory file system objects so that
you can create certain conditions, to provoke
errors that otherwise won't occur.
"""
self.connection._manipulate(self, *args, **kwargs) |
def run_command(self, config_file):
"""
:param str config_file: The name of config file.
"""
config = configparser.ConfigParser()
config.read(config_file)
rdbms = config.get('database', 'rdbms').lower()
label_regex = config.get('constants', 'label_regex')
constants = self.create_constants(rdbms)
constants.main(config_file, label_regex) | :param str config_file: The name of config file. | Below is the the instruction that describes the task:
### Input:
:param str config_file: The name of config file.
### Response:
def run_command(self, config_file):
    """Read the configuration and run the constants generator.

    :param str config_file: The name of config file.
    """
    parser = configparser.ConfigParser()
    parser.read(config_file)
    # Database backend name is normalized to lower case.
    rdbms = parser.get('database', 'rdbms').lower()
    label_regex = parser.get('constants', 'label_regex')
    generator = self.create_constants(rdbms)
    generator.main(config_file, label_regex)
def check_nsp(dist, attr, value):
"""Verify that namespace packages are valid"""
assert_string_list(dist,attr,value)
for nsp in value:
if not dist.has_contents_for(nsp):
raise DistutilsSetupError(
"Distribution contains no modules or packages for " +
"namespace package %r" % nsp
)
if '.' in nsp:
parent = '.'.join(nsp.split('.')[:-1])
if parent not in value:
distutils.log.warn(
"%r is declared as a package namespace, but %r is not:"
" please correct this in setup.py", nsp, parent
) | Verify that namespace packages are valid | Below is the the instruction that describes the task:
### Input:
Verify that namespace packages are valid
### Response:
def check_nsp(dist, attr, value):
    """Verify that namespace packages are valid

    :param dist: the Distribution being checked
    :param attr: the setup() keyword being validated
    :param value: the declared list of namespace package names

    Raises DistutilsSetupError when a declared namespace package has no
    contents in the distribution; warns (without failing) when a dotted
    namespace package's parent is not also declared.
    """
    # Validate the value is a proper list of strings before inspecting it.
    assert_string_list(dist,attr,value)
    for nsp in value:
        if not dist.has_contents_for(nsp):
            raise DistutilsSetupError(
                "Distribution contains no modules or packages for " +
                "namespace package %r" % nsp
            )
        if '.' in nsp:
            # A dotted namespace package requires its parent namespace
            # to be declared as well.
            parent = '.'.join(nsp.split('.')[:-1])
            if parent not in value:
                distutils.log.warn(
                    "%r is declared as a package namespace, but %r is not:"
                    " please correct this in setup.py", nsp, parent
                )
def is_vocalized(word):
"""Checks if the arabic word is vocalized.
the word musn't have any spaces and pounctuations.
@param word: arabic unicode char
@type word: unicode
@return: if the word is vocalized
@rtype:Boolean
"""
if word.isalpha():
return False
for char in word:
if is_tashkeel(char):
return True
else:
return False | Checks if the arabic word is vocalized.
the word musn't have any spaces and pounctuations.
@param word: arabic unicode char
@type word: unicode
@return: if the word is vocalized
@rtype:Boolean | Below is the the instruction that describes the task:
### Input:
Checks if the arabic word is vocalized.
the word musn't have any spaces and pounctuations.
@param word: arabic unicode char
@type word: unicode
@return: if the word is vocalized
@rtype:Boolean
### Response:
def is_vocalized(word):
    """Checks if the arabic word is vocalized.
    the word musn't have any spaces and pounctuations.
    @param word: arabic unicode char
    @type word: unicode
    @return: if the word is vocalized
    @rtype:Boolean
    """
    # Fast negative path: assumes tashkeel (diacritic) marks are
    # non-alphabetic combining characters, so a purely alphabetic word
    # cannot be vocalized -- TODO confirm against is_tashkeel.
    if word.isalpha():
        return False
    for char in word:
        if is_tashkeel(char):
            return True
    # for/else: runs only when the loop completed without returning,
    # i.e. no tashkeel character was found (also hit for empty input).
    else:
        return False
def check_config():
""" Report if there is an existing config file
"""
configfile = ConfigFile()
global data
if data.keys() > 0:
# FIXME: run a better check of this file
print("gitberg config file exists")
print("\twould you like to edit your gitberg config file?")
else:
print("No config found")
print("\twould you like to create a gitberg config file?")
answer = input("--> [Y/n]")
# By default, the answer is yes, as denoted by the capital Y
if not answer:
answer = 'Y'
# If yes, generate a new configuration
# to be written out as yaml
if answer in 'Yy':
print("Running gitberg config generator ...")
# config.exists_or_make()
config_gen = ConfigGenerator(current=data)
config_gen.ask()
# print(config_gen.answers)
data = config_gen.answers
configfile.write()
print("Config written to {}".format(configfile.file_path)) | Report if there is an existing config file | Below is the the instruction that describes the task:
### Input:
Report if there is an existing config file
### Response:
def check_config():
    """Report whether a gitberg config file already exists and offer to
    (re)generate it interactively.

    Prompts the user on stdin/stdout; on a yes answer, runs the
    ``ConfigGenerator`` wizard, stores the answers in the module-level
    ``data`` dict, and writes the config file to disk.
    """
    configfile = ConfigFile()
    global data
    # Bug fix: `data.keys() > 0` raises TypeError on Python 3 (dict view
    # compared to int); a truthy (non-empty) dict is the intended check.
    if data:
        # FIXME: run a better check of this file
        print("gitberg config file exists")
        print("\twould you like to edit your gitberg config file?")
    else:
        print("No config found")
        print("\twould you like to create a gitberg config file?")
    answer = input("--> [Y/n]")
    # By default, the answer is yes, as denoted by the capital Y
    if not answer:
        answer = 'Y'
    # If yes, generate a new configuration to be written out as yaml.
    # Accept "y", "Y", "yes", "Yes", ... (the old `answer in 'Yy'` test
    # rejected multi-character answers such as "yes").
    if answer.lower().startswith('y'):
        print("Running gitberg config generator ...")
        # config.exists_or_make()
        config_gen = ConfigGenerator(current=data)
        config_gen.ask()
        # print(config_gen.answers)
        data = config_gen.answers
        configfile.write()
        print("Config written to {}".format(configfile.file_path))
def hash_args(*args, **kwargs):
"""Define a unique string for any set of representable args."""
arg_string = '_'.join([str(arg) for arg in args])
kwarg_string = '_'.join([str(key) + '=' + str(value)
for key, value in iteritems(kwargs)])
combined = ':'.join([arg_string, kwarg_string])
hasher = md5()
hasher.update(b(combined))
return hasher.hexdigest() | Define a unique string for any set of representable args. | Below is the the instruction that describes the task:
### Input:
Define a unique string for any set of representable args.
### Response:
def hash_args(*args, **kwargs):
    """Define a unique string for any set of representable args."""
    # Stringify positional args and key=value pairs, then hash the
    # colon-joined combination.
    positional = '_'.join(str(arg) for arg in args)
    keyword = '_'.join(str(key) + '=' + str(value)
                       for key, value in iteritems(kwargs))
    digest = md5()
    digest.update(b(positional + ':' + keyword))
    return digest.hexdigest()
def addHandler(name, basepath=None, baseurl=None, allowDownscale=False):
"""Add an event handler with given name."""
if basepath is None:
basepath = '.'
_handlers.append(_handler_classes[name](basepath, baseurl, allowDownscale)) | Add an event handler with given name. | Below is the the instruction that describes the task:
### Input:
Add an event handler with given name.
### Response:
def addHandler(name, basepath=None, baseurl=None, allowDownscale=False):
    """Add an event handler with given name.

    Instantiates the handler class registered under *name* and appends
    it to the module's handler list; *basepath* defaults to the current
    directory.
    """
    effective_basepath = '.' if basepath is None else basepath
    handler_cls = _handler_classes[name]
    _handlers.append(handler_cls(effective_basepath, baseurl, allowDownscale))
def _interval_string_to_seconds(interval_string):
"""Convert internal string like 1M, 1Y3M, 3W to seconds.
:type interval_string: str
:param interval_string: Interval string like 1M, 1W, 1M3W4h2s...
(s => seconds, m => minutes, h => hours, D => days,
W => weeks, M => months, Y => Years).
:rtype: int
:return: The conversion in seconds of interval_string.
"""
interval_exc = "Bad interval format for {0}".format(interval_string)
interval_dict = {"s": 1, "m": 60, "h": 3600, "D": 86400,
"W": 7*86400, "M": 30*86400, "Y": 365*86400}
interval_regex = re.compile("^(?P<num>[0-9]+)(?P<ext>[smhDWMY])")
seconds = 0
while interval_string:
match = interval_regex.match(interval_string)
if match:
num, ext = int(match.group("num")), match.group("ext")
if num > 0 and ext in interval_dict:
seconds += num * interval_dict[ext]
interval_string = interval_string[match.end():]
else:
raise Exception(interval_exc)
else:
raise Exception(interval_exc)
return seconds | Convert internal string like 1M, 1Y3M, 3W to seconds.
:type interval_string: str
:param interval_string: Interval string like 1M, 1W, 1M3W4h2s...
(s => seconds, m => minutes, h => hours, D => days,
W => weeks, M => months, Y => Years).
:rtype: int
:return: The conversion in seconds of interval_string. | Below is the the instruction that describes the task:
### Input:
Convert internal string like 1M, 1Y3M, 3W to seconds.
:type interval_string: str
:param interval_string: Interval string like 1M, 1W, 1M3W4h2s...
(s => seconds, m => minutes, h => hours, D => days,
W => weeks, M => months, Y => Years).
:rtype: int
:return: The conversion in seconds of interval_string.
### Response:
def _interval_string_to_seconds(interval_string):
"""Convert internal string like 1M, 1Y3M, 3W to seconds.
:type interval_string: str
:param interval_string: Interval string like 1M, 1W, 1M3W4h2s...
(s => seconds, m => minutes, h => hours, D => days,
W => weeks, M => months, Y => Years).
:rtype: int
:return: The conversion in seconds of interval_string.
"""
interval_exc = "Bad interval format for {0}".format(interval_string)
interval_dict = {"s": 1, "m": 60, "h": 3600, "D": 86400,
"W": 7*86400, "M": 30*86400, "Y": 365*86400}
interval_regex = re.compile("^(?P<num>[0-9]+)(?P<ext>[smhDWMY])")
seconds = 0
while interval_string:
match = interval_regex.match(interval_string)
if match:
num, ext = int(match.group("num")), match.group("ext")
if num > 0 and ext in interval_dict:
seconds += num * interval_dict[ext]
interval_string = interval_string[match.end():]
else:
raise Exception(interval_exc)
else:
raise Exception(interval_exc)
return seconds |
def MA_serial(self,days,rev=0):
""" see make_serial()
收盤價移動平均 list 化,資料格式請見 def make_serial()
"""
return self.make_serial(self.raw_data,days,rev) | see make_serial()
收盤價移動平均 list 化,資料格式請見 def make_serial() | Below is the the instruction that describes the task:
### Input:
see make_serial()
收盤價移動平均 list 化,資料格式請見 def make_serial()
### Response:
def MA_serial(self, days, rev=0):
    """See make_serial().

    Turn the moving average of closing prices into a list; for the data
    format refer to make_serial().
    """
    prices = self.raw_data
    return self.make_serial(prices, days, rev)
def insert(self, table, payload, **kwargs):
"""Insert (POST) request wrapper
:param table: table to insert on
:param payload: update payload (dict)
:param kwargs: Keyword arguments passed along to `Request`
:return:
- Dictionary containing the created record
"""
r = self._legacy_request('POST', table, **kwargs)
return r.insert(payload) | Insert (POST) request wrapper
:param table: table to insert on
:param payload: update payload (dict)
:param kwargs: Keyword arguments passed along to `Request`
:return:
- Dictionary containing the created record | Below is the the instruction that describes the task:
### Input:
Insert (POST) request wrapper
:param table: table to insert on
:param payload: update payload (dict)
:param kwargs: Keyword arguments passed along to `Request`
:return:
- Dictionary containing the created record
### Response:
def insert(self, table, payload, **kwargs):
    """Insert (POST) request wrapper

    :param table: table to insert on
    :param payload: update payload (dict)
    :param kwargs: Keyword arguments passed along to `Request`
    :return:
        - Dictionary containing the created record
    """
    # Build the POST request for the table, then submit the payload.
    request = self._legacy_request('POST', table, **kwargs)
    return request.insert(payload)
def _print_header(data):
"""
Create vcf header to make
a valid vcf.
"""
print("##fileformat=VCFv4.2", file=STDOUT, end="")
print("##source=seqbuster2.3", file=STDOUT, end="")
print("##reference=mirbase", file=STDOUT, end="")
for pos in data:
print("##contig=<ID=%s>" % pos["chrom"], file=STDOUT, end="")
print('##INFO=<ID=ID,Number=1,Type=String,Description="miRNA name">', file=STDOUT, end="")
print('##FORMAT=<ID=GT,Number=1,Type=Integer,Description="Genotype">', file=STDOUT, end="")
print('##FORMAT=<ID=NR,Number=A,Type=Integer,Description="Total reads supporting the variant">', file=STDOUT, end="")
print('##FORMAT=<ID=NS,Number=A,Type=Float,Description="Total number of different sequences supporting the variant">', file=STDOUT, end="")
print("#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tSAMP001", file=STDOUT, end="") | Create vcf header to make
a valid vcf. | Below is the the instruction that describes the task:
### Input:
Create vcf header to make
a valid vcf.
### Response:
def _print_header(data):
    """
    Create vcf header to make
    a valid vcf.

    :param data: iterable of dicts with a "chrom" key; one ##contig
        line is emitted per entry.
    """
    # NOTE(review): every print uses end="", so no newlines are emitted
    # between header lines -- this only yields a line-separated header if
    # STDOUT (defined elsewhere) or the strings supply separators; confirm.
    print("##fileformat=VCFv4.2", file=STDOUT, end="")
    print("##source=seqbuster2.3", file=STDOUT, end="")
    print("##reference=mirbase", file=STDOUT, end="")
    for pos in data:
        print("##contig=<ID=%s>" % pos["chrom"], file=STDOUT, end="")
    print('##INFO=<ID=ID,Number=1,Type=String,Description="miRNA name">', file=STDOUT, end="")
    print('##FORMAT=<ID=GT,Number=1,Type=Integer,Description="Genotype">', file=STDOUT, end="")
    print('##FORMAT=<ID=NR,Number=A,Type=Integer,Description="Total reads supporting the variant">', file=STDOUT, end="")
    print('##FORMAT=<ID=NS,Number=A,Type=Float,Description="Total number of different sequences supporting the variant">', file=STDOUT, end="")
    print("#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tSAMP001", file=STDOUT, end="")
def bip32(self, s):
"""
Parse a bip32 public key from a text string, either a seed, a prv or a pub.
Return a :class:`BIP32 <pycoin.key.BIP32Node.BIP32Node>` or None.
"""
s = parseable_str(s)
return self.bip32_prv(s) or self.bip32_pub(s) | Parse a bip32 public key from a text string, either a seed, a prv or a pub.
Return a :class:`BIP32 <pycoin.key.BIP32Node.BIP32Node>` or None. | Below is the the instruction that describes the task:
### Input:
Parse a bip32 public key from a text string, either a seed, a prv or a pub.
Return a :class:`BIP32 <pycoin.key.BIP32Node.BIP32Node>` or None.
### Response:
def bip32(self, s):
    """
    Parse a bip32 public key from a text string, either a seed, a prv or a pub.
    Return a :class:`BIP32 <pycoin.key.BIP32Node.BIP32Node>` or None.
    """
    parsed = parseable_str(s)
    # Try the private form first, then fall back to the public form.
    node = self.bip32_prv(parsed)
    if node:
        return node
    return self.bip32_pub(parsed)
def registration(fixed,
moving,
type_of_transform='SyN',
initial_transform=None,
outprefix='',
mask=None,
grad_step=0.2,
flow_sigma=3,
total_sigma=0,
aff_metric='mattes',
aff_sampling=32,
syn_metric='mattes',
syn_sampling=32,
reg_iterations=(40,20,0),
write_composite_transform=False,
verbose=False,
multivariate_extras=None,
**kwargs):
"""
Register a pair of images either through the full or simplified
interface to the ANTs registration method.
ANTsR function: `antsRegistration`
Arguments
---------
fixed : ANTsImage
fixed image to which we register the moving image.
moving : ANTsImage
moving image to be mapped to fixed space.
type_of_transform : string
A linear or non-linear registration type. Mutual information metric by default.
See Notes below for more.
initial_transform : list of strings (optional)
transforms to prepend
outprefix : string
output will be named with this prefix.
mask : ANTsImage (optional)
mask the registration.
grad_step : scalar
gradient step size (not for all tx)
flow_sigma : scalar
smoothing for update field
total_sigma : scalar
smoothing for total field
aff_metric : string
the metric for the affine part (GC, mattes, meansquares)
aff_sampling : scalar
the nbins or radius parameter for the syn metric
syn_metric : string
the metric for the syn part (CC, mattes, meansquares, demons)
syn_sampling : scalar
the nbins or radius parameter for the syn metric
reg_iterations : list/tuple of integers
vector of iterations for syn. we will set the smoothing and multi-resolution parameters based on the length of this vector.
write_composite_transform : boolean
Boolean specifying whether or not the composite transform (and its inverse, if it exists) should be written to an hdf5 composite file. This is false by default so that only the transform for each stage is written to file.
verbose : boolean
request verbose output (useful for debugging)
multivariate_extras : additional metrics for multi-metric registration
list of additional images and metrics which will
trigger the use of multiple metrics in the registration
process in the deformable stage. Each multivariate metric needs 5
entries: name of metric, fixed, moving, weight,
samplingParam. the list of lists should be of the form ( (
"nameOfMetric2", img, img, weight, metricParam ) ). Another
example would be ( ( "MeanSquares", f2, m2, 0.5, 0
), ( "CC", f2, m2, 0.5, 2 ) ) . This is only compatible
with the SyNOnly transformation.
kwargs : keyword args
extra arguments
Returns
-------
dict containing follow key/value pairs:
`warpedmovout`: Moving image warped to space of fixed image.
`warpedfixout`: Fixed image warped to space of moving image.
`fwdtransforms`: Transforms to move from moving to fixed image.
`invtransforms`: Transforms to move from fixed to moving image.
Notes
-----
typeofTransform can be one of:
- "Translation": Translation transformation.
- "Rigid": Rigid transformation: Only rotation and translation.
- "Similarity": Similarity transformation: scaling, rotation and translation.
- "QuickRigid": Rigid transformation: Only rotation and translation.
May be useful for quick visualization fixes.'
- "DenseRigid": Rigid transformation: Only rotation and translation.
Employs dense sampling during metric estimation.'
- "BOLDRigid": Rigid transformation: Parameters typical for BOLD to
BOLD intrasubject registration'.'
- "Affine": Affine transformation: Rigid + scaling.
- "AffineFast": Fast version of Affine.
- "BOLDAffine": Affine transformation: Parameters typical for BOLD to
BOLD intrasubject registration'.'
- "TRSAA": translation, rigid, similarity, affine (twice). please set
regIterations if using this option. this would be used in
cases where you want a really high quality affine mapping
(perhaps with mask).
- "ElasticSyN": Symmetric normalization: Affine + deformable
transformation, with mutual information as optimization
metric and elastic regularization.
- "SyN": Symmetric normalization: Affine + deformable transformation,
with mutual information as optimization metric.
- "SyNRA": Symmetric normalization: Rigid + Affine + deformable
transformation, with mutual information as optimization metric.
- "SyNOnly": Symmetric normalization: no initial transformation,
with mutual information as optimization metric. Assumes
images are aligned by an inital transformation. Can be
useful if you want to run an unmasked affine followed by
masked deformable registration.
- "SyNCC": SyN, but with cross-correlation as the metric.
- "SyNabp": SyN optimized for abpBrainExtraction.
- "SyNBold": SyN, but optimized for registrations between BOLD and T1 images.
- "SyNBoldAff": SyN, but optimized for registrations between BOLD
and T1 images, with additional affine step.
- "SyNAggro": SyN, but with more aggressive registration
(fine-scale matching and more deformation).
Takes more time than SyN.
- "TVMSQ": time-varying diffeomorphism with mean square metric
- "TVMSQC": time-varying diffeomorphism with mean square metric for very large deformation
Example
-------
>>> import ants
>>> fi = ants.image_read(ants.get_ants_data('r16'))
>>> mi = ants.image_read(ants.get_ants_data('r64'))
>>> fi = ants.resample_image(fi, (60,60), 1, 0)
>>> mi = ants.resample_image(mi, (60,60), 1, 0)
>>> mytx = ants.registration(fixed=fi, moving=mi, type_of_transform = 'SyN' )
"""
if isinstance(fixed, list) and (moving is None):
processed_args = utils._int_antsProcessArguments(fixed)
libfn = utils.get_lib_fn('antsRegistration')
libfn(processed_args)
return 0
if type_of_transform == '':
type_of_transform = 'SyN'
if isinstance(type_of_transform, (tuple,list)) and (len(type_of_transform) == 1):
type_of_transform = type_of_transform[0]
if (outprefix == '') or len(outprefix) == 0:
outprefix = mktemp()
if (np.sum(np.isnan(fixed.numpy())) > 0):
raise ValueError('fixed image has NaNs - replace these')
if (np.sum(np.isnan(moving.numpy())) > 0):
raise ValueError('moving image has NaNs - replace these')
#----------------------------
args = [fixed, moving, type_of_transform, outprefix]
myl = 0
myf_aff = '6x4x2x1'
mys_aff = '3x2x1x0'
metsam = 0.2
myiterations = '2100x1200x1200x10'
if (type_of_transform == 'AffineFast'):
type_of_transform = 'Affine'
myiterations = '2100x1200x0x0'
if (type_of_transform == 'BOLDAffine'):
type_of_transform = 'Affine'
myf_aff='2x1'
mys_aff='1x0'
myiterations = '100x20'
myl=1
if (type_of_transform == 'QuickRigid'):
type_of_transform = 'Rigid'
myiterations = '20x20x0x0'
if (type_of_transform == 'DenseRigid'):
type_of_transform = 'Rigid'
metsam = 0.8
if (type_of_transform == 'BOLDRigid'):
type_of_transform = 'Rigid'
myf_aff='2x1'
mys_aff='1x0'
myiterations = '100x20'
myl=1
mysyn = 'SyN[%f,%f,%f]' % (grad_step, flow_sigma, total_sigma)
itlen = len(reg_iterations)# NEED TO CHECK THIS
if itlen == 0:
smoothingsigmas = 0
shrinkfactors = 1
synits = reg_iterations
else:
smoothingsigmas = np.arange(0, itlen)[::-1].astype('float32') # NEED TO CHECK THIS
shrinkfactors = 2**smoothingsigmas
shrinkfactors = shrinkfactors.astype('int')
smoothingsigmas = 'x'.join([str(ss)[0] for ss in smoothingsigmas])
shrinkfactors = 'x'.join([str(ss) for ss in shrinkfactors])
synits = 'x'.join([str(ri) for ri in reg_iterations])
if not isinstance(fixed, str):
if isinstance(fixed, iio.ANTsImage) and isinstance(moving, iio.ANTsImage):
inpixeltype = fixed.pixeltype
ttexists = False
allowable_tx = {'SyNBold','SyNBoldAff', 'ElasticSyN','SyN','SyNRA',
'SyNOnly','SyNAggro','SyNCC','TRSAA','SyNabp','SyNLessAggro',
'TVMSQ','TVMSQC','Rigid','Similarity','Translation','Affine',
'AffineFast','BOLDAffine','QuickRigid','DenseRigid','BOLDRigid'}
ttexists = type_of_transform in allowable_tx
if not ttexists:
raise ValueError('`type_of_transform` does not exist')
if ttexists:
initx = initial_transform
#if isinstance(initx, ANTsTransform):
#tempTXfilename = tempfile( fileext = '.mat' )
#initx = invertAntsrTransform( initialTransform )
#initx = invertAntsrTransform( initx )
#writeAntsrTransform( initx, tempTXfilename )
#initx = tempTXfilename
moving = moving.clone('float')
fixed = fixed.clone('float')
warpedfixout = moving.clone()
warpedmovout = fixed.clone()
f = utils.get_pointer_string(fixed)
m = utils.get_pointer_string(moving)
wfo = utils.get_pointer_string(warpedfixout)
wmo = utils.get_pointer_string(warpedmovout)
if mask is not None:
mask_scale = mask - mask.min()
mask_scale = mask_scale / mask_scale.max() * 255.
charmask = mask_scale.clone('unsigned char')
maskopt = '[%s,NA]' % (utils.get_pointer_string(charmask))
else:
maskopt = None
if initx is None:
initx = '[%s,%s,1]' % (f, m)
# ------------------------------------------------------------
if type_of_transform == 'SyNBold':
args = ['-d', str(fixed.dimension),
'-r', initx,
'-m', '%s[%s,%s,1,%s,regular,0.2]' % (aff_metric, f, m, aff_sampling),
'-t', 'Rigid[0.25]',
'-c', '[1200x1200x100,1e-6,5]',
'-s', '2x1x0',
'-f', '4x2x1',
'-x', '[NA,NA]',
'-m', '%s[%s,%s,1,%s]' % (syn_metric, f, m, syn_sampling),
'-t', mysyn,
'-c', '[%s,1e-7,8]' % synits,
'-s', smoothingsigmas,
'-f', shrinkfactors,
'-u', '1',
'-z', '1',
'-l', myl,
'-o', '[%s,%s,%s]' % (outprefix, wmo, wfo)]
if maskopt is not None:
args.append('-x')
args.append(maskopt)
else:
args.append('-x')
args.append('[NA,NA]')
# ------------------------------------------------------------
elif type_of_transform == 'SyNBoldAff':
args = ['-d', str(fixed.dimension),
'-r', initx,
'-m', '%s[%s,%s,1,%s,regular,0.2]' % (aff_metric, f, m, aff_sampling),
'-t', 'Rigid[0.25]',
'-c', '[1200x1200x100,1e-6,5]',
'-s', '2x1x0',
'-f', '4x2x1',
'-x', '[NA,NA]',
'-m', '%s[%s,%s,1,%s,regular,0.2]' % (aff_metric, f, m, aff_sampling),
'-t', 'Affine[0.25]',
'-c', '[200x20,1e-6,5]',
'-s', '1x0',
'-f', '2x1',
'-x', '[NA,NA]',
'-m', '%s[%s,%s,1,%s]' % (syn_metric, f, m, syn_sampling),
'-t', mysyn,
'-c', '[%s,1e-7,8]' % (synits),
'-s', smoothingsigmas,
'-f', shrinkfactors,
'-u', '1',
'-z', '1',
'-l', myl,
'-o', '[%s,%s,%s]' % (outprefix, wmo, wfo)]
if maskopt is not None:
args.append('-x')
args.append(maskopt)
else:
args.append('-x')
args.append('[NA,NA]')
# ------------------------------------------------------------
elif type_of_transform == 'ElasticSyN':
args = ['-d', str(fixed.dimension),
'-r', initx,
'-m', '%s[%s,%s,1,%s,regular,0.2]' % (aff_metric, f, m, aff_sampling),
'-t', 'Affine[0.25]',
'-c', '2100x1200x200x0',
'-s', '3x2x1x0',
'-f', '4x2x2x1',
'-x', '[NA,NA]',
'-m', '%s[%s,%s,1,%s]' % (syn_metric, f, m, syn_sampling),
'-t', mysyn,
'-c', '[%s,1e-7,8]' % (synits),
'-s', smoothingsigmas,
'-f', shrinkfactors,
'-u', '1',
'-z', '1',
'-l', myl,
'-o', '[%s,%s,%s]' % (outprefix, wmo, wfo)]
if maskopt is not None:
args.append('-x')
args.append(maskopt)
else:
args.append('-x')
args.append('[NA,NA]')
# ------------------------------------------------------------
elif type_of_transform == 'SyN':
args = ['-d', str(fixed.dimension),
'-r', initx,
'-m', '%s[%s,%s,1,%s,regular,0.2]' % (aff_metric, f, m, aff_sampling),
'-t', 'Affine[0.25]',
'-c', '2100x1200x1200x0',
'-s', '3x2x1x0',
'-f', '4x2x2x1',
'-x', '[NA,NA]',
'-m', '%s[%s,%s,1,%s]' % (syn_metric, f, m, syn_sampling),
'-t', mysyn,
'-c', '[%s,1e-7,8]' % synits,
'-s', smoothingsigmas,
'-f', shrinkfactors,
'-u', '1',
'-z', '1',
'-l', myl,
'-o', '[%s,%s,%s]' % (outprefix, wmo, wfo)]
if maskopt is not None:
args.append('-x')
args.append(maskopt)
else:
args.append('-x')
args.append('[NA,NA]')
# ------------------------------------------------------------
elif type_of_transform == 'SyNRA':
args = ['-d', str(fixed.dimension),
'-r', initx,
'-m', '%s[%s,%s,1,%s,regular,0.2]' % (aff_metric, f, m, aff_sampling),
'-t', 'Rigid[0.25]',
'-c', '2100x1200x1200x0',
'-s', '3x2x1x0',
'-f', '4x2x2x1',
'-x', '[NA,NA]',
'-m', '%s[%s,%s,1,%s,regular,0.2]' % (aff_metric, f, m, aff_sampling),
'-t', 'Affine[0.25]',
'-c', '2100x1200x1200x0',
'-s', '3x2x1x0',
'-f', '4x2x2x1',
'-x', '[NA,NA]',
'-m', '%s[%s,%s,1,%s]' % (syn_metric, f, m, syn_sampling),
'-t', mysyn,
'-c', '[%s,1e-7,8]' % synits,
'-s', smoothingsigmas,
'-f', shrinkfactors,
'-u', '1',
'-z', '1',
'-l', myl,
'-o', '[%s,%s,%s]' % (outprefix, wmo, wfo)]
if maskopt is not None:
args.append('-x')
args.append(maskopt)
else:
args.append('-x')
args.append('[NA,NA]')
# ------------------------------------------------------------
elif type_of_transform == 'SyNOnly':
args = ['-d', str(fixed.dimension),
'-r', initx,
'-m', '%s[%s,%s,1,%s]' % (syn_metric, f, m, syn_sampling),
'-t', mysyn,
'-c', '[%s,1e-7,8]' % synits,
'-s', smoothingsigmas,
'-f', shrinkfactors,
'-u', '1',
'-z', '1',
'-l', myl,
'-o', '[%s,%s,%s]' % (outprefix, wmo, wfo)]
if multivariate_extras is not None:
metrics = [ ]
for kk in range( len( multivariate_extras ) ):
metrics.append( '-m' )
metricname = multivariate_extras[kk][0]
metricfixed = utils.get_pointer_string(multivariate_extras[kk][1])
metricmov = utils.get_pointer_string(multivariate_extras[kk][2])
metricWeight = multivariate_extras[kk][3]
metricSampling = multivariate_extras[kk][4]
metricString = "%s[%s,%s,%s,%s]" % (metricname,metricfixed,metricmov,metricWeight,metricSampling)
metrics.append( metricString )
args = ['-d', str(fixed.dimension),
'-r', initx,
'-m', '%s[%s,%s,1,%s]' % (syn_metric, f, m, syn_sampling) ]
args1= ['-t', mysyn,
'-c', '[%s,1e-7,8]' % synits,
'-s', smoothingsigmas,
'-f', shrinkfactors,
'-u', '1',
'-z', '1',
'-l', myl,
'-o', '[%s,%s,%s]' % (outprefix, wmo, wfo)]
for kk in range( len( metrics ) ):
args.append( metrics[kk] )
for kk in range( len( args1 ) ):
args.append( args1[kk] )
if maskopt is not None:
args.append('-x')
args.append(maskopt)
else:
args.append('-x')
args.append('[NA,NA]')
# ------------------------------------------------------------
elif type_of_transform == 'SyNAggro':
args = ['-d', str(fixed.dimension),
'-r', initx,
'-m', '%s[%s,%s,1,%s,regular,0.2]' % (aff_metric, f, m, aff_sampling),
'-t', 'Affine[0.25]',
'-c', '2100x1200x1200x100',
'-s', '3x2x1x0',
'-f', '4x2x2x1',
'-x', '[NA,NA]',
'-m', '%s[%s,%s,1,%s]' % (syn_metric, f, m, syn_sampling),
'-t', mysyn,
'-c', '[%s,1e-7,8]' % synits,
'-s', smoothingsigmas,
'-f', shrinkfactors,
'-u', '1',
'-z', '1',
'-l', myl,
'-o', '[%s,%s,%s]' % (outprefix, wmo, wfo)]
if maskopt is not None:
args.append('-x')
args.append(maskopt)
else:
args.append('-x')
args.append('[NA,NA]')
# ------------------------------------------------------------
elif type_of_transform == 'SyNCC':
syn_metric = 'CC'
syn_sampling = 4
synits = '2100x1200x1200x20'
smoothingsigmas = '3x2x1x0'
shrinkfactors = '4x3x2x1'
mysyn = 'SyN[0.15,3,0]'
args = ['-d', str(fixed.dimension),
'-r', initx,
'-m', '%s[%s,%s,1,%s,regular,0.2]' % (aff_metric, f, m, aff_sampling),
'-t', 'Rigid[1]',
'-c', '2100x1200x1200x0',
'-s', '3x2x1x0',
'-f', '4x4x2x1',
'-x', '[NA,NA]',
'-m', '%s[%s,%s,1,%s,regular,0.2]' % (aff_metric, f, m, aff_sampling),
'-t', 'Affine[1]',
'-c', '1200x1200x100',
'-s', '2x1x0',
'-f', '4x2x1',
'-x', '[NA,NA]',
'-m', '%s[%s,%s,1,%s]' % (syn_metric, f, m, syn_sampling),
'-t', mysyn,
'-c', '[%s,1e-7,8]' % synits,
'-s', smoothingsigmas,
'-f', shrinkfactors,
'-u', '1',
'-z', '1',
'-l', myl,
'-o', '[%s,%s,%s]' % (outprefix, wmo, wfo)]
if maskopt is not None:
args.append('-x')
args.append(maskopt)
else:
args.append('-x')
args.append('[NA,NA]')
# ------------------------------------------------------------
elif type_of_transform == 'TRSAA':
itlen = len( reg_iterations )
itlenlow = round( itlen/2 + 0.0001 )
dlen = itlen - itlenlow
_myconvlow = [2000]*itlenlow + [0]*dlen
myconvlow = 'x'.join([str(mc) for mc in _myconvlow])
myconvhi = 'x'.join([str(r) for r in reg_iterations])
myconvhi = '[%s,1.e-7,10]' % myconvhi
args = ['-d', str(fixed.dimension),
'-r', initx,
'-m', '%s[%s,%s,1,%s,regular,0.3]' % (aff_metric, f, m, aff_sampling),
'-t', 'Translation[1]',
'-c', myconvlow,
'-s', smoothingsigmas,
'-f', shrinkfactors,
'-x', '[NA,NA]',
'-m', '%s[%s,%s,1,%s,regular,0.3]' % (aff_metric, f, m, aff_sampling),
'-t', 'Rigid[1]',
'-c', myconvlow,
'-s', smoothingsigmas,
'-f', shrinkfactors,
'-x', '[NA,NA]',
'-m', '%s[%s,%s,1,%s,regular,0.3]' % (aff_metric, f, m, aff_sampling),
'-t', 'Similarity[1]',
'-c', myconvlow,
'-s', smoothingsigmas,
'-f', shrinkfactors,
'-x', '[NA,NA]',
'-m', '%s[%s,%s,1,%s,regular,0.3]' % (aff_metric, f, m, aff_sampling),
'-t', 'Affine[1]',
'-c', myconvhi,
'-s', smoothingsigmas,
'-f', shrinkfactors,
'-x', '[NA,NA]',
'-m', '%s[%s,%s,1,%s,regular,0.3]' % (aff_metric, f, m, aff_sampling),
'-t', 'Affine[1]',
'-c', myconvhi,
'-s', smoothingsigmas,
'-f', shrinkfactors,
'-u', '1',
'-z', '1',
'-l', myl,
'-o', '[%s,%s,%s]' % (outprefix, wmo, wfo)]
if maskopt is not None:
args.append('-x')
args.append(maskopt)
else:
args.append('-x')
args.append('[NA,NA]')
# ------------------------------------------------------------s
elif type_of_transform == 'SyNabp':
args = ['-d', str(fixed.dimension),
'-r', initx,
'-m', 'mattes[%s,%s,1,32,regular,0.25]' % (f, m),
'-t', 'Rigid[0.1]',
'-c', '1000x500x250x100',
'-s', '4x2x1x0',
'-f', '8x4x2x1',
'-x', '[NA,NA]',
'-m', 'mattes[%s,%s,1,32,regular,0.25]' % (f, m),
'-t', 'Affine[0.1]',
'-c', '1000x500x250x100',
'-s', '4x2x1x0',
'-f', '8x4x2x1',
'-x', '[NA,NA]',
'-m', 'CC[%s,%s,0.5,4]' % (f,m),
'-t', 'SyN[0.1,3,0]',
'-c', '50x10x0',
'-s', '2x1x0',
'-f', '4x2x1',
'-u', '1',
'-z', '1',
'-l', myl,
'-o', '[%s,%s,%s]' % (outprefix, wmo, wfo)]
if maskopt is not None:
args.append('-x')
args.append(maskopt)
else:
args.append('-x')
args.append('[NA,NA]')
# ------------------------------------------------------------
elif type_of_transform == 'SyNLessAggro':
args = ['-d', str(fixed.dimension),
'-r', initx,
'-m', '%s[%s,%s,1,%s,regular,0.2]' % (aff_metric, f, m, aff_sampling),
'-t', 'Affine[0.25]',
'-c', '2100x1200x1200x100',
'-s', '3x2x1x0',
'-f', '4x2x2x1',
'-x', '[NA,NA]',
'-m', '%s[%s,%s,1,%s]' % (syn_metric, f, m, syn_sampling),
'-t', mysyn,
'-c', '[%s,1e-7,8]' % synits,
'-s', smoothingsigmas,
'-f', shrinkfactors,
'-u', '1',
'-z', '1',
'-l', myl,
'-o', '[%s,%s,%s]' % (outprefix, wmo, wfo)]
if maskopt is not None:
args.append('-x')
args.append(maskopt)
else:
args.append('-x')
args.append('[NA,NA]')
# ------------------------------------------------------------
elif type_of_transform == 'TVMSQ':
if grad_step is None:
grad_step = 1.0
tvtx = 'TimeVaryingVelocityField[%s, 4, 0.0,0.0, 0.5,0 ]' % str(grad_step)
args = ['-d', str(fixed.dimension),
# '-r', initx,
'-m', '%s[%s,%s,1,%s]' % (syn_metric, f, m, syn_sampling),
'-t', tvtx,
'-c', '[%s,1e-7,8]' % synits,
'-s', smoothingsigmas,
'-f', shrinkfactors,
'-u', '1',
'-z', '1',
'-l', myl,
'-o', '[%s,%s,%s]' % (outprefix, wmo, wfo)]
if maskopt is not None:
args.append('-x')
args.append(maskopt)
else:
args.append('-x')
args.append('[NA,NA]')
# ------------------------------------------------------------
elif type_of_transform == 'TVMSQC':
if grad_step is None:
grad_step = 2.0
tvtx = 'TimeVaryingVelocityField[%s, 8, 1.0,0.0, 0.05,0 ]' % str(grad_step)
args = ['-d', str(fixed.dimension),
# '-r', initx,
'-m', 'demons[%s,%s,0.5,0]' % (f, m),
'-m', 'meansquares[%s,%s,1,0]' % (f, m),
'-t', tvtx,
'-c', '[1200x1200x100x20x0,0,5]',
'-s', '8x6x4x2x1vox',
'-f', '8x6x4x2x1',
'-u', '1',
'-z', '1',
'-l', myl,
'-o', '[%s,%s,%s]' % (outprefix, wmo, wfo)]
if maskopt is not None:
args.append('-x')
args.append(maskopt)
else:
args.append('-x')
args.append('[NA,NA]')
# ------------------------------------------------------------
elif (type_of_transform == 'Rigid') or (type_of_transform == 'Similarity') or \
(type_of_transform == 'Translation') or (type_of_transform == 'Affine'):
args = ['-d', str(fixed.dimension),
'-r', initx,
'-m', '%s[%s,%s,1,%s,regular,%s]' % (aff_metric, f, m, aff_sampling, metsam),
'-t', '%s[0.25]' % type_of_transform,
'-c', myiterations,
'-s', mys_aff,
'-f', myf_aff,
'-u', '1',
'-z', '1',
'-l', myl,
'-o', '[%s,%s,%s]' % (outprefix, wmo, wfo)]
if maskopt is not None:
args.append('-x')
args.append(maskopt)
else:
args.append('-x')
args.append('[NA,NA]')
# ------------------------------------------------------------
args.append('--float')
args.append('1')
args.append('--write-composite-transform')
args.append( write_composite_transform * 1 )
if verbose:
args.append('-v')
args.append('1')
processed_args = utils._int_antsProcessArguments(args)
libfn = utils.get_lib_fn('antsRegistration')
libfn(processed_args)
afffns = glob.glob(outprefix+'*'+'[0-9]GenericAffine.mat')
fwarpfns = glob.glob(outprefix+'*'+'[0-9]Warp.nii.gz')
iwarpfns = glob.glob(outprefix+'*'+'[0-9]InverseWarp.nii.gz')
#print(afffns, fwarpfns, iwarpfns)
if len(afffns) == 0:
afffns = ''
if len(fwarpfns) == 0:
fwarpfns = ''
if len(iwarpfns) == 0:
iwarpfns = ''
alltx = sorted(glob.glob(outprefix+'*'+'[0-9]*'))
findinv = np.where([re.search('[0-9]InverseWarp.nii.gz',ff) for ff in alltx])[0]
findfwd = np.where([re.search('[0-9]Warp.nii.gz', ff) for ff in alltx])[0]
if len(findinv) > 0:
fwdtransforms = list(reversed([ff for idx,ff in enumerate(alltx) if idx !=findinv[0]]))
invtransforms = [ff for idx,ff in enumerate(alltx) if idx!=findfwd[0]]
else:
fwdtransforms = list(reversed(alltx))
invtransforms = alltx
if write_composite_transform:
fwdtransforms = outprefix + 'Composite.h5'
invtransforms = outprefix + 'InverseComposite.h5'
return {
'warpedmovout': warpedmovout.clone(inpixeltype),
'warpedfixout': warpedfixout.clone(inpixeltype),
'fwdtransforms': fwdtransforms,
'invtransforms': invtransforms
}
else:
args.append('--float')
args.append('1')
args.append('--write-composite-transform')
args.append( write_composite_transform * 1 )
if verbose:
args.append('-v')
args.append('1')
processed_args = utils._int_antsProcessArguments(args)
libfn = utils.get_lib_fn('antsRegistration')
libfn(processed_args)
return 0 | Register a pair of images either through the full or simplified
interface to the ANTs registration method.
ANTsR function: `antsRegistration`
Arguments
---------
fixed : ANTsImage
fixed image to which we register the moving image.
moving : ANTsImage
moving image to be mapped to fixed space.
type_of_transform : string
A linear or non-linear registration type. Mutual information metric by default.
See Notes below for more.
initial_transform : list of strings (optional)
transforms to prepend
outprefix : string
output will be named with this prefix.
mask : ANTsImage (optional)
mask the registration.
grad_step : scalar
gradient step size (not for all tx)
flow_sigma : scalar
smoothing for update field
total_sigma : scalar
smoothing for total field
aff_metric : string
the metric for the affine part (GC, mattes, meansquares)
aff_sampling : scalar
the nbins or radius parameter for the syn metric
syn_metric : string
the metric for the syn part (CC, mattes, meansquares, demons)
syn_sampling : scalar
the nbins or radius parameter for the syn metric
reg_iterations : list/tuple of integers
vector of iterations for syn. we will set the smoothing and multi-resolution parameters based on the length of this vector.
write_composite_transform : boolean
Boolean specifying whether or not the composite transform (and its inverse, if it exists) should be written to an hdf5 composite file. This is false by default so that only the transform for each stage is written to file.
verbose : boolean
request verbose output (useful for debugging)
multivariate_extras : additional metrics for multi-metric registration
list of additional images and metrics which will
trigger the use of multiple metrics in the registration
process in the deformable stage. Each multivariate metric needs 5
entries: name of metric, fixed, moving, weight,
samplingParam. the list of lists should be of the form ( (
"nameOfMetric2", img, img, weight, metricParam ) ). Another
example would be ( ( "MeanSquares", f2, m2, 0.5, 0
), ( "CC", f2, m2, 0.5, 2 ) ) . This is only compatible
with the SyNOnly transformation.
kwargs : keyword args
extra arguments
Returns
-------
dict containing the following key/value pairs:
`warpedmovout`: Moving image warped to space of fixed image.
`warpedfixout`: Fixed image warped to space of moving image.
`fwdtransforms`: Transforms to move from moving to fixed image.
`invtransforms`: Transforms to move from fixed to moving image.
Notes
-----
type_of_transform can be one of:
- "Translation": Translation transformation.
- "Rigid": Rigid transformation: Only rotation and translation.
- "Similarity": Similarity transformation: scaling, rotation and translation.
- "QuickRigid": Rigid transformation: Only rotation and translation.
May be useful for quick visualization fixes.'
- "DenseRigid": Rigid transformation: Only rotation and translation.
Employs dense sampling during metric estimation.'
- "BOLDRigid": Rigid transformation: Parameters typical for BOLD to
BOLD intrasubject registration'.'
- "Affine": Affine transformation: Rigid + scaling.
- "AffineFast": Fast version of Affine.
- "BOLDAffine": Affine transformation: Parameters typical for BOLD to
BOLD intrasubject registration'.'
- "TRSAA": translation, rigid, similarity, affine (twice). please set
regIterations if using this option. this would be used in
cases where you want a really high quality affine mapping
(perhaps with mask).
- "ElasticSyN": Symmetric normalization: Affine + deformable
transformation, with mutual information as optimization
metric and elastic regularization.
- "SyN": Symmetric normalization: Affine + deformable transformation,
with mutual information as optimization metric.
- "SyNRA": Symmetric normalization: Rigid + Affine + deformable
transformation, with mutual information as optimization metric.
- "SyNOnly": Symmetric normalization: no initial transformation,
with mutual information as optimization metric. Assumes
images are aligned by an initial transformation. Can be
useful if you want to run an unmasked affine followed by
masked deformable registration.
- "SyNCC": SyN, but with cross-correlation as the metric.
- "SyNabp": SyN optimized for abpBrainExtraction.
- "SyNBold": SyN, but optimized for registrations between BOLD and T1 images.
- "SyNBoldAff": SyN, but optimized for registrations between BOLD
and T1 images, with additional affine step.
- "SyNAggro": SyN, but with more aggressive registration
(fine-scale matching and more deformation).
Takes more time than SyN.
- "TVMSQ": time-varying diffeomorphism with mean square metric
- "TVMSQC": time-varying diffeomorphism with mean square metric for very large deformation
Example
-------
>>> import ants
>>> fi = ants.image_read(ants.get_ants_data('r16'))
>>> mi = ants.image_read(ants.get_ants_data('r64'))
>>> fi = ants.resample_image(fi, (60,60), 1, 0)
>>> mi = ants.resample_image(mi, (60,60), 1, 0)
>>> mytx = ants.registration(fixed=fi, moving=mi, type_of_transform = 'SyN' ) | Below is the instruction that describes the task:
### Input:
Register a pair of images either through the full or simplified
interface to the ANTs registration method.
ANTsR function: `antsRegistration`
Arguments
---------
fixed : ANTsImage
fixed image to which we register the moving image.
moving : ANTsImage
moving image to be mapped to fixed space.
type_of_transform : string
A linear or non-linear registration type. Mutual information metric by default.
See Notes below for more.
initial_transform : list of strings (optional)
transforms to prepend
outprefix : string
output will be named with this prefix.
mask : ANTsImage (optional)
mask the registration.
grad_step : scalar
gradient step size (not for all tx)
flow_sigma : scalar
smoothing for update field
total_sigma : scalar
smoothing for total field
aff_metric : string
the metric for the affine part (GC, mattes, meansquares)
aff_sampling : scalar
the nbins or radius parameter for the syn metric
syn_metric : string
the metric for the syn part (CC, mattes, meansquares, demons)
syn_sampling : scalar
the nbins or radius parameter for the syn metric
reg_iterations : list/tuple of integers
vector of iterations for syn. we will set the smoothing and multi-resolution parameters based on the length of this vector.
write_composite_transform : boolean
Boolean specifying whether or not the composite transform (and its inverse, if it exists) should be written to an hdf5 composite file. This is false by default so that only the transform for each stage is written to file.
verbose : boolean
request verbose output (useful for debugging)
multivariate_extras : additional metrics for multi-metric registration
list of additional images and metrics which will
trigger the use of multiple metrics in the registration
process in the deformable stage. Each multivariate metric needs 5
entries: name of metric, fixed, moving, weight,
samplingParam. the list of lists should be of the form ( (
"nameOfMetric2", img, img, weight, metricParam ) ). Another
example would be ( ( "MeanSquares", f2, m2, 0.5, 0
), ( "CC", f2, m2, 0.5, 2 ) ) . This is only compatible
with the SyNOnly transformation.
kwargs : keyword args
extra arguments
Returns
-------
dict containing the following key/value pairs:
`warpedmovout`: Moving image warped to space of fixed image.
`warpedfixout`: Fixed image warped to space of moving image.
`fwdtransforms`: Transforms to move from moving to fixed image.
`invtransforms`: Transforms to move from fixed to moving image.
Notes
-----
type_of_transform can be one of:
- "Translation": Translation transformation.
- "Rigid": Rigid transformation: Only rotation and translation.
- "Similarity": Similarity transformation: scaling, rotation and translation.
- "QuickRigid": Rigid transformation: Only rotation and translation.
May be useful for quick visualization fixes.'
- "DenseRigid": Rigid transformation: Only rotation and translation.
Employs dense sampling during metric estimation.'
- "BOLDRigid": Rigid transformation: Parameters typical for BOLD to
BOLD intrasubject registration'.'
- "Affine": Affine transformation: Rigid + scaling.
- "AffineFast": Fast version of Affine.
- "BOLDAffine": Affine transformation: Parameters typical for BOLD to
BOLD intrasubject registration'.'
- "TRSAA": translation, rigid, similarity, affine (twice). please set
regIterations if using this option. this would be used in
cases where you want a really high quality affine mapping
(perhaps with mask).
- "ElasticSyN": Symmetric normalization: Affine + deformable
transformation, with mutual information as optimization
metric and elastic regularization.
- "SyN": Symmetric normalization: Affine + deformable transformation,
with mutual information as optimization metric.
- "SyNRA": Symmetric normalization: Rigid + Affine + deformable
transformation, with mutual information as optimization metric.
- "SyNOnly": Symmetric normalization: no initial transformation,
with mutual information as optimization metric. Assumes
images are aligned by an initial transformation. Can be
useful if you want to run an unmasked affine followed by
masked deformable registration.
- "SyNCC": SyN, but with cross-correlation as the metric.
- "SyNabp": SyN optimized for abpBrainExtraction.
- "SyNBold": SyN, but optimized for registrations between BOLD and T1 images.
- "SyNBoldAff": SyN, but optimized for registrations between BOLD
and T1 images, with additional affine step.
- "SyNAggro": SyN, but with more aggressive registration
(fine-scale matching and more deformation).
Takes more time than SyN.
- "TVMSQ": time-varying diffeomorphism with mean square metric
- "TVMSQC": time-varying diffeomorphism with mean square metric for very large deformation
Example
-------
>>> import ants
>>> fi = ants.image_read(ants.get_ants_data('r16'))
>>> mi = ants.image_read(ants.get_ants_data('r64'))
>>> fi = ants.resample_image(fi, (60,60), 1, 0)
>>> mi = ants.resample_image(mi, (60,60), 1, 0)
>>> mytx = ants.registration(fixed=fi, moving=mi, type_of_transform = 'SyN' )
### Response:
def registration(fixed,
moving,
type_of_transform='SyN',
initial_transform=None,
outprefix='',
mask=None,
grad_step=0.2,
flow_sigma=3,
total_sigma=0,
aff_metric='mattes',
aff_sampling=32,
syn_metric='mattes',
syn_sampling=32,
reg_iterations=(40,20,0),
write_composite_transform=False,
verbose=False,
multivariate_extras=None,
**kwargs):
"""
Register a pair of images either through the full or simplified
interface to the ANTs registration method.
ANTsR function: `antsRegistration`
Arguments
---------
fixed : ANTsImage
fixed image to which we register the moving image.
moving : ANTsImage
moving image to be mapped to fixed space.
type_of_transform : string
A linear or non-linear registration type. Mutual information metric by default.
See Notes below for more.
initial_transform : list of strings (optional)
transforms to prepend
outprefix : string
output will be named with this prefix.
mask : ANTsImage (optional)
mask the registration.
grad_step : scalar
gradient step size (not for all tx)
flow_sigma : scalar
smoothing for update field
total_sigma : scalar
smoothing for total field
aff_metric : string
the metric for the affine part (GC, mattes, meansquares)
aff_sampling : scalar
the nbins or radius parameter for the syn metric
syn_metric : string
the metric for the syn part (CC, mattes, meansquares, demons)
syn_sampling : scalar
the nbins or radius parameter for the syn metric
reg_iterations : list/tuple of integers
vector of iterations for syn. we will set the smoothing and multi-resolution parameters based on the length of this vector.
write_composite_transform : boolean
Boolean specifying whether or not the composite transform (and its inverse, if it exists) should be written to an hdf5 composite file. This is false by default so that only the transform for each stage is written to file.
verbose : boolean
request verbose output (useful for debugging)
multivariate_extras : additional metrics for multi-metric registration
list of additional images and metrics which will
trigger the use of multiple metrics in the registration
process in the deformable stage. Each multivariate metric needs 5
entries: name of metric, fixed, moving, weight,
samplingParam. the list of lists should be of the form ( (
"nameOfMetric2", img, img, weight, metricParam ) ). Another
example would be ( ( "MeanSquares", f2, m2, 0.5, 0
), ( "CC", f2, m2, 0.5, 2 ) ) . This is only compatible
with the SyNOnly transformation.
kwargs : keyword args
extra arguments
Returns
-------
dict containing follow key/value pairs:
`warpedmovout`: Moving image warped to space of fixed image.
`warpedfixout`: Fixed image warped to space of moving image.
`fwdtransforms`: Transforms to move from moving to fixed image.
`invtransforms`: Transforms to move from fixed to moving image.
Notes
-----
typeofTransform can be one of:
- "Translation": Translation transformation.
- "Rigid": Rigid transformation: Only rotation and translation.
- "Similarity": Similarity transformation: scaling, rotation and translation.
- "QuickRigid": Rigid transformation: Only rotation and translation.
May be useful for quick visualization fixes.'
- "DenseRigid": Rigid transformation: Only rotation and translation.
Employs dense sampling during metric estimation.'
- "BOLDRigid": Rigid transformation: Parameters typical for BOLD to
BOLD intrasubject registration'.'
- "Affine": Affine transformation: Rigid + scaling.
- "AffineFast": Fast version of Affine.
- "BOLDAffine": Affine transformation: Parameters typical for BOLD to
BOLD intrasubject registration'.'
- "TRSAA": translation, rigid, similarity, affine (twice). please set
regIterations if using this option. this would be used in
cases where you want a really high quality affine mapping
(perhaps with mask).
- "ElasticSyN": Symmetric normalization: Affine + deformable
transformation, with mutual information as optimization
metric and elastic regularization.
- "SyN": Symmetric normalization: Affine + deformable transformation,
with mutual information as optimization metric.
- "SyNRA": Symmetric normalization: Rigid + Affine + deformable
transformation, with mutual information as optimization metric.
- "SyNOnly": Symmetric normalization: no initial transformation,
with mutual information as optimization metric. Assumes
images are aligned by an inital transformation. Can be
useful if you want to run an unmasked affine followed by
masked deformable registration.
- "SyNCC": SyN, but with cross-correlation as the metric.
- "SyNabp": SyN optimized for abpBrainExtraction.
- "SyNBold": SyN, but optimized for registrations between BOLD and T1 images.
- "SyNBoldAff": SyN, but optimized for registrations between BOLD
and T1 images, with additional affine step.
- "SyNAggro": SyN, but with more aggressive registration
(fine-scale matching and more deformation).
Takes more time than SyN.
- "TVMSQ": time-varying diffeomorphism with mean square metric
- "TVMSQC": time-varying diffeomorphism with mean square metric for very large deformation
Example
-------
>>> import ants
>>> fi = ants.image_read(ants.get_ants_data('r16'))
>>> mi = ants.image_read(ants.get_ants_data('r64'))
>>> fi = ants.resample_image(fi, (60,60), 1, 0)
>>> mi = ants.resample_image(mi, (60,60), 1, 0)
>>> mytx = ants.registration(fixed=fi, moving=mi, type_of_transform = 'SyN' )
"""
if isinstance(fixed, list) and (moving is None):
processed_args = utils._int_antsProcessArguments(fixed)
libfn = utils.get_lib_fn('antsRegistration')
libfn(processed_args)
return 0
if type_of_transform == '':
type_of_transform = 'SyN'
if isinstance(type_of_transform, (tuple,list)) and (len(type_of_transform) == 1):
type_of_transform = type_of_transform[0]
if (outprefix == '') or len(outprefix) == 0:
outprefix = mktemp()
if (np.sum(np.isnan(fixed.numpy())) > 0):
raise ValueError('fixed image has NaNs - replace these')
if (np.sum(np.isnan(moving.numpy())) > 0):
raise ValueError('moving image has NaNs - replace these')
#----------------------------
args = [fixed, moving, type_of_transform, outprefix]
myl = 0
myf_aff = '6x4x2x1'
mys_aff = '3x2x1x0'
metsam = 0.2
myiterations = '2100x1200x1200x10'
if (type_of_transform == 'AffineFast'):
type_of_transform = 'Affine'
myiterations = '2100x1200x0x0'
if (type_of_transform == 'BOLDAffine'):
type_of_transform = 'Affine'
myf_aff='2x1'
mys_aff='1x0'
myiterations = '100x20'
myl=1
if (type_of_transform == 'QuickRigid'):
type_of_transform = 'Rigid'
myiterations = '20x20x0x0'
if (type_of_transform == 'DenseRigid'):
type_of_transform = 'Rigid'
metsam = 0.8
if (type_of_transform == 'BOLDRigid'):
type_of_transform = 'Rigid'
myf_aff='2x1'
mys_aff='1x0'
myiterations = '100x20'
myl=1
mysyn = 'SyN[%f,%f,%f]' % (grad_step, flow_sigma, total_sigma)
itlen = len(reg_iterations)# NEED TO CHECK THIS
if itlen == 0:
smoothingsigmas = 0
shrinkfactors = 1
synits = reg_iterations
else:
smoothingsigmas = np.arange(0, itlen)[::-1].astype('float32') # NEED TO CHECK THIS
shrinkfactors = 2**smoothingsigmas
shrinkfactors = shrinkfactors.astype('int')
smoothingsigmas = 'x'.join([str(ss)[0] for ss in smoothingsigmas])
shrinkfactors = 'x'.join([str(ss) for ss in shrinkfactors])
synits = 'x'.join([str(ri) for ri in reg_iterations])
if not isinstance(fixed, str):
if isinstance(fixed, iio.ANTsImage) and isinstance(moving, iio.ANTsImage):
inpixeltype = fixed.pixeltype
ttexists = False
allowable_tx = {'SyNBold','SyNBoldAff', 'ElasticSyN','SyN','SyNRA',
'SyNOnly','SyNAggro','SyNCC','TRSAA','SyNabp','SyNLessAggro',
'TVMSQ','TVMSQC','Rigid','Similarity','Translation','Affine',
'AffineFast','BOLDAffine','QuickRigid','DenseRigid','BOLDRigid'}
ttexists = type_of_transform in allowable_tx
if not ttexists:
raise ValueError('`type_of_transform` does not exist')
if ttexists:
initx = initial_transform
#if isinstance(initx, ANTsTransform):
#tempTXfilename = tempfile( fileext = '.mat' )
#initx = invertAntsrTransform( initialTransform )
#initx = invertAntsrTransform( initx )
#writeAntsrTransform( initx, tempTXfilename )
#initx = tempTXfilename
moving = moving.clone('float')
fixed = fixed.clone('float')
warpedfixout = moving.clone()
warpedmovout = fixed.clone()
f = utils.get_pointer_string(fixed)
m = utils.get_pointer_string(moving)
wfo = utils.get_pointer_string(warpedfixout)
wmo = utils.get_pointer_string(warpedmovout)
if mask is not None:
mask_scale = mask - mask.min()
mask_scale = mask_scale / mask_scale.max() * 255.
charmask = mask_scale.clone('unsigned char')
maskopt = '[%s,NA]' % (utils.get_pointer_string(charmask))
else:
maskopt = None
if initx is None:
initx = '[%s,%s,1]' % (f, m)
# ------------------------------------------------------------
if type_of_transform == 'SyNBold':
args = ['-d', str(fixed.dimension),
'-r', initx,
'-m', '%s[%s,%s,1,%s,regular,0.2]' % (aff_metric, f, m, aff_sampling),
'-t', 'Rigid[0.25]',
'-c', '[1200x1200x100,1e-6,5]',
'-s', '2x1x0',
'-f', '4x2x1',
'-x', '[NA,NA]',
'-m', '%s[%s,%s,1,%s]' % (syn_metric, f, m, syn_sampling),
'-t', mysyn,
'-c', '[%s,1e-7,8]' % synits,
'-s', smoothingsigmas,
'-f', shrinkfactors,
'-u', '1',
'-z', '1',
'-l', myl,
'-o', '[%s,%s,%s]' % (outprefix, wmo, wfo)]
if maskopt is not None:
args.append('-x')
args.append(maskopt)
else:
args.append('-x')
args.append('[NA,NA]')
# ------------------------------------------------------------
elif type_of_transform == 'SyNBoldAff':
args = ['-d', str(fixed.dimension),
'-r', initx,
'-m', '%s[%s,%s,1,%s,regular,0.2]' % (aff_metric, f, m, aff_sampling),
'-t', 'Rigid[0.25]',
'-c', '[1200x1200x100,1e-6,5]',
'-s', '2x1x0',
'-f', '4x2x1',
'-x', '[NA,NA]',
'-m', '%s[%s,%s,1,%s,regular,0.2]' % (aff_metric, f, m, aff_sampling),
'-t', 'Affine[0.25]',
'-c', '[200x20,1e-6,5]',
'-s', '1x0',
'-f', '2x1',
'-x', '[NA,NA]',
'-m', '%s[%s,%s,1,%s]' % (syn_metric, f, m, syn_sampling),
'-t', mysyn,
'-c', '[%s,1e-7,8]' % (synits),
'-s', smoothingsigmas,
'-f', shrinkfactors,
'-u', '1',
'-z', '1',
'-l', myl,
'-o', '[%s,%s,%s]' % (outprefix, wmo, wfo)]
if maskopt is not None:
args.append('-x')
args.append(maskopt)
else:
args.append('-x')
args.append('[NA,NA]')
# ------------------------------------------------------------
elif type_of_transform == 'ElasticSyN':
args = ['-d', str(fixed.dimension),
'-r', initx,
'-m', '%s[%s,%s,1,%s,regular,0.2]' % (aff_metric, f, m, aff_sampling),
'-t', 'Affine[0.25]',
'-c', '2100x1200x200x0',
'-s', '3x2x1x0',
'-f', '4x2x2x1',
'-x', '[NA,NA]',
'-m', '%s[%s,%s,1,%s]' % (syn_metric, f, m, syn_sampling),
'-t', mysyn,
'-c', '[%s,1e-7,8]' % (synits),
'-s', smoothingsigmas,
'-f', shrinkfactors,
'-u', '1',
'-z', '1',
'-l', myl,
'-o', '[%s,%s,%s]' % (outprefix, wmo, wfo)]
if maskopt is not None:
args.append('-x')
args.append(maskopt)
else:
args.append('-x')
args.append('[NA,NA]')
# ------------------------------------------------------------
elif type_of_transform == 'SyN':
args = ['-d', str(fixed.dimension),
'-r', initx,
'-m', '%s[%s,%s,1,%s,regular,0.2]' % (aff_metric, f, m, aff_sampling),
'-t', 'Affine[0.25]',
'-c', '2100x1200x1200x0',
'-s', '3x2x1x0',
'-f', '4x2x2x1',
'-x', '[NA,NA]',
'-m', '%s[%s,%s,1,%s]' % (syn_metric, f, m, syn_sampling),
'-t', mysyn,
'-c', '[%s,1e-7,8]' % synits,
'-s', smoothingsigmas,
'-f', shrinkfactors,
'-u', '1',
'-z', '1',
'-l', myl,
'-o', '[%s,%s,%s]' % (outprefix, wmo, wfo)]
if maskopt is not None:
args.append('-x')
args.append(maskopt)
else:
args.append('-x')
args.append('[NA,NA]')
# ------------------------------------------------------------
elif type_of_transform == 'SyNRA':
args = ['-d', str(fixed.dimension),
'-r', initx,
'-m', '%s[%s,%s,1,%s,regular,0.2]' % (aff_metric, f, m, aff_sampling),
'-t', 'Rigid[0.25]',
'-c', '2100x1200x1200x0',
'-s', '3x2x1x0',
'-f', '4x2x2x1',
'-x', '[NA,NA]',
'-m', '%s[%s,%s,1,%s,regular,0.2]' % (aff_metric, f, m, aff_sampling),
'-t', 'Affine[0.25]',
'-c', '2100x1200x1200x0',
'-s', '3x2x1x0',
'-f', '4x2x2x1',
'-x', '[NA,NA]',
'-m', '%s[%s,%s,1,%s]' % (syn_metric, f, m, syn_sampling),
'-t', mysyn,
'-c', '[%s,1e-7,8]' % synits,
'-s', smoothingsigmas,
'-f', shrinkfactors,
'-u', '1',
'-z', '1',
'-l', myl,
'-o', '[%s,%s,%s]' % (outprefix, wmo, wfo)]
if maskopt is not None:
args.append('-x')
args.append(maskopt)
else:
args.append('-x')
args.append('[NA,NA]')
# ------------------------------------------------------------
elif type_of_transform == 'SyNOnly':
args = ['-d', str(fixed.dimension),
'-r', initx,
'-m', '%s[%s,%s,1,%s]' % (syn_metric, f, m, syn_sampling),
'-t', mysyn,
'-c', '[%s,1e-7,8]' % synits,
'-s', smoothingsigmas,
'-f', shrinkfactors,
'-u', '1',
'-z', '1',
'-l', myl,
'-o', '[%s,%s,%s]' % (outprefix, wmo, wfo)]
if multivariate_extras is not None:
metrics = [ ]
for kk in range( len( multivariate_extras ) ):
metrics.append( '-m' )
metricname = multivariate_extras[kk][0]
metricfixed = utils.get_pointer_string(multivariate_extras[kk][1])
metricmov = utils.get_pointer_string(multivariate_extras[kk][2])
metricWeight = multivariate_extras[kk][3]
metricSampling = multivariate_extras[kk][4]
metricString = "%s[%s,%s,%s,%s]" % (metricname,metricfixed,metricmov,metricWeight,metricSampling)
metrics.append( metricString )
args = ['-d', str(fixed.dimension),
'-r', initx,
'-m', '%s[%s,%s,1,%s]' % (syn_metric, f, m, syn_sampling) ]
args1= ['-t', mysyn,
'-c', '[%s,1e-7,8]' % synits,
'-s', smoothingsigmas,
'-f', shrinkfactors,
'-u', '1',
'-z', '1',
'-l', myl,
'-o', '[%s,%s,%s]' % (outprefix, wmo, wfo)]
for kk in range( len( metrics ) ):
args.append( metrics[kk] )
for kk in range( len( args1 ) ):
args.append( args1[kk] )
if maskopt is not None:
args.append('-x')
args.append(maskopt)
else:
args.append('-x')
args.append('[NA,NA]')
# ------------------------------------------------------------
elif type_of_transform == 'SyNAggro':
args = ['-d', str(fixed.dimension),
'-r', initx,
'-m', '%s[%s,%s,1,%s,regular,0.2]' % (aff_metric, f, m, aff_sampling),
'-t', 'Affine[0.25]',
'-c', '2100x1200x1200x100',
'-s', '3x2x1x0',
'-f', '4x2x2x1',
'-x', '[NA,NA]',
'-m', '%s[%s,%s,1,%s]' % (syn_metric, f, m, syn_sampling),
'-t', mysyn,
'-c', '[%s,1e-7,8]' % synits,
'-s', smoothingsigmas,
'-f', shrinkfactors,
'-u', '1',
'-z', '1',
'-l', myl,
'-o', '[%s,%s,%s]' % (outprefix, wmo, wfo)]
if maskopt is not None:
args.append('-x')
args.append(maskopt)
else:
args.append('-x')
args.append('[NA,NA]')
# ------------------------------------------------------------
elif type_of_transform == 'SyNCC':
syn_metric = 'CC'
syn_sampling = 4
synits = '2100x1200x1200x20'
smoothingsigmas = '3x2x1x0'
shrinkfactors = '4x3x2x1'
mysyn = 'SyN[0.15,3,0]'
args = ['-d', str(fixed.dimension),
'-r', initx,
'-m', '%s[%s,%s,1,%s,regular,0.2]' % (aff_metric, f, m, aff_sampling),
'-t', 'Rigid[1]',
'-c', '2100x1200x1200x0',
'-s', '3x2x1x0',
'-f', '4x4x2x1',
'-x', '[NA,NA]',
'-m', '%s[%s,%s,1,%s,regular,0.2]' % (aff_metric, f, m, aff_sampling),
'-t', 'Affine[1]',
'-c', '1200x1200x100',
'-s', '2x1x0',
'-f', '4x2x1',
'-x', '[NA,NA]',
'-m', '%s[%s,%s,1,%s]' % (syn_metric, f, m, syn_sampling),
'-t', mysyn,
'-c', '[%s,1e-7,8]' % synits,
'-s', smoothingsigmas,
'-f', shrinkfactors,
'-u', '1',
'-z', '1',
'-l', myl,
'-o', '[%s,%s,%s]' % (outprefix, wmo, wfo)]
if maskopt is not None:
args.append('-x')
args.append(maskopt)
else:
args.append('-x')
args.append('[NA,NA]')
# ------------------------------------------------------------
elif type_of_transform == 'TRSAA':
itlen = len( reg_iterations )
itlenlow = round( itlen/2 + 0.0001 )
dlen = itlen - itlenlow
_myconvlow = [2000]*itlenlow + [0]*dlen
myconvlow = 'x'.join([str(mc) for mc in _myconvlow])
myconvhi = 'x'.join([str(r) for r in reg_iterations])
myconvhi = '[%s,1.e-7,10]' % myconvhi
args = ['-d', str(fixed.dimension),
'-r', initx,
'-m', '%s[%s,%s,1,%s,regular,0.3]' % (aff_metric, f, m, aff_sampling),
'-t', 'Translation[1]',
'-c', myconvlow,
'-s', smoothingsigmas,
'-f', shrinkfactors,
'-x', '[NA,NA]',
'-m', '%s[%s,%s,1,%s,regular,0.3]' % (aff_metric, f, m, aff_sampling),
'-t', 'Rigid[1]',
'-c', myconvlow,
'-s', smoothingsigmas,
'-f', shrinkfactors,
'-x', '[NA,NA]',
'-m', '%s[%s,%s,1,%s,regular,0.3]' % (aff_metric, f, m, aff_sampling),
'-t', 'Similarity[1]',
'-c', myconvlow,
'-s', smoothingsigmas,
'-f', shrinkfactors,
'-x', '[NA,NA]',
'-m', '%s[%s,%s,1,%s,regular,0.3]' % (aff_metric, f, m, aff_sampling),
'-t', 'Affine[1]',
'-c', myconvhi,
'-s', smoothingsigmas,
'-f', shrinkfactors,
'-x', '[NA,NA]',
'-m', '%s[%s,%s,1,%s,regular,0.3]' % (aff_metric, f, m, aff_sampling),
'-t', 'Affine[1]',
'-c', myconvhi,
'-s', smoothingsigmas,
'-f', shrinkfactors,
'-u', '1',
'-z', '1',
'-l', myl,
'-o', '[%s,%s,%s]' % (outprefix, wmo, wfo)]
if maskopt is not None:
args.append('-x')
args.append(maskopt)
else:
args.append('-x')
args.append('[NA,NA]')
# ------------------------------------------------------------s
elif type_of_transform == 'SyNabp':
args = ['-d', str(fixed.dimension),
'-r', initx,
'-m', 'mattes[%s,%s,1,32,regular,0.25]' % (f, m),
'-t', 'Rigid[0.1]',
'-c', '1000x500x250x100',
'-s', '4x2x1x0',
'-f', '8x4x2x1',
'-x', '[NA,NA]',
'-m', 'mattes[%s,%s,1,32,regular,0.25]' % (f, m),
'-t', 'Affine[0.1]',
'-c', '1000x500x250x100',
'-s', '4x2x1x0',
'-f', '8x4x2x1',
'-x', '[NA,NA]',
'-m', 'CC[%s,%s,0.5,4]' % (f,m),
'-t', 'SyN[0.1,3,0]',
'-c', '50x10x0',
'-s', '2x1x0',
'-f', '4x2x1',
'-u', '1',
'-z', '1',
'-l', myl,
'-o', '[%s,%s,%s]' % (outprefix, wmo, wfo)]
if maskopt is not None:
args.append('-x')
args.append(maskopt)
else:
args.append('-x')
args.append('[NA,NA]')
# ------------------------------------------------------------
elif type_of_transform == 'SyNLessAggro':
args = ['-d', str(fixed.dimension),
'-r', initx,
'-m', '%s[%s,%s,1,%s,regular,0.2]' % (aff_metric, f, m, aff_sampling),
'-t', 'Affine[0.25]',
'-c', '2100x1200x1200x100',
'-s', '3x2x1x0',
'-f', '4x2x2x1',
'-x', '[NA,NA]',
'-m', '%s[%s,%s,1,%s]' % (syn_metric, f, m, syn_sampling),
'-t', mysyn,
'-c', '[%s,1e-7,8]' % synits,
'-s', smoothingsigmas,
'-f', shrinkfactors,
'-u', '1',
'-z', '1',
'-l', myl,
'-o', '[%s,%s,%s]' % (outprefix, wmo, wfo)]
if maskopt is not None:
args.append('-x')
args.append(maskopt)
else:
args.append('-x')
args.append('[NA,NA]')
# ------------------------------------------------------------
elif type_of_transform == 'TVMSQ':
if grad_step is None:
grad_step = 1.0
tvtx = 'TimeVaryingVelocityField[%s, 4, 0.0,0.0, 0.5,0 ]' % str(grad_step)
args = ['-d', str(fixed.dimension),
# '-r', initx,
'-m', '%s[%s,%s,1,%s]' % (syn_metric, f, m, syn_sampling),
'-t', tvtx,
'-c', '[%s,1e-7,8]' % synits,
'-s', smoothingsigmas,
'-f', shrinkfactors,
'-u', '1',
'-z', '1',
'-l', myl,
'-o', '[%s,%s,%s]' % (outprefix, wmo, wfo)]
if maskopt is not None:
args.append('-x')
args.append(maskopt)
else:
args.append('-x')
args.append('[NA,NA]')
# ------------------------------------------------------------
elif type_of_transform == 'TVMSQC':
if grad_step is None:
grad_step = 2.0
tvtx = 'TimeVaryingVelocityField[%s, 8, 1.0,0.0, 0.05,0 ]' % str(grad_step)
args = ['-d', str(fixed.dimension),
# '-r', initx,
'-m', 'demons[%s,%s,0.5,0]' % (f, m),
'-m', 'meansquares[%s,%s,1,0]' % (f, m),
'-t', tvtx,
'-c', '[1200x1200x100x20x0,0,5]',
'-s', '8x6x4x2x1vox',
'-f', '8x6x4x2x1',
'-u', '1',
'-z', '1',
'-l', myl,
'-o', '[%s,%s,%s]' % (outprefix, wmo, wfo)]
if maskopt is not None:
args.append('-x')
args.append(maskopt)
else:
args.append('-x')
args.append('[NA,NA]')
# ------------------------------------------------------------
elif (type_of_transform == 'Rigid') or (type_of_transform == 'Similarity') or \
(type_of_transform == 'Translation') or (type_of_transform == 'Affine'):
args = ['-d', str(fixed.dimension),
'-r', initx,
'-m', '%s[%s,%s,1,%s,regular,%s]' % (aff_metric, f, m, aff_sampling, metsam),
'-t', '%s[0.25]' % type_of_transform,
'-c', myiterations,
'-s', mys_aff,
'-f', myf_aff,
'-u', '1',
'-z', '1',
'-l', myl,
'-o', '[%s,%s,%s]' % (outprefix, wmo, wfo)]
if maskopt is not None:
args.append('-x')
args.append(maskopt)
else:
args.append('-x')
args.append('[NA,NA]')
# ------------------------------------------------------------
args.append('--float')
args.append('1')
args.append('--write-composite-transform')
args.append( write_composite_transform * 1 )
if verbose:
args.append('-v')
args.append('1')
processed_args = utils._int_antsProcessArguments(args)
libfn = utils.get_lib_fn('antsRegistration')
libfn(processed_args)
afffns = glob.glob(outprefix+'*'+'[0-9]GenericAffine.mat')
fwarpfns = glob.glob(outprefix+'*'+'[0-9]Warp.nii.gz')
iwarpfns = glob.glob(outprefix+'*'+'[0-9]InverseWarp.nii.gz')
#print(afffns, fwarpfns, iwarpfns)
if len(afffns) == 0:
afffns = ''
if len(fwarpfns) == 0:
fwarpfns = ''
if len(iwarpfns) == 0:
iwarpfns = ''
alltx = sorted(glob.glob(outprefix+'*'+'[0-9]*'))
findinv = np.where([re.search('[0-9]InverseWarp.nii.gz',ff) for ff in alltx])[0]
findfwd = np.where([re.search('[0-9]Warp.nii.gz', ff) for ff in alltx])[0]
if len(findinv) > 0:
fwdtransforms = list(reversed([ff for idx,ff in enumerate(alltx) if idx !=findinv[0]]))
invtransforms = [ff for idx,ff in enumerate(alltx) if idx!=findfwd[0]]
else:
fwdtransforms = list(reversed(alltx))
invtransforms = alltx
if write_composite_transform:
fwdtransforms = outprefix + 'Composite.h5'
invtransforms = outprefix + 'InverseComposite.h5'
return {
'warpedmovout': warpedmovout.clone(inpixeltype),
'warpedfixout': warpedfixout.clone(inpixeltype),
'fwdtransforms': fwdtransforms,
'invtransforms': invtransforms
}
else:
args.append('--float')
args.append('1')
args.append('--write-composite-transform')
args.append( write_composite_transform * 1 )
if verbose:
args.append('-v')
args.append('1')
processed_args = utils._int_antsProcessArguments(args)
libfn = utils.get_lib_fn('antsRegistration')
libfn(processed_args)
return 0 |
def _get_scalar_names(self, limit=None):
"""Only give scalar options that have a varying range"""
names = []
if limit == 'point':
inpnames = list(self.input_dataset.point_arrays.keys())
elif limit == 'cell':
inpnames = list(self.input_dataset.cell_arrays.keys())
else:
inpnames = self.input_dataset.scalar_names
for name in inpnames:
arr = self.input_dataset.get_scalar(name)
rng = self.input_dataset.get_data_range(name)
if arr is not None and arr.size > 0 and (rng[1]-rng[0] > 0.0):
names.append(name)
try:
self._last_scalars = names[0]
except IndexError:
pass
return names | Only give scalar options that have a varying range | Below is the the instruction that describes the task:
### Input:
Only give scalar options that have a varying range
### Response:
def _get_scalar_names(self, limit=None):
"""Only give scalar options that have a varying range"""
names = []
if limit == 'point':
inpnames = list(self.input_dataset.point_arrays.keys())
elif limit == 'cell':
inpnames = list(self.input_dataset.cell_arrays.keys())
else:
inpnames = self.input_dataset.scalar_names
for name in inpnames:
arr = self.input_dataset.get_scalar(name)
rng = self.input_dataset.get_data_range(name)
if arr is not None and arr.size > 0 and (rng[1]-rng[0] > 0.0):
names.append(name)
try:
self._last_scalars = names[0]
except IndexError:
pass
return names |
def from_jsons(graph_json_str: str, check_version: bool = True) -> BELGraph:
"""Read a BEL graph from a Node-Link JSON string."""
graph_json_dict = json.loads(graph_json_str)
return from_json(graph_json_dict, check_version=check_version) | Read a BEL graph from a Node-Link JSON string. | Below is the the instruction that describes the task:
### Input:
Read a BEL graph from a Node-Link JSON string.
### Response:
def from_jsons(graph_json_str: str, check_version: bool = True) -> BELGraph:
"""Read a BEL graph from a Node-Link JSON string."""
graph_json_dict = json.loads(graph_json_str)
return from_json(graph_json_dict, check_version=check_version) |
def entry_from_raw(self, rval: RawEntry, jptr: JSONPointer = "") -> EntryValue:
"""Transform a raw (leaf-)list entry into the cooked form.
Args:
rval: raw entry (scalar or object)
jptr: JSON pointer of the entry
Raises:
NonexistentSchemaNode: If a member inside `rval` is not defined
in the schema.
RawTypeError: If a scalar value inside `rval` is of incorrect type.
"""
return super().from_raw(rval, jptr) | Transform a raw (leaf-)list entry into the cooked form.
Args:
rval: raw entry (scalar or object)
jptr: JSON pointer of the entry
Raises:
NonexistentSchemaNode: If a member inside `rval` is not defined
in the schema.
RawTypeError: If a scalar value inside `rval` is of incorrect type. | Below is the the instruction that describes the task:
### Input:
Transform a raw (leaf-)list entry into the cooked form.
Args:
rval: raw entry (scalar or object)
jptr: JSON pointer of the entry
Raises:
NonexistentSchemaNode: If a member inside `rval` is not defined
in the schema.
RawTypeError: If a scalar value inside `rval` is of incorrect type.
### Response:
def entry_from_raw(self, rval: RawEntry, jptr: JSONPointer = "") -> EntryValue:
"""Transform a raw (leaf-)list entry into the cooked form.
Args:
rval: raw entry (scalar or object)
jptr: JSON pointer of the entry
Raises:
NonexistentSchemaNode: If a member inside `rval` is not defined
in the schema.
RawTypeError: If a scalar value inside `rval` is of incorrect type.
"""
return super().from_raw(rval, jptr) |
def create(self, vm_, local_master=True):
'''
Create a single VM
'''
output = {}
minion_dict = salt.config.get_cloud_config_value(
'minion', vm_, self.opts, default={}
)
alias, driver = vm_['provider'].split(':')
fun = '{0}.create'.format(driver)
if fun not in self.clouds:
log.error(
'Creating \'%s\' using \'%s\' as the provider '
'cannot complete since \'%s\' is not available',
vm_['name'], vm_['provider'], driver
)
return
deploy = salt.config.get_cloud_config_value('deploy', vm_, self.opts)
make_master = salt.config.get_cloud_config_value(
'make_master',
vm_,
self.opts
)
if deploy:
if not make_master and 'master' not in minion_dict:
log.warning(
'There\'s no master defined on the \'%s\' VM settings.',
vm_['name']
)
if 'pub_key' not in vm_ and 'priv_key' not in vm_:
log.debug('Generating minion keys for \'%s\'', vm_['name'])
priv, pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value(
'keysize',
vm_,
self.opts
)
)
vm_['pub_key'] = pub
vm_['priv_key'] = priv
else:
# Note(pabelanger): We still reference pub_key and priv_key when
# deploy is disabled.
vm_['pub_key'] = None
vm_['priv_key'] = None
key_id = minion_dict.get('id', vm_['name'])
domain = vm_.get('domain')
if vm_.get('use_fqdn') and domain:
minion_dict['append_domain'] = domain
if 'append_domain' in minion_dict:
key_id = '.'.join([key_id, minion_dict['append_domain']])
if make_master is True and 'master_pub' not in vm_ and 'master_pem' not in vm_:
log.debug('Generating the master keys for \'%s\'', vm_['name'])
master_priv, master_pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value(
'keysize',
vm_,
self.opts
)
)
vm_['master_pub'] = master_pub
vm_['master_pem'] = master_priv
if local_master is True and deploy is True:
# Accept the key on the local master
salt.utils.cloud.accept_key(
self.opts['pki_dir'], vm_['pub_key'], key_id
)
vm_['os'] = salt.config.get_cloud_config_value(
'script',
vm_,
self.opts
)
try:
vm_['inline_script'] = salt.config.get_cloud_config_value(
'inline_script',
vm_,
self.opts
)
except KeyError:
pass
try:
alias, driver = vm_['provider'].split(':')
func = '{0}.create'.format(driver)
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
output = self.clouds[func](vm_)
if output is not False and 'sync_after_install' in self.opts:
if self.opts['sync_after_install'] not in (
'all', 'modules', 'states', 'grains'):
log.error('Bad option for sync_after_install')
return output
# A small pause helps the sync work more reliably
time.sleep(3)
start = int(time.time())
while int(time.time()) < start + 60:
# We'll try every <timeout> seconds, up to a minute
mopts_ = salt.config.DEFAULT_MASTER_OPTS
conf_path = '/'.join(self.opts['conf_file'].split('/')[:-1])
mopts_.update(
salt.config.master_config(
os.path.join(conf_path,
'master')
)
)
client = salt.client.get_local_client(mopts=mopts_)
ret = client.cmd(
vm_['name'],
'saltutil.sync_{0}'.format(self.opts['sync_after_install']),
timeout=self.opts['timeout']
)
if ret:
log.info(
six.u('Synchronized the following dynamic modules: '
' {0}').format(ret)
)
break
except KeyError as exc:
log.exception(
'Failed to create VM %s. Configuration value %s needs '
'to be set', vm_['name'], exc
)
# If it's a map then we need to respect the 'requires'
# so we do it later
try:
opt_map = self.opts['map']
except KeyError:
opt_map = False
if self.opts['parallel'] and self.opts['start_action'] and not opt_map:
log.info('Running %s on %s', self.opts['start_action'], vm_['name'])
client = salt.client.get_local_client(mopts=self.opts)
action_out = client.cmd(
vm_['name'],
self.opts['start_action'],
timeout=self.opts['timeout'] * 60
)
output['ret'] = action_out
return output | Create a single VM | Below is the the instruction that describes the task:
### Input:
Create a single VM
### Response:
def create(self, vm_, local_master=True):
'''
Create a single VM
'''
output = {}
minion_dict = salt.config.get_cloud_config_value(
'minion', vm_, self.opts, default={}
)
alias, driver = vm_['provider'].split(':')
fun = '{0}.create'.format(driver)
if fun not in self.clouds:
log.error(
'Creating \'%s\' using \'%s\' as the provider '
'cannot complete since \'%s\' is not available',
vm_['name'], vm_['provider'], driver
)
return
deploy = salt.config.get_cloud_config_value('deploy', vm_, self.opts)
make_master = salt.config.get_cloud_config_value(
'make_master',
vm_,
self.opts
)
if deploy:
if not make_master and 'master' not in minion_dict:
log.warning(
'There\'s no master defined on the \'%s\' VM settings.',
vm_['name']
)
if 'pub_key' not in vm_ and 'priv_key' not in vm_:
log.debug('Generating minion keys for \'%s\'', vm_['name'])
priv, pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value(
'keysize',
vm_,
self.opts
)
)
vm_['pub_key'] = pub
vm_['priv_key'] = priv
else:
# Note(pabelanger): We still reference pub_key and priv_key when
# deploy is disabled.
vm_['pub_key'] = None
vm_['priv_key'] = None
key_id = minion_dict.get('id', vm_['name'])
domain = vm_.get('domain')
if vm_.get('use_fqdn') and domain:
minion_dict['append_domain'] = domain
if 'append_domain' in minion_dict:
key_id = '.'.join([key_id, minion_dict['append_domain']])
if make_master is True and 'master_pub' not in vm_ and 'master_pem' not in vm_:
log.debug('Generating the master keys for \'%s\'', vm_['name'])
master_priv, master_pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value(
'keysize',
vm_,
self.opts
)
)
vm_['master_pub'] = master_pub
vm_['master_pem'] = master_priv
if local_master is True and deploy is True:
# Accept the key on the local master
salt.utils.cloud.accept_key(
self.opts['pki_dir'], vm_['pub_key'], key_id
)
vm_['os'] = salt.config.get_cloud_config_value(
'script',
vm_,
self.opts
)
try:
vm_['inline_script'] = salt.config.get_cloud_config_value(
'inline_script',
vm_,
self.opts
)
except KeyError:
pass
try:
alias, driver = vm_['provider'].split(':')
func = '{0}.create'.format(driver)
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
output = self.clouds[func](vm_)
if output is not False and 'sync_after_install' in self.opts:
if self.opts['sync_after_install'] not in (
'all', 'modules', 'states', 'grains'):
log.error('Bad option for sync_after_install')
return output
# A small pause helps the sync work more reliably
time.sleep(3)
start = int(time.time())
while int(time.time()) < start + 60:
# We'll try every <timeout> seconds, up to a minute
mopts_ = salt.config.DEFAULT_MASTER_OPTS
conf_path = '/'.join(self.opts['conf_file'].split('/')[:-1])
mopts_.update(
salt.config.master_config(
os.path.join(conf_path,
'master')
)
)
client = salt.client.get_local_client(mopts=mopts_)
ret = client.cmd(
vm_['name'],
'saltutil.sync_{0}'.format(self.opts['sync_after_install']),
timeout=self.opts['timeout']
)
if ret:
log.info(
six.u('Synchronized the following dynamic modules: '
' {0}').format(ret)
)
break
except KeyError as exc:
log.exception(
'Failed to create VM %s. Configuration value %s needs '
'to be set', vm_['name'], exc
)
# If it's a map then we need to respect the 'requires'
# so we do it later
try:
opt_map = self.opts['map']
except KeyError:
opt_map = False
if self.opts['parallel'] and self.opts['start_action'] and not opt_map:
log.info('Running %s on %s', self.opts['start_action'], vm_['name'])
client = salt.client.get_local_client(mopts=self.opts)
action_out = client.cmd(
vm_['name'],
self.opts['start_action'],
timeout=self.opts['timeout'] * 60
)
output['ret'] = action_out
return output |
def _check_job_status(self, job, desc, status_key_name):
"""Check to see if the job completed successfully and, if not, construct and
raise a ValueError.
Args:
job (str): The name of the job to check.
desc (dict[str, str]): The result of ``describe_training_job()``.
status_key_name (str): Status key name to check for.
Raises:
ValueError: If the training job fails.
"""
status = desc[status_key_name]
# If the status is capital case, then convert it to Camel case
status = _STATUS_CODE_TABLE.get(status, status)
if status != 'Completed' and status != 'Stopped':
reason = desc.get('FailureReason', '(No reason provided)')
job_type = status_key_name.replace('JobStatus', ' job')
raise ValueError('Error for {} {}: {} Reason: {}'.format(job_type, job, status, reason)) | Check to see if the job completed successfully and, if not, construct and
raise a ValueError.
Args:
job (str): The name of the job to check.
desc (dict[str, str]): The result of ``describe_training_job()``.
status_key_name (str): Status key name to check for.
Raises:
ValueError: If the training job fails. | Below is the the instruction that describes the task:
### Input:
Check to see if the job completed successfully and, if not, construct and
raise a ValueError.
Args:
job (str): The name of the job to check.
desc (dict[str, str]): The result of ``describe_training_job()``.
status_key_name (str): Status key name to check for.
Raises:
ValueError: If the training job fails.
### Response:
def _check_job_status(self, job, desc, status_key_name):
"""Check to see if the job completed successfully and, if not, construct and
raise a ValueError.
Args:
job (str): The name of the job to check.
desc (dict[str, str]): The result of ``describe_training_job()``.
status_key_name (str): Status key name to check for.
Raises:
ValueError: If the training job fails.
"""
status = desc[status_key_name]
# If the status is capital case, then convert it to Camel case
status = _STATUS_CODE_TABLE.get(status, status)
if status != 'Completed' and status != 'Stopped':
reason = desc.get('FailureReason', '(No reason provided)')
job_type = status_key_name.replace('JobStatus', ' job')
raise ValueError('Error for {} {}: {} Reason: {}'.format(job_type, job, status, reason)) |
def _gen_trend_graph(start, end, force_overwrite=False):
""" Total trend graph for machine category. """
filename = graphs.get_trend_graph_filename(start, end)
csv_filename = os.path.join(GRAPH_ROOT, filename + '.csv')
png_filename = os.path.join(GRAPH_ROOT, filename + '.png')
_check_directory_exists(csv_filename)
_check_directory_exists(png_filename)
if not settings.GRAPH_DEBUG or force_overwrite:
if os.path.exists(csv_filename):
if os.path.exists(png_filename):
return
query = CPUJob.objects.filter(
date__range=(start, end)
)
query = query.values('date').annotate(Sum('cpu_usage'))
query = query.order_by('date')
t_start = start
t_end = end
start_str = start.strftime('%Y-%m-%d')
end_str = end.strftime('%Y-%m-%d')
fig, ax = plt.subplots(figsize=(6, 4))
ax.set_xlim(start, end)
ax.set_title('%s - %s' % (start_str, end_str))
ax.set_ylabel("CPU Time (hours)")
ax.set_xlabel("Date")
locator = mdates.AutoDateLocator()
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(mdates.AutoDateFormatter(locator))
ax.xaxis.set_minor_locator(mdates.DayLocator())
data = {}
x_data = []
y_data = []
with open(csv_filename, 'w') as csv_file:
csv_writer = csv.writer(csv_file)
for row in query.iterator():
csv_writer.writerow([
row['date'], row['cpu_usage__sum'] / 3600.00
])
date = row['date']
data[date] = row['cpu_usage__sum']
start = t_start
end = t_end
while start <= end:
total = 0
if start in data:
total = data[start]
x_data.append(start)
y_data.append(total / 3600.00)
start = start + datetime.timedelta(days=1)
del data
ax.plot(x_data, y_data)
del x_data
del y_data
fig.autofmt_xdate()
plt.tight_layout()
plt.savefig(png_filename)
plt.close() | Total trend graph for machine category. | Below is the the instruction that describes the task:
### Input:
Total trend graph for machine category.
### Response:
def _gen_trend_graph(start, end, force_overwrite=False):
""" Total trend graph for machine category. """
filename = graphs.get_trend_graph_filename(start, end)
csv_filename = os.path.join(GRAPH_ROOT, filename + '.csv')
png_filename = os.path.join(GRAPH_ROOT, filename + '.png')
_check_directory_exists(csv_filename)
_check_directory_exists(png_filename)
if not settings.GRAPH_DEBUG or force_overwrite:
if os.path.exists(csv_filename):
if os.path.exists(png_filename):
return
query = CPUJob.objects.filter(
date__range=(start, end)
)
query = query.values('date').annotate(Sum('cpu_usage'))
query = query.order_by('date')
t_start = start
t_end = end
start_str = start.strftime('%Y-%m-%d')
end_str = end.strftime('%Y-%m-%d')
fig, ax = plt.subplots(figsize=(6, 4))
ax.set_xlim(start, end)
ax.set_title('%s - %s' % (start_str, end_str))
ax.set_ylabel("CPU Time (hours)")
ax.set_xlabel("Date")
locator = mdates.AutoDateLocator()
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(mdates.AutoDateFormatter(locator))
ax.xaxis.set_minor_locator(mdates.DayLocator())
data = {}
x_data = []
y_data = []
with open(csv_filename, 'w') as csv_file:
csv_writer = csv.writer(csv_file)
for row in query.iterator():
csv_writer.writerow([
row['date'], row['cpu_usage__sum'] / 3600.00
])
date = row['date']
data[date] = row['cpu_usage__sum']
start = t_start
end = t_end
while start <= end:
total = 0
if start in data:
total = data[start]
x_data.append(start)
y_data.append(total / 3600.00)
start = start + datetime.timedelta(days=1)
del data
ax.plot(x_data, y_data)
del x_data
del y_data
fig.autofmt_xdate()
plt.tight_layout()
plt.savefig(png_filename)
plt.close() |
def value(self, value):
"""Sets the DATA_OBJECT stored value."""
dtype = TYPES[type(value)] if self._type is None else self._type
lib.set_data_type(self._data, dtype)
lib.set_data_value(self._data, self.clips_value(value)) | Sets the DATA_OBJECT stored value. | Below is the the instruction that describes the task:
### Input:
Sets the DATA_OBJECT stored value.
### Response:
def value(self, value):
"""Sets the DATA_OBJECT stored value."""
dtype = TYPES[type(value)] if self._type is None else self._type
lib.set_data_type(self._data, dtype)
lib.set_data_value(self._data, self.clips_value(value)) |
def rebuild_auth(self, prepared_request, response):
"""When being redirected we may want to strip authentication from the
request to avoid leaking credentials. This method intelligently removes
and reapplies authentication where possible to avoid credential loss.
"""
headers = prepared_request.headers
url = prepared_request.url
if 'Authorization' in headers:
# If we get redirected to a new host, we should strip out any
# authentication headers.
original_parsed = urlparse(response.request.url)
redirect_parsed = urlparse(url)
if (original_parsed.hostname != redirect_parsed.hostname):
del headers['Authorization']
# .netrc might have more auth for us on our new host.
new_auth = get_netrc_auth(url) if self.trust_env else None
if new_auth is not None:
prepared_request.prepare_auth(new_auth)
return | When being redirected we may want to strip authentication from the
request to avoid leaking credentials. This method intelligently removes
and reapplies authentication where possible to avoid credential loss. | Below is the the instruction that describes the task:
### Input:
When being redirected we may want to strip authentication from the
request to avoid leaking credentials. This method intelligently removes
and reapplies authentication where possible to avoid credential loss.
### Response:
def rebuild_auth(self, prepared_request, response):
"""When being redirected we may want to strip authentication from the
request to avoid leaking credentials. This method intelligently removes
and reapplies authentication where possible to avoid credential loss.
"""
headers = prepared_request.headers
url = prepared_request.url
if 'Authorization' in headers:
# If we get redirected to a new host, we should strip out any
# authentication headers.
original_parsed = urlparse(response.request.url)
redirect_parsed = urlparse(url)
if (original_parsed.hostname != redirect_parsed.hostname):
del headers['Authorization']
# .netrc might have more auth for us on our new host.
new_auth = get_netrc_auth(url) if self.trust_env else None
if new_auth is not None:
prepared_request.prepare_auth(new_auth)
return |
def sanitize_random(value):
"""
Random string of same length as the given value.
"""
if not value:
return value
return ''.join(random.choice(CHARACTERS) for _ in range(len(value))) | Random string of same length as the given value. | Below is the instruction that describes the task:
### Input:
Random string of same length as the given value.
### Response:
def sanitize_random(value):
"""
Random string of same length as the given value.
"""
if not value:
return value
return ''.join(random.choice(CHARACTERS) for _ in range(len(value))) |
def add_subvolume(self, token, channel, secret,
x_start, x_stop,
y_start, y_stop,
z_start, z_stop,
resolution, title, notes):
"""
Adds a new subvolume to a token/channel.
Arguments:
token (str): The token to write to in LIMS
channel (str): Channel to add in the subvolume. Can be `None`
x_start (int): Start in x dimension
x_stop (int): Stop in x dimension
y_start (int): Start in y dimension
y_stop (int): Stop in y dimension
z_start (int): Start in z dimension
z_stop (int): Stop in z dimension
resolution (int): The resolution at which this subvolume is seen
title (str): The title to set for the subvolume
notes (str): Optional extra thoughts on the subvolume
Returns:
boolean: success
"""
md = self.get_metadata(token)['metadata']
if 'subvolumes' in md:
subvols = md['subvolumes']
else:
subvols = []
subvols.append({
'token': token,
'channel': channel,
'x_start': x_start,
'x_stop': x_stop,
'y_start': y_start,
'y_stop': y_stop,
'z_start': z_start,
'z_stop': z_stop,
'resolution': resolution,
'title': title,
'notes': notes
})
return self.set_metadata(token, {
'secret': secret,
'subvolumes': subvols
}) | Adds a new subvolume to a token/channel.
Arguments:
token (str): The token to write to in LIMS
channel (str): Channel to add in the subvolume. Can be `None`
x_start (int): Start in x dimension
x_stop (int): Stop in x dimension
y_start (int): Start in y dimension
y_stop (int): Stop in y dimension
z_start (int): Start in z dimension
z_stop (int): Stop in z dimension
resolution (int): The resolution at which this subvolume is seen
title (str): The title to set for the subvolume
notes (str): Optional extra thoughts on the subvolume
Returns:
boolean: success | Below is the instruction that describes the task:
### Input:
Adds a new subvolume to a token/channel.
Arguments:
token (str): The token to write to in LIMS
channel (str): Channel to add in the subvolume. Can be `None`
x_start (int): Start in x dimension
x_stop (int): Stop in x dimension
y_start (int): Start in y dimension
y_stop (int): Stop in y dimension
z_start (int): Start in z dimension
z_stop (int): Stop in z dimension
resolution (int): The resolution at which this subvolume is seen
title (str): The title to set for the subvolume
notes (str): Optional extra thoughts on the subvolume
Returns:
boolean: success
### Response:
def add_subvolume(self, token, channel, secret,
x_start, x_stop,
y_start, y_stop,
z_start, z_stop,
resolution, title, notes):
"""
Adds a new subvolume to a token/channel.
Arguments:
token (str): The token to write to in LIMS
channel (str): Channel to add in the subvolume. Can be `None`
x_start (int): Start in x dimension
x_stop (int): Stop in x dimension
y_start (int): Start in y dimension
y_stop (int): Stop in y dimension
z_start (int): Start in z dimension
z_stop (int): Stop in z dimension
resolution (int): The resolution at which this subvolume is seen
title (str): The title to set for the subvolume
notes (str): Optional extra thoughts on the subvolume
Returns:
boolean: success
"""
md = self.get_metadata(token)['metadata']
if 'subvolumes' in md:
subvols = md['subvolumes']
else:
subvols = []
subvols.append({
'token': token,
'channel': channel,
'x_start': x_start,
'x_stop': x_stop,
'y_start': y_start,
'y_stop': y_stop,
'z_start': z_start,
'z_stop': z_stop,
'resolution': resolution,
'title': title,
'notes': notes
})
return self.set_metadata(token, {
'secret': secret,
'subvolumes': subvols
}) |
def _get_option(self, settings, find_key):
""" Return index for provided key """
# This is used as in IAR template, everything
# is as an array with random positions. We look for key with an index
for option in settings:
if option['name'] == find_key:
return settings.index(option) | Return index for provided key | Below is the instruction that describes the task:
### Input:
Return index for provided key
### Response:
def _get_option(self, settings, find_key):
""" Return index for provided key """
# This is used as in IAR template, everything
# is as an array with random positions. We look for key with an index
for option in settings:
if option['name'] == find_key:
return settings.index(option) |
def upload_json_results(self, token, filepath, community_id,
producer_display_name, metric_name,
producer_revision, submit_time, **kwargs):
"""
Upload a JSON file containing numeric scoring results to be added as
scalars. File is parsed and then deleted from the server.
:param token: A valid token for the user in question.
:param filepath: The path to the JSON file.
:param community_id: The id of the community that owns the producer.
:param producer_display_name: The display name of the producer.
:param producer_revision: The repository revision of the producer
that produced this value.
:param submit_time: The submit timestamp. Must be parsable with PHP
strtotime().
:param config_item_id: (optional) If this value pertains to a specific
configuration item, pass its id here.
:param test_dataset_id: (optional) If this value pertains to a
specific test dataset, pass its id here.
:param truth_dataset_id: (optional) If this value pertains to a
specific ground truth dataset, pass its id here.
:param parent_keys: (optional) Semicolon-separated list of parent keys
to look for numeric results under. Use '.' to denote nesting, like
in normal javascript syntax.
:param silent: (optional) If true, do not perform threshold-based email
notifications for this scalar.
:param unofficial: (optional) If true, creates an unofficial scalar
visible only to the user performing the submission.
:param build_results_url: (optional) A URL for linking to build results
for this submission.
:param branch: (optional) The branch name in the source repository for
this submission.
:param params: (optional) Any key/value pairs that should be displayed
with this scalar result.
:type params: dict
:param extra_urls: (optional) Other URL's that should be displayed with
with this scalar result. Each element of the list should be a dict
with the following keys: label, text, href
:type extra_urls: list of dicts
:returns: The list of scalars that were created.
"""
parameters = dict()
parameters['token'] = token
parameters['communityId'] = community_id
parameters['producerDisplayName'] = producer_display_name
parameters['metricName'] = metric_name
parameters['producerRevision'] = producer_revision
parameters['submitTime'] = submit_time
optional_keys = [
'config_item_id', 'test_dataset_id', 'truth_dataset_id', 'silent',
'unofficial', 'build_results_url', 'branch', 'extra_urls',
'params']
for key in optional_keys:
if key in kwargs:
if key == 'config_item_id':
parameters['configItemId'] = kwargs[key]
elif key == 'test_dataset_id':
parameters['testDatasetId'] = kwargs[key]
elif key == 'truth_dataset_id':
parameters['truthDatasetId'] = kwargs[key]
elif key == 'parent_keys':
parameters['parentKeys'] = kwargs[key]
elif key == 'build_results_url':
parameters['buildResultsUrl'] = kwargs[key]
elif key == 'extra_urls':
parameters['extraUrls'] = json.dumps(kwargs[key])
elif key == 'params':
parameters[key] = json.dumps(kwargs[key])
elif key == 'silent':
if kwargs[key]:
parameters[key] = kwargs[key]
elif key == 'unofficial':
if kwargs[key]:
parameters[key] = kwargs[key]
else:
parameters[key] = kwargs[key]
file_payload = open(filepath, 'rb')
response = self.request('midas.tracker.results.upload.json',
parameters, file_payload)
return response | Upload a JSON file containing numeric scoring results to be added as
scalars. File is parsed and then deleted from the server.
:param token: A valid token for the user in question.
:param filepath: The path to the JSON file.
:param community_id: The id of the community that owns the producer.
:param producer_display_name: The display name of the producer.
:param producer_revision: The repository revision of the producer
that produced this value.
:param submit_time: The submit timestamp. Must be parsable with PHP
strtotime().
:param config_item_id: (optional) If this value pertains to a specific
configuration item, pass its id here.
:param test_dataset_id: (optional) If this value pertains to a
specific test dataset, pass its id here.
:param truth_dataset_id: (optional) If this value pertains to a
specific ground truth dataset, pass its id here.
:param parent_keys: (optional) Semicolon-separated list of parent keys
to look for numeric results under. Use '.' to denote nesting, like
in normal javascript syntax.
:param silent: (optional) If true, do not perform threshold-based email
notifications for this scalar.
:param unofficial: (optional) If true, creates an unofficial scalar
visible only to the user performing the submission.
:param build_results_url: (optional) A URL for linking to build results
for this submission.
:param branch: (optional) The branch name in the source repository for
this submission.
:param params: (optional) Any key/value pairs that should be displayed
with this scalar result.
:type params: dict
:param extra_urls: (optional) Other URL's that should be displayed with
with this scalar result. Each element of the list should be a dict
with the following keys: label, text, href
:type extra_urls: list of dicts
:returns: The list of scalars that were created. | Below is the instruction that describes the task:
### Input:
Upload a JSON file containing numeric scoring results to be added as
scalars. File is parsed and then deleted from the server.
:param token: A valid token for the user in question.
:param filepath: The path to the JSON file.
:param community_id: The id of the community that owns the producer.
:param producer_display_name: The display name of the producer.
:param producer_revision: The repository revision of the producer
that produced this value.
:param submit_time: The submit timestamp. Must be parsable with PHP
strtotime().
:param config_item_id: (optional) If this value pertains to a specific
configuration item, pass its id here.
:param test_dataset_id: (optional) If this value pertains to a
specific test dataset, pass its id here.
:param truth_dataset_id: (optional) If this value pertains to a
specific ground truth dataset, pass its id here.
:param parent_keys: (optional) Semicolon-separated list of parent keys
to look for numeric results under. Use '.' to denote nesting, like
in normal javascript syntax.
:param silent: (optional) If true, do not perform threshold-based email
notifications for this scalar.
:param unofficial: (optional) If true, creates an unofficial scalar
visible only to the user performing the submission.
:param build_results_url: (optional) A URL for linking to build results
for this submission.
:param branch: (optional) The branch name in the source repository for
this submission.
:param params: (optional) Any key/value pairs that should be displayed
with this scalar result.
:type params: dict
:param extra_urls: (optional) Other URL's that should be displayed with
with this scalar result. Each element of the list should be a dict
with the following keys: label, text, href
:type extra_urls: list of dicts
:returns: The list of scalars that were created.
### Response:
def upload_json_results(self, token, filepath, community_id,
producer_display_name, metric_name,
producer_revision, submit_time, **kwargs):
"""
Upload a JSON file containing numeric scoring results to be added as
scalars. File is parsed and then deleted from the server.
:param token: A valid token for the user in question.
:param filepath: The path to the JSON file.
:param community_id: The id of the community that owns the producer.
:param producer_display_name: The display name of the producer.
:param producer_revision: The repository revision of the producer
that produced this value.
:param submit_time: The submit timestamp. Must be parsable with PHP
strtotime().
:param config_item_id: (optional) If this value pertains to a specific
configuration item, pass its id here.
:param test_dataset_id: (optional) If this value pertains to a
specific test dataset, pass its id here.
:param truth_dataset_id: (optional) If this value pertains to a
specific ground truth dataset, pass its id here.
:param parent_keys: (optional) Semicolon-separated list of parent keys
to look for numeric results under. Use '.' to denote nesting, like
in normal javascript syntax.
:param silent: (optional) If true, do not perform threshold-based email
notifications for this scalar.
:param unofficial: (optional) If true, creates an unofficial scalar
visible only to the user performing the submission.
:param build_results_url: (optional) A URL for linking to build results
for this submission.
:param branch: (optional) The branch name in the source repository for
this submission.
:param params: (optional) Any key/value pairs that should be displayed
with this scalar result.
:type params: dict
:param extra_urls: (optional) Other URL's that should be displayed with
with this scalar result. Each element of the list should be a dict
with the following keys: label, text, href
:type extra_urls: list of dicts
:returns: The list of scalars that were created.
"""
parameters = dict()
parameters['token'] = token
parameters['communityId'] = community_id
parameters['producerDisplayName'] = producer_display_name
parameters['metricName'] = metric_name
parameters['producerRevision'] = producer_revision
parameters['submitTime'] = submit_time
optional_keys = [
'config_item_id', 'test_dataset_id', 'truth_dataset_id', 'silent',
'unofficial', 'build_results_url', 'branch', 'extra_urls',
'params']
for key in optional_keys:
if key in kwargs:
if key == 'config_item_id':
parameters['configItemId'] = kwargs[key]
elif key == 'test_dataset_id':
parameters['testDatasetId'] = kwargs[key]
elif key == 'truth_dataset_id':
parameters['truthDatasetId'] = kwargs[key]
elif key == 'parent_keys':
parameters['parentKeys'] = kwargs[key]
elif key == 'build_results_url':
parameters['buildResultsUrl'] = kwargs[key]
elif key == 'extra_urls':
parameters['extraUrls'] = json.dumps(kwargs[key])
elif key == 'params':
parameters[key] = json.dumps(kwargs[key])
elif key == 'silent':
if kwargs[key]:
parameters[key] = kwargs[key]
elif key == 'unofficial':
if kwargs[key]:
parameters[key] = kwargs[key]
else:
parameters[key] = kwargs[key]
file_payload = open(filepath, 'rb')
response = self.request('midas.tracker.results.upload.json',
parameters, file_payload)
return response |
def mse(mean, estimator):
"""
Description:
Calculates the Mean Squared Error (MSE) of
an estimation on flat numpy ndarrays.
Parameters:
mean: actual value (numpy ndarray)
estimator: estimated value of the mean (numpy ndarray)
"""
return np.mean((np.asarray(estimator) - np.asarray(mean)) ** 2, axis=0) | Description:
Calculates the Mean Squared Error (MSE) of
an estimation on flat numpy ndarrays.
Parameters:
mean: actual value (numpy ndarray)
estimator: estimated value of the mean (numpy ndarray) | Below is the instruction that describes the task:
### Input:
Description:
Calculates the Mean Squared Error (MSE) of
an estimation on flat numpy ndarrays.
Parameters:
mean: actual value (numpy ndarray)
estimator: estimated value of the mean (numpy ndarray)
### Response:
def mse(mean, estimator):
"""
Description:
Calculates the Mean Squared Error (MSE) of
an estimation on flat numpy ndarrays.
Parameters:
mean: actual value (numpy ndarray)
estimator: estimated value of the mean (numpy ndarray)
"""
return np.mean((np.asarray(estimator) - np.asarray(mean)) ** 2, axis=0) |
def _default_bridge(self):
""" Get an instance of the ENBridge object using ctypes. """
objc = self.objc
ENBridge = objc.objc_getClass('ENBridge')
return objc.objc_msgSend(ENBridge, objc.sel_registerName('instance')) | Get an instance of the ENBridge object using ctypes. | Below is the instruction that describes the task:
### Input:
Get an instance of the ENBridge object using ctypes.
### Response:
def _default_bridge(self):
""" Get an instance of the ENBridge object using ctypes. """
objc = self.objc
ENBridge = objc.objc_getClass('ENBridge')
return objc.objc_msgSend(ENBridge, objc.sel_registerName('instance')) |
def button_clicked(self, button):
"""Action when button was clicked.
Parameters
----------
button : instance of QPushButton
which button was pressed
"""
if button is self.idx_ok:
evt_types = [x.text() for x in self.idx_evt_type.selectedItems()]
self.merge_to = self.idx_merge_to.currentText()
min_interval = self.min_interval.get_value()
events = []
merge_to_longer = False
if not evt_types:
QMessageBox.warning(self, 'Missing information',
'Choose at least one event type.')
return
min_interval = 0 if not min_interval else min_interval
if self.merge_to == 'longer duration event':
merge_to_longer = True
if len(evt_types) > 1:
answer = QInputDialog.getText(self, 'New Event Type',
'Enter new event\'s name')
if answer[1]:
name = answer[0]
else:
return
else:
name = evt_types[0]
for etype in evt_types:
events.extend(self.parent.notes.annot.get_events(name=etype,
qual='Good'))
if self.cross_chan.get_value():
events = merge_close(events, min_interval,
merge_to_longer=merge_to_longer)
else:
channels = sorted(set([y for x in events for y in x['chan']]))
events = []
chan_events = []
for chan in channels:
chan_events = []
for etype in evt_types:
chan_events.extend(self.parent.notes.annot.get_events(
name=etype, chan=chan, qual='Good'))
events.extend(merge_close(chan_events, min_interval,
merge_to_longer=merge_to_longer))
for etype in evt_types:
self.parent.notes.annot.remove_event_type(etype)
self.parent.notes.add_events(events, name=name, chan=None)
self.parent.notes.display_eventtype()
n_eventtype = self.parent.notes.idx_eventtype.count()
self.parent.notes.idx_eventtype.setCurrentIndex(n_eventtype - 1)
self.accept()
if button is self.idx_cancel:
self.reject() | Action when button was clicked.
Parameters
----------
button : instance of QPushButton
which button was pressed | Below is the instruction that describes the task:
### Input:
Action when button was clicked.
Parameters
----------
button : instance of QPushButton
which button was pressed
### Response:
def button_clicked(self, button):
"""Action when button was clicked.
Parameters
----------
button : instance of QPushButton
which button was pressed
"""
if button is self.idx_ok:
evt_types = [x.text() for x in self.idx_evt_type.selectedItems()]
self.merge_to = self.idx_merge_to.currentText()
min_interval = self.min_interval.get_value()
events = []
merge_to_longer = False
if not evt_types:
QMessageBox.warning(self, 'Missing information',
'Choose at least one event type.')
return
min_interval = 0 if not min_interval else min_interval
if self.merge_to == 'longer duration event':
merge_to_longer = True
if len(evt_types) > 1:
answer = QInputDialog.getText(self, 'New Event Type',
'Enter new event\'s name')
if answer[1]:
name = answer[0]
else:
return
else:
name = evt_types[0]
for etype in evt_types:
events.extend(self.parent.notes.annot.get_events(name=etype,
qual='Good'))
if self.cross_chan.get_value():
events = merge_close(events, min_interval,
merge_to_longer=merge_to_longer)
else:
channels = sorted(set([y for x in events for y in x['chan']]))
events = []
chan_events = []
for chan in channels:
chan_events = []
for etype in evt_types:
chan_events.extend(self.parent.notes.annot.get_events(
name=etype, chan=chan, qual='Good'))
events.extend(merge_close(chan_events, min_interval,
merge_to_longer=merge_to_longer))
for etype in evt_types:
self.parent.notes.annot.remove_event_type(etype)
self.parent.notes.add_events(events, name=name, chan=None)
self.parent.notes.display_eventtype()
n_eventtype = self.parent.notes.idx_eventtype.count()
self.parent.notes.idx_eventtype.setCurrentIndex(n_eventtype - 1)
self.accept()
if button is self.idx_cancel:
self.reject() |
def get(self, ns, label=None):
"""Return :class:`tags instances<~Tag>` for the namespace `ns`, ordered
by label.
If `label` is not None the only one instance may be returned, or
`None` if no tags exists for this label.
"""
query = Tag.query.filter(Tag.ns == ns)
if label is not None:
return query.filter(Tag.label == label).first()
return query.all() | Return :class:`tags instances<~Tag>` for the namespace `ns`, ordered
by label.
If `label` is not None the only one instance may be returned, or
`None` if no tags exists for this label. | Below is the instruction that describes the task:
### Input:
Return :class:`tags instances<~Tag>` for the namespace `ns`, ordered
by label.
If `label` is not None the only one instance may be returned, or
`None` if no tags exists for this label.
### Response:
def get(self, ns, label=None):
"""Return :class:`tags instances<~Tag>` for the namespace `ns`, ordered
by label.
If `label` is not None the only one instance may be returned, or
`None` if no tags exists for this label.
"""
query = Tag.query.filter(Tag.ns == ns)
if label is not None:
return query.filter(Tag.label == label).first()
return query.all() |
def age(self, year, month=2, day=1):
"""Returns the age of the player on a given date.
:year: int representing the year.
:month: int representing the month (1-12).
:day: int representing the day within the month (1-31).
:returns: Age in years as a float.
"""
doc = self.get_main_doc()
date_string = doc('span[itemprop="birthDate"]').attr('data-birth')
regex = r'(\d{4})\-(\d{2})\-(\d{2})'
date_args = map(int, re.match(regex, date_string).groups())
birth_date = datetime.date(*date_args)
age_date = datetime.date(year=year, month=month, day=day)
delta = age_date - birth_date
age = delta.days / 365.
return age | Returns the age of the player on a given date.
:year: int representing the year.
:month: int representing the month (1-12).
:day: int representing the day within the month (1-31).
:returns: Age in years as a float. | Below is the instruction that describes the task:
### Input:
Returns the age of the player on a given date.
:year: int representing the year.
:month: int representing the month (1-12).
:day: int representing the day within the month (1-31).
:returns: Age in years as a float.
### Response:
def age(self, year, month=2, day=1):
"""Returns the age of the player on a given date.
:year: int representing the year.
:month: int representing the month (1-12).
:day: int representing the day within the month (1-31).
:returns: Age in years as a float.
"""
doc = self.get_main_doc()
date_string = doc('span[itemprop="birthDate"]').attr('data-birth')
regex = r'(\d{4})\-(\d{2})\-(\d{2})'
date_args = map(int, re.match(regex, date_string).groups())
birth_date = datetime.date(*date_args)
age_date = datetime.date(year=year, month=month, day=day)
delta = age_date - birth_date
age = delta.days / 365.
return age |
def from_yamlf_to_list(cls, fpath: str, encoding: str='utf8',
force_snake_case=True, force_cast: bool=False, restrict: bool=True) -> TList[T]:
"""From yaml file path to list of instance
:param fpath: Yaml file path
:param encoding: Yaml file encoding
:param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: List of instance
"""
return cls.from_dicts(util.load_yamlf(fpath, encoding),
force_snake_case=force_snake_case,
force_cast=force_cast,
restrict=restrict) | From yaml file path to list of instance
:param fpath: Yaml file path
:param encoding: Yaml file encoding
:param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: List of instance | Below is the instruction that describes the task:
### Input:
From yaml file path to list of instance
:param fpath: Yaml file path
:param encoding: Yaml file encoding
:param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: List of instance
### Response:
def from_yamlf_to_list(cls, fpath: str, encoding: str='utf8',
force_snake_case=True, force_cast: bool=False, restrict: bool=True) -> TList[T]:
"""From yaml file path to list of instance
:param fpath: Yaml file path
:param encoding: Yaml file encoding
:param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: List of instance
"""
return cls.from_dicts(util.load_yamlf(fpath, encoding),
force_snake_case=force_snake_case,
force_cast=force_cast,
restrict=restrict) |
def _remove(self, client_kwargs):
"""
Remove an object.
args:
client_kwargs (dict): Client arguments.
"""
with _handle_azure_exception():
# File
if 'file_name' in client_kwargs:
return self.client.delete_file(
share_name=client_kwargs['share_name'],
directory_name=client_kwargs['directory_name'],
file_name=client_kwargs['file_name'])
# Directory
elif 'directory_name' in client_kwargs:
return self.client.delete_directory(
share_name=client_kwargs['share_name'],
directory_name=client_kwargs['directory_name'])
# Share
return self.client.delete_share(
share_name=client_kwargs['share_name']) | Remove an object.
args:
client_kwargs (dict): Client arguments. | Below is the instruction that describes the task:
### Input:
Remove an object.
args:
client_kwargs (dict): Client arguments.
### Response:
def _remove(self, client_kwargs):
"""
Remove an object.
args:
client_kwargs (dict): Client arguments.
"""
with _handle_azure_exception():
# File
if 'file_name' in client_kwargs:
return self.client.delete_file(
share_name=client_kwargs['share_name'],
directory_name=client_kwargs['directory_name'],
file_name=client_kwargs['file_name'])
# Directory
elif 'directory_name' in client_kwargs:
return self.client.delete_directory(
share_name=client_kwargs['share_name'],
directory_name=client_kwargs['directory_name'])
# Share
return self.client.delete_share(
share_name=client_kwargs['share_name']) |
def find_reasonable_designs(workspaces, condition=None, verbose=False):
"""
Return a list of design where the representative model has a restraint
distance less that the given threshold. The default threshold (1.2) is
fairly lenient.
"""
print "Loading designs..."
designs = []
if condition is None:
condition = 'restraint_dist < 1.2'
for workspace in workspaces:
for directory in workspace.output_subdirs:
if verbose:
print ' ' + directory
design = structures.Design(directory)
vars = design.structures.iloc[design.rep].to_dict()
if pd.eval(condition, local_dict=vars):
designs.append(design)
return designs | Return a list of design where the representative model has a restraint
distance less that the given threshold. The default threshold (1.2) is
fairly lenient. | Below is the instruction that describes the task:
### Input:
Return a list of design where the representative model has a restraint
distance less that the given threshold. The default threshold (1.2) is
fairly lenient.
### Response:
def find_reasonable_designs(workspaces, condition=None, verbose=False):
"""
Return a list of design where the representative model has a restraint
distance less that the given threshold. The default threshold (1.2) is
fairly lenient.
"""
print "Loading designs..."
designs = []
if condition is None:
condition = 'restraint_dist < 1.2'
for workspace in workspaces:
for directory in workspace.output_subdirs:
if verbose:
print ' ' + directory
design = structures.Design(directory)
vars = design.structures.iloc[design.rep].to_dict()
if pd.eval(condition, local_dict=vars):
designs.append(design)
return designs |
def is_nondominated_continuous(self, obs_df):
"""identify which candidate solutions are pareto non-dominated continuously updated,
but still slow
Parameters
----------
obs_df : pandas.DataFrame
dataframe with columns of observation names and rows of realizations
Returns
-------
is_dominated : pandas.Series
series with index of obs_df and bool series
"""
obj_df = obs_df.loc[:,self.obs_obj_names]
P = list(obj_df.index)
PP = set()
PP.add(P[0])
#iidx = 1
#while iidx < len(P):
for iidx in P:
jidx = 0
drop = []
keep = True
for jidx in PP:
# if dominates(iidx,jidx):
# drop.append(jidx)
# elif dominates(jidx,iidx):
# keep = False
# break
if jidx == iidx:
continue
if self.dominates(obj_df.loc[iidx, :], obj_df.loc[jidx, :]):
drop.append(jidx)
elif self.dominates(obj_df.loc[jidx, :], obj_df.loc[iidx, :]):
keep = False
break
for d in drop:
PP.remove(d)
if keep:
PP.add(iidx)
#iidx += 1
is_nondom = pd.Series(data=False,index=obs_df.index,dtype=bool)
is_nondom.loc[PP] = True
return is_nondom | identify which candidate solutions are pareto non-dominated continuously updated,
but still slow
Parameters
----------
obs_df : pandas.DataFrame
dataframe with columns of observation names and rows of realizations
Returns
-------
is_dominated : pandas.Series
series with index of obs_df and bool series | Below is the instruction that describes the task:
### Input:
identify which candidate solutions are pareto non-dominated continuously updated,
but still slow
Parameters
----------
obs_df : pandas.DataFrame
dataframe with columns of observation names and rows of realizations
Returns
-------
is_dominated : pandas.Series
series with index of obs_df and bool series
### Response:
def is_nondominated_continuous(self, obs_df):
"""identify which candidate solutions are pareto non-dominated continuously updated,
but still slow
Parameters
----------
obs_df : pandas.DataFrame
dataframe with columns of observation names and rows of realizations
Returns
-------
is_dominated : pandas.Series
series with index of obs_df and bool series
"""
obj_df = obs_df.loc[:,self.obs_obj_names]
P = list(obj_df.index)
PP = set()
PP.add(P[0])
#iidx = 1
#while iidx < len(P):
for iidx in P:
jidx = 0
drop = []
keep = True
for jidx in PP:
# if dominates(iidx,jidx):
# drop.append(jidx)
# elif dominates(jidx,iidx):
# keep = False
# break
if jidx == iidx:
continue
if self.dominates(obj_df.loc[iidx, :], obj_df.loc[jidx, :]):
drop.append(jidx)
elif self.dominates(obj_df.loc[jidx, :], obj_df.loc[iidx, :]):
keep = False
break
for d in drop:
PP.remove(d)
if keep:
PP.add(iidx)
#iidx += 1
is_nondom = pd.Series(data=False,index=obs_df.index,dtype=bool)
is_nondom.loc[PP] = True
return is_nondom |
def transform(self, X, lenscale=None):
"""
Apply the RBF to X.
Parameters
----------
X: ndarray
(N, d) array of observations where N is the number of samples, and
d is the dimensionality of X.
lenscale: scalar or ndarray, optional
scalar or array of shape (d,) length scales (one for each dimension
of X). If not input, this uses the value of the initial length
scale.
Returns
-------
ndarray:
of shape (N, D) where D is number of RBF centres.
"""
N, d = X.shape
lenscale = self._check_dim(d, lenscale)
den = (2 * lenscale**2)
return np.exp(- cdist(X / den, self.C / den, 'sqeuclidean')) | Apply the RBF to X.
Parameters
----------
X: ndarray
(N, d) array of observations where N is the number of samples, and
d is the dimensionality of X.
lenscale: scalar or ndarray, optional
scalar or array of shape (d,) length scales (one for each dimension
of X). If not input, this uses the value of the initial length
scale.
Returns
-------
ndarray:
of shape (N, D) where D is number of RBF centres. | Below is the the instruction that describes the task:
### Input:
Apply the RBF to X.
Parameters
----------
X: ndarray
(N, d) array of observations where N is the number of samples, and
d is the dimensionality of X.
lenscale: scalar or ndarray, optional
scalar or array of shape (d,) length scales (one for each dimension
of X). If not input, this uses the value of the initial length
scale.
Returns
-------
ndarray:
of shape (N, D) where D is number of RBF centres.
### Response:
def transform(self, X, lenscale=None):
"""
Apply the RBF to X.
Parameters
----------
X: ndarray
(N, d) array of observations where N is the number of samples, and
d is the dimensionality of X.
lenscale: scalar or ndarray, optional
scalar or array of shape (d,) length scales (one for each dimension
of X). If not input, this uses the value of the initial length
scale.
Returns
-------
ndarray:
of shape (N, D) where D is number of RBF centres.
"""
N, d = X.shape
lenscale = self._check_dim(d, lenscale)
den = (2 * lenscale**2)
return np.exp(- cdist(X / den, self.C / den, 'sqeuclidean')) |
def _build_operator_name(name, function, symbol):
"""implementation details"""
def add_operator(sym):
if 'new' in sym or 'delete' in sym:
return 'operator ' + sym
return 'operator' + sym
if isinstance(name, Callable) and None is function:
name = None
if name:
if 'operator' not in name:
name = add_operator(name)
return name
elif symbol:
return add_operator(symbol)
return name | implementation details | Below is the the instruction that describes the task:
### Input:
implementation details
### Response:
def _build_operator_name(name, function, symbol):
"""implementation details"""
def add_operator(sym):
if 'new' in sym or 'delete' in sym:
return 'operator ' + sym
return 'operator' + sym
if isinstance(name, Callable) and None is function:
name = None
if name:
if 'operator' not in name:
name = add_operator(name)
return name
elif symbol:
return add_operator(symbol)
return name |
def _save_token_on_disk(self):
"""Helper function that saves the token on disk"""
token = self._token.copy()
# Client secret is needed for token refreshing and isn't returned
# as a pared of OAuth token by default
token.update(client_secret=self._client_secret)
with codecs.open(config.TOKEN_FILE_PATH, 'w', 'utf8') as f:
json.dump(
token, f,
ensure_ascii=False,
sort_keys=True,
indent=4,
) | Helper function that saves the token on disk | Below is the the instruction that describes the task:
### Input:
Helper function that saves the token on disk
### Response:
def _save_token_on_disk(self):
"""Helper function that saves the token on disk"""
token = self._token.copy()
# Client secret is needed for token refreshing and isn't returned
# as a pared of OAuth token by default
token.update(client_secret=self._client_secret)
with codecs.open(config.TOKEN_FILE_PATH, 'w', 'utf8') as f:
json.dump(
token, f,
ensure_ascii=False,
sort_keys=True,
indent=4,
) |
def odo_register():
'''
Enable conversion of .sdmx files with odo (http://odo.readthedocs.org).
Adds conversion from sdmx to PD.DataFrame to odo graph.
Note that native discovery of sdmx files is not yet supported. odo will thus
convert to PD.DataFrame
and discover the data shape from there.
'''
logger.info('Registering with odo...')
import odo
from odo.utils import keywords
import pandas as PD
from toolz import keyfilter
import toolz.curried.operator as op
class PandaSDMX(object):
def __init__(self, uri):
self.uri = uri
@odo.resource.register(r'.*\.sdmx')
def resource_sdmx(uri, **kwargs):
return PandaSDMX(uri)
@odo.discover.register(PandaSDMX)
def _(sdmx):
return odo.discover(Request().get(fromfile=sdmx.uri).write())
@odo.convert.register(PD.DataFrame, PandaSDMX)
def convert_sdmx(sdmx, **kwargs):
write = Request().get(fromfile=sdmx.uri).write
return write(**keyfilter(op.contains(keywords(write)), kwargs))
logger.info('odo registration complete.') | Enable conversion of .sdmx files with odo (http://odo.readthedocs.org).
Adds conversion from sdmx to PD.DataFrame to odo graph.
Note that native discovery of sdmx files is not yet supported. odo will thus
convert to PD.DataFrame
and discover the data shape from there. | Below is the the instruction that describes the task:
### Input:
Enable conversion of .sdmx files with odo (http://odo.readthedocs.org).
Adds conversion from sdmx to PD.DataFrame to odo graph.
Note that native discovery of sdmx files is not yet supported. odo will thus
convert to PD.DataFrame
and discover the data shape from there.
### Response:
def odo_register():
'''
Enable conversion of .sdmx files with odo (http://odo.readthedocs.org).
Adds conversion from sdmx to PD.DataFrame to odo graph.
Note that native discovery of sdmx files is not yet supported. odo will thus
convert to PD.DataFrame
and discover the data shape from there.
'''
logger.info('Registering with odo...')
import odo
from odo.utils import keywords
import pandas as PD
from toolz import keyfilter
import toolz.curried.operator as op
class PandaSDMX(object):
def __init__(self, uri):
self.uri = uri
@odo.resource.register(r'.*\.sdmx')
def resource_sdmx(uri, **kwargs):
return PandaSDMX(uri)
@odo.discover.register(PandaSDMX)
def _(sdmx):
return odo.discover(Request().get(fromfile=sdmx.uri).write())
@odo.convert.register(PD.DataFrame, PandaSDMX)
def convert_sdmx(sdmx, **kwargs):
write = Request().get(fromfile=sdmx.uri).write
return write(**keyfilter(op.contains(keywords(write)), kwargs))
logger.info('odo registration complete.') |
def gibbs_sampling_step(self, visible, n_features):
"""Perform one step of gibbs sampling.
:param visible: activations of the visible units
:param n_features: number of features
:return: tuple(hidden probs, hidden states, visible probs,
new hidden probs, new hidden states)
"""
hprobs, hstates = self.sample_hidden_from_visible(visible)
vprobs = self.sample_visible_from_hidden(hprobs, n_features)
hprobs1, hstates1 = self.sample_hidden_from_visible(vprobs)
return hprobs, hstates, vprobs, hprobs1, hstates1 | Perform one step of gibbs sampling.
:param visible: activations of the visible units
:param n_features: number of features
:return: tuple(hidden probs, hidden states, visible probs,
new hidden probs, new hidden states) | Below is the the instruction that describes the task:
### Input:
Perform one step of gibbs sampling.
:param visible: activations of the visible units
:param n_features: number of features
:return: tuple(hidden probs, hidden states, visible probs,
new hidden probs, new hidden states)
### Response:
def gibbs_sampling_step(self, visible, n_features):
"""Perform one step of gibbs sampling.
:param visible: activations of the visible units
:param n_features: number of features
:return: tuple(hidden probs, hidden states, visible probs,
new hidden probs, new hidden states)
"""
hprobs, hstates = self.sample_hidden_from_visible(visible)
vprobs = self.sample_visible_from_hidden(hprobs, n_features)
hprobs1, hstates1 = self.sample_hidden_from_visible(vprobs)
return hprobs, hstates, vprobs, hprobs1, hstates1 |
def post_save_update_cache(sender, instance, created, raw, **kwargs):
"""Update the cache when an instance is created or modified."""
if raw:
return
name = sender.__name__
if name in cached_model_names:
delay_cache = getattr(instance, '_delay_cache', False)
if not delay_cache:
from .tasks import update_cache_for_instance
update_cache_for_instance(name, instance.pk, instance) | Update the cache when an instance is created or modified. | Below is the the instruction that describes the task:
### Input:
Update the cache when an instance is created or modified.
### Response:
def post_save_update_cache(sender, instance, created, raw, **kwargs):
"""Update the cache when an instance is created or modified."""
if raw:
return
name = sender.__name__
if name in cached_model_names:
delay_cache = getattr(instance, '_delay_cache', False)
if not delay_cache:
from .tasks import update_cache_for_instance
update_cache_for_instance(name, instance.pk, instance) |
def show_status(self):
"""
dumps the status of the agent
"""
txt = 'Agent Status:\n'
print(txt)
txt += "start_x = " + str(self.start_x) + "\n"
txt += "start_y = " + str(self.start_y) + "\n"
txt += "target_x = " + str(self.target_x) + "\n"
txt += "target_y = " + str(self.target_y) + "\n"
txt += "current_x = " + str(self.current_x) + "\n"
txt += "current_y = " + str(self.current_y) + "\n"
print(self.grd)
return txt | dumps the status of the agent | Below is the the instruction that describes the task:
### Input:
dumps the status of the agent
### Response:
def show_status(self):
"""
dumps the status of the agent
"""
txt = 'Agent Status:\n'
print(txt)
txt += "start_x = " + str(self.start_x) + "\n"
txt += "start_y = " + str(self.start_y) + "\n"
txt += "target_x = " + str(self.target_x) + "\n"
txt += "target_y = " + str(self.target_y) + "\n"
txt += "current_x = " + str(self.current_x) + "\n"
txt += "current_y = " + str(self.current_y) + "\n"
print(self.grd)
return txt |
def FMErrorByNum( num ):
"""This function raises an error based on the specified error code."""
if not num in FMErrorNum.keys():
raise FMServerError, (num, FMErrorNum[-1])
elif num == 102:
raise FMFieldError, (num, FMErrorNum[num])
else:
raise FMServerError, (num, FMErrorNum[num]) | This function raises an error based on the specified error code. | Below is the the instruction that describes the task:
### Input:
This function raises an error based on the specified error code.
### Response:
def FMErrorByNum( num ):
"""This function raises an error based on the specified error code."""
if not num in FMErrorNum.keys():
raise FMServerError, (num, FMErrorNum[-1])
elif num == 102:
raise FMFieldError, (num, FMErrorNum[num])
else:
raise FMServerError, (num, FMErrorNum[num]) |
def _encode_string(string):
"""Return a byte string, encoding Unicode with UTF-8."""
if not isinstance(string, bytes):
string = string.encode('utf8')
return ffi.new('char[]', string) | Return a byte string, encoding Unicode with UTF-8. | Below is the the instruction that describes the task:
### Input:
Return a byte string, encoding Unicode with UTF-8.
### Response:
def _encode_string(string):
"""Return a byte string, encoding Unicode with UTF-8."""
if not isinstance(string, bytes):
string = string.encode('utf8')
return ffi.new('char[]', string) |
def map(
self,
lens,
X=None,
clusterer=cluster.DBSCAN(eps=0.5, min_samples=3),
cover=Cover(n_cubes=10, perc_overlap=0.1),
nerve=GraphNerve(),
precomputed=False,
remove_duplicate_nodes=False,
# These arguments are all deprecated
overlap_perc=None,
nr_cubes=None
):
"""Apply Mapper algorithm on this projection and build a simplicial complex. Returns a dictionary with nodes and links.
Parameters
----------
lens: Numpy Array
Lower dimensional representation of data. In general will be output of `fit_transform`.
X: Numpy Array
Original data or data to run clustering on. If `None`, then use `lens` as default. X can be a SciPy sparse matrix.
clusterer: Default: DBSCAN
Scikit-learn API compatible clustering algorithm. Must provide `fit` and `predict`.
cover: kmapper.Cover
Cover scheme for lens. Instance of kmapper.cover providing methods `fit` and `transform`.
nerve: kmapper.Nerve
Nerve builder implementing `__call__(nodes)` API
precomputed : Boolean
Tell Mapper whether the data that you are clustering on is a precomputed distance matrix. If set to
`True`, the assumption is that you are also telling your `clusterer` that `metric='precomputed'` (which
is an argument for DBSCAN among others), which
will then cause the clusterer to expect a square distance matrix for each hypercube. `precomputed=True` will give a square matrix
to the clusterer to fit on for each hypercube.
remove_duplicate_nodes: Boolean
Removes duplicate nodes before edges are determined. A node is considered to be duplicate
if it has exactly the same set of points as another node.
nr_cubes: Int
.. deprecated:: 1.1.6
define Cover explicitly in future versions
The number of intervals/hypercubes to create. Default = 10.
overlap_perc: Float
.. deprecated:: 1.1.6
define Cover explicitly in future versions
The percentage of overlap "between" the intervals/hypercubes. Default = 0.1.
Returns
=======
simplicial_complex : dict
A dictionary with "nodes", "links" and "meta" information.
Examples
========
>>> # Default mapping.
>>> graph = mapper.map(X_projected, X_inverse)
>>> # Apply clustering on the projection instead of on inverse X
>>> graph = mapper.map(X_projected)
>>> # Use 20 cubes/intervals per projection dimension, with a 50% overlap
>>> graph = mapper.map(X_projected, X_inverse,
>>> cover=kmapper.Cover(n_cubes=20, perc_overlap=0.5))
>>> # Use multiple different cubes/intervals per projection dimension,
>>> # And vary the overlap
>>> graph = mapper.map(X_projected, X_inverse,
>>> cover=km.Cover(n_cubes=[10,20,5],
>>> perc_overlap=[0.1,0.2,0.5]))
>>> # Use KMeans with 2 clusters
>>> graph = mapper.map(X_projected, X_inverse,
>>> clusterer=sklearn.cluster.KMeans(2))
>>> # Use DBSCAN with "cosine"-distance
>>> graph = mapper.map(X_projected, X_inverse,
>>> clusterer=sklearn.cluster.DBSCAN(metric="cosine"))
>>> # Use HDBSCAN as the clusterer
>>> graph = mapper.map(X_projected, X_inverse,
>>> clusterer=hdbscan.HDBSCAN())
>>> # Parametrize the nerve of the covering
>>> graph = mapper.map(X_projected, X_inverse,
>>> nerve=km.GraphNerve(min_intersection=3))
"""
start = datetime.now()
nodes = defaultdict(list)
meta = defaultdict(list)
graph = {}
# If inverse image is not provided, we use the projection as the inverse image (suffer projection loss)
if X is None:
X = lens
# Deprecation warnings
if nr_cubes is not None or overlap_perc is not None:
warnings.warn(
"Deprecation Warning: Please supply km.Cover object. Explicitly passing in n_cubes/nr_cubes and overlap_perc will be deprecated in future releases. ",
DeprecationWarning,
)
# If user supplied nr_cubes, overlap_perc, or coverer, opt for those
# TODO: remove this conditional after release in 1.2
if nr_cubes is not None or overlap_perc is not None:
n_cubes = nr_cubes if nr_cubes else 10
overlap_perc = overlap_perc if overlap_perc else 0.1
self.cover = Cover(n_cubes=n_cubes, perc_overlap=overlap_perc)
else:
self.cover = cover
if self.verbose > 0:
print(
"Mapping on data shaped %s using lens shaped %s\n"
% (str(X.shape), str(lens.shape))
)
# Prefix'ing the data with an ID column
ids = np.array([x for x in range(lens.shape[0])])
lens = np.c_[ids, lens]
if issparse(X):
X = hstack([ids[np.newaxis].T, X], format='csr')
else:
X = np.c_[ids, X]
# Cover scheme defines a list of elements
bins = self.cover.fit(lens)
# Algo's like K-Means, have a set number of clusters. We need this number
# to adjust for the minimal number of samples inside an interval before
# we consider clustering or skipping it.
cluster_params = clusterer.get_params()
min_cluster_samples = cluster_params.get(
"n_clusters",
cluster_params.get(
"min_cluster_size", cluster_params.get("min_samples", 1)
),
)
if self.verbose > 1:
print(
"Minimal points in hypercube before clustering: %d"
% (min_cluster_samples)
)
# Subdivide the projected data X in intervals/hypercubes with overlap
if self.verbose > 0:
bins = list(bins) # extract list from generator
total_bins = len(bins)
print("Creating %s hypercubes." % total_bins)
for i, hypercube in enumerate(self.cover.transform(lens)):
# If at least min_cluster_samples samples inside the hypercube
if hypercube.shape[0] >= min_cluster_samples:
# Cluster the data point(s) in the cube, skipping the id-column
# Note that we apply clustering on the inverse image (original data samples) that fall inside the cube.
ids = [int(nn) for nn in hypercube[:, 0]]
X_cube = X[ids]
fit_data = X_cube[:, 1:]
if precomputed:
fit_data = fit_data[:, ids]
cluster_predictions = clusterer.fit_predict(fit_data)
if self.verbose > 1:
print(
" > Found %s clusters in hypercube %s."
% (
np.unique(
cluster_predictions[cluster_predictions > -1]
).shape[0], i
)
)
for pred in np.unique(cluster_predictions):
# if not predicted as noise
if pred != -1 and not np.isnan(pred):
cluster_id = "cube{}_cluster{}".format(i, int(pred))
nodes[cluster_id] = hypercube[:, 0][cluster_predictions == pred].astype(int).tolist()
elif self.verbose > 1:
print("Cube_%s is empty.\n" % (i))
if remove_duplicate_nodes:
nodes = self._remove_duplicate_nodes(nodes)
links, simplices = nerve.compute(nodes)
graph["nodes"] = nodes
graph["links"] = links
graph["simplices"] = simplices
graph["meta_data"] = {
"projection": self.projection if self.projection else "custom",
"n_cubes": self.cover.n_cubes,
"perc_overlap": self.cover.perc_overlap,
"clusterer": str(clusterer),
"scaler": str(self.scaler),
}
graph["meta_nodes"] = meta
if self.verbose > 0:
self._summary(graph, str(datetime.now() - start))
return graph | Apply Mapper algorithm on this projection and build a simplicial complex. Returns a dictionary with nodes and links.
Parameters
----------
lens: Numpy Array
Lower dimensional representation of data. In general will be output of `fit_transform`.
X: Numpy Array
Original data or data to run clustering on. If `None`, then use `lens` as default. X can be a SciPy sparse matrix.
clusterer: Default: DBSCAN
Scikit-learn API compatible clustering algorithm. Must provide `fit` and `predict`.
cover: kmapper.Cover
Cover scheme for lens. Instance of kmapper.cover providing methods `fit` and `transform`.
nerve: kmapper.Nerve
Nerve builder implementing `__call__(nodes)` API
precomputed : Boolean
Tell Mapper whether the data that you are clustering on is a precomputed distance matrix. If set to
`True`, the assumption is that you are also telling your `clusterer` that `metric='precomputed'` (which
is an argument for DBSCAN among others), which
will then cause the clusterer to expect a square distance matrix for each hypercube. `precomputed=True` will give a square matrix
to the clusterer to fit on for each hypercube.
remove_duplicate_nodes: Boolean
Removes duplicate nodes before edges are determined. A node is considered to be duplicate
if it has exactly the same set of points as another node.
nr_cubes: Int
.. deprecated:: 1.1.6
define Cover explicitly in future versions
The number of intervals/hypercubes to create. Default = 10.
overlap_perc: Float
.. deprecated:: 1.1.6
define Cover explicitly in future versions
The percentage of overlap "between" the intervals/hypercubes. Default = 0.1.
Returns
=======
simplicial_complex : dict
A dictionary with "nodes", "links" and "meta" information.
Examples
========
>>> # Default mapping.
>>> graph = mapper.map(X_projected, X_inverse)
>>> # Apply clustering on the projection instead of on inverse X
>>> graph = mapper.map(X_projected)
>>> # Use 20 cubes/intervals per projection dimension, with a 50% overlap
>>> graph = mapper.map(X_projected, X_inverse,
>>> cover=kmapper.Cover(n_cubes=20, perc_overlap=0.5))
>>> # Use multiple different cubes/intervals per projection dimension,
>>> # And vary the overlap
>>> graph = mapper.map(X_projected, X_inverse,
>>> cover=km.Cover(n_cubes=[10,20,5],
>>> perc_overlap=[0.1,0.2,0.5]))
>>> # Use KMeans with 2 clusters
>>> graph = mapper.map(X_projected, X_inverse,
>>> clusterer=sklearn.cluster.KMeans(2))
>>> # Use DBSCAN with "cosine"-distance
>>> graph = mapper.map(X_projected, X_inverse,
>>> clusterer=sklearn.cluster.DBSCAN(metric="cosine"))
>>> # Use HDBSCAN as the clusterer
>>> graph = mapper.map(X_projected, X_inverse,
>>> clusterer=hdbscan.HDBSCAN())
>>> # Parametrize the nerve of the covering
>>> graph = mapper.map(X_projected, X_inverse,
>>> nerve=km.GraphNerve(min_intersection=3)) | Below is the the instruction that describes the task:
### Input:
Apply Mapper algorithm on this projection and build a simplicial complex. Returns a dictionary with nodes and links.
Parameters
----------
lens: Numpy Array
Lower dimensional representation of data. In general will be output of `fit_transform`.
X: Numpy Array
Original data or data to run clustering on. If `None`, then use `lens` as default. X can be a SciPy sparse matrix.
clusterer: Default: DBSCAN
Scikit-learn API compatible clustering algorithm. Must provide `fit` and `predict`.
cover: kmapper.Cover
Cover scheme for lens. Instance of kmapper.cover providing methods `fit` and `transform`.
nerve: kmapper.Nerve
Nerve builder implementing `__call__(nodes)` API
precomputed : Boolean
Tell Mapper whether the data that you are clustering on is a precomputed distance matrix. If set to
`True`, the assumption is that you are also telling your `clusterer` that `metric='precomputed'` (which
is an argument for DBSCAN among others), which
will then cause the clusterer to expect a square distance matrix for each hypercube. `precomputed=True` will give a square matrix
to the clusterer to fit on for each hypercube.
remove_duplicate_nodes: Boolean
Removes duplicate nodes before edges are determined. A node is considered to be duplicate
if it has exactly the same set of points as another node.
nr_cubes: Int
.. deprecated:: 1.1.6
define Cover explicitly in future versions
The number of intervals/hypercubes to create. Default = 10.
overlap_perc: Float
.. deprecated:: 1.1.6
define Cover explicitly in future versions
The percentage of overlap "between" the intervals/hypercubes. Default = 0.1.
Returns
=======
simplicial_complex : dict
A dictionary with "nodes", "links" and "meta" information.
Examples
========
>>> # Default mapping.
>>> graph = mapper.map(X_projected, X_inverse)
>>> # Apply clustering on the projection instead of on inverse X
>>> graph = mapper.map(X_projected)
>>> # Use 20 cubes/intervals per projection dimension, with a 50% overlap
>>> graph = mapper.map(X_projected, X_inverse,
>>> cover=kmapper.Cover(n_cubes=20, perc_overlap=0.5))
>>> # Use multiple different cubes/intervals per projection dimension,
>>> # And vary the overlap
>>> graph = mapper.map(X_projected, X_inverse,
>>> cover=km.Cover(n_cubes=[10,20,5],
>>> perc_overlap=[0.1,0.2,0.5]))
>>> # Use KMeans with 2 clusters
>>> graph = mapper.map(X_projected, X_inverse,
>>> clusterer=sklearn.cluster.KMeans(2))
>>> # Use DBSCAN with "cosine"-distance
>>> graph = mapper.map(X_projected, X_inverse,
>>> clusterer=sklearn.cluster.DBSCAN(metric="cosine"))
>>> # Use HDBSCAN as the clusterer
>>> graph = mapper.map(X_projected, X_inverse,
>>> clusterer=hdbscan.HDBSCAN())
>>> # Parametrize the nerve of the covering
>>> graph = mapper.map(X_projected, X_inverse,
>>> nerve=km.GraphNerve(min_intersection=3))
### Response:
def map(
self,
lens,
X=None,
clusterer=cluster.DBSCAN(eps=0.5, min_samples=3),
cover=Cover(n_cubes=10, perc_overlap=0.1),
nerve=GraphNerve(),
precomputed=False,
remove_duplicate_nodes=False,
# These arguments are all deprecated
overlap_perc=None,
nr_cubes=None
):
"""Apply Mapper algorithm on this projection and build a simplicial complex. Returns a dictionary with nodes and links.
Parameters
----------
lens: Numpy Array
Lower dimensional representation of data. In general will be output of `fit_transform`.
X: Numpy Array
Original data or data to run clustering on. If `None`, then use `lens` as default. X can be a SciPy sparse matrix.
clusterer: Default: DBSCAN
Scikit-learn API compatible clustering algorithm. Must provide `fit` and `predict`.
cover: kmapper.Cover
Cover scheme for lens. Instance of kmapper.cover providing methods `fit` and `transform`.
nerve: kmapper.Nerve
Nerve builder implementing `__call__(nodes)` API
precomputed : Boolean
Tell Mapper whether the data that you are clustering on is a precomputed distance matrix. If set to
`True`, the assumption is that you are also telling your `clusterer` that `metric='precomputed'` (which
is an argument for DBSCAN among others), which
will then cause the clusterer to expect a square distance matrix for each hypercube. `precomputed=True` will give a square matrix
to the clusterer to fit on for each hypercube.
remove_duplicate_nodes: Boolean
Removes duplicate nodes before edges are determined. A node is considered to be duplicate
if it has exactly the same set of points as another node.
nr_cubes: Int
.. deprecated:: 1.1.6
define Cover explicitly in future versions
The number of intervals/hypercubes to create. Default = 10.
overlap_perc: Float
.. deprecated:: 1.1.6
define Cover explicitly in future versions
The percentage of overlap "between" the intervals/hypercubes. Default = 0.1.
Returns
=======
simplicial_complex : dict
A dictionary with "nodes", "links" and "meta" information.
Examples
========
>>> # Default mapping.
>>> graph = mapper.map(X_projected, X_inverse)
>>> # Apply clustering on the projection instead of on inverse X
>>> graph = mapper.map(X_projected)
>>> # Use 20 cubes/intervals per projection dimension, with a 50% overlap
>>> graph = mapper.map(X_projected, X_inverse,
>>> cover=kmapper.Cover(n_cubes=20, perc_overlap=0.5))
>>> # Use multiple different cubes/intervals per projection dimension,
>>> # And vary the overlap
>>> graph = mapper.map(X_projected, X_inverse,
>>> cover=km.Cover(n_cubes=[10,20,5],
>>> perc_overlap=[0.1,0.2,0.5]))
>>> # Use KMeans with 2 clusters
>>> graph = mapper.map(X_projected, X_inverse,
>>> clusterer=sklearn.cluster.KMeans(2))
>>> # Use DBSCAN with "cosine"-distance
>>> graph = mapper.map(X_projected, X_inverse,
>>> clusterer=sklearn.cluster.DBSCAN(metric="cosine"))
>>> # Use HDBSCAN as the clusterer
>>> graph = mapper.map(X_projected, X_inverse,
>>> clusterer=hdbscan.HDBSCAN())
>>> # Parametrize the nerve of the covering
>>> graph = mapper.map(X_projected, X_inverse,
>>> nerve=km.GraphNerve(min_intersection=3))
"""
start = datetime.now()
nodes = defaultdict(list)
meta = defaultdict(list)
graph = {}
# If inverse image is not provided, we use the projection as the inverse image (suffer projection loss)
if X is None:
X = lens
# Deprecation warnings
if nr_cubes is not None or overlap_perc is not None:
warnings.warn(
"Deprecation Warning: Please supply km.Cover object. Explicitly passing in n_cubes/nr_cubes and overlap_perc will be deprecated in future releases. ",
DeprecationWarning,
)
# If user supplied nr_cubes, overlap_perc, or coverer, opt for those
# TODO: remove this conditional after release in 1.2
if nr_cubes is not None or overlap_perc is not None:
n_cubes = nr_cubes if nr_cubes else 10
overlap_perc = overlap_perc if overlap_perc else 0.1
self.cover = Cover(n_cubes=n_cubes, perc_overlap=overlap_perc)
else:
self.cover = cover
if self.verbose > 0:
print(
"Mapping on data shaped %s using lens shaped %s\n"
% (str(X.shape), str(lens.shape))
)
# Prefix'ing the data with an ID column
ids = np.array([x for x in range(lens.shape[0])])
lens = np.c_[ids, lens]
if issparse(X):
X = hstack([ids[np.newaxis].T, X], format='csr')
else:
X = np.c_[ids, X]
# Cover scheme defines a list of elements
bins = self.cover.fit(lens)
# Algo's like K-Means, have a set number of clusters. We need this number
# to adjust for the minimal number of samples inside an interval before
# we consider clustering or skipping it.
cluster_params = clusterer.get_params()
min_cluster_samples = cluster_params.get(
"n_clusters",
cluster_params.get(
"min_cluster_size", cluster_params.get("min_samples", 1)
),
)
if self.verbose > 1:
print(
"Minimal points in hypercube before clustering: %d"
% (min_cluster_samples)
)
# Subdivide the projected data X in intervals/hypercubes with overlap
if self.verbose > 0:
bins = list(bins) # extract list from generator
total_bins = len(bins)
print("Creating %s hypercubes." % total_bins)
for i, hypercube in enumerate(self.cover.transform(lens)):
# If at least min_cluster_samples samples inside the hypercube
if hypercube.shape[0] >= min_cluster_samples:
# Cluster the data point(s) in the cube, skipping the id-column
# Note that we apply clustering on the inverse image (original data samples) that fall inside the cube.
ids = [int(nn) for nn in hypercube[:, 0]]
X_cube = X[ids]
fit_data = X_cube[:, 1:]
if precomputed:
fit_data = fit_data[:, ids]
cluster_predictions = clusterer.fit_predict(fit_data)
if self.verbose > 1:
print(
" > Found %s clusters in hypercube %s."
% (
np.unique(
cluster_predictions[cluster_predictions > -1]
).shape[0], i
)
)
for pred in np.unique(cluster_predictions):
# if not predicted as noise
if pred != -1 and not np.isnan(pred):
cluster_id = "cube{}_cluster{}".format(i, int(pred))
nodes[cluster_id] = hypercube[:, 0][cluster_predictions == pred].astype(int).tolist()
elif self.verbose > 1:
print("Cube_%s is empty.\n" % (i))
if remove_duplicate_nodes:
nodes = self._remove_duplicate_nodes(nodes)
links, simplices = nerve.compute(nodes)
graph["nodes"] = nodes
graph["links"] = links
graph["simplices"] = simplices
graph["meta_data"] = {
"projection": self.projection if self.projection else "custom",
"n_cubes": self.cover.n_cubes,
"perc_overlap": self.cover.perc_overlap,
"clusterer": str(clusterer),
"scaler": str(self.scaler),
}
graph["meta_nodes"] = meta
if self.verbose > 0:
self._summary(graph, str(datetime.now() - start))
return graph |
def list_my(self):
""" Find organization that has the current identity as the owner or as the member """
org_list = self.call_contract_command("Registry", "listOrganizations", [])
rez_owner = []
rez_member = []
for idx, org_id in enumerate(org_list):
(found, org_id, org_name, owner, members, serviceNames, repositoryNames) = self.call_contract_command("Registry", "getOrganizationById", [org_id])
if (not found):
raise Exception("Organization was removed during this call. Please retry.");
if self.ident.address == owner:
rez_owner.append((org_name, bytes32_to_str(org_id)))
if self.ident.address in members:
rez_member.append((org_name, bytes32_to_str(org_id)))
if (rez_owner):
self._printout("# Organizations you are the owner of")
self._printout("# OrgName OrgId")
for n,i in rez_owner:
self._printout("%s %s"%(n,i))
if (rez_member):
self._printout("# Organizations you are the member of")
self._printout("# OrgName OrgId")
for n,i in rez_member:
self._printout("%s %s"%(n,i)) | Find organization that has the current identity as the owner or as the member | Below is the the instruction that describes the task:
### Input:
Find organization that has the current identity as the owner or as the member
### Response:
def list_my(self):
""" Find organization that has the current identity as the owner or as the member """
org_list = self.call_contract_command("Registry", "listOrganizations", [])
rez_owner = []
rez_member = []
for idx, org_id in enumerate(org_list):
(found, org_id, org_name, owner, members, serviceNames, repositoryNames) = self.call_contract_command("Registry", "getOrganizationById", [org_id])
if (not found):
raise Exception("Organization was removed during this call. Please retry.");
if self.ident.address == owner:
rez_owner.append((org_name, bytes32_to_str(org_id)))
if self.ident.address in members:
rez_member.append((org_name, bytes32_to_str(org_id)))
if (rez_owner):
self._printout("# Organizations you are the owner of")
self._printout("# OrgName OrgId")
for n,i in rez_owner:
self._printout("%s %s"%(n,i))
if (rez_member):
self._printout("# Organizations you are the member of")
self._printout("# OrgName OrgId")
for n,i in rez_member:
self._printout("%s %s"%(n,i)) |
def download_apk(self, path='.'):
"""
Download Android .apk
@type path:
@param path:
"""
apk_fd, apk_fn = tempfile.mkstemp(prefix='fuzzfetch-', suffix='.apk')
os.close(apk_fd)
try:
_download_url(self.artifact_url('apk'), apk_fn)
shutil.copy(apk_fn, os.path.join(path, 'target.apk'))
finally:
os.unlink(apk_fn) | Download Android .apk
@type path:
@param path: | Below is the the instruction that describes the task:
### Input:
Download Android .apk
@type path:
@param path:
### Response:
def download_apk(self, path='.'):
"""
Download Android .apk
@type path:
@param path:
"""
apk_fd, apk_fn = tempfile.mkstemp(prefix='fuzzfetch-', suffix='.apk')
os.close(apk_fd)
try:
_download_url(self.artifact_url('apk'), apk_fn)
shutil.copy(apk_fn, os.path.join(path, 'target.apk'))
finally:
os.unlink(apk_fn) |
def transform_leave_one_out(self, X_in, y, mapping=None):
"""
Leave one out encoding uses a single column of floats to represent the means of the target variables.
"""
X = X_in.copy(deep=True)
random_state_ = check_random_state(self.random_state)
for col, colmap in mapping.items():
level_notunique = colmap['count'] > 1
unique_train = colmap.index
unseen_values = pd.Series([x for x in X[col].unique() if x not in unique_train])
is_nan = X[col].isnull()
is_unknown_value = X[col].isin(unseen_values.dropna())
if self.handle_unknown == 'error' and is_unknown_value.any():
raise ValueError('Columns to be encoded can not contain new values')
if y is None: # Replace level with its mean target; if level occurs only once, use global mean
level_means = (colmap['sum'] / colmap['count']).where(level_notunique, self._mean)
X[col] = X[col].map(level_means)
else: # Replace level with its mean target, calculated excluding this row's target
# The y (target) mean for this level is normally just the sum/count;
# excluding this row's y, it's (sum - y) / (count - 1)
level_means = (X[col].map(colmap['sum']) - y) / (X[col].map(colmap['count']) - 1)
# The 'where' fills in singleton levels (count = 1 -> div by 0) with the global mean
X[col] = level_means.where(X[col].map(colmap['count'][level_notunique]).notnull(), self._mean)
if self.handle_unknown == 'value':
X.loc[is_unknown_value, col] = self._mean
elif self.handle_unknown == 'return_nan':
X.loc[is_unknown_value, col] = np.nan
if self.handle_missing == 'value':
X.loc[is_nan & unseen_values.isnull().any(), col] = self._mean
elif self.handle_missing == 'return_nan':
X.loc[is_nan, col] = np.nan
if self.sigma is not None and y is not None:
X[col] = X[col] * random_state_.normal(1., self.sigma, X[col].shape[0])
return X | Leave one out encoding uses a single column of floats to represent the means of the target variables. | Below is the the instruction that describes the task:
### Input:
Leave one out encoding uses a single column of floats to represent the means of the target variables.
### Response:
def transform_leave_one_out(self, X_in, y, mapping=None):
"""
Leave one out encoding uses a single column of floats to represent the means of the target variables.
"""
X = X_in.copy(deep=True)
random_state_ = check_random_state(self.random_state)
for col, colmap in mapping.items():
level_notunique = colmap['count'] > 1
unique_train = colmap.index
unseen_values = pd.Series([x for x in X[col].unique() if x not in unique_train])
is_nan = X[col].isnull()
is_unknown_value = X[col].isin(unseen_values.dropna())
if self.handle_unknown == 'error' and is_unknown_value.any():
raise ValueError('Columns to be encoded can not contain new values')
if y is None: # Replace level with its mean target; if level occurs only once, use global mean
level_means = (colmap['sum'] / colmap['count']).where(level_notunique, self._mean)
X[col] = X[col].map(level_means)
else: # Replace level with its mean target, calculated excluding this row's target
# The y (target) mean for this level is normally just the sum/count;
# excluding this row's y, it's (sum - y) / (count - 1)
level_means = (X[col].map(colmap['sum']) - y) / (X[col].map(colmap['count']) - 1)
# The 'where' fills in singleton levels (count = 1 -> div by 0) with the global mean
X[col] = level_means.where(X[col].map(colmap['count'][level_notunique]).notnull(), self._mean)
if self.handle_unknown == 'value':
X.loc[is_unknown_value, col] = self._mean
elif self.handle_unknown == 'return_nan':
X.loc[is_unknown_value, col] = np.nan
if self.handle_missing == 'value':
X.loc[is_nan & unseen_values.isnull().any(), col] = self._mean
elif self.handle_missing == 'return_nan':
X.loc[is_nan, col] = np.nan
if self.sigma is not None and y is not None:
X[col] = X[col] * random_state_.normal(1., self.sigma, X[col].shape[0])
return X |
def extract_haml(fileobj, keywords, comment_tags, options):
""" babel translation token extract function for haml files """
import haml
from mako import lexer, parsetree
from mako.ext.babelplugin import extract_nodes
encoding = options.get('input_encoding', options.get('encoding', None))
template_node = lexer.Lexer(haml.preprocessor(fileobj.read()), input_encoding=encoding).parse()
for extracted in extract_nodes(template_node.get_children(), keywords, comment_tags, options):
yield extracted | babel translation token extract function for haml files | Below is the the instruction that describes the task:
### Input:
babel translation token extract function for haml files
### Response:
def extract_haml(fileobj, keywords, comment_tags, options):
""" babel translation token extract function for haml files """
import haml
from mako import lexer, parsetree
from mako.ext.babelplugin import extract_nodes
encoding = options.get('input_encoding', options.get('encoding', None))
template_node = lexer.Lexer(haml.preprocessor(fileobj.read()), input_encoding=encoding).parse()
for extracted in extract_nodes(template_node.get_children(), keywords, comment_tags, options):
yield extracted |
def print_model(self):
"""Return the assembled cards as a JSON string.
Returns
-------
cards_json : str
The JSON string representing the assembled cards.
"""
cards = [c.card for c in self.cards]
# If there is only one card, print it as a single
# card not as a list
if len(cards) == 1:
cards = cards[0]
cards_json = json.dumps(cards, indent=1)
return cards_json | Return the assembled cards as a JSON string.
Returns
-------
cards_json : str
The JSON string representing the assembled cards. | Below is the the instruction that describes the task:
### Input:
Return the assembled cards as a JSON string.
Returns
-------
cards_json : str
The JSON string representing the assembled cards.
### Response:
def print_model(self):
"""Return the assembled cards as a JSON string.
Returns
-------
cards_json : str
The JSON string representing the assembled cards.
"""
cards = [c.card for c in self.cards]
# If there is only one card, print it as a single
# card not as a list
if len(cards) == 1:
cards = cards[0]
cards_json = json.dumps(cards, indent=1)
return cards_json |
def GetAllUserSummaries():
"""Returns a string containing summary info for all GRR users."""
grr_api = maintenance_utils.InitGRRRootAPI()
user_wrappers = sorted(grr_api.ListGrrUsers(), key=lambda x: x.username)
summaries = [_Summarize(w.data) for w in user_wrappers]
return "\n\n".join(summaries) | Returns a string containing summary info for all GRR users. | Below is the the instruction that describes the task:
### Input:
Returns a string containing summary info for all GRR users.
### Response:
def GetAllUserSummaries():
"""Returns a string containing summary info for all GRR users."""
grr_api = maintenance_utils.InitGRRRootAPI()
user_wrappers = sorted(grr_api.ListGrrUsers(), key=lambda x: x.username)
summaries = [_Summarize(w.data) for w in user_wrappers]
return "\n\n".join(summaries) |
def drop_not_null(self, model, *names):
"""Drop not null."""
for name in names:
field = model._meta.fields[name]
field.null = True
self.ops.append(self.migrator.drop_not_null(model._meta.table_name, field.column_name))
return model | Drop not null. | Below is the the instruction that describes the task:
### Input:
Drop not null.
### Response:
def drop_not_null(self, model, *names):
"""Drop not null."""
for name in names:
field = model._meta.fields[name]
field.null = True
self.ops.append(self.migrator.drop_not_null(model._meta.table_name, field.column_name))
return model |
def handle_message(self, msg):
"""manage message of different type and in the context of path"""
if msg.module not in self._modules:
if msg.module:
self.writeln("************* Module %s" % msg.module)
self._modules.add(msg.module)
else:
self.writeln("************* ")
self.write_message(msg) | manage message of different type and in the context of path | Below is the the instruction that describes the task:
### Input:
manage message of different type and in the context of path
### Response:
def handle_message(self, msg):
"""manage message of different type and in the context of path"""
if msg.module not in self._modules:
if msg.module:
self.writeln("************* Module %s" % msg.module)
self._modules.add(msg.module)
else:
self.writeln("************* ")
self.write_message(msg) |
def run_simulation(topo, **kwargs):
'''
Get the simulation substrate started. The key things are to set up
a series of queues that connect nodes together and get the link emulation
objects started (all inside the NodeExecutor class). The NodePlumbing
named tuples hold together threads for each node, the emulation
substrate (NodeExecutors), and the ingress queue that each node receives
packets from.
'''
log_debug("Threads at startup:")
for t in threading.enumerate():
log_debug("\tthread at startup {}".format(t.name))
with yellow():
log_info("Starting up switchyard simulation substrate.")
glue = SyssGlue(topo, **kwargs)
cli = Cli(glue, topo)
try:
cli.cmdloop()
except KeyboardInterrupt:
print("Received SIGINT --- shutting down.")
cli.stop() | Get the simulation substrate started. The key things are to set up
a series of queues that connect nodes together and get the link emulation
objects started (all inside the NodeExecutor class). The NodePlumbing
named tuples hold together threads for each node, the emulation
substrate (NodeExecutors), and the ingress queue that each node receives
packets from. | Below is the the instruction that describes the task:
### Input:
Get the simulation substrate started. The key things are to set up
a series of queues that connect nodes together and get the link emulation
objects started (all inside the NodeExecutor class). The NodePlumbing
named tuples hold together threads for each node, the emulation
substrate (NodeExecutors), and the ingress queue that each node receives
packets from.
### Response:
def run_simulation(topo, **kwargs):
'''
Get the simulation substrate started. The key things are to set up
a series of queues that connect nodes together and get the link emulation
objects started (all inside the NodeExecutor class). The NodePlumbing
named tuples hold together threads for each node, the emulation
substrate (NodeExecutors), and the ingress queue that each node receives
packets from.
'''
log_debug("Threads at startup:")
for t in threading.enumerate():
log_debug("\tthread at startup {}".format(t.name))
with yellow():
log_info("Starting up switchyard simulation substrate.")
glue = SyssGlue(topo, **kwargs)
cli = Cli(glue, topo)
try:
cli.cmdloop()
except KeyboardInterrupt:
print("Received SIGINT --- shutting down.")
cli.stop() |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.