code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def session_id(self):
"""
Return the session id of the current connection.
The session id is issued (through an API request) the first time it is requested, but no sooner. This is
because generating a session id puts it into the DKV on the server, which effectively locks the cluster. Once
issued, the session id will stay the same until the connection is closed.
"""
if self._session_id is None:
req = self.request("POST /4/sessions")
self._session_id = req.get("session_key") or req.get("session_id")
return CallableString(self._session_id) | Return the session id of the current connection.
The session id is issued (through an API request) the first time it is requested, but no sooner. This is
because generating a session id puts it into the DKV on the server, which effectively locks the cluster. Once
issued, the session id will stay the same until the connection is closed. | Below is the the instruction that describes the task:
### Input:
Return the session id of the current connection.
The session id is issued (through an API request) the first time it is requested, but no sooner. This is
because generating a session id puts it into the DKV on the server, which effectively locks the cluster. Once
issued, the session id will stay the same until the connection is closed.
### Response:
def session_id(self):
"""
Return the session id of the current connection.
The session id is issued (through an API request) the first time it is requested, but no sooner. This is
because generating a session id puts it into the DKV on the server, which effectively locks the cluster. Once
issued, the session id will stay the same until the connection is closed.
"""
if self._session_id is None:
req = self.request("POST /4/sessions")
self._session_id = req.get("session_key") or req.get("session_id")
return CallableString(self._session_id) |
def one_of(s):
'''Parser a char from specified string.'''
@Parser
def one_of_parser(text, index=0):
if index < len(text) and text[index] in s:
return Value.success(index + 1, text[index])
else:
return Value.failure(index, 'one of {}'.format(s))
return one_of_parser | Parser a char from specified string. | Below is the the instruction that describes the task:
### Input:
Parser a char from specified string.
### Response:
def one_of(s):
'''Parser a char from specified string.'''
@Parser
def one_of_parser(text, index=0):
if index < len(text) and text[index] in s:
return Value.success(index + 1, text[index])
else:
return Value.failure(index, 'one of {}'.format(s))
return one_of_parser |
def p(self):
"""
Helper property containing the percentage this slider is "filled".
This property is read-only.
"""
return (self.n-self.nmin)/max((self.nmax-self.nmin),1) | Helper property containing the percentage this slider is "filled".
This property is read-only. | Below is the the instruction that describes the task:
### Input:
Helper property containing the percentage this slider is "filled".
This property is read-only.
### Response:
def p(self):
"""
Helper property containing the percentage this slider is "filled".
This property is read-only.
"""
return (self.n-self.nmin)/max((self.nmax-self.nmin),1) |
def replace_pool_members(hostname, username, password, name, members):
'''
A function to connect to a bigip device and replace members of an existing pool with new members.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the pool to modify
members
List of comma delimited pool members to replace existing members with.
i.e. 10.1.1.1:80,10.1.1.2:80,10.1.1.3:80
CLI Example::
salt '*' bigip.replace_pool_members bigip admin admin my-pool 10.2.2.1:80,10.2.2.2:80,10.2.2.3:80
'''
payload = {}
payload['name'] = name
#specify members if provided
if members is not None:
if isinstance(members, six.string_types):
members = members.split(',')
pool_members = []
for member in members:
#check to see if already a dictionary ( for states)
if isinstance(member, dict):
#check for state alternative name 'member_state', replace with state
if 'member_state' in member.keys():
member['state'] = member.pop('member_state')
#replace underscore with dash
for key in member:
new_key = key.replace('_', '-')
member[new_key] = member.pop(key)
pool_members.append(member)
#parse string passed via execution command (for executions)
else:
pool_members.append({'name': member, 'address': member.split(':')[0]})
payload['members'] = pool_members
#build session
bigip_session = _build_session(username, password)
#put to REST
try:
response = bigip_session.put(
BIG_IP_URL_BASE.format(host=hostname) + '/ltm/pool/{name}'.format(name=name),
data=salt.utils.json.dumps(payload)
)
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response) | A function to connect to a bigip device and replace members of an existing pool with new members.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the pool to modify
members
List of comma delimited pool members to replace existing members with.
i.e. 10.1.1.1:80,10.1.1.2:80,10.1.1.3:80
CLI Example::
salt '*' bigip.replace_pool_members bigip admin admin my-pool 10.2.2.1:80,10.2.2.2:80,10.2.2.3:80 | Below is the the instruction that describes the task:
### Input:
A function to connect to a bigip device and replace members of an existing pool with new members.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the pool to modify
members
List of comma delimited pool members to replace existing members with.
i.e. 10.1.1.1:80,10.1.1.2:80,10.1.1.3:80
CLI Example::
salt '*' bigip.replace_pool_members bigip admin admin my-pool 10.2.2.1:80,10.2.2.2:80,10.2.2.3:80
### Response:
def replace_pool_members(hostname, username, password, name, members):
'''
A function to connect to a bigip device and replace members of an existing pool with new members.
hostname
The host/address of the bigip device
username
The iControl REST username
password
The iControl REST password
name
The name of the pool to modify
members
List of comma delimited pool members to replace existing members with.
i.e. 10.1.1.1:80,10.1.1.2:80,10.1.1.3:80
CLI Example::
salt '*' bigip.replace_pool_members bigip admin admin my-pool 10.2.2.1:80,10.2.2.2:80,10.2.2.3:80
'''
payload = {}
payload['name'] = name
#specify members if provided
if members is not None:
if isinstance(members, six.string_types):
members = members.split(',')
pool_members = []
for member in members:
#check to see if already a dictionary ( for states)
if isinstance(member, dict):
#check for state alternative name 'member_state', replace with state
if 'member_state' in member.keys():
member['state'] = member.pop('member_state')
#replace underscore with dash
for key in member:
new_key = key.replace('_', '-')
member[new_key] = member.pop(key)
pool_members.append(member)
#parse string passed via execution command (for executions)
else:
pool_members.append({'name': member, 'address': member.split(':')[0]})
payload['members'] = pool_members
#build session
bigip_session = _build_session(username, password)
#put to REST
try:
response = bigip_session.put(
BIG_IP_URL_BASE.format(host=hostname) + '/ltm/pool/{name}'.format(name=name),
data=salt.utils.json.dumps(payload)
)
except requests.exceptions.ConnectionError as e:
return _load_connection_error(hostname, e)
return _load_response(response) |
def licenseFile(self):
"""
Returns the license file for this builder.
:return <str>
"""
if self._licenseFile:
return self._licenseFile
elif self._license:
f = projex.resources.find('licenses/{0}.txt'.format(self.license()))
return f
else:
return '' | Returns the license file for this builder.
:return <str> | Below is the the instruction that describes the task:
### Input:
Returns the license file for this builder.
:return <str>
### Response:
def licenseFile(self):
"""
Returns the license file for this builder.
:return <str>
"""
if self._licenseFile:
return self._licenseFile
elif self._license:
f = projex.resources.find('licenses/{0}.txt'.format(self.license()))
return f
else:
return '' |
def get_template_names(self):
"""
datagrid的默认模板
"""
names = super(EasyUIUpdateView, self).get_template_names()
names.append('easyui/form.html')
return names | datagrid的默认模板 | Below is the the instruction that describes the task:
### Input:
datagrid的默认模板
### Response:
def get_template_names(self):
"""
datagrid的默认模板
"""
names = super(EasyUIUpdateView, self).get_template_names()
names.append('easyui/form.html')
return names |
def make_request(self, url, method='get', headers=None, data=None,
callback=None, errors=STRICT, verify=False, timeout=None, **params):
"""
Reusable method for performing requests.
:param url - URL to request
:param method - request method, default is 'get'
:param headers - request headers
:param data - post data
:param callback - callback to be applied to response,
default callback will parse response as json object.
:param errors - specifies communication errors handling mode, possible
values are:
* strict (default) - throw an error as soon as one
occurred
* graceful - ignore certain errors, e.g. EmptyResponse
* ignore - ignore all errors and return a result in
any case.
NOTE that it DOES NOT mean that no
exceptions can be
raised from this method, it mostly ignores
communication
related errors.
* None or empty string equals to default
:param verify - whether or not to verify SSL cert, default to False
:param timeout - the timeout of the request in second, default to None
:param params - additional query parameters for request
"""
error_modes = (STRICT, GRACEFUL, IGNORE)
error_mode = errors or GRACEFUL
if error_mode.lower() not in error_modes:
raise ValueError(
'Possible values for errors argument are: %s'
% ','.join(error_modes))
if callback is None:
callback = self._default_resp_callback
request = getattr(requests, method.lower())
log.debug('* Request URL: %s' % url)
log.debug('* Request method: %s' % method)
log.debug('* Request query params: %s' % params)
log.debug('* Request headers: %s' % headers)
log.debug('* Request timeout: %s' % timeout)
r = request(
url, headers=headers, data=data, verify=verify, timeout=timeout, params=params)
log.debug('* r.url: %s' % r.url)
try:
r.raise_for_status()
return callback(r)
except Exception as e:
return self._with_error_handling(r, e,
error_mode, self.response_format) | Reusable method for performing requests.
:param url - URL to request
:param method - request method, default is 'get'
:param headers - request headers
:param data - post data
:param callback - callback to be applied to response,
default callback will parse response as json object.
:param errors - specifies communication errors handling mode, possible
values are:
* strict (default) - throw an error as soon as one
occurred
* graceful - ignore certain errors, e.g. EmptyResponse
* ignore - ignore all errors and return a result in
any case.
NOTE that it DOES NOT mean that no
exceptions can be
raised from this method, it mostly ignores
communication
related errors.
* None or empty string equals to default
:param verify - whether or not to verify SSL cert, default to False
:param timeout - the timeout of the request in second, default to None
:param params - additional query parameters for request | Below is the the instruction that describes the task:
### Input:
Reusable method for performing requests.
:param url - URL to request
:param method - request method, default is 'get'
:param headers - request headers
:param data - post data
:param callback - callback to be applied to response,
default callback will parse response as json object.
:param errors - specifies communication errors handling mode, possible
values are:
* strict (default) - throw an error as soon as one
occurred
* graceful - ignore certain errors, e.g. EmptyResponse
* ignore - ignore all errors and return a result in
any case.
NOTE that it DOES NOT mean that no
exceptions can be
raised from this method, it mostly ignores
communication
related errors.
* None or empty string equals to default
:param verify - whether or not to verify SSL cert, default to False
:param timeout - the timeout of the request in second, default to None
:param params - additional query parameters for request
### Response:
def make_request(self, url, method='get', headers=None, data=None,
callback=None, errors=STRICT, verify=False, timeout=None, **params):
"""
Reusable method for performing requests.
:param url - URL to request
:param method - request method, default is 'get'
:param headers - request headers
:param data - post data
:param callback - callback to be applied to response,
default callback will parse response as json object.
:param errors - specifies communication errors handling mode, possible
values are:
* strict (default) - throw an error as soon as one
occurred
* graceful - ignore certain errors, e.g. EmptyResponse
* ignore - ignore all errors and return a result in
any case.
NOTE that it DOES NOT mean that no
exceptions can be
raised from this method, it mostly ignores
communication
related errors.
* None or empty string equals to default
:param verify - whether or not to verify SSL cert, default to False
:param timeout - the timeout of the request in second, default to None
:param params - additional query parameters for request
"""
error_modes = (STRICT, GRACEFUL, IGNORE)
error_mode = errors or GRACEFUL
if error_mode.lower() not in error_modes:
raise ValueError(
'Possible values for errors argument are: %s'
% ','.join(error_modes))
if callback is None:
callback = self._default_resp_callback
request = getattr(requests, method.lower())
log.debug('* Request URL: %s' % url)
log.debug('* Request method: %s' % method)
log.debug('* Request query params: %s' % params)
log.debug('* Request headers: %s' % headers)
log.debug('* Request timeout: %s' % timeout)
r = request(
url, headers=headers, data=data, verify=verify, timeout=timeout, params=params)
log.debug('* r.url: %s' % r.url)
try:
r.raise_for_status()
return callback(r)
except Exception as e:
return self._with_error_handling(r, e,
error_mode, self.response_format) |
def run(self, workflow_input, *args, **kwargs):
'''
:param workflow_input: Dictionary of the workflow's input arguments; see below for more details
:type workflow_input: dict
:param instance_type: Instance type on which all stages' jobs will be run, or a dict mapping function names to instance types. These may be overridden on a per-stage basis if stage_instance_types is specified.
:type instance_type: string or dict
:param stage_instance_types: A dict mapping stage IDs, names, or indices to either a string (representing an instance type to be used for all functions in that stage), or a dict mapping function names to instance types.
:type stage_instance_types: dict
:param stage_folders: A dict mapping stage IDs, names, indices, and/or the string "*" to folder values to be used for the stages' output folders (use "*" as the default for all unnamed stages)
:type stage_folders: dict
:param rerun_stages: A list of stage IDs, names, indices, and/or the string "*" to indicate which stages should be run even if there are cached executions available
:type rerun_stages: list of strings
:returns: Object handler of the newly created analysis
:rtype: :class:`~dxpy.bindings.dxanalysis.DXAnalysis`
Run the workflow. See :meth:`dxpy.bindings.dxapplet.DXExecutable.run` for additional args.
When providing input for the workflow, keys should be of one of the following forms:
* "N.name" where *N* is the stage number, and *name* is the
name of the input, e.g. "0.reads" if the first stage takes
in an input called "reads"
* "stagename.name" where *stagename* is the stage name, and
*name* is the name of the input within the stage
* "stageID.name" where *stageID* is the stage ID, and *name*
is the name of the input within the stage
* "name" where *name* is the name of a workflow level input
(defined in inputs) or the name that has been
exported for the workflow (this name will appear as a key
in the "inputSpec" of this workflow's description if it has
been exported for this purpose)
'''
return super(DXGlobalWorkflow, self).run(workflow_input, *args, **kwargs) | :param workflow_input: Dictionary of the workflow's input arguments; see below for more details
:type workflow_input: dict
:param instance_type: Instance type on which all stages' jobs will be run, or a dict mapping function names to instance types. These may be overridden on a per-stage basis if stage_instance_types is specified.
:type instance_type: string or dict
:param stage_instance_types: A dict mapping stage IDs, names, or indices to either a string (representing an instance type to be used for all functions in that stage), or a dict mapping function names to instance types.
:type stage_instance_types: dict
:param stage_folders: A dict mapping stage IDs, names, indices, and/or the string "*" to folder values to be used for the stages' output folders (use "*" as the default for all unnamed stages)
:type stage_folders: dict
:param rerun_stages: A list of stage IDs, names, indices, and/or the string "*" to indicate which stages should be run even if there are cached executions available
:type rerun_stages: list of strings
:returns: Object handler of the newly created analysis
:rtype: :class:`~dxpy.bindings.dxanalysis.DXAnalysis`
Run the workflow. See :meth:`dxpy.bindings.dxapplet.DXExecutable.run` for additional args.
When providing input for the workflow, keys should be of one of the following forms:
* "N.name" where *N* is the stage number, and *name* is the
name of the input, e.g. "0.reads" if the first stage takes
in an input called "reads"
* "stagename.name" where *stagename* is the stage name, and
*name* is the name of the input within the stage
* "stageID.name" where *stageID* is the stage ID, and *name*
is the name of the input within the stage
* "name" where *name* is the name of a workflow level input
(defined in inputs) or the name that has been
exported for the workflow (this name will appear as a key
in the "inputSpec" of this workflow's description if it has
been exported for this purpose) | Below is the the instruction that describes the task:
### Input:
:param workflow_input: Dictionary of the workflow's input arguments; see below for more details
:type workflow_input: dict
:param instance_type: Instance type on which all stages' jobs will be run, or a dict mapping function names to instance types. These may be overridden on a per-stage basis if stage_instance_types is specified.
:type instance_type: string or dict
:param stage_instance_types: A dict mapping stage IDs, names, or indices to either a string (representing an instance type to be used for all functions in that stage), or a dict mapping function names to instance types.
:type stage_instance_types: dict
:param stage_folders: A dict mapping stage IDs, names, indices, and/or the string "*" to folder values to be used for the stages' output folders (use "*" as the default for all unnamed stages)
:type stage_folders: dict
:param rerun_stages: A list of stage IDs, names, indices, and/or the string "*" to indicate which stages should be run even if there are cached executions available
:type rerun_stages: list of strings
:returns: Object handler of the newly created analysis
:rtype: :class:`~dxpy.bindings.dxanalysis.DXAnalysis`
Run the workflow. See :meth:`dxpy.bindings.dxapplet.DXExecutable.run` for additional args.
When providing input for the workflow, keys should be of one of the following forms:
* "N.name" where *N* is the stage number, and *name* is the
name of the input, e.g. "0.reads" if the first stage takes
in an input called "reads"
* "stagename.name" where *stagename* is the stage name, and
*name* is the name of the input within the stage
* "stageID.name" where *stageID* is the stage ID, and *name*
is the name of the input within the stage
* "name" where *name* is the name of a workflow level input
(defined in inputs) or the name that has been
exported for the workflow (this name will appear as a key
in the "inputSpec" of this workflow's description if it has
been exported for this purpose)
### Response:
def run(self, workflow_input, *args, **kwargs):
'''
:param workflow_input: Dictionary of the workflow's input arguments; see below for more details
:type workflow_input: dict
:param instance_type: Instance type on which all stages' jobs will be run, or a dict mapping function names to instance types. These may be overridden on a per-stage basis if stage_instance_types is specified.
:type instance_type: string or dict
:param stage_instance_types: A dict mapping stage IDs, names, or indices to either a string (representing an instance type to be used for all functions in that stage), or a dict mapping function names to instance types.
:type stage_instance_types: dict
:param stage_folders: A dict mapping stage IDs, names, indices, and/or the string "*" to folder values to be used for the stages' output folders (use "*" as the default for all unnamed stages)
:type stage_folders: dict
:param rerun_stages: A list of stage IDs, names, indices, and/or the string "*" to indicate which stages should be run even if there are cached executions available
:type rerun_stages: list of strings
:returns: Object handler of the newly created analysis
:rtype: :class:`~dxpy.bindings.dxanalysis.DXAnalysis`
Run the workflow. See :meth:`dxpy.bindings.dxapplet.DXExecutable.run` for additional args.
When providing input for the workflow, keys should be of one of the following forms:
* "N.name" where *N* is the stage number, and *name* is the
name of the input, e.g. "0.reads" if the first stage takes
in an input called "reads"
* "stagename.name" where *stagename* is the stage name, and
*name* is the name of the input within the stage
* "stageID.name" where *stageID* is the stage ID, and *name*
is the name of the input within the stage
* "name" where *name* is the name of a workflow level input
(defined in inputs) or the name that has been
exported for the workflow (this name will appear as a key
in the "inputSpec" of this workflow's description if it has
been exported for this purpose)
'''
return super(DXGlobalWorkflow, self).run(workflow_input, *args, **kwargs) |
def advance(self, size: int) -> None:
"""
Advance the current buffer position by ``size`` bytes.
"""
assert 0 < size <= self._size
self._size -= size
pos = self._first_pos
buffers = self._buffers
while buffers and size > 0:
is_large, b = buffers[0]
b_remain = len(b) - size - pos
if b_remain <= 0:
buffers.popleft()
size -= len(b) - pos
pos = 0
elif is_large:
pos += size
size = 0
else:
# Amortized O(1) shrink for Python 2
pos += size
if len(b) <= 2 * pos:
del typing.cast(bytearray, b)[:pos]
pos = 0
size = 0
assert size == 0
self._first_pos = pos | Advance the current buffer position by ``size`` bytes. | Below is the the instruction that describes the task:
### Input:
Advance the current buffer position by ``size`` bytes.
### Response:
def advance(self, size: int) -> None:
"""
Advance the current buffer position by ``size`` bytes.
"""
assert 0 < size <= self._size
self._size -= size
pos = self._first_pos
buffers = self._buffers
while buffers and size > 0:
is_large, b = buffers[0]
b_remain = len(b) - size - pos
if b_remain <= 0:
buffers.popleft()
size -= len(b) - pos
pos = 0
elif is_large:
pos += size
size = 0
else:
# Amortized O(1) shrink for Python 2
pos += size
if len(b) <= 2 * pos:
del typing.cast(bytearray, b)[:pos]
pos = 0
size = 0
assert size == 0
self._first_pos = pos |
def add_edge(self, u, v, **kwargs):
"""
Add an edge between variable_node and factor_node.
Parameters
----------
u, v: nodes
Nodes can be any hashable Python object.
Examples
--------
>>> from pgmpy.models import FactorGraph
>>> G = FactorGraph()
>>> G.add_nodes_from(['a', 'b', 'c'])
>>> phi1 = DiscreteFactor(['a', 'b'], [2, 2], np.random.rand(4))
>>> G.add_nodes_from([phi1, phi2])
>>> G.add_edge('a', phi1)
"""
if u != v:
super(FactorGraph, self).add_edge(u, v, **kwargs)
else:
raise ValueError('Self loops are not allowed') | Add an edge between variable_node and factor_node.
Parameters
----------
u, v: nodes
Nodes can be any hashable Python object.
Examples
--------
>>> from pgmpy.models import FactorGraph
>>> G = FactorGraph()
>>> G.add_nodes_from(['a', 'b', 'c'])
>>> phi1 = DiscreteFactor(['a', 'b'], [2, 2], np.random.rand(4))
>>> G.add_nodes_from([phi1, phi2])
>>> G.add_edge('a', phi1) | Below is the the instruction that describes the task:
### Input:
Add an edge between variable_node and factor_node.
Parameters
----------
u, v: nodes
Nodes can be any hashable Python object.
Examples
--------
>>> from pgmpy.models import FactorGraph
>>> G = FactorGraph()
>>> G.add_nodes_from(['a', 'b', 'c'])
>>> phi1 = DiscreteFactor(['a', 'b'], [2, 2], np.random.rand(4))
>>> G.add_nodes_from([phi1, phi2])
>>> G.add_edge('a', phi1)
### Response:
def add_edge(self, u, v, **kwargs):
"""
Add an edge between variable_node and factor_node.
Parameters
----------
u, v: nodes
Nodes can be any hashable Python object.
Examples
--------
>>> from pgmpy.models import FactorGraph
>>> G = FactorGraph()
>>> G.add_nodes_from(['a', 'b', 'c'])
>>> phi1 = DiscreteFactor(['a', 'b'], [2, 2], np.random.rand(4))
>>> G.add_nodes_from([phi1, phi2])
>>> G.add_edge('a', phi1)
"""
if u != v:
super(FactorGraph, self).add_edge(u, v, **kwargs)
else:
raise ValueError('Self loops are not allowed') |
def _chunk_filter(self, extensions):
""" Create a filter from the extensions and ignore files """
if isinstance(extensions, six.string_types):
extensions = extensions.split()
def _filter(chunk):
""" Exclusion filter """
name = chunk['name']
if extensions is not None:
if not any(name.endswith(e) for e in extensions):
return False
for pattern in self.state.ignore_re:
if pattern.match(name):
return False
for pattern in self.state.ignore:
if fnmatch.fnmatchcase(name, pattern):
return False
return True
return _filter | Create a filter from the extensions and ignore files | Below is the the instruction that describes the task:
### Input:
Create a filter from the extensions and ignore files
### Response:
def _chunk_filter(self, extensions):
""" Create a filter from the extensions and ignore files """
if isinstance(extensions, six.string_types):
extensions = extensions.split()
def _filter(chunk):
""" Exclusion filter """
name = chunk['name']
if extensions is not None:
if not any(name.endswith(e) for e in extensions):
return False
for pattern in self.state.ignore_re:
if pattern.match(name):
return False
for pattern in self.state.ignore:
if fnmatch.fnmatchcase(name, pattern):
return False
return True
return _filter |
def _fetch_size(self, request: Request) -> int:
'''Return size of file.
Coroutine.
'''
try:
size = yield from self._commander.size(request.file_path)
return size
except FTPServerError:
return | Return size of file.
Coroutine. | Below is the the instruction that describes the task:
### Input:
Return size of file.
Coroutine.
### Response:
def _fetch_size(self, request: Request) -> int:
'''Return size of file.
Coroutine.
'''
try:
size = yield from self._commander.size(request.file_path)
return size
except FTPServerError:
return |
def wrap(self, value):
''' Validates ``value`` and wraps it with ``ComputedField.computed_type``'''
self.validate_wrap(value)
return self.computed_type.wrap(value) | Validates ``value`` and wraps it with ``ComputedField.computed_type`` | Below is the the instruction that describes the task:
### Input:
Validates ``value`` and wraps it with ``ComputedField.computed_type``
### Response:
def wrap(self, value):
''' Validates ``value`` and wraps it with ``ComputedField.computed_type``'''
self.validate_wrap(value)
return self.computed_type.wrap(value) |
def _spec_from_via(self, proxied_inventory_name, via_spec):
"""
Produce a dict connection specifiction given a string `via_spec`, of
the form `[[become_method:]become_user@]inventory_hostname`.
"""
become_user, _, inventory_name = via_spec.rpartition('@')
become_method, _, become_user = become_user.rpartition(':')
# must use __contains__ to avoid a TypeError for a missing host on
# Ansible 2.3.
if self.host_vars is None or inventory_name not in self.host_vars:
raise ansible.errors.AnsibleConnectionFailure(
self.unknown_via_msg % (
via_spec,
proxied_inventory_name,
)
)
via_vars = self.host_vars[inventory_name]
return ansible_mitogen.transport_config.MitogenViaSpec(
inventory_name=inventory_name,
play_context=self._play_context,
host_vars=dict(via_vars), # TODO: make it lazy
become_method=become_method or None,
become_user=become_user or None,
) | Produce a dict connection specifiction given a string `via_spec`, of
the form `[[become_method:]become_user@]inventory_hostname`. | Below is the the instruction that describes the task:
### Input:
Produce a dict connection specifiction given a string `via_spec`, of
the form `[[become_method:]become_user@]inventory_hostname`.
### Response:
def _spec_from_via(self, proxied_inventory_name, via_spec):
"""
Produce a dict connection specifiction given a string `via_spec`, of
the form `[[become_method:]become_user@]inventory_hostname`.
"""
become_user, _, inventory_name = via_spec.rpartition('@')
become_method, _, become_user = become_user.rpartition(':')
# must use __contains__ to avoid a TypeError for a missing host on
# Ansible 2.3.
if self.host_vars is None or inventory_name not in self.host_vars:
raise ansible.errors.AnsibleConnectionFailure(
self.unknown_via_msg % (
via_spec,
proxied_inventory_name,
)
)
via_vars = self.host_vars[inventory_name]
return ansible_mitogen.transport_config.MitogenViaSpec(
inventory_name=inventory_name,
play_context=self._play_context,
host_vars=dict(via_vars), # TODO: make it lazy
become_method=become_method or None,
become_user=become_user or None,
) |
def unblock_user_signals(self, name, ignore_error=False):
"""
Reconnects the user-defined signals for the specified
parameter name (blocked with "block_user_signal_changed")
Note this only affects those connections made with
connect_signal_changed(), and I do not recommend adding new connections
while they're blocked!
"""
x = self._find_parameter(name.split("/"), quiet=ignore_error)
# if it pooped.
if x==None: return None
# reconnect it to all its functions
if name in self._connection_lists:
for f in self._connection_lists[name]: x.sigValueChanged.connect(f)
return self | Reconnects the user-defined signals for the specified
parameter name (blocked with "block_user_signal_changed")
Note this only affects those connections made with
connect_signal_changed(), and I do not recommend adding new connections
while they're blocked! | Below is the the instruction that describes the task:
### Input:
Reconnects the user-defined signals for the specified
parameter name (blocked with "block_user_signal_changed")
Note this only affects those connections made with
connect_signal_changed(), and I do not recommend adding new connections
while they're blocked!
### Response:
def unblock_user_signals(self, name, ignore_error=False):
"""
Reconnects the user-defined signals for the specified
parameter name (blocked with "block_user_signal_changed")
Note this only affects those connections made with
connect_signal_changed(), and I do not recommend adding new connections
while they're blocked!
"""
x = self._find_parameter(name.split("/"), quiet=ignore_error)
# if it pooped.
if x==None: return None
# reconnect it to all its functions
if name in self._connection_lists:
for f in self._connection_lists[name]: x.sigValueChanged.connect(f)
return self |
def _get_rupture_dimensions(src, mag, nodal_plane):
"""
Calculate and return the rupture length and width
for given magnitude ``mag`` and nodal plane.
:param src:
a PointSource, AreaSource or MultiPointSource
:param mag:
a magnitude
:param nodal_plane:
Instance of :class:`openquake.hazardlib.geo.nodalplane.NodalPlane`.
:returns:
Tuple of two items: rupture length in width in km.
The rupture area is calculated using method
:meth:`~openquake.hazardlib.scalerel.base.BaseMSR.get_median_area`
of source's
magnitude-scaling relationship. In any case the returned
dimensions multiplication is equal to that value. Than
the area is decomposed to length and width with respect
to source's rupture aspect ratio.
If calculated rupture width being inclined by nodal plane's
dip angle would not fit in between upper and lower seismogenic
depth, the rupture width is shrunken to a maximum possible
and rupture length is extended to preserve the same area.
"""
area = src.magnitude_scaling_relationship.get_median_area(
mag, nodal_plane.rake)
rup_length = math.sqrt(area * src.rupture_aspect_ratio)
rup_width = area / rup_length
seismogenic_layer_width = (src.lower_seismogenic_depth
- src.upper_seismogenic_depth)
max_width = (seismogenic_layer_width
/ math.sin(math.radians(nodal_plane.dip)))
if rup_width > max_width:
rup_width = max_width
rup_length = area / rup_width
return rup_length, rup_width | Calculate and return the rupture length and width
for given magnitude ``mag`` and nodal plane.
:param src:
a PointSource, AreaSource or MultiPointSource
:param mag:
a magnitude
:param nodal_plane:
Instance of :class:`openquake.hazardlib.geo.nodalplane.NodalPlane`.
:returns:
Tuple of two items: rupture length in width in km.
The rupture area is calculated using method
:meth:`~openquake.hazardlib.scalerel.base.BaseMSR.get_median_area`
of source's
magnitude-scaling relationship. In any case the returned
dimensions multiplication is equal to that value. Than
the area is decomposed to length and width with respect
to source's rupture aspect ratio.
If calculated rupture width being inclined by nodal plane's
dip angle would not fit in between upper and lower seismogenic
depth, the rupture width is shrunken to a maximum possible
and rupture length is extended to preserve the same area. | Below is the the instruction that describes the task:
### Input:
Calculate and return the rupture length and width
for given magnitude ``mag`` and nodal plane.
:param src:
a PointSource, AreaSource or MultiPointSource
:param mag:
a magnitude
:param nodal_plane:
Instance of :class:`openquake.hazardlib.geo.nodalplane.NodalPlane`.
:returns:
Tuple of two items: rupture length in width in km.
The rupture area is calculated using method
:meth:`~openquake.hazardlib.scalerel.base.BaseMSR.get_median_area`
of source's
magnitude-scaling relationship. In any case the returned
dimensions multiplication is equal to that value. Than
the area is decomposed to length and width with respect
to source's rupture aspect ratio.
If calculated rupture width being inclined by nodal plane's
dip angle would not fit in between upper and lower seismogenic
depth, the rupture width is shrunken to a maximum possible
and rupture length is extended to preserve the same area.
### Response:
def _get_rupture_dimensions(src, mag, nodal_plane):
"""
Calculate and return the rupture length and width
for given magnitude ``mag`` and nodal plane.
:param src:
a PointSource, AreaSource or MultiPointSource
:param mag:
a magnitude
:param nodal_plane:
Instance of :class:`openquake.hazardlib.geo.nodalplane.NodalPlane`.
:returns:
Tuple of two items: rupture length in width in km.
The rupture area is calculated using method
:meth:`~openquake.hazardlib.scalerel.base.BaseMSR.get_median_area`
of source's
magnitude-scaling relationship. In any case the returned
dimensions multiplication is equal to that value. Than
the area is decomposed to length and width with respect
to source's rupture aspect ratio.
If calculated rupture width being inclined by nodal plane's
dip angle would not fit in between upper and lower seismogenic
depth, the rupture width is shrunken to a maximum possible
and rupture length is extended to preserve the same area.
"""
area = src.magnitude_scaling_relationship.get_median_area(
mag, nodal_plane.rake)
rup_length = math.sqrt(area * src.rupture_aspect_ratio)
rup_width = area / rup_length
seismogenic_layer_width = (src.lower_seismogenic_depth
- src.upper_seismogenic_depth)
max_width = (seismogenic_layer_width
/ math.sin(math.radians(nodal_plane.dip)))
if rup_width > max_width:
rup_width = max_width
rup_length = area / rup_width
return rup_length, rup_width |
def SLICE(array, n, position=None):
"""
Returns a subset of an array.
See https://docs.mongodb.com/manual/reference/operator/aggregation/slice/
for more details
:param array: Any valid expression as long as it resolves to an array.
:param n: Any valid expression as long as it resolves to an integer.
:param position: Optional. Any valid expression as long as it resolves to an integer.
:return: Aggregation operator
"""
return {'$slice': [array, position, n]} if position is not None else {'$slice': [array, n]} | Returns a subset of an array.
See https://docs.mongodb.com/manual/reference/operator/aggregation/slice/
for more details
:param array: Any valid expression as long as it resolves to an array.
:param n: Any valid expression as long as it resolves to an integer.
:param position: Optional. Any valid expression as long as it resolves to an integer.
:return: Aggregation operator | Below is the the instruction that describes the task:
### Input:
Returns a subset of an array.
See https://docs.mongodb.com/manual/reference/operator/aggregation/slice/
for more details
:param array: Any valid expression as long as it resolves to an array.
:param n: Any valid expression as long as it resolves to an integer.
:param position: Optional. Any valid expression as long as it resolves to an integer.
:return: Aggregation operator
### Response:
def SLICE(array, n, position=None):
"""
Returns a subset of an array.
See https://docs.mongodb.com/manual/reference/operator/aggregation/slice/
for more details
:param array: Any valid expression as long as it resolves to an array.
:param n: Any valid expression as long as it resolves to an integer.
:param position: Optional. Any valid expression as long as it resolves to an integer.
:return: Aggregation operator
"""
return {'$slice': [array, position, n]} if position is not None else {'$slice': [array, n]} |
def _get_kernel_from_markov_model(self, model):
"""
Computes the Gibbs transition models from a Markov Network.
'Probabilistic Graphical Model Principles and Techniques', Koller and
Friedman, Section 12.3.3 pp 512-513.
Parameters:
-----------
model: MarkovModel
The model from which probabilities will be computed.
"""
self.variables = np.array(model.nodes())
factors_dict = {var: [] for var in self.variables}
for factor in model.get_factors():
for var in factor.scope():
factors_dict[var].append(factor)
# Take factor product
factors_dict = {var: factor_product(*factors) if len(factors) > 1 else factors[0]
for var, factors in factors_dict.items()}
self.cardinalities = {var: factors_dict[var].get_cardinality([var])[var] for var in self.variables}
for var in self.variables:
other_vars = [v for v in self.variables if var != v]
other_cards = [self.cardinalities[v] for v in other_vars]
kernel = {}
factor = factors_dict[var]
scope = set(factor.scope())
for tup in itertools.product(*[range(card) for card in other_cards]):
states = [State(first_var, s) for first_var, s in zip(other_vars, tup) if first_var in scope]
reduced_factor = factor.reduce(states, inplace=False)
kernel[tup] = reduced_factor.values / sum(reduced_factor.values)
self.transition_models[var] = kernel | Computes the Gibbs transition models from a Markov Network.
'Probabilistic Graphical Model Principles and Techniques', Koller and
Friedman, Section 12.3.3 pp 512-513.
Parameters:
-----------
model: MarkovModel
The model from which probabilities will be computed. | Below is the the instruction that describes the task:
### Input:
Computes the Gibbs transition models from a Markov Network.
'Probabilistic Graphical Model Principles and Techniques', Koller and
Friedman, Section 12.3.3 pp 512-513.
Parameters:
-----------
model: MarkovModel
The model from which probabilities will be computed.
### Response:
def _get_kernel_from_markov_model(self, model):
"""
Computes the Gibbs transition models from a Markov Network.
'Probabilistic Graphical Model Principles and Techniques', Koller and
Friedman, Section 12.3.3 pp 512-513.
Parameters:
-----------
model: MarkovModel
The model from which probabilities will be computed.
"""
self.variables = np.array(model.nodes())
factors_dict = {var: [] for var in self.variables}
for factor in model.get_factors():
for var in factor.scope():
factors_dict[var].append(factor)
# Take factor product
factors_dict = {var: factor_product(*factors) if len(factors) > 1 else factors[0]
for var, factors in factors_dict.items()}
self.cardinalities = {var: factors_dict[var].get_cardinality([var])[var] for var in self.variables}
for var in self.variables:
other_vars = [v for v in self.variables if var != v]
other_cards = [self.cardinalities[v] for v in other_vars]
kernel = {}
factor = factors_dict[var]
scope = set(factor.scope())
for tup in itertools.product(*[range(card) for card in other_cards]):
states = [State(first_var, s) for first_var, s in zip(other_vars, tup) if first_var in scope]
reduced_factor = factor.reduce(states, inplace=False)
kernel[tup] = reduced_factor.values / sum(reduced_factor.values)
self.transition_models[var] = kernel |
def warning(self, amplexception):
"""
Receives notification of a warning.
"""
msg = '\t'+str(amplexception).replace('\n', '\n\t')
print('Warning:\n{:s}'.format(msg)) | Receives notification of a warning. | Below is the the instruction that describes the task:
### Input:
Receives notification of a warning.
### Response:
def warning(self, amplexception):
"""
Receives notification of a warning.
"""
msg = '\t'+str(amplexception).replace('\n', '\n\t')
print('Warning:\n{:s}'.format(msg)) |
def Fierz_to_JMS_lep(C, ddll):
"""From Fierz to semileptonic JMS basis for Class V.
`ddll` should be of the form 'sbl_enu_tau', 'dbl_munu_e' etc."""
if ddll[:2] == 'uc':
s = str(uflav[ddll[0]] + 1)
b = str(uflav[ddll[1]] + 1)
q = 'u'
else:
s = str(dflav[ddll[0]] + 1)
b = str(dflav[ddll[1]] + 1)
q = 'd'
l = str(lflav[ddll[4:ddll.find('n')]] + 1)
lp = str(lflav[ddll[ddll.find('_',5)+1:len(ddll)]] + 1)
ind = ddll.replace('l_','').replace('nu_','')
d = {
"Ve" + q + "LL" + '_' + l + lp + s + b : -C['F' + ind + '10'] + C['F' + ind + '9'],
"V" + q + "eLR" + '_' + s + b + l + lp : C['F' + ind + '10'] + C['F' + ind + '9'],
"Se" + q + "RR" + '_' + l + lp + s + b : C['F' + ind + 'P'] + C['F' + ind + 'S'],
"Se" + q + "RL" + '_' + lp + l + b + s : -C['F' + ind + 'P'].conjugate() + C['F' + ind + 'S'].conjugate(),
"Te" + q + "RR" + '_' + lp + l + b + s : C['F' + ind + 'T'].conjugate() - C['F' + ind + 'T5'].conjugate(),
"Te" + q + "RR" + '_' + l + lp + s + b : C['F' + ind + 'T'] + C['F' + ind + 'T5'],
"Ve" + q + "LR" + '_' + l + lp + s + b : -C['F' + ind + '10p'] + C['F' + ind + '9p'],
"Ve" + q + "RR" + '_' + l + lp + s + b : C['F' + ind + '10p'] + C['F' + ind + '9p'],
"Se" + q + "RL" + '_' + l + lp + s + b : C['F' + ind + 'Pp'] + C['F' + ind + 'Sp'],
"Se" + q + "RR" + '_' + lp + l + b + s : -C['F' + ind + 'Pp'].conjugate() + C['F' + ind + 'Sp'].conjugate(),
}
return symmetrize_JMS_dict(d) | From Fierz to semileptonic JMS basis for Class V.
`ddll` should be of the form 'sbl_enu_tau', 'dbl_munu_e' etc. | Below is the the instruction that describes the task:
### Input:
From Fierz to semileptonic JMS basis for Class V.
`ddll` should be of the form 'sbl_enu_tau', 'dbl_munu_e' etc.
### Response:
def Fierz_to_JMS_lep(C, ddll):
"""From Fierz to semileptonic JMS basis for Class V.
`ddll` should be of the form 'sbl_enu_tau', 'dbl_munu_e' etc."""
if ddll[:2] == 'uc':
s = str(uflav[ddll[0]] + 1)
b = str(uflav[ddll[1]] + 1)
q = 'u'
else:
s = str(dflav[ddll[0]] + 1)
b = str(dflav[ddll[1]] + 1)
q = 'd'
l = str(lflav[ddll[4:ddll.find('n')]] + 1)
lp = str(lflav[ddll[ddll.find('_',5)+1:len(ddll)]] + 1)
ind = ddll.replace('l_','').replace('nu_','')
d = {
"Ve" + q + "LL" + '_' + l + lp + s + b : -C['F' + ind + '10'] + C['F' + ind + '9'],
"V" + q + "eLR" + '_' + s + b + l + lp : C['F' + ind + '10'] + C['F' + ind + '9'],
"Se" + q + "RR" + '_' + l + lp + s + b : C['F' + ind + 'P'] + C['F' + ind + 'S'],
"Se" + q + "RL" + '_' + lp + l + b + s : -C['F' + ind + 'P'].conjugate() + C['F' + ind + 'S'].conjugate(),
"Te" + q + "RR" + '_' + lp + l + b + s : C['F' + ind + 'T'].conjugate() - C['F' + ind + 'T5'].conjugate(),
"Te" + q + "RR" + '_' + l + lp + s + b : C['F' + ind + 'T'] + C['F' + ind + 'T5'],
"Ve" + q + "LR" + '_' + l + lp + s + b : -C['F' + ind + '10p'] + C['F' + ind + '9p'],
"Ve" + q + "RR" + '_' + l + lp + s + b : C['F' + ind + '10p'] + C['F' + ind + '9p'],
"Se" + q + "RL" + '_' + l + lp + s + b : C['F' + ind + 'Pp'] + C['F' + ind + 'Sp'],
"Se" + q + "RR" + '_' + lp + l + b + s : -C['F' + ind + 'Pp'].conjugate() + C['F' + ind + 'Sp'].conjugate(),
}
return symmetrize_JMS_dict(d) |
def _init_map(self):
"""stub"""
self.my_osid_object_form._my_map['maxStrings'] = \
self._max_strings_metadata['default_integer_values'][0]
self.my_osid_object_form._my_map['expectedLength'] = \
self._expected_length_metadata['default_integer_values'][0]
self.my_osid_object_form._my_map['expectedLines'] = \
self._expected_lines_metadata['default_integer_values'][0]
super(QTIExtendedTextAnswerQuestionFormRecord, self)._init_map() | stub | Below is the the instruction that describes the task:
### Input:
stub
### Response:
def _init_map(self):
"""stub"""
self.my_osid_object_form._my_map['maxStrings'] = \
self._max_strings_metadata['default_integer_values'][0]
self.my_osid_object_form._my_map['expectedLength'] = \
self._expected_length_metadata['default_integer_values'][0]
self.my_osid_object_form._my_map['expectedLines'] = \
self._expected_lines_metadata['default_integer_values'][0]
super(QTIExtendedTextAnswerQuestionFormRecord, self)._init_map() |
def restart(self, container, instances=None, map_name=None, **kwargs):
"""
Restarts instances for a container configuration.
:param container: Container name.
:type container: unicode | str
:param instances: Instance names to stop. If not specified, will restart all instances as specified in the
configuration (or just one default instance).
:type instances: collections.Iterable[unicode | str | NoneType]
:param map_name: Container map name. Optional - if not provided the default map is used.
:type map_name: unicode | str
:param kwargs: Additional kwargs. If multiple actions are resulting from this, they will only be applied to
the main container restart.
:return: Return values of restarted containers.
:rtype: list[dockermap.map.runner.ActionOutput]
"""
return self.run_actions('restart', container, instances=instances, map_name=map_name, **kwargs) | Restarts instances for a container configuration.
:param container: Container name.
:type container: unicode | str
:param instances: Instance names to stop. If not specified, will restart all instances as specified in the
configuration (or just one default instance).
:type instances: collections.Iterable[unicode | str | NoneType]
:param map_name: Container map name. Optional - if not provided the default map is used.
:type map_name: unicode | str
:param kwargs: Additional kwargs. If multiple actions are resulting from this, they will only be applied to
the main container restart.
:return: Return values of restarted containers.
:rtype: list[dockermap.map.runner.ActionOutput] | Below is the the instruction that describes the task:
### Input:
Restarts instances for a container configuration.
:param container: Container name.
:type container: unicode | str
:param instances: Instance names to stop. If not specified, will restart all instances as specified in the
configuration (or just one default instance).
:type instances: collections.Iterable[unicode | str | NoneType]
:param map_name: Container map name. Optional - if not provided the default map is used.
:type map_name: unicode | str
:param kwargs: Additional kwargs. If multiple actions are resulting from this, they will only be applied to
the main container restart.
:return: Return values of restarted containers.
:rtype: list[dockermap.map.runner.ActionOutput]
### Response:
def restart(self, container, instances=None, map_name=None, **kwargs):
"""
Restarts instances for a container configuration.
:param container: Container name.
:type container: unicode | str
:param instances: Instance names to stop. If not specified, will restart all instances as specified in the
configuration (or just one default instance).
:type instances: collections.Iterable[unicode | str | NoneType]
:param map_name: Container map name. Optional - if not provided the default map is used.
:type map_name: unicode | str
:param kwargs: Additional kwargs. If multiple actions are resulting from this, they will only be applied to
the main container restart.
:return: Return values of restarted containers.
:rtype: list[dockermap.map.runner.ActionOutput]
"""
return self.run_actions('restart', container, instances=instances, map_name=map_name, **kwargs) |
def exp(self):
""" Returns the exponent of the quaternion.
(not tested)
"""
# Init
vecNorm = self.x**2 + self.y**2 + self.z**2
wPart = np.exp(self.w)
q = Quaternion()
# Calculate
q.w = wPart * np.cos(vecNorm)
q.x = wPart * self.x * np.sin(vecNorm) / vecNorm
q.y = wPart * self.y * np.sin(vecNorm) / vecNorm
q.z = wPart * self.z * np.sin(vecNorm) / vecNorm
return q | Returns the exponent of the quaternion.
(not tested) | Below is the the instruction that describes the task:
### Input:
Returns the exponent of the quaternion.
(not tested)
### Response:
def exp(self):
""" Returns the exponent of the quaternion.
(not tested)
"""
# Init
vecNorm = self.x**2 + self.y**2 + self.z**2
wPart = np.exp(self.w)
q = Quaternion()
# Calculate
q.w = wPart * np.cos(vecNorm)
q.x = wPart * self.x * np.sin(vecNorm) / vecNorm
q.y = wPart * self.y * np.sin(vecNorm) / vecNorm
q.z = wPart * self.z * np.sin(vecNorm) / vecNorm
return q |
def clear(self, *objs):
"""
Clear the third relationship table, but not the ModelA or ModelB
"""
if objs:
keys = get_objs_columns(objs)
self.do_(self.model.table.delete(self.condition & self.model.table.c[self.model._primary_field].in_(keys)))
else:
self.do_(self.model.table.delete(self.condition)) | Clear the third relationship table, but not the ModelA or ModelB | Below is the the instruction that describes the task:
### Input:
Clear the third relationship table, but not the ModelA or ModelB
### Response:
def clear(self, *objs):
"""
Clear the third relationship table, but not the ModelA or ModelB
"""
if objs:
keys = get_objs_columns(objs)
self.do_(self.model.table.delete(self.condition & self.model.table.c[self.model._primary_field].in_(keys)))
else:
self.do_(self.model.table.delete(self.condition)) |
def iterbusinessdays(self, d1, d2):
"""
Date iterator returning dates in d1 <= x < d2, excluding weekends and holidays
"""
assert d2 >= d1
if d1.date() == d2.date() and d2.time() < self.business_hours[0]:
return
first = True
for dt in self.iterdays(d1, d2):
if first and d1.time() > self.business_hours[1]:
first = False
continue
first = False
if not self.isweekend(dt) and not self.isholiday(dt):
yield dt | Date iterator returning dates in d1 <= x < d2, excluding weekends and holidays | Below is the the instruction that describes the task:
### Input:
Date iterator returning dates in d1 <= x < d2, excluding weekends and holidays
### Response:
def iterbusinessdays(self, d1, d2):
"""
Date iterator returning dates in d1 <= x < d2, excluding weekends and holidays
"""
assert d2 >= d1
if d1.date() == d2.date() and d2.time() < self.business_hours[0]:
return
first = True
for dt in self.iterdays(d1, d2):
if first and d1.time() > self.business_hours[1]:
first = False
continue
first = False
if not self.isweekend(dt) and not self.isholiday(dt):
yield dt |
def utc_datetime_and_leap_second(self):
"""Convert to a Python ``datetime`` in UTC, plus a leap second value.
Convert this time to a `datetime`_ object and a leap second::
dt, leap_second = t.utc_datetime_and_leap_second()
If the third-party `pytz`_ package is available, then its
``utc`` timezone will be used as the timezone of the return
value. Otherwise, Skyfield uses its own ``utc`` timezone.
The leap second value is provided because a Python ``datetime``
can only number seconds ``0`` through ``59``, but leap seconds
have a designation of at least ``60``. The leap second return
value will normally be ``0``, but will instead be ``1`` if the
date and time are a UTC leap second. Add the leap second value
to the ``second`` field of the ``datetime`` to learn the real
name of the second.
If this time is an array, then an array of ``datetime`` objects
and an array of leap second integers is returned, instead of a
single value each.
"""
year, month, day, hour, minute, second = self._utc_tuple(
_half_millisecond)
second, fraction = divmod(second, 1.0)
second = second.astype(int)
leap_second = second // 60
second -= leap_second
milli = (fraction * 1000).astype(int) * 1000
if self.shape:
utcs = [utc] * self.shape[0]
argsets = zip(year, month, day, hour, minute, second, milli, utcs)
dt = array([datetime(*args) for args in argsets])
else:
dt = datetime(year, month, day, hour, minute, second, milli, utc)
return dt, leap_second | Convert to a Python ``datetime`` in UTC, plus a leap second value.
Convert this time to a `datetime`_ object and a leap second::
dt, leap_second = t.utc_datetime_and_leap_second()
If the third-party `pytz`_ package is available, then its
``utc`` timezone will be used as the timezone of the return
value. Otherwise, Skyfield uses its own ``utc`` timezone.
The leap second value is provided because a Python ``datetime``
can only number seconds ``0`` through ``59``, but leap seconds
have a designation of at least ``60``. The leap second return
value will normally be ``0``, but will instead be ``1`` if the
date and time are a UTC leap second. Add the leap second value
to the ``second`` field of the ``datetime`` to learn the real
name of the second.
If this time is an array, then an array of ``datetime`` objects
and an array of leap second integers is returned, instead of a
single value each. | Below is the the instruction that describes the task:
### Input:
Convert to a Python ``datetime`` in UTC, plus a leap second value.
Convert this time to a `datetime`_ object and a leap second::
dt, leap_second = t.utc_datetime_and_leap_second()
If the third-party `pytz`_ package is available, then its
``utc`` timezone will be used as the timezone of the return
value. Otherwise, Skyfield uses its own ``utc`` timezone.
The leap second value is provided because a Python ``datetime``
can only number seconds ``0`` through ``59``, but leap seconds
have a designation of at least ``60``. The leap second return
value will normally be ``0``, but will instead be ``1`` if the
date and time are a UTC leap second. Add the leap second value
to the ``second`` field of the ``datetime`` to learn the real
name of the second.
If this time is an array, then an array of ``datetime`` objects
and an array of leap second integers is returned, instead of a
single value each.
### Response:
def utc_datetime_and_leap_second(self):
"""Convert to a Python ``datetime`` in UTC, plus a leap second value.
Convert this time to a `datetime`_ object and a leap second::
dt, leap_second = t.utc_datetime_and_leap_second()
If the third-party `pytz`_ package is available, then its
``utc`` timezone will be used as the timezone of the return
value. Otherwise, Skyfield uses its own ``utc`` timezone.
The leap second value is provided because a Python ``datetime``
can only number seconds ``0`` through ``59``, but leap seconds
have a designation of at least ``60``. The leap second return
value will normally be ``0``, but will instead be ``1`` if the
date and time are a UTC leap second. Add the leap second value
to the ``second`` field of the ``datetime`` to learn the real
name of the second.
If this time is an array, then an array of ``datetime`` objects
and an array of leap second integers is returned, instead of a
single value each.
"""
year, month, day, hour, minute, second = self._utc_tuple(
_half_millisecond)
second, fraction = divmod(second, 1.0)
second = second.astype(int)
leap_second = second // 60
second -= leap_second
milli = (fraction * 1000).astype(int) * 1000
if self.shape:
utcs = [utc] * self.shape[0]
argsets = zip(year, month, day, hour, minute, second, milli, utcs)
dt = array([datetime(*args) for args in argsets])
else:
dt = datetime(year, month, day, hour, minute, second, milli, utc)
return dt, leap_second |
def parse_gbk(gbks):
"""
parse gbk file
"""
for gbk in gbks:
for record in SeqIO.parse(open(gbk), 'genbank'):
for feature in record.features:
if feature.type == 'gene':
try:
locus = feature.qualifiers['locus_tag'][0]
except:
continue
if feature.type == 'CDS':
try:
locus = feature.qualifiers['locus_tag'][0]
except:
pass
start = int(feature.location.start) + int(feature.qualifiers['codon_start'][0])
end, strand = int(feature.location.end), feature.location.strand
if strand is None:
strand = 1
else:
strand = -1
contig = record.id
# contig = record.id.rsplit('.', 1)[0]
yield contig, [locus, \
[start, end, strand], \
feature.qualifiers] | parse gbk file | Below is the the instruction that describes the task:
### Input:
parse gbk file
### Response:
def parse_gbk(gbks):
"""
parse gbk file
"""
for gbk in gbks:
for record in SeqIO.parse(open(gbk), 'genbank'):
for feature in record.features:
if feature.type == 'gene':
try:
locus = feature.qualifiers['locus_tag'][0]
except:
continue
if feature.type == 'CDS':
try:
locus = feature.qualifiers['locus_tag'][0]
except:
pass
start = int(feature.location.start) + int(feature.qualifiers['codon_start'][0])
end, strand = int(feature.location.end), feature.location.strand
if strand is None:
strand = 1
else:
strand = -1
contig = record.id
# contig = record.id.rsplit('.', 1)[0]
yield contig, [locus, \
[start, end, strand], \
feature.qualifiers] |
def generate_image_from_url(url=None, timeout=30):
"""
Downloads and saves a image from url into a file.
"""
file_name = posixpath.basename(url)
img_tmp = NamedTemporaryFile(delete=True)
try:
response = requests.get(url, timeout=timeout)
response.raise_for_status()
except Exception as e: # NOQA
return None, None
img_tmp.write(response.content)
img_tmp.flush()
image = File(img_tmp)
image.seek(0)
return file_name, image | Downloads and saves a image from url into a file. | Below is the the instruction that describes the task:
### Input:
Downloads and saves a image from url into a file.
### Response:
def generate_image_from_url(url=None, timeout=30):
"""
Downloads and saves a image from url into a file.
"""
file_name = posixpath.basename(url)
img_tmp = NamedTemporaryFile(delete=True)
try:
response = requests.get(url, timeout=timeout)
response.raise_for_status()
except Exception as e: # NOQA
return None, None
img_tmp.write(response.content)
img_tmp.flush()
image = File(img_tmp)
image.seek(0)
return file_name, image |
def estimate_tuning(y=None, sr=22050, S=None, n_fft=2048,
resolution=0.01, bins_per_octave=12, **kwargs):
'''Estimate the tuning of an audio time series or spectrogram input.
Parameters
----------
y: np.ndarray [shape=(n,)] or None
audio signal
sr : number > 0 [scalar]
audio sampling rate of `y`
S: np.ndarray [shape=(d, t)] or None
magnitude or power spectrogram
n_fft : int > 0 [scalar] or None
number of FFT bins to use, if `y` is provided.
resolution : float in `(0, 1)`
Resolution of the tuning as a fraction of a bin.
0.01 corresponds to measurements in cents.
bins_per_octave : int > 0 [scalar]
How many frequency bins per octave
kwargs : additional keyword arguments
Additional arguments passed to `piptrack`
Returns
-------
tuning: float in `[-0.5, 0.5)`
estimated tuning deviation (fractions of a bin)
See Also
--------
piptrack
Pitch tracking by parabolic interpolation
Examples
--------
>>> # With time-series input
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> librosa.estimate_tuning(y=y, sr=sr)
0.089999999999999969
>>> # In tenths of a cent
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> librosa.estimate_tuning(y=y, sr=sr, resolution=1e-3)
0.093999999999999972
>>> # Using spectrogram input
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> S = np.abs(librosa.stft(y))
>>> librosa.estimate_tuning(S=S, sr=sr)
0.089999999999999969
>>> # Using pass-through arguments to `librosa.piptrack`
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> librosa.estimate_tuning(y=y, sr=sr, n_fft=8192,
... fmax=librosa.note_to_hz('G#9'))
0.070000000000000062
'''
pitch, mag = piptrack(y=y, sr=sr, S=S, n_fft=n_fft, **kwargs)
# Only count magnitude where frequency is > 0
pitch_mask = pitch > 0
if pitch_mask.any():
threshold = np.median(mag[pitch_mask])
else:
threshold = 0.0
return pitch_tuning(pitch[(mag >= threshold) & pitch_mask],
resolution=resolution,
bins_per_octave=bins_per_octave) | Estimate the tuning of an audio time series or spectrogram input.
Parameters
----------
y: np.ndarray [shape=(n,)] or None
audio signal
sr : number > 0 [scalar]
audio sampling rate of `y`
S: np.ndarray [shape=(d, t)] or None
magnitude or power spectrogram
n_fft : int > 0 [scalar] or None
number of FFT bins to use, if `y` is provided.
resolution : float in `(0, 1)`
Resolution of the tuning as a fraction of a bin.
0.01 corresponds to measurements in cents.
bins_per_octave : int > 0 [scalar]
How many frequency bins per octave
kwargs : additional keyword arguments
Additional arguments passed to `piptrack`
Returns
-------
tuning: float in `[-0.5, 0.5)`
estimated tuning deviation (fractions of a bin)
See Also
--------
piptrack
Pitch tracking by parabolic interpolation
Examples
--------
>>> # With time-series input
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> librosa.estimate_tuning(y=y, sr=sr)
0.089999999999999969
>>> # In tenths of a cent
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> librosa.estimate_tuning(y=y, sr=sr, resolution=1e-3)
0.093999999999999972
>>> # Using spectrogram input
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> S = np.abs(librosa.stft(y))
>>> librosa.estimate_tuning(S=S, sr=sr)
0.089999999999999969
>>> # Using pass-through arguments to `librosa.piptrack`
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> librosa.estimate_tuning(y=y, sr=sr, n_fft=8192,
... fmax=librosa.note_to_hz('G#9'))
0.070000000000000062 | Below is the the instruction that describes the task:
### Input:
Estimate the tuning of an audio time series or spectrogram input.
Parameters
----------
y: np.ndarray [shape=(n,)] or None
audio signal
sr : number > 0 [scalar]
audio sampling rate of `y`
S: np.ndarray [shape=(d, t)] or None
magnitude or power spectrogram
n_fft : int > 0 [scalar] or None
number of FFT bins to use, if `y` is provided.
resolution : float in `(0, 1)`
Resolution of the tuning as a fraction of a bin.
0.01 corresponds to measurements in cents.
bins_per_octave : int > 0 [scalar]
How many frequency bins per octave
kwargs : additional keyword arguments
Additional arguments passed to `piptrack`
Returns
-------
tuning: float in `[-0.5, 0.5)`
estimated tuning deviation (fractions of a bin)
See Also
--------
piptrack
Pitch tracking by parabolic interpolation
Examples
--------
>>> # With time-series input
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> librosa.estimate_tuning(y=y, sr=sr)
0.089999999999999969
>>> # In tenths of a cent
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> librosa.estimate_tuning(y=y, sr=sr, resolution=1e-3)
0.093999999999999972
>>> # Using spectrogram input
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> S = np.abs(librosa.stft(y))
>>> librosa.estimate_tuning(S=S, sr=sr)
0.089999999999999969
>>> # Using pass-through arguments to `librosa.piptrack`
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> librosa.estimate_tuning(y=y, sr=sr, n_fft=8192,
... fmax=librosa.note_to_hz('G#9'))
0.070000000000000062
### Response:
def estimate_tuning(y=None, sr=22050, S=None, n_fft=2048,
resolution=0.01, bins_per_octave=12, **kwargs):
'''Estimate the tuning of an audio time series or spectrogram input.
Parameters
----------
y: np.ndarray [shape=(n,)] or None
audio signal
sr : number > 0 [scalar]
audio sampling rate of `y`
S: np.ndarray [shape=(d, t)] or None
magnitude or power spectrogram
n_fft : int > 0 [scalar] or None
number of FFT bins to use, if `y` is provided.
resolution : float in `(0, 1)`
Resolution of the tuning as a fraction of a bin.
0.01 corresponds to measurements in cents.
bins_per_octave : int > 0 [scalar]
How many frequency bins per octave
kwargs : additional keyword arguments
Additional arguments passed to `piptrack`
Returns
-------
tuning: float in `[-0.5, 0.5)`
estimated tuning deviation (fractions of a bin)
See Also
--------
piptrack
Pitch tracking by parabolic interpolation
Examples
--------
>>> # With time-series input
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> librosa.estimate_tuning(y=y, sr=sr)
0.089999999999999969
>>> # In tenths of a cent
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> librosa.estimate_tuning(y=y, sr=sr, resolution=1e-3)
0.093999999999999972
>>> # Using spectrogram input
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> S = np.abs(librosa.stft(y))
>>> librosa.estimate_tuning(S=S, sr=sr)
0.089999999999999969
>>> # Using pass-through arguments to `librosa.piptrack`
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> librosa.estimate_tuning(y=y, sr=sr, n_fft=8192,
... fmax=librosa.note_to_hz('G#9'))
0.070000000000000062
'''
pitch, mag = piptrack(y=y, sr=sr, S=S, n_fft=n_fft, **kwargs)
# Only count magnitude where frequency is > 0
pitch_mask = pitch > 0
if pitch_mask.any():
threshold = np.median(mag[pitch_mask])
else:
threshold = 0.0
return pitch_tuning(pitch[(mag >= threshold) & pitch_mask],
resolution=resolution,
bins_per_octave=bins_per_octave) |
def unpack(self, buff, offset=0):
"""Unpack a binary struct into this object's attributes.
Return the values instead of the lib's basic types.
Args:
buff (bytes): Binary buffer.
offset (int): Where to begin unpacking.
Raises:
:exc:`~.exceptions.UnpackException`: If unpack fails.
"""
super().unpack(buff, offset)
self.version = self._version_ihl.value >> 4
self.ihl = self._version_ihl.value & 15
self.dscp = self._dscp_ecn.value >> 2
self.ecn = self._dscp_ecn.value & 3
self.length = self.length.value
self.identification = self.identification.value
self.flags = self._flags_offset.value >> 13
self.offset = self._flags_offset.value & 8191
self.ttl = self.ttl.value
self.protocol = self.protocol.value
self.checksum = self.checksum.value
self.source = self.source.value
self.destination = self.destination.value
if self.ihl > 5:
options_size = (self.ihl - 5) * 4
self.data = self.options.value[options_size:]
self.options = self.options.value[:options_size]
else:
self.data = self.options.value
self.options = b'' | Unpack a binary struct into this object's attributes.
Return the values instead of the lib's basic types.
Args:
buff (bytes): Binary buffer.
offset (int): Where to begin unpacking.
Raises:
:exc:`~.exceptions.UnpackException`: If unpack fails. | Below is the the instruction that describes the task:
### Input:
Unpack a binary struct into this object's attributes.
Return the values instead of the lib's basic types.
Args:
buff (bytes): Binary buffer.
offset (int): Where to begin unpacking.
Raises:
:exc:`~.exceptions.UnpackException`: If unpack fails.
### Response:
def unpack(self, buff, offset=0):
"""Unpack a binary struct into this object's attributes.
Return the values instead of the lib's basic types.
Args:
buff (bytes): Binary buffer.
offset (int): Where to begin unpacking.
Raises:
:exc:`~.exceptions.UnpackException`: If unpack fails.
"""
super().unpack(buff, offset)
self.version = self._version_ihl.value >> 4
self.ihl = self._version_ihl.value & 15
self.dscp = self._dscp_ecn.value >> 2
self.ecn = self._dscp_ecn.value & 3
self.length = self.length.value
self.identification = self.identification.value
self.flags = self._flags_offset.value >> 13
self.offset = self._flags_offset.value & 8191
self.ttl = self.ttl.value
self.protocol = self.protocol.value
self.checksum = self.checksum.value
self.source = self.source.value
self.destination = self.destination.value
if self.ihl > 5:
options_size = (self.ihl - 5) * 4
self.data = self.options.value[options_size:]
self.options = self.options.value[:options_size]
else:
self.data = self.options.value
self.options = b'' |
def set_children(self, value, defined):
"""Set the children of the object."""
self.children = value
self.children_defined = defined
return self | Set the children of the object. | Below is the the instruction that describes the task:
### Input:
Set the children of the object.
### Response:
def set_children(self, value, defined):
"""Set the children of the object."""
self.children = value
self.children_defined = defined
return self |
def put_connection_filename(filename, working_filename, verbose = False):
"""
This function reverses the effect of a previous call to
get_connection_filename(), restoring the working copy to its
original location if the two are different. This function should
always be called after calling get_connection_filename() when the
file is no longer in use.
During the move operation, this function traps the signals used by
Condor to evict jobs. This reduces the risk of corrupting a
document by the job terminating part-way through the restoration of
the file to its original location. When the move operation is
concluded, the original signal handlers are restored and if any
signals were trapped they are resent to the current process in
order. Typically this will result in the signal handlers installed
by the install_signal_trap() function being invoked, meaning any
other scratch files that might be in use get deleted and the
current process is terminated.
"""
if working_filename != filename:
# initialize SIGTERM and SIGTSTP trap
deferred_signals = []
def newsigterm(signum, frame):
deferred_signals.append(signum)
oldhandlers = {}
for sig in (signal.SIGTERM, signal.SIGTSTP):
oldhandlers[sig] = signal.getsignal(sig)
signal.signal(sig, newsigterm)
# replace document
if verbose:
print >>sys.stderr, "moving '%s' to '%s' ..." % (working_filename, filename),
shutil.move(working_filename, filename)
if verbose:
print >>sys.stderr, "done."
# remove reference to tempfile.TemporaryFile object.
# because we've just deleted the file above, this would
# produce an annoying but harmless message about an ignored
# OSError, so we create a dummy file for the TemporaryFile
# to delete. ignore any errors that occur when trying to
# make the dummy file. FIXME: this is stupid, find a
# better way to shut TemporaryFile up
try:
open(working_filename, "w").close()
except:
pass
with temporary_files_lock:
del temporary_files[working_filename]
# restore original handlers, and send ourselves any trapped signals
# in order
for sig, oldhandler in oldhandlers.iteritems():
signal.signal(sig, oldhandler)
while deferred_signals:
os.kill(os.getpid(), deferred_signals.pop(0))
# if there are no more temporary files in place, remove the
# temporary-file signal traps
with temporary_files_lock:
if not temporary_files:
uninstall_signal_trap() | This function reverses the effect of a previous call to
get_connection_filename(), restoring the working copy to its
original location if the two are different. This function should
always be called after calling get_connection_filename() when the
file is no longer in use.
During the move operation, this function traps the signals used by
Condor to evict jobs. This reduces the risk of corrupting a
document by the job terminating part-way through the restoration of
the file to its original location. When the move operation is
concluded, the original signal handlers are restored and if any
signals were trapped they are resent to the current process in
order. Typically this will result in the signal handlers installed
by the install_signal_trap() function being invoked, meaning any
other scratch files that might be in use get deleted and the
current process is terminated. | Below is the the instruction that describes the task:
### Input:
This function reverses the effect of a previous call to
get_connection_filename(), restoring the working copy to its
original location if the two are different. This function should
always be called after calling get_connection_filename() when the
file is no longer in use.
During the move operation, this function traps the signals used by
Condor to evict jobs. This reduces the risk of corrupting a
document by the job terminating part-way through the restoration of
the file to its original location. When the move operation is
concluded, the original signal handlers are restored and if any
signals were trapped they are resent to the current process in
order. Typically this will result in the signal handlers installed
by the install_signal_trap() function being invoked, meaning any
other scratch files that might be in use get deleted and the
current process is terminated.
### Response:
def put_connection_filename(filename, working_filename, verbose = False):
"""
This function reverses the effect of a previous call to
get_connection_filename(), restoring the working copy to its
original location if the two are different. This function should
always be called after calling get_connection_filename() when the
file is no longer in use.
During the move operation, this function traps the signals used by
Condor to evict jobs. This reduces the risk of corrupting a
document by the job terminating part-way through the restoration of
the file to its original location. When the move operation is
concluded, the original signal handlers are restored and if any
signals were trapped they are resent to the current process in
order. Typically this will result in the signal handlers installed
by the install_signal_trap() function being invoked, meaning any
other scratch files that might be in use get deleted and the
current process is terminated.
"""
if working_filename != filename:
# initialize SIGTERM and SIGTSTP trap
deferred_signals = []
def newsigterm(signum, frame):
deferred_signals.append(signum)
oldhandlers = {}
for sig in (signal.SIGTERM, signal.SIGTSTP):
oldhandlers[sig] = signal.getsignal(sig)
signal.signal(sig, newsigterm)
# replace document
if verbose:
print >>sys.stderr, "moving '%s' to '%s' ..." % (working_filename, filename),
shutil.move(working_filename, filename)
if verbose:
print >>sys.stderr, "done."
# remove reference to tempfile.TemporaryFile object.
# because we've just deleted the file above, this would
# produce an annoying but harmless message about an ignored
# OSError, so we create a dummy file for the TemporaryFile
# to delete. ignore any errors that occur when trying to
# make the dummy file. FIXME: this is stupid, find a
# better way to shut TemporaryFile up
try:
open(working_filename, "w").close()
except:
pass
with temporary_files_lock:
del temporary_files[working_filename]
# restore original handlers, and send ourselves any trapped signals
# in order
for sig, oldhandler in oldhandlers.iteritems():
signal.signal(sig, oldhandler)
while deferred_signals:
os.kill(os.getpid(), deferred_signals.pop(0))
# if there are no more temporary files in place, remove the
# temporary-file signal traps
with temporary_files_lock:
if not temporary_files:
uninstall_signal_trap() |
def check(self):
"""
Checks if the list of tracked terms has changed.
Returns True if changed, otherwise False.
"""
new_tracking_terms = self.update_tracking_terms()
terms_changed = False
# any deleted terms?
if self._tracking_terms_set > new_tracking_terms:
logging.debug("Some tracking terms removed")
terms_changed = True
# any added terms?
elif self._tracking_terms_set < new_tracking_terms:
logging.debug("Some tracking terms added")
terms_changed = True
# Go ahead and store for later
self._tracking_terms_set = new_tracking_terms
# If the terms changed, we need to restart the stream
return terms_changed | Checks if the list of tracked terms has changed.
Returns True if changed, otherwise False. | Below is the the instruction that describes the task:
### Input:
Checks if the list of tracked terms has changed.
Returns True if changed, otherwise False.
### Response:
def check(self):
"""
Checks if the list of tracked terms has changed.
Returns True if changed, otherwise False.
"""
new_tracking_terms = self.update_tracking_terms()
terms_changed = False
# any deleted terms?
if self._tracking_terms_set > new_tracking_terms:
logging.debug("Some tracking terms removed")
terms_changed = True
# any added terms?
elif self._tracking_terms_set < new_tracking_terms:
logging.debug("Some tracking terms added")
terms_changed = True
# Go ahead and store for later
self._tracking_terms_set = new_tracking_terms
# If the terms changed, we need to restart the stream
return terms_changed |
def execute_after_scenario_steps(self, context):
"""
actions after each scenario
:param context: It’s a clever place where you and behave can store information to share around, automatically managed by behave.
"""
if not self.feature_error and not self.scenario_error:
self.__execute_steps_by_action(context, ACTIONS_AFTER_SCENARIO)
# Behave dynamic environment: Fail all steps if dyn_env has got any error and reset it
if self.reset_error_status():
context.scenario.reset()
context.dyn_env.fail_first_step_precondition_exception(context.scenario) | actions after each scenario
:param context: It’s a clever place where you and behave can store information to share around, automatically managed by behave. | Below is the the instruction that describes the task:
### Input:
actions after each scenario
:param context: It’s a clever place where you and behave can store information to share around, automatically managed by behave.
### Response:
def execute_after_scenario_steps(self, context):
"""
actions after each scenario
:param context: It’s a clever place where you and behave can store information to share around, automatically managed by behave.
"""
if not self.feature_error and not self.scenario_error:
self.__execute_steps_by_action(context, ACTIONS_AFTER_SCENARIO)
# Behave dynamic environment: Fail all steps if dyn_env has got any error and reset it
if self.reset_error_status():
context.scenario.reset()
context.dyn_env.fail_first_step_precondition_exception(context.scenario) |
def decode(s):
"""Decode a folder name from IMAP modified UTF-7 encoding to unicode.
Despite the function's name, the input may still be a unicode
string. If the input is bytes, it's first decoded to unicode.
"""
if isinstance(s, binary_type):
s = s.decode('latin-1')
if not isinstance(s, text_type):
return s
r = []
_in = []
for c in s:
if c == '&' and not _in:
_in.append('&')
elif c == '-' and _in:
if len(_in) == 1:
r.append('&')
else:
r.append(modified_deutf7(''.join(_in[1:])))
_in = []
elif _in:
_in.append(c)
else:
r.append(c)
if _in:
r.append(modified_deutf7(''.join(_in[1:])))
return ''.join(r) | Decode a folder name from IMAP modified UTF-7 encoding to unicode.
Despite the function's name, the input may still be a unicode
string. If the input is bytes, it's first decoded to unicode. | Below is the the instruction that describes the task:
### Input:
Decode a folder name from IMAP modified UTF-7 encoding to unicode.
Despite the function's name, the input may still be a unicode
string. If the input is bytes, it's first decoded to unicode.
### Response:
def decode(s):
"""Decode a folder name from IMAP modified UTF-7 encoding to unicode.
Despite the function's name, the input may still be a unicode
string. If the input is bytes, it's first decoded to unicode.
"""
if isinstance(s, binary_type):
s = s.decode('latin-1')
if not isinstance(s, text_type):
return s
r = []
_in = []
for c in s:
if c == '&' and not _in:
_in.append('&')
elif c == '-' and _in:
if len(_in) == 1:
r.append('&')
else:
r.append(modified_deutf7(''.join(_in[1:])))
_in = []
elif _in:
_in.append(c)
else:
r.append(c)
if _in:
r.append(modified_deutf7(''.join(_in[1:])))
return ''.join(r) |
def export(*pools, **kwargs):
'''
.. versionadded:: 2015.5.0
Export storage pools
pools : string
One or more storage pools to export
force : boolean
Force export of storage pools
CLI Example:
.. code-block:: bash
salt '*' zpool.export myzpool ... [force=True|False]
salt '*' zpool.export myzpool2 myzpool2 ... [force=True|False]
'''
## Configure pool
# NOTE: initialize the defaults
flags = []
targets = []
# NOTE: set extra config based on kwargs
if kwargs.get('force', False):
flags.append('-f')
# NOTE: append the pool name and specifications
targets = list(pools)
## Export pools
res = __salt__['cmd.run_all'](
__utils__['zfs.zpool_command'](
command='export',
flags=flags,
target=targets,
),
python_shell=False,
)
return __utils__['zfs.parse_command_result'](res, 'exported') | .. versionadded:: 2015.5.0
Export storage pools
pools : string
One or more storage pools to export
force : boolean
Force export of storage pools
CLI Example:
.. code-block:: bash
salt '*' zpool.export myzpool ... [force=True|False]
salt '*' zpool.export myzpool2 myzpool2 ... [force=True|False] | Below is the the instruction that describes the task:
### Input:
.. versionadded:: 2015.5.0
Export storage pools
pools : string
One or more storage pools to export
force : boolean
Force export of storage pools
CLI Example:
.. code-block:: bash
salt '*' zpool.export myzpool ... [force=True|False]
salt '*' zpool.export myzpool2 myzpool2 ... [force=True|False]
### Response:
def export(*pools, **kwargs):
'''
.. versionadded:: 2015.5.0
Export storage pools
pools : string
One or more storage pools to export
force : boolean
Force export of storage pools
CLI Example:
.. code-block:: bash
salt '*' zpool.export myzpool ... [force=True|False]
salt '*' zpool.export myzpool2 myzpool2 ... [force=True|False]
'''
## Configure pool
# NOTE: initialize the defaults
flags = []
targets = []
# NOTE: set extra config based on kwargs
if kwargs.get('force', False):
flags.append('-f')
# NOTE: append the pool name and specifications
targets = list(pools)
## Export pools
res = __salt__['cmd.run_all'](
__utils__['zfs.zpool_command'](
command='export',
flags=flags,
target=targets,
),
python_shell=False,
)
return __utils__['zfs.parse_command_result'](res, 'exported') |
def find_previous(element, l):
"""
find previous element in a sorted list
>>> find_previous(0, [0])
0
>>> find_previous(2, [1, 1, 3])
1
>>> find_previous(0, [1, 2])
>>> find_previous(1.5, [1, 2])
1
>>> find_previous(3, [1, 2])
2
"""
length = len(l)
for index, current in enumerate(l):
# current is the last element
if length - 1 == index:
return current
# current is the first element
if index == 0:
if element < current:
return None
if current <= element < l[index+1]:
return current | find previous element in a sorted list
>>> find_previous(0, [0])
0
>>> find_previous(2, [1, 1, 3])
1
>>> find_previous(0, [1, 2])
>>> find_previous(1.5, [1, 2])
1
>>> find_previous(3, [1, 2])
2 | Below is the the instruction that describes the task:
### Input:
find previous element in a sorted list
>>> find_previous(0, [0])
0
>>> find_previous(2, [1, 1, 3])
1
>>> find_previous(0, [1, 2])
>>> find_previous(1.5, [1, 2])
1
>>> find_previous(3, [1, 2])
2
### Response:
def find_previous(element, l):
"""
find previous element in a sorted list
>>> find_previous(0, [0])
0
>>> find_previous(2, [1, 1, 3])
1
>>> find_previous(0, [1, 2])
>>> find_previous(1.5, [1, 2])
1
>>> find_previous(3, [1, 2])
2
"""
length = len(l)
for index, current in enumerate(l):
# current is the last element
if length - 1 == index:
return current
# current is the first element
if index == 0:
if element < current:
return None
if current <= element < l[index+1]:
return current |
def install_json_params(self, ij=None):
"""Return install.json params in a dict with name param as key.
Args:
ij (dict, optional): Defaults to None. The install.json contents.
Returns:
dict: A dictionary containing the install.json input params with name as key.
"""
if self._install_json_params is None or ij is not None:
self._install_json_params = {}
# TODO: support for projects with multiple install.json files is not supported
if ij is None:
ij = self.install_json
for p in ij.get('params') or []:
self._install_json_params.setdefault(p.get('name'), p)
return self._install_json_params | Return install.json params in a dict with name param as key.
Args:
ij (dict, optional): Defaults to None. The install.json contents.
Returns:
dict: A dictionary containing the install.json input params with name as key. | Below is the the instruction that describes the task:
### Input:
Return install.json params in a dict with name param as key.
Args:
ij (dict, optional): Defaults to None. The install.json contents.
Returns:
dict: A dictionary containing the install.json input params with name as key.
### Response:
def install_json_params(self, ij=None):
"""Return install.json params in a dict with name param as key.
Args:
ij (dict, optional): Defaults to None. The install.json contents.
Returns:
dict: A dictionary containing the install.json input params with name as key.
"""
if self._install_json_params is None or ij is not None:
self._install_json_params = {}
# TODO: support for projects with multiple install.json files is not supported
if ij is None:
ij = self.install_json
for p in ij.get('params') or []:
self._install_json_params.setdefault(p.get('name'), p)
return self._install_json_params |
def format_box(title, ch="*"):
"""
Encloses title in a box. Result is a list
>>> for line in format_box("Today's TODO list"):
... print(line)
*************************
*** Today's TODO list ***
*************************
"""
lt = len(title)
return [(ch * (lt + 8)),
(ch * 3 + " " + title + " " + ch * 3),
(ch * (lt + 8))
] | Encloses title in a box. Result is a list
>>> for line in format_box("Today's TODO list"):
... print(line)
*************************
*** Today's TODO list ***
************************* | Below is the the instruction that describes the task:
### Input:
Encloses title in a box. Result is a list
>>> for line in format_box("Today's TODO list"):
... print(line)
*************************
*** Today's TODO list ***
*************************
### Response:
def format_box(title, ch="*"):
"""
Encloses title in a box. Result is a list
>>> for line in format_box("Today's TODO list"):
... print(line)
*************************
*** Today's TODO list ***
*************************
"""
lt = len(title)
return [(ch * (lt + 8)),
(ch * 3 + " " + title + " " + ch * 3),
(ch * (lt + 8))
] |
def _perp_eigendecompose(matrix: np.ndarray,
rtol: float = 1e-5,
atol: float = 1e-8,
) -> Tuple[np.array, List[np.ndarray]]:
"""An eigendecomposition that ensures eigenvectors are perpendicular.
numpy.linalg.eig doesn't guarantee that eigenvectors from the same
eigenspace will be perpendicular. This method uses Gram-Schmidt to recover
a perpendicular set. It further checks that all eigenvectors are
perpendicular and raises an ArithmeticError otherwise.
Args:
matrix: The matrix to decompose.
rtol: Relative threshold for determining whether eigenvalues are from
the same eigenspace and whether eigenvectors are perpendicular.
atol: Absolute threshold for determining whether eigenvalues are from
the same eigenspace and whether eigenvectors are perpendicular.
Returns:
The eigenvalues and column eigenvectors. The i'th eigenvalue is
associated with the i'th column eigenvector.
Raises:
ArithmeticError: Failed to find perpendicular eigenvectors.
"""
vals, cols = np.linalg.eig(matrix)
vecs = [cols[:, i] for i in range(len(cols))]
# Convert list of row arrays to list of column arrays.
for i in range(len(vecs)):
vecs[i] = np.reshape(vecs[i], (len(vecs[i]), vecs[i].ndim))
# Group by similar eigenvalue.
n = len(vecs)
groups = _group_similar(
list(range(n)),
lambda k1, k2: np.allclose(vals[k1], vals[k2], rtol=rtol))
# Remove overlap between eigenvectors with the same eigenvalue.
for g in groups:
q, _ = np.linalg.qr(np.hstack([vecs[i] for i in g]))
for i in range(len(g)):
vecs[g[i]] = q[:, i]
return vals, vecs | An eigendecomposition that ensures eigenvectors are perpendicular.
numpy.linalg.eig doesn't guarantee that eigenvectors from the same
eigenspace will be perpendicular. This method uses Gram-Schmidt to recover
a perpendicular set. It further checks that all eigenvectors are
perpendicular and raises an ArithmeticError otherwise.
Args:
matrix: The matrix to decompose.
rtol: Relative threshold for determining whether eigenvalues are from
the same eigenspace and whether eigenvectors are perpendicular.
atol: Absolute threshold for determining whether eigenvalues are from
the same eigenspace and whether eigenvectors are perpendicular.
Returns:
The eigenvalues and column eigenvectors. The i'th eigenvalue is
associated with the i'th column eigenvector.
Raises:
ArithmeticError: Failed to find perpendicular eigenvectors. | Below is the the instruction that describes the task:
### Input:
An eigendecomposition that ensures eigenvectors are perpendicular.
numpy.linalg.eig doesn't guarantee that eigenvectors from the same
eigenspace will be perpendicular. This method uses Gram-Schmidt to recover
a perpendicular set. It further checks that all eigenvectors are
perpendicular and raises an ArithmeticError otherwise.
Args:
matrix: The matrix to decompose.
rtol: Relative threshold for determining whether eigenvalues are from
the same eigenspace and whether eigenvectors are perpendicular.
atol: Absolute threshold for determining whether eigenvalues are from
the same eigenspace and whether eigenvectors are perpendicular.
Returns:
The eigenvalues and column eigenvectors. The i'th eigenvalue is
associated with the i'th column eigenvector.
Raises:
ArithmeticError: Failed to find perpendicular eigenvectors.
### Response:
def _perp_eigendecompose(matrix: np.ndarray,
rtol: float = 1e-5,
atol: float = 1e-8,
) -> Tuple[np.array, List[np.ndarray]]:
"""An eigendecomposition that ensures eigenvectors are perpendicular.
numpy.linalg.eig doesn't guarantee that eigenvectors from the same
eigenspace will be perpendicular. This method uses Gram-Schmidt to recover
a perpendicular set. It further checks that all eigenvectors are
perpendicular and raises an ArithmeticError otherwise.
Args:
matrix: The matrix to decompose.
rtol: Relative threshold for determining whether eigenvalues are from
the same eigenspace and whether eigenvectors are perpendicular.
atol: Absolute threshold for determining whether eigenvalues are from
the same eigenspace and whether eigenvectors are perpendicular.
Returns:
The eigenvalues and column eigenvectors. The i'th eigenvalue is
associated with the i'th column eigenvector.
Raises:
ArithmeticError: Failed to find perpendicular eigenvectors.
"""
vals, cols = np.linalg.eig(matrix)
vecs = [cols[:, i] for i in range(len(cols))]
# Convert list of row arrays to list of column arrays.
for i in range(len(vecs)):
vecs[i] = np.reshape(vecs[i], (len(vecs[i]), vecs[i].ndim))
# Group by similar eigenvalue.
n = len(vecs)
groups = _group_similar(
list(range(n)),
lambda k1, k2: np.allclose(vals[k1], vals[k2], rtol=rtol))
# Remove overlap between eigenvectors with the same eigenvalue.
for g in groups:
q, _ = np.linalg.qr(np.hstack([vecs[i] for i in g]))
for i in range(len(g)):
vecs[g[i]] = q[:, i]
return vals, vecs |
def from_spectra(cls, *spectra, **kwargs):
"""Build a new `Spectrogram` from a list of spectra.
Parameters
----------
*spectra
any number of `~gwpy.frequencyseries.FrequencySeries` series
dt : `float`, `~astropy.units.Quantity`, optional
stride between given spectra
Returns
-------
Spectrogram
a new `Spectrogram` from a vertical stacking of the spectra
The new object takes the metadata from the first given
`~gwpy.frequencyseries.FrequencySeries` if not given explicitly
Notes
-----
Each `~gwpy.frequencyseries.FrequencySeries` passed to this
constructor must be the same length.
"""
data = numpy.vstack([s.value for s in spectra])
spec1 = list(spectra)[0]
if not all(s.f0 == spec1.f0 for s in spectra):
raise ValueError("Cannot stack spectra with different f0")
if not all(s.df == spec1.df for s in spectra):
raise ValueError("Cannot stack spectra with different df")
kwargs.setdefault('name', spec1.name)
kwargs.setdefault('channel', spec1.channel)
kwargs.setdefault('epoch', spec1.epoch)
kwargs.setdefault('f0', spec1.f0)
kwargs.setdefault('df', spec1.df)
kwargs.setdefault('unit', spec1.unit)
if not ('dt' in kwargs or 'times' in kwargs):
try:
kwargs.setdefault('dt', spectra[1].epoch.gps - spec1.epoch.gps)
except (AttributeError, IndexError):
raise ValueError("Cannot determine dt (time-spacing) for "
"Spectrogram from inputs")
return Spectrogram(data, **kwargs) | Build a new `Spectrogram` from a list of spectra.
Parameters
----------
*spectra
any number of `~gwpy.frequencyseries.FrequencySeries` series
dt : `float`, `~astropy.units.Quantity`, optional
stride between given spectra
Returns
-------
Spectrogram
a new `Spectrogram` from a vertical stacking of the spectra
The new object takes the metadata from the first given
`~gwpy.frequencyseries.FrequencySeries` if not given explicitly
Notes
-----
Each `~gwpy.frequencyseries.FrequencySeries` passed to this
constructor must be the same length. | Below is the the instruction that describes the task:
### Input:
Build a new `Spectrogram` from a list of spectra.
Parameters
----------
*spectra
any number of `~gwpy.frequencyseries.FrequencySeries` series
dt : `float`, `~astropy.units.Quantity`, optional
stride between given spectra
Returns
-------
Spectrogram
a new `Spectrogram` from a vertical stacking of the spectra
The new object takes the metadata from the first given
`~gwpy.frequencyseries.FrequencySeries` if not given explicitly
Notes
-----
Each `~gwpy.frequencyseries.FrequencySeries` passed to this
constructor must be the same length.
### Response:
def from_spectra(cls, *spectra, **kwargs):
"""Build a new `Spectrogram` from a list of spectra.
Parameters
----------
*spectra
any number of `~gwpy.frequencyseries.FrequencySeries` series
dt : `float`, `~astropy.units.Quantity`, optional
stride between given spectra
Returns
-------
Spectrogram
a new `Spectrogram` from a vertical stacking of the spectra
The new object takes the metadata from the first given
`~gwpy.frequencyseries.FrequencySeries` if not given explicitly
Notes
-----
Each `~gwpy.frequencyseries.FrequencySeries` passed to this
constructor must be the same length.
"""
data = numpy.vstack([s.value for s in spectra])
spec1 = list(spectra)[0]
if not all(s.f0 == spec1.f0 for s in spectra):
raise ValueError("Cannot stack spectra with different f0")
if not all(s.df == spec1.df for s in spectra):
raise ValueError("Cannot stack spectra with different df")
kwargs.setdefault('name', spec1.name)
kwargs.setdefault('channel', spec1.channel)
kwargs.setdefault('epoch', spec1.epoch)
kwargs.setdefault('f0', spec1.f0)
kwargs.setdefault('df', spec1.df)
kwargs.setdefault('unit', spec1.unit)
if not ('dt' in kwargs or 'times' in kwargs):
try:
kwargs.setdefault('dt', spectra[1].epoch.gps - spec1.epoch.gps)
except (AttributeError, IndexError):
raise ValueError("Cannot determine dt (time-spacing) for "
"Spectrogram from inputs")
return Spectrogram(data, **kwargs) |
def list(self, path, depth=1):
"""Returns the listing/contents of the given remote directory
:param path: path to the remote directory
:param depth: depth of the listing, integer or "infinity"
:returns: directory listing
:rtype: array of :class:`FileInfo` objects
:raises: HTTPResponseError in case an HTTP error status was returned
"""
if not path.endswith('/'):
path += '/'
headers = {}
if isinstance(depth, int) or depth == "infinity":
headers['Depth'] = str(depth)
res = self._make_dav_request('PROPFIND', path, headers=headers)
# first one is always the root, remove it from listing
if res:
return res[1:]
return None | Returns the listing/contents of the given remote directory
:param path: path to the remote directory
:param depth: depth of the listing, integer or "infinity"
:returns: directory listing
:rtype: array of :class:`FileInfo` objects
:raises: HTTPResponseError in case an HTTP error status was returned | Below is the the instruction that describes the task:
### Input:
Returns the listing/contents of the given remote directory
:param path: path to the remote directory
:param depth: depth of the listing, integer or "infinity"
:returns: directory listing
:rtype: array of :class:`FileInfo` objects
:raises: HTTPResponseError in case an HTTP error status was returned
### Response:
def list(self, path, depth=1):
"""Returns the listing/contents of the given remote directory
:param path: path to the remote directory
:param depth: depth of the listing, integer or "infinity"
:returns: directory listing
:rtype: array of :class:`FileInfo` objects
:raises: HTTPResponseError in case an HTTP error status was returned
"""
if not path.endswith('/'):
path += '/'
headers = {}
if isinstance(depth, int) or depth == "infinity":
headers['Depth'] = str(depth)
res = self._make_dav_request('PROPFIND', path, headers=headers)
# first one is always the root, remove it from listing
if res:
return res[1:]
return None |
def present(self, value):
"""Return a user-friendly representation of a value.
Lookup value in self.specials, or call .to_literal() if absent.
"""
for k, v in self.special.items():
if v == value:
return k
return self.to_literal(value, *self.args, **self.kw) | Return a user-friendly representation of a value.
Lookup value in self.specials, or call .to_literal() if absent. | Below is the the instruction that describes the task:
### Input:
Return a user-friendly representation of a value.
Lookup value in self.specials, or call .to_literal() if absent.
### Response:
def present(self, value):
"""Return a user-friendly representation of a value.
Lookup value in self.specials, or call .to_literal() if absent.
"""
for k, v in self.special.items():
if v == value:
return k
return self.to_literal(value, *self.args, **self.kw) |
def set_application_name(self, options):
"""
Set the application_name on PostgreSQL connection
Use the fallback_application_name to let the user override
it with PGAPPNAME env variable
http://www.postgresql.org/docs/9.4/static/libpq-connect.html#LIBPQ-PARAMKEYWORDS # noqa
"""
supported_backends = ['django.db.backends.postgresql',
'django.db.backends.postgresql_psycopg2']
opt_name = 'fallback_application_name'
default_app_name = 'django_shell'
app_name = default_app_name
dbs = getattr(settings, 'DATABASES', [])
# lookup over all the databases entry
for db in dbs.keys():
if dbs[db]['ENGINE'] in supported_backends:
try:
options = dbs[db]['OPTIONS']
except KeyError:
options = {}
# dot not override a defined value
if opt_name in options.keys():
app_name = dbs[db]['OPTIONS'][opt_name]
else:
dbs[db].setdefault('OPTIONS', {}).update({opt_name: default_app_name})
app_name = default_app_name
return app_name | Set the application_name on PostgreSQL connection
Use the fallback_application_name to let the user override
it with PGAPPNAME env variable
http://www.postgresql.org/docs/9.4/static/libpq-connect.html#LIBPQ-PARAMKEYWORDS # noqa | Below is the the instruction that describes the task:
### Input:
Set the application_name on PostgreSQL connection
Use the fallback_application_name to let the user override
it with PGAPPNAME env variable
http://www.postgresql.org/docs/9.4/static/libpq-connect.html#LIBPQ-PARAMKEYWORDS # noqa
### Response:
def set_application_name(self, options):
"""
Set the application_name on PostgreSQL connection
Use the fallback_application_name to let the user override
it with PGAPPNAME env variable
http://www.postgresql.org/docs/9.4/static/libpq-connect.html#LIBPQ-PARAMKEYWORDS # noqa
"""
supported_backends = ['django.db.backends.postgresql',
'django.db.backends.postgresql_psycopg2']
opt_name = 'fallback_application_name'
default_app_name = 'django_shell'
app_name = default_app_name
dbs = getattr(settings, 'DATABASES', [])
# lookup over all the databases entry
for db in dbs.keys():
if dbs[db]['ENGINE'] in supported_backends:
try:
options = dbs[db]['OPTIONS']
except KeyError:
options = {}
# dot not override a defined value
if opt_name in options.keys():
app_name = dbs[db]['OPTIONS'][opt_name]
else:
dbs[db].setdefault('OPTIONS', {}).update({opt_name: default_app_name})
app_name = default_app_name
return app_name |
def get_string(self,
key,
is_list=False,
is_optional=False,
is_secret=False,
is_local=False,
default=None,
options=None):
"""
Get a the value corresponding to the key and converts it to `str`/`list(str)`.
Args:
key: the dict key.
is_list: If this is one element or a list of elements.
is_optional: To raise an error if key was not found.
is_secret: If the key is a secret.
is_local: If the key is a local to this service.
default: default value if is_optional is True.
options: list/tuple if provided, the value must be one of these values.
Returns:
`str`: value corresponding to the key.
"""
if is_list:
return self._get_typed_list_value(key=key,
target_type=str,
type_convert=str,
is_optional=is_optional,
is_secret=is_secret,
is_local=is_local,
default=default,
options=options)
return self._get_typed_value(key=key,
target_type=str,
type_convert=str,
is_optional=is_optional,
is_secret=is_secret,
is_local=is_local,
default=default,
options=options) | Get a the value corresponding to the key and converts it to `str`/`list(str)`.
Args:
key: the dict key.
is_list: If this is one element or a list of elements.
is_optional: To raise an error if key was not found.
is_secret: If the key is a secret.
is_local: If the key is a local to this service.
default: default value if is_optional is True.
options: list/tuple if provided, the value must be one of these values.
Returns:
`str`: value corresponding to the key. | Below is the the instruction that describes the task:
### Input:
Get a the value corresponding to the key and converts it to `str`/`list(str)`.
Args:
key: the dict key.
is_list: If this is one element or a list of elements.
is_optional: To raise an error if key was not found.
is_secret: If the key is a secret.
is_local: If the key is a local to this service.
default: default value if is_optional is True.
options: list/tuple if provided, the value must be one of these values.
Returns:
`str`: value corresponding to the key.
### Response:
def get_string(self,
key,
is_list=False,
is_optional=False,
is_secret=False,
is_local=False,
default=None,
options=None):
"""
Get a the value corresponding to the key and converts it to `str`/`list(str)`.
Args:
key: the dict key.
is_list: If this is one element or a list of elements.
is_optional: To raise an error if key was not found.
is_secret: If the key is a secret.
is_local: If the key is a local to this service.
default: default value if is_optional is True.
options: list/tuple if provided, the value must be one of these values.
Returns:
`str`: value corresponding to the key.
"""
if is_list:
return self._get_typed_list_value(key=key,
target_type=str,
type_convert=str,
is_optional=is_optional,
is_secret=is_secret,
is_local=is_local,
default=default,
options=options)
return self._get_typed_value(key=key,
target_type=str,
type_convert=str,
is_optional=is_optional,
is_secret=is_secret,
is_local=is_local,
default=default,
options=options) |
def fetch(args):
"""
%prog fetch "query"
OR
%prog fetch queries.txt
Please provide a UniProt compatible `query` to retrieve data. If `query` contains
spaces, please remember to "quote" it.
You can also specify a `filename` which contains queries, one per line.
Follow this syntax <http://www.uniprot.org/help/text-search#text-search-syntax>
to query any of the documented fields <http://www.uniprot.org/help/query-fields>
"""
import re
import csv
p = OptionParser(fetch.__doc__)
p.add_option("--format", default="tab", choices=valid_formats,
help="download format [default: %default]")
p.add_option("--columns", default="entry name, protein names, genes,organism",
help="columns to download, if --format is `tab` or `xls`." +
" [default: %default]")
p.add_option("--include", default=False, action="store_true",
help="Include isoforms when --format is `fasta` or include `description` when" +
" --format is `rdf`. [default: %default]")
p.add_option("--limit", default=10, type="int",
help="Max number of results to retrieve [default: %default]")
p.add_option("--offset", default=0, type="int",
help="Offset of first result, used with --limit [default: %default]")
p.add_option("--skipcheck", default=False, action="store_true",
help="turn off prompt to check file existence [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
query, = args
url_params = {}
if op.exists(query):
pf = query.rsplit(".", 1)[0]
list_of_queries = [row.strip() for row in open(query)]
else:
# the query is the search term
pf = query.strip().strip('\"')
list_of_queries = [pf]
pf = re.sub(r"\s+", '_', pf)
assert len(list_of_queries) > 0, \
"Please provide atleast one input query"
url_params['format'] = opts.format
if opts.columns and opts.format in valid_column_formats:
reader = csv.reader([opts.columns], skipinitialspace=True)
cols = [col for r in reader for col in r]
for col in cols:
assert col in valid_columns, \
"Column '{0}' is not a valid. Allowed options are {1}".\
format(col, valid_columns)
url_params['columns'] = ",".join(cols)
if opts.include and opts.format in valid_include_formats:
url_params['include'] = "yes"
url_params['limit'] = opts.limit
url_params['offset'] = opts.offset
outfile = "{0}.{1}".format(pf, opts.format)
# If noprompt, will not check file existence
fw = must_open(outfile, "w", checkexists=True,
skipcheck=opts.skipcheck)
if fw is None:
return
seen = set()
for query in list_of_queries:
if query in seen:
logging.error("Duplicate query ({0}) found".format(query))
continue
url_params['query'] = query
data = urlencode(url_params)
try:
request = Request(uniprot_url, data)
response = urlopen(request)
except (HTTPError, URLError,
RuntimeError, KeyError) as e:
logging.error(e)
logging.debug("wait 5 seconds to reconnect...")
time.sleep(5)
page = response.read()
if not page:
logging.error("query `{0}` yielded no results".format(query))
continue
print(page, file=fw)
seen.add(query)
if seen:
print("A total of {0} out of {1} queries returned results.".
format(len(seen), len(list_of_queries)), file=sys.stderr) | %prog fetch "query"
OR
%prog fetch queries.txt
Please provide a UniProt compatible `query` to retrieve data. If `query` contains
spaces, please remember to "quote" it.
You can also specify a `filename` which contains queries, one per line.
Follow this syntax <http://www.uniprot.org/help/text-search#text-search-syntax>
to query any of the documented fields <http://www.uniprot.org/help/query-fields> | Below is the the instruction that describes the task:
### Input:
%prog fetch "query"
OR
%prog fetch queries.txt
Please provide a UniProt compatible `query` to retrieve data. If `query` contains
spaces, please remember to "quote" it.
You can also specify a `filename` which contains queries, one per line.
Follow this syntax <http://www.uniprot.org/help/text-search#text-search-syntax>
to query any of the documented fields <http://www.uniprot.org/help/query-fields>
### Response:
def fetch(args):
"""
%prog fetch "query"
OR
%prog fetch queries.txt
Please provide a UniProt compatible `query` to retrieve data. If `query` contains
spaces, please remember to "quote" it.
You can also specify a `filename` which contains queries, one per line.
Follow this syntax <http://www.uniprot.org/help/text-search#text-search-syntax>
to query any of the documented fields <http://www.uniprot.org/help/query-fields>
"""
import re
import csv
p = OptionParser(fetch.__doc__)
p.add_option("--format", default="tab", choices=valid_formats,
help="download format [default: %default]")
p.add_option("--columns", default="entry name, protein names, genes,organism",
help="columns to download, if --format is `tab` or `xls`." +
" [default: %default]")
p.add_option("--include", default=False, action="store_true",
help="Include isoforms when --format is `fasta` or include `description` when" +
" --format is `rdf`. [default: %default]")
p.add_option("--limit", default=10, type="int",
help="Max number of results to retrieve [default: %default]")
p.add_option("--offset", default=0, type="int",
help="Offset of first result, used with --limit [default: %default]")
p.add_option("--skipcheck", default=False, action="store_true",
help="turn off prompt to check file existence [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
query, = args
url_params = {}
if op.exists(query):
pf = query.rsplit(".", 1)[0]
list_of_queries = [row.strip() for row in open(query)]
else:
# the query is the search term
pf = query.strip().strip('\"')
list_of_queries = [pf]
pf = re.sub(r"\s+", '_', pf)
assert len(list_of_queries) > 0, \
"Please provide atleast one input query"
url_params['format'] = opts.format
if opts.columns and opts.format in valid_column_formats:
reader = csv.reader([opts.columns], skipinitialspace=True)
cols = [col for r in reader for col in r]
for col in cols:
assert col in valid_columns, \
"Column '{0}' is not a valid. Allowed options are {1}".\
format(col, valid_columns)
url_params['columns'] = ",".join(cols)
if opts.include and opts.format in valid_include_formats:
url_params['include'] = "yes"
url_params['limit'] = opts.limit
url_params['offset'] = opts.offset
outfile = "{0}.{1}".format(pf, opts.format)
# If noprompt, will not check file existence
fw = must_open(outfile, "w", checkexists=True,
skipcheck=opts.skipcheck)
if fw is None:
return
seen = set()
for query in list_of_queries:
if query in seen:
logging.error("Duplicate query ({0}) found".format(query))
continue
url_params['query'] = query
data = urlencode(url_params)
try:
request = Request(uniprot_url, data)
response = urlopen(request)
except (HTTPError, URLError,
RuntimeError, KeyError) as e:
logging.error(e)
logging.debug("wait 5 seconds to reconnect...")
time.sleep(5)
page = response.read()
if not page:
logging.error("query `{0}` yielded no results".format(query))
continue
print(page, file=fw)
seen.add(query)
if seen:
print("A total of {0} out of {1} queries returned results.".
format(len(seen), len(list_of_queries)), file=sys.stderr) |
def blue(cls, string, auto=False):
"""Color-code entire string.
:param str string: String to colorize.
:param bool auto: Enable auto-color (dark/light terminal).
:return: Class instance for colorized string.
:rtype: Color
"""
return cls.colorize('blue', string, auto=auto) | Color-code entire string.
:param str string: String to colorize.
:param bool auto: Enable auto-color (dark/light terminal).
:return: Class instance for colorized string.
:rtype: Color | Below is the the instruction that describes the task:
### Input:
Color-code entire string.
:param str string: String to colorize.
:param bool auto: Enable auto-color (dark/light terminal).
:return: Class instance for colorized string.
:rtype: Color
### Response:
def blue(cls, string, auto=False):
"""Color-code entire string.
:param str string: String to colorize.
:param bool auto: Enable auto-color (dark/light terminal).
:return: Class instance for colorized string.
:rtype: Color
"""
return cls.colorize('blue', string, auto=auto) |
def _get_tab(cls):
"""Generate and return the COBS table."""
if not cls._tabs['dec_cobs']:
# Compute the COBS table for decoding
cls._tabs['dec_cobs']['\xff'] = (255, '')
cls._tabs['dec_cobs'].update(dict((chr(l), (l, '\0'))
for l in range(1, 255)))
# Compute the COBS table for encoding
cls._tabs['enc_cobs'] = [(255, '\xff'),
dict((l, chr(l))
for l in range(1, 255)),
]
return cls._tabs['dec_cobs'], cls._tabs['enc_cobs'] | Generate and return the COBS table. | Below is the the instruction that describes the task:
### Input:
Generate and return the COBS table.
### Response:
def _get_tab(cls):
"""Generate and return the COBS table."""
if not cls._tabs['dec_cobs']:
# Compute the COBS table for decoding
cls._tabs['dec_cobs']['\xff'] = (255, '')
cls._tabs['dec_cobs'].update(dict((chr(l), (l, '\0'))
for l in range(1, 255)))
# Compute the COBS table for encoding
cls._tabs['enc_cobs'] = [(255, '\xff'),
dict((l, chr(l))
for l in range(1, 255)),
]
return cls._tabs['dec_cobs'], cls._tabs['enc_cobs'] |
def variance_inflation_factor(regressors, hasconst=False):
"""Calculate variance inflation factor (VIF) for each all `regressors`.
A wrapper/modification of statsmodels:
statsmodels.stats.outliers_influence.variance_inflation_factor
One recommendation is that if VIF is greater than 5, then the explanatory
variable `x` is highly collinear with the other explanatory
variables, and the parameter estimates will have large standard errors
because of this. [source: StatsModels]
Parameters
----------
regressors: DataFrame
DataFrame containing the entire set of regressors
hasconst : bool, default False
If False, a column vector will be added to `regressors` for use in
OLS
Example
-------
# Generate some data
from datetime import date
from pandas_datareader.data import DataReader as dr
syms = {'TWEXBMTH' : 'usd',
'T10Y2YM' : 'term_spread',
'PCOPPUSDM' : 'copper'
}
start = date(2000, 1, 1)
data = (dr(syms.keys(), 'fred', start)
.pct_change()
.dropna())
data = data.rename(columns = syms)
print(variance_inflation_factor(data))
usd 1.31609
term_spread 1.03793
copper 1.37055
dtype: float64
"""
if not hasconst:
regressors = add_constant(regressors, prepend=False)
k = regressors.shape[1]
def vif_sub(x, regressors):
x_i = regressors.iloc[:, x]
mask = np.arange(k) != x
x_not_i = regressors.iloc[:, mask]
rsq = linear_model.OLS(x_i, x_not_i, missing="drop").fit().rsquared_adj
vif = 1.0 / (1.0 - rsq)
return vif
vifs = pd.Series(np.arange(k), index=regressors.columns)
vifs = vifs.apply(vif_sub, args=(regressors,))
# Find the constant column (probably called 'const', but not necessarily
# and drop it. `is_nonzero_const` borrowed from statsmodels.add_constant
is_nonzero_const = np.ptp(regressors.values, axis=0) == 0
is_nonzero_const &= np.all(regressors != 0.0, axis=0)
vifs.drop(vifs.index[is_nonzero_const], inplace=True)
return vifs | Calculate variance inflation factor (VIF) for each all `regressors`.
A wrapper/modification of statsmodels:
statsmodels.stats.outliers_influence.variance_inflation_factor
One recommendation is that if VIF is greater than 5, then the explanatory
variable `x` is highly collinear with the other explanatory
variables, and the parameter estimates will have large standard errors
because of this. [source: StatsModels]
Parameters
----------
regressors: DataFrame
DataFrame containing the entire set of regressors
hasconst : bool, default False
If False, a column vector will be added to `regressors` for use in
OLS
Example
-------
# Generate some data
from datetime import date
from pandas_datareader.data import DataReader as dr
syms = {'TWEXBMTH' : 'usd',
'T10Y2YM' : 'term_spread',
'PCOPPUSDM' : 'copper'
}
start = date(2000, 1, 1)
data = (dr(syms.keys(), 'fred', start)
.pct_change()
.dropna())
data = data.rename(columns = syms)
print(variance_inflation_factor(data))
usd 1.31609
term_spread 1.03793
copper 1.37055
dtype: float64 | Below is the the instruction that describes the task:
### Input:
Calculate variance inflation factor (VIF) for each all `regressors`.
A wrapper/modification of statsmodels:
statsmodels.stats.outliers_influence.variance_inflation_factor
One recommendation is that if VIF is greater than 5, then the explanatory
variable `x` is highly collinear with the other explanatory
variables, and the parameter estimates will have large standard errors
because of this. [source: StatsModels]
Parameters
----------
regressors: DataFrame
DataFrame containing the entire set of regressors
hasconst : bool, default False
If False, a column vector will be added to `regressors` for use in
OLS
Example
-------
# Generate some data
from datetime import date
from pandas_datareader.data import DataReader as dr
syms = {'TWEXBMTH' : 'usd',
'T10Y2YM' : 'term_spread',
'PCOPPUSDM' : 'copper'
}
start = date(2000, 1, 1)
data = (dr(syms.keys(), 'fred', start)
.pct_change()
.dropna())
data = data.rename(columns = syms)
print(variance_inflation_factor(data))
usd 1.31609
term_spread 1.03793
copper 1.37055
dtype: float64
### Response:
def variance_inflation_factor(regressors, hasconst=False):
"""Calculate variance inflation factor (VIF) for each all `regressors`.
A wrapper/modification of statsmodels:
statsmodels.stats.outliers_influence.variance_inflation_factor
One recommendation is that if VIF is greater than 5, then the explanatory
variable `x` is highly collinear with the other explanatory
variables, and the parameter estimates will have large standard errors
because of this. [source: StatsModels]
Parameters
----------
regressors: DataFrame
DataFrame containing the entire set of regressors
hasconst : bool, default False
If False, a column vector will be added to `regressors` for use in
OLS
Example
-------
# Generate some data
from datetime import date
from pandas_datareader.data import DataReader as dr
syms = {'TWEXBMTH' : 'usd',
'T10Y2YM' : 'term_spread',
'PCOPPUSDM' : 'copper'
}
start = date(2000, 1, 1)
data = (dr(syms.keys(), 'fred', start)
.pct_change()
.dropna())
data = data.rename(columns = syms)
print(variance_inflation_factor(data))
usd 1.31609
term_spread 1.03793
copper 1.37055
dtype: float64
"""
if not hasconst:
regressors = add_constant(regressors, prepend=False)
k = regressors.shape[1]
def vif_sub(x, regressors):
x_i = regressors.iloc[:, x]
mask = np.arange(k) != x
x_not_i = regressors.iloc[:, mask]
rsq = linear_model.OLS(x_i, x_not_i, missing="drop").fit().rsquared_adj
vif = 1.0 / (1.0 - rsq)
return vif
vifs = pd.Series(np.arange(k), index=regressors.columns)
vifs = vifs.apply(vif_sub, args=(regressors,))
# Find the constant column (probably called 'const', but not necessarily
# and drop it. `is_nonzero_const` borrowed from statsmodels.add_constant
is_nonzero_const = np.ptp(regressors.values, axis=0) == 0
is_nonzero_const &= np.all(regressors != 0.0, axis=0)
vifs.drop(vifs.index[is_nonzero_const], inplace=True)
return vifs |
def support_support_param_username(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
support = ET.SubElement(config, "support", xmlns="urn:brocade.com:mgmt:brocade-ras")
support_param = ET.SubElement(support, "support-param")
username = ET.SubElement(support_param, "username")
username.text = kwargs.pop('username')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def support_support_param_username(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
support = ET.SubElement(config, "support", xmlns="urn:brocade.com:mgmt:brocade-ras")
support_param = ET.SubElement(support, "support-param")
username = ET.SubElement(support_param, "username")
username.text = kwargs.pop('username')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def _logical(self, operator, params):
'''
$and: joins query clauses with a logical AND returns all items
that match the conditions of both clauses
$or: joins query clauses with a logical OR returns all items
that match the conditions of either clause.
'''
result = list()
if isinstance(params, dict):
for k,v in params.items():
selectors, modifiers = self._parse(dict([(k, v),]))
result.append("(%s)" % selectors)
elif isinstance(params, (list, tuple)):
for v in params:
selectors, modifiers = self._parse(v)
result.append("(%s)" % selectors)
else:
raise RuntimeError('Unknow parameter type, %s:%s' % (type(params), params))
if operator == '$and':
return ' AND '.join(result)
elif operator == '$or':
return ' OR '.join(result)
else:
raise RuntimeError('Unknown operator, %s' % operator) | $and: joins query clauses with a logical AND returns all items
that match the conditions of both clauses
$or: joins query clauses with a logical OR returns all items
that match the conditions of either clause. | Below is the the instruction that describes the task:
### Input:
$and: joins query clauses with a logical AND returns all items
that match the conditions of both clauses
$or: joins query clauses with a logical OR returns all items
that match the conditions of either clause.
### Response:
def _logical(self, operator, params):
'''
$and: joins query clauses with a logical AND returns all items
that match the conditions of both clauses
$or: joins query clauses with a logical OR returns all items
that match the conditions of either clause.
'''
result = list()
if isinstance(params, dict):
for k,v in params.items():
selectors, modifiers = self._parse(dict([(k, v),]))
result.append("(%s)" % selectors)
elif isinstance(params, (list, tuple)):
for v in params:
selectors, modifiers = self._parse(v)
result.append("(%s)" % selectors)
else:
raise RuntimeError('Unknow parameter type, %s:%s' % (type(params), params))
if operator == '$and':
return ' AND '.join(result)
elif operator == '$or':
return ' OR '.join(result)
else:
raise RuntimeError('Unknown operator, %s' % operator) |
def get_patched_request(requires, patchlist):
"""Apply patch args to a request.
For example, consider:
>>> print get_patched_request(["foo-5", "bah-8.1"], ["foo-6"])
["foo-6", "bah-8.1"]
>>> print get_patched_request(["foo-5", "bah-8.1"], ["^bah"])
["foo-5"]
The following rules apply wrt how normal/conflict/weak patches override
(note though that the new request is always added, even if it doesn't
override an existing request):
PATCH OVERRIDES: foo !foo ~foo
----- ---------- --- ---- -----
foo Y Y Y
!foo N N N
~foo N N Y
^foo Y Y Y
Args:
requires (list of str or `version.Requirement`): Request.
patchlist (list of str): List of patch requests.
Returns:
List of `version.Requirement`: Patched request.
"""
# rules from table in docstring above
rules = {
'': (True, True, True ),
'!': (False, False, False),
'~': (False, False, True ),
'^': (True, True, True )
}
requires = [Requirement(x) if not isinstance(x, Requirement) else x
for x in requires]
appended = []
for patch in patchlist:
if patch and patch[0] in ('!', '~', '^'):
ch = patch[0]
name = Requirement(patch[1:]).name
else:
ch = ''
name = Requirement(patch).name
rule = rules[ch]
replaced = (ch == '^')
for i, req in enumerate(requires):
if req is None or req.name != name:
continue
if not req.conflict:
replace = rule[0] # foo
elif not req.weak:
replace = rule[1] # !foo
else:
replace = rule[2] # ~foo
if replace:
if replaced:
requires[i] = None
else:
requires[i] = Requirement(patch)
replaced = True
if not replaced:
appended.append(Requirement(patch))
result = [x for x in requires if x is not None] + appended
return result | Apply patch args to a request.
For example, consider:
>>> print get_patched_request(["foo-5", "bah-8.1"], ["foo-6"])
["foo-6", "bah-8.1"]
>>> print get_patched_request(["foo-5", "bah-8.1"], ["^bah"])
["foo-5"]
The following rules apply wrt how normal/conflict/weak patches override
(note though that the new request is always added, even if it doesn't
override an existing request):
PATCH OVERRIDES: foo !foo ~foo
----- ---------- --- ---- -----
foo Y Y Y
!foo N N N
~foo N N Y
^foo Y Y Y
Args:
requires (list of str or `version.Requirement`): Request.
patchlist (list of str): List of patch requests.
Returns:
List of `version.Requirement`: Patched request. | Below is the the instruction that describes the task:
### Input:
Apply patch args to a request.
For example, consider:
>>> print get_patched_request(["foo-5", "bah-8.1"], ["foo-6"])
["foo-6", "bah-8.1"]
>>> print get_patched_request(["foo-5", "bah-8.1"], ["^bah"])
["foo-5"]
The following rules apply wrt how normal/conflict/weak patches override
(note though that the new request is always added, even if it doesn't
override an existing request):
PATCH OVERRIDES: foo !foo ~foo
----- ---------- --- ---- -----
foo Y Y Y
!foo N N N
~foo N N Y
^foo Y Y Y
Args:
requires (list of str or `version.Requirement`): Request.
patchlist (list of str): List of patch requests.
Returns:
List of `version.Requirement`: Patched request.
### Response:
def get_patched_request(requires, patchlist):
"""Apply patch args to a request.
For example, consider:
>>> print get_patched_request(["foo-5", "bah-8.1"], ["foo-6"])
["foo-6", "bah-8.1"]
>>> print get_patched_request(["foo-5", "bah-8.1"], ["^bah"])
["foo-5"]
The following rules apply wrt how normal/conflict/weak patches override
(note though that the new request is always added, even if it doesn't
override an existing request):
PATCH OVERRIDES: foo !foo ~foo
----- ---------- --- ---- -----
foo Y Y Y
!foo N N N
~foo N N Y
^foo Y Y Y
Args:
requires (list of str or `version.Requirement`): Request.
patchlist (list of str): List of patch requests.
Returns:
List of `version.Requirement`: Patched request.
"""
# rules from table in docstring above
rules = {
'': (True, True, True ),
'!': (False, False, False),
'~': (False, False, True ),
'^': (True, True, True )
}
requires = [Requirement(x) if not isinstance(x, Requirement) else x
for x in requires]
appended = []
for patch in patchlist:
if patch and patch[0] in ('!', '~', '^'):
ch = patch[0]
name = Requirement(patch[1:]).name
else:
ch = ''
name = Requirement(patch).name
rule = rules[ch]
replaced = (ch == '^')
for i, req in enumerate(requires):
if req is None or req.name != name:
continue
if not req.conflict:
replace = rule[0] # foo
elif not req.weak:
replace = rule[1] # !foo
else:
replace = rule[2] # ~foo
if replace:
if replaced:
requires[i] = None
else:
requires[i] = Requirement(patch)
replaced = True
if not replaced:
appended.append(Requirement(patch))
result = [x for x in requires if x is not None] + appended
return result |
def detect_intent_with_texttospeech_response(project_id, session_id, texts,
language_code):
"""Returns the result of detect intent with texts as inputs and includes
the response in an audio format.
Using the same `session_id` between requests allows continuation
of the conversaion."""
import dialogflow_v2beta1 as dialogflow
session_client = dialogflow.SessionsClient()
session_path = session_client.session_path(project_id, session_id)
print('Session path: {}\n'.format(session_path))
for text in texts:
text_input = dialogflow.types.TextInput(
text=text, language_code=language_code)
query_input = dialogflow.types.QueryInput(text=text_input)
# Set the query parameters with sentiment analysis
output_audio_config = dialogflow.types.OutputAudioConfig(
audio_encoding=dialogflow.enums.OutputAudioEncoding
.OUTPUT_AUDIO_ENCODING_LINEAR_16)
response = session_client.detect_intent(
session=session_path, query_input=query_input,
output_audio_config=output_audio_config)
print('=' * 20)
print('Query text: {}'.format(response.query_result.query_text))
print('Detected intent: {} (confidence: {})\n'.format(
response.query_result.intent.display_name,
response.query_result.intent_detection_confidence))
print('Fulfillment text: {}\n'.format(
response.query_result.fulfillment_text))
# The response's audio_content is binary.
with open('output.wav', 'wb') as out:
out.write(response.output_audio)
print('Audio content written to file "output.wav"') | Returns the result of detect intent with texts as inputs and includes
the response in an audio format.
Using the same `session_id` between requests allows continuation
of the conversaion. | Below is the the instruction that describes the task:
### Input:
Returns the result of detect intent with texts as inputs and includes
the response in an audio format.
Using the same `session_id` between requests allows continuation
of the conversaion.
### Response:
def detect_intent_with_texttospeech_response(project_id, session_id, texts,
language_code):
"""Returns the result of detect intent with texts as inputs and includes
the response in an audio format.
Using the same `session_id` between requests allows continuation
of the conversaion."""
import dialogflow_v2beta1 as dialogflow
session_client = dialogflow.SessionsClient()
session_path = session_client.session_path(project_id, session_id)
print('Session path: {}\n'.format(session_path))
for text in texts:
text_input = dialogflow.types.TextInput(
text=text, language_code=language_code)
query_input = dialogflow.types.QueryInput(text=text_input)
# Set the query parameters with sentiment analysis
output_audio_config = dialogflow.types.OutputAudioConfig(
audio_encoding=dialogflow.enums.OutputAudioEncoding
.OUTPUT_AUDIO_ENCODING_LINEAR_16)
response = session_client.detect_intent(
session=session_path, query_input=query_input,
output_audio_config=output_audio_config)
print('=' * 20)
print('Query text: {}'.format(response.query_result.query_text))
print('Detected intent: {} (confidence: {})\n'.format(
response.query_result.intent.display_name,
response.query_result.intent_detection_confidence))
print('Fulfillment text: {}\n'.format(
response.query_result.fulfillment_text))
# The response's audio_content is binary.
with open('output.wav', 'wb') as out:
out.write(response.output_audio)
print('Audio content written to file "output.wav"') |
def _process_download_descriptor(self, dd):
# type: (Downloader, blobxfer.models.download.Descriptor) -> None
"""Process download descriptor
:param Downloader self: this
:param blobxfer.models.download.Descriptor dd: download descriptor
"""
# update progress bar
self._update_progress_bar()
# get download offsets
offsets, resume_bytes = dd.next_offsets()
# add resume bytes to counter
if resume_bytes is not None:
with self._disk_operation_lock:
self._download_bytes_sofar += resume_bytes
logger.debug('adding {} sofar {} from {}'.format(
resume_bytes, self._download_bytes_sofar, dd.entity.name))
del resume_bytes
# check if all operations completed
if offsets is None and dd.all_operations_completed:
finalize = True
sfpath = str(dd.final_path)
# finalize integrity
dd.finalize_integrity()
# vectored io checks
if dd.entity.vectored_io is not None:
with self._transfer_lock:
if sfpath not in self._vio_map:
self._vio_map[sfpath] = 1
else:
self._vio_map[sfpath] += 1
if (self._vio_map[sfpath] ==
dd.entity.vectored_io.total_slices):
self._vio_map.pop(sfpath)
else:
finalize = False
# finalize file
if finalize:
dd.finalize_file()
# accounting
with self._transfer_lock:
self._download_sofar += 1
if dd.entity.is_encrypted:
self._dd_map.pop(sfpath)
self._transfer_set.remove(
blobxfer.operations.download.Downloader.
create_unique_transfer_operation_id(dd.entity))
self._transfer_cc.pop(dd.entity.path, None)
return
# re-enqueue for other threads to download
if offsets is None:
self._transfer_queue.put(dd)
return
# ensure forthcoming disk operation is accounted for
with self._disk_operation_lock:
self._disk_set.add(
blobxfer.operations.download.Downloader.
create_unique_disk_operation_id(dd, offsets))
# check if there are too many concurrent connections
with self._transfer_lock:
self._transfer_cc[dd.entity.path] += 1
cc_xfer = self._transfer_cc[dd.entity.path]
if cc_xfer <= self._spec.options.max_single_object_concurrency:
self._transfer_queue.put(dd)
# issue get range
if dd.entity.mode == blobxfer.models.azure.StorageModes.File:
data = blobxfer.operations.azure.file.get_file_range(
dd.entity, offsets)
else:
data = blobxfer.operations.azure.blob.get_blob_range(
dd.entity, offsets)
with self._transfer_lock:
self._transfer_cc[dd.entity.path] -= 1
if cc_xfer > self._spec.options.max_single_object_concurrency:
self._transfer_queue.put(dd)
# enqueue data for processing
self._disk_queue.put((dd, offsets, data)) | Process download descriptor
:param Downloader self: this
:param blobxfer.models.download.Descriptor dd: download descriptor | Below is the the instruction that describes the task:
### Input:
Process download descriptor
:param Downloader self: this
:param blobxfer.models.download.Descriptor dd: download descriptor
### Response:
def _process_download_descriptor(self, dd):
# type: (Downloader, blobxfer.models.download.Descriptor) -> None
"""Process download descriptor
:param Downloader self: this
:param blobxfer.models.download.Descriptor dd: download descriptor
"""
# update progress bar
self._update_progress_bar()
# get download offsets
offsets, resume_bytes = dd.next_offsets()
# add resume bytes to counter
if resume_bytes is not None:
with self._disk_operation_lock:
self._download_bytes_sofar += resume_bytes
logger.debug('adding {} sofar {} from {}'.format(
resume_bytes, self._download_bytes_sofar, dd.entity.name))
del resume_bytes
# check if all operations completed
if offsets is None and dd.all_operations_completed:
finalize = True
sfpath = str(dd.final_path)
# finalize integrity
dd.finalize_integrity()
# vectored io checks
if dd.entity.vectored_io is not None:
with self._transfer_lock:
if sfpath not in self._vio_map:
self._vio_map[sfpath] = 1
else:
self._vio_map[sfpath] += 1
if (self._vio_map[sfpath] ==
dd.entity.vectored_io.total_slices):
self._vio_map.pop(sfpath)
else:
finalize = False
# finalize file
if finalize:
dd.finalize_file()
# accounting
with self._transfer_lock:
self._download_sofar += 1
if dd.entity.is_encrypted:
self._dd_map.pop(sfpath)
self._transfer_set.remove(
blobxfer.operations.download.Downloader.
create_unique_transfer_operation_id(dd.entity))
self._transfer_cc.pop(dd.entity.path, None)
return
# re-enqueue for other threads to download
if offsets is None:
self._transfer_queue.put(dd)
return
# ensure forthcoming disk operation is accounted for
with self._disk_operation_lock:
self._disk_set.add(
blobxfer.operations.download.Downloader.
create_unique_disk_operation_id(dd, offsets))
# check if there are too many concurrent connections
with self._transfer_lock:
self._transfer_cc[dd.entity.path] += 1
cc_xfer = self._transfer_cc[dd.entity.path]
if cc_xfer <= self._spec.options.max_single_object_concurrency:
self._transfer_queue.put(dd)
# issue get range
if dd.entity.mode == blobxfer.models.azure.StorageModes.File:
data = blobxfer.operations.azure.file.get_file_range(
dd.entity, offsets)
else:
data = blobxfer.operations.azure.blob.get_blob_range(
dd.entity, offsets)
with self._transfer_lock:
self._transfer_cc[dd.entity.path] -= 1
if cc_xfer > self._spec.options.max_single_object_concurrency:
self._transfer_queue.put(dd)
# enqueue data for processing
self._disk_queue.put((dd, offsets, data)) |
def get_urls(self):
"""
Returns urls handling bundles and views.
This processes the 'item view' first in order
and then adds any non item views at the end.
"""
parts = []
seen = set()
# Process item views in order
for v in list(self._meta.item_views)+list(self._meta.action_views):
if not v in seen:
view, name = self.get_view_and_name(v)
if view and name:
parts.append(self.get_url(name, view, v))
seen.add(v)
# Process everything else that we have not seen
for v in set(self._views).difference(seen):
# Get the url name
view, name = self.get_view_and_name(v)
if view and name:
parts.append(self.get_url(name, view, v))
return parts | Returns urls handling bundles and views.
This processes the 'item view' first in order
and then adds any non item views at the end. | Below is the the instruction that describes the task:
### Input:
Returns urls handling bundles and views.
This processes the 'item view' first in order
and then adds any non item views at the end.
### Response:
def get_urls(self):
"""
Returns urls handling bundles and views.
This processes the 'item view' first in order
and then adds any non item views at the end.
"""
parts = []
seen = set()
# Process item views in order
for v in list(self._meta.item_views)+list(self._meta.action_views):
if not v in seen:
view, name = self.get_view_and_name(v)
if view and name:
parts.append(self.get_url(name, view, v))
seen.add(v)
# Process everything else that we have not seen
for v in set(self._views).difference(seen):
# Get the url name
view, name = self.get_view_and_name(v)
if view and name:
parts.append(self.get_url(name, view, v))
return parts |
def _getOccurs(self, e):
'''return a 3 item tuple
'''
minOccurs = maxOccurs = '1'
nillable = True
return minOccurs,maxOccurs,nillable | return a 3 item tuple | Below is the the instruction that describes the task:
### Input:
return a 3 item tuple
### Response:
def _getOccurs(self, e):
'''return a 3 item tuple
'''
minOccurs = maxOccurs = '1'
nillable = True
return minOccurs,maxOccurs,nillable |
def process_new_post(self, bulk_mode, api_post, posts, author, post_categories, post_tags, post_media_attachments):
"""
Instantiate a new Post object using data from the WP API.
Related fields -- author, categories, tags, and attachments should be processed in advance
:param bulk_mode: If True, minimize db operations by bulk creating post objects
:param api_post: the API data for the Post
:param posts: the potentially growing list of Posts that we are processing in this run
:param author: the Author object for this Post
:param post_categories: the list of Category objects that should be linked to this Post
:param post_tags: the list of Tags objects that should be linked to this Post
:param post_media_attachments: the list of Media objects that should be attached to this Post
:return: None
"""
post = Post(site_id=self.site_id,
wp_id=api_post["ID"],
author=author,
post_date=api_post["date"],
modified=api_post["modified"],
title=api_post["title"],
url=api_post["URL"],
short_url=api_post["short_URL"],
content=api_post["content"],
excerpt=api_post["excerpt"],
slug=api_post["slug"],
guid=api_post["guid"],
status=api_post["status"],
sticky=api_post["sticky"],
password=api_post["password"],
parent=api_post["parent"],
post_type=api_post["type"],
likes_enabled=api_post["likes_enabled"],
sharing_enabled=api_post["sharing_enabled"],
like_count=api_post["like_count"],
global_ID=api_post["global_ID"],
featured_image=api_post["featured_image"],
format=api_post["format"],
menu_order=api_post["menu_order"],
metadata=api_post["metadata"],
post_thumbnail=api_post["post_thumbnail"])
posts.append(post)
# if we're not in bulk mode, go ahead and create the post in the db now
# otherwise this happens after all API posts are processed
if not bulk_mode:
self.bulk_create_posts(posts, post_categories, post_tags, post_media_attachments) | Instantiate a new Post object using data from the WP API.
Related fields -- author, categories, tags, and attachments should be processed in advance
:param bulk_mode: If True, minimize db operations by bulk creating post objects
:param api_post: the API data for the Post
:param posts: the potentially growing list of Posts that we are processing in this run
:param author: the Author object for this Post
:param post_categories: the list of Category objects that should be linked to this Post
:param post_tags: the list of Tags objects that should be linked to this Post
:param post_media_attachments: the list of Media objects that should be attached to this Post
:return: None | Below is the the instruction that describes the task:
### Input:
Instantiate a new Post object using data from the WP API.
Related fields -- author, categories, tags, and attachments should be processed in advance
:param bulk_mode: If True, minimize db operations by bulk creating post objects
:param api_post: the API data for the Post
:param posts: the potentially growing list of Posts that we are processing in this run
:param author: the Author object for this Post
:param post_categories: the list of Category objects that should be linked to this Post
:param post_tags: the list of Tags objects that should be linked to this Post
:param post_media_attachments: the list of Media objects that should be attached to this Post
:return: None
### Response:
def process_new_post(self, bulk_mode, api_post, posts, author, post_categories, post_tags, post_media_attachments):
"""
Instantiate a new Post object using data from the WP API.
Related fields -- author, categories, tags, and attachments should be processed in advance
:param bulk_mode: If True, minimize db operations by bulk creating post objects
:param api_post: the API data for the Post
:param posts: the potentially growing list of Posts that we are processing in this run
:param author: the Author object for this Post
:param post_categories: the list of Category objects that should be linked to this Post
:param post_tags: the list of Tags objects that should be linked to this Post
:param post_media_attachments: the list of Media objects that should be attached to this Post
:return: None
"""
post = Post(site_id=self.site_id,
wp_id=api_post["ID"],
author=author,
post_date=api_post["date"],
modified=api_post["modified"],
title=api_post["title"],
url=api_post["URL"],
short_url=api_post["short_URL"],
content=api_post["content"],
excerpt=api_post["excerpt"],
slug=api_post["slug"],
guid=api_post["guid"],
status=api_post["status"],
sticky=api_post["sticky"],
password=api_post["password"],
parent=api_post["parent"],
post_type=api_post["type"],
likes_enabled=api_post["likes_enabled"],
sharing_enabled=api_post["sharing_enabled"],
like_count=api_post["like_count"],
global_ID=api_post["global_ID"],
featured_image=api_post["featured_image"],
format=api_post["format"],
menu_order=api_post["menu_order"],
metadata=api_post["metadata"],
post_thumbnail=api_post["post_thumbnail"])
posts.append(post)
# if we're not in bulk mode, go ahead and create the post in the db now
# otherwise this happens after all API posts are processed
if not bulk_mode:
self.bulk_create_posts(posts, post_categories, post_tags, post_media_attachments) |
def pdhg_stepsize(L, tau=None, sigma=None):
r"""Default step sizes for `pdhg`.
Parameters
----------
L : `Operator` or float
Operator or norm of the operator that are used in the `pdhg` method.
If it is an `Operator`, the norm is computed with
``Operator.norm(estimate=True)``.
tau : positive float, optional
Use this value for ``tau`` instead of computing it from the
operator norms, see Notes.
sigma : positive float, optional
The ``sigma`` step size parameters for the dual update.
Returns
-------
tau : float
The ``tau`` step size parameter for the primal update.
sigma : tuple of float
The ``sigma`` step size parameter for the dual update.
Notes
-----
To guarantee convergence, the parameters :math:`\tau`, :math:`\sigma`
and :math:`L` need to satisfy
.. math::
\tau \sigma \|L\|^2 < 1
This function has 4 options, :math:`\tau`/:math:`\sigma` given or not
given.
- Neither :math:`\tau` nor :math:`\sigma` are given, they are chosen as
.. math::
\tau = \sigma = \frac{\sqrt{0.9}}{\|L\|}
- If only :math:`\sigma` is given, :math:`\tau` is set to
.. math::
\tau = \frac{0.9}{\sigma \|L\|^2}
- If only :math:`\tau` is given, :math:`\sigma` is set
to
.. math::
\sigma = \frac{0.9}{\tau \|L\|^2}
- If both are given, they are returned as-is without further validation.
"""
if tau is not None and sigma is not None:
return float(tau), float(sigma)
L_norm = L.norm(estimate=True) if isinstance(L, Operator) else float(L)
if tau is None and sigma is None:
tau = sigma = np.sqrt(0.9) / L_norm
return tau, sigma
elif tau is None:
tau = 0.9 / (sigma * L_norm ** 2)
return tau, float(sigma)
else: # sigma is None
sigma = 0.9 / (tau * L_norm ** 2)
return float(tau), sigma | r"""Default step sizes for `pdhg`.
Parameters
----------
L : `Operator` or float
Operator or norm of the operator that are used in the `pdhg` method.
If it is an `Operator`, the norm is computed with
``Operator.norm(estimate=True)``.
tau : positive float, optional
Use this value for ``tau`` instead of computing it from the
operator norms, see Notes.
sigma : positive float, optional
The ``sigma`` step size parameters for the dual update.
Returns
-------
tau : float
The ``tau`` step size parameter for the primal update.
sigma : tuple of float
The ``sigma`` step size parameter for the dual update.
Notes
-----
To guarantee convergence, the parameters :math:`\tau`, :math:`\sigma`
and :math:`L` need to satisfy
.. math::
\tau \sigma \|L\|^2 < 1
This function has 4 options, :math:`\tau`/:math:`\sigma` given or not
given.
- Neither :math:`\tau` nor :math:`\sigma` are given, they are chosen as
.. math::
\tau = \sigma = \frac{\sqrt{0.9}}{\|L\|}
- If only :math:`\sigma` is given, :math:`\tau` is set to
.. math::
\tau = \frac{0.9}{\sigma \|L\|^2}
- If only :math:`\tau` is given, :math:`\sigma` is set
to
.. math::
\sigma = \frac{0.9}{\tau \|L\|^2}
- If both are given, they are returned as-is without further validation. | Below is the the instruction that describes the task:
### Input:
r"""Default step sizes for `pdhg`.
Parameters
----------
L : `Operator` or float
Operator or norm of the operator that are used in the `pdhg` method.
If it is an `Operator`, the norm is computed with
``Operator.norm(estimate=True)``.
tau : positive float, optional
Use this value for ``tau`` instead of computing it from the
operator norms, see Notes.
sigma : positive float, optional
The ``sigma`` step size parameters for the dual update.
Returns
-------
tau : float
The ``tau`` step size parameter for the primal update.
sigma : tuple of float
The ``sigma`` step size parameter for the dual update.
Notes
-----
To guarantee convergence, the parameters :math:`\tau`, :math:`\sigma`
and :math:`L` need to satisfy
.. math::
\tau \sigma \|L\|^2 < 1
This function has 4 options, :math:`\tau`/:math:`\sigma` given or not
given.
- Neither :math:`\tau` nor :math:`\sigma` are given, they are chosen as
.. math::
\tau = \sigma = \frac{\sqrt{0.9}}{\|L\|}
- If only :math:`\sigma` is given, :math:`\tau` is set to
.. math::
\tau = \frac{0.9}{\sigma \|L\|^2}
- If only :math:`\tau` is given, :math:`\sigma` is set
to
.. math::
\sigma = \frac{0.9}{\tau \|L\|^2}
- If both are given, they are returned as-is without further validation.
### Response:
def pdhg_stepsize(L, tau=None, sigma=None):
r"""Default step sizes for `pdhg`.
Parameters
----------
L : `Operator` or float
Operator or norm of the operator that are used in the `pdhg` method.
If it is an `Operator`, the norm is computed with
``Operator.norm(estimate=True)``.
tau : positive float, optional
Use this value for ``tau`` instead of computing it from the
operator norms, see Notes.
sigma : positive float, optional
The ``sigma`` step size parameters for the dual update.
Returns
-------
tau : float
The ``tau`` step size parameter for the primal update.
sigma : tuple of float
The ``sigma`` step size parameter for the dual update.
Notes
-----
To guarantee convergence, the parameters :math:`\tau`, :math:`\sigma`
and :math:`L` need to satisfy
.. math::
\tau \sigma \|L\|^2 < 1
This function has 4 options, :math:`\tau`/:math:`\sigma` given or not
given.
- Neither :math:`\tau` nor :math:`\sigma` are given, they are chosen as
.. math::
\tau = \sigma = \frac{\sqrt{0.9}}{\|L\|}
- If only :math:`\sigma` is given, :math:`\tau` is set to
.. math::
\tau = \frac{0.9}{\sigma \|L\|^2}
- If only :math:`\tau` is given, :math:`\sigma` is set
to
.. math::
\sigma = \frac{0.9}{\tau \|L\|^2}
- If both are given, they are returned as-is without further validation.
"""
if tau is not None and sigma is not None:
return float(tau), float(sigma)
L_norm = L.norm(estimate=True) if isinstance(L, Operator) else float(L)
if tau is None and sigma is None:
tau = sigma = np.sqrt(0.9) / L_norm
return tau, sigma
elif tau is None:
tau = 0.9 / (sigma * L_norm ** 2)
return tau, float(sigma)
else: # sigma is None
sigma = 0.9 / (tau * L_norm ** 2)
return float(tau), sigma |
def delete(instance, disconnect=True):
'''
Delete an *instance* from its metaclass instance pool and optionally
*disconnect* it from any links it might be connected to.
'''
if not isinstance(instance, Class):
raise DeleteException("the provided argument is not an xtuml instance")
return get_metaclass(instance).delete(instance, disconnect) | Delete an *instance* from its metaclass instance pool and optionally
*disconnect* it from any links it might be connected to. | Below is the the instruction that describes the task:
### Input:
Delete an *instance* from its metaclass instance pool and optionally
*disconnect* it from any links it might be connected to.
### Response:
def delete(instance, disconnect=True):
'''
Delete an *instance* from its metaclass instance pool and optionally
*disconnect* it from any links it might be connected to.
'''
if not isinstance(instance, Class):
raise DeleteException("the provided argument is not an xtuml instance")
return get_metaclass(instance).delete(instance, disconnect) |
def main(param_path='parameters.txt'):
"""
Entry point function for analysis based on parameter files.
Parameters
----------
param_path : str
Path to user-generated parameter file
"""
# Confirm parameters file is present
if not os.path.isfile(param_path):
raise IOError, "Parameter file not found at %s" % param_path
# Get raw params and base options (non-run-dependent options)
params, base_options = _get_params_base_options(param_path)
# Configure and start logging
# Done here instead of in function so will affect all subsequent calls
log_path = os.path.join(base_options['results_dir'], '_log.txt')
if os.path.isfile(log_path):
os.remove(log_path)
logging.basicConfig(level=logging.INFO, format='%(message)s')
fileh = logging.FileHandler(log_path)
fileh.setLevel(logging.DEBUG)
filefmt = logging.Formatter(
time.strftime("%Y/%m/%d %H:%M:%S %p", time.localtime()) +
' - %(name)s - %(levelname)s - %(message)s')
fileh.setFormatter(filefmt)
logging.getLogger('').addHandler(fileh)
def log_uncaught(type1, value1, traceback1):
tb_list = traceback.format_exception(type1, value1, traceback1)
tb_str = ''.join(tb_list)
logging.critical('\n\n'+tb_str)
sys.excepthook = log_uncaught
logging.info('Running macroeco') # v%s' % __version__)
logging.info('Parameters file at %s' % os.path.abspath(param_path))
# Preliminary check for errors in parameters file
bad_params = misc.check_parameter_file(param_path)
if len(bad_params[0]) > 0:
logging.warning("Possible formatting error(s) in" +
" %s: parameters %s on lines %s"
% (param_path, bad_params[0], bad_params[1]))
logging.info('Starting analysis')
# Do analysis for each run
for run_name in base_options['run_names']:
logging.info('Starting run %s' % run_name)
options = dict(params[run_name]) # All parameters from this run
options.update(base_options) # Add base parameters
options['run_dir'] = os.path.join(base_options['results_dir'],run_name)
if 'format' in options['analysis']:
_do_format(options)
else:
_do_analysis(options)
logging.info('Finished run %s' % run_name)
logging.info('Finished analysis successfully')
logging.info('Results available at %s' % options['param_dir'])
# Close logging - releases log file lock in Windows GUI
logging.shutdown() | Entry point function for analysis based on parameter files.
Parameters
----------
param_path : str
Path to user-generated parameter file | Below is the the instruction that describes the task:
### Input:
Entry point function for analysis based on parameter files.
Parameters
----------
param_path : str
Path to user-generated parameter file
### Response:
def main(param_path='parameters.txt'):
"""
Entry point function for analysis based on parameter files.
Parameters
----------
param_path : str
Path to user-generated parameter file
"""
# Confirm parameters file is present
if not os.path.isfile(param_path):
raise IOError, "Parameter file not found at %s" % param_path
# Get raw params and base options (non-run-dependent options)
params, base_options = _get_params_base_options(param_path)
# Configure and start logging
# Done here instead of in function so will affect all subsequent calls
log_path = os.path.join(base_options['results_dir'], '_log.txt')
if os.path.isfile(log_path):
os.remove(log_path)
logging.basicConfig(level=logging.INFO, format='%(message)s')
fileh = logging.FileHandler(log_path)
fileh.setLevel(logging.DEBUG)
filefmt = logging.Formatter(
time.strftime("%Y/%m/%d %H:%M:%S %p", time.localtime()) +
' - %(name)s - %(levelname)s - %(message)s')
fileh.setFormatter(filefmt)
logging.getLogger('').addHandler(fileh)
def log_uncaught(type1, value1, traceback1):
tb_list = traceback.format_exception(type1, value1, traceback1)
tb_str = ''.join(tb_list)
logging.critical('\n\n'+tb_str)
sys.excepthook = log_uncaught
logging.info('Running macroeco') # v%s' % __version__)
logging.info('Parameters file at %s' % os.path.abspath(param_path))
# Preliminary check for errors in parameters file
bad_params = misc.check_parameter_file(param_path)
if len(bad_params[0]) > 0:
logging.warning("Possible formatting error(s) in" +
" %s: parameters %s on lines %s"
% (param_path, bad_params[0], bad_params[1]))
logging.info('Starting analysis')
# Do analysis for each run
for run_name in base_options['run_names']:
logging.info('Starting run %s' % run_name)
options = dict(params[run_name]) # All parameters from this run
options.update(base_options) # Add base parameters
options['run_dir'] = os.path.join(base_options['results_dir'],run_name)
if 'format' in options['analysis']:
_do_format(options)
else:
_do_analysis(options)
logging.info('Finished run %s' % run_name)
logging.info('Finished analysis successfully')
logging.info('Results available at %s' % options['param_dir'])
# Close logging - releases log file lock in Windows GUI
logging.shutdown() |
def visit(self, node):
"""Visit a node."""
f = self.get_visitor(node)
if f is not None:
return f(node)
return self.generic_visit(node) | Visit a node. | Below is the the instruction that describes the task:
### Input:
Visit a node.
### Response:
def visit(self, node):
"""Visit a node."""
f = self.get_visitor(node)
if f is not None:
return f(node)
return self.generic_visit(node) |
def get_orbits(official='%'):
"""Query the orbit table for the object whose official desingation
matches parameter official. By default all entries are returned
"""
sql= "SELECT * FROM orbits WHERE official LIKE '%s' " % (official, )
cfeps.execute(sql)
return mk_dict(cfeps.fetchall(),cfeps.description) | Query the orbit table for the object whose official desingation
matches parameter official. By default all entries are returned | Below is the the instruction that describes the task:
### Input:
Query the orbit table for the object whose official desingation
matches parameter official. By default all entries are returned
### Response:
def get_orbits(official='%'):
"""Query the orbit table for the object whose official desingation
matches parameter official. By default all entries are returned
"""
sql= "SELECT * FROM orbits WHERE official LIKE '%s' " % (official, )
cfeps.execute(sql)
return mk_dict(cfeps.fetchall(),cfeps.description) |
def raw_to_delimited(header: Header, raw_payload: RawPayload) -> DelimitedMsg:
"""\
Returns a message consisting of header frames, delimiter frame, and payload frames.
The payload frames may be given as sequences of bytes (raw) or as `Message`s.
"""
return tuple(header) + (b'',) + tuple(raw_payload) | \
Returns a message consisting of header frames, delimiter frame, and payload frames.
The payload frames may be given as sequences of bytes (raw) or as `Message`s. | Below is the the instruction that describes the task:
### Input:
\
Returns a message consisting of header frames, delimiter frame, and payload frames.
The payload frames may be given as sequences of bytes (raw) or as `Message`s.
### Response:
def raw_to_delimited(header: Header, raw_payload: RawPayload) -> DelimitedMsg:
"""\
Returns a message consisting of header frames, delimiter frame, and payload frames.
The payload frames may be given as sequences of bytes (raw) or as `Message`s.
"""
return tuple(header) + (b'',) + tuple(raw_payload) |
def make_rpc_call(self, rpc_command):
"""
Allow a user to query a device directly using XML-requests.
:param rpc_command: (str) rpc command such as:
<Get><Operational><LLDP><NodeTable></NodeTable></LLDP></Operational></Get>
"""
# ~~~ hack: ~~~
if not self.is_alive():
self.close() # force close for safety
self.open() # reopen
# ~~~ end hack ~~~
result = self._execute_rpc(rpc_command)
return ET.tostring(result) | Allow a user to query a device directly using XML-requests.
:param rpc_command: (str) rpc command such as:
<Get><Operational><LLDP><NodeTable></NodeTable></LLDP></Operational></Get> | Below is the the instruction that describes the task:
### Input:
Allow a user to query a device directly using XML-requests.
:param rpc_command: (str) rpc command such as:
<Get><Operational><LLDP><NodeTable></NodeTable></LLDP></Operational></Get>
### Response:
def make_rpc_call(self, rpc_command):
"""
Allow a user to query a device directly using XML-requests.
:param rpc_command: (str) rpc command such as:
<Get><Operational><LLDP><NodeTable></NodeTable></LLDP></Operational></Get>
"""
# ~~~ hack: ~~~
if not self.is_alive():
self.close() # force close for safety
self.open() # reopen
# ~~~ end hack ~~~
result = self._execute_rpc(rpc_command)
return ET.tostring(result) |
def update_container(
self, container, blkio_weight=None, cpu_period=None, cpu_quota=None,
cpu_shares=None, cpuset_cpus=None, cpuset_mems=None, mem_limit=None,
mem_reservation=None, memswap_limit=None, kernel_memory=None,
restart_policy=None
):
"""
Update resource configs of one or more containers.
Args:
container (str): The container to inspect
blkio_weight (int): Block IO (relative weight), between 10 and 1000
cpu_period (int): Limit CPU CFS (Completely Fair Scheduler) period
cpu_quota (int): Limit CPU CFS (Completely Fair Scheduler) quota
cpu_shares (int): CPU shares (relative weight)
cpuset_cpus (str): CPUs in which to allow execution
cpuset_mems (str): MEMs in which to allow execution
mem_limit (int or str): Memory limit
mem_reservation (int or str): Memory soft limit
memswap_limit (int or str): Total memory (memory + swap), -1 to
disable swap
kernel_memory (int or str): Kernel memory limit
restart_policy (dict): Restart policy dictionary
Returns:
(dict): Dictionary containing a ``Warnings`` key.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/containers/{0}/update', container)
data = {}
if blkio_weight:
data['BlkioWeight'] = blkio_weight
if cpu_period:
data['CpuPeriod'] = cpu_period
if cpu_shares:
data['CpuShares'] = cpu_shares
if cpu_quota:
data['CpuQuota'] = cpu_quota
if cpuset_cpus:
data['CpusetCpus'] = cpuset_cpus
if cpuset_mems:
data['CpusetMems'] = cpuset_mems
if mem_limit:
data['Memory'] = utils.parse_bytes(mem_limit)
if mem_reservation:
data['MemoryReservation'] = utils.parse_bytes(mem_reservation)
if memswap_limit:
data['MemorySwap'] = utils.parse_bytes(memswap_limit)
if kernel_memory:
data['KernelMemory'] = utils.parse_bytes(kernel_memory)
if restart_policy:
if utils.version_lt(self._version, '1.23'):
raise errors.InvalidVersion(
'restart policy update is not supported '
'for API version < 1.23'
)
data['RestartPolicy'] = restart_policy
res = self._post_json(url, data=data)
return self._result(res, True) | Update resource configs of one or more containers.
Args:
container (str): The container to inspect
blkio_weight (int): Block IO (relative weight), between 10 and 1000
cpu_period (int): Limit CPU CFS (Completely Fair Scheduler) period
cpu_quota (int): Limit CPU CFS (Completely Fair Scheduler) quota
cpu_shares (int): CPU shares (relative weight)
cpuset_cpus (str): CPUs in which to allow execution
cpuset_mems (str): MEMs in which to allow execution
mem_limit (int or str): Memory limit
mem_reservation (int or str): Memory soft limit
memswap_limit (int or str): Total memory (memory + swap), -1 to
disable swap
kernel_memory (int or str): Kernel memory limit
restart_policy (dict): Restart policy dictionary
Returns:
(dict): Dictionary containing a ``Warnings`` key.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | Below is the the instruction that describes the task:
### Input:
Update resource configs of one or more containers.
Args:
container (str): The container to inspect
blkio_weight (int): Block IO (relative weight), between 10 and 1000
cpu_period (int): Limit CPU CFS (Completely Fair Scheduler) period
cpu_quota (int): Limit CPU CFS (Completely Fair Scheduler) quota
cpu_shares (int): CPU shares (relative weight)
cpuset_cpus (str): CPUs in which to allow execution
cpuset_mems (str): MEMs in which to allow execution
mem_limit (int or str): Memory limit
mem_reservation (int or str): Memory soft limit
memswap_limit (int or str): Total memory (memory + swap), -1 to
disable swap
kernel_memory (int or str): Kernel memory limit
restart_policy (dict): Restart policy dictionary
Returns:
(dict): Dictionary containing a ``Warnings`` key.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
### Response:
def update_container(
self, container, blkio_weight=None, cpu_period=None, cpu_quota=None,
cpu_shares=None, cpuset_cpus=None, cpuset_mems=None, mem_limit=None,
mem_reservation=None, memswap_limit=None, kernel_memory=None,
restart_policy=None
):
"""
Update resource configs of one or more containers.
Args:
container (str): The container to inspect
blkio_weight (int): Block IO (relative weight), between 10 and 1000
cpu_period (int): Limit CPU CFS (Completely Fair Scheduler) period
cpu_quota (int): Limit CPU CFS (Completely Fair Scheduler) quota
cpu_shares (int): CPU shares (relative weight)
cpuset_cpus (str): CPUs in which to allow execution
cpuset_mems (str): MEMs in which to allow execution
mem_limit (int or str): Memory limit
mem_reservation (int or str): Memory soft limit
memswap_limit (int or str): Total memory (memory + swap), -1 to
disable swap
kernel_memory (int or str): Kernel memory limit
restart_policy (dict): Restart policy dictionary
Returns:
(dict): Dictionary containing a ``Warnings`` key.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/containers/{0}/update', container)
data = {}
if blkio_weight:
data['BlkioWeight'] = blkio_weight
if cpu_period:
data['CpuPeriod'] = cpu_period
if cpu_shares:
data['CpuShares'] = cpu_shares
if cpu_quota:
data['CpuQuota'] = cpu_quota
if cpuset_cpus:
data['CpusetCpus'] = cpuset_cpus
if cpuset_mems:
data['CpusetMems'] = cpuset_mems
if mem_limit:
data['Memory'] = utils.parse_bytes(mem_limit)
if mem_reservation:
data['MemoryReservation'] = utils.parse_bytes(mem_reservation)
if memswap_limit:
data['MemorySwap'] = utils.parse_bytes(memswap_limit)
if kernel_memory:
data['KernelMemory'] = utils.parse_bytes(kernel_memory)
if restart_policy:
if utils.version_lt(self._version, '1.23'):
raise errors.InvalidVersion(
'restart policy update is not supported '
'for API version < 1.23'
)
data['RestartPolicy'] = restart_policy
res = self._post_json(url, data=data)
return self._result(res, True) |
def tablespace_list(user=None, host=None, port=None, maintenance_db=None,
password=None, runas=None):
'''
Return dictionary with information about tablespaces of a Postgres server.
CLI Example:
.. code-block:: bash
salt '*' postgres.tablespace_list
.. versionadded:: 2015.8.0
'''
ret = {}
query = (
'SELECT spcname as "Name", pga.rolname as "Owner", spcacl as "ACL", '
'spcoptions as "Opts", pg_tablespace_location(pgts.oid) as "Location" '
'FROM pg_tablespace pgts, pg_roles pga WHERE pga.oid = pgts.spcowner'
)
rows = __salt__['postgres.psql_query'](query, runas=runas, host=host,
user=user, port=port,
maintenance_db=maintenance_db,
password=password)
for row in rows:
ret[row['Name']] = row
ret[row['Name']].pop('Name')
return ret | Return dictionary with information about tablespaces of a Postgres server.
CLI Example:
.. code-block:: bash
salt '*' postgres.tablespace_list
.. versionadded:: 2015.8.0 | Below is the the instruction that describes the task:
### Input:
Return dictionary with information about tablespaces of a Postgres server.
CLI Example:
.. code-block:: bash
salt '*' postgres.tablespace_list
.. versionadded:: 2015.8.0
### Response:
def tablespace_list(user=None, host=None, port=None, maintenance_db=None,
password=None, runas=None):
'''
Return dictionary with information about tablespaces of a Postgres server.
CLI Example:
.. code-block:: bash
salt '*' postgres.tablespace_list
.. versionadded:: 2015.8.0
'''
ret = {}
query = (
'SELECT spcname as "Name", pga.rolname as "Owner", spcacl as "ACL", '
'spcoptions as "Opts", pg_tablespace_location(pgts.oid) as "Location" '
'FROM pg_tablespace pgts, pg_roles pga WHERE pga.oid = pgts.spcowner'
)
rows = __salt__['postgres.psql_query'](query, runas=runas, host=host,
user=user, port=port,
maintenance_db=maintenance_db,
password=password)
for row in rows:
ret[row['Name']] = row
ret[row['Name']].pop('Name')
return ret |
def auth_complete(self, *args, **kwargs):
"""Completes loging process, must return user instance"""
if self.ENV_USERNAME in os.environ:
response = os.environ
elif type(self.strategy).__name__ == "DjangoStrategy" and self.ENV_USERNAME in self.strategy.request.META:
# Looks like the Django strategy. In this case, it might by mod_wsgi, which stores
# authentication environment variables in request.META
response = self.strategy.request.META
else:
raise AuthMissingParameter(self, "%s, found only: %s"%(self.ENV_USERNAME, str(os.environ)))
kwargs.update({'response': response, 'backend': self})
return self.strategy.authenticate(*args, **kwargs) | Completes loging process, must return user instance | Below is the the instruction that describes the task:
### Input:
Completes loging process, must return user instance
### Response:
def auth_complete(self, *args, **kwargs):
"""Completes loging process, must return user instance"""
if self.ENV_USERNAME in os.environ:
response = os.environ
elif type(self.strategy).__name__ == "DjangoStrategy" and self.ENV_USERNAME in self.strategy.request.META:
# Looks like the Django strategy. In this case, it might by mod_wsgi, which stores
# authentication environment variables in request.META
response = self.strategy.request.META
else:
raise AuthMissingParameter(self, "%s, found only: %s"%(self.ENV_USERNAME, str(os.environ)))
kwargs.update({'response': response, 'backend': self})
return self.strategy.authenticate(*args, **kwargs) |
def fill_delegate_proxy_activation_requirements(
requirements_data, cred_file, lifetime_hours=12
):
"""
Given the activation requirements for an endpoint and a filename for
X.509 credentials, extracts the public key from the activation
requirements, uses the key and the credentials to make a proxy credential,
and returns the requirements data with the proxy chain filled in.
"""
# get the public key from the activation requirements
for data in requirements_data["DATA"]:
if data["type"] == "delegate_proxy" and data["name"] == "public_key":
public_key = data["value"]
break
else:
raise ValueError(
(
"No public_key found in activation requirements, this endpoint "
"does not support Delegate Proxy activation."
)
)
# get user credentials from user credential file"
with open(cred_file) as f:
issuer_cred = f.read()
# create the proxy credentials
proxy = create_proxy_credentials(issuer_cred, public_key, lifetime_hours)
# return the activation requirements document with the proxy_chain filled
for data in requirements_data["DATA"]:
if data["type"] == "delegate_proxy" and data["name"] == "proxy_chain":
data["value"] = proxy
return requirements_data
else:
raise ValueError(
(
"No proxy_chain found in activation requirements, this endpoint "
"does not support Delegate Proxy activation."
)
) | Given the activation requirements for an endpoint and a filename for
X.509 credentials, extracts the public key from the activation
requirements, uses the key and the credentials to make a proxy credential,
and returns the requirements data with the proxy chain filled in. | Below is the the instruction that describes the task:
### Input:
Given the activation requirements for an endpoint and a filename for
X.509 credentials, extracts the public key from the activation
requirements, uses the key and the credentials to make a proxy credential,
and returns the requirements data with the proxy chain filled in.
### Response:
def fill_delegate_proxy_activation_requirements(
requirements_data, cred_file, lifetime_hours=12
):
"""
Given the activation requirements for an endpoint and a filename for
X.509 credentials, extracts the public key from the activation
requirements, uses the key and the credentials to make a proxy credential,
and returns the requirements data with the proxy chain filled in.
"""
# get the public key from the activation requirements
for data in requirements_data["DATA"]:
if data["type"] == "delegate_proxy" and data["name"] == "public_key":
public_key = data["value"]
break
else:
raise ValueError(
(
"No public_key found in activation requirements, this endpoint "
"does not support Delegate Proxy activation."
)
)
# get user credentials from user credential file"
with open(cred_file) as f:
issuer_cred = f.read()
# create the proxy credentials
proxy = create_proxy_credentials(issuer_cred, public_key, lifetime_hours)
# return the activation requirements document with the proxy_chain filled
for data in requirements_data["DATA"]:
if data["type"] == "delegate_proxy" and data["name"] == "proxy_chain":
data["value"] = proxy
return requirements_data
else:
raise ValueError(
(
"No proxy_chain found in activation requirements, this endpoint "
"does not support Delegate Proxy activation."
)
) |
def load_exons(self, exons, genes=None, build='37'):
"""Create exon objects and insert them into the database
Args:
exons(iterable(dict))
"""
genes = genes or self.ensembl_genes(build)
for exon in exons:
exon_obj = build_exon(exon, genes)
if not exon_obj:
continue
res = self.exon_collection.insert_one(exon_obj) | Create exon objects and insert them into the database
Args:
exons(iterable(dict)) | Below is the the instruction that describes the task:
### Input:
Create exon objects and insert them into the database
Args:
exons(iterable(dict))
### Response:
def load_exons(self, exons, genes=None, build='37'):
"""Create exon objects and insert them into the database
Args:
exons(iterable(dict))
"""
genes = genes or self.ensembl_genes(build)
for exon in exons:
exon_obj = build_exon(exon, genes)
if not exon_obj:
continue
res = self.exon_collection.insert_one(exon_obj) |
def _max(ctx, *number):
"""
Returns the maximum value of all arguments
"""
if len(number) == 0:
raise ValueError("Wrong number of arguments")
result = conversions.to_decimal(number[0], ctx)
for arg in number[1:]:
arg = conversions.to_decimal(arg, ctx)
if arg > result:
result = arg
return result | Returns the maximum value of all arguments | Below is the the instruction that describes the task:
### Input:
Returns the maximum value of all arguments
### Response:
def _max(ctx, *number):
"""
Returns the maximum value of all arguments
"""
if len(number) == 0:
raise ValueError("Wrong number of arguments")
result = conversions.to_decimal(number[0], ctx)
for arg in number[1:]:
arg = conversions.to_decimal(arg, ctx)
if arg > result:
result = arg
return result |
def _create_penwidth_combo(self):
"""Create pen width combo box"""
choices = map(unicode, xrange(12))
self.pen_width_combo = \
_widgets.PenWidthComboBox(self, choices=choices,
style=wx.CB_READONLY, size=(50, -1))
self.pen_width_combo.SetToolTipString(_(u"Border width"))
self.AddControl(self.pen_width_combo)
self.Bind(wx.EVT_COMBOBOX, self.OnLineWidth, self.pen_width_combo) | Create pen width combo box | Below is the the instruction that describes the task:
### Input:
Create pen width combo box
### Response:
def _create_penwidth_combo(self):
"""Create pen width combo box"""
choices = map(unicode, xrange(12))
self.pen_width_combo = \
_widgets.PenWidthComboBox(self, choices=choices,
style=wx.CB_READONLY, size=(50, -1))
self.pen_width_combo.SetToolTipString(_(u"Border width"))
self.AddControl(self.pen_width_combo)
self.Bind(wx.EVT_COMBOBOX, self.OnLineWidth, self.pen_width_combo) |
def convert_user_pars(wcspars):
""" Convert the parameters provided by the configObj into the corresponding
parameters from an HSTWCS object
"""
default_pars = default_user_wcs.copy()
for kw in user_hstwcs_pars:
default_pars[user_hstwcs_pars[kw]] = wcspars[kw]
return default_pars | Convert the parameters provided by the configObj into the corresponding
parameters from an HSTWCS object | Below is the the instruction that describes the task:
### Input:
Convert the parameters provided by the configObj into the corresponding
parameters from an HSTWCS object
### Response:
def convert_user_pars(wcspars):
""" Convert the parameters provided by the configObj into the corresponding
parameters from an HSTWCS object
"""
default_pars = default_user_wcs.copy()
for kw in user_hstwcs_pars:
default_pars[user_hstwcs_pars[kw]] = wcspars[kw]
return default_pars |
def present(name, value, zone, record_type, ttl=None, identifier=None, region=None, key=None,
keyid=None, profile=None, wait_for_sync=True, split_dns=False, private_zone=False):
'''
Ensure the Route53 record is present.
name
Name of the record.
value
Value of the record. As a special case, you can pass in:
`private:<Name tag>` to have the function autodetermine the private IP
`public:<Name tag>` to have the function autodetermine the public IP
zone
The zone to create the record in.
record_type
The record type (A, NS, MX, TXT, etc.)
ttl
The time to live for the record.
identifier
The unique identifier to use for this record.
region
The region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string) that contains a dict
with region, key and keyid.
wait_for_sync
Wait for an INSYNC change status from Route53 before returning success.
split_dns
Route53 supports parallel public and private DNS zones with the same name.
private_zone
If using split_dns, specify if this is the private zone.
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
# If a list is passed in for value, change it to a comma-separated string
# So it will work with subsequent boto module calls and string functions
if isinstance(value, list):
value = ','.join(value)
elif value.startswith('private:') or value.startswith('public:'):
name_tag = value.split(':', 1)[1]
in_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped')
r = __salt__['boto_ec2.find_instances'](name=name_tag,
return_objs=True,
in_states=in_states,
profile=profile)
if not r:
ret['comment'] = 'Error: instance with Name tag {0} not found'.format(name_tag)
ret['result'] = False
return ret
if len(r) > 1:
ret['comment'] = 'Error: Name tag {0} matched more than one instance'.format(name_tag)
ret['result'] = False
return ret
instance = r[0]
private_ip = getattr(instance, 'private_ip_address', None)
public_ip = getattr(instance, 'ip_address', None)
if value.startswith('private:'):
value = private_ip
log.info('Found private IP %s for instance %s', private_ip, name_tag)
else:
if public_ip is None:
ret['comment'] = 'Error: No Public IP assigned to instance with Name {0}'.format(name_tag)
ret['result'] = False
return ret
value = public_ip
log.info('Found public IP %s for instance %s', public_ip, name_tag)
try:
record = __salt__['boto_route53.get_record'](name, zone, record_type,
False, region, key, keyid,
profile, split_dns,
private_zone, identifier)
except SaltInvocationError as err:
ret['comment'] = 'Error: {0}'.format(err)
ret['result'] = False
return ret
if isinstance(record, dict) and not record:
if __opts__['test']:
ret['comment'] = 'Route53 record {0} set to be added.'.format(name)
ret['result'] = None
return ret
added = __salt__['boto_route53.add_record'](name, value, zone,
record_type, identifier,
ttl, region, key, keyid,
profile, wait_for_sync,
split_dns, private_zone)
if added:
ret['changes']['old'] = None
ret['changes']['new'] = {'name': name,
'value': value,
'record_type': record_type,
'ttl': ttl,
'identifier': identifier}
ret['comment'] = 'Added {0} Route53 record.'.format(name)
else:
ret['result'] = False
ret['comment'] = 'Failed to add {0} Route53 record.'.format(name)
return ret
elif record:
need_to_update = False
# Values can be a comma separated list and some values will end with a
# period (even if we set it without one). To easily check this we need
# to split and check with the period stripped from the input and what's
# in route53.
# TODO: figure out if this will cause us problems with some records.
_values = [x.rstrip('.') for x in value.split(',')]
_r_values = [x.rstrip('.') for x in record['value'].split(',')]
_values.sort()
_r_values.sort()
if _values != _r_values:
need_to_update = True
if identifier and identifier != record['identifier']:
need_to_update = True
if ttl and six.text_type(ttl) != six.text_type(record['ttl']):
need_to_update = True
if need_to_update:
if __opts__['test']:
ret['comment'] = 'Route53 record {0} set to be updated.'.format(name)
ret['result'] = None
return ret
updated = __salt__['boto_route53.update_record'](name, value, zone,
record_type,
identifier, ttl,
region, key,
keyid, profile,
wait_for_sync,
split_dns,
private_zone)
if updated:
ret['changes']['old'] = record
ret['changes']['new'] = {'name': name,
'value': value,
'record_type': record_type,
'ttl': ttl,
'identifier': identifier}
ret['comment'] = 'Updated {0} Route53 record.'.format(name)
else:
ret['result'] = False
ret['comment'] = 'Failed to update {0} Route53 record.'.format(name)
else:
ret['comment'] = '{0} exists.'.format(name)
return ret | Ensure the Route53 record is present.
name
Name of the record.
value
Value of the record. As a special case, you can pass in:
`private:<Name tag>` to have the function autodetermine the private IP
`public:<Name tag>` to have the function autodetermine the public IP
zone
The zone to create the record in.
record_type
The record type (A, NS, MX, TXT, etc.)
ttl
The time to live for the record.
identifier
The unique identifier to use for this record.
region
The region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string) that contains a dict
with region, key and keyid.
wait_for_sync
Wait for an INSYNC change status from Route53 before returning success.
split_dns
Route53 supports parallel public and private DNS zones with the same name.
private_zone
If using split_dns, specify if this is the private zone. | Below is the the instruction that describes the task:
### Input:
Ensure the Route53 record is present.
name
Name of the record.
value
Value of the record. As a special case, you can pass in:
`private:<Name tag>` to have the function autodetermine the private IP
`public:<Name tag>` to have the function autodetermine the public IP
zone
The zone to create the record in.
record_type
The record type (A, NS, MX, TXT, etc.)
ttl
The time to live for the record.
identifier
The unique identifier to use for this record.
region
The region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string) that contains a dict
with region, key and keyid.
wait_for_sync
Wait for an INSYNC change status from Route53 before returning success.
split_dns
Route53 supports parallel public and private DNS zones with the same name.
private_zone
If using split_dns, specify if this is the private zone.
### Response:
def present(name, value, zone, record_type, ttl=None, identifier=None, region=None, key=None,
keyid=None, profile=None, wait_for_sync=True, split_dns=False, private_zone=False):
'''
Ensure the Route53 record is present.
name
Name of the record.
value
Value of the record. As a special case, you can pass in:
`private:<Name tag>` to have the function autodetermine the private IP
`public:<Name tag>` to have the function autodetermine the public IP
zone
The zone to create the record in.
record_type
The record type (A, NS, MX, TXT, etc.)
ttl
The time to live for the record.
identifier
The unique identifier to use for this record.
region
The region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string) that contains a dict
with region, key and keyid.
wait_for_sync
Wait for an INSYNC change status from Route53 before returning success.
split_dns
Route53 supports parallel public and private DNS zones with the same name.
private_zone
If using split_dns, specify if this is the private zone.
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
# If a list is passed in for value, change it to a comma-separated string
# So it will work with subsequent boto module calls and string functions
if isinstance(value, list):
value = ','.join(value)
elif value.startswith('private:') or value.startswith('public:'):
name_tag = value.split(':', 1)[1]
in_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped')
r = __salt__['boto_ec2.find_instances'](name=name_tag,
return_objs=True,
in_states=in_states,
profile=profile)
if not r:
ret['comment'] = 'Error: instance with Name tag {0} not found'.format(name_tag)
ret['result'] = False
return ret
if len(r) > 1:
ret['comment'] = 'Error: Name tag {0} matched more than one instance'.format(name_tag)
ret['result'] = False
return ret
instance = r[0]
private_ip = getattr(instance, 'private_ip_address', None)
public_ip = getattr(instance, 'ip_address', None)
if value.startswith('private:'):
value = private_ip
log.info('Found private IP %s for instance %s', private_ip, name_tag)
else:
if public_ip is None:
ret['comment'] = 'Error: No Public IP assigned to instance with Name {0}'.format(name_tag)
ret['result'] = False
return ret
value = public_ip
log.info('Found public IP %s for instance %s', public_ip, name_tag)
try:
record = __salt__['boto_route53.get_record'](name, zone, record_type,
False, region, key, keyid,
profile, split_dns,
private_zone, identifier)
except SaltInvocationError as err:
ret['comment'] = 'Error: {0}'.format(err)
ret['result'] = False
return ret
if isinstance(record, dict) and not record:
if __opts__['test']:
ret['comment'] = 'Route53 record {0} set to be added.'.format(name)
ret['result'] = None
return ret
added = __salt__['boto_route53.add_record'](name, value, zone,
record_type, identifier,
ttl, region, key, keyid,
profile, wait_for_sync,
split_dns, private_zone)
if added:
ret['changes']['old'] = None
ret['changes']['new'] = {'name': name,
'value': value,
'record_type': record_type,
'ttl': ttl,
'identifier': identifier}
ret['comment'] = 'Added {0} Route53 record.'.format(name)
else:
ret['result'] = False
ret['comment'] = 'Failed to add {0} Route53 record.'.format(name)
return ret
elif record:
need_to_update = False
# Values can be a comma separated list and some values will end with a
# period (even if we set it without one). To easily check this we need
# to split and check with the period stripped from the input and what's
# in route53.
# TODO: figure out if this will cause us problems with some records.
_values = [x.rstrip('.') for x in value.split(',')]
_r_values = [x.rstrip('.') for x in record['value'].split(',')]
_values.sort()
_r_values.sort()
if _values != _r_values:
need_to_update = True
if identifier and identifier != record['identifier']:
need_to_update = True
if ttl and six.text_type(ttl) != six.text_type(record['ttl']):
need_to_update = True
if need_to_update:
if __opts__['test']:
ret['comment'] = 'Route53 record {0} set to be updated.'.format(name)
ret['result'] = None
return ret
updated = __salt__['boto_route53.update_record'](name, value, zone,
record_type,
identifier, ttl,
region, key,
keyid, profile,
wait_for_sync,
split_dns,
private_zone)
if updated:
ret['changes']['old'] = record
ret['changes']['new'] = {'name': name,
'value': value,
'record_type': record_type,
'ttl': ttl,
'identifier': identifier}
ret['comment'] = 'Updated {0} Route53 record.'.format(name)
else:
ret['result'] = False
ret['comment'] = 'Failed to update {0} Route53 record.'.format(name)
else:
ret['comment'] = '{0} exists.'.format(name)
return ret |
def add_gene(self, gene):
"""Add the information of a gene
This adds a gene dict to variant['genes']
Args:
gene (dict): A gene dictionary
"""
logger.debug("Adding gene {0} to variant {1}".format(
gene, self['variant_id']))
self['genes'].append(gene) | Add the information of a gene
This adds a gene dict to variant['genes']
Args:
gene (dict): A gene dictionary | Below is the the instruction that describes the task:
### Input:
Add the information of a gene
This adds a gene dict to variant['genes']
Args:
gene (dict): A gene dictionary
### Response:
def add_gene(self, gene):
"""Add the information of a gene
This adds a gene dict to variant['genes']
Args:
gene (dict): A gene dictionary
"""
logger.debug("Adding gene {0} to variant {1}".format(
gene, self['variant_id']))
self['genes'].append(gene) |
def execute(self, query, args=None):
"""
:return: Future[Cursor]
:rtype: Future
"""
self._ensure_conn()
cur = self._conn.cursor()
yield cur.execute(query, args)
raise Return(cur) | :return: Future[Cursor]
:rtype: Future | Below is the the instruction that describes the task:
### Input:
:return: Future[Cursor]
:rtype: Future
### Response:
def execute(self, query, args=None):
"""
:return: Future[Cursor]
:rtype: Future
"""
self._ensure_conn()
cur = self._conn.cursor()
yield cur.execute(query, args)
raise Return(cur) |
def _serialize(self):
"""
Serialize the ResponseObject. Returns a webob `Response`
object.
"""
# Do something appropriate if the response object is unbound
if self._defcode is None:
raise exceptions.UnboundResponse()
# Build the response
resp = self.response_class(request=self.req, status=self.code,
headerlist=self._headers.items())
# Do we have a body?
if self.result:
resp.content_type = self.content_type
resp.body = self.serializer(self.result)
# Return the response
return resp | Serialize the ResponseObject. Returns a webob `Response`
object. | Below is the the instruction that describes the task:
### Input:
Serialize the ResponseObject. Returns a webob `Response`
object.
### Response:
def _serialize(self):
"""
Serialize the ResponseObject. Returns a webob `Response`
object.
"""
# Do something appropriate if the response object is unbound
if self._defcode is None:
raise exceptions.UnboundResponse()
# Build the response
resp = self.response_class(request=self.req, status=self.code,
headerlist=self._headers.items())
# Do we have a body?
if self.result:
resp.content_type = self.content_type
resp.body = self.serializer(self.result)
# Return the response
return resp |
def similarity(state_a, state_b):
"""
The (L2) distance between the counts of the state addresses in the history of the path.
:param state_a: The first state to compare
:param state_b: The second state to compare
"""
count_a = Counter(state_a.history.bbl_addrs)
count_b = Counter(state_b.history.bbl_addrs)
normal_distance = sum((count_a.get(addr, 0) - count_b.get(addr, 0)) ** 2
for addr in set(list(count_a.keys()) + list(count_b.keys()))) ** 0.5
return 1.0 / (1 + normal_distance) | The (L2) distance between the counts of the state addresses in the history of the path.
:param state_a: The first state to compare
:param state_b: The second state to compare | Below is the the instruction that describes the task:
### Input:
The (L2) distance between the counts of the state addresses in the history of the path.
:param state_a: The first state to compare
:param state_b: The second state to compare
### Response:
def similarity(state_a, state_b):
"""
The (L2) distance between the counts of the state addresses in the history of the path.
:param state_a: The first state to compare
:param state_b: The second state to compare
"""
count_a = Counter(state_a.history.bbl_addrs)
count_b = Counter(state_b.history.bbl_addrs)
normal_distance = sum((count_a.get(addr, 0) - count_b.get(addr, 0)) ** 2
for addr in set(list(count_a.keys()) + list(count_b.keys()))) ** 0.5
return 1.0 / (1 + normal_distance) |
def Where_filter_gen(*data):
"""
Generate an sqlite "LIKE" filter generator based on the given data.
This functions arguments should be a N length series of field and data
tuples.
"""
where = []
def Fwhere(field, pattern):
"""Add where filter for the given field with the given pattern."""
where.append("WHERE {0} LIKE '{1}'".format(field, pattern))
def Fstring(field, string):
"""Add a where filter based on a string."""
Fwhere(field, "%{0}%".format(string if not isinstance(string, str)
else str(string)))
def Fdict(field, data):
"""Add where filters to search for dict keys and values."""
for key, value in data.items():
if value == '*':
Fstring(field, key)
else:
Fstring(field, "{0}:%{1}".format(key, value if not
isinstance(value, str)
else str(value)))
def Flist(field, data):
"""Add where filters to search for elements of a list."""
for elem in data:
Fstring(field, elem if not isinstance(elem, str) else
str(elem))
for field, data in data:
if isinstance(data, str):
Fstring(field, data)
elif isinstance(data, dict):
Fdict(field, data)
elif isinstance(data, list):
Flist(field, data)
return ' AND '.join(where) | Generate an sqlite "LIKE" filter generator based on the given data.
This functions arguments should be a N length series of field and data
tuples. | Below is the the instruction that describes the task:
### Input:
Generate an sqlite "LIKE" filter generator based on the given data.
This functions arguments should be a N length series of field and data
tuples.
### Response:
def Where_filter_gen(*data):
"""
Generate an sqlite "LIKE" filter generator based on the given data.
This functions arguments should be a N length series of field and data
tuples.
"""
where = []
def Fwhere(field, pattern):
"""Add where filter for the given field with the given pattern."""
where.append("WHERE {0} LIKE '{1}'".format(field, pattern))
def Fstring(field, string):
"""Add a where filter based on a string."""
Fwhere(field, "%{0}%".format(string if not isinstance(string, str)
else str(string)))
def Fdict(field, data):
"""Add where filters to search for dict keys and values."""
for key, value in data.items():
if value == '*':
Fstring(field, key)
else:
Fstring(field, "{0}:%{1}".format(key, value if not
isinstance(value, str)
else str(value)))
def Flist(field, data):
"""Add where filters to search for elements of a list."""
for elem in data:
Fstring(field, elem if not isinstance(elem, str) else
str(elem))
for field, data in data:
if isinstance(data, str):
Fstring(field, data)
elif isinstance(data, dict):
Fdict(field, data)
elif isinstance(data, list):
Flist(field, data)
return ' AND '.join(where) |
def get_lambda_to_execute(self):
"""
return a function that executes the function assigned to this job.
If job.track_progress is None (the default), the returned function accepts no argument
and simply needs to be called. If job.track_progress is True, an update_progress function
is passed in that can be used by the function to provide feedback progress back to the
job scheduling system.
:return: a function that executes the original function assigned to this job.
"""
def y(update_progress_func, cancel_job_func):
"""
Call the function stored in self.func, and passing in update_progress_func
or cancel_job_func depending if self.track_progress or self.cancellable is defined,
respectively.
:param update_progress_func: The callback for when the job updates its progress.
:param cancel_job_func: The function that the function has to call occasionally to see
if the user wants to cancel the currently running job.
:return: Any
"""
func = import_stringified_func(self.func)
extrafunckwargs = {}
args, kwargs = copy.copy(self.args), copy.copy(self.kwargs)
if self.track_progress:
extrafunckwargs["update_progress"] = partial(update_progress_func, self.job_id)
if self.cancellable:
extrafunckwargs["check_for_cancel"] = partial(cancel_job_func, self.job_id)
kwargs.update(extrafunckwargs)
return func(*args, **kwargs)
return y | return a function that executes the function assigned to this job.
If job.track_progress is None (the default), the returned function accepts no argument
and simply needs to be called. If job.track_progress is True, an update_progress function
is passed in that can be used by the function to provide feedback progress back to the
job scheduling system.
:return: a function that executes the original function assigned to this job. | Below is the the instruction that describes the task:
### Input:
return a function that executes the function assigned to this job.
If job.track_progress is None (the default), the returned function accepts no argument
and simply needs to be called. If job.track_progress is True, an update_progress function
is passed in that can be used by the function to provide feedback progress back to the
job scheduling system.
:return: a function that executes the original function assigned to this job.
### Response:
def get_lambda_to_execute(self):
"""
return a function that executes the function assigned to this job.
If job.track_progress is None (the default), the returned function accepts no argument
and simply needs to be called. If job.track_progress is True, an update_progress function
is passed in that can be used by the function to provide feedback progress back to the
job scheduling system.
:return: a function that executes the original function assigned to this job.
"""
def y(update_progress_func, cancel_job_func):
"""
Call the function stored in self.func, and passing in update_progress_func
or cancel_job_func depending if self.track_progress or self.cancellable is defined,
respectively.
:param update_progress_func: The callback for when the job updates its progress.
:param cancel_job_func: The function that the function has to call occasionally to see
if the user wants to cancel the currently running job.
:return: Any
"""
func = import_stringified_func(self.func)
extrafunckwargs = {}
args, kwargs = copy.copy(self.args), copy.copy(self.kwargs)
if self.track_progress:
extrafunckwargs["update_progress"] = partial(update_progress_func, self.job_id)
if self.cancellable:
extrafunckwargs["check_for_cancel"] = partial(cancel_job_func, self.job_id)
kwargs.update(extrafunckwargs)
return func(*args, **kwargs)
return y |
def handle_start_scan_command(self, scan_et):
""" Handles <start_scan> command.
@return: Response string for <start_scan> command.
"""
target_str = scan_et.attrib.get('target')
ports_str = scan_et.attrib.get('ports')
# For backward compatibility, if target and ports attributes are set,
# <targets> element is ignored.
if target_str is None or ports_str is None:
target_list = scan_et.find('targets')
if target_list is None or not target_list:
raise OSPDError('No targets or ports', 'start_scan')
else:
scan_targets = self.process_targets_element(target_list)
else:
scan_targets = []
for single_target in target_str_to_list(target_str):
scan_targets.append([single_target, ports_str, ''])
scan_id = scan_et.attrib.get('scan_id')
if scan_id is not None and scan_id != '' and not valid_uuid(scan_id):
raise OSPDError('Invalid scan_id UUID', 'start_scan')
try:
parallel = int(scan_et.attrib.get('parallel', '1'))
if parallel < 1 or parallel > 20:
parallel = 1
except ValueError:
raise OSPDError('Invalid value for parallel scans. '
'It must be a number', 'start_scan')
scanner_params = scan_et.find('scanner_params')
if scanner_params is None:
raise OSPDError('No scanner_params element', 'start_scan')
params = self._preprocess_scan_params(scanner_params)
# VTS is an optional element. If present should not be empty.
vt_selection = {}
scanner_vts = scan_et.find('vt_selection')
if scanner_vts is not None:
if not scanner_vts:
raise OSPDError('VTs list is empty', 'start_scan')
else:
vt_selection = self.process_vts_params(scanner_vts)
# Dry run case.
if 'dry_run' in params and int(params['dry_run']):
scan_func = self.dry_run_scan
scan_params = None
else:
scan_func = self.start_scan
scan_params = self.process_scan_params(params)
scan_id = self.create_scan(scan_id, scan_targets,
scan_params, vt_selection)
scan_process = multiprocessing.Process(target=scan_func,
args=(scan_id,
scan_targets,
parallel))
self.scan_processes[scan_id] = scan_process
scan_process.start()
id_ = Element('id')
id_.text = scan_id
return simple_response_str('start_scan', 200, 'OK', id_) | Handles <start_scan> command.
@return: Response string for <start_scan> command. | Below is the the instruction that describes the task:
### Input:
Handles <start_scan> command.
@return: Response string for <start_scan> command.
### Response:
def handle_start_scan_command(self, scan_et):
""" Handles <start_scan> command.
@return: Response string for <start_scan> command.
"""
target_str = scan_et.attrib.get('target')
ports_str = scan_et.attrib.get('ports')
# For backward compatibility, if target and ports attributes are set,
# <targets> element is ignored.
if target_str is None or ports_str is None:
target_list = scan_et.find('targets')
if target_list is None or not target_list:
raise OSPDError('No targets or ports', 'start_scan')
else:
scan_targets = self.process_targets_element(target_list)
else:
scan_targets = []
for single_target in target_str_to_list(target_str):
scan_targets.append([single_target, ports_str, ''])
scan_id = scan_et.attrib.get('scan_id')
if scan_id is not None and scan_id != '' and not valid_uuid(scan_id):
raise OSPDError('Invalid scan_id UUID', 'start_scan')
try:
parallel = int(scan_et.attrib.get('parallel', '1'))
if parallel < 1 or parallel > 20:
parallel = 1
except ValueError:
raise OSPDError('Invalid value for parallel scans. '
'It must be a number', 'start_scan')
scanner_params = scan_et.find('scanner_params')
if scanner_params is None:
raise OSPDError('No scanner_params element', 'start_scan')
params = self._preprocess_scan_params(scanner_params)
# VTS is an optional element. If present should not be empty.
vt_selection = {}
scanner_vts = scan_et.find('vt_selection')
if scanner_vts is not None:
if not scanner_vts:
raise OSPDError('VTs list is empty', 'start_scan')
else:
vt_selection = self.process_vts_params(scanner_vts)
# Dry run case.
if 'dry_run' in params and int(params['dry_run']):
scan_func = self.dry_run_scan
scan_params = None
else:
scan_func = self.start_scan
scan_params = self.process_scan_params(params)
scan_id = self.create_scan(scan_id, scan_targets,
scan_params, vt_selection)
scan_process = multiprocessing.Process(target=scan_func,
args=(scan_id,
scan_targets,
parallel))
self.scan_processes[scan_id] = scan_process
scan_process.start()
id_ = Element('id')
id_.text = scan_id
return simple_response_str('start_scan', 200, 'OK', id_) |
def _tokenize(sentence):
'''Tokenizer and Stemmer'''
_tokens = nltk.word_tokenize(sentence)
tokens = [stemmer.stem(tk) for tk in _tokens]
return tokens | Tokenizer and Stemmer | Below is the the instruction that describes the task:
### Input:
Tokenizer and Stemmer
### Response:
def _tokenize(sentence):
'''Tokenizer and Stemmer'''
_tokens = nltk.word_tokenize(sentence)
tokens = [stemmer.stem(tk) for tk in _tokens]
return tokens |
def _SkipFieldValue(tokenizer):
"""Skips over a field value.
Args:
tokenizer: A tokenizer to parse the field name and values.
Raises:
ParseError: In case an invalid field value is found.
"""
# String/bytes tokens can come in multiple adjacent string literals.
# If we can consume one, consume as many as we can.
if tokenizer.TryConsumeByteString():
while tokenizer.TryConsumeByteString():
pass
return
if (not tokenizer.TryConsumeIdentifier() and
not tokenizer.TryConsumeInt64() and
not tokenizer.TryConsumeUint64() and
not tokenizer.TryConsumeFloat()):
raise ParseError('Invalid field value: ' + tokenizer.token) | Skips over a field value.
Args:
tokenizer: A tokenizer to parse the field name and values.
Raises:
ParseError: In case an invalid field value is found. | Below is the the instruction that describes the task:
### Input:
Skips over a field value.
Args:
tokenizer: A tokenizer to parse the field name and values.
Raises:
ParseError: In case an invalid field value is found.
### Response:
def _SkipFieldValue(tokenizer):
"""Skips over a field value.
Args:
tokenizer: A tokenizer to parse the field name and values.
Raises:
ParseError: In case an invalid field value is found.
"""
# String/bytes tokens can come in multiple adjacent string literals.
# If we can consume one, consume as many as we can.
if tokenizer.TryConsumeByteString():
while tokenizer.TryConsumeByteString():
pass
return
if (not tokenizer.TryConsumeIdentifier() and
not tokenizer.TryConsumeInt64() and
not tokenizer.TryConsumeUint64() and
not tokenizer.TryConsumeFloat()):
raise ParseError('Invalid field value: ' + tokenizer.token) |
def get_body_from_file(kwds):
"""Reads message body if specified via filepath."""
if kwds["file"] and os.path.isfile(kwds["file"]):
kwds["body"] = open(kwds["file"], "r").read()
kwds["file"] = None | Reads message body if specified via filepath. | Below is the the instruction that describes the task:
### Input:
Reads message body if specified via filepath.
### Response:
def get_body_from_file(kwds):
"""Reads message body if specified via filepath."""
if kwds["file"] and os.path.isfile(kwds["file"]):
kwds["body"] = open(kwds["file"], "r").read()
kwds["file"] = None |
def ensure_context(**vars):
"""Ensures that a context is in the stack, creates one otherwise.
"""
ctx = _context_stack.top
stacked = False
if not ctx:
ctx = Context()
stacked = True
_context_stack.push(ctx)
ctx.update(vars)
try:
yield ctx
finally:
if stacked:
_context_stack.pop() | Ensures that a context is in the stack, creates one otherwise. | Below is the the instruction that describes the task:
### Input:
Ensures that a context is in the stack, creates one otherwise.
### Response:
def ensure_context(**vars):
"""Ensures that a context is in the stack, creates one otherwise.
"""
ctx = _context_stack.top
stacked = False
if not ctx:
ctx = Context()
stacked = True
_context_stack.push(ctx)
ctx.update(vars)
try:
yield ctx
finally:
if stacked:
_context_stack.pop() |
def oldest_peer(peers):
"""Determines who the oldest peer is by comparing unit numbers."""
local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
for peer in peers:
remote_unit_no = int(peer.split('/')[1])
if remote_unit_no < local_unit_no:
return False
return True | Determines who the oldest peer is by comparing unit numbers. | Below is the the instruction that describes the task:
### Input:
Determines who the oldest peer is by comparing unit numbers.
### Response:
def oldest_peer(peers):
"""Determines who the oldest peer is by comparing unit numbers."""
local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
for peer in peers:
remote_unit_no = int(peer.split('/')[1])
if remote_unit_no < local_unit_no:
return False
return True |
def cast(cls, value_type, value, visitor=None, **kwargs):
"""Cast is for visitors where you are visiting some random data
structure (perhaps returned by a previous ``VisitorPattern.visit()``
operation), and you want to convert back to the value type.
This function also takes positional arguments:
``value_type=``\ *RecordType*
The type to cast to.
``value=``\ *object*
``visitor=``\ *Visitor.Options*
Specifies the visitor options, which customizes the descent
and reduction.
"""
if visitor is None:
visitor = cls.Visitor(
cls.grok, cls.reverse, cls.collect, cls.produce,
**kwargs)
return cls.map(visitor, value, value_type) | Cast is for visitors where you are visiting some random data
structure (perhaps returned by a previous ``VisitorPattern.visit()``
operation), and you want to convert back to the value type.
This function also takes positional arguments:
``value_type=``\ *RecordType*
The type to cast to.
``value=``\ *object*
``visitor=``\ *Visitor.Options*
Specifies the visitor options, which customizes the descent
and reduction. | Below is the the instruction that describes the task:
### Input:
Cast is for visitors where you are visiting some random data
structure (perhaps returned by a previous ``VisitorPattern.visit()``
operation), and you want to convert back to the value type.
This function also takes positional arguments:
``value_type=``\ *RecordType*
The type to cast to.
``value=``\ *object*
``visitor=``\ *Visitor.Options*
Specifies the visitor options, which customizes the descent
and reduction.
### Response:
def cast(cls, value_type, value, visitor=None, **kwargs):
"""Cast is for visitors where you are visiting some random data
structure (perhaps returned by a previous ``VisitorPattern.visit()``
operation), and you want to convert back to the value type.
This function also takes positional arguments:
``value_type=``\ *RecordType*
The type to cast to.
``value=``\ *object*
``visitor=``\ *Visitor.Options*
Specifies the visitor options, which customizes the descent
and reduction.
"""
if visitor is None:
visitor = cls.Visitor(
cls.grok, cls.reverse, cls.collect, cls.produce,
**kwargs)
return cls.map(visitor, value, value_type) |
def to_datetime_field(formatter):
"""
Returns a callable instance that will convert a string to a DateTime.
:param formatter: String that represents data format for parsing.
:return: instance of the DateTimeConverter.
"""
class DateTimeConverter(object):
def __init__(self, formatter):
self.formatter = formatter
def __call__(self, value):
if isinstance(value, string_types):
value = parser.parse(value)
return value
return DateTimeConverter(formatter) | Returns a callable instance that will convert a string to a DateTime.
:param formatter: String that represents data format for parsing.
:return: instance of the DateTimeConverter. | Below is the the instruction that describes the task:
### Input:
Returns a callable instance that will convert a string to a DateTime.
:param formatter: String that represents data format for parsing.
:return: instance of the DateTimeConverter.
### Response:
def to_datetime_field(formatter):
"""
Returns a callable instance that will convert a string to a DateTime.
:param formatter: String that represents data format for parsing.
:return: instance of the DateTimeConverter.
"""
class DateTimeConverter(object):
def __init__(self, formatter):
self.formatter = formatter
def __call__(self, value):
if isinstance(value, string_types):
value = parser.parse(value)
return value
return DateTimeConverter(formatter) |
def call(self, function, args=(), kwargs={}):
"""Call a method given some args and kwargs.
function -- string containing the method name to call
args -- arguments, either a list or tuple
returns the result of the method.
May raise an exception if the method isn't in the dict.
"""
return self.functions[function](*args, **kwargs) | Call a method given some args and kwargs.
function -- string containing the method name to call
args -- arguments, either a list or tuple
returns the result of the method.
May raise an exception if the method isn't in the dict. | Below is the the instruction that describes the task:
### Input:
Call a method given some args and kwargs.
function -- string containing the method name to call
args -- arguments, either a list or tuple
returns the result of the method.
May raise an exception if the method isn't in the dict.
### Response:
def call(self, function, args=(), kwargs={}):
"""Call a method given some args and kwargs.
function -- string containing the method name to call
args -- arguments, either a list or tuple
returns the result of the method.
May raise an exception if the method isn't in the dict.
"""
return self.functions[function](*args, **kwargs) |
def scoring_history(self):
"""
Retrieve Model Score History.
:returns: The score history as an H2OTwoDimTable or a Pandas DataFrame.
"""
model = self._model_json["output"]
if "scoring_history" in model and model["scoring_history"] is not None:
return model["scoring_history"].as_data_frame()
print("No score history for this model") | Retrieve Model Score History.
:returns: The score history as an H2OTwoDimTable or a Pandas DataFrame. | Below is the the instruction that describes the task:
### Input:
Retrieve Model Score History.
:returns: The score history as an H2OTwoDimTable or a Pandas DataFrame.
### Response:
def scoring_history(self):
"""
Retrieve Model Score History.
:returns: The score history as an H2OTwoDimTable or a Pandas DataFrame.
"""
model = self._model_json["output"]
if "scoring_history" in model and model["scoring_history"] is not None:
return model["scoring_history"].as_data_frame()
print("No score history for this model") |
def _obtain_region(self, a, offset, size, flags, is_recursive):
"""Utilty to create a new region - for more information on the parameters,
see MapCursor.use_region.
:param a: A regions (a)rray
:return: The newly created region"""
if self._memory_size + size > self._max_memory_size:
self._collect_lru_region(size)
# END handle collection
r = None
if a:
assert len(a) == 1
r = a[0]
else:
try:
r = self.MapRegionCls(a.path_or_fd(), 0, sys.maxsize, flags)
except Exception:
# apparently we are out of system resources or hit a limit
# As many more operations are likely to fail in that condition (
# like reading a file from disk, etc) we free up as much as possible
# As this invalidates our insert position, we have to recurse here
if is_recursive:
# we already tried this, and still have no success in obtaining
# a mapping. This is an exception, so we propagate it
raise
# END handle existing recursion
self._collect_lru_region(0)
return self._obtain_region(a, offset, size, flags, True)
# END handle exceptions
self._handle_count += 1
self._memory_size += r.size()
a.append(r)
# END handle array
assert r.includes_ofs(offset)
return r | Utilty to create a new region - for more information on the parameters,
see MapCursor.use_region.
:param a: A regions (a)rray
:return: The newly created region | Below is the the instruction that describes the task:
### Input:
Utilty to create a new region - for more information on the parameters,
see MapCursor.use_region.
:param a: A regions (a)rray
:return: The newly created region
### Response:
def _obtain_region(self, a, offset, size, flags, is_recursive):
"""Utilty to create a new region - for more information on the parameters,
see MapCursor.use_region.
:param a: A regions (a)rray
:return: The newly created region"""
if self._memory_size + size > self._max_memory_size:
self._collect_lru_region(size)
# END handle collection
r = None
if a:
assert len(a) == 1
r = a[0]
else:
try:
r = self.MapRegionCls(a.path_or_fd(), 0, sys.maxsize, flags)
except Exception:
# apparently we are out of system resources or hit a limit
# As many more operations are likely to fail in that condition (
# like reading a file from disk, etc) we free up as much as possible
# As this invalidates our insert position, we have to recurse here
if is_recursive:
# we already tried this, and still have no success in obtaining
# a mapping. This is an exception, so we propagate it
raise
# END handle existing recursion
self._collect_lru_region(0)
return self._obtain_region(a, offset, size, flags, True)
# END handle exceptions
self._handle_count += 1
self._memory_size += r.size()
a.append(r)
# END handle array
assert r.includes_ofs(offset)
return r |
def isNot(self, value):
"""
Sets the operator type to Query.Op.IsNot and sets the
value to the inputted value.
:param value <variant>
:return <Query>
:sa __ne__
:usage |>>> from orb import Query as Q
|>>> query = Q('test').isNot(1)
|>>> print query
|test is not 1
"""
newq = self.copy()
newq.setOp(Query.Op.IsNot)
newq.setValue(value)
return newq | Sets the operator type to Query.Op.IsNot and sets the
value to the inputted value.
:param value <variant>
:return <Query>
:sa __ne__
:usage |>>> from orb import Query as Q
|>>> query = Q('test').isNot(1)
|>>> print query
|test is not 1 | Below is the the instruction that describes the task:
### Input:
Sets the operator type to Query.Op.IsNot and sets the
value to the inputted value.
:param value <variant>
:return <Query>
:sa __ne__
:usage |>>> from orb import Query as Q
|>>> query = Q('test').isNot(1)
|>>> print query
|test is not 1
### Response:
def isNot(self, value):
"""
Sets the operator type to Query.Op.IsNot and sets the
value to the inputted value.
:param value <variant>
:return <Query>
:sa __ne__
:usage |>>> from orb import Query as Q
|>>> query = Q('test').isNot(1)
|>>> print query
|test is not 1
"""
newq = self.copy()
newq.setOp(Query.Op.IsNot)
newq.setValue(value)
return newq |
def get_scoped_variable_from_name(self, name):
""" Get the scoped variable for a unique name
:param name: the unique name of the scoped variable
:return: the scoped variable specified by the name
:raises exceptions.AttributeError: if the name is not in the the scoped_variables dictionary
"""
for scoped_variable_id, scoped_variable in self.scoped_variables.items():
if scoped_variable.name == name:
return scoped_variable_id
raise AttributeError("Name %s is not in scoped_variables dictionary", name) | Get the scoped variable for a unique name
:param name: the unique name of the scoped variable
:return: the scoped variable specified by the name
:raises exceptions.AttributeError: if the name is not in the the scoped_variables dictionary | Below is the the instruction that describes the task:
### Input:
Get the scoped variable for a unique name
:param name: the unique name of the scoped variable
:return: the scoped variable specified by the name
:raises exceptions.AttributeError: if the name is not in the the scoped_variables dictionary
### Response:
def get_scoped_variable_from_name(self, name):
""" Get the scoped variable for a unique name
:param name: the unique name of the scoped variable
:return: the scoped variable specified by the name
:raises exceptions.AttributeError: if the name is not in the the scoped_variables dictionary
"""
for scoped_variable_id, scoped_variable in self.scoped_variables.items():
if scoped_variable.name == name:
return scoped_variable_id
raise AttributeError("Name %s is not in scoped_variables dictionary", name) |
def clear_max_attempts(self):
"""stub"""
if (self.get_max_attempts_metadata().is_read_only() or
self.get_max_attempts_metadata().is_required()):
raise NoAccess()
self.my_osid_object_form._my_map['maxAttempts'] = \
list(self._max_attempts_metadata['default_integer_values'])[0] | stub | Below is the the instruction that describes the task:
### Input:
stub
### Response:
def clear_max_attempts(self):
"""stub"""
if (self.get_max_attempts_metadata().is_read_only() or
self.get_max_attempts_metadata().is_required()):
raise NoAccess()
self.my_osid_object_form._my_map['maxAttempts'] = \
list(self._max_attempts_metadata['default_integer_values'])[0] |
def to_oncotator(self):
"""Returns mutation in oncotator input format. Assumes mutations have
vcf/mpileup style positions."""
if self.type == ".":
ref = self.ref
alt = self.change
start = self.pos
end = self.pos
elif self.type == "-":
ref = self.change
alt = "-"
start = self.pos + 1
end = start + len(self.change)
elif self.type == "+":
ref = "-"
alt = self.change
start = self.pos
end = start + len(self.change)
else:
raise(Exception("Unexpected mutation type: {}".format(self.type)))
return "{chrom}\t{start}\t{end}\t{ref}\t{alt}".format(chrom=self.chrom, start=start,
end=end, ref=ref, alt=alt) | Returns mutation in oncotator input format. Assumes mutations have
vcf/mpileup style positions. | Below is the the instruction that describes the task:
### Input:
Returns mutation in oncotator input format. Assumes mutations have
vcf/mpileup style positions.
### Response:
def to_oncotator(self):
"""Returns mutation in oncotator input format. Assumes mutations have
vcf/mpileup style positions."""
if self.type == ".":
ref = self.ref
alt = self.change
start = self.pos
end = self.pos
elif self.type == "-":
ref = self.change
alt = "-"
start = self.pos + 1
end = start + len(self.change)
elif self.type == "+":
ref = "-"
alt = self.change
start = self.pos
end = start + len(self.change)
else:
raise(Exception("Unexpected mutation type: {}".format(self.type)))
return "{chrom}\t{start}\t{end}\t{ref}\t{alt}".format(chrom=self.chrom, start=start,
end=end, ref=ref, alt=alt) |
def remove(self, paths, **params):
"""
Delete paths from the watched list.
"""
log = self._getparam('log', self._discard, **params)
commit = self._getparam('commit', True, **params)
if type(paths) is not list:
paths = [paths]
rebuild = False
for path in paths:
if path in self.paths_pending:
del self.paths_pending[path]
if path in self.paths:
del self.paths[path]
rebuild = True
else:
log.error("Attempt to remove %r which was never added", path)
raise Exception("Path %r has never been added" % path)
if commit and rebuild:
self.commit(**params) | Delete paths from the watched list. | Below is the the instruction that describes the task:
### Input:
Delete paths from the watched list.
### Response:
def remove(self, paths, **params):
"""
Delete paths from the watched list.
"""
log = self._getparam('log', self._discard, **params)
commit = self._getparam('commit', True, **params)
if type(paths) is not list:
paths = [paths]
rebuild = False
for path in paths:
if path in self.paths_pending:
del self.paths_pending[path]
if path in self.paths:
del self.paths[path]
rebuild = True
else:
log.error("Attempt to remove %r which was never added", path)
raise Exception("Path %r has never been added" % path)
if commit and rebuild:
self.commit(**params) |
def asList( self ):
"""
Returns the parse results as a nested list of matching tokens, all converted to strings.
Example::
patt = OneOrMore(Word(alphas))
result = patt.parseString("sldkj lsdkj sldkj")
# even though the result prints in string-like form, it is actually a pyparsing ParseResults
print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
# Use asList() to create an actual list
result_list = result.asList()
print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
"""
return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist] | Returns the parse results as a nested list of matching tokens, all converted to strings.
Example::
patt = OneOrMore(Word(alphas))
result = patt.parseString("sldkj lsdkj sldkj")
# even though the result prints in string-like form, it is actually a pyparsing ParseResults
print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
# Use asList() to create an actual list
result_list = result.asList()
print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj'] | Below is the the instruction that describes the task:
### Input:
Returns the parse results as a nested list of matching tokens, all converted to strings.
Example::
patt = OneOrMore(Word(alphas))
result = patt.parseString("sldkj lsdkj sldkj")
# even though the result prints in string-like form, it is actually a pyparsing ParseResults
print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
# Use asList() to create an actual list
result_list = result.asList()
print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
### Response:
def asList( self ):
"""
Returns the parse results as a nested list of matching tokens, all converted to strings.
Example::
patt = OneOrMore(Word(alphas))
result = patt.parseString("sldkj lsdkj sldkj")
# even though the result prints in string-like form, it is actually a pyparsing ParseResults
print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
# Use asList() to create an actual list
result_list = result.asList()
print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
"""
return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist] |
def _get_zone_name(self):
"""Get receivers zone name if not set yet."""
if self._name is None:
# Collect tags for AppCommand.xml call
tags = ["GetZoneName"]
# Execute call
root = self.exec_appcommand_post(tags)
# Check result
if root is None:
_LOGGER.error("Getting ZoneName failed.")
else:
zone = self._get_own_zone()
try:
name = root.find(
"./cmd/{zone}".format(zone=zone)).text
except AttributeError:
_LOGGER.error("No ZoneName found for zone %s", self.zone)
else:
self._name = name.strip() | Get receivers zone name if not set yet. | Below is the the instruction that describes the task:
### Input:
Get receivers zone name if not set yet.
### Response:
def _get_zone_name(self):
"""Get receivers zone name if not set yet."""
if self._name is None:
# Collect tags for AppCommand.xml call
tags = ["GetZoneName"]
# Execute call
root = self.exec_appcommand_post(tags)
# Check result
if root is None:
_LOGGER.error("Getting ZoneName failed.")
else:
zone = self._get_own_zone()
try:
name = root.find(
"./cmd/{zone}".format(zone=zone)).text
except AttributeError:
_LOGGER.error("No ZoneName found for zone %s", self.zone)
else:
self._name = name.strip() |
def fetch(reload: bool = False) -> dict:
"""
Returns a dictionary containing all of the available Cauldron commands
currently registered. This data is cached for performance. Unless the
reload argument is set to True, the command list will only be generated
the first time this function is called.
:param reload:
Whether or not to disregard any cached command data and generate a
new dictionary of available commands.
:return:
A dictionary where the keys are the name of the commands and the
values are the modules for the command .
"""
if len(list(COMMANDS.keys())) > 0 and not reload:
return COMMANDS
COMMANDS.clear()
for key in dir(commands):
e = getattr(commands, key)
if e and hasattr(e, 'NAME') and hasattr(e, 'DESCRIPTION'):
COMMANDS[e.NAME] = e
return dict(COMMANDS.items()) | Returns a dictionary containing all of the available Cauldron commands
currently registered. This data is cached for performance. Unless the
reload argument is set to True, the command list will only be generated
the first time this function is called.
:param reload:
Whether or not to disregard any cached command data and generate a
new dictionary of available commands.
:return:
A dictionary where the keys are the name of the commands and the
values are the modules for the command . | Below is the the instruction that describes the task:
### Input:
Returns a dictionary containing all of the available Cauldron commands
currently registered. This data is cached for performance. Unless the
reload argument is set to True, the command list will only be generated
the first time this function is called.
:param reload:
Whether or not to disregard any cached command data and generate a
new dictionary of available commands.
:return:
A dictionary where the keys are the name of the commands and the
values are the modules for the command .
### Response:
def fetch(reload: bool = False) -> dict:
"""
Returns a dictionary containing all of the available Cauldron commands
currently registered. This data is cached for performance. Unless the
reload argument is set to True, the command list will only be generated
the first time this function is called.
:param reload:
Whether or not to disregard any cached command data and generate a
new dictionary of available commands.
:return:
A dictionary where the keys are the name of the commands and the
values are the modules for the command .
"""
if len(list(COMMANDS.keys())) > 0 and not reload:
return COMMANDS
COMMANDS.clear()
for key in dir(commands):
e = getattr(commands, key)
if e and hasattr(e, 'NAME') and hasattr(e, 'DESCRIPTION'):
COMMANDS[e.NAME] = e
return dict(COMMANDS.items()) |
def p_lexpr(p):
""" lexpr : ID EQ
| LET ID EQ
| ARRAY_ID EQ
| LET ARRAY_ID EQ
"""
global LET_ASSIGNMENT
LET_ASSIGNMENT = True # Mark we're about to start a LET sentence
if p[1] == 'LET':
p[0] = p[2]
i = 2
else:
p[0] = p[1]
i = 1
SYMBOL_TABLE.access_id(p[i], p.lineno(i)) | lexpr : ID EQ
| LET ID EQ
| ARRAY_ID EQ
| LET ARRAY_ID EQ | Below is the the instruction that describes the task:
### Input:
lexpr : ID EQ
| LET ID EQ
| ARRAY_ID EQ
| LET ARRAY_ID EQ
### Response:
def p_lexpr(p):
""" lexpr : ID EQ
| LET ID EQ
| ARRAY_ID EQ
| LET ARRAY_ID EQ
"""
global LET_ASSIGNMENT
LET_ASSIGNMENT = True # Mark we're about to start a LET sentence
if p[1] == 'LET':
p[0] = p[2]
i = 2
else:
p[0] = p[1]
i = 1
SYMBOL_TABLE.access_id(p[i], p.lineno(i)) |
def show_in_external_file_explorer(fnames=None):
"""Show files in external file explorer
Args:
fnames (list): Names of files to show.
"""
if not isinstance(fnames, (tuple, list)):
fnames = [fnames]
for fname in fnames:
open_file_in_external_explorer(fname) | Show files in external file explorer
Args:
fnames (list): Names of files to show. | Below is the the instruction that describes the task:
### Input:
Show files in external file explorer
Args:
fnames (list): Names of files to show.
### Response:
def show_in_external_file_explorer(fnames=None):
"""Show files in external file explorer
Args:
fnames (list): Names of files to show.
"""
if not isinstance(fnames, (tuple, list)):
fnames = [fnames]
for fname in fnames:
open_file_in_external_explorer(fname) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.