code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def get_runtime_value(self, resourceid: int):
"""Get runtime value of specified resource it
The returned value will be boolean, integer or float
Return None if resource cannot be found or on error
"""
payload = """<getRuntimeValue1 xmlns="utcs">{id}</getRuntimeValue1>
""".format(id=resourceid)
xdoc = self.connection.soap_action('/ws/ResourceInteractionService',
'getResourceValue',
payload)
if not xdoc:
return False
boolresult = xdoc.find(
'./SOAP-ENV:Body/ns1:getRuntimeValue2/ns1:value/ns2:value',
IHCSoapClient.ihcns)
if boolresult is not None:
return boolresult.text == "true"
intresult = xdoc.find(
'./SOAP-ENV:Body/ns1:getRuntimeValue2/ns1:value/ns2:integer',
IHCSoapClient.ihcns)
if intresult is not None:
return int(intresult.text)
floatresult = xdoc.find(
('./SOAP-ENV:Body/ns1:getRuntimeValue2/'
'ns1:value/ns2:floatingPointValue'),
IHCSoapClient.ihcns)
if floatresult is not None:
return float(floatresult.text)
enumNameResut = xdoc.find(
('./SOAP-ENV:Body/ns1:getRuntimeValue2/'
'ns1:value/ns2:enumName'),
IHCSoapClient.ihcns)
if enumNameResut is not None:
return enumNameResut.text
return False | Get runtime value of specified resource it
The returned value will be boolean, integer or float
Return None if resource cannot be found or on error | Below is the instruction that describes the task:
### Input:
Get runtime value of specified resource it
The returned value will be boolean, integer or float
Return None if resource cannot be found or on error
### Response:
def get_runtime_value(self, resourceid: int):
"""Get runtime value of specified resource it
The returned value will be boolean, integer or float
Return None if resource cannot be found or on error
"""
payload = """<getRuntimeValue1 xmlns="utcs">{id}</getRuntimeValue1>
""".format(id=resourceid)
xdoc = self.connection.soap_action('/ws/ResourceInteractionService',
'getResourceValue',
payload)
if not xdoc:
return False
boolresult = xdoc.find(
'./SOAP-ENV:Body/ns1:getRuntimeValue2/ns1:value/ns2:value',
IHCSoapClient.ihcns)
if boolresult is not None:
return boolresult.text == "true"
intresult = xdoc.find(
'./SOAP-ENV:Body/ns1:getRuntimeValue2/ns1:value/ns2:integer',
IHCSoapClient.ihcns)
if intresult is not None:
return int(intresult.text)
floatresult = xdoc.find(
('./SOAP-ENV:Body/ns1:getRuntimeValue2/'
'ns1:value/ns2:floatingPointValue'),
IHCSoapClient.ihcns)
if floatresult is not None:
return float(floatresult.text)
enumNameResut = xdoc.find(
('./SOAP-ENV:Body/ns1:getRuntimeValue2/'
'ns1:value/ns2:enumName'),
IHCSoapClient.ihcns)
if enumNameResut is not None:
return enumNameResut.text
return False |
def show_top(queue=False, **kwargs):
'''
Return the top data that the minion will use for a highstate
CLI Example:
.. code-block:: bash
salt '*' state.show_top
'''
if 'env' in kwargs:
# "env" is not supported; Use "saltenv".
kwargs.pop('env')
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
try:
st_ = salt.state.HighState(opts,
proxy=__proxy__,
initial_pillar=_get_initial_pillar(opts))
except NameError:
st_ = salt.state.HighState(opts, initial_pillar=_get_initial_pillar(opts))
errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
if errors:
__context__['retcode'] = salt.defaults.exitcodes.EX_PILLAR_FAILURE
raise CommandExecutionError('Pillar failed to render', info=errors)
errors = []
top_ = st_.get_top()
errors += st_.verify_tops(top_)
if errors:
__context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR
return errors
matches = st_.top_matches(top_)
return matches | Return the top data that the minion will use for a highstate
CLI Example:
.. code-block:: bash
salt '*' state.show_top | Below is the instruction that describes the task:
### Input:
Return the top data that the minion will use for a highstate
CLI Example:
.. code-block:: bash
salt '*' state.show_top
### Response:
def show_top(queue=False, **kwargs):
'''
Return the top data that the minion will use for a highstate
CLI Example:
.. code-block:: bash
salt '*' state.show_top
'''
if 'env' in kwargs:
# "env" is not supported; Use "saltenv".
kwargs.pop('env')
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
try:
st_ = salt.state.HighState(opts,
proxy=__proxy__,
initial_pillar=_get_initial_pillar(opts))
except NameError:
st_ = salt.state.HighState(opts, initial_pillar=_get_initial_pillar(opts))
errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
if errors:
__context__['retcode'] = salt.defaults.exitcodes.EX_PILLAR_FAILURE
raise CommandExecutionError('Pillar failed to render', info=errors)
errors = []
top_ = st_.get_top()
errors += st_.verify_tops(top_)
if errors:
__context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR
return errors
matches = st_.top_matches(top_)
return matches |
def DEFINE_integer_list(self, name, default, help, constant=False):
"""A helper for defining lists of integer options."""
self.AddOption(
type_info.List(
name=name,
default=default,
description=help,
validator=type_info.Integer()),
            constant=constant) | A helper for defining lists of integer options. | Below is the instruction that describes the task:
### Input:
A helper for defining lists of integer options.
### Response:
def DEFINE_integer_list(self, name, default, help, constant=False):
"""A helper for defining lists of integer options."""
self.AddOption(
type_info.List(
name=name,
default=default,
description=help,
validator=type_info.Integer()),
constant=constant) |
def _return_tables(self, mag, imt, val_type):
"""
Returns the vector of ground motions or standard deviations
corresponding to the specific magnitude and intensity measure type.
:param val_type:
String indicating the type of data {"IMLs", "Total", "Inter" etc}
"""
if imt.name in 'PGA PGV':
# Get scalar imt
if val_type == "IMLs":
iml_table = self.imls[imt.name][:]
else:
iml_table = self.stddevs[val_type][imt.name][:]
n_d, n_s, n_m = iml_table.shape
iml_table = iml_table.reshape([n_d, n_m])
else:
if val_type == "IMLs":
periods = self.imls["T"][:]
iml_table = self.imls["SA"][:]
else:
periods = self.stddevs[val_type]["T"][:]
iml_table = self.stddevs[val_type]["SA"][:]
low_period = round(periods[0], 7)
high_period = round(periods[-1], 7)
if (round(imt.period, 7) < low_period) or (
round(imt.period, 7) > high_period):
raise ValueError("Spectral period %.3f outside of valid range "
"(%.3f to %.3f)" % (imt.period, periods[0],
periods[-1]))
# Apply log-log interpolation for spectral period
interpolator = interp1d(numpy.log10(periods),
numpy.log10(iml_table),
axis=1)
iml_table = 10. ** interpolator(numpy.log10(imt.period))
return self.apply_magnitude_interpolation(mag, iml_table) | Returns the vector of ground motions or standard deviations
corresponding to the specific magnitude and intensity measure type.
:param val_type:
String indicating the type of data {"IMLs", "Total", "Inter" etc} | Below is the instruction that describes the task:
### Input:
Returns the vector of ground motions or standard deviations
corresponding to the specific magnitude and intensity measure type.
:param val_type:
String indicating the type of data {"IMLs", "Total", "Inter" etc}
### Response:
def _return_tables(self, mag, imt, val_type):
"""
Returns the vector of ground motions or standard deviations
corresponding to the specific magnitude and intensity measure type.
:param val_type:
String indicating the type of data {"IMLs", "Total", "Inter" etc}
"""
if imt.name in 'PGA PGV':
# Get scalar imt
if val_type == "IMLs":
iml_table = self.imls[imt.name][:]
else:
iml_table = self.stddevs[val_type][imt.name][:]
n_d, n_s, n_m = iml_table.shape
iml_table = iml_table.reshape([n_d, n_m])
else:
if val_type == "IMLs":
periods = self.imls["T"][:]
iml_table = self.imls["SA"][:]
else:
periods = self.stddevs[val_type]["T"][:]
iml_table = self.stddevs[val_type]["SA"][:]
low_period = round(periods[0], 7)
high_period = round(periods[-1], 7)
if (round(imt.period, 7) < low_period) or (
round(imt.period, 7) > high_period):
raise ValueError("Spectral period %.3f outside of valid range "
"(%.3f to %.3f)" % (imt.period, periods[0],
periods[-1]))
# Apply log-log interpolation for spectral period
interpolator = interp1d(numpy.log10(periods),
numpy.log10(iml_table),
axis=1)
iml_table = 10. ** interpolator(numpy.log10(imt.period))
return self.apply_magnitude_interpolation(mag, iml_table) |
def dataReceived(self, data):
"""
Callback function that is called when data has been received
on the connection.
Reaches back to the Connection object and queues the data for
processing.
"""
self.connection._iobuf.write(data)
self.connection.handle_read() | Callback function that is called when data has been received
on the connection.
Reaches back to the Connection object and queues the data for
processing. | Below is the instruction that describes the task:
### Input:
Callback function that is called when data has been received
on the connection.
Reaches back to the Connection object and queues the data for
processing.
### Response:
def dataReceived(self, data):
"""
Callback function that is called when data has been received
on the connection.
Reaches back to the Connection object and queues the data for
processing.
"""
self.connection._iobuf.write(data)
self.connection.handle_read() |
def full_award_group_funding_source(tag):
"""
Given a funding group element
Find the award group funding sources, one for each
item found in the get_funding_group section
"""
award_group_funding_sources = []
funding_source_nodes = extract_nodes(tag, "funding-source")
for funding_source_node in funding_source_nodes:
award_group_funding_source = {}
institution_nodes = extract_nodes(funding_source_node, 'institution')
institution_node = first(institution_nodes)
if institution_node:
award_group_funding_source['institution'] = node_text(institution_node)
if 'content-type' in institution_node.attrs:
award_group_funding_source['institution-type'] = institution_node['content-type']
institution_id_nodes = extract_nodes(funding_source_node, 'institution-id')
institution_id_node = first(institution_id_nodes)
if institution_id_node:
award_group_funding_source['institution-id'] = node_text(institution_id_node)
if 'institution-id-type' in institution_id_node.attrs:
award_group_funding_source['institution-id-type'] = institution_id_node['institution-id-type']
award_group_funding_sources.append(award_group_funding_source)
return award_group_funding_sources | Given a funding group element
Find the award group funding sources, one for each
item found in the get_funding_group section | Below is the instruction that describes the task:
### Input:
Given a funding group element
Find the award group funding sources, one for each
item found in the get_funding_group section
### Response:
def full_award_group_funding_source(tag):
"""
Given a funding group element
Find the award group funding sources, one for each
item found in the get_funding_group section
"""
award_group_funding_sources = []
funding_source_nodes = extract_nodes(tag, "funding-source")
for funding_source_node in funding_source_nodes:
award_group_funding_source = {}
institution_nodes = extract_nodes(funding_source_node, 'institution')
institution_node = first(institution_nodes)
if institution_node:
award_group_funding_source['institution'] = node_text(institution_node)
if 'content-type' in institution_node.attrs:
award_group_funding_source['institution-type'] = institution_node['content-type']
institution_id_nodes = extract_nodes(funding_source_node, 'institution-id')
institution_id_node = first(institution_id_nodes)
if institution_id_node:
award_group_funding_source['institution-id'] = node_text(institution_id_node)
if 'institution-id-type' in institution_id_node.attrs:
award_group_funding_source['institution-id-type'] = institution_id_node['institution-id-type']
award_group_funding_sources.append(award_group_funding_source)
return award_group_funding_sources |
def _get_mixed_actions(tableaux, bases):
"""
From `tableaux` and `bases`, extract non-slack basic variables and
return a tuple of the corresponding, normalized mixed actions.
Parameters
----------
tableaux : tuple(ndarray(float, ndim=2))
Tuple of two arrays containing the tableaux, of shape (n, m+n+1)
and (m, m+n+1), respectively.
bases : tuple(ndarray(int, ndim=1))
Tuple of two arrays containing the bases, of shape (n,) and
(m,), respectively.
Returns
-------
tuple(ndarray(float, ndim=1))
Tuple of mixed actions as given by the non-slack basic variables
in the tableaux.
"""
nums_actions = tableaux[1].shape[0], tableaux[0].shape[0]
num = nums_actions[0] + nums_actions[1]
out = np.zeros(num)
for pl, (start, stop) in enumerate(zip((0, nums_actions[0]),
(nums_actions[0], num))):
sum_ = 0.
for i in range(nums_actions[1-pl]):
k = bases[pl][i]
if start <= k < stop:
out[k] = tableaux[pl][i, -1]
sum_ += tableaux[pl][i, -1]
if sum_ != 0:
out[start:stop] /= sum_
return out[:nums_actions[0]], out[nums_actions[0]:] | From `tableaux` and `bases`, extract non-slack basic variables and
return a tuple of the corresponding, normalized mixed actions.
Parameters
----------
tableaux : tuple(ndarray(float, ndim=2))
Tuple of two arrays containing the tableaux, of shape (n, m+n+1)
and (m, m+n+1), respectively.
bases : tuple(ndarray(int, ndim=1))
Tuple of two arrays containing the bases, of shape (n,) and
(m,), respectively.
Returns
-------
tuple(ndarray(float, ndim=1))
Tuple of mixed actions as given by the non-slack basic variables
in the tableaux. | Below is the instruction that describes the task:
### Input:
From `tableaux` and `bases`, extract non-slack basic variables and
return a tuple of the corresponding, normalized mixed actions.
Parameters
----------
tableaux : tuple(ndarray(float, ndim=2))
Tuple of two arrays containing the tableaux, of shape (n, m+n+1)
and (m, m+n+1), respectively.
bases : tuple(ndarray(int, ndim=1))
Tuple of two arrays containing the bases, of shape (n,) and
(m,), respectively.
Returns
-------
tuple(ndarray(float, ndim=1))
Tuple of mixed actions as given by the non-slack basic variables
in the tableaux.
### Response:
def _get_mixed_actions(tableaux, bases):
"""
From `tableaux` and `bases`, extract non-slack basic variables and
return a tuple of the corresponding, normalized mixed actions.
Parameters
----------
tableaux : tuple(ndarray(float, ndim=2))
Tuple of two arrays containing the tableaux, of shape (n, m+n+1)
and (m, m+n+1), respectively.
bases : tuple(ndarray(int, ndim=1))
Tuple of two arrays containing the bases, of shape (n,) and
(m,), respectively.
Returns
-------
tuple(ndarray(float, ndim=1))
Tuple of mixed actions as given by the non-slack basic variables
in the tableaux.
"""
nums_actions = tableaux[1].shape[0], tableaux[0].shape[0]
num = nums_actions[0] + nums_actions[1]
out = np.zeros(num)
for pl, (start, stop) in enumerate(zip((0, nums_actions[0]),
(nums_actions[0], num))):
sum_ = 0.
for i in range(nums_actions[1-pl]):
k = bases[pl][i]
if start <= k < stop:
out[k] = tableaux[pl][i, -1]
sum_ += tableaux[pl][i, -1]
if sum_ != 0:
out[start:stop] /= sum_
return out[:nums_actions[0]], out[nums_actions[0]:] |
def clone(self):
"""
Do not initialize again since everything is ready to launch app.
:return: Initialized monitor instance
"""
return Monitor(org=self.org, app=self.app, env=self.env) | Do not initialize again since everything is ready to launch app.
:return: Initialized monitor instance | Below is the instruction that describes the task:
### Input:
Do not initialize again since everything is ready to launch app.
:return: Initialized monitor instance
### Response:
def clone(self):
"""
Do not initialize again since everything is ready to launch app.
:return: Initialized monitor instance
"""
return Monitor(org=self.org, app=self.app, env=self.env) |
def delete(self, receiver_id=None, event_id=None):
"""Handle DELETE request."""
event = self._get_event(receiver_id, event_id)
event.delete()
db.session.commit()
        return make_response(event) | Handle DELETE request. | Below is the instruction that describes the task:
### Input:
Handle DELETE request.
### Response:
def delete(self, receiver_id=None, event_id=None):
"""Handle DELETE request."""
event = self._get_event(receiver_id, event_id)
event.delete()
db.session.commit()
return make_response(event) |
def fetch_attr_names(self, table_name):
"""
:return: List of attribute names in the table.
:rtype: list
:raises simplesqlite.NullDatabaseConnectionError:
|raises_check_connection|
:raises simplesqlite.TableNotFoundError:
|raises_verify_table_existence|
:raises simplesqlite.OperationalError: |raises_operational_error|
:Example:
.. code:: python
import simplesqlite
table_name = "sample_table"
con = simplesqlite.SimpleSQLite("sample.sqlite", "w")
con.create_table_from_data_matrix(
table_name,
["attr_a", "attr_b"],
[[1, "a"], [2, "b"]])
print(con.fetch_attr_names(table_name))
try:
print(con.fetch_attr_names("not_existing"))
except simplesqlite.TableNotFoundError as e:
print(e)
:Output:
.. parsed-literal::
['attr_a', 'attr_b']
'not_existing' table not found in /tmp/sample.sqlite
"""
self.verify_table_existence(table_name)
return self.schema_extractor.fetch_table_schema(table_name).get_attr_names() | :return: List of attribute names in the table.
:rtype: list
:raises simplesqlite.NullDatabaseConnectionError:
|raises_check_connection|
:raises simplesqlite.TableNotFoundError:
|raises_verify_table_existence|
:raises simplesqlite.OperationalError: |raises_operational_error|
:Example:
.. code:: python
import simplesqlite
table_name = "sample_table"
con = simplesqlite.SimpleSQLite("sample.sqlite", "w")
con.create_table_from_data_matrix(
table_name,
["attr_a", "attr_b"],
[[1, "a"], [2, "b"]])
print(con.fetch_attr_names(table_name))
try:
print(con.fetch_attr_names("not_existing"))
except simplesqlite.TableNotFoundError as e:
print(e)
:Output:
.. parsed-literal::
['attr_a', 'attr_b']
'not_existing' table not found in /tmp/sample.sqlite | Below is the instruction that describes the task:
### Input:
:return: List of attribute names in the table.
:rtype: list
:raises simplesqlite.NullDatabaseConnectionError:
|raises_check_connection|
:raises simplesqlite.TableNotFoundError:
|raises_verify_table_existence|
:raises simplesqlite.OperationalError: |raises_operational_error|
:Example:
.. code:: python
import simplesqlite
table_name = "sample_table"
con = simplesqlite.SimpleSQLite("sample.sqlite", "w")
con.create_table_from_data_matrix(
table_name,
["attr_a", "attr_b"],
[[1, "a"], [2, "b"]])
print(con.fetch_attr_names(table_name))
try:
print(con.fetch_attr_names("not_existing"))
except simplesqlite.TableNotFoundError as e:
print(e)
:Output:
.. parsed-literal::
['attr_a', 'attr_b']
'not_existing' table not found in /tmp/sample.sqlite
### Response:
def fetch_attr_names(self, table_name):
"""
:return: List of attribute names in the table.
:rtype: list
:raises simplesqlite.NullDatabaseConnectionError:
|raises_check_connection|
:raises simplesqlite.TableNotFoundError:
|raises_verify_table_existence|
:raises simplesqlite.OperationalError: |raises_operational_error|
:Example:
.. code:: python
import simplesqlite
table_name = "sample_table"
con = simplesqlite.SimpleSQLite("sample.sqlite", "w")
con.create_table_from_data_matrix(
table_name,
["attr_a", "attr_b"],
[[1, "a"], [2, "b"]])
print(con.fetch_attr_names(table_name))
try:
print(con.fetch_attr_names("not_existing"))
except simplesqlite.TableNotFoundError as e:
print(e)
:Output:
.. parsed-literal::
['attr_a', 'attr_b']
'not_existing' table not found in /tmp/sample.sqlite
"""
self.verify_table_existence(table_name)
return self.schema_extractor.fetch_table_schema(table_name).get_attr_names() |
def set_menu(self, menu):
'''set a MPTopMenu on the frame'''
self.menu = menu
        self.in_queue.put(MPImageMenu(menu)) | set a MPTopMenu on the frame | Below is the instruction that describes the task:
### Input:
set a MPTopMenu on the frame
### Response:
def set_menu(self, menu):
'''set a MPTopMenu on the frame'''
self.menu = menu
self.in_queue.put(MPImageMenu(menu)) |
def _set_route_type(self, v, load=False):
"""
Setter method for route_type, mapped from YANG variable /rbridge_id/route_map/content/match/route_type (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_route_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_route_type() directly.
YANG Description: Route type.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=route_type.route_type, is_container='container', presence=False, yang_name="route-type", rest_name="route-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route type.', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """route_type must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=route_type.route_type, is_container='container', presence=False, yang_name="route-type", rest_name="route-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route type.', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='container', is_config=True)""",
})
self.__route_type = t
if hasattr(self, '_set'):
self._set() | Setter method for route_type, mapped from YANG variable /rbridge_id/route_map/content/match/route_type (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_route_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_route_type() directly.
YANG Description: Route type. | Below is the instruction that describes the task:
### Input:
Setter method for route_type, mapped from YANG variable /rbridge_id/route_map/content/match/route_type (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_route_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_route_type() directly.
YANG Description: Route type.
### Response:
def _set_route_type(self, v, load=False):
"""
Setter method for route_type, mapped from YANG variable /rbridge_id/route_map/content/match/route_type (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_route_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_route_type() directly.
YANG Description: Route type.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=route_type.route_type, is_container='container', presence=False, yang_name="route-type", rest_name="route-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route type.', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """route_type must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=route_type.route_type, is_container='container', presence=False, yang_name="route-type", rest_name="route-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route type.', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='container', is_config=True)""",
})
self.__route_type = t
if hasattr(self, '_set'):
self._set() |
def HHIPreFilter(config={}):
"""HHI pre-interlace filter.
A widely used prefilter to prevent line twitter when converting
sequential images to interlace.
Coefficients taken from: 'Specification of a Generic Format
Converter', S. Pigeon, L. Vandendorpe, L. Cuvelier and B. Maison,
CEC RACE/HAMLET Deliverable no R2110/WP2/DS/S/006/b1, September
1995. http://www.stephanepigeon.com/Docs/deliv2.pdf
"""
fil = numpy.array(
[-4, 8, 25, -123, 230, 728, 230, -123, 25, 8, -4],
dtype=numpy.float32).reshape((-1, 1, 1)) / numpy.float32(1000)
resize = Resize(config=config)
out_frame = Frame()
out_frame.data = fil
out_frame.type = 'fil'
audit = out_frame.metadata.get('audit')
audit += 'data = HHI pre-interlace filter\n'
out_frame.metadata.set('audit', audit)
resize.filter(out_frame)
return resize | HHI pre-interlace filter.
A widely used prefilter to prevent line twitter when converting
sequential images to interlace.
Coefficients taken from: 'Specification of a Generic Format
Converter', S. Pigeon, L. Vandendorpe, L. Cuvelier and B. Maison,
CEC RACE/HAMLET Deliverable no R2110/WP2/DS/S/006/b1, September
1995. http://www.stephanepigeon.com/Docs/deliv2.pdf | Below is the instruction that describes the task:
### Input:
HHI pre-interlace filter.
A widely used prefilter to prevent line twitter when converting
sequential images to interlace.
Coefficients taken from: 'Specification of a Generic Format
Converter', S. Pigeon, L. Vandendorpe, L. Cuvelier and B. Maison,
CEC RACE/HAMLET Deliverable no R2110/WP2/DS/S/006/b1, September
1995. http://www.stephanepigeon.com/Docs/deliv2.pdf
### Response:
def HHIPreFilter(config={}):
"""HHI pre-interlace filter.
A widely used prefilter to prevent line twitter when converting
sequential images to interlace.
Coefficients taken from: 'Specification of a Generic Format
Converter', S. Pigeon, L. Vandendorpe, L. Cuvelier and B. Maison,
CEC RACE/HAMLET Deliverable no R2110/WP2/DS/S/006/b1, September
1995. http://www.stephanepigeon.com/Docs/deliv2.pdf
"""
fil = numpy.array(
[-4, 8, 25, -123, 230, 728, 230, -123, 25, 8, -4],
dtype=numpy.float32).reshape((-1, 1, 1)) / numpy.float32(1000)
resize = Resize(config=config)
out_frame = Frame()
out_frame.data = fil
out_frame.type = 'fil'
audit = out_frame.metadata.get('audit')
audit += 'data = HHI pre-interlace filter\n'
out_frame.metadata.set('audit', audit)
resize.filter(out_frame)
return resize |
def bridge_exists(br):
'''
Tests whether bridge exists as a real or fake bridge.
Returns:
True if Bridge exists, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_exists br0
'''
cmd = 'ovs-vsctl br-exists {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode) | Tests whether bridge exists as a real or fake bridge.
Returns:
True if Bridge exists, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_exists br0 | Below is the instruction that describes the task:
### Input:
Tests whether bridge exists as a real or fake bridge.
Returns:
True if Bridge exists, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_exists br0
### Response:
def bridge_exists(br):
'''
Tests whether bridge exists as a real or fake bridge.
Returns:
True if Bridge exists, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_exists br0
'''
cmd = 'ovs-vsctl br-exists {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode) |
def incrby(self, type, offset, increment):
"""
Increments or decrements (if a negative increment is given)
the specified bit field and returns the new value.
"""
self._command_stack.extend(['INCRBY', type, offset, increment])
return self | Increments or decrements (if a negative increment is given)
the specified bit field and returns the new value. | Below is the instruction that describes the task:
### Input:
Increments or decrements (if a negative increment is given)
the specified bit field and returns the new value.
### Response:
def incrby(self, type, offset, increment):
"""
Increments or decrements (if a negative increment is given)
the specified bit field and returns the new value.
"""
self._command_stack.extend(['INCRBY', type, offset, increment])
return self |
def add_connection(self, connection_id, internal_id, context):
"""Add an already created connection. Used to register devices connected before starting the device adapter.
Args:
connection_id (int): The external connection id
internal_id (string): An internal identifier for the connection
context (dict): Additional information to associate with this context
"""
# Make sure we are not reusing an id that is currently connected to something
if self._get_connection_state(connection_id) != self.Disconnected:
return
if self._get_connection_state(internal_id) != self.Disconnected:
return
conn_data = {
'state': self.Idle,
'microstate': None,
'connection_id': connection_id,
'internal_id': internal_id,
'context': context
}
self._connections[connection_id] = conn_data
self._int_connections[internal_id] = conn_data | Add an already created connection. Used to register devices connected before starting the device adapter.
Args:
connection_id (int): The external connection id
internal_id (string): An internal identifier for the connection
context (dict): Additional information to associate with this context | Below is the instruction that describes the task:
### Input:
Add an already created connection. Used to register devices connected before starting the device adapter.
Args:
connection_id (int): The external connection id
internal_id (string): An internal identifier for the connection
context (dict): Additional information to associate with this context
### Response:
def add_connection(self, connection_id, internal_id, context):
"""Add an already created connection. Used to register devices connected before starting the device adapter.
Args:
connection_id (int): The external connection id
internal_id (string): An internal identifier for the connection
context (dict): Additional information to associate with this context
"""
# Make sure we are not reusing an id that is currently connected to something
if self._get_connection_state(connection_id) != self.Disconnected:
return
if self._get_connection_state(internal_id) != self.Disconnected:
return
conn_data = {
'state': self.Idle,
'microstate': None,
'connection_id': connection_id,
'internal_id': internal_id,
'context': context
}
self._connections[connection_id] = conn_data
self._int_connections[internal_id] = conn_data |
def import_submodules(package, name=None, recursive=True):
"""Import all submodules of ``package``.
Parameters
----------
package : `module` or string
Package whose submodules to import.
name : string, optional
Override the package name with this value in the full
submodule names. By default, ``package`` is used.
recursive : bool, optional
If ``True``, recursively import all submodules of ``package``.
Otherwise, import only the modules at the top level.
Returns
-------
pkg_dict : dict
Dictionary where keys are the full submodule names and values
are the corresponding module objects.
"""
if isinstance(package, str):
package = importlib.import_module(package)
if name is None:
name = package.__name__
submodules = [m[0] for m in inspect.getmembers(package, inspect.ismodule)
if m[1].__name__.startswith('odl')]
results = {}
for pkgname in submodules:
full_name = name + '.' + pkgname
try:
results[full_name] = importlib.import_module(full_name)
except ImportError:
pass
else:
if recursive:
results.update(import_submodules(full_name, full_name))
return results | Import all submodules of ``package``.
Parameters
----------
package : `module` or string
Package whose submodules to import.
name : string, optional
Override the package name with this value in the full
submodule names. By default, ``package`` is used.
recursive : bool, optional
If ``True``, recursively import all submodules of ``package``.
Otherwise, import only the modules at the top level.
Returns
-------
pkg_dict : dict
Dictionary where keys are the full submodule names and values
are the corresponding module objects. | Below is the instruction that describes the task:
### Input:
Import all submodules of ``package``.
Parameters
----------
package : `module` or string
Package whose submodules to import.
name : string, optional
Override the package name with this value in the full
submodule names. By default, ``package`` is used.
recursive : bool, optional
If ``True``, recursively import all submodules of ``package``.
Otherwise, import only the modules at the top level.
Returns
-------
pkg_dict : dict
Dictionary where keys are the full submodule names and values
are the corresponding module objects.
### Response:
def import_submodules(package, name=None, recursive=True):
"""Import all submodules of ``package``.
Parameters
----------
package : `module` or string
Package whose submodules to import.
name : string, optional
Override the package name with this value in the full
submodule names. By default, ``package`` is used.
recursive : bool, optional
If ``True``, recursively import all submodules of ``package``.
Otherwise, import only the modules at the top level.
Returns
-------
pkg_dict : dict
Dictionary where keys are the full submodule names and values
are the corresponding module objects.
"""
if isinstance(package, str):
package = importlib.import_module(package)
if name is None:
name = package.__name__
submodules = [m[0] for m in inspect.getmembers(package, inspect.ismodule)
if m[1].__name__.startswith('odl')]
results = {}
for pkgname in submodules:
full_name = name + '.' + pkgname
try:
results[full_name] = importlib.import_module(full_name)
except ImportError:
pass
else:
if recursive:
results.update(import_submodules(full_name, full_name))
return results |
def patch_certificate_signing_request_status(self, name, body, **kwargs):
"""
partially update status of the specified CertificateSigningRequest
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_certificate_signing_request_status(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CertificateSigningRequest (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1beta1CertificateSigningRequest
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_certificate_signing_request_status_with_http_info(name, body, **kwargs)
else:
(data) = self.patch_certificate_signing_request_status_with_http_info(name, body, **kwargs)
return data | partially update status of the specified CertificateSigningRequest
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_certificate_signing_request_status(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CertificateSigningRequest (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1beta1CertificateSigningRequest
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
partially update status of the specified CertificateSigningRequest
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_certificate_signing_request_status(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CertificateSigningRequest (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1beta1CertificateSigningRequest
If the method is called asynchronously,
returns the request thread.
### Response:
def patch_certificate_signing_request_status(self, name, body, **kwargs):
"""
partially update status of the specified CertificateSigningRequest
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_certificate_signing_request_status(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CertificateSigningRequest (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:return: V1beta1CertificateSigningRequest
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_certificate_signing_request_status_with_http_info(name, body, **kwargs)
else:
(data) = self.patch_certificate_signing_request_status_with_http_info(name, body, **kwargs)
return data |
def _gen_prov(self):
"""
Extracts provenance information from the pipeline into a PipelineProv
object
Returns
-------
prov : dict[str, *]
A dictionary containing the provenance information to record
for the pipeline
"""
        # Export workflow graph to node-link data format
wf_dict = nx_json.node_link_data(self.workflow._graph)
# Replace references to Node objects with the node's provenance
# information and convert to a dict organised by node name to allow it
# to be compared more easily. Also change link node-references from
# node index to node ID so it is not dependent on the order the nodes
        # are written to the dictionary (which for Python < 3.7 is not guaranteed
# to be the same between identical runs)
for link in wf_dict['links']:
if int(networkx_version.split('.')[0]) < 2: # @UndefinedVariable
link['source'] = wf_dict['nodes'][link['source']]['id'].name
link['target'] = wf_dict['nodes'][link['target']]['id'].name
else:
link['source'] = link['source'].name
link['target'] = link['target'].name
wf_dict['nodes'] = {n['id'].name: n['id'].prov
for n in wf_dict['nodes']}
# Roundtrip to JSON to convert any tuples into lists so dictionaries
# can be compared directly
wf_dict = json.loads(json.dumps(wf_dict))
dependency_versions = {d: extract_package_version(d)
for d in ARCANA_DEPENDENCIES}
pkg_versions = {'arcana': __version__}
pkg_versions.update((k, v) for k, v in dependency_versions.items()
if v is not None)
prov = {
'__prov_version__': PROVENANCE_VERSION,
'name': self.name,
'workflow': wf_dict,
'study': self.study.prov,
'pkg_versions': pkg_versions,
'python_version': sys.version,
'joined_ids': self._joined_ids()}
return prov | Extracts provenance information from the pipeline into a PipelineProv
object
Returns
-------
prov : dict[str, *]
A dictionary containing the provenance information to record
for the pipeline | Below is the instruction that describes the task:
### Input:
Extracts provenance information from the pipeline into a PipelineProv
object
Returns
-------
prov : dict[str, *]
A dictionary containing the provenance information to record
for the pipeline
### Response:
def _gen_prov(self):
"""
Extracts provenance information from the pipeline into a PipelineProv
object
Returns
-------
prov : dict[str, *]
A dictionary containing the provenance information to record
for the pipeline
"""
        # Export workflow graph to node-link data format
wf_dict = nx_json.node_link_data(self.workflow._graph)
# Replace references to Node objects with the node's provenance
# information and convert to a dict organised by node name to allow it
# to be compared more easily. Also change link node-references from
# node index to node ID so it is not dependent on the order the nodes
        # are written to the dictionary (which for Python < 3.7 is not guaranteed
# to be the same between identical runs)
for link in wf_dict['links']:
if int(networkx_version.split('.')[0]) < 2: # @UndefinedVariable
link['source'] = wf_dict['nodes'][link['source']]['id'].name
link['target'] = wf_dict['nodes'][link['target']]['id'].name
else:
link['source'] = link['source'].name
link['target'] = link['target'].name
wf_dict['nodes'] = {n['id'].name: n['id'].prov
for n in wf_dict['nodes']}
# Roundtrip to JSON to convert any tuples into lists so dictionaries
# can be compared directly
wf_dict = json.loads(json.dumps(wf_dict))
dependency_versions = {d: extract_package_version(d)
for d in ARCANA_DEPENDENCIES}
pkg_versions = {'arcana': __version__}
pkg_versions.update((k, v) for k, v in dependency_versions.items()
if v is not None)
prov = {
'__prov_version__': PROVENANCE_VERSION,
'name': self.name,
'workflow': wf_dict,
'study': self.study.prov,
'pkg_versions': pkg_versions,
'python_version': sys.version,
'joined_ids': self._joined_ids()}
return prov |
def get_placeholder_image(width, height, name=None, fg_color=get_color('black'),
bg_color=get_color('grey'), text=None, font=u'Verdana.ttf',
fontsize=42, encoding=u'unic', mode='RGBA', fmt=u'PNG'):
"""Little spin-off from https://github.com/Visgean/python-placeholder
    that does not save an image and instead returns it."""
size = (width, height)
text = text if text else '{0}x{1}'.format(width, height)
try:
font = ImageFont.truetype(font, size=fontsize, encoding=encoding)
except IOError:
font = ImageFont.load_default()
result_img = Image.new(mode, size, bg_color)
text_size = font.getsize(text)
text_img = Image.new("RGBA", size, bg_color)
#position for the text:
left = size[0] / 2 - text_size[0] / 2
top = size[1] / 2 - text_size[1] / 2
drawing = ImageDraw.Draw(text_img)
drawing.text((left, top),
text,
font=font,
fill=fg_color)
txt_img = ImageOps.fit(text_img, size, method=Image.BICUBIC, centering=(0.5, 0.5))
result_img.paste(txt_img)
file_obj = io.BytesIO()
txt_img.save(file_obj, fmt)
return file_obj.getvalue() | Little spin-off from https://github.com/Visgean/python-placeholder
that does not save an image and instead returns it. | Below is the instruction that describes the task:
### Input:
Little spin-off from https://github.com/Visgean/python-placeholder
that does not save an image and instead returns it.
### Response:
def get_placeholder_image(width, height, name=None, fg_color=get_color('black'),
bg_color=get_color('grey'), text=None, font=u'Verdana.ttf',
fontsize=42, encoding=u'unic', mode='RGBA', fmt=u'PNG'):
"""Little spin-off from https://github.com/Visgean/python-placeholder
    that does not save an image and instead returns it."""
size = (width, height)
text = text if text else '{0}x{1}'.format(width, height)
try:
font = ImageFont.truetype(font, size=fontsize, encoding=encoding)
except IOError:
font = ImageFont.load_default()
result_img = Image.new(mode, size, bg_color)
text_size = font.getsize(text)
text_img = Image.new("RGBA", size, bg_color)
#position for the text:
left = size[0] / 2 - text_size[0] / 2
top = size[1] / 2 - text_size[1] / 2
drawing = ImageDraw.Draw(text_img)
drawing.text((left, top),
text,
font=font,
fill=fg_color)
txt_img = ImageOps.fit(text_img, size, method=Image.BICUBIC, centering=(0.5, 0.5))
result_img.paste(txt_img)
file_obj = io.BytesIO()
txt_img.save(file_obj, fmt)
return file_obj.getvalue() |
def get_option_default(self, key, subkey):
"""Get the default value of the option.
:param str key: First identifier of the option.
:param str subkey: Second identifier of the option.
:return: Default value of the option (type varies).
:raise:
:NotRegisteredError: If ``key`` or ``subkey`` do not define
any option.
"""
key, subkey = _lower_keys(key, subkey)
_entry_must_exist(self.gc, key, subkey)
df = self.gc[(self.gc["k1"] == key) & (self.gc["k2"] == subkey)]
if df["type"].values[0] == "bool":
return bool(df["default"].values[0])
elif df["type"].values[0] == "int":
return int(df["default"].values[0])
else:
return df["default"].values[0] | Get the default value of the option.
:param str key: First identifier of the option.
:param str subkey: Second identifier of the option.
:return: Default value of the option (type varies).
:raise:
:NotRegisteredError: If ``key`` or ``subkey`` do not define
any option. | Below is the instruction that describes the task:
### Input:
Get the default value of the option.
:param str key: First identifier of the option.
:param str subkey: Second identifier of the option.
:return: Default value of the option (type varies).
:raise:
:NotRegisteredError: If ``key`` or ``subkey`` do not define
any option.
### Response:
def get_option_default(self, key, subkey):
"""Get the default value of the option.
:param str key: First identifier of the option.
:param str subkey: Second identifier of the option.
:return: Default value of the option (type varies).
:raise:
:NotRegisteredError: If ``key`` or ``subkey`` do not define
any option.
"""
key, subkey = _lower_keys(key, subkey)
_entry_must_exist(self.gc, key, subkey)
df = self.gc[(self.gc["k1"] == key) & (self.gc["k2"] == subkey)]
if df["type"].values[0] == "bool":
return bool(df["default"].values[0])
elif df["type"].values[0] == "int":
return int(df["default"].values[0])
else:
return df["default"].values[0] |
def populate_branch(self, editor, root_item, tree_cache=None):
"""
Generates an outline of the editor's content and stores the result
in a cache.
"""
if tree_cache is None:
tree_cache = {}
# Removing cached items for which line is > total line nb
for _l in list(tree_cache.keys()):
if _l >= editor.get_line_count():
# Checking if key is still in tree cache in case one of its
# ancestors was deleted in the meantime (deleting all children):
if _l in tree_cache:
remove_from_tree_cache(tree_cache, line=_l)
ancestors = [(root_item, 0)]
cell_ancestors = [(root_item, 0)]
previous_item = None
previous_level = None
prev_cell_level = None
prev_cell_item = None
oe_data = editor.get_outlineexplorer_data()
for block_nb in range(editor.get_line_count()):
line_nb = block_nb+1
data = oe_data.get(block_nb)
level = None if data is None else data.fold_level
citem, clevel, _d = tree_cache.get(line_nb, (None, None, ""))
# Skip iteration if line is not the first line of a foldable block
if level is None:
if citem is not None:
remove_from_tree_cache(tree_cache, line=line_nb)
continue
# Searching for class/function statements
not_class_nor_function = data.is_not_class_nor_function()
if not not_class_nor_function:
class_name = data.get_class_name()
if class_name is None:
func_name = data.get_function_name()
if func_name is None:
if citem is not None:
remove_from_tree_cache(tree_cache, line=line_nb)
continue
# Skip iteration for if/else/try/for/etc foldable blocks.
if not_class_nor_function and not data.is_comment():
if citem is not None:
remove_from_tree_cache(tree_cache, line=line_nb)
continue
if citem is not None:
cname = to_text_string(citem.text(0))
cparent = citem.parent
# Blocks for Cell Groups.
if (data is not None and data.def_type == data.CELL and
self.group_cells):
preceding = (root_item if previous_item is None
else previous_item)
cell_level = data.cell_level
if prev_cell_level is not None:
if cell_level == prev_cell_level:
pass
elif cell_level > prev_cell_level:
cell_ancestors.append((prev_cell_item,
prev_cell_level))
else:
while (len(cell_ancestors) > 1 and
cell_level <= prev_cell_level):
cell_ancestors.pop(-1)
_item, prev_cell_level = cell_ancestors[-1]
parent, _level = cell_ancestors[-1]
if citem is not None:
if data.text == cname and level == clevel:
previous_level = clevel
previous_item = citem
continue
else:
remove_from_tree_cache(tree_cache, line=line_nb)
item = CellItem(data.def_name, line_nb, parent, preceding)
item.setup()
debug = "%s -- %s/%s" % (str(item.line).rjust(6),
to_text_string(item.parent().text(0)),
to_text_string(item.text(0)))
tree_cache[line_nb] = (item, level, debug)
ancestors = [(item, 0)]
prev_cell_level = cell_level
prev_cell_item = item
previous_item = item
continue
# Blocks for Code Groups.
if previous_level is not None:
if level == previous_level:
pass
elif level > previous_level:
ancestors.append((previous_item, previous_level))
else:
while len(ancestors) > 1 and level <= previous_level:
ancestors.pop(-1)
_item, previous_level = ancestors[-1]
parent, _level = ancestors[-1]
preceding = root_item if previous_item is None else previous_item
if not_class_nor_function and data.is_comment():
if not self.show_comments:
if citem is not None:
remove_from_tree_cache(tree_cache, line=line_nb)
continue
if citem is not None:
if data.text == cname and level == clevel:
previous_level = clevel
previous_item = citem
continue
else:
remove_from_tree_cache(tree_cache, line=line_nb)
if data.def_type == data.CELL:
item = CellItem(data.def_name, line_nb, parent, preceding)
else:
item = CommentItem(data.text, line_nb, parent, preceding)
elif class_name is not None:
if citem is not None:
if (class_name == cname and level == clevel and
parent is cparent):
previous_level = clevel
previous_item = citem
continue
else:
remove_from_tree_cache(tree_cache, line=line_nb)
item = ClassItem(class_name, line_nb, parent, preceding)
else:
if citem is not None:
if (func_name == cname and level == clevel and
parent is cparent):
previous_level = clevel
previous_item = citem
continue
else:
remove_from_tree_cache(tree_cache, line=line_nb)
item = FunctionItem(func_name, line_nb, parent, preceding)
item.setup()
debug = "%s -- %s/%s" % (str(item.line).rjust(6),
to_text_string(item.parent().text(0)),
to_text_string(item.text(0)))
tree_cache[line_nb] = (item, level, debug)
previous_level = level
previous_item = item
return tree_cache | Generates an outline of the editor's content and stores the result
in a cache. | Below is the instruction that describes the task:
### Input:
Generates an outline of the editor's content and stores the result
in a cache.
### Response:
def populate_branch(self, editor, root_item, tree_cache=None):
"""
Generates an outline of the editor's content and stores the result
in a cache.
"""
if tree_cache is None:
tree_cache = {}
# Removing cached items for which line is > total line nb
for _l in list(tree_cache.keys()):
if _l >= editor.get_line_count():
# Checking if key is still in tree cache in case one of its
# ancestors was deleted in the meantime (deleting all children):
if _l in tree_cache:
remove_from_tree_cache(tree_cache, line=_l)
ancestors = [(root_item, 0)]
cell_ancestors = [(root_item, 0)]
previous_item = None
previous_level = None
prev_cell_level = None
prev_cell_item = None
oe_data = editor.get_outlineexplorer_data()
for block_nb in range(editor.get_line_count()):
line_nb = block_nb+1
data = oe_data.get(block_nb)
level = None if data is None else data.fold_level
citem, clevel, _d = tree_cache.get(line_nb, (None, None, ""))
# Skip iteration if line is not the first line of a foldable block
if level is None:
if citem is not None:
remove_from_tree_cache(tree_cache, line=line_nb)
continue
# Searching for class/function statements
not_class_nor_function = data.is_not_class_nor_function()
if not not_class_nor_function:
class_name = data.get_class_name()
if class_name is None:
func_name = data.get_function_name()
if func_name is None:
if citem is not None:
remove_from_tree_cache(tree_cache, line=line_nb)
continue
# Skip iteration for if/else/try/for/etc foldable blocks.
if not_class_nor_function and not data.is_comment():
if citem is not None:
remove_from_tree_cache(tree_cache, line=line_nb)
continue
if citem is not None:
cname = to_text_string(citem.text(0))
cparent = citem.parent
# Blocks for Cell Groups.
if (data is not None and data.def_type == data.CELL and
self.group_cells):
preceding = (root_item if previous_item is None
else previous_item)
cell_level = data.cell_level
if prev_cell_level is not None:
if cell_level == prev_cell_level:
pass
elif cell_level > prev_cell_level:
cell_ancestors.append((prev_cell_item,
prev_cell_level))
else:
while (len(cell_ancestors) > 1 and
cell_level <= prev_cell_level):
cell_ancestors.pop(-1)
_item, prev_cell_level = cell_ancestors[-1]
parent, _level = cell_ancestors[-1]
if citem is not None:
if data.text == cname and level == clevel:
previous_level = clevel
previous_item = citem
continue
else:
remove_from_tree_cache(tree_cache, line=line_nb)
item = CellItem(data.def_name, line_nb, parent, preceding)
item.setup()
debug = "%s -- %s/%s" % (str(item.line).rjust(6),
to_text_string(item.parent().text(0)),
to_text_string(item.text(0)))
tree_cache[line_nb] = (item, level, debug)
ancestors = [(item, 0)]
prev_cell_level = cell_level
prev_cell_item = item
previous_item = item
continue
# Blocks for Code Groups.
if previous_level is not None:
if level == previous_level:
pass
elif level > previous_level:
ancestors.append((previous_item, previous_level))
else:
while len(ancestors) > 1 and level <= previous_level:
ancestors.pop(-1)
_item, previous_level = ancestors[-1]
parent, _level = ancestors[-1]
preceding = root_item if previous_item is None else previous_item
if not_class_nor_function and data.is_comment():
if not self.show_comments:
if citem is not None:
remove_from_tree_cache(tree_cache, line=line_nb)
continue
if citem is not None:
if data.text == cname and level == clevel:
previous_level = clevel
previous_item = citem
continue
else:
remove_from_tree_cache(tree_cache, line=line_nb)
if data.def_type == data.CELL:
item = CellItem(data.def_name, line_nb, parent, preceding)
else:
item = CommentItem(data.text, line_nb, parent, preceding)
elif class_name is not None:
if citem is not None:
if (class_name == cname and level == clevel and
parent is cparent):
previous_level = clevel
previous_item = citem
continue
else:
remove_from_tree_cache(tree_cache, line=line_nb)
item = ClassItem(class_name, line_nb, parent, preceding)
else:
if citem is not None:
if (func_name == cname and level == clevel and
parent is cparent):
previous_level = clevel
previous_item = citem
continue
else:
remove_from_tree_cache(tree_cache, line=line_nb)
item = FunctionItem(func_name, line_nb, parent, preceding)
item.setup()
debug = "%s -- %s/%s" % (str(item.line).rjust(6),
to_text_string(item.parent().text(0)),
to_text_string(item.text(0)))
tree_cache[line_nb] = (item, level, debug)
previous_level = level
previous_item = item
return tree_cache |
def is_transversion(self):
"""Is this variant a pyrimidine to purine change or vice versa"""
        return self.is_snv and is_purine(self.ref) != is_purine(self.alt) | Is this variant a pyrimidine to purine change or vice versa | Below is the instruction that describes the task:
### Input:
Is this variant a pyrimidine to purine change or vice versa
### Response:
def is_transversion(self):
"""Is this variant a pyrimidine to purine change or vice versa"""
return self.is_snv and is_purine(self.ref) != is_purine(self.alt) |
def resize_image_with_crop_or_pad(img, target_height, target_width):
"""
Crops and/or pads an image to a target width and height.
Resizes an image to a target width and height by either cropping the image or padding it with zeros.
NO CENTER CROP. NO CENTER PAD. (Just fill bottom right or crop bottom right)
:param img: Numpy array representing the image.
:param target_height: Target height.
:param target_width: Target width.
:return: The cropped and padded image.
"""
h, w = target_height, target_width
max_h, max_w, c = img.shape
# crop
img = crop_center(img, min(max_h, h), min(max_w, w))
# pad
padded_img = np.zeros(shape=(h, w, c), dtype=img.dtype)
padded_img[:img.shape[0], :img.shape[1], :img.shape[2]] = img
return padded_img | Crops and/or pads an image to a target width and height.
Resizes an image to a target width and height by either cropping the image or padding it with zeros.
NO CENTER CROP. NO CENTER PAD. (Just fill bottom right or crop bottom right)
:param img: Numpy array representing the image.
:param target_height: Target height.
:param target_width: Target width.
:return: The cropped and padded image. | Below is the instruction that describes the task:
### Input:
Crops and/or pads an image to a target width and height.
Resizes an image to a target width and height by either cropping the image or padding it with zeros.
NO CENTER CROP. NO CENTER PAD. (Just fill bottom right or crop bottom right)
:param img: Numpy array representing the image.
:param target_height: Target height.
:param target_width: Target width.
:return: The cropped and padded image.
### Response:
def resize_image_with_crop_or_pad(img, target_height, target_width):
"""
Crops and/or pads an image to a target width and height.
Resizes an image to a target width and height by either cropping the image or padding it with zeros.
NO CENTER CROP. NO CENTER PAD. (Just fill bottom right or crop bottom right)
:param img: Numpy array representing the image.
:param target_height: Target height.
:param target_width: Target width.
:return: The cropped and padded image.
"""
h, w = target_height, target_width
max_h, max_w, c = img.shape
# crop
img = crop_center(img, min(max_h, h), min(max_w, w))
# pad
padded_img = np.zeros(shape=(h, w, c), dtype=img.dtype)
padded_img[:img.shape[0], :img.shape[1], :img.shape[2]] = img
return padded_img |
def alarm_set(self, time, wake_with_radio=False):
"""
set the alarm clock
:param str time: time of the alarm (format: %H:%M:%S)
:param bool wake_with_radio: if True, radio will be used for the alarm
instead of beep sound
"""
# TODO: check for correct time format
log.debug("alarm => set...")
params = {
"enabled": True,
"time": time,
"wake_with_radio": wake_with_radio
}
self._app_exec("com.lametric.clock", "clock.alarm", params=params) | set the alarm clock
:param str time: time of the alarm (format: %H:%M:%S)
:param bool wake_with_radio: if True, radio will be used for the alarm
instead of beep sound | Below is the instruction that describes the task:
### Input:
set the alarm clock
:param str time: time of the alarm (format: %H:%M:%S)
:param bool wake_with_radio: if True, radio will be used for the alarm
instead of beep sound
### Response:
def alarm_set(self, time, wake_with_radio=False):
"""
set the alarm clock
:param str time: time of the alarm (format: %H:%M:%S)
:param bool wake_with_radio: if True, radio will be used for the alarm
instead of beep sound
"""
# TODO: check for correct time format
log.debug("alarm => set...")
params = {
"enabled": True,
"time": time,
"wake_with_radio": wake_with_radio
}
self._app_exec("com.lametric.clock", "clock.alarm", params=params) |
def save_session_to_file(self, filename: Optional[str] = None) -> None:
"""Saves internally stored :class:`requests.Session` object.
:param filename: Filename, or None to use default filename.
"""
if filename is None:
filename = get_default_session_filename(self.context.username)
dirname = os.path.dirname(filename)
if dirname != '' and not os.path.exists(dirname):
os.makedirs(dirname)
os.chmod(dirname, 0o700)
with open(filename, 'wb') as sessionfile:
os.chmod(filename, 0o600)
self.context.save_session_to_file(sessionfile)
self.context.log("Saved session to %s." % filename) | Saves internally stored :class:`requests.Session` object.
:param filename: Filename, or None to use default filename. | Below is the instruction that describes the task:
### Input:
Saves internally stored :class:`requests.Session` object.
:param filename: Filename, or None to use default filename.
### Response:
def save_session_to_file(self, filename: Optional[str] = None) -> None:
"""Saves internally stored :class:`requests.Session` object.
:param filename: Filename, or None to use default filename.
"""
if filename is None:
filename = get_default_session_filename(self.context.username)
dirname = os.path.dirname(filename)
if dirname != '' and not os.path.exists(dirname):
os.makedirs(dirname)
os.chmod(dirname, 0o700)
with open(filename, 'wb') as sessionfile:
os.chmod(filename, 0o600)
self.context.save_session_to_file(sessionfile)
self.context.log("Saved session to %s." % filename) |
def reset(cls):
"""
Reset the conspect elements to initial state.
"""
cls.input_el.value = ""
cls.subconspect_el.html = ""
        cls.show_error(False) | Reset the conspect elements to initial state. | Below is the instruction that describes the task:
### Input:
Reset the conspect elements to initial state.
### Response:
def reset(cls):
"""
Reset the conspect elements to initial state.
"""
cls.input_el.value = ""
cls.subconspect_el.html = ""
cls.show_error(False) |
def window_sumsquare(window, n_frames, hop_length=512, win_length=None, n_fft=2048,
dtype=np.float32, norm=None):
'''
Compute the sum-square envelope of a window function at a given hop length.
This is used to estimate modulation effects induced by windowing observations
in short-time fourier transforms.
Parameters
----------
window : string, tuple, number, callable, or list-like
Window specification, as in `get_window`
n_frames : int > 0
The number of analysis frames
hop_length : int > 0
The number of samples to advance between frames
win_length : [optional]
The length of the window function. By default, this matches `n_fft`.
n_fft : int > 0
The length of each analysis frame.
dtype : np.dtype
The data type of the output
Returns
-------
wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`
The sum-squared envelope of the window function
Examples
--------
For a fixed frame length (2048), compare modulation effects for a Hann window
at different hop lengths:
>>> n_frames = 50
>>> wss_256 = librosa.filters.window_sumsquare('hann', n_frames, hop_length=256)
>>> wss_512 = librosa.filters.window_sumsquare('hann', n_frames, hop_length=512)
>>> wss_1024 = librosa.filters.window_sumsquare('hann', n_frames, hop_length=1024)
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(3,1,1)
>>> plt.plot(wss_256)
>>> plt.title('hop_length=256')
>>> plt.subplot(3,1,2)
>>> plt.plot(wss_512)
>>> plt.title('hop_length=512')
>>> plt.subplot(3,1,3)
>>> plt.plot(wss_1024)
>>> plt.title('hop_length=1024')
>>> plt.tight_layout()
'''
if win_length is None:
win_length = n_fft
n = n_fft + hop_length * (n_frames - 1)
x = np.zeros(n, dtype=dtype)
# Compute the squared window at the desired length
win_sq = get_window(window, win_length)
win_sq = util.normalize(win_sq, norm=norm)**2
win_sq = util.pad_center(win_sq, n_fft)
# Fill the envelope
__window_ss_fill(x, win_sq, n_frames, hop_length)
return x | Compute the sum-square envelope of a window function at a given hop length.
This is used to estimate modulation effects induced by windowing observations
in short-time fourier transforms.
Parameters
----------
window : string, tuple, number, callable, or list-like
Window specification, as in `get_window`
n_frames : int > 0
The number of analysis frames
hop_length : int > 0
The number of samples to advance between frames
win_length : [optional]
The length of the window function. By default, this matches `n_fft`.
n_fft : int > 0
The length of each analysis frame.
dtype : np.dtype
The data type of the output
Returns
-------
wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`
The sum-squared envelope of the window function
Examples
--------
For a fixed frame length (2048), compare modulation effects for a Hann window
at different hop lengths:
>>> n_frames = 50
>>> wss_256 = librosa.filters.window_sumsquare('hann', n_frames, hop_length=256)
>>> wss_512 = librosa.filters.window_sumsquare('hann', n_frames, hop_length=512)
>>> wss_1024 = librosa.filters.window_sumsquare('hann', n_frames, hop_length=1024)
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(3,1,1)
>>> plt.plot(wss_256)
>>> plt.title('hop_length=256')
>>> plt.subplot(3,1,2)
>>> plt.plot(wss_512)
>>> plt.title('hop_length=512')
>>> plt.subplot(3,1,3)
>>> plt.plot(wss_1024)
>>> plt.title('hop_length=1024')
>>> plt.tight_layout() | Below is the the instruction that describes the task:
### Input:
Compute the sum-square envelope of a window function at a given hop length.
This is used to estimate modulation effects induced by windowing observations
in short-time fourier transforms.
Parameters
----------
window : string, tuple, number, callable, or list-like
Window specification, as in `get_window`
n_frames : int > 0
The number of analysis frames
hop_length : int > 0
The number of samples to advance between frames
win_length : [optional]
The length of the window function. By default, this matches `n_fft`.
n_fft : int > 0
The length of each analysis frame.
dtype : np.dtype
The data type of the output
Returns
-------
wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`
The sum-squared envelope of the window function
Examples
--------
For a fixed frame length (2048), compare modulation effects for a Hann window
at different hop lengths:
>>> n_frames = 50
>>> wss_256 = librosa.filters.window_sumsquare('hann', n_frames, hop_length=256)
>>> wss_512 = librosa.filters.window_sumsquare('hann', n_frames, hop_length=512)
>>> wss_1024 = librosa.filters.window_sumsquare('hann', n_frames, hop_length=1024)
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(3,1,1)
>>> plt.plot(wss_256)
>>> plt.title('hop_length=256')
>>> plt.subplot(3,1,2)
>>> plt.plot(wss_512)
>>> plt.title('hop_length=512')
>>> plt.subplot(3,1,3)
>>> plt.plot(wss_1024)
>>> plt.title('hop_length=1024')
>>> plt.tight_layout()
### Response:
def window_sumsquare(window, n_frames, hop_length=512, win_length=None, n_fft=2048,
dtype=np.float32, norm=None):
'''
Compute the sum-square envelope of a window function at a given hop length.
This is used to estimate modulation effects induced by windowing observations
in short-time fourier transforms.
Parameters
----------
window : string, tuple, number, callable, or list-like
Window specification, as in `get_window`
n_frames : int > 0
The number of analysis frames
hop_length : int > 0
The number of samples to advance between frames
win_length : [optional]
The length of the window function. By default, this matches `n_fft`.
n_fft : int > 0
The length of each analysis frame.
dtype : np.dtype
The data type of the output
Returns
-------
wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`
The sum-squared envelope of the window function
Examples
--------
For a fixed frame length (2048), compare modulation effects for a Hann window
at different hop lengths:
>>> n_frames = 50
>>> wss_256 = librosa.filters.window_sumsquare('hann', n_frames, hop_length=256)
>>> wss_512 = librosa.filters.window_sumsquare('hann', n_frames, hop_length=512)
>>> wss_1024 = librosa.filters.window_sumsquare('hann', n_frames, hop_length=1024)
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(3,1,1)
>>> plt.plot(wss_256)
>>> plt.title('hop_length=256')
>>> plt.subplot(3,1,2)
>>> plt.plot(wss_512)
>>> plt.title('hop_length=512')
>>> plt.subplot(3,1,3)
>>> plt.plot(wss_1024)
>>> plt.title('hop_length=1024')
>>> plt.tight_layout()
'''
if win_length is None:
win_length = n_fft
n = n_fft + hop_length * (n_frames - 1)
x = np.zeros(n, dtype=dtype)
# Compute the squared window at the desired length
win_sq = get_window(window, win_length)
win_sq = util.normalize(win_sq, norm=norm)**2
win_sq = util.pad_center(win_sq, n_fft)
# Fill the envelope
__window_ss_fill(x, win_sq, n_frames, hop_length)
return x |
def fromexportunreg(
cls, bundle, exporterid, rsid, export_ref, exception, endpoint
):
# type: (Bundle, Tuple[str, str], Tuple[Tuple[str, str], int], ExportReference, Optional[Tuple[Any, Any, Any]], EndpointDescription) -> RemoteServiceAdminEvent
"""
Creates a RemoteServiceAdminEvent object from the departure of an
ExportRegistration
"""
return RemoteServiceAdminEvent(
typ=RemoteServiceAdminEvent.EXPORT_UNREGISTRATION,
bundle=bundle,
cid=exporterid,
rsid=rsid,
export_ref=export_ref,
exception=exception,
endpoint=endpoint,
) | Creates a RemoteServiceAdminEvent object from the departure of an
ExportRegistration | Below is the the instruction that describes the task:
### Input:
Creates a RemoteServiceAdminEvent object from the departure of an
ExportRegistration
### Response:
def fromexportunreg(
cls, bundle, exporterid, rsid, export_ref, exception, endpoint
):
# type: (Bundle, Tuple[str, str], Tuple[Tuple[str, str], int], ExportReference, Optional[Tuple[Any, Any, Any]], EndpointDescription) -> RemoteServiceAdminEvent
"""
Creates a RemoteServiceAdminEvent object from the departure of an
ExportRegistration
"""
return RemoteServiceAdminEvent(
typ=RemoteServiceAdminEvent.EXPORT_UNREGISTRATION,
bundle=bundle,
cid=exporterid,
rsid=rsid,
export_ref=export_ref,
exception=exception,
endpoint=endpoint,
) |
def fetch(self, url, path, filename):
"""Verify if the file is already downloaded and complete. If they don't
exists or if are not complete, use homura download function to fetch
files. Return a list with the path of the downloaded file and the size
of the remote file.
"""
logger.debug('initializing download in ', url)
remote_file_size = self.get_remote_file_size(url)
if exists(join(path, filename)):
size = getsize(join(path, filename))
if size == remote_file_size:
logger.error('%s already exists on your system' % filename)
print('%s already exists on your system' % filename)
return [join(path, filename), size]
logger.debug('Downloading: %s' % filename)
print('Downloading: %s' % filename)
fetch(url, path)
print('stored at %s' % path)
logger.debug('stored at %s' % path)
return [join(path, filename), remote_file_size] | Verify if the file is already downloaded and complete. If they don't
exists or if are not complete, use homura download function to fetch
files. Return a list with the path of the downloaded file and the size
of the remote file. | Below is the the instruction that describes the task:
### Input:
Verify if the file is already downloaded and complete. If they don't
exists or if are not complete, use homura download function to fetch
files. Return a list with the path of the downloaded file and the size
of the remote file.
### Response:
def fetch(self, url, path, filename):
"""Verify if the file is already downloaded and complete. If they don't
exists or if are not complete, use homura download function to fetch
files. Return a list with the path of the downloaded file and the size
of the remote file.
"""
logger.debug('initializing download in ', url)
remote_file_size = self.get_remote_file_size(url)
if exists(join(path, filename)):
size = getsize(join(path, filename))
if size == remote_file_size:
logger.error('%s already exists on your system' % filename)
print('%s already exists on your system' % filename)
return [join(path, filename), size]
logger.debug('Downloading: %s' % filename)
print('Downloading: %s' % filename)
fetch(url, path)
print('stored at %s' % path)
logger.debug('stored at %s' % path)
return [join(path, filename), remote_file_size] |
def get(self, key, default=CACHE_MISS):
"""
Get a value out of the cache
Returns CACHE_DISABLED if the cache is disabled
:param key: key to search for
:param default: value to return if the key is not found (defaults to CACHE_MISS)
"""
if not self.options.enabled:
return CACHE_DISABLED
ret = default
if self.has(key):
ret = self._dict[key].value
logger.debug('get({}, default={}) == {}'.format(repr(key), repr(default), repr(ret)))
return ret | Get a value out of the cache
Returns CACHE_DISABLED if the cache is disabled
:param key: key to search for
:param default: value to return if the key is not found (defaults to CACHE_MISS) | Below is the the instruction that describes the task:
### Input:
Get a value out of the cache
Returns CACHE_DISABLED if the cache is disabled
:param key: key to search for
:param default: value to return if the key is not found (defaults to CACHE_MISS)
### Response:
def get(self, key, default=CACHE_MISS):
"""
Get a value out of the cache
Returns CACHE_DISABLED if the cache is disabled
:param key: key to search for
:param default: value to return if the key is not found (defaults to CACHE_MISS)
"""
if not self.options.enabled:
return CACHE_DISABLED
ret = default
if self.has(key):
ret = self._dict[key].value
logger.debug('get({}, default={}) == {}'.format(repr(key), repr(default), repr(ret)))
return ret |
def getKwConfig(self, kw):
""" return the configuration of kw, dict
USAGE: rdict = getKwConfig(kw)
"""
confd = self.getKwAsDict(kw).values()[0].values()[0]
return {k.lower(): v for k, v in confd.items()} | return the configuration of kw, dict
USAGE: rdict = getKwConfig(kw) | Below is the the instruction that describes the task:
### Input:
return the configuration of kw, dict
USAGE: rdict = getKwConfig(kw)
### Response:
def getKwConfig(self, kw):
""" return the configuration of kw, dict
USAGE: rdict = getKwConfig(kw)
"""
confd = self.getKwAsDict(kw).values()[0].values()[0]
return {k.lower(): v for k, v in confd.items()} |
def validate_samples(self):
"""Scan through the SampleData values and make sure
that each one is correct
"""
bsc = getToolByName(self, 'bika_setup_catalog')
keywords = bsc.uniqueValuesFor('getKeyword')
profiles = []
for p in bsc(portal_type='AnalysisProfile'):
p = p.getObject()
profiles.append(p.Title())
profiles.append(p.getProfileKey())
row_nr = 0
for gridrow in self.getSampleData():
row_nr += 1
# validate against sample and ar schemas
for k, v in gridrow.items():
if k in ['Analysis', 'Profiles']:
break
if k in sample_schema:
try:
self.validate_against_schema(
sample_schema, row_nr, k, v)
continue
except ValueError as e:
self.error(e.message)
break
if k in ar_schema:
try:
self.validate_against_schema(
ar_schema, row_nr, k, v)
except ValueError as e:
self.error(e.message)
an_cnt = 0
for v in gridrow['Analyses']:
if v and v not in keywords:
self.error("Row %s: value is invalid (%s=%s)" %
('Analysis keyword', row_nr, v))
else:
an_cnt += 1
for v in gridrow['Profiles']:
if v and v not in profiles:
self.error("Row %s: value is invalid (%s=%s)" %
('Profile Title', row_nr, v))
else:
an_cnt += 1
if not an_cnt:
self.error("Row %s: No valid analyses or profiles" % row_nr) | Scan through the SampleData values and make sure
that each one is correct | Below is the the instruction that describes the task:
### Input:
Scan through the SampleData values and make sure
that each one is correct
### Response:
def validate_samples(self):
"""Scan through the SampleData values and make sure
that each one is correct
"""
bsc = getToolByName(self, 'bika_setup_catalog')
keywords = bsc.uniqueValuesFor('getKeyword')
profiles = []
for p in bsc(portal_type='AnalysisProfile'):
p = p.getObject()
profiles.append(p.Title())
profiles.append(p.getProfileKey())
row_nr = 0
for gridrow in self.getSampleData():
row_nr += 1
# validate against sample and ar schemas
for k, v in gridrow.items():
if k in ['Analysis', 'Profiles']:
break
if k in sample_schema:
try:
self.validate_against_schema(
sample_schema, row_nr, k, v)
continue
except ValueError as e:
self.error(e.message)
break
if k in ar_schema:
try:
self.validate_against_schema(
ar_schema, row_nr, k, v)
except ValueError as e:
self.error(e.message)
an_cnt = 0
for v in gridrow['Analyses']:
if v and v not in keywords:
self.error("Row %s: value is invalid (%s=%s)" %
('Analysis keyword', row_nr, v))
else:
an_cnt += 1
for v in gridrow['Profiles']:
if v and v not in profiles:
self.error("Row %s: value is invalid (%s=%s)" %
('Profile Title', row_nr, v))
else:
an_cnt += 1
if not an_cnt:
self.error("Row %s: No valid analyses or profiles" % row_nr) |
def hasInfo(self):
""" Will have information to write. """
count = len([None
for (fromUUID, size)
in Diff.theKnownSizes[self.uuid].iteritems()
if size is not None and fromUUID is not None
])
return count > 0 | Will have information to write. | Below is the the instruction that describes the task:
### Input:
Will have information to write.
### Response:
def hasInfo(self):
""" Will have information to write. """
count = len([None
for (fromUUID, size)
in Diff.theKnownSizes[self.uuid].iteritems()
if size is not None and fromUUID is not None
])
return count > 0 |
def parse(readDataInstance, nStreams):
"""
Returns a new L{NetMetaDataStreams} object.
@type readDataInstance: L{ReadData}
@param readDataInstance: A L{ReadData} object with data to be parsed as a L{NetMetaDataStreams} object.
@type nStreams: int
@param nStreams: The number of L{NetMetaDataStreamEntry} objects in the C{readDataInstance} object.
@rtype: L{NetMetaDataStreams}
@return: A new L{NetMetaDataStreams} object.
"""
streams = NetMetaDataStreams()
for i in range(nStreams):
streamEntry = NetMetaDataStreamEntry()
streamEntry.offset.value = readDataInstance.readDword()
streamEntry.size.value = readDataInstance.readDword()
streamEntry.name.value = readDataInstance.readAlignedString()
#streams.append(streamEntry)
streams.update({ i: streamEntry, streamEntry.name.value: streamEntry })
return streams | Returns a new L{NetMetaDataStreams} object.
@type readDataInstance: L{ReadData}
@param readDataInstance: A L{ReadData} object with data to be parsed as a L{NetMetaDataStreams} object.
@type nStreams: int
@param nStreams: The number of L{NetMetaDataStreamEntry} objects in the C{readDataInstance} object.
@rtype: L{NetMetaDataStreams}
@return: A new L{NetMetaDataStreams} object. | Below is the the instruction that describes the task:
### Input:
Returns a new L{NetMetaDataStreams} object.
@type readDataInstance: L{ReadData}
@param readDataInstance: A L{ReadData} object with data to be parsed as a L{NetMetaDataStreams} object.
@type nStreams: int
@param nStreams: The number of L{NetMetaDataStreamEntry} objects in the C{readDataInstance} object.
@rtype: L{NetMetaDataStreams}
@return: A new L{NetMetaDataStreams} object.
### Response:
def parse(readDataInstance, nStreams):
"""
Returns a new L{NetMetaDataStreams} object.
@type readDataInstance: L{ReadData}
@param readDataInstance: A L{ReadData} object with data to be parsed as a L{NetMetaDataStreams} object.
@type nStreams: int
@param nStreams: The number of L{NetMetaDataStreamEntry} objects in the C{readDataInstance} object.
@rtype: L{NetMetaDataStreams}
@return: A new L{NetMetaDataStreams} object.
"""
streams = NetMetaDataStreams()
for i in range(nStreams):
streamEntry = NetMetaDataStreamEntry()
streamEntry.offset.value = readDataInstance.readDword()
streamEntry.size.value = readDataInstance.readDword()
streamEntry.name.value = readDataInstance.readAlignedString()
#streams.append(streamEntry)
streams.update({ i: streamEntry, streamEntry.name.value: streamEntry })
return streams |
def change_password():
"""View function which handles a change password request."""
form_class = _security.change_password_form
if request.is_json:
form = form_class(MultiDict(request.get_json()))
else:
form = form_class()
if form.validate_on_submit():
after_this_request(_commit)
change_user_password(current_user._get_current_object(),
form.new_password.data)
if not request.is_json:
do_flash(*get_message('PASSWORD_CHANGE'))
return redirect(get_url(_security.post_change_view) or
get_url(_security.post_login_view))
if request.is_json:
form.user = current_user
return _render_json(form)
return _security.render_template(
config_value('CHANGE_PASSWORD_TEMPLATE'),
change_password_form=form,
**_ctx('change_password')
) | View function which handles a change password request. | Below is the the instruction that describes the task:
### Input:
View function which handles a change password request.
### Response:
def change_password():
"""View function which handles a change password request."""
form_class = _security.change_password_form
if request.is_json:
form = form_class(MultiDict(request.get_json()))
else:
form = form_class()
if form.validate_on_submit():
after_this_request(_commit)
change_user_password(current_user._get_current_object(),
form.new_password.data)
if not request.is_json:
do_flash(*get_message('PASSWORD_CHANGE'))
return redirect(get_url(_security.post_change_view) or
get_url(_security.post_login_view))
if request.is_json:
form.user = current_user
return _render_json(form)
return _security.render_template(
config_value('CHANGE_PASSWORD_TEMPLATE'),
change_password_form=form,
**_ctx('change_password')
) |
def _path_to_value(cls, path, parent_dict):
"""Return a value from a dictionary at the given path"""
keys = cls._path_to_keys(path)
# Traverse to the tip of the path
child_dict = parent_dict
for key in keys[:-1]:
child_dict = child_dict.get(key)
if child_dict is None:
return
return child_dict.get(keys[-1]) | Return a value from a dictionary at the given path | Below is the the instruction that describes the task:
### Input:
Return a value from a dictionary at the given path
### Response:
def _path_to_value(cls, path, parent_dict):
"""Return a value from a dictionary at the given path"""
keys = cls._path_to_keys(path)
# Traverse to the tip of the path
child_dict = parent_dict
for key in keys[:-1]:
child_dict = child_dict.get(key)
if child_dict is None:
return
return child_dict.get(keys[-1]) |
def main():
"""
NAME
di_vgp.py
DESCRIPTION
converts declination/inclination to virtual geomagnetic pole
SYNTAX
di_vgp.py [-h] [options]
OPTIONS
-h prints help message and quits
-i interactive data entry
-f FILE to specify intput file
-F FILE to specify output file
<filename to read/write from/to standard input
INPUT
for file entry:
D I SLAT SLON
where:
D: declination
I: inclination
SLAT: site latitude (positive north)
SLON: site longitude (positive east)
OUTPUT
PLON PLAT
where:
PLAT: pole latitude
PLON: pole longitude (positive east)
"""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-F' in sys.argv:
ind=sys.argv.index('-F')
ofile=sys.argv[ind+1]
out=open(ofile,'w')
else:
out=''
if '-i' in sys.argv: # if one is -i
a95=0
while 1:
try:
ans = input("Input Declination: <cntrl-D to quit> ")
Dec = float(ans) # assign input to Dec, after conversion to floating point
ans = input("Input Inclination: ")
Inc = float(ans)
ans = input("Input Site Latitude: ")
slat = float(ans)
ans = input("Input Site Longitude: ")
slong = float(ans)
output = pmag.dia_vgp(Dec,Inc,a95,slat,slong)
print('%7.1f %7.1f'%(output[0],output[1]))
except:
print("\n Good-bye\n")
sys.exit()
elif '-f' in sys.argv: # input of file name
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
data=numpy.loadtxt(file)
else: #
data = numpy.loadtxt(sys.stdin,dtype=numpy.float) # read from S/I
if len(data.shape)>1: # 2-D array
N=data.shape[0]
if data.shape[1]==4: # only dec,inc,sitelat, site long -no alpha95
data=data.transpose()
inlist=numpy.array([data[0],data[1],numpy.zeros(N),data[2],data[3]]).transpose()
output = pmag.dia_vgp(inlist)
for k in range(N):
if out=='':
print('%7.1f %7.1f'%(output[0][k],output[1][k]))
else:
out.write('%7.1f %7.1f\n'%(output[0][k],output[1][k]))
else: # single line of data
if len(data)==4:
data=[data[0],data[1],0,data[2],data[3]]
output = pmag.dia_vgp(data)
if out=='': # spit to standard output
print('%7.1f %7.1f'%(output[0],output[1]))
else: # write to file
out.write('%7.1f %7.1f\n'%(output[0],output[1])) | NAME
di_vgp.py
DESCRIPTION
converts declination/inclination to virtual geomagnetic pole
SYNTAX
di_vgp.py [-h] [options]
OPTIONS
-h prints help message and quits
-i interactive data entry
-f FILE to specify intput file
-F FILE to specify output file
<filename to read/write from/to standard input
INPUT
for file entry:
D I SLAT SLON
where:
D: declination
I: inclination
SLAT: site latitude (positive north)
SLON: site longitude (positive east)
OUTPUT
PLON PLAT
where:
PLAT: pole latitude
PLON: pole longitude (positive east) | Below is the the instruction that describes the task:
### Input:
NAME
di_vgp.py
DESCRIPTION
converts declination/inclination to virtual geomagnetic pole
SYNTAX
di_vgp.py [-h] [options]
OPTIONS
-h prints help message and quits
-i interactive data entry
-f FILE to specify intput file
-F FILE to specify output file
<filename to read/write from/to standard input
INPUT
for file entry:
D I SLAT SLON
where:
D: declination
I: inclination
SLAT: site latitude (positive north)
SLON: site longitude (positive east)
OUTPUT
PLON PLAT
where:
PLAT: pole latitude
PLON: pole longitude (positive east)
### Response:
def main():
"""
NAME
di_vgp.py
DESCRIPTION
converts declination/inclination to virtual geomagnetic pole
SYNTAX
di_vgp.py [-h] [options]
OPTIONS
-h prints help message and quits
-i interactive data entry
-f FILE to specify intput file
-F FILE to specify output file
<filename to read/write from/to standard input
INPUT
for file entry:
D I SLAT SLON
where:
D: declination
I: inclination
SLAT: site latitude (positive north)
SLON: site longitude (positive east)
OUTPUT
PLON PLAT
where:
PLAT: pole latitude
PLON: pole longitude (positive east)
"""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-F' in sys.argv:
ind=sys.argv.index('-F')
ofile=sys.argv[ind+1]
out=open(ofile,'w')
else:
out=''
if '-i' in sys.argv: # if one is -i
a95=0
while 1:
try:
ans = input("Input Declination: <cntrl-D to quit> ")
Dec = float(ans) # assign input to Dec, after conversion to floating point
ans = input("Input Inclination: ")
Inc = float(ans)
ans = input("Input Site Latitude: ")
slat = float(ans)
ans = input("Input Site Longitude: ")
slong = float(ans)
output = pmag.dia_vgp(Dec,Inc,a95,slat,slong)
print('%7.1f %7.1f'%(output[0],output[1]))
except:
print("\n Good-bye\n")
sys.exit()
elif '-f' in sys.argv: # input of file name
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
data=numpy.loadtxt(file)
else: #
data = numpy.loadtxt(sys.stdin,dtype=numpy.float) # read from S/I
if len(data.shape)>1: # 2-D array
N=data.shape[0]
if data.shape[1]==4: # only dec,inc,sitelat, site long -no alpha95
data=data.transpose()
inlist=numpy.array([data[0],data[1],numpy.zeros(N),data[2],data[3]]).transpose()
output = pmag.dia_vgp(inlist)
for k in range(N):
if out=='':
print('%7.1f %7.1f'%(output[0][k],output[1][k]))
else:
out.write('%7.1f %7.1f\n'%(output[0][k],output[1][k]))
else: # single line of data
if len(data)==4:
data=[data[0],data[1],0,data[2],data[3]]
output = pmag.dia_vgp(data)
if out=='': # spit to standard output
print('%7.1f %7.1f'%(output[0],output[1]))
else: # write to file
out.write('%7.1f %7.1f\n'%(output[0],output[1])) |
def FilePattern(pattern, settings, **kwargs):
"""Factory method returns LocalFilePattern or GoogleStorageFilePattern
"""
url = _urlparse(pattern)
if url.scheme == 'gs':
return GoogleStorageFilePattern(pattern, settings, **kwargs)
else:
assert url.scheme == 'file'
return LocalFilePattern(pattern, settings, **kwargs) | Factory method returns LocalFilePattern or GoogleStorageFilePattern | Below is the the instruction that describes the task:
### Input:
Factory method returns LocalFilePattern or GoogleStorageFilePattern
### Response:
def FilePattern(pattern, settings, **kwargs):
"""Factory method returns LocalFilePattern or GoogleStorageFilePattern
"""
url = _urlparse(pattern)
if url.scheme == 'gs':
return GoogleStorageFilePattern(pattern, settings, **kwargs)
else:
assert url.scheme == 'file'
return LocalFilePattern(pattern, settings, **kwargs) |
def add_create_update_args(parser, required_args, optional_args, create=False):
"""Wrapper around ``add_parser_arguments``.
If ``create`` is True, one argument group will be created for each of
``required_args`` and ``optional_args``. Each required argument will have
the ``required`` parameter set to True automatically.
If ``create`` is False, only one group of optional arguments will be
created containing all the arguments.
The arguments should be specified the same way as for
``add_parser_arguments``.
"""
if create:
for key in required_args:
required_args[key]['required'] = True
add_parser_arguments(parser, required_args, group='required arguments')
else:
optional_args.update(required_args)
add_parser_arguments(parser, optional_args) | Wrapper around ``add_parser_arguments``.
If ``create`` is True, one argument group will be created for each of
``required_args`` and ``optional_args``. Each required argument will have
the ``required`` parameter set to True automatically.
If ``create`` is False, only one group of optional arguments will be
created containing all the arguments.
The arguments should be specified the same way as for
``add_parser_arguments``. | Below is the the instruction that describes the task:
### Input:
Wrapper around ``add_parser_arguments``.
If ``create`` is True, one argument group will be created for each of
``required_args`` and ``optional_args``. Each required argument will have
the ``required`` parameter set to True automatically.
If ``create`` is False, only one group of optional arguments will be
created containing all the arguments.
The arguments should be specified the same way as for
``add_parser_arguments``.
### Response:
def add_create_update_args(parser, required_args, optional_args, create=False):
"""Wrapper around ``add_parser_arguments``.
If ``create`` is True, one argument group will be created for each of
``required_args`` and ``optional_args``. Each required argument will have
the ``required`` parameter set to True automatically.
If ``create`` is False, only one group of optional arguments will be
created containing all the arguments.
The arguments should be specified the same way as for
``add_parser_arguments``.
"""
if create:
for key in required_args:
required_args[key]['required'] = True
add_parser_arguments(parser, required_args, group='required arguments')
else:
optional_args.update(required_args)
add_parser_arguments(parser, optional_args) |
def get_chebi_name_from_id(chebi_id, offline=False):
"""Return a ChEBI name corresponding to the given ChEBI ID.
Parameters
----------
chebi_id : str
The ChEBI ID whose name is to be returned.
offline : Optional[bool]
Choose whether to allow an online lookup if the local lookup fails. If
True, the online lookup is not attempted. Default: False.
Returns
-------
chebi_name : str
The name corresponding to the given ChEBI ID. If the lookup
fails, None is returned.
"""
chebi_name = chebi_id_to_name.get(chebi_id)
if chebi_name is None and not offline:
chebi_name = get_chebi_name_from_id_web(chebi_id)
return chebi_name | Return a ChEBI name corresponding to the given ChEBI ID.
Parameters
----------
chebi_id : str
The ChEBI ID whose name is to be returned.
offline : Optional[bool]
Choose whether to allow an online lookup if the local lookup fails. If
True, the online lookup is not attempted. Default: False.
Returns
-------
chebi_name : str
The name corresponding to the given ChEBI ID. If the lookup
fails, None is returned. | Below is the the instruction that describes the task:
### Input:
Return a ChEBI name corresponding to the given ChEBI ID.
Parameters
----------
chebi_id : str
The ChEBI ID whose name is to be returned.
offline : Optional[bool]
Choose whether to allow an online lookup if the local lookup fails. If
True, the online lookup is not attempted. Default: False.
Returns
-------
chebi_name : str
The name corresponding to the given ChEBI ID. If the lookup
fails, None is returned.
### Response:
def get_chebi_name_from_id(chebi_id, offline=False):
"""Return a ChEBI name corresponding to the given ChEBI ID.
Parameters
----------
chebi_id : str
The ChEBI ID whose name is to be returned.
offline : Optional[bool]
Choose whether to allow an online lookup if the local lookup fails. If
True, the online lookup is not attempted. Default: False.
Returns
-------
chebi_name : str
The name corresponding to the given ChEBI ID. If the lookup
fails, None is returned.
"""
chebi_name = chebi_id_to_name.get(chebi_id)
if chebi_name is None and not offline:
chebi_name = get_chebi_name_from_id_web(chebi_id)
return chebi_name |
def clientdisconnect(self, event):
"""Handler to deal with a possibly disconnected remote controlling
client
:param event: ClientDisconnect Event
"""
try:
if event.clientuuid == self.remote_controller:
self.log("Remote controller disconnected!", lvl=critical)
self.remote_controller = None
except Exception as e:
self.log("Strange thing while client disconnected", e, type(e)) | Handler to deal with a possibly disconnected remote controlling
client
:param event: ClientDisconnect Event | Below is the the instruction that describes the task:
### Input:
Handler to deal with a possibly disconnected remote controlling
client
:param event: ClientDisconnect Event
### Response:
def clientdisconnect(self, event):
"""Handler to deal with a possibly disconnected remote controlling
client
:param event: ClientDisconnect Event
"""
try:
if event.clientuuid == self.remote_controller:
self.log("Remote controller disconnected!", lvl=critical)
self.remote_controller = None
except Exception as e:
self.log("Strange thing while client disconnected", e, type(e)) |
def background(self):
"""
Background color of the caret line. Default is to use a color slightly
darker/lighter than the background color. You can override the
automatic color by setting up this property
"""
if self._color or not self.editor:
return self._color
else:
return drift_color(self.editor.background, 110) | Background color of the caret line. Default is to use a color slightly
darker/lighter than the background color. You can override the
automatic color by setting up this property | Below is the the instruction that describes the task:
### Input:
Background color of the caret line. Default is to use a color slightly
darker/lighter than the background color. You can override the
automatic color by setting up this property
### Response:
def background(self):
"""
Background color of the caret line. Default is to use a color slightly
darker/lighter than the background color. You can override the
automatic color by setting up this property
"""
if self._color or not self.editor:
return self._color
else:
return drift_color(self.editor.background, 110) |
def require_email_confirmation(self):
""" Mark email as unconfirmed"""
self.email_confirmed = False
self.email_link = self.generate_hash(50)
now = datetime.datetime.utcnow()
self.email_link_expires = now + datetime.timedelta(hours=24) | Mark email as unconfirmed | Below is the the instruction that describes the task:
### Input:
Mark email as unconfirmed
### Response:
def require_email_confirmation(self):
""" Mark email as unconfirmed"""
self.email_confirmed = False
self.email_link = self.generate_hash(50)
now = datetime.datetime.utcnow()
self.email_link_expires = now + datetime.timedelta(hours=24) |
def stable_cho_factor(x,tiny=_TINY):
"""
NAME:
stable_cho_factor
PURPOSE:
Stable version of the cholesky decomposition
INPUT:
x - (sc.array) positive definite matrix
tiny - (double) tiny number to add to the covariance matrix to make the decomposition stable (has a default)
OUTPUT:
(L,lowerFlag) - output from scipy.linalg.cho_factor for lower=True
REVISION HISTORY:
2009-09-25 - Written - Bovy (NYU)
"""
return linalg.cho_factor(x+numpy.sum(numpy.diag(x))*tiny*numpy.eye(x.shape[0]),lower=True) | NAME:
stable_cho_factor
PURPOSE:
Stable version of the cholesky decomposition
INPUT:
x - (sc.array) positive definite matrix
tiny - (double) tiny number to add to the covariance matrix to make the decomposition stable (has a default)
OUTPUT:
(L,lowerFlag) - output from scipy.linalg.cho_factor for lower=True
REVISION HISTORY:
2009-09-25 - Written - Bovy (NYU) | Below is the the instruction that describes the task:
### Input:
NAME:
stable_cho_factor
PURPOSE:
Stable version of the cholesky decomposition
INPUT:
x - (sc.array) positive definite matrix
tiny - (double) tiny number to add to the covariance matrix to make the decomposition stable (has a default)
OUTPUT:
(L,lowerFlag) - output from scipy.linalg.cho_factor for lower=True
REVISION HISTORY:
2009-09-25 - Written - Bovy (NYU)
### Response:
def stable_cho_factor(x,tiny=_TINY):
"""
NAME:
stable_cho_factor
PURPOSE:
Stable version of the cholesky decomposition
INPUT:
x - (sc.array) positive definite matrix
tiny - (double) tiny number to add to the covariance matrix to make the decomposition stable (has a default)
OUTPUT:
(L,lowerFlag) - output from scipy.linalg.cho_factor for lower=True
REVISION HISTORY:
2009-09-25 - Written - Bovy (NYU)
"""
return linalg.cho_factor(x+numpy.sum(numpy.diag(x))*tiny*numpy.eye(x.shape[0]),lower=True) |
def create_main_frame(self):
self.main_frame = QWidget()
#self.main_frame.setFixedSize(self.width(), self.width())
self.dpi = 128
self.ShapeGroups =200
self.view = gl.GLViewWidget()
#self.view = pg.PlotWidget()
#self.view.setFixedSize(self.width(),self.height())
self.view.setFixedSize(self.width(), self.width())
self.view.setParent(self.main_frame)
# Other GUI controls
self.save_button = QPushButton('&Save')
self.save_button.clicked.connect(self.saveImgFile)
self.draw_button = QPushButton('&Reset')
self.draw_button.clicked.connect(self.Reset)
self.load_button = QPushButton('&Load')
#self.load_button.clicked.connect(self.Load)
self.fit_cb= QCheckBox('&PolyFit')
self.fit_cb.setChecked(False)
self.fit_cb.stateChanged.connect(self.Magic) # int
self.fit_label = QLabel('Exp')
self.fit_seter = QLineEdit(self)
self.fit_seter.textChanged[str].connect(self.FitChanged)
self.shape_cb= QCheckBox('&Shape')
self.shape_cb.setChecked(False)
self.shape_cb.stateChanged.connect(self.Magic) # int
self.Normalize_cb = QCheckBox('&Normalize')
self.Normalize_cb.setChecked(False)
self.Normalize_cb.stateChanged.connect(self.Magic) # int
self.norm_slider_label = QLabel('Standard:' + self.NameChosen)
self.norm_slider = QSlider(Qt.Horizontal)
self.norm_slider.setRange(0, 4)
self.norm_slider.setValue(0)
self.norm_slider.setTracking(True)
self.norm_slider.setTickPosition(QSlider.TicksBothSides)
self.norm_slider.valueChanged.connect(self.Magic) # int
self.x_element = QSlider(Qt.Horizontal)
self.x_element.setRange(0, len(self.items) - 1)
self.x_element.setValue(0)
self.x_element.setTracking(True)
self.x_element.setTickPosition(QSlider.TicksBothSides)
self.x_element.valueChanged.connect(self.Magic) # int
self.x_element_label = QLabel('X')
self.logx_cb = QCheckBox('&Log')
self.logx_cb.setChecked(False)
self.logx_cb.stateChanged.connect(self.Magic) # int
self.y_element = QSlider(Qt.Horizontal)
self.y_element.setRange(0, len(self.items) - 1)
self.y_element.setValue(1)
self.y_element.setTracking(True)
self.y_element.setTickPosition(QSlider.TicksBothSides)
self.y_element.valueChanged.connect(self.Magic) # int
self.y_element_label = QLabel('Y')
self.logy_cb = QCheckBox('&Log')
self.logy_cb.setChecked(False)
self.logy_cb.stateChanged.connect(self.Magic) # int
self.z_element = QSlider(Qt.Horizontal)
self.z_element.setRange(0, len(self.items) - 1)
self.z_element.setValue(2)
self.z_element.setTracking(True)
self.z_element.setTickPosition(QSlider.TicksBothSides)
self.z_element.valueChanged.connect(self.Magic) # int
self.z_element_label = QLabel('Z')
self.logz_cb = QCheckBox('&Log')
self.logz_cb.setChecked(False)
self.logz_cb.stateChanged.connect(self.Magic) # int
self.xlim_seter_left_label = QLabel('Xleft')
self.xlim_seter_left = QLineEdit(self)
self.xlim_seter_left.textChanged[str].connect(self.XleftChanged)
self.xlim_seter_right_label = QLabel('Xright')
self.xlim_seter_right = QLineEdit(self)
self.xlim_seter_right.textChanged[str].connect(self.XrightChanged)
self.ylim_seter_down_label = QLabel('Ydown')
self.ylim_seter_down = QLineEdit(self)
self.ylim_seter_down.textChanged[str].connect(self.YdownChanged)
self.ylim_seter_up_label = QLabel('Yup')
self.ylim_seter_up = QLineEdit(self)
self.ylim_seter_up.textChanged[str].connect(self.YupChanged)
self.hbox0 = QHBoxLayout()
self.hbox1 = QHBoxLayout()
self.hbox2 = QHBoxLayout()
self.hbox3 = QHBoxLayout()
self.hbox4 = QHBoxLayout()
self.hbox5 = QHBoxLayout()
self.hbox6 = QHBoxLayout()
self.hbox7 = QHBoxLayout()
'''
for w in [self.fit_cb,self.fit_label, self.fit_seter,self.xlim_seter_left_label,self.xlim_seter_left,self.xlim_seter_right_label,self.xlim_seter_right,self.ylim_seter_down_label,self.ylim_seter_down,self.ylim_seter_up_label,self.ylim_seter_up,self.shape_cb]:
self.hbox0.addWidget(w)
self.hbox0.setAlignment(w, Qt.AlignVCenter)
'''
for w in [self.view]:
self.hbox0.addWidget(w)
self.hbox0.setAlignment(w, Qt.AlignVCenter)
for w in [self.Normalize_cb, self.norm_slider_label, self.norm_slider]:
self.hbox1.addWidget(w)
self.hbox1.setAlignment(w, Qt.AlignVCenter)
for w in [self.logx_cb, self.x_element_label, self.x_element]:
self.hbox2.addWidget(w)
self.hbox2.setAlignment(w, Qt.AlignVCenter)
for w in [self.logy_cb, self.y_element_label, self.y_element]:
self.hbox3.addWidget(w)
self.hbox3.setAlignment(w, Qt.AlignVCenter)
for w in [self.logz_cb, self.z_element_label, self.z_element]:
self.hbox4.addWidget(w)
self.hbox4.setAlignment(w, Qt.AlignVCenter)
self.vbox = QVBoxLayout()
#self.vbox.addWidget(self.view)
self.vbox.addLayout(self.hbox0)
self.vbox.addLayout(self.hbox1)
self.vbox.addLayout(self.hbox2)
self.vbox.addLayout(self.hbox3)
self.vbox.addLayout(self.hbox4)
self.main_frame.setLayout(self.vbox)
self.setCentralWidget(self.main_frame) | for w in [self.fit_cb,self.fit_label, self.fit_seter,self.xlim_seter_left_label,self.xlim_seter_left,self.xlim_seter_right_label,self.xlim_seter_right,self.ylim_seter_down_label,self.ylim_seter_down,self.ylim_seter_up_label,self.ylim_seter_up,self.shape_cb]:
self.hbox0.addWidget(w)
self.hbox0.setAlignment(w, Qt.AlignVCenter) | Below is the the instruction that describes the task:
### Input:
for w in [self.fit_cb,self.fit_label, self.fit_seter,self.xlim_seter_left_label,self.xlim_seter_left,self.xlim_seter_right_label,self.xlim_seter_right,self.ylim_seter_down_label,self.ylim_seter_down,self.ylim_seter_up_label,self.ylim_seter_up,self.shape_cb]:
self.hbox0.addWidget(w)
self.hbox0.setAlignment(w, Qt.AlignVCenter)
### Response:
def create_main_frame(self):
self.main_frame = QWidget()
#self.main_frame.setFixedSize(self.width(), self.width())
self.dpi = 128
self.ShapeGroups =200
self.view = gl.GLViewWidget()
#self.view = pg.PlotWidget()
#self.view.setFixedSize(self.width(),self.height())
self.view.setFixedSize(self.width(), self.width())
self.view.setParent(self.main_frame)
# Other GUI controls
self.save_button = QPushButton('&Save')
self.save_button.clicked.connect(self.saveImgFile)
self.draw_button = QPushButton('&Reset')
self.draw_button.clicked.connect(self.Reset)
self.load_button = QPushButton('&Load')
#self.load_button.clicked.connect(self.Load)
self.fit_cb= QCheckBox('&PolyFit')
self.fit_cb.setChecked(False)
self.fit_cb.stateChanged.connect(self.Magic) # int
self.fit_label = QLabel('Exp')
self.fit_seter = QLineEdit(self)
self.fit_seter.textChanged[str].connect(self.FitChanged)
self.shape_cb= QCheckBox('&Shape')
self.shape_cb.setChecked(False)
self.shape_cb.stateChanged.connect(self.Magic) # int
self.Normalize_cb = QCheckBox('&Normalize')
self.Normalize_cb.setChecked(False)
self.Normalize_cb.stateChanged.connect(self.Magic) # int
self.norm_slider_label = QLabel('Standard:' + self.NameChosen)
self.norm_slider = QSlider(Qt.Horizontal)
self.norm_slider.setRange(0, 4)
self.norm_slider.setValue(0)
self.norm_slider.setTracking(True)
self.norm_slider.setTickPosition(QSlider.TicksBothSides)
self.norm_slider.valueChanged.connect(self.Magic) # int
self.x_element = QSlider(Qt.Horizontal)
self.x_element.setRange(0, len(self.items) - 1)
self.x_element.setValue(0)
self.x_element.setTracking(True)
self.x_element.setTickPosition(QSlider.TicksBothSides)
self.x_element.valueChanged.connect(self.Magic) # int
self.x_element_label = QLabel('X')
self.logx_cb = QCheckBox('&Log')
self.logx_cb.setChecked(False)
self.logx_cb.stateChanged.connect(self.Magic) # int
self.y_element = QSlider(Qt.Horizontal)
self.y_element.setRange(0, len(self.items) - 1)
self.y_element.setValue(1)
self.y_element.setTracking(True)
self.y_element.setTickPosition(QSlider.TicksBothSides)
self.y_element.valueChanged.connect(self.Magic) # int
self.y_element_label = QLabel('Y')
self.logy_cb = QCheckBox('&Log')
self.logy_cb.setChecked(False)
self.logy_cb.stateChanged.connect(self.Magic) # int
self.z_element = QSlider(Qt.Horizontal)
self.z_element.setRange(0, len(self.items) - 1)
self.z_element.setValue(2)
self.z_element.setTracking(True)
self.z_element.setTickPosition(QSlider.TicksBothSides)
self.z_element.valueChanged.connect(self.Magic) # int
self.z_element_label = QLabel('Z')
self.logz_cb = QCheckBox('&Log')
self.logz_cb.setChecked(False)
self.logz_cb.stateChanged.connect(self.Magic) # int
self.xlim_seter_left_label = QLabel('Xleft')
self.xlim_seter_left = QLineEdit(self)
self.xlim_seter_left.textChanged[str].connect(self.XleftChanged)
self.xlim_seter_right_label = QLabel('Xright')
self.xlim_seter_right = QLineEdit(self)
self.xlim_seter_right.textChanged[str].connect(self.XrightChanged)
self.ylim_seter_down_label = QLabel('Ydown')
self.ylim_seter_down = QLineEdit(self)
self.ylim_seter_down.textChanged[str].connect(self.YdownChanged)
self.ylim_seter_up_label = QLabel('Yup')
self.ylim_seter_up = QLineEdit(self)
self.ylim_seter_up.textChanged[str].connect(self.YupChanged)
self.hbox0 = QHBoxLayout()
self.hbox1 = QHBoxLayout()
self.hbox2 = QHBoxLayout()
self.hbox3 = QHBoxLayout()
self.hbox4 = QHBoxLayout()
self.hbox5 = QHBoxLayout()
self.hbox6 = QHBoxLayout()
self.hbox7 = QHBoxLayout()
'''
for w in [self.fit_cb,self.fit_label, self.fit_seter,self.xlim_seter_left_label,self.xlim_seter_left,self.xlim_seter_right_label,self.xlim_seter_right,self.ylim_seter_down_label,self.ylim_seter_down,self.ylim_seter_up_label,self.ylim_seter_up,self.shape_cb]:
self.hbox0.addWidget(w)
self.hbox0.setAlignment(w, Qt.AlignVCenter)
'''
for w in [self.view]:
self.hbox0.addWidget(w)
self.hbox0.setAlignment(w, Qt.AlignVCenter)
for w in [self.Normalize_cb, self.norm_slider_label, self.norm_slider]:
self.hbox1.addWidget(w)
self.hbox1.setAlignment(w, Qt.AlignVCenter)
for w in [self.logx_cb, self.x_element_label, self.x_element]:
self.hbox2.addWidget(w)
self.hbox2.setAlignment(w, Qt.AlignVCenter)
for w in [self.logy_cb, self.y_element_label, self.y_element]:
self.hbox3.addWidget(w)
self.hbox3.setAlignment(w, Qt.AlignVCenter)
for w in [self.logz_cb, self.z_element_label, self.z_element]:
self.hbox4.addWidget(w)
self.hbox4.setAlignment(w, Qt.AlignVCenter)
self.vbox = QVBoxLayout()
#self.vbox.addWidget(self.view)
self.vbox.addLayout(self.hbox0)
self.vbox.addLayout(self.hbox1)
self.vbox.addLayout(self.hbox2)
self.vbox.addLayout(self.hbox3)
self.vbox.addLayout(self.hbox4)
self.main_frame.setLayout(self.vbox)
self.setCentralWidget(self.main_frame) |
def name(self, src=None):
"""Return string representing the name of this type."""
return "{%s}" % ", ".join("%s: %s" % (key, _get_type_name(ktype, src))
for key, ktype in viewitems(self._types)) | Return string representing the name of this type. | Below is the the instruction that describes the task:
### Input:
Return string representing the name of this type.
### Response:
def name(self, src=None):
"""Return string representing the name of this type."""
return "{%s}" % ", ".join("%s: %s" % (key, _get_type_name(ktype, src))
for key, ktype in viewitems(self._types)) |
def run_exercises(setup_manager: SetupManager, exercises: Iterable[Exercise], seed: int) -> Iterator[Result]:
"""
It is recommended to use setup_manager_context() to generate your setup_manager.
"""
game_interface = setup_manager.game_interface
names = [exercise.get_name() for exercise in exercises]
with training_status_renderer_context(names, game_interface.renderer) as ren:
for i, exercise in enumerate(exercises):
def update_row(status: str, status_color_func):
nonlocal i
nonlocal exercise
ren.update(i, Row(exercise.get_name(), status, status_color_func))
update_row('config', ren.renderman.white)
# Only reload the match if the config has changed.
new_match_config = exercise.get_match_config()
if new_match_config != setup_manager.match_config:
update_row('match', ren.renderman.white)
_setup_match(new_match_config, setup_manager)
update_row('bots', ren.renderman.white)
_wait_until_bots_ready(setup_manager, new_match_config)
update_row('wait', ren.renderman.white)
_wait_until_good_ticks(game_interface)
update_row('setup', ren.renderman.white)
error_result = _setup_exercise(game_interface, exercise, seed)
if error_result is not None:
update_row('setup', ren.renderman.red)
yield error_result
continue
update_row('reload', ren.renderman.white)
setup_manager.reload_all_agents(quiet=True)
# Wait for the set_game_state() to propagate before we start running ex.on_tick()
# TODO: wait until the game looks similar.
update_row('sleep', ren.renderman.white)
time.sleep(0.03)
update_row('>>>>', ren.renderman.white)
result = _grade_exercise(game_interface, exercise, seed)
if isinstance(result.grade, Pass):
update_row('PASS', ren.renderman.green)
else:
update_row('FAIL', ren.renderman.red)
yield result | It is recommended to use setup_manager_context() to generate your setup_manager. | Below is the the instruction that describes the task:
### Input:
It is recommended to use setup_manager_context() to generate your setup_manager.
### Response:
def run_exercises(setup_manager: SetupManager, exercises: Iterable[Exercise], seed: int) -> Iterator[Result]:
"""
It is recommended to use setup_manager_context() to generate your setup_manager.
"""
game_interface = setup_manager.game_interface
names = [exercise.get_name() for exercise in exercises]
with training_status_renderer_context(names, game_interface.renderer) as ren:
for i, exercise in enumerate(exercises):
def update_row(status: str, status_color_func):
nonlocal i
nonlocal exercise
ren.update(i, Row(exercise.get_name(), status, status_color_func))
update_row('config', ren.renderman.white)
# Only reload the match if the config has changed.
new_match_config = exercise.get_match_config()
if new_match_config != setup_manager.match_config:
update_row('match', ren.renderman.white)
_setup_match(new_match_config, setup_manager)
update_row('bots', ren.renderman.white)
_wait_until_bots_ready(setup_manager, new_match_config)
update_row('wait', ren.renderman.white)
_wait_until_good_ticks(game_interface)
update_row('setup', ren.renderman.white)
error_result = _setup_exercise(game_interface, exercise, seed)
if error_result is not None:
update_row('setup', ren.renderman.red)
yield error_result
continue
update_row('reload', ren.renderman.white)
setup_manager.reload_all_agents(quiet=True)
# Wait for the set_game_state() to propagate before we start running ex.on_tick()
# TODO: wait until the game looks similar.
update_row('sleep', ren.renderman.white)
time.sleep(0.03)
update_row('>>>>', ren.renderman.white)
result = _grade_exercise(game_interface, exercise, seed)
if isinstance(result.grade, Pass):
update_row('PASS', ren.renderman.green)
else:
update_row('FAIL', ren.renderman.red)
yield result |
def extend_relations(self, data, kind):
"""Extend metadata for tables or views
:param data: list of (rel_name, ) tuples
:param kind: either 'tables' or 'views'
:return:
"""
# 'data' is a generator object. It can throw an exception while being
# consumed. This could happen if the user has launched the app without
# specifying a database name. This exception must be handled to prevent
# crashing.
try:
data = [self.escaped_names(d) for d in data]
except Exception:
data = []
# dbmetadata['tables'][$schema_name][$table_name] should be a list of
# column names. Default to an asterisk
metadata = self.dbmetadata[kind]
for relname in data:
try:
metadata[self.dbname][relname[0]] = ['*']
except KeyError:
_logger.error('%r %r listed in unrecognized schema %r',
kind, relname[0], self.dbname)
self.all_completions.add(relname[0]) | Extend metadata for tables or views
:param data: list of (rel_name, ) tuples
:param kind: either 'tables' or 'views'
:return: | Below is the the instruction that describes the task:
### Input:
Extend metadata for tables or views
:param data: list of (rel_name, ) tuples
:param kind: either 'tables' or 'views'
:return:
### Response:
def extend_relations(self, data, kind):
"""Extend metadata for tables or views
:param data: list of (rel_name, ) tuples
:param kind: either 'tables' or 'views'
:return:
"""
# 'data' is a generator object. It can throw an exception while being
# consumed. This could happen if the user has launched the app without
# specifying a database name. This exception must be handled to prevent
# crashing.
try:
data = [self.escaped_names(d) for d in data]
except Exception:
data = []
# dbmetadata['tables'][$schema_name][$table_name] should be a list of
# column names. Default to an asterisk
metadata = self.dbmetadata[kind]
for relname in data:
try:
metadata[self.dbname][relname[0]] = ['*']
except KeyError:
_logger.error('%r %r listed in unrecognized schema %r',
kind, relname[0], self.dbname)
self.all_completions.add(relname[0]) |
def thermal_fit_result(fit_result, v_residual=None,
v_label='Unit-cell volume $(\mathrm{\AA}^3)$',
temp_fitline=np.asarray(
[300., 1000., 1500., 2000., 2500., 3000.]),
figsize=(5, 5), height_ratios=(3, 1), ms_data=50,
p_err=None, v_err=None, cbar_loc=(0.99, 0.1, .01, 0.82),
pdf_filen=None, title='Fit result'):
"""
plot P-V-T EOS curve fitting result
:param fit_result: lmfit result object, see example jnb file for detail
:param v_label: label for volume axis
:param temp_fitline: temperatures to calculate isothermal compression
curves, default = [300., 1000., 1500., 2000., 2500., 3000.]
:param figsize: figure size, default = (7,7)
:param height_ratios: height ratio between the main and residue plots,
default = (3,1)
:param ms_data: marker size for data points
:param p_err: pressure error bar
:param v_err: volume error bar
:param cbar_loc: location of color bar
:param pdf_filen: name of pdf output file
:param title: title of the figure
:return: None
"""
# basic figure setup
f, ax = plt.subplots(2, 1, sharex=True, figsize=figsize,
gridspec_kw={'height_ratios': height_ratios})
for ax_i in ax:
ax_i.tick_params(direction='in')
# read data to plot
v_data = fit_result.userkws['v']
temp_data = fit_result.userkws['temp']
p_data = fit_result.data
p_datafit = fit_result.best_fit
v0 = uct.ufloat(fit_result.params['st_v0'].value,
fit_result.params['st_v0'].stderr)
sm = plt.cm.ScalarMappable(cmap=c_map,
norm=plt.Normalize(
vmin=300., vmax=temp_data.max()))
a = sm.to_rgba(temp_fitline)
v_fitline = np.linspace(v0.n, min(v_data), 1000)
fitmodel_copy = copy.deepcopy(fit_result)
for a_i, temp_i in zip(a, temp_fitline):
p_fitline = fitmodel_copy.eval(v=v_fitline,
temp=np.ones_like(v_fitline) * temp_i)
ax[0].plot(p_fitline, v_fitline, c=a_i)
# error range here does not make a lot sense, so not supported
# if (p_err is not None) and (v_err is not None):
ax[0].errorbar(p_data, v_data, xerr=p_err, yerr=v_err, fmt=' ', c='k',
capsize=0, elinewidth=0.5, label='Data', zorder=0)
points = ax[0].scatter(p_data, v_data, marker='o', s=ms_data, c=temp_data,
cmap=c_map, vmin=300., vmax=temp_data.max(),
zorder=1)
if v_residual is None:
ax[1].scatter(p_data, p_data - p_datafit, marker='o', s=ms_data,
c=temp_data, cmap=c_map, vmin=300.,
vmax=temp_data.max(), zorder=1)
ax[1].errorbar(p_data, p_data - p_datafit, yerr=p_err, fmt=' ', c='k',
capsize=0, elinewidth=0.5, label='Data', zorder=0)
ax[1].set_ylabel('$P_{obs} - P_{fit}$')
else:
ax[1].scatter(p_data, v_residual, marker='o', s=ms_data, c=temp_data,
cmap=c_map, vmin=300., vmax=temp_data.max(), zorder=1)
ax[1].errorbar(p_data, v_residual, yerr=p_err, fmt=' ', c='k',
capsize=0, elinewidth=0.5, label='Data', zorder=0)
ax[1].set_ylabel('$V_{obs} - V_{fit}$')
# ax[0].legend()
position = f.add_axes(cbar_loc)
f.colorbar(points, orientation="vertical", cax=position,
ticks=temp_fitline)
ax[1].axhline(0, c='k', ls='--')
ax[1].set_xlabel('Pressure (GPa)')
ax[0].set_ylabel(v_label)
ax[0].set_title(title)
plt.tight_layout()
if pdf_filen is not None:
f.savefig(pdf_filen) | plot P-V-T EOS curve fitting result
:param fit_result: lmfit result object, see example jnb file for detail
:param v_label: label for volume axis
:param temp_fitline: temperatures to calculate isothermal compression
curves, default = [300., 1000., 1500., 2000., 2500., 3000.]
:param figsize: figure size, default = (7,7)
:param height_ratios: height ratio between the main and residue plots,
default = (3,1)
:param ms_data: marker size for data points
:param p_err: pressure error bar
:param v_err: volume error bar
:param cbar_loc: location of color bar
:param pdf_filen: name of pdf output file
:param title: title of the figure
:return: None | Below is the the instruction that describes the task:
### Input:
plot P-V-T EOS curve fitting result
:param fit_result: lmfit result object, see example jnb file for detail
:param v_label: label for volume axis
:param temp_fitline: temperatures to calculate isothermal compression
curves, default = [300., 1000., 1500., 2000., 2500., 3000.]
:param figsize: figure size, default = (7,7)
:param height_ratios: height ratio between the main and residue plots,
default = (3,1)
:param ms_data: marker size for data points
:param p_err: pressure error bar
:param v_err: volume error bar
:param cbar_loc: location of color bar
:param pdf_filen: name of pdf output file
:param title: title of the figure
:return: None
### Response:
def thermal_fit_result(fit_result, v_residual=None,
v_label='Unit-cell volume $(\mathrm{\AA}^3)$',
temp_fitline=np.asarray(
[300., 1000., 1500., 2000., 2500., 3000.]),
figsize=(5, 5), height_ratios=(3, 1), ms_data=50,
p_err=None, v_err=None, cbar_loc=(0.99, 0.1, .01, 0.82),
pdf_filen=None, title='Fit result'):
"""
plot P-V-T EOS curve fitting result
:param fit_result: lmfit result object, see example jnb file for detail
:param v_label: label for volume axis
:param temp_fitline: temperatures to calculate isothermal compression
curves, default = [300., 1000., 1500., 2000., 2500., 3000.]
:param figsize: figure size, default = (7,7)
:param height_ratios: height ratio between the main and residue plots,
default = (3,1)
:param ms_data: marker size for data points
:param p_err: pressure error bar
:param v_err: volume error bar
:param cbar_loc: location of color bar
:param pdf_filen: name of pdf output file
:param title: title of the figure
:return: None
"""
# basic figure setup
f, ax = plt.subplots(2, 1, sharex=True, figsize=figsize,
gridspec_kw={'height_ratios': height_ratios})
for ax_i in ax:
ax_i.tick_params(direction='in')
# read data to plot
v_data = fit_result.userkws['v']
temp_data = fit_result.userkws['temp']
p_data = fit_result.data
p_datafit = fit_result.best_fit
v0 = uct.ufloat(fit_result.params['st_v0'].value,
fit_result.params['st_v0'].stderr)
sm = plt.cm.ScalarMappable(cmap=c_map,
norm=plt.Normalize(
vmin=300., vmax=temp_data.max()))
a = sm.to_rgba(temp_fitline)
v_fitline = np.linspace(v0.n, min(v_data), 1000)
fitmodel_copy = copy.deepcopy(fit_result)
for a_i, temp_i in zip(a, temp_fitline):
p_fitline = fitmodel_copy.eval(v=v_fitline,
temp=np.ones_like(v_fitline) * temp_i)
ax[0].plot(p_fitline, v_fitline, c=a_i)
# error range here does not make a lot sense, so not supported
# if (p_err is not None) and (v_err is not None):
ax[0].errorbar(p_data, v_data, xerr=p_err, yerr=v_err, fmt=' ', c='k',
capsize=0, elinewidth=0.5, label='Data', zorder=0)
points = ax[0].scatter(p_data, v_data, marker='o', s=ms_data, c=temp_data,
cmap=c_map, vmin=300., vmax=temp_data.max(),
zorder=1)
if v_residual is None:
ax[1].scatter(p_data, p_data - p_datafit, marker='o', s=ms_data,
c=temp_data, cmap=c_map, vmin=300.,
vmax=temp_data.max(), zorder=1)
ax[1].errorbar(p_data, p_data - p_datafit, yerr=p_err, fmt=' ', c='k',
capsize=0, elinewidth=0.5, label='Data', zorder=0)
ax[1].set_ylabel('$P_{obs} - P_{fit}$')
else:
ax[1].scatter(p_data, v_residual, marker='o', s=ms_data, c=temp_data,
cmap=c_map, vmin=300., vmax=temp_data.max(), zorder=1)
ax[1].errorbar(p_data, v_residual, yerr=p_err, fmt=' ', c='k',
capsize=0, elinewidth=0.5, label='Data', zorder=0)
ax[1].set_ylabel('$V_{obs} - V_{fit}$')
# ax[0].legend()
position = f.add_axes(cbar_loc)
f.colorbar(points, orientation="vertical", cax=position,
ticks=temp_fitline)
ax[1].axhline(0, c='k', ls='--')
ax[1].set_xlabel('Pressure (GPa)')
ax[0].set_ylabel(v_label)
ax[0].set_title(title)
plt.tight_layout()
if pdf_filen is not None:
f.savefig(pdf_filen) |
def update_nseg(self):
"""Update the number of segments, displayed in the dialog."""
self.nseg = 0
if self.one_grp:
segments = self.get_segments()
if segments is not None:
self.nseg = len(segments)
self.show_nseg.setText('Number of segments: ' + str(self.nseg))
times = [t for seg in segments for t in seg['times']]
self.parent.overview.mark_poi(times)
else:
self.show_nseg.setText('No valid segments')
self.toggle_freq() | Update the number of segments, displayed in the dialog. | Below is the the instruction that describes the task:
### Input:
Update the number of segments, displayed in the dialog.
### Response:
def update_nseg(self):
"""Update the number of segments, displayed in the dialog."""
self.nseg = 0
if self.one_grp:
segments = self.get_segments()
if segments is not None:
self.nseg = len(segments)
self.show_nseg.setText('Number of segments: ' + str(self.nseg))
times = [t for seg in segments for t in seg['times']]
self.parent.overview.mark_poi(times)
else:
self.show_nseg.setText('No valid segments')
self.toggle_freq() |
def delete(name):
'''
Remove the named group
CLI Example:
.. code-block:: bash
salt '*' group.delete foo
'''
if salt.utils.stringutils.contains_whitespace(name):
raise SaltInvocationError('Group name cannot contain whitespace')
if name.startswith('_'):
raise SaltInvocationError(
'Salt will not remove groups beginning with underscores'
)
if not info(name):
return True
cmd = ['dseditgroup', '-o', 'delete', name]
return __salt__['cmd.retcode'](cmd, python_shell=False) == 0 | Remove the named group
CLI Example:
.. code-block:: bash
salt '*' group.delete foo | Below is the the instruction that describes the task:
### Input:
Remove the named group
CLI Example:
.. code-block:: bash
salt '*' group.delete foo
### Response:
def delete(name):
'''
Remove the named group
CLI Example:
.. code-block:: bash
salt '*' group.delete foo
'''
if salt.utils.stringutils.contains_whitespace(name):
raise SaltInvocationError('Group name cannot contain whitespace')
if name.startswith('_'):
raise SaltInvocationError(
'Salt will not remove groups beginning with underscores'
)
if not info(name):
return True
cmd = ['dseditgroup', '-o', 'delete', name]
return __salt__['cmd.retcode'](cmd, python_shell=False) == 0 |
def previous_unwrittable_on_col(view, coords):
"""Return position of the previous (in column) letter that is unwrittable"""
x, y = coords
miny = -1
for offset in range(y - 1, miny, -1):
letter = view[x, offset]
if letter not in REWRITABLE_LETTERS:
return offset
return None | Return position of the previous (in column) letter that is unwrittable | Below is the the instruction that describes the task:
### Input:
Return position of the previous (in column) letter that is unwrittable
### Response:
def previous_unwrittable_on_col(view, coords):
"""Return position of the previous (in column) letter that is unwrittable"""
x, y = coords
miny = -1
for offset in range(y - 1, miny, -1):
letter = view[x, offset]
if letter not in REWRITABLE_LETTERS:
return offset
return None |
def get_shape_points(cur, shape_id):
"""
Given a shape_id, return its shape-sequence.
Parameters
----------
cur: sqlite3.Cursor
cursor to a GTFS database
shape_id: str
id of the route
Returns
-------
shape_points: list
elements are dictionaries containing the 'seq', 'lat', and 'lon' of the shape
"""
cur.execute('''SELECT seq, lat, lon, d FROM shapes where shape_id=?
ORDER BY seq''', (shape_id,))
shape_points = [dict(seq=row[0], lat=row[1], lon=row[2], d=row[3])
for row in cur]
return shape_points | Given a shape_id, return its shape-sequence.
Parameters
----------
cur: sqlite3.Cursor
cursor to a GTFS database
shape_id: str
id of the route
Returns
-------
shape_points: list
elements are dictionaries containing the 'seq', 'lat', and 'lon' of the shape | Below is the the instruction that describes the task:
### Input:
Given a shape_id, return its shape-sequence.
Parameters
----------
cur: sqlite3.Cursor
cursor to a GTFS database
shape_id: str
id of the route
Returns
-------
shape_points: list
elements are dictionaries containing the 'seq', 'lat', and 'lon' of the shape
### Response:
def get_shape_points(cur, shape_id):
"""
Given a shape_id, return its shape-sequence.
Parameters
----------
cur: sqlite3.Cursor
cursor to a GTFS database
shape_id: str
id of the route
Returns
-------
shape_points: list
elements are dictionaries containing the 'seq', 'lat', and 'lon' of the shape
"""
cur.execute('''SELECT seq, lat, lon, d FROM shapes where shape_id=?
ORDER BY seq''', (shape_id,))
shape_points = [dict(seq=row[0], lat=row[1], lon=row[2], d=row[3])
for row in cur]
return shape_points |
def get_post_data(self):
'''
Get all the arguments from post request. Only get the first argument by default.
'''
post_data = {}
for key in self.request.arguments:
post_data[key] = self.get_arguments(key)[0]
return post_data | Get all the arguments from post request. Only get the first argument by default. | Below is the the instruction that describes the task:
### Input:
Get all the arguments from post request. Only get the first argument by default.
### Response:
def get_post_data(self):
'''
Get all the arguments from post request. Only get the first argument by default.
'''
post_data = {}
for key in self.request.arguments:
post_data[key] = self.get_arguments(key)[0]
return post_data |
def supports(cls, *functionalities):
"""
A view decorator to indicate that an xBlock view has support for the
given functionalities.
Arguments:
functionalities: String identifiers for the functionalities of the view.
For example: "multi_device".
"""
def _decorator(view):
"""
Internal decorator that updates the given view's list of supported
functionalities.
"""
# pylint: disable=protected-access
if not hasattr(view, "_supports"):
view._supports = set()
for functionality in functionalities:
view._supports.add(functionality)
return view
return _decorator | A view decorator to indicate that an xBlock view has support for the
given functionalities.
Arguments:
functionalities: String identifiers for the functionalities of the view.
For example: "multi_device". | Below is the the instruction that describes the task:
### Input:
A view decorator to indicate that an xBlock view has support for the
given functionalities.
Arguments:
functionalities: String identifiers for the functionalities of the view.
For example: "multi_device".
### Response:
def supports(cls, *functionalities):
"""
A view decorator to indicate that an xBlock view has support for the
given functionalities.
Arguments:
functionalities: String identifiers for the functionalities of the view.
For example: "multi_device".
"""
def _decorator(view):
"""
Internal decorator that updates the given view's list of supported
functionalities.
"""
# pylint: disable=protected-access
if not hasattr(view, "_supports"):
view._supports = set()
for functionality in functionalities:
view._supports.add(functionality)
return view
return _decorator |
def _charInfo(self, point, padding):
"""
Displays character info.
"""
print('{0:0>4X} '.format(point).rjust(padding), ud.name(chr(point), '<code point {0:0>4X}>'.format(point))) | Displays character info. | Below is the the instruction that describes the task:
### Input:
Displays character info.
### Response:
def _charInfo(self, point, padding):
"""
Displays character info.
"""
print('{0:0>4X} '.format(point).rjust(padding), ud.name(chr(point), '<code point {0:0>4X}>'.format(point))) |
def add(self, row={}, step=None):
"""Adds or updates a history step.
If row isn't specified, will write the current state of row.
If step is specified, the row will be written only when add() is called with
a different step value.
run.history.row["duration"] = 1.0
run.history.add({"loss": 1})
=> {"duration": 1.0, "loss": 1}
"""
if not isinstance(row, collections.Mapping):
raise wandb.Error('history.add expects dict-like object')
if step is None:
self.update(row)
if not self.batched:
self._write()
else:
if not isinstance(step, numbers.Integral):
raise wandb.Error(
"Step must be an integer, not {}".format(step))
elif step < self._steps:
warnings.warn(
"Adding to old History rows isn't currently supported. Dropping.", wandb.WandbWarning)
return
elif step == self._steps:
pass
elif self.batched:
raise wandb.Error(
"Can't log to a particular History step ({}) while in batched mode.".format(step))
else: # step > self._steps
self._write()
self._steps = step
self.update(row) | Adds or updates a history step.
If row isn't specified, will write the current state of row.
If step is specified, the row will be written only when add() is called with
a different step value.
run.history.row["duration"] = 1.0
run.history.add({"loss": 1})
=> {"duration": 1.0, "loss": 1} | Below is the the instruction that describes the task:
### Input:
Adds or updates a history step.
If row isn't specified, will write the current state of row.
If step is specified, the row will be written only when add() is called with
a different step value.
run.history.row["duration"] = 1.0
run.history.add({"loss": 1})
=> {"duration": 1.0, "loss": 1}
### Response:
def add(self, row={}, step=None):
"""Adds or updates a history step.
If row isn't specified, will write the current state of row.
If step is specified, the row will be written only when add() is called with
a different step value.
run.history.row["duration"] = 1.0
run.history.add({"loss": 1})
=> {"duration": 1.0, "loss": 1}
"""
if not isinstance(row, collections.Mapping):
raise wandb.Error('history.add expects dict-like object')
if step is None:
self.update(row)
if not self.batched:
self._write()
else:
if not isinstance(step, numbers.Integral):
raise wandb.Error(
"Step must be an integer, not {}".format(step))
elif step < self._steps:
warnings.warn(
"Adding to old History rows isn't currently supported. Dropping.", wandb.WandbWarning)
return
elif step == self._steps:
pass
elif self.batched:
raise wandb.Error(
"Can't log to a particular History step ({}) while in batched mode.".format(step))
else: # step > self._steps
self._write()
self._steps = step
self.update(row) |
def parallel_running_stats(districts_list,
n_of_processes,
n_of_districts=1,
source='pkl',
mode='',
critical=False,
save_csv=False,
save_path=''):
'''Organize parallel runs of ding0 to calculate stats
The function take all districts in a list and divide them into
n_of_processes parallel processes. For each process, the assigned districts
are given to the function process_runs() with arguments n_of_districts,
source, mode, and critical
Parameters
----------
districts_list: list of int
List with all districts to be run.
n_of_processes: int
Number of processes to run in parallel
n_of_districts: int
Number of districts to be run in each cluster given as argument to
process_stats()
source: str
If 'pkl', pickle files are read. Otherwise, ding0 is run over the districts.
mode: str
If 'MV', medium voltage stats are calculated.
If 'LV', low voltage stats are calculated.
If empty, medium and low voltage stats are calculated.
critical: bool
If True, critical nodes and branches are returned
path: str
path to save the pkl and csv files
Returns
-------
DataFrame
mv_stats: MV stats in a DataFrame.
If mode=='LV', then DataFrame is empty.
DataFrame
lv_stats: LV stats in a DataFrame.
If mode=='MV', then DataFrame is empty.
DataFrame
mv_crit_nodes: MV critical nodes stats in a DataFrame.
If mode=='LV', then DataFrame is empty.
If critical==False, then DataFrame is empty.
DataFrame
mv_crit_edges: MV critical edges stats in a DataFrame.
If mode=='LV', then DataFrame is empty.
If critical==False, then DataFrame is empty.
DataFrame
lv_crit_nodes: LV critical nodes stats in a DataFrame.
If mode=='MV', then DataFrame is empty.
If critical==False, then DataFrame is empty.
DataFrame
lv_crit_edges: LV critical edges stats in a DataFrame.
If mode=='MV', then DataFrame is empty.
If critical==False, then DataFrame is empty.
See Also
--------
process_stats
'''
start = time.time()
nw_name = os.path.join(save_path, 'ding0_grids__') # name of files prefix
#######################################################################
# Define an output queue
output_stats = mp.Queue()
#######################################################################
# Setup a list of processes that we want to run
max_dist = len(districts_list)
threat_long = floor(max_dist / n_of_processes)
if threat_long == 0:
threat_long = 1
threats = [districts_list[x:x + threat_long] for x in
range(0, len(districts_list), threat_long)]
processes = []
for districts in threats:
args = (districts, n_of_districts, source, mode, critical, nw_name, output_stats)
processes.append(mp.Process(target=process_stats, args=args))
#######################################################################
# Run processes
for p in processes:
p.start()
# Resque output_stats from processes
output = [output_stats.get() for p in processes]
# Exit the completed processes
for p in processes:
p.join()
#######################################################################
# create outputs
# Name of files
if save_csv:
nw_name = nw_name + str(districts_list[0])
if not districts_list[0] == districts_list[-1]:
nw_name = nw_name + '_to_' + str(districts_list[-1])
# concatenate all dataframes
try:
mv_stats = pd.concat(
[df for p in range(0, len(processes)) for df in output[p][0]],
axis=0)
except:
mv_stats = pd.DataFrame.from_dict({})
try:
lv_stats = pd.concat(
[df for p in range(0, len(processes)) for df in output[p][1]],
axis=0)
except:
lv_stats = pd.DataFrame.from_dict({})
try:
mv_crit_nodes = pd.concat(
[df for p in range(0, len(processes)) for df in output[p][2]],
axis=0)
except:
mv_crit_nodes = pd.DataFrame.from_dict({})
try:
mv_crit_edges = pd.concat(
[df for p in range(0, len(processes)) for df in output[p][3]],
axis=0)
except:
mv_crit_edges = pd.DataFrame.from_dict({})
try:
lv_crit_nodes = pd.concat(
[df for p in range(0, len(processes)) for df in output[p][4]],
axis=0)
except:
lv_crit_nodes = pd.DataFrame.from_dict({})
try:
lv_crit_edges = pd.concat(
[df for p in range(0, len(processes)) for df in output[p][5]],
axis=0)
except:
lv_crit_edges = pd.DataFrame.from_dict({})
# format concatenated Dataframes
if not mv_stats.empty:
mv_stats = mv_stats.fillna(0)
mv_stats = mv_stats[sorted(mv_stats.columns.tolist())]
mv_stats.sort_index(inplace=True)
if save_csv:
mv_stats.to_csv(nw_name + '_mv_stats.csv')
if not lv_stats.empty:
lv_stats = lv_stats.fillna(0)
lv_stats = lv_stats[sorted(lv_stats.columns.tolist())]
lv_stats.sort_index(inplace=True)
if save_csv:
lv_stats.to_csv(nw_name + '_lv_stats.csv')
if not mv_crit_nodes.empty:
mv_crit_nodes = mv_crit_nodes.fillna(0)
mv_crit_nodes = mv_crit_nodes[sorted(mv_crit_nodes.columns.tolist())]
mv_crit_nodes.sort_index(inplace=True)
if save_csv:
mv_crit_nodes.to_csv(nw_name + '_mv_crit_nodes.csv')
if not mv_crit_edges.empty:
mv_crit_edges = mv_crit_edges.fillna(0)
mv_crit_edges = mv_crit_edges[sorted(mv_crit_edges.columns.tolist())]
mv_crit_edges.sort_index(inplace=True)
if save_csv:
mv_crit_edges.to_csv(nw_name + '_mv_crit_edges.csv')
if not lv_crit_nodes.empty:
lv_crit_nodes = lv_crit_nodes.fillna(0)
lv_crit_nodes = lv_crit_nodes[sorted(lv_crit_nodes.columns.tolist())]
lv_crit_nodes.sort_index(inplace=True)
if save_csv:
lv_crit_nodes.to_csv(nw_name + '_lv_crit_nodes.csv')
if not lv_crit_edges.empty:
lv_crit_edges = lv_crit_edges.fillna(0)
lv_crit_edges = lv_crit_edges[sorted(lv_crit_edges.columns.tolist())]
lv_crit_edges.sort_index(inplace=True)
if save_csv:
lv_crit_edges.to_csv(nw_name + '_lv_crit_edges.csv')
#######################################################################
print('\n########################################')
print(' Elapsed time for', str(max_dist),
'MV grid districts (seconds): {}'.format(time.time() - start))
print('\n########################################')
#######################################################################
return mv_stats, lv_stats, mv_crit_nodes, mv_crit_edges, lv_crit_nodes, lv_crit_edges | Organize parallel runs of ding0 to calculate stats
The function take all districts in a list and divide them into
n_of_processes parallel processes. For each process, the assigned districts
are given to the function process_runs() with arguments n_of_districts,
source, mode, and critical
Parameters
----------
districts_list: list of int
List with all districts to be run.
n_of_processes: int
Number of processes to run in parallel
n_of_districts: int
Number of districts to be run in each cluster given as argument to
process_stats()
source: str
If 'pkl', pickle files are read. Otherwise, ding0 is run over the districts.
mode: str
If 'MV', medium voltage stats are calculated.
If 'LV', low voltage stats are calculated.
If empty, medium and low voltage stats are calculated.
critical: bool
If True, critical nodes and branches are returned
path: str
path to save the pkl and csv files
Returns
-------
DataFrame
mv_stats: MV stats in a DataFrame.
If mode=='LV', then DataFrame is empty.
DataFrame
lv_stats: LV stats in a DataFrame.
If mode=='MV', then DataFrame is empty.
DataFrame
mv_crit_nodes: MV critical nodes stats in a DataFrame.
If mode=='LV', then DataFrame is empty.
If critical==False, then DataFrame is empty.
DataFrame
mv_crit_edges: MV critical edges stats in a DataFrame.
If mode=='LV', then DataFrame is empty.
If critical==False, then DataFrame is empty.
DataFrame
lv_crit_nodes: LV critical nodes stats in a DataFrame.
If mode=='MV', then DataFrame is empty.
If critical==False, then DataFrame is empty.
DataFrame
lv_crit_edges: LV critical edges stats in a DataFrame.
If mode=='MV', then DataFrame is empty.
If critical==False, then DataFrame is empty.
See Also
--------
process_stats | Below is the the instruction that describes the task:
### Input:
Organize parallel runs of ding0 to calculate stats
The function take all districts in a list and divide them into
n_of_processes parallel processes. For each process, the assigned districts
are given to the function process_runs() with arguments n_of_districts,
source, mode, and critical
Parameters
----------
districts_list: list of int
List with all districts to be run.
n_of_processes: int
Number of processes to run in parallel
n_of_districts: int
Number of districts to be run in each cluster given as argument to
process_stats()
source: str
If 'pkl', pickle files are read. Otherwise, ding0 is run over the districts.
mode: str
If 'MV', medium voltage stats are calculated.
If 'LV', low voltage stats are calculated.
If empty, medium and low voltage stats are calculated.
critical: bool
If True, critical nodes and branches are returned
path: str
path to save the pkl and csv files
Returns
-------
DataFrame
mv_stats: MV stats in a DataFrame.
If mode=='LV', then DataFrame is empty.
DataFrame
lv_stats: LV stats in a DataFrame.
If mode=='MV', then DataFrame is empty.
DataFrame
mv_crit_nodes: MV critical nodes stats in a DataFrame.
If mode=='LV', then DataFrame is empty.
If critical==False, then DataFrame is empty.
DataFrame
mv_crit_edges: MV critical edges stats in a DataFrame.
If mode=='LV', then DataFrame is empty.
If critical==False, then DataFrame is empty.
DataFrame
lv_crit_nodes: LV critical nodes stats in a DataFrame.
If mode=='MV', then DataFrame is empty.
If critical==False, then DataFrame is empty.
DataFrame
lv_crit_edges: LV critical edges stats in a DataFrame.
If mode=='MV', then DataFrame is empty.
If critical==False, then DataFrame is empty.
See Also
--------
process_stats
### Response:
def parallel_running_stats(districts_list,
n_of_processes,
n_of_districts=1,
source='pkl',
mode='',
critical=False,
save_csv=False,
save_path=''):
'''Organize parallel runs of ding0 to calculate stats
The function take all districts in a list and divide them into
n_of_processes parallel processes. For each process, the assigned districts
are given to the function process_runs() with arguments n_of_districts,
source, mode, and critical
Parameters
----------
districts_list: list of int
List with all districts to be run.
n_of_processes: int
Number of processes to run in parallel
n_of_districts: int
Number of districts to be run in each cluster given as argument to
process_stats()
source: str
If 'pkl', pickle files are read. Otherwise, ding0 is run over the districts.
mode: str
If 'MV', medium voltage stats are calculated.
If 'LV', low voltage stats are calculated.
If empty, medium and low voltage stats are calculated.
critical: bool
If True, critical nodes and branches are returned
path: str
path to save the pkl and csv files
Returns
-------
DataFrame
mv_stats: MV stats in a DataFrame.
If mode=='LV', then DataFrame is empty.
DataFrame
lv_stats: LV stats in a DataFrame.
If mode=='MV', then DataFrame is empty.
DataFrame
mv_crit_nodes: MV critical nodes stats in a DataFrame.
If mode=='LV', then DataFrame is empty.
If critical==False, then DataFrame is empty.
DataFrame
mv_crit_edges: MV critical edges stats in a DataFrame.
If mode=='LV', then DataFrame is empty.
If critical==False, then DataFrame is empty.
DataFrame
lv_crit_nodes: LV critical nodes stats in a DataFrame.
If mode=='MV', then DataFrame is empty.
If critical==False, then DataFrame is empty.
DataFrame
lv_crit_edges: LV critical edges stats in a DataFrame.
If mode=='MV', then DataFrame is empty.
If critical==False, then DataFrame is empty.
See Also
--------
process_stats
'''
start = time.time()
nw_name = os.path.join(save_path, 'ding0_grids__') # name of files prefix
#######################################################################
# Define an output queue
output_stats = mp.Queue()
#######################################################################
# Setup a list of processes that we want to run
max_dist = len(districts_list)
threat_long = floor(max_dist / n_of_processes)
if threat_long == 0:
threat_long = 1
threats = [districts_list[x:x + threat_long] for x in
range(0, len(districts_list), threat_long)]
processes = []
for districts in threats:
args = (districts, n_of_districts, source, mode, critical, nw_name, output_stats)
processes.append(mp.Process(target=process_stats, args=args))
#######################################################################
# Run processes
for p in processes:
p.start()
# Resque output_stats from processes
output = [output_stats.get() for p in processes]
# Exit the completed processes
for p in processes:
p.join()
#######################################################################
# create outputs
# Name of files
if save_csv:
nw_name = nw_name + str(districts_list[0])
if not districts_list[0] == districts_list[-1]:
nw_name = nw_name + '_to_' + str(districts_list[-1])
# concatenate all dataframes
try:
mv_stats = pd.concat(
[df for p in range(0, len(processes)) for df in output[p][0]],
axis=0)
except:
mv_stats = pd.DataFrame.from_dict({})
try:
lv_stats = pd.concat(
[df for p in range(0, len(processes)) for df in output[p][1]],
axis=0)
except:
lv_stats = pd.DataFrame.from_dict({})
try:
mv_crit_nodes = pd.concat(
[df for p in range(0, len(processes)) for df in output[p][2]],
axis=0)
except:
mv_crit_nodes = pd.DataFrame.from_dict({})
try:
mv_crit_edges = pd.concat(
[df for p in range(0, len(processes)) for df in output[p][3]],
axis=0)
except:
mv_crit_edges = pd.DataFrame.from_dict({})
try:
lv_crit_nodes = pd.concat(
[df for p in range(0, len(processes)) for df in output[p][4]],
axis=0)
except:
lv_crit_nodes = pd.DataFrame.from_dict({})
try:
lv_crit_edges = pd.concat(
[df for p in range(0, len(processes)) for df in output[p][5]],
axis=0)
except:
lv_crit_edges = pd.DataFrame.from_dict({})
# format concatenated Dataframes
if not mv_stats.empty:
mv_stats = mv_stats.fillna(0)
mv_stats = mv_stats[sorted(mv_stats.columns.tolist())]
mv_stats.sort_index(inplace=True)
if save_csv:
mv_stats.to_csv(nw_name + '_mv_stats.csv')
if not lv_stats.empty:
lv_stats = lv_stats.fillna(0)
lv_stats = lv_stats[sorted(lv_stats.columns.tolist())]
lv_stats.sort_index(inplace=True)
if save_csv:
lv_stats.to_csv(nw_name + '_lv_stats.csv')
if not mv_crit_nodes.empty:
mv_crit_nodes = mv_crit_nodes.fillna(0)
mv_crit_nodes = mv_crit_nodes[sorted(mv_crit_nodes.columns.tolist())]
mv_crit_nodes.sort_index(inplace=True)
if save_csv:
mv_crit_nodes.to_csv(nw_name + '_mv_crit_nodes.csv')
if not mv_crit_edges.empty:
mv_crit_edges = mv_crit_edges.fillna(0)
mv_crit_edges = mv_crit_edges[sorted(mv_crit_edges.columns.tolist())]
mv_crit_edges.sort_index(inplace=True)
if save_csv:
mv_crit_edges.to_csv(nw_name + '_mv_crit_edges.csv')
if not lv_crit_nodes.empty:
lv_crit_nodes = lv_crit_nodes.fillna(0)
lv_crit_nodes = lv_crit_nodes[sorted(lv_crit_nodes.columns.tolist())]
lv_crit_nodes.sort_index(inplace=True)
if save_csv:
lv_crit_nodes.to_csv(nw_name + '_lv_crit_nodes.csv')
if not lv_crit_edges.empty:
lv_crit_edges = lv_crit_edges.fillna(0)
lv_crit_edges = lv_crit_edges[sorted(lv_crit_edges.columns.tolist())]
lv_crit_edges.sort_index(inplace=True)
if save_csv:
lv_crit_edges.to_csv(nw_name + '_lv_crit_edges.csv')
#######################################################################
print('\n########################################')
print(' Elapsed time for', str(max_dist),
'MV grid districts (seconds): {}'.format(time.time() - start))
print('\n########################################')
#######################################################################
return mv_stats, lv_stats, mv_crit_nodes, mv_crit_edges, lv_crit_nodes, lv_crit_edges |
def memory_usage(self):
"""
Get the combined memory usage of the field data and field values.
"""
data = super(Field, self).memory_usage()
values = 0
for value in self.field_values:
values += value.memory_usage()
data['field_values'] = values
return data | Get the combined memory usage of the field data and field values. | Below is the the instruction that describes the task:
### Input:
Get the combined memory usage of the field data and field values.
### Response:
def memory_usage(self):
"""
Get the combined memory usage of the field data and field values.
"""
data = super(Field, self).memory_usage()
values = 0
for value in self.field_values:
values += value.memory_usage()
data['field_values'] = values
return data |
def serve_websocket(request, port):
"""Start UWSGI websocket loop and proxy."""
env = request.environ
# Send HTTP response 101 Switch Protocol downstream
uwsgi.websocket_handshake(env['HTTP_SEC_WEBSOCKET_KEY'], env.get('HTTP_ORIGIN', ''))
# Map the websocket URL to the upstream localhost:4000x Notebook instance
parts = urlparse(request.url)
parts = parts._replace(scheme="ws", netloc="localhost:{}".format(port))
url = urlunparse(parts)
# Proxy initial connection headers
headers = [(header, value) for header, value in request.headers.items() if header.lower() in CAPTURE_CONNECT_HEADERS]
logger.info("Connecting to upstream websockets: %s, headers: %s", url, headers)
ws = ProxyClient(url, headers=headers)
ws.connect()
# TODO: Will complain loudly about already send headers - how to abort?
return httpexceptions.HTTPOk() | Start UWSGI websocket loop and proxy. | Below is the the instruction that describes the task:
### Input:
Start UWSGI websocket loop and proxy.
### Response:
def serve_websocket(request, port):
"""Start UWSGI websocket loop and proxy."""
env = request.environ
# Send HTTP response 101 Switch Protocol downstream
uwsgi.websocket_handshake(env['HTTP_SEC_WEBSOCKET_KEY'], env.get('HTTP_ORIGIN', ''))
# Map the websocket URL to the upstream localhost:4000x Notebook instance
parts = urlparse(request.url)
parts = parts._replace(scheme="ws", netloc="localhost:{}".format(port))
url = urlunparse(parts)
# Proxy initial connection headers
headers = [(header, value) for header, value in request.headers.items() if header.lower() in CAPTURE_CONNECT_HEADERS]
logger.info("Connecting to upstream websockets: %s, headers: %s", url, headers)
ws = ProxyClient(url, headers=headers)
ws.connect()
# TODO: Will complain loudly about already send headers - how to abort?
return httpexceptions.HTTPOk() |
def parse_commit_message(message: str) -> Tuple[int, str, Optional[str], Tuple[str, str, str]]:
"""
Parses a commit message according to the 1.0 version of python-semantic-release. It expects
a tag of some sort in the commit message and will use the rest of the first line as changelog
content.
:param message: A string of a commit message.
:raises UnknownCommitMessageStyleError: If it does not recognise the commit style
:return: A tuple of (level to bump, type of change, scope of change, a tuple with descriptions)
"""
parsed = re_parser.match(message)
if not parsed:
raise UnknownCommitMessageStyleError(
'Unable to parse the given commit message: {0}'.format(message)
)
subject = parsed.group('subject')
if config.get('semantic_release', 'minor_tag') in message:
level = 'feature'
level_bump = 2
if subject:
subject = subject.replace(config.get('semantic_release', 'minor_tag'.format(level)), '')
elif config.get('semantic_release', 'fix_tag') in message:
level = 'fix'
level_bump = 1
if subject:
subject = subject.replace(config.get('semantic_release', 'fix_tag'.format(level)), '')
else:
raise UnknownCommitMessageStyleError(
'Unable to parse the given commit message: {0}'.format(message)
)
if parsed.group('text') and 'BREAKING CHANGE' in parsed.group('text'):
level = 'breaking'
level_bump = 3
body, footer = parse_text_block(parsed.group('text'))
return level_bump, level, None, (subject.strip(), body.strip(), footer.strip()) | Parses a commit message according to the 1.0 version of python-semantic-release. It expects
a tag of some sort in the commit message and will use the rest of the first line as changelog
content.
:param message: A string of a commit message.
:raises UnknownCommitMessageStyleError: If it does not recognise the commit style
:return: A tuple of (level to bump, type of change, scope of change, a tuple with descriptions) | Below is the the instruction that describes the task:
### Input:
Parses a commit message according to the 1.0 version of python-semantic-release. It expects
a tag of some sort in the commit message and will use the rest of the first line as changelog
content.
:param message: A string of a commit message.
:raises UnknownCommitMessageStyleError: If it does not recognise the commit style
:return: A tuple of (level to bump, type of change, scope of change, a tuple with descriptions)
### Response:
def parse_commit_message(message: str) -> Tuple[int, str, Optional[str], Tuple[str, str, str]]:
"""
Parses a commit message according to the 1.0 version of python-semantic-release. It expects
a tag of some sort in the commit message and will use the rest of the first line as changelog
content.
:param message: A string of a commit message.
:raises UnknownCommitMessageStyleError: If it does not recognise the commit style
:return: A tuple of (level to bump, type of change, scope of change, a tuple with descriptions)
"""
parsed = re_parser.match(message)
if not parsed:
raise UnknownCommitMessageStyleError(
'Unable to parse the given commit message: {0}'.format(message)
)
subject = parsed.group('subject')
if config.get('semantic_release', 'minor_tag') in message:
level = 'feature'
level_bump = 2
if subject:
subject = subject.replace(config.get('semantic_release', 'minor_tag'.format(level)), '')
elif config.get('semantic_release', 'fix_tag') in message:
level = 'fix'
level_bump = 1
if subject:
subject = subject.replace(config.get('semantic_release', 'fix_tag'.format(level)), '')
else:
raise UnknownCommitMessageStyleError(
'Unable to parse the given commit message: {0}'.format(message)
)
if parsed.group('text') and 'BREAKING CHANGE' in parsed.group('text'):
level = 'breaking'
level_bump = 3
body, footer = parse_text_block(parsed.group('text'))
return level_bump, level, None, (subject.strip(), body.strip(), footer.strip()) |
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
return '\n\n'.join(
codeobj.pretty_str(indent=indent)
for codeobj in self.children
) | Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation. | Below is the the instruction that describes the task:
### Input:
Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
### Response:
def pretty_str(self, indent=0):
"""Return a human-readable string representation of this object.
Kwargs:
indent (int): The amount of spaces to use as indentation.
"""
return '\n\n'.join(
codeobj.pretty_str(indent=indent)
for codeobj in self.children
) |
def list_jobs(self, argument_filters=None):
'''
a method to list jobs in the scheduler
:param argument_filters: list of query criteria dictionaries for class argument keys
:return: list of jobs (which satisfy the filters)
NOTE: query criteria architecture
each item in the argument filters list must be a dictionary
which is composed of one or more key names which represent the
dotpath to a key in the job record to be queried with a value
that is a dictionary of conditional operators used to test the
value in the corresponding key in each record in the list of jobs.
eg. argument_filters = [ { '.function': { 'must_contain': [ 'debug' ] } } ]
this example filter looks in the function key of each job for a
value which contains the characters 'debug'.
NOTE: the filter method uses a query filters list structure to represent
the disjunctive normal form of a logical expression. a record is
added to the results list if any query criteria dictionary in the
list evaluates to true. within each query criteria dictionary, all
declared conditional operators must evaluate to true.
in this way, the argument_filters represents a boolean OR operator and
each criteria dictionary inside the list represents a boolean AND
operator between all keys in the dictionary.
NOTE: each query_criteria uses the architecture of query declaration in
the jsonModel.query method
the list of keys in each query_criteria is the same as the arguments for
adding a job to the scheduler
query_criteria = {
'.id': {},
'.function': {},
'.name': {},
'.dt': {},
'.interval': {},
'.month': {},
'.day': {},
'.weekday': {},
'.hour': {},
'.minute': {},
'.second': {},
'.start_date': {},
'.end_date': {}
}
conditional operators for '.id', '.function', '.name':
"byte_data": false,
"discrete_values": [ "" ],
"excluded_values": [ "" ],
"greater_than": "",
"less_than": "",
"max_length": 0,
"max_value": "",
"min_length": 0,
"min_value": "",
"must_contain": [ "" ],
"must_not_contain": [ "" ],
"contains_either": [ "" ]
conditional operators for '.dt', 'start', 'end':
"discrete_values": [ 0.0 ],
"excluded_values": [ 0.0 ],
"greater_than": 0.0,
"less_than": 0.0,
"max_value": 0.0,
"min_value": 0.0
operators for '.interval', '.month', '.day', '.weekday', '.hour', '.minute', '.second':
"discrete_values": [ 0 ],
"excluded_values": [ 0 ],
"greater_than": 0,
"less_than": 0,
"max_value": 0,
"min_value": 0
'''
title = '%s.list_jobs' % self.__class__.__name__
# validate inputs
if argument_filters:
self.fields.validate(argument_filters, '.argument_filters')
# send request to get jobs
url = '%s/scheduler/jobs' % self.url
job_list = self._get_request(url)
# construct filter function
def query_function(**kwargs):
job_details = {}
for key, value in kwargs.items():
if key in self.job_model.schema.keys():
job_details[key] = value
for query_criteria in argument_filters:
if self.job_model.query(query_criteria, job_details):
return True
return False
# construct empty list
results_list = []
# add refactored jobs to results list
for job in job_list:
job_details = self._construct_details(job)
if argument_filters:
if query_function(**job_details):
results_list.append(job_details)
else:
results_list.append(job_details)
return results_list | a method to list jobs in the scheduler
:param argument_filters: list of query criteria dictionaries for class argument keys
:return: list of jobs (which satisfy the filters)
NOTE: query criteria architecture
each item in the argument filters list must be a dictionary
which is composed of one or more key names which represent the
dotpath to a key in the job record to be queried with a value
that is a dictionary of conditional operators used to test the
value in the corresponding key in each record in the list of jobs.
eg. argument_filters = [ { '.function': { 'must_contain': [ 'debug' ] } } ]
this example filter looks in the function key of each job for a
value which contains the characters 'debug'.
NOTE: the filter method uses a query filters list structure to represent
the disjunctive normal form of a logical expression. a record is
added to the results list if any query criteria dictionary in the
list evaluates to true. within each query criteria dictionary, all
declared conditional operators must evaluate to true.
in this way, the argument_filters represents a boolean OR operator and
each criteria dictionary inside the list represents a boolean AND
operator between all keys in the dictionary.
NOTE: each query_criteria uses the architecture of query declaration in
the jsonModel.query method
the list of keys in each query_criteria is the same as the arguments for
adding a job to the scheduler
query_criteria = {
'.id': {},
'.function': {},
'.name': {},
'.dt': {},
'.interval': {},
'.month': {},
'.day': {},
'.weekday': {},
'.hour': {},
'.minute': {},
'.second': {},
'.start_date': {},
'.end_date': {}
}
conditional operators for '.id', '.function', '.name':
"byte_data": false,
"discrete_values": [ "" ],
"excluded_values": [ "" ],
"greater_than": "",
"less_than": "",
"max_length": 0,
"max_value": "",
"min_length": 0,
"min_value": "",
"must_contain": [ "" ],
"must_not_contain": [ "" ],
"contains_either": [ "" ]
conditional operators for '.dt', 'start', 'end':
"discrete_values": [ 0.0 ],
"excluded_values": [ 0.0 ],
"greater_than": 0.0,
"less_than": 0.0,
"max_value": 0.0,
"min_value": 0.0
operators for '.interval', '.month', '.day', '.weekday', '.hour', '.minute', '.second':
"discrete_values": [ 0 ],
"excluded_values": [ 0 ],
"greater_than": 0,
"less_than": 0,
"max_value": 0,
"min_value": 0 | Below is the the instruction that describes the task:
### Input:
a method to list jobs in the scheduler
:param argument_filters: list of query criteria dictionaries for class argument keys
:return: list of jobs (which satisfy the filters)
NOTE: query criteria architecture
each item in the argument filters list must be a dictionary
which is composed of one or more key names which represent the
dotpath to a key in the job record to be queried with a value
that is a dictionary of conditional operators used to test the
value in the corresponding key in each record in the list of jobs.
eg. argument_filters = [ { '.function': { 'must_contain': [ 'debug' ] } } ]
this example filter looks in the function key of each job for a
value which contains the characters 'debug'.
NOTE: the filter method uses a query filters list structure to represent
the disjunctive normal form of a logical expression. a record is
added to the results list if any query criteria dictionary in the
list evaluates to true. within each query criteria dictionary, all
declared conditional operators must evaluate to true.
in this way, the argument_filters represents a boolean OR operator and
each criteria dictionary inside the list represents a boolean AND
operator between all keys in the dictionary.
NOTE: each query_criteria uses the architecture of query declaration in
the jsonModel.query method
the list of keys in each query_criteria is the same as the arguments for
adding a job to the scheduler
query_criteria = {
'.id': {},
'.function': {},
'.name': {},
'.dt': {},
'.interval': {},
'.month': {},
'.day': {},
'.weekday': {},
'.hour': {},
'.minute': {},
'.second': {},
'.start_date': {},
'.end_date': {}
}
conditional operators for '.id', '.function', '.name':
"byte_data": false,
"discrete_values": [ "" ],
"excluded_values": [ "" ],
"greater_than": "",
"less_than": "",
"max_length": 0,
"max_value": "",
"min_length": 0,
"min_value": "",
"must_contain": [ "" ],
"must_not_contain": [ "" ],
"contains_either": [ "" ]
conditional operators for '.dt', 'start', 'end':
"discrete_values": [ 0.0 ],
"excluded_values": [ 0.0 ],
"greater_than": 0.0,
"less_than": 0.0,
"max_value": 0.0,
"min_value": 0.0
operators for '.interval', '.month', '.day', '.weekday', '.hour', '.minute', '.second':
"discrete_values": [ 0 ],
"excluded_values": [ 0 ],
"greater_than": 0,
"less_than": 0,
"max_value": 0,
"min_value": 0
### Response:
def list_jobs(self, argument_filters=None):
    '''
    a method to list jobs in the scheduler
    :param argument_filters: list of query criteria dictionaries for class argument keys
    :return: list of jobs (which satisfy the filters)
    NOTE: query criteria architecture
    each item in the argument filters list must be a dictionary
    which is composed of one or more key names which represent the
    dotpath to a key in the job record to be queried with a value
    that is a dictionary of conditional operators used to test the
    value in the corresponding key in each record in the list of jobs.
    eg. argument_filters = [ { '.function': { 'must_contain': [ 'debug' ] } } ]
    this example filter looks in the function key of each job for a
    value which contains the characters 'debug'.
    NOTE: the filter method uses a query filters list structure to represent
    the disjunctive normal form of a logical expression. a record is
    added to the results list if any query criteria dictionary in the
    list evaluates to true. within each query criteria dictionary, all
    declared conditional operators must evaluate to true.
    in this way, the argument_filters represents a boolean OR operator and
    each criteria dictionary inside the list represents a boolean AND
    operator between all keys in the dictionary.
    NOTE: each query_criteria uses the architecture of query declaration in
    the jsonModel.query method
    the list of keys in each query_criteria is the same as the arguments for
    adding a job to the scheduler
    query_criteria = {
        '.id': {},
        '.function': {},
        '.name': {},
        '.dt': {},
        '.interval': {},
        '.month': {},
        '.day': {},
        '.weekday': {},
        '.hour': {},
        '.minute': {},
        '.second': {},
        '.start_date': {},
        '.end_date': {}
    }
    conditional operators for '.id', '.function', '.name':
        "byte_data": false,
        "discrete_values": [ "" ],
        "excluded_values": [ "" ],
        "greater_than": "",
        "less_than": "",
        "max_length": 0,
        "max_value": "",
        "min_length": 0,
        "min_value": "",
        "must_contain": [ "" ],
        "must_not_contain": [ "" ],
        "contains_either": [ "" ]
    conditional operators for '.dt', 'start', 'end':
        "discrete_values": [ 0.0 ],
        "excluded_values": [ 0.0 ],
        "greater_than": 0.0,
        "less_than": 0.0,
        "max_value": 0.0,
        "min_value": 0.0
    operators for '.interval', '.month', '.day', '.weekday', '.hour', '.minute', '.second':
        "discrete_values": [ 0 ],
        "excluded_values": [ 0 ],
        "greater_than": 0,
        "less_than": 0,
        "max_value": 0,
        "min_value": 0
    '''
    # NOTE(review): `title` is assigned but never used below — presumably kept
    # for error-message labeling by the validator; confirm before removing
    title = '%s.list_jobs' % self.__class__.__name__
    # validate inputs
    if argument_filters:
        self.fields.validate(argument_filters, '.argument_filters')
    # send request to get jobs
    url = '%s/scheduler/jobs' % self.url
    job_list = self._get_request(url)
    # construct filter function
    def query_function(**kwargs):
        # restrict the record to keys declared in the job model schema
        job_details = {}
        for key, value in kwargs.items():
            if key in self.job_model.schema.keys():
                job_details[key] = value
        # disjunctive normal form: any one criteria dict matching admits the job
        for query_criteria in argument_filters:
            if self.job_model.query(query_criteria, job_details):
                return True
        return False
    # construct empty list
    results_list = []
    # add refactored jobs to results list
    for job in job_list:
        job_details = self._construct_details(job)
        if argument_filters:
            if query_function(**job_details):
                results_list.append(job_details)
        else:
            # no filters declared: every job is returned
            results_list.append(job_details)
    return results_list
def release(self):
"""Create a release
1. Perform Sanity checks on work file.
2. Copy work file to releasefile location.
3. Perform cleanup actions on releasefile.
:returns: True if successfull, False if not.
:rtype: bool
:raises: None
"""
log.info("Releasing: %s", self._workfile.get_fullpath())
ac = self.build_actions()
ac.execute(self)
s = ac.status().value
if not s == ActionStatus.SUCCESS:
ard = ActionReportDialog(ac)
ard.exec_()
pass
return s == ActionStatus.SUCCESS | Create a release
1. Perform Sanity checks on work file.
2. Copy work file to releasefile location.
3. Perform cleanup actions on releasefile.
:returns: True if successfull, False if not.
:rtype: bool
:raises: None | Below is the instruction that describes the task:
### Input:
Create a release
1. Perform Sanity checks on work file.
2. Copy work file to releasefile location.
3. Perform cleanup actions on releasefile.
:returns: True if successfull, False if not.
:rtype: bool
:raises: None
### Response:
def release(self):
    """Create a release

    1. Perform Sanity checks on work file.
    2. Copy work file to releasefile location.
    3. Perform cleanup actions on releasefile.

    :returns: True if successful, False if not.
    :rtype: bool
    :raises: None
    """
    log.info("Releasing: %s", self._workfile.get_fullpath())
    actions = self.build_actions()
    actions.execute(self)
    status = actions.status().value
    if status != ActionStatus.SUCCESS:
        # show the user which actions failed before reporting failure
        report = ActionReportDialog(actions)
        report.exec_()
    return status == ActionStatus.SUCCESS
def get_connector(database_name=None):
"""
Get a connector from its database key in setttings.
"""
from django.db import connections, DEFAULT_DB_ALIAS
# Get DB
database_name = database_name or DEFAULT_DB_ALIAS
connection = connections[database_name]
engine = connection.settings_dict['ENGINE']
connector_settings = settings.CONNECTORS.get(database_name, {})
connector_path = connector_settings.get('CONNECTOR', CONNECTOR_MAPPING[engine])
connector_module_path = '.'.join(connector_path.split('.')[:-1])
module = import_module(connector_module_path)
connector_name = connector_path.split('.')[-1]
connector = getattr(module, connector_name)
return connector(database_name, **connector_settings) | Get a connector from its database key in setttings. | Below is the the instruction that describes the task:
### Input:
Get a connector from its database key in setttings.
### Response:
def get_connector(database_name=None):
    """
    Resolve and instantiate the backup connector for a database settings key.
    """
    from django.db import connections, DEFAULT_DB_ALIAS

    # fall back to the default alias when no database key is given
    db_alias = database_name or DEFAULT_DB_ALIAS
    engine = connections[db_alias].settings_dict['ENGINE']
    conf = settings.CONNECTORS.get(db_alias, {})
    # an explicit CONNECTOR setting wins over the engine-based default
    dotted_path = conf.get('CONNECTOR', CONNECTOR_MAPPING[engine])
    module_path, _, class_name = dotted_path.rpartition('.')
    connector_cls = getattr(import_module(module_path), class_name)
    return connector_cls(db_alias, **conf)
def make_registryitem_valuename(valuename, condition='is', negate=False, preserve_case=False):
"""
Create a node for RegistryItem/ValueName
:return: A IndicatorItem represented as an Element node
"""
document = 'RegistryItem'
search = 'RegistryItem/ValueName'
content_type = 'string'
content = valuename
ii_node = ioc_api.make_indicatoritem_node(condition, document, search, content_type, content,
negate=negate, preserve_case=preserve_case)
return ii_node | Create a node for RegistryItem/ValueName
:return: A IndicatorItem represented as an Element node | Below is the the instruction that describes the task:
### Input:
Create a node for RegistryItem/ValueName
:return: A IndicatorItem represented as an Element node
### Response:
def make_registryitem_valuename(valuename, condition='is', negate=False, preserve_case=False):
    """
    Build an IndicatorItem node targeting RegistryItem/ValueName.

    :return: A IndicatorItem represented as an Element node
    """
    # document/search/content_type are fixed for this indicator term
    return ioc_api.make_indicatoritem_node(
        condition,
        'RegistryItem',
        'RegistryItem/ValueName',
        'string',
        valuename,
        negate=negate,
        preserve_case=preserve_case,
    )
def configure_volume(before_change=lambda: None, after_change=lambda: None):
'''Set up storage (or don't) according to the charm's volume configuration.
Returns the mount point or "ephemeral". before_change and after_change
are optional functions to be called if the volume configuration changes.
'''
config = get_config()
if not config:
hookenv.log('Failed to read volume configuration', hookenv.CRITICAL)
raise VolumeConfigurationError()
if config['ephemeral']:
if os.path.ismount(config['mountpoint']):
before_change()
unmount_volume(config)
after_change()
return 'ephemeral'
else:
# persistent storage
if os.path.ismount(config['mountpoint']):
mounts = dict(managed_mounts())
if mounts.get(config['mountpoint']) != config['device']:
before_change()
unmount_volume(config)
mount_volume(config)
after_change()
else:
before_change()
mount_volume(config)
after_change()
return config['mountpoint'] | Set up storage (or don't) according to the charm's volume configuration.
Returns the mount point or "ephemeral". before_change and after_change
are optional functions to be called if the volume configuration changes. | Below is the the instruction that describes the task:
### Input:
Set up storage (or don't) according to the charm's volume configuration.
Returns the mount point or "ephemeral". before_change and after_change
are optional functions to be called if the volume configuration changes.
### Response:
def configure_volume(before_change=lambda: None, after_change=lambda: None):
    '''Set up storage (or don't) according to the charm's volume configuration.
    Returns the mount point or "ephemeral". before_change and after_change
    are optional functions to be called if the volume configuration changes.
    '''
    config = get_config()
    if not config:
        hookenv.log('Failed to read volume configuration', hookenv.CRITICAL)
        raise VolumeConfigurationError()
    if config['ephemeral']:
        # ephemeral storage requested: tear down any managed mount that is
        # still in place so the unit falls back to instance-local disk
        if os.path.ismount(config['mountpoint']):
            before_change()
            unmount_volume(config)
            after_change()
        return 'ephemeral'
    else:
        # persistent storage
        if os.path.ismount(config['mountpoint']):
            # something is already mounted there; remount only if it is not
            # the device this configuration manages
            mounts = dict(managed_mounts())
            if mounts.get(config['mountpoint']) != config['device']:
                before_change()
                unmount_volume(config)
                mount_volume(config)
                after_change()
        else:
            # nothing mounted yet: mount the configured device
            before_change()
            mount_volume(config)
            after_change()
        return config['mountpoint']
def reward_bonus(self, assignment_id, amount, reason):
"""Reward the Turker with a bonus."""
from psiturk.amt_services import MTurkServices
self.amt_services = MTurkServices(
self.aws_access_key_id,
self.aws_secret_access_key,
self.config.getboolean(
'Shell Parameters', 'launch_in_sandbox_mode'))
return self.amt_services.bonus_worker(assignment_id, amount, reason) | Reward the Turker with a bonus. | Below is the the instruction that describes the task:
### Input:
Reward the Turker with a bonus.
### Response:
def reward_bonus(self, assignment_id, amount, reason):
    """Grant a bonus payment to the Turker for the given assignment."""
    from psiturk.amt_services import MTurkServices

    sandbox = self.config.getboolean('Shell Parameters',
                                     'launch_in_sandbox_mode')
    self.amt_services = MTurkServices(self.aws_access_key_id,
                                      self.aws_secret_access_key,
                                      sandbox)
    return self.amt_services.bonus_worker(assignment_id, amount, reason)
def fourier(x, N):
"""Fourier approximation with N terms"""
term = 0.
for n in range(1, N, 2):
term += (1. / n) * math.sin(n * math.pi * x / L)
return (4. / (math.pi)) * term | Fourier approximation with N terms | Below is the the instruction that describes the task:
### Input:
Fourier approximation with N terms
### Response:
def fourier(x, N, period=None):
    """Fourier (sine-series) approximation of a square wave with N terms.

    Sums the odd harmonics (4/pi) * sin(n*pi*x/L) / n for n = 1, 3, ..., N-1.

    :param x: point at which to evaluate the series
    :param N: exclusive upper bound on the harmonic index; only odd n < N contribute
    :param period: half-period L of the wave; defaults to the module-level
        global ``L`` for backward compatibility with existing callers
    :return: value of the truncated series at x
    """
    # generalization: the half-period used to be hard-wired to the global L
    half_period = L if period is None else period
    total = 0.
    for n in range(1, N, 2):
        total += (1. / n) * math.sin(n * math.pi * x / half_period)
    return (4. / math.pi) * total
def init(opts):
'''
Required.
Can be used to initialize the server connection.
'''
try:
DETAILS['server'] = SSHConnection(host=__opts__['proxy']['host'],
username=__opts__['proxy']['username'],
password=__opts__['proxy']['password'])
out, err = DETAILS['server'].sendline('help')
DETAILS['initialized'] = True
except TerminalException as e:
log.error(e)
return False | Required.
Can be used to initialize the server connection. | Below is the the instruction that describes the task:
### Input:
Required.
Can be used to initialize the server connection.
### Response:
def init(opts):
    '''
    Required.
    Can be used to initialize the server connection.

    :param opts: proxy configuration options (host/username/password under 'proxy')
    :return: True if the connection was established, False on terminal error
    '''
    try:
        DETAILS['server'] = SSHConnection(host=__opts__['proxy']['host'],
                                          username=__opts__['proxy']['username'],
                                          password=__opts__['proxy']['password'])
        # prime the session; the output is discarded, this only verifies
        # that the remote shell responds
        out, err = DETAILS['server'].sendline('help')
        DETAILS['initialized'] = True
        # bug fix: the success path previously fell through and returned
        # None (falsy), making success indistinguishable from failure
        return True
    except TerminalException as e:
        log.error(e)
        return False
def process_raw(self, raw: str) -> None:
"""Pre-process raw string.
Prepare parameters to work with APIItems.
"""
raw_params = dict(group.split('=', 1) for group in raw.splitlines())
super().process_raw(raw_params) | Pre-process raw string.
Prepare parameters to work with APIItems. | Below is the the instruction that describes the task:
### Input:
Pre-process raw string.
Prepare parameters to work with APIItems.
### Response:
def process_raw(self, raw: str) -> None:
    """Pre-process raw string.

    Splits each line at the first '=' into a key/value mapping so the
    parameters can be handled as APIItems.
    """
    raw_params = {}
    for line in raw.splitlines():
        key, value = line.split('=', 1)
        raw_params[key] = value
    super().process_raw(raw_params)
def to_json(value, **kwargs):
"""Convert array to JSON list
nan values are converted to string 'nan', inf values to 'inf'.
"""
def _recurse_list(val):
if val and isinstance(val[0], list):
return [_recurse_list(v) for v in val]
return [str(v) if np.isnan(v) or np.isinf(v) else v for v in val]
return _recurse_list(value.tolist()) | Convert array to JSON list
nan values are converted to string 'nan', inf values to 'inf'. | Below is the instruction that describes the task:
### Input:
Convert array to JSON list
nan values are converted to string 'nan', inf values to 'inf'.
### Response:
def to_json(value, **kwargs):
    """Convert array to JSON list

    nan values are converted to string 'nan', inf values to 'inf'.
    """
    def _serialize(rows):
        # descend until the elements are scalars, then stringify non-finite values
        if rows and isinstance(rows[0], list):
            return [_serialize(row) for row in rows]
        return [item if not (np.isnan(item) or np.isinf(item)) else str(item)
                for item in rows]
    return _serialize(value.tolist())
def smart_if(parser, token):
'''
A smarter {% if %} tag for django templates.
While retaining current Django functionality, it also handles equality,
greater than and less than operators. Some common case examples::
{% if articles|length >= 5 %}...{% endif %}
{% if "ifnotequal tag" != "beautiful" %}...{% endif %}
Arguments and operators _must_ have a space between them, so
``{% if 1>2 %}`` is not a valid smart if tag.
All supported operators are: ``or``, ``and``, ``in``, ``=`` (or ``==``),
``!=``, ``>``, ``>=``, ``<`` and ``<=``.
'''
bits = token.split_contents()[1:]
var = TemplateIfParser(parser, bits).parse()
nodelist_true = parser.parse(('else', 'endif'))
token = parser.next_token()
if token.contents == 'else':
nodelist_false = parser.parse(('endif',))
parser.delete_first_token()
else:
nodelist_false = None
return SmartIfNode(var, nodelist_true, nodelist_false) | A smarter {% if %} tag for django templates.
While retaining current Django functionality, it also handles equality,
greater than and less than operators. Some common case examples::
{% if articles|length >= 5 %}...{% endif %}
{% if "ifnotequal tag" != "beautiful" %}...{% endif %}
Arguments and operators _must_ have a space between them, so
``{% if 1>2 %}`` is not a valid smart if tag.
All supported operators are: ``or``, ``and``, ``in``, ``=`` (or ``==``),
``!=``, ``>``, ``>=``, ``<`` and ``<=``. | Below is the the instruction that describes the task:
### Input:
A smarter {% if %} tag for django templates.
While retaining current Django functionality, it also handles equality,
greater than and less than operators. Some common case examples::
{% if articles|length >= 5 %}...{% endif %}
{% if "ifnotequal tag" != "beautiful" %}...{% endif %}
Arguments and operators _must_ have a space between them, so
``{% if 1>2 %}`` is not a valid smart if tag.
All supported operators are: ``or``, ``and``, ``in``, ``=`` (or ``==``),
``!=``, ``>``, ``>=``, ``<`` and ``<=``.
### Response:
def smart_if(parser, token):
    '''
    A smarter {% if %} tag for django templates.

    While retaining current Django functionality, it also handles equality,
    greater than and less than operators. Some common case examples::

        {% if articles|length >= 5 %}...{% endif %}
        {% if "ifnotequal tag" != "beautiful" %}...{% endif %}

    Arguments and operators _must_ have a space between them, so
    ``{% if 1>2 %}`` is not a valid smart if tag.

    All supported operators are: ``or``, ``and``, ``in``, ``=`` (or ``==``),
    ``!=``, ``>``, ``>=``, ``<`` and ``<=``.
    '''
    # everything after the tag name itself is the boolean expression
    bits = token.split_contents()[1:]
    condition = TemplateIfParser(parser, bits).parse()
    nodelist_true = parser.parse(('else', 'endif'))
    nodelist_false = None
    if parser.next_token().contents == 'else':
        nodelist_false = parser.parse(('endif',))
        parser.delete_first_token()
    return SmartIfNode(condition, nodelist_true, nodelist_false)
def merge_duplicates(model_name, keep_descriptors=False):
"""
Identifies repeated experimental values and returns mean values for those
data along with their standard deviation. Only aggregates experimental
values that have been acquired at the same temperature and pressure.
Parameters
----------
model_name: dev_model
the dev_model object to be interrogated
keep_descriptors: boolean, default False
if True descriptors will be included in the output DataFrame
Returns
-----------
out: dataframe
pandas DataFrame of the original data where repeated measurements
have been averaged and their variance stored in a separate column
"""
model_outputs = -6 + model_name.Data_summary.shape[0]
devmodel = model_name
cols = devmodel.Data.columns
if (devmodel.Data.iloc[:, -(4 + model_outputs):-4].max() < 700).all():
for output_index in range(model_outputs):
devmodel.Data.iloc[:, -(5 + output_index)] = \
devmodel.Data.iloc[:, -(5 + output_index)].apply(
lambda x: exp(float(x)))
output_val = pd.DataFrame()
output_xtd = pd.DataFrame()
for output_index in range(model_outputs):
val = devmodel.Data.groupby(['smiles-cation', 'smiles-anion']
)[cols[-(5 + output_index)]].mean().\
reset_index()
xtd = devmodel.Data.groupby(['smiles-cation', 'smiles-anion']
)[cols[-(5 + output_index)]].std().\
reset_index()
if output_index == 0:
output_val = val
output_xtd = xtd
else:
output_val = pd.merge(output_val, val)
output_xtd = pd.merge(output_xtd, xtd)
size = devmodel.Data.groupby(['smiles-cation', 'smiles-anion']
)[cols[-(5 + output_index)]].count().\
reset_index()
cations = devmodel.Data.groupby(['smiles-cation', 'smiles-anion']
)['name-cation'].first().reset_index()
anions = devmodel.Data.groupby(['smiles-cation', 'smiles-anion']
)['name-anion'].first().reset_index()
size.columns.values[2] = "count"
salts = (devmodel.Data["smiles-cation"] + "." + devmodel.
Data["smiles-anion"]).unique()
print("Identified {} unique salts in {} datapoints".
format(len(salts), devmodel.Data.shape[0]))
out = pd.merge(output_val, output_xtd,
on=['smiles-cation', 'smiles-anion'],
suffixes=['_mean', '_std'])
out = pd.merge(out, size)
out = pd.merge(out, cations)
out = pd.merge(out, anions)
if keep_descriptors:
cationDescriptors = load_data("cationDescriptors.csv")
cationDescriptors.columns = [str(col) + '-cation' for
col in cationDescriptors.columns]
anionDescriptors = load_data("anionDescriptors.csv")
anionDescriptors.columns = [str(col) + '-anion' for
col in anionDescriptors.columns]
new_df = pd.merge(cationDescriptors, out,
on=["name-cation", "smiles-cation"], how="right")
new_df = pd.merge(anionDescriptors, new_df,
on=["name-anion", "smiles-anion"], how="right")
out = new_df
return out | Identifies repeated experimental values and returns mean values for those
data along with their standard deviation. Only aggregates experimental
values that have been acquired at the same temperature and pressure.
Parameters
----------
model_name: dev_model
the dev_model object to be interrogated
keep_descriptors: boolean, default False
if True descriptors will be included in the output DataFrame
Returns
-----------
out: dataframe
pandas DataFrame of the original data where repeated measurements
have been averaged and their variance stored in a separate column | Below is the the instruction that describes the task:
### Input:
Identifies repeated experimental values and returns mean values for those
data along with their standard deviation. Only aggregates experimental
values that have been acquired at the same temperature and pressure.
Parameters
----------
model_name: dev_model
the dev_model object to be interrogated
keep_descriptors: boolean, default False
if True descriptors will be included in the output DataFrame
Returns
-----------
out: dataframe
pandas DataFrame of the original data where repeated measurements
have been averaged and their variance stored in a separate column
### Response:
def merge_duplicates(model_name, keep_descriptors=False):
    """
    Identifies repeated experimental values and returns mean values for those
    data along with their standard deviation. Only aggregates experimental
    values that have been acquired at the same temperature and pressure.

    Parameters
    ----------
    model_name: dev_model
        the dev_model object to be interrogated
    keep_descriptors: boolean, default False
        if True descriptors will be included in the output DataFrame

    Returns
    -----------
    out: dataframe
        pandas DataFrame of the original data where repeated measurements
        have been averaged and their variance stored in a separate column
    """
    # NOTE(review): assumes Data_summary carries 6 non-output rows, so the
    # remainder are model outputs — confirm against dev_model's layout
    model_outputs = -6 + model_name.Data_summary.shape[0]
    devmodel = model_name
    cols = devmodel.Data.columns
    # heuristic: if every output column maxes below 700 the values are
    # presumably stored on a log scale and are exponentiated back — TODO confirm
    if (devmodel.Data.iloc[:, -(4 + model_outputs):-4].max() < 700).all():
        for output_index in range(model_outputs):
            devmodel.Data.iloc[:, -(5 + output_index)] = \
                devmodel.Data.iloc[:, -(5 + output_index)].apply(
                    lambda x: exp(float(x)))
    output_val = pd.DataFrame()
    output_xtd = pd.DataFrame()
    # per-output mean and standard deviation, grouped by the salt (cation+anion)
    for output_index in range(model_outputs):
        val = devmodel.Data.groupby(['smiles-cation', 'smiles-anion']
                                    )[cols[-(5 + output_index)]].mean().\
            reset_index()
        xtd = devmodel.Data.groupby(['smiles-cation', 'smiles-anion']
                                    )[cols[-(5 + output_index)]].std().\
            reset_index()
        if output_index == 0:
            output_val = val
            output_xtd = xtd
        else:
            output_val = pd.merge(output_val, val)
            output_xtd = pd.merge(output_xtd, xtd)
    # NOTE(review): uses the loop variable after the loop, so the count is
    # taken from the *last* output column; all outputs share the same grouping
    # so the counts should be identical — verify
    size = devmodel.Data.groupby(['smiles-cation', 'smiles-anion']
                                 )[cols[-(5 + output_index)]].count().\
        reset_index()
    cations = devmodel.Data.groupby(['smiles-cation', 'smiles-anion']
                                    )['name-cation'].first().reset_index()
    anions = devmodel.Data.groupby(['smiles-cation', 'smiles-anion']
                                   )['name-anion'].first().reset_index()
    size.columns.values[2] = "count"
    # unique salts are identified by the joined cation.anion SMILES strings
    salts = (devmodel.Data["smiles-cation"] + "." + devmodel.
             Data["smiles-anion"]).unique()
    print("Identified {} unique salts in {} datapoints".
          format(len(salts), devmodel.Data.shape[0]))
    # assemble the aggregate frame: means + stds + counts + display names
    out = pd.merge(output_val, output_xtd,
                   on=['smiles-cation', 'smiles-anion'],
                   suffixes=['_mean', '_std'])
    out = pd.merge(out, size)
    out = pd.merge(out, cations)
    out = pd.merge(out, anions)
    if keep_descriptors:
        # right-join descriptor tables so every aggregated salt is retained
        cationDescriptors = load_data("cationDescriptors.csv")
        cationDescriptors.columns = [str(col) + '-cation' for
                                     col in cationDescriptors.columns]
        anionDescriptors = load_data("anionDescriptors.csv")
        anionDescriptors.columns = [str(col) + '-anion' for
                                    col in anionDescriptors.columns]
        new_df = pd.merge(cationDescriptors, out,
                          on=["name-cation", "smiles-cation"], how="right")
        new_df = pd.merge(anionDescriptors, new_df,
                          on=["name-anion", "smiles-anion"], how="right")
        out = new_df
    return out
def _combine_to_jointcaller(processed):
"""Add joint calling information to variants, while collapsing independent regions.
"""
by_vrn_file = collections.OrderedDict()
for data in (x[0] for x in processed):
key = (tz.get_in(("config", "algorithm", "jointcaller"), data), data["vrn_file"])
if key not in by_vrn_file:
by_vrn_file[key] = []
by_vrn_file[key].append(data)
out = []
for grouped_data in by_vrn_file.values():
cur = grouped_data[0]
out.append([cur])
return out | Add joint calling information to variants, while collapsing independent regions. | Below is the the instruction that describes the task:
### Input:
Add joint calling information to variants, while collapsing independent regions.
### Response:
def _combine_to_jointcaller(processed):
    """Add joint calling information to variants, while collapsing independent regions.
    """
    # group samples by (jointcaller, vrn_file), preserving first-seen order
    grouped = collections.OrderedDict()
    for data in (item[0] for item in processed):
        key = (tz.get_in(("config", "algorithm", "jointcaller"), data),
               data["vrn_file"])
        grouped.setdefault(key, []).append(data)
    # keep only the first sample from each group
    return [[group[0]] for group in grouped.values()]
def get_subnets(vpc, **conn):
"""Gets the VPC Subnets"""
subnets = describe_subnets(Filters=[{"Name": "vpc-id", "Values": [vpc["id"]]}], **conn)
s_ids = []
for s in subnets:
s_ids.append(s["SubnetId"])
return s_ids | Gets the VPC Subnets | Below is the the instruction that describes the task:
### Input:
Gets the VPC Subnets
### Response:
def get_subnets(vpc, **conn):
    """Gets the VPC Subnets"""
    vpc_filter = [{"Name": "vpc-id", "Values": [vpc["id"]]}]
    return [subnet["SubnetId"]
            for subnet in describe_subnets(Filters=vpc_filter, **conn)]
def history_mean(self, nb=5):
"""Return the mean on the <nb> values in the history.
"""
_, v = zip(*self._history)
return sum(v[-nb:]) / float(v[-1] - v[-nb]) | Return the mean on the <nb> values in the history. | Below is the the instruction that describes the task:
### Input:
Return the mean on the <nb> values in the history.
### Response:
def history_mean(self, nb=5):
    """Return the time-averaged mean of the last <nb> values in the history.

    Each history entry is a (timestamp, value) pair; the sum of the last
    ``nb`` values is divided by the time elapsed between the first and last
    of those samples.
    """
    t, v = zip(*self._history)
    # BUG FIX: the original discarded the timestamps (`_, v = zip(...)`) and
    # divided by a difference of *values* (v[-1] - v[-nb]); the denominator
    # is presumably meant to be the elapsed time t[-1] - t[-nb] — the
    # structure of the expression matches a per-second rate.
    return sum(v[-nb:]) / float(t[-1] - t[-nb])
def read(cls, iprot):
'''
Read a new object from the given input protocol and return the object.
:type iprot: thryft.protocol._input_protocol._InputProtocol
:rtype: pastpy.gen.database.impl.dummy.dummy_database_configuration.DummyDatabaseConfiguration
'''
init_kwds = {}
iprot.read_struct_begin()
while True:
ifield_name, ifield_type, _ifield_id = iprot.read_field_begin()
if ifield_type == 0: # STOP
break
elif ifield_name == 'images_per_object':
init_kwds['images_per_object'] = iprot.read_i32()
elif ifield_name == 'objects':
init_kwds['objects'] = iprot.read_i32()
iprot.read_field_end()
iprot.read_struct_end()
return cls(**init_kwds) | Read a new object from the given input protocol and return the object.
:type iprot: thryft.protocol._input_protocol._InputProtocol
:rtype: pastpy.gen.database.impl.dummy.dummy_database_configuration.DummyDatabaseConfiguration | Below is the the instruction that describes the task:
### Input:
Read a new object from the given input protocol and return the object.
:type iprot: thryft.protocol._input_protocol._InputProtocol
:rtype: pastpy.gen.database.impl.dummy.dummy_database_configuration.DummyDatabaseConfiguration
### Response:
def read(cls, iprot):
'''
Read a new object from the given input protocol and return the object.
:type iprot: thryft.protocol._input_protocol._InputProtocol
:rtype: pastpy.gen.database.impl.dummy.dummy_database_configuration.DummyDatabaseConfiguration
'''
init_kwds = {}
iprot.read_struct_begin()
while True:
ifield_name, ifield_type, _ifield_id = iprot.read_field_begin()
if ifield_type == 0: # STOP
break
elif ifield_name == 'images_per_object':
init_kwds['images_per_object'] = iprot.read_i32()
elif ifield_name == 'objects':
init_kwds['objects'] = iprot.read_i32()
iprot.read_field_end()
iprot.read_struct_end()
return cls(**init_kwds) |
def make_breakpoint(self, event_type, *args, **kwargs):
"""
Creates and adds a breakpoint which would trigger on `event_type`. Additional arguments are passed to the
:class:`BP` constructor.
:return: The created breakpoint, so that it can be removed later.
"""
bp = BP(*args, **kwargs)
self.add_breakpoint(event_type, bp)
return bp | Creates and adds a breakpoint which would trigger on `event_type`. Additional arguments are passed to the
:class:`BP` constructor.
:return: The created breakpoint, so that it can be removed later. | Below is the the instruction that describes the task:
### Input:
Creates and adds a breakpoint which would trigger on `event_type`. Additional arguments are passed to the
:class:`BP` constructor.
:return: The created breakpoint, so that it can be removed later.
### Response:
def make_breakpoint(self, event_type, *args, **kwargs):
"""
Creates and adds a breakpoint which would trigger on `event_type`. Additional arguments are passed to the
:class:`BP` constructor.
:return: The created breakpoint, so that it can be removed later.
"""
bp = BP(*args, **kwargs)
self.add_breakpoint(event_type, bp)
return bp |
def _create_subplots(self, fig, layout):
"""
Create suplots and return axs
"""
num_panels = len(layout)
axsarr = np.empty((self.nrow, self.ncol), dtype=object)
# Create axes
i = 1
for row in range(self.nrow):
for col in range(self.ncol):
axsarr[row, col] = fig.add_subplot(self.nrow, self.ncol, i)
i += 1
# Rearrange axes
# They are ordered to match the positions in the layout table
if self.dir == 'h':
order = 'C'
if not self.as_table:
axsarr = axsarr[::-1]
elif self.dir == 'v':
order = 'F'
if not self.as_table:
axsarr = np.array([row[::-1] for row in axsarr])
axs = axsarr.ravel(order)
# Delete unused axes
for ax in axs[num_panels:]:
fig.delaxes(ax)
axs = axs[:num_panels]
return axs | Create suplots and return axs | Below is the the instruction that describes the task:
### Input:
Create suplots and return axs
### Response:
def _create_subplots(self, fig, layout):
"""
Create suplots and return axs
"""
num_panels = len(layout)
axsarr = np.empty((self.nrow, self.ncol), dtype=object)
# Create axes
i = 1
for row in range(self.nrow):
for col in range(self.ncol):
axsarr[row, col] = fig.add_subplot(self.nrow, self.ncol, i)
i += 1
# Rearrange axes
# They are ordered to match the positions in the layout table
if self.dir == 'h':
order = 'C'
if not self.as_table:
axsarr = axsarr[::-1]
elif self.dir == 'v':
order = 'F'
if not self.as_table:
axsarr = np.array([row[::-1] for row in axsarr])
axs = axsarr.ravel(order)
# Delete unused axes
for ax in axs[num_panels:]:
fig.delaxes(ax)
axs = axs[:num_panels]
return axs |
def salt_ssh_create_dirs(self):
"""
Creates the `salt-ssh` required directory structure
"""
logger.debug('Creating salt-ssh dirs into: %s', self.settings_dir)
utils.create_dir(os.path.join(self.settings_dir, 'salt'))
utils.create_dir(os.path.join(self.settings_dir, 'pillar'))
utils.create_dir(os.path.join(self.settings_dir, 'etc', 'salt'))
utils.create_dir(os.path.join(self.settings_dir, 'var', 'cache', 'salt'))
utils.create_dir(os.path.join(self.settings_dir, 'var', 'log', 'salt')) | Creates the `salt-ssh` required directory structure | Below is the the instruction that describes the task:
### Input:
Creates the `salt-ssh` required directory structure
### Response:
def salt_ssh_create_dirs(self):
"""
Creates the `salt-ssh` required directory structure
"""
logger.debug('Creating salt-ssh dirs into: %s', self.settings_dir)
utils.create_dir(os.path.join(self.settings_dir, 'salt'))
utils.create_dir(os.path.join(self.settings_dir, 'pillar'))
utils.create_dir(os.path.join(self.settings_dir, 'etc', 'salt'))
utils.create_dir(os.path.join(self.settings_dir, 'var', 'cache', 'salt'))
utils.create_dir(os.path.join(self.settings_dir, 'var', 'log', 'salt')) |
def _subnets(proto='inet', interfaces_=None):
'''
Returns a list of subnets to which the host belongs
'''
if interfaces_ is None:
ifaces = interfaces()
elif isinstance(interfaces_, list):
ifaces = {}
for key, value in six.iteritems(interfaces()):
if key in interfaces_:
ifaces[key] = value
else:
ifaces = {interfaces_: interfaces().get(interfaces_, {})}
ret = set()
if proto == 'inet':
subnet = 'netmask'
dflt_cidr = 32
elif proto == 'inet6':
subnet = 'prefixlen'
dflt_cidr = 128
else:
log.error('Invalid proto %s calling subnets()', proto)
return
for ip_info in six.itervalues(ifaces):
addrs = ip_info.get(proto, [])
addrs.extend([addr for addr in ip_info.get('secondary', []) if addr.get('type') == proto])
for intf in addrs:
if subnet in intf:
intf = ipaddress.ip_interface('{0}/{1}'.format(intf['address'], intf[subnet]))
else:
intf = ipaddress.ip_interface('{0}/{1}'.format(intf['address'], dflt_cidr))
if not intf.is_loopback:
ret.add(intf.network)
return [six.text_type(net) for net in sorted(ret)] | Returns a list of subnets to which the host belongs | Below is the the instruction that describes the task:
### Input:
Returns a list of subnets to which the host belongs
### Response:
def _subnets(proto='inet', interfaces_=None):
'''
Returns a list of subnets to which the host belongs
'''
if interfaces_ is None:
ifaces = interfaces()
elif isinstance(interfaces_, list):
ifaces = {}
for key, value in six.iteritems(interfaces()):
if key in interfaces_:
ifaces[key] = value
else:
ifaces = {interfaces_: interfaces().get(interfaces_, {})}
ret = set()
if proto == 'inet':
subnet = 'netmask'
dflt_cidr = 32
elif proto == 'inet6':
subnet = 'prefixlen'
dflt_cidr = 128
else:
log.error('Invalid proto %s calling subnets()', proto)
return
for ip_info in six.itervalues(ifaces):
addrs = ip_info.get(proto, [])
addrs.extend([addr for addr in ip_info.get('secondary', []) if addr.get('type') == proto])
for intf in addrs:
if subnet in intf:
intf = ipaddress.ip_interface('{0}/{1}'.format(intf['address'], intf[subnet]))
else:
intf = ipaddress.ip_interface('{0}/{1}'.format(intf['address'], dflt_cidr))
if not intf.is_loopback:
ret.add(intf.network)
return [six.text_type(net) for net in sorted(ret)] |
def getJsonFromApi(view, request):
"""Return json from querying Web Api
Args:
view: django view function.
request: http request object got from django.
Returns: json format dictionary
"""
jsonText = view(request)
jsonText = json.loads(jsonText.content.decode('utf-8'))
return jsonText | Return json from querying Web Api
Args:
view: django view function.
request: http request object got from django.
Returns: json format dictionary | Below is the the instruction that describes the task:
### Input:
Return json from querying Web Api
Args:
view: django view function.
request: http request object got from django.
Returns: json format dictionary
### Response:
def getJsonFromApi(view, request):
"""Return json from querying Web Api
Args:
view: django view function.
request: http request object got from django.
Returns: json format dictionary
"""
jsonText = view(request)
jsonText = json.loads(jsonText.content.decode('utf-8'))
return jsonText |
def diff(self):
"""Calculate difference between fs and db."""
done = set(self.done)
return [name for name in self.todo if name not in done] | Calculate difference between fs and db. | Below is the the instruction that describes the task:
### Input:
Calculate difference between fs and db.
### Response:
def diff(self):
"""Calculate difference between fs and db."""
done = set(self.done)
return [name for name in self.todo if name not in done] |
def route(self, uri, methods=frozenset({'GET'}), host=None,
strict_slashes=False, stream=False, websocket=False):
'''Decorate a function to be registered as a route
:param uri: path of the URL
:param methods: list or tuple of methods allowed
:param host:
:param strict_slashes:
:param stream:
:return: decorated function
'''
# Fix case where the user did not prefix the URL with a /
# and will probably get confused as to why it's not working
if not uri.startswith('/'):
uri = '/' + uri
def response(handler):
if websocket:
handler.is_websocket = True
elif stream:
handler.is_stream = True
self.router.add(uri=uri, methods=methods, handler=handler,
host=host, strict_slashes=strict_slashes)
return handler
return response | Decorate a function to be registered as a route
:param uri: path of the URL
:param methods: list or tuple of methods allowed
:param host:
:param strict_slashes:
:param stream:
:return: decorated function | Below is the the instruction that describes the task:
### Input:
Decorate a function to be registered as a route
:param uri: path of the URL
:param methods: list or tuple of methods allowed
:param host:
:param strict_slashes:
:param stream:
:return: decorated function
### Response:
def route(self, uri, methods=frozenset({'GET'}), host=None,
strict_slashes=False, stream=False, websocket=False):
'''Decorate a function to be registered as a route
:param uri: path of the URL
:param methods: list or tuple of methods allowed
:param host:
:param strict_slashes:
:param stream:
:return: decorated function
'''
# Fix case where the user did not prefix the URL with a /
# and will probably get confused as to why it's not working
if not uri.startswith('/'):
uri = '/' + uri
def response(handler):
if websocket:
handler.is_websocket = True
elif stream:
handler.is_stream = True
self.router.add(uri=uri, methods=methods, handler=handler,
host=host, strict_slashes=strict_slashes)
return handler
return response |
def run(self, inputRecord):
"""
Run one iteration of this model.
:param inputRecord: (object)
A record object formatted according to
:meth:`~nupic.data.record_stream.RecordStreamIface.getNextRecord` or
:meth:`~nupic.data.record_stream.RecordStreamIface.getNextRecordDict`
result format.
:returns: (:class:`~nupic.frameworks.opf.opf_utils.ModelResult`)
An ModelResult namedtuple. The contents of ModelResult.inferences
depends on the the specific inference type of this model, which
can be queried by :meth:`.getInferenceType`.
"""
# 0-based prediction index for ModelResult
predictionNumber = self._numPredictions
self._numPredictions += 1
result = opf_utils.ModelResult(predictionNumber=predictionNumber,
rawInput=inputRecord)
return result | Run one iteration of this model.
:param inputRecord: (object)
A record object formatted according to
:meth:`~nupic.data.record_stream.RecordStreamIface.getNextRecord` or
:meth:`~nupic.data.record_stream.RecordStreamIface.getNextRecordDict`
result format.
:returns: (:class:`~nupic.frameworks.opf.opf_utils.ModelResult`)
An ModelResult namedtuple. The contents of ModelResult.inferences
depends on the the specific inference type of this model, which
can be queried by :meth:`.getInferenceType`. | Below is the the instruction that describes the task:
### Input:
Run one iteration of this model.
:param inputRecord: (object)
A record object formatted according to
:meth:`~nupic.data.record_stream.RecordStreamIface.getNextRecord` or
:meth:`~nupic.data.record_stream.RecordStreamIface.getNextRecordDict`
result format.
:returns: (:class:`~nupic.frameworks.opf.opf_utils.ModelResult`)
An ModelResult namedtuple. The contents of ModelResult.inferences
depends on the the specific inference type of this model, which
can be queried by :meth:`.getInferenceType`.
### Response:
def run(self, inputRecord):
"""
Run one iteration of this model.
:param inputRecord: (object)
A record object formatted according to
:meth:`~nupic.data.record_stream.RecordStreamIface.getNextRecord` or
:meth:`~nupic.data.record_stream.RecordStreamIface.getNextRecordDict`
result format.
:returns: (:class:`~nupic.frameworks.opf.opf_utils.ModelResult`)
An ModelResult namedtuple. The contents of ModelResult.inferences
depends on the the specific inference type of this model, which
can be queried by :meth:`.getInferenceType`.
"""
# 0-based prediction index for ModelResult
predictionNumber = self._numPredictions
self._numPredictions += 1
result = opf_utils.ModelResult(predictionNumber=predictionNumber,
rawInput=inputRecord)
return result |
def read_handler(Model, name=None, **kwds):
"""
This factory returns an action handler that responds to read requests
by resolving the payload as a graphql query against the internal schema.
Args:
Model (nautilus.BaseModel): The model to delete when the action
received.
Returns:
function(type, payload): The action handler for this model
"""
async def action_handler(service, action_type, payload, props, **kwds):
# if the payload represents a new instance of `model`
if action_type == get_crud_action('read', name or Model):
# the props of the message
message_props = {}
# if there was a correlation id in the request
if 'correlation_id' in props:
# make sure it ends up in the reply
message_props['correlation_id'] = props['correlation_id']
try:
# resolve the query using the service schema
resolved = service.schema.execute(payload)
# create the string response
response = json.dumps({
'data': {key:value for key,value in resolved.data.items()},
'errors': resolved.errors
})
# publish the success event
await service.event_broker.send(
payload=response,
action_type=change_action_status(action_type, success_status()),
**message_props
)
# if something goes wrong
except Exception as err:
# publish the error as an event
await service.event_broker.send(
payload=str(err),
action_type=change_action_status(action_type, error_status()),
**message_props
)
# return the handler
return action_handler | This factory returns an action handler that responds to read requests
by resolving the payload as a graphql query against the internal schema.
Args:
Model (nautilus.BaseModel): The model to delete when the action
received.
Returns:
function(type, payload): The action handler for this model | Below is the the instruction that describes the task:
### Input:
This factory returns an action handler that responds to read requests
by resolving the payload as a graphql query against the internal schema.
Args:
Model (nautilus.BaseModel): The model to delete when the action
received.
Returns:
function(type, payload): The action handler for this model
### Response:
def read_handler(Model, name=None, **kwds):
"""
This factory returns an action handler that responds to read requests
by resolving the payload as a graphql query against the internal schema.
Args:
Model (nautilus.BaseModel): The model to delete when the action
received.
Returns:
function(type, payload): The action handler for this model
"""
async def action_handler(service, action_type, payload, props, **kwds):
# if the payload represents a new instance of `model`
if action_type == get_crud_action('read', name or Model):
# the props of the message
message_props = {}
# if there was a correlation id in the request
if 'correlation_id' in props:
# make sure it ends up in the reply
message_props['correlation_id'] = props['correlation_id']
try:
# resolve the query using the service schema
resolved = service.schema.execute(payload)
# create the string response
response = json.dumps({
'data': {key:value for key,value in resolved.data.items()},
'errors': resolved.errors
})
# publish the success event
await service.event_broker.send(
payload=response,
action_type=change_action_status(action_type, success_status()),
**message_props
)
# if something goes wrong
except Exception as err:
# publish the error as an event
await service.event_broker.send(
payload=str(err),
action_type=change_action_status(action_type, error_status()),
**message_props
)
# return the handler
return action_handler |
def directed_laplacian(self, D=None, eta=0.99, tol=1e-12, max_iter=500):
'''Computes the directed combinatorial graph laplacian.
http://www-all.cs.umass.edu/pubs/2007/johns_m_ICML07.pdf
D: (optional) N-array of degrees
eta: probability of not teleporting (see the paper)
tol, max_iter: convergence params for Perron vector calculation
'''
W = self.matrix('dense')
n = W.shape[0]
if D is None:
D = W.sum(axis=1)
# compute probability transition matrix
with np.errstate(invalid='ignore', divide='ignore'):
P = W.astype(float) / D[:,None]
P[D==0] = 0
# start at the uniform distribution Perron vector (phi)
old_phi = np.ones(n) / n
# iterate to the fixed point (teleporting random walk)
for _ in range(max_iter):
phi = eta * old_phi.dot(P) + (1-eta)/n
if np.abs(phi - old_phi).max() < tol:
break
old_phi = phi
else:
warnings.warn("phi failed to converge after %d iterations" % max_iter)
# L = Phi - (Phi P + P' Phi)/2
return np.diag(phi) - ((phi * P.T).T + P.T * phi)/2 | Computes the directed combinatorial graph laplacian.
http://www-all.cs.umass.edu/pubs/2007/johns_m_ICML07.pdf
D: (optional) N-array of degrees
eta: probability of not teleporting (see the paper)
tol, max_iter: convergence params for Perron vector calculation | Below is the the instruction that describes the task:
### Input:
Computes the directed combinatorial graph laplacian.
http://www-all.cs.umass.edu/pubs/2007/johns_m_ICML07.pdf
D: (optional) N-array of degrees
eta: probability of not teleporting (see the paper)
tol, max_iter: convergence params for Perron vector calculation
### Response:
def directed_laplacian(self, D=None, eta=0.99, tol=1e-12, max_iter=500):
'''Computes the directed combinatorial graph laplacian.
http://www-all.cs.umass.edu/pubs/2007/johns_m_ICML07.pdf
D: (optional) N-array of degrees
eta: probability of not teleporting (see the paper)
tol, max_iter: convergence params for Perron vector calculation
'''
W = self.matrix('dense')
n = W.shape[0]
if D is None:
D = W.sum(axis=1)
# compute probability transition matrix
with np.errstate(invalid='ignore', divide='ignore'):
P = W.astype(float) / D[:,None]
P[D==0] = 0
# start at the uniform distribution Perron vector (phi)
old_phi = np.ones(n) / n
# iterate to the fixed point (teleporting random walk)
for _ in range(max_iter):
phi = eta * old_phi.dot(P) + (1-eta)/n
if np.abs(phi - old_phi).max() < tol:
break
old_phi = phi
else:
warnings.warn("phi failed to converge after %d iterations" % max_iter)
# L = Phi - (Phi P + P' Phi)/2
return np.diag(phi) - ((phi * P.T).T + P.T * phi)/2 |
def convert_mysql_timestamp(timestamp):
"""Convert a MySQL TIMESTAMP to a Timestamp object.
MySQL >= 4.1 returns TIMESTAMP in the same format as DATETIME:
>>> mysql_timestamp_converter('2007-02-25 22:32:17')
datetime.datetime(2007, 2, 25, 22, 32, 17)
MySQL < 4.1 uses a big string of numbers:
>>> mysql_timestamp_converter('20070225223217')
datetime.datetime(2007, 2, 25, 22, 32, 17)
Illegal values are returned as None:
>>> mysql_timestamp_converter('2007-02-31 22:32:17') is None
True
>>> mysql_timestamp_converter('00000000000000') is None
True
"""
if timestamp[4] == '-':
return convert_datetime(timestamp)
timestamp += "0"*(14-len(timestamp)) # padding
year, month, day, hour, minute, second = \
int(timestamp[:4]), int(timestamp[4:6]), int(timestamp[6:8]), \
int(timestamp[8:10]), int(timestamp[10:12]), int(timestamp[12:14])
try:
return datetime.datetime(year, month, day, hour, minute, second)
except ValueError:
return None | Convert a MySQL TIMESTAMP to a Timestamp object.
MySQL >= 4.1 returns TIMESTAMP in the same format as DATETIME:
>>> mysql_timestamp_converter('2007-02-25 22:32:17')
datetime.datetime(2007, 2, 25, 22, 32, 17)
MySQL < 4.1 uses a big string of numbers:
>>> mysql_timestamp_converter('20070225223217')
datetime.datetime(2007, 2, 25, 22, 32, 17)
Illegal values are returned as None:
>>> mysql_timestamp_converter('2007-02-31 22:32:17') is None
True
>>> mysql_timestamp_converter('00000000000000') is None
True | Below is the the instruction that describes the task:
### Input:
Convert a MySQL TIMESTAMP to a Timestamp object.
MySQL >= 4.1 returns TIMESTAMP in the same format as DATETIME:
>>> mysql_timestamp_converter('2007-02-25 22:32:17')
datetime.datetime(2007, 2, 25, 22, 32, 17)
MySQL < 4.1 uses a big string of numbers:
>>> mysql_timestamp_converter('20070225223217')
datetime.datetime(2007, 2, 25, 22, 32, 17)
Illegal values are returned as None:
>>> mysql_timestamp_converter('2007-02-31 22:32:17') is None
True
>>> mysql_timestamp_converter('00000000000000') is None
True
### Response:
def convert_mysql_timestamp(timestamp):
"""Convert a MySQL TIMESTAMP to a Timestamp object.
MySQL >= 4.1 returns TIMESTAMP in the same format as DATETIME:
>>> mysql_timestamp_converter('2007-02-25 22:32:17')
datetime.datetime(2007, 2, 25, 22, 32, 17)
MySQL < 4.1 uses a big string of numbers:
>>> mysql_timestamp_converter('20070225223217')
datetime.datetime(2007, 2, 25, 22, 32, 17)
Illegal values are returned as None:
>>> mysql_timestamp_converter('2007-02-31 22:32:17') is None
True
>>> mysql_timestamp_converter('00000000000000') is None
True
"""
if timestamp[4] == '-':
return convert_datetime(timestamp)
timestamp += "0"*(14-len(timestamp)) # padding
year, month, day, hour, minute, second = \
int(timestamp[:4]), int(timestamp[4:6]), int(timestamp[6:8]), \
int(timestamp[8:10]), int(timestamp[10:12]), int(timestamp[12:14])
try:
return datetime.datetime(year, month, day, hour, minute, second)
except ValueError:
return None |
def makeLys(segID, N, CA, C, O, geo):
'''Creates a Lysine residue'''
##R-Group
CA_CB_length=geo.CA_CB_length
C_CA_CB_angle=geo.C_CA_CB_angle
N_C_CA_CB_diangle=geo.N_C_CA_CB_diangle
CB_CG_length=geo.CB_CG_length
CA_CB_CG_angle=geo.CA_CB_CG_angle
N_CA_CB_CG_diangle=geo.N_CA_CB_CG_diangle
CG_CD_length=geo.CG_CD_length
CB_CG_CD_angle=geo.CB_CG_CD_angle
CA_CB_CG_CD_diangle=geo.CA_CB_CG_CD_diangle
CD_CE_length=geo.CD_CE_length
CG_CD_CE_angle=geo.CG_CD_CE_angle
CB_CG_CD_CE_diangle=geo.CB_CG_CD_CE_diangle
CE_NZ_length=geo.CE_NZ_length
CD_CE_NZ_angle=geo.CD_CE_NZ_angle
CG_CD_CE_NZ_diangle=geo.CG_CD_CE_NZ_diangle
carbon_b= calculateCoordinates(N, C, CA, CA_CB_length, C_CA_CB_angle, N_C_CA_CB_diangle)
CB= Atom("CB", carbon_b, 0.0 , 1.0, " "," CB", 0,"C")
carbon_g= calculateCoordinates(N, CA, CB, CB_CG_length, CA_CB_CG_angle, N_CA_CB_CG_diangle)
CG= Atom("CG", carbon_g, 0.0, 1.0, " ", " CG", 0, "C")
carbon_d= calculateCoordinates(CA, CB, CG, CG_CD_length, CB_CG_CD_angle, CA_CB_CG_CD_diangle)
CD= Atom("CD", carbon_d, 0.0, 1.0, " ", " CD", 0, "C")
carbon_e= calculateCoordinates(CB, CG, CD, CD_CE_length, CG_CD_CE_angle, CB_CG_CD_CE_diangle)
CE= Atom("CE", carbon_e, 0.0, 1.0, " ", " CE", 0, "C")
nitrogen_z= calculateCoordinates(CG, CD, CE, CE_NZ_length, CD_CE_NZ_angle, CG_CD_CE_NZ_diangle)
NZ= Atom("NZ", nitrogen_z, 0.0, 1.0, " ", " NZ", 0, "N")
##Create Residue Data Structure
res= Residue((' ', segID, ' '), "LYS", ' ')
res.add(N)
res.add(CA)
res.add(C)
res.add(O)
res.add(CB)
res.add(CG)
res.add(CD)
res.add(CE)
res.add(NZ)
return res | Creates a Lysine residue | Below is the the instruction that describes the task:
### Input:
Creates a Lysine residue
### Response:
def makeLys(segID, N, CA, C, O, geo):
'''Creates a Lysine residue'''
##R-Group
CA_CB_length=geo.CA_CB_length
C_CA_CB_angle=geo.C_CA_CB_angle
N_C_CA_CB_diangle=geo.N_C_CA_CB_diangle
CB_CG_length=geo.CB_CG_length
CA_CB_CG_angle=geo.CA_CB_CG_angle
N_CA_CB_CG_diangle=geo.N_CA_CB_CG_diangle
CG_CD_length=geo.CG_CD_length
CB_CG_CD_angle=geo.CB_CG_CD_angle
CA_CB_CG_CD_diangle=geo.CA_CB_CG_CD_diangle
CD_CE_length=geo.CD_CE_length
CG_CD_CE_angle=geo.CG_CD_CE_angle
CB_CG_CD_CE_diangle=geo.CB_CG_CD_CE_diangle
CE_NZ_length=geo.CE_NZ_length
CD_CE_NZ_angle=geo.CD_CE_NZ_angle
CG_CD_CE_NZ_diangle=geo.CG_CD_CE_NZ_diangle
carbon_b= calculateCoordinates(N, C, CA, CA_CB_length, C_CA_CB_angle, N_C_CA_CB_diangle)
CB= Atom("CB", carbon_b, 0.0 , 1.0, " "," CB", 0,"C")
carbon_g= calculateCoordinates(N, CA, CB, CB_CG_length, CA_CB_CG_angle, N_CA_CB_CG_diangle)
CG= Atom("CG", carbon_g, 0.0, 1.0, " ", " CG", 0, "C")
carbon_d= calculateCoordinates(CA, CB, CG, CG_CD_length, CB_CG_CD_angle, CA_CB_CG_CD_diangle)
CD= Atom("CD", carbon_d, 0.0, 1.0, " ", " CD", 0, "C")
carbon_e= calculateCoordinates(CB, CG, CD, CD_CE_length, CG_CD_CE_angle, CB_CG_CD_CE_diangle)
CE= Atom("CE", carbon_e, 0.0, 1.0, " ", " CE", 0, "C")
nitrogen_z= calculateCoordinates(CG, CD, CE, CE_NZ_length, CD_CE_NZ_angle, CG_CD_CE_NZ_diangle)
NZ= Atom("NZ", nitrogen_z, 0.0, 1.0, " ", " NZ", 0, "N")
##Create Residue Data Structure
res= Residue((' ', segID, ' '), "LYS", ' ')
res.add(N)
res.add(CA)
res.add(C)
res.add(O)
res.add(CB)
res.add(CG)
res.add(CD)
res.add(CE)
res.add(NZ)
return res |
def list_resource_quota_for_all_namespaces(self, **kwargs):
"""
list or watch objects of kind ResourceQuota
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_resource_quota_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1ResourceQuotaList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_resource_quota_for_all_namespaces_with_http_info(**kwargs)
else:
(data) = self.list_resource_quota_for_all_namespaces_with_http_info(**kwargs)
return data | list or watch objects of kind ResourceQuota
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_resource_quota_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1ResourceQuotaList
If the method is called asynchronously,
returns the request thread. | Below is the the instruction that describes the task:
### Input:
list or watch objects of kind ResourceQuota
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_resource_quota_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1ResourceQuotaList
If the method is called asynchronously,
returns the request thread.
### Response:
def list_resource_quota_for_all_namespaces(self, **kwargs):
    """
    list or watch objects of kind ResourceQuota

    Synchronous by default; pass async_req=True to receive the request
    thread instead of the result:
    >>> thread = api.list_resource_quota_for_all_namespaces(async_req=True)
    >>> result = thread.get()

    Supported keyword arguments (all optional):
    :param async_req bool: perform the HTTP request asynchronously
    :param str _continue: continuation token from a previous, otherwise
        identical list call; used to page through large result sets
    :param str field_selector: restrict returned objects by their fields
    :param str label_selector: restrict returned objects by their labels
    :param int limit: maximum number of responses per list call; the server
        sets `continue` on the list metadata when more results exist
    :param str pretty: if 'true', the output is pretty printed
    :param str resource_version: show changes after this version (watch) or
        control freshness of the result (list)
    :param int timeout_seconds: timeout for the list/watch call
    :param bool watch: stream add/update/remove notifications instead of
        returning a single list; specify resourceVersion
    :return: V1ResourceQuotaList, or the request thread when async_req=True
    """
    # Always unwrap the (data, status, headers) tuple down to the data only.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Asynchronous: hand the caller the request thread directly.
        return self.list_resource_quota_for_all_namespaces_with_http_info(**kwargs)
    return self.list_resource_quota_for_all_namespaces_with_http_info(**kwargs)
return data |
def CountMethodCall(fn):
""" Decorator for counting memoizer hits/misses while retrieving
a simple value in a class method. It wraps the given method
fn and uses a CountValue object to keep track of the
caching statistics.
Wrapping gets enabled by calling EnableMemoization().
"""
if use_memoizer:
def wrapper(self, *args, **kwargs):
global CounterList
key = self.__class__.__name__+'.'+fn.__name__
if key not in CounterList:
CounterList[key] = CountValue(self.__class__.__name__, fn.__name__)
CounterList[key].count(self, *args, **kwargs)
return fn(self, *args, **kwargs)
wrapper.__name__= fn.__name__
return wrapper
else:
return fn | Decorator for counting memoizer hits/misses while retrieving
a simple value in a class method. It wraps the given method
fn and uses a CountValue object to keep track of the
caching statistics.
Wrapping gets enabled by calling EnableMemoization(). | Below is the the instruction that describes the task:
### Input:
Decorator for counting memoizer hits/misses while retrieving
a simple value in a class method. It wraps the given method
fn and uses a CountValue object to keep track of the
caching statistics.
Wrapping gets enabled by calling EnableMemoization().
### Response:
def CountMethodCall(fn):
    """ Decorator for counting memoizer hits/misses while retrieving
        a simple value in a class method. It wraps the given method
        fn and uses a CountValue object to keep track of the
        caching statistics.
        Wrapping gets enabled by calling EnableMemoization().

        :param fn: the class method to instrument
        :return: the counting wrapper when memoization is enabled,
                 otherwise ``fn`` unchanged
    """
    if use_memoizer:
        import functools

        # functools.wraps preserves __name__, __doc__, __qualname__ and
        # __wrapped__ (the original only copied __name__, losing the rest).
        @functools.wraps(fn)
        def wrapper(self, *args, **kwargs):
            # One shared CountValue per qualified method name.  CounterList
            # is only mutated (never rebound), so no `global` is needed.
            key = self.__class__.__name__ + '.' + fn.__name__
            if key not in CounterList:
                CounterList[key] = CountValue(self.__class__.__name__,
                                              fn.__name__)
            CounterList[key].count(self, *args, **kwargs)
            return fn(self, *args, **kwargs)
        return wrapper
    else:
        return fn
def move_to_backup(path, backup_suffix=BACKUP_SUFFIX):
"""
Move the given file or directory to the same name, with a backup suffix.
If backup_suffix not supplied, move it to the extension ".bak".
NB: If backup_suffix is supplied and is None, don't do anything.
"""
if backup_suffix and os.path.exists(path):
backup_path = path + backup_suffix
# Some messy corner cases need to be handled for existing backups.
# TODO: Note if this is a directory, and we do this twice at once, there is a potential race
# that could leave one backup inside the other.
if os.path.islink(backup_path):
os.unlink(backup_path)
elif os.path.isdir(backup_path):
shutil.rmtree(backup_path)
shutil.move(path, backup_path) | Move the given file or directory to the same name, with a backup suffix.
If backup_suffix not supplied, move it to the extension ".bak".
NB: If backup_suffix is supplied and is None, don't do anything. | Below is the the instruction that describes the task:
### Input:
Move the given file or directory to the same name, with a backup suffix.
If backup_suffix not supplied, move it to the extension ".bak".
NB: If backup_suffix is supplied and is None, don't do anything.
### Response:
def move_to_backup(path, backup_suffix=BACKUP_SUFFIX):
    """
    Move the given file or directory to the same name, with a backup suffix.
    If backup_suffix not supplied, move it to the extension ".bak".
    NB: If backup_suffix is supplied and is None, don't do anything.
    """
    # Nothing to do for a None/empty suffix or a missing source path.
    if not backup_suffix or not os.path.exists(path):
        return
    backup_path = path + backup_suffix
    # Clear out whatever may already occupy the backup name.
    # TODO: For directories, two concurrent runs could race and leave one
    # backup nested inside the other.
    if os.path.islink(backup_path):
        os.unlink(backup_path)
    elif os.path.isdir(backup_path):
        shutil.rmtree(backup_path)
    shutil.move(path, backup_path)
def match_process(pid, name, cmdline, exe, cfg):
"""
Decides whether a process matches with a given process descriptor
:param pid: process pid
:param exe: process executable
:param name: process name
:param cmdline: process cmdline
:param cfg: the dictionary from processes that describes with the
process group we're testing for
:return: True if it matches
:rtype: bool
"""
if cfg['selfmon'] and pid == os.getpid():
return True
for exe_re in cfg['exe']:
if exe_re.search(exe):
return True
for name_re in cfg['name']:
if name_re.search(name):
return True
for cmdline_re in cfg['cmdline']:
if cmdline_re.search(' '.join(cmdline)):
return True
return False | Decides whether a process matches with a given process descriptor
:param pid: process pid
:param exe: process executable
:param name: process name
:param cmdline: process cmdline
:param cfg: the dictionary from processes that describes with the
process group we're testing for
:return: True if it matches
:rtype: bool | Below is the the instruction that describes the task:
### Input:
Decides whether a process matches with a given process descriptor
:param pid: process pid
:param exe: process executable
:param name: process name
:param cmdline: process cmdline
:param cfg: the dictionary from processes that describes with the
process group we're testing for
:return: True if it matches
:rtype: bool
### Response:
def match_process(pid, name, cmdline, exe, cfg):
    """
    Decides whether a process matches with a given process descriptor

    :param pid: process pid
    :param name: process name
    :param cmdline: process cmdline (list of argv tokens)
    :param exe: process executable
    :param cfg: the dictionary from processes that describes with the
        process group we're testing for; keys 'selfmon' (bool) and
        'exe'/'name'/'cmdline' (lists of compiled regexes)
    :return: True if it matches
    :rtype: bool
    """
    # Self-monitoring: our own pid always matches when selfmon is set.
    if cfg['selfmon'] and pid == os.getpid():
        return True
    if any(exe_re.search(exe) for exe_re in cfg['exe']):
        return True
    if any(name_re.search(name) for name_re in cfg['name']):
        return True
    # Join argv once (the original re-joined it for every pattern).
    full_cmdline = ' '.join(cmdline)
    return any(cmdline_re.search(full_cmdline) for cmdline_re in cfg['cmdline'])
def disconnect(self):
"""Disconnect from the server."""
# here we just request the disconnection
# later in _handle_eio_disconnect we invoke the disconnect handler
for n in self.namespaces:
self._send_packet(packet.Packet(packet.DISCONNECT, namespace=n))
self._send_packet(packet.Packet(
packet.DISCONNECT, namespace='/'))
self.eio.disconnect(abort=True) | Disconnect from the server. | Below is the the instruction that describes the task:
### Input:
Disconnect from the server.
### Response:
def disconnect(self):
    """Disconnect from the server."""
    # Only the disconnection is requested here; the disconnect handler
    # itself is invoked later, in _handle_eio_disconnect.
    for namespace in list(self.namespaces) + ['/']:
        self._send_packet(packet.Packet(packet.DISCONNECT,
                                        namespace=namespace))
    self.eio.disconnect(abort=True)
def getV0(self, v_mag_guess, buses, generators, type=CASE_GUESS):
""" Returns the initial voltage profile.
"""
if type == CASE_GUESS:
Va = array([b.v_angle * (pi / 180.0) for b in buses])
Vm = array([b.v_magnitude for b in buses])
V0 = Vm * exp(1j * Va)
elif type == FLAT_START:
V0 = ones(len(buses))
elif type == FROM_INPUT:
V0 = v_mag_guess
else:
raise ValueError
# Set the voltages of PV buses and the reference bus in the guess.
# online = [g for g in self.case.generators if g.online]
gbus = [g.bus._i for g in generators]
Vg = array([g.v_magnitude for g in generators])
V0[gbus] = Vg * abs(V0[gbus]) / V0[gbus]
return V0 | Returns the initial voltage profile. | Below is the the instruction that describes the task:
### Input:
Returns the initial voltage profile.
### Response:
def getV0(self, v_mag_guess, buses, generators, type=CASE_GUESS):
    """ Returns the initial voltage profile.
    """
    if type == CASE_GUESS:
        # Build complex voltages from the magnitudes/angles stored in the
        # case; v_angle is given in degrees, hence the pi/180 conversion.
        angles = array([bus.v_angle * (pi / 180.0) for bus in buses])
        magnitudes = array([bus.v_magnitude for bus in buses])
        V0 = magnitudes * exp(1j * angles)
    elif type == FLAT_START:
        # Flat start: unit magnitude, zero angle at every bus.
        V0 = ones(len(buses))
    elif type == FROM_INPUT:
        V0 = v_mag_guess
    else:
        raise ValueError
    # Overwrite the entries for buses with generators: keep the guessed
    # angle but force the generator's voltage magnitude set-point.
    gbus = [g.bus._i for g in generators]
    Vg = array([g.v_magnitude for g in generators])
    V0[gbus] = Vg * abs(V0[gbus]) / V0[gbus]
    return V0
def read_form_data(self):
"""Attempt to read the form data from the request"""
if self.processed_data:
raise exceptions.AlreadyProcessed('The data has already been processed for this form')
if self.readonly:
return
if request.method == self.method:
if self.method == 'POST':
data = request.form
else:
data = request.args
if self.submitted_hidden_input_name in data:
# The form has been submitted
self.processed_data = True
for field in self.all_fields:
# We need to skip readonly fields
if field.readonly:
pass
else:
field.extract_value(data)
# Validate the field
if not field.validate():
log.debug('Validation error in field \'%s\': %s' % (field.name, field.error))
self.has_errors = True | Attempt to read the form data from the request | Below is the the instruction that describes the task:
### Input:
Attempt to read the form data from the request
### Response:
def read_form_data(self):
    """Attempt to read the form data from the request.

    Raises exceptions.AlreadyProcessed if called after the data has
    already been processed.  Does nothing for readonly forms or when the
    request method does not match the form's method.  When the form was
    submitted, extracts and validates every writable field, setting
    self.has_errors if any field fails validation.
    """
    if self.processed_data:
        raise exceptions.AlreadyProcessed('The data has already been processed for this form')
    if self.readonly:
        return
    if request.method != self.method:
        return
    # GET forms carry their data in the query string, POST forms in the body.
    data = request.form if self.method == 'POST' else request.args
    if self.submitted_hidden_input_name not in data:
        # The request is not a submission of this form.
        return
    # The form has been submitted
    self.processed_data = True
    for field in self.all_fields:
        if field.readonly:
            # Readonly fields are never populated from request data.
            continue
        field.extract_value(data)
        # Validate the field
        if not field.validate():
            log.debug('Validation error in field \'%s\': %s' % (field.name, field.error))
            self.has_errors = True
def receive_request(self, transaction):
"""
Manage the observe option in the request end eventually initialize the client for adding to
the list of observers or remove from the list.
:type transaction: Transaction
:param transaction: the transaction that owns the request
:rtype : Transaction
:return: the modified transaction
"""
if transaction.request.observe == 0:
# Observe request
host, port = transaction.request.source
key_token = hash(str(host) + str(port) + str(transaction.request.token))
non_counter = 0
if key_token in self._relations:
# Renew registration
allowed = True
else:
allowed = False
self._relations[key_token] = ObserveItem(time.time(), non_counter, allowed, transaction)
elif transaction.request.observe == 1:
host, port = transaction.request.source
key_token = hash(str(host) + str(port) + str(transaction.request.token))
logger.info("Remove Subscriber")
try:
del self._relations[key_token]
except KeyError:
pass
return transaction | Manage the observe option in the request end eventually initialize the client for adding to
the list of observers or remove from the list.
:type transaction: Transaction
:param transaction: the transaction that owns the request
:rtype : Transaction
:return: the modified transaction | Below is the the instruction that describes the task:
### Input:
Manage the observe option in the request end eventually initialize the client for adding to
the list of observers or remove from the list.
:type transaction: Transaction
:param transaction: the transaction that owns the request
:rtype : Transaction
:return: the modified transaction
### Response:
def receive_request(self, transaction):
    """
    Manage the observe option in the request end eventually initialize the client for adding to
    the list of observers or remove from the list.

    :type transaction: Transaction
    :param transaction: the transaction that owns the request
    :rtype : Transaction
    :return: the modified transaction
    """
    if transaction.request.observe == 0:
        # Observe (register) request
        host, port = transaction.request.source
        key_token = hash(str(host) + str(port) + str(transaction.request.token))
        non_counter = 0
        # A renewal from an already-known observer is allowed immediately.
        allowed = key_token in self._relations
        self._relations[key_token] = ObserveItem(time.time(), non_counter, allowed, transaction)
    elif transaction.request.observe == 1:
        # Cancellation: drop the relation if it exists (pop with default
        # replaces the old try/del/except KeyError dance).
        host, port = transaction.request.source
        key_token = hash(str(host) + str(port) + str(transaction.request.token))
        logger.info("Remove Subscriber")
        self._relations.pop(key_token, None)
    return transaction
def make_model(self):
"""Assemble the SBGN model from the collected INDRA Statements.
This method assembles an SBGN model from the set of INDRA Statements.
The assembled model is set as the assembler's sbgn attribute (it is
represented as an XML ElementTree internally). The model is returned
as a serialized XML string.
Returns
-------
sbgn_str : str
The XML serialized SBGN model.
"""
ppa = PysbPreassembler(self.statements)
ppa.replace_activities()
self.statements = ppa.statements
self.sbgn = emaker.sbgn()
self._map = emaker.map()
self.sbgn.append(self._map)
for stmt in self.statements:
if isinstance(stmt, Modification):
self._assemble_modification(stmt)
elif isinstance(stmt, RegulateActivity):
self._assemble_regulateactivity(stmt)
elif isinstance(stmt, RegulateAmount):
self._assemble_regulateamount(stmt)
elif isinstance(stmt, Complex):
self._assemble_complex(stmt)
elif isinstance(stmt, ActiveForm):
#self._assemble_activeform(stmt)
pass
else:
logger.warning("Unhandled Statement type %s" % type(stmt))
continue
sbgn_str = self.print_model()
return sbgn_str | Assemble the SBGN model from the collected INDRA Statements.
This method assembles an SBGN model from the set of INDRA Statements.
The assembled model is set as the assembler's sbgn attribute (it is
represented as an XML ElementTree internally). The model is returned
as a serialized XML string.
Returns
-------
sbgn_str : str
The XML serialized SBGN model. | Below is the the instruction that describes the task:
### Input:
Assemble the SBGN model from the collected INDRA Statements.
This method assembles an SBGN model from the set of INDRA Statements.
The assembled model is set as the assembler's sbgn attribute (it is
represented as an XML ElementTree internally). The model is returned
as a serialized XML string.
Returns
-------
sbgn_str : str
The XML serialized SBGN model.
### Response:
def make_model(self):
    """Assemble the SBGN model from the collected INDRA Statements.
    This method assembles an SBGN model from the set of INDRA Statements.
    The assembled model is set as the assembler's sbgn attribute (it is
    represented as an XML ElementTree internally). The model is returned
    as a serialized XML string.
    Returns
    -------
    sbgn_str : str
        The XML serialized SBGN model.
    """
    # Preprocess statements to rewrite activity references before assembly.
    ppa = PysbPreassembler(self.statements)
    ppa.replace_activities()
    self.statements = ppa.statements
    # Root <sbgn> element with a single <map> child that receives glyphs.
    self.sbgn = emaker.sbgn()
    self._map = emaker.map()
    self.sbgn.append(self._map)
    # Dispatch each Statement to the matching assembly routine by type.
    for stmt in self.statements:
        if isinstance(stmt, Modification):
            self._assemble_modification(stmt)
        elif isinstance(stmt, RegulateActivity):
            self._assemble_regulateactivity(stmt)
        elif isinstance(stmt, RegulateAmount):
            self._assemble_regulateamount(stmt)
        elif isinstance(stmt, Complex):
            self._assemble_complex(stmt)
        elif isinstance(stmt, ActiveForm):
            # ActiveForm assembly is currently disabled.
            #self._assemble_activeform(stmt)
            pass
        else:
            # Unknown statement types are skipped with a warning.
            logger.warning("Unhandled Statement type %s" % type(stmt))
            continue
    # Serialize the assembled ElementTree model to an XML string.
    sbgn_str = self.print_model()
    return sbgn_str
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.