code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def excess_drawdown_idx(self, benchmark, method="caer"):
"""Excess drawdown index; TSeries of excess drawdowns.
There are several ways of computing this metric. For highly
volatile returns, the `method` specified will have a
non-negligible effect on the result.
Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
The benchmark security to which `self` is compared.
method : {'caer' (0), 'cger' (1), 'ecr' (2), 'ecrr' (3)}
Indicates the methodology used.
"""
# TODO: plot these (compared) in docs.
if isinstance(method, (int, float)):
method = ["caer", "cger", "ecr", "ecrr"][method]
method = method.lower()
if method == "caer":
er = self.excess_ret(benchmark=benchmark, method="arithmetic")
return er.drawdown_idx()
elif method == "cger":
er = self.excess_ret(benchmark=benchmark, method="geometric")
return er.drawdown_idx()
elif method == "ecr":
er = self.ret_idx() - benchmark.ret_idx() + 1
if er.isnull().any():
return er / er.cummax() - 1.0
else:
return er / np.maximum.accumulate(er) - 1.0
elif method == "ecrr":
# Credit to: SO @piRSquared
# https://stackoverflow.com/a/36848867/7954504
p = self.ret_idx().values
b = benchmark.ret_idx().values
er = p - b
if er.isnull().any():
# The slower route but NaN-friendly.
cam = self.expanding(min_periods=1).apply(lambda x: x.argmax())
else:
cam = utils.cumargmax(er)
p0 = p[cam]
b0 = b[cam]
return (p * b0 - b * p0) / (p0 * b0)
else:
raise ValueError(
"`method` must be one of"
" ('caer', 'cger', 'ecr', 'ecrr'),"
" case-insensitive, or"
" an integer mapping to these methods"
" (1 thru 4)."
) | Excess drawdown index; TSeries of excess drawdowns.
There are several ways of computing this metric. For highly
volatile returns, the `method` specified will have a
non-negligible effect on the result.
Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
The benchmark security to which `self` is compared.
method : {'caer' (0), 'cger' (1), 'ecr' (2), 'ecrr' (3)}
Indicates the methodology used. | Below is the instruction that describes the task:
### Input:
Excess drawdown index; TSeries of excess drawdowns.
There are several ways of computing this metric. For highly
volatile returns, the `method` specified will have a
non-negligible effect on the result.
Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
The benchmark security to which `self` is compared.
method : {'caer' (0), 'cger' (1), 'ecr' (2), 'ecrr' (3)}
Indicates the methodology used.
### Response:
def excess_drawdown_idx(self, benchmark, method="caer"):
"""Excess drawdown index; TSeries of excess drawdowns.
There are several ways of computing this metric. For highly
volatile returns, the `method` specified will have a
non-negligible effect on the result.
Parameters
----------
benchmark : {pd.Series, TSeries, 1d np.ndarray}
The benchmark security to which `self` is compared.
method : {'caer' (0), 'cger' (1), 'ecr' (2), 'ecrr' (3)}
Indicates the methodology used.
"""
# TODO: plot these (compared) in docs.
if isinstance(method, (int, float)):
method = ["caer", "cger", "ecr", "ecrr"][method]
method = method.lower()
if method == "caer":
er = self.excess_ret(benchmark=benchmark, method="arithmetic")
return er.drawdown_idx()
elif method == "cger":
er = self.excess_ret(benchmark=benchmark, method="geometric")
return er.drawdown_idx()
elif method == "ecr":
er = self.ret_idx() - benchmark.ret_idx() + 1
if er.isnull().any():
return er / er.cummax() - 1.0
else:
return er / np.maximum.accumulate(er) - 1.0
elif method == "ecrr":
# Credit to: SO @piRSquared
# https://stackoverflow.com/a/36848867/7954504
p = self.ret_idx().values
b = benchmark.ret_idx().values
er = p - b
if er.isnull().any():
# The slower route but NaN-friendly.
cam = self.expanding(min_periods=1).apply(lambda x: x.argmax())
else:
cam = utils.cumargmax(er)
p0 = p[cam]
b0 = b[cam]
return (p * b0 - b * p0) / (p0 * b0)
else:
raise ValueError(
"`method` must be one of"
" ('caer', 'cger', 'ecr', 'ecrr'),"
" case-insensitive, or"
" an integer mapping to these methods"
" (1 thru 4)."
) |
def get_actors(self):
"""
Returns a list of Actor objects for querying SmartHome devices.
This is currently the only working method for getting temperature data.
"""
devices = self.homeautoswitch("getdevicelistinfos")
xml = ET.fromstring(devices)
actors = []
for device in xml.findall('device'):
actors.append(Actor(fritzbox=self, device=device))
return actors | Returns a list of Actor objects for querying SmartHome devices.
This is currently the only working method for getting temperature data. | Below is the instruction that describes the task:
### Input:
Returns a list of Actor objects for querying SmartHome devices.
This is currently the only working method for getting temperature data.
### Response:
def get_actors(self):
"""
Returns a list of Actor objects for querying SmartHome devices.
This is currently the only working method for getting temperature data.
"""
devices = self.homeautoswitch("getdevicelistinfos")
xml = ET.fromstring(devices)
actors = []
for device in xml.findall('device'):
actors.append(Actor(fritzbox=self, device=device))
return actors |
def load_info(self):
"""
Parses the JSON object stored at GPJob.info and assigns its metadata to
properties of this GPJob object.
Primarily intended to be called from GPJob.get_info().
"""
self.task_name = self.info['taskName']
self.task_lsid = self.info['taskLsid']
self.user_id = self.info['userId']
self.job_number = int(self.info['jobId'])
self.status = self.get_status_message()
self.date_submitted = self.info['dateSubmitted']
self.log_files = self.info['logFiles']
self.output_files = self.info['outputFiles']
self.num_output_files = self.info['numOutputFiles']
# Create children, if relevant
self.children = self.get_child_jobs() | Parses the JSON object stored at GPJob.info and assigns its metadata to
properties of this GPJob object.
Primarily intended to be called from GPJob.get_info(). | Below is the instruction that describes the task:
### Input:
Parses the JSON object stored at GPJob.info and assigns its metadata to
properties of this GPJob object.
Primarily intended to be called from GPJob.get_info().
### Response:
def load_info(self):
"""
Parses the JSON object stored at GPJob.info and assigns its metadata to
properties of this GPJob object.
Primarily intended to be called from GPJob.get_info().
"""
self.task_name = self.info['taskName']
self.task_lsid = self.info['taskLsid']
self.user_id = self.info['userId']
self.job_number = int(self.info['jobId'])
self.status = self.get_status_message()
self.date_submitted = self.info['dateSubmitted']
self.log_files = self.info['logFiles']
self.output_files = self.info['outputFiles']
self.num_output_files = self.info['numOutputFiles']
# Create children, if relevant
self.children = self.get_child_jobs() |
def interrupt(self, interrupt):
"""Perform the shutdown of this server and save the exception."""
self._interrupt = True
self.stop()
self._interrupt = interrupt | Perform the shutdown of this server and save the exception. | Below is the instruction that describes the task:
### Input:
Perform the shutdown of this server and save the exception.
### Response:
def interrupt(self, interrupt):
"""Perform the shutdown of this server and save the exception."""
self._interrupt = True
self.stop()
self._interrupt = interrupt |
def move_edge_target(self, edge_id, node_a):
"""Moves an edge so that it targets node_a."""
# Grab the edge
edge = self.get_edge(edge_id)
# Remove the edge from the original "target node"
original_target_node_id = edge['vertices'][1]
original_target_node = self.get_node(original_target_node_id)
original_target_node['edges'].remove(edge_id)
# Add the edge to the new target node
new_target_node_id = node_a
new_target_node = self.get_node(new_target_node_id)
new_target_node['edges'].append(edge_id)
# Alter the vertices on the edge
edge['vertices'] = (edge['vertices'][0], node_a) | Moves an edge so that it targets node_a. | Below is the instruction that describes the task:
### Input:
Moves an edge so that it targets node_a.
### Response:
def move_edge_target(self, edge_id, node_a):
"""Moves an edge so that it targets node_a."""
# Grab the edge
edge = self.get_edge(edge_id)
# Remove the edge from the original "target node"
original_target_node_id = edge['vertices'][1]
original_target_node = self.get_node(original_target_node_id)
original_target_node['edges'].remove(edge_id)
# Add the edge to the new target node
new_target_node_id = node_a
new_target_node = self.get_node(new_target_node_id)
new_target_node['edges'].append(edge_id)
# Alter the vertices on the edge
edge['vertices'] = (edge['vertices'][0], node_a) |
def render_templates(self, templates, filepath=None):
"""Render a collection of :class:`jinja2.Template` objects.
:param templates:
A collection of Templates to render.
:param filepath:
Optional. A file or file-like object to dump the complete template
stream into. Defaults to to ``os.path.join(self.outpath,
template.name)``.
"""
for template in templates:
self.render_template(template, filepath) | Render a collection of :class:`jinja2.Template` objects.
:param templates:
A collection of Templates to render.
:param filepath:
Optional. A file or file-like object to dump the complete template
stream into. Defaults to to ``os.path.join(self.outpath,
template.name)``. | Below is the instruction that describes the task:
### Input:
Render a collection of :class:`jinja2.Template` objects.
:param templates:
A collection of Templates to render.
:param filepath:
Optional. A file or file-like object to dump the complete template
stream into. Defaults to to ``os.path.join(self.outpath,
template.name)``.
### Response:
def render_templates(self, templates, filepath=None):
"""Render a collection of :class:`jinja2.Template` objects.
:param templates:
A collection of Templates to render.
:param filepath:
Optional. A file or file-like object to dump the complete template
stream into. Defaults to to ``os.path.join(self.outpath,
template.name)``.
"""
for template in templates:
self.render_template(template, filepath) |
def ext(external, pillar=None):
'''
.. versionchanged:: 2016.3.6,2016.11.3,2017.7.0
The supported ext_pillar types are now tunable using the
:conf_master:`on_demand_ext_pillar` config option. Earlier releases
used a hard-coded default.
Generate the pillar and apply an explicit external pillar
external
A single ext_pillar to add to the ext_pillar configuration. This must
be passed as a single section from the ext_pillar configuration (see
CLI examples below). For more complicated ``ext_pillar``
configurations, it can be helpful to use the Python shell to load YAML
configuration into a dictionary, and figure out
.. code-block:: python
>>> import salt.utils.yaml
>>> ext_pillar = salt.utils.yaml.safe_load("""
... ext_pillar:
... - git:
... - issue38440 https://github.com/terminalmage/git_pillar:
... - env: base
... """)
>>> ext_pillar
{'ext_pillar': [{'git': [{'mybranch https://github.com/myuser/myrepo': [{'env': 'base'}]}]}]}
>>> ext_pillar['ext_pillar'][0]
{'git': [{'mybranch https://github.com/myuser/myrepo': [{'env': 'base'}]}]}
In the above example, the value to pass would be
``{'git': [{'mybranch https://github.com/myuser/myrepo': [{'env': 'base'}]}]}``.
Note that this would need to be quoted when passing on the CLI (as in
the CLI examples below).
pillar : None
If specified, allows for a dictionary of pillar data to be made
available to pillar and ext_pillar rendering. These pillar variables
will also override any variables of the same name in pillar or
ext_pillar.
.. versionadded:: 2015.5.0
CLI Examples:
.. code-block:: bash
salt '*' pillar.ext '{libvirt: _}'
salt '*' pillar.ext "{'git': ['master https://github.com/myuser/myrepo']}"
salt '*' pillar.ext "{'git': [{'mybranch https://github.com/myuser/myrepo': [{'env': 'base'}]}]}"
'''
if isinstance(external, six.string_types):
external = salt.utils.yaml.safe_load(external)
pillar_obj = salt.pillar.get_pillar(
__opts__,
__grains__,
__opts__['id'],
__opts__['saltenv'],
ext=external,
pillar_override=pillar)
ret = pillar_obj.compile_pillar()
return ret | .. versionchanged:: 2016.3.6,2016.11.3,2017.7.0
The supported ext_pillar types are now tunable using the
:conf_master:`on_demand_ext_pillar` config option. Earlier releases
used a hard-coded default.
Generate the pillar and apply an explicit external pillar
external
A single ext_pillar to add to the ext_pillar configuration. This must
be passed as a single section from the ext_pillar configuration (see
CLI examples below). For more complicated ``ext_pillar``
configurations, it can be helpful to use the Python shell to load YAML
configuration into a dictionary, and figure out
.. code-block:: python
>>> import salt.utils.yaml
>>> ext_pillar = salt.utils.yaml.safe_load("""
... ext_pillar:
... - git:
... - issue38440 https://github.com/terminalmage/git_pillar:
... - env: base
... """)
>>> ext_pillar
{'ext_pillar': [{'git': [{'mybranch https://github.com/myuser/myrepo': [{'env': 'base'}]}]}]}
>>> ext_pillar['ext_pillar'][0]
{'git': [{'mybranch https://github.com/myuser/myrepo': [{'env': 'base'}]}]}
In the above example, the value to pass would be
``{'git': [{'mybranch https://github.com/myuser/myrepo': [{'env': 'base'}]}]}``.
Note that this would need to be quoted when passing on the CLI (as in
the CLI examples below).
pillar : None
If specified, allows for a dictionary of pillar data to be made
available to pillar and ext_pillar rendering. These pillar variables
will also override any variables of the same name in pillar or
ext_pillar.
.. versionadded:: 2015.5.0
CLI Examples:
.. code-block:: bash
salt '*' pillar.ext '{libvirt: _}'
salt '*' pillar.ext "{'git': ['master https://github.com/myuser/myrepo']}"
salt '*' pillar.ext "{'git': [{'mybranch https://github.com/myuser/myrepo': [{'env': 'base'}]}]}" | Below is the instruction that describes the task:
### Input:
.. versionchanged:: 2016.3.6,2016.11.3,2017.7.0
The supported ext_pillar types are now tunable using the
:conf_master:`on_demand_ext_pillar` config option. Earlier releases
used a hard-coded default.
Generate the pillar and apply an explicit external pillar
external
A single ext_pillar to add to the ext_pillar configuration. This must
be passed as a single section from the ext_pillar configuration (see
CLI examples below). For more complicated ``ext_pillar``
configurations, it can be helpful to use the Python shell to load YAML
configuration into a dictionary, and figure out
.. code-block:: python
>>> import salt.utils.yaml
>>> ext_pillar = salt.utils.yaml.safe_load("""
... ext_pillar:
... - git:
... - issue38440 https://github.com/terminalmage/git_pillar:
... - env: base
... """)
>>> ext_pillar
{'ext_pillar': [{'git': [{'mybranch https://github.com/myuser/myrepo': [{'env': 'base'}]}]}]}
>>> ext_pillar['ext_pillar'][0]
{'git': [{'mybranch https://github.com/myuser/myrepo': [{'env': 'base'}]}]}
In the above example, the value to pass would be
``{'git': [{'mybranch https://github.com/myuser/myrepo': [{'env': 'base'}]}]}``.
Note that this would need to be quoted when passing on the CLI (as in
the CLI examples below).
pillar : None
If specified, allows for a dictionary of pillar data to be made
available to pillar and ext_pillar rendering. These pillar variables
will also override any variables of the same name in pillar or
ext_pillar.
.. versionadded:: 2015.5.0
CLI Examples:
.. code-block:: bash
salt '*' pillar.ext '{libvirt: _}'
salt '*' pillar.ext "{'git': ['master https://github.com/myuser/myrepo']}"
salt '*' pillar.ext "{'git': [{'mybranch https://github.com/myuser/myrepo': [{'env': 'base'}]}]}"
### Response:
def ext(external, pillar=None):
'''
.. versionchanged:: 2016.3.6,2016.11.3,2017.7.0
The supported ext_pillar types are now tunable using the
:conf_master:`on_demand_ext_pillar` config option. Earlier releases
used a hard-coded default.
Generate the pillar and apply an explicit external pillar
external
A single ext_pillar to add to the ext_pillar configuration. This must
be passed as a single section from the ext_pillar configuration (see
CLI examples below). For more complicated ``ext_pillar``
configurations, it can be helpful to use the Python shell to load YAML
configuration into a dictionary, and figure out
.. code-block:: python
>>> import salt.utils.yaml
>>> ext_pillar = salt.utils.yaml.safe_load("""
... ext_pillar:
... - git:
... - issue38440 https://github.com/terminalmage/git_pillar:
... - env: base
... """)
>>> ext_pillar
{'ext_pillar': [{'git': [{'mybranch https://github.com/myuser/myrepo': [{'env': 'base'}]}]}]}
>>> ext_pillar['ext_pillar'][0]
{'git': [{'mybranch https://github.com/myuser/myrepo': [{'env': 'base'}]}]}
In the above example, the value to pass would be
``{'git': [{'mybranch https://github.com/myuser/myrepo': [{'env': 'base'}]}]}``.
Note that this would need to be quoted when passing on the CLI (as in
the CLI examples below).
pillar : None
If specified, allows for a dictionary of pillar data to be made
available to pillar and ext_pillar rendering. These pillar variables
will also override any variables of the same name in pillar or
ext_pillar.
.. versionadded:: 2015.5.0
CLI Examples:
.. code-block:: bash
salt '*' pillar.ext '{libvirt: _}'
salt '*' pillar.ext "{'git': ['master https://github.com/myuser/myrepo']}"
salt '*' pillar.ext "{'git': [{'mybranch https://github.com/myuser/myrepo': [{'env': 'base'}]}]}"
'''
if isinstance(external, six.string_types):
external = salt.utils.yaml.safe_load(external)
pillar_obj = salt.pillar.get_pillar(
__opts__,
__grains__,
__opts__['id'],
__opts__['saltenv'],
ext=external,
pillar_override=pillar)
ret = pillar_obj.compile_pillar()
return ret |
def operate(self, vala, valb, oper):
"""Perform operation
args:
vala (mixed): 1st value
valb (mixed): 2nd value
oper (str): operation
returns:
mixed
"""
operation = {
'+': operator.add,
'-': operator.sub,
'*': operator.mul,
'/': operator.truediv,
'=': operator.eq,
'>': operator.gt,
'<': operator.lt,
'>=': operator.ge,
'=<': operator.le,
}.get(oper)
if operation is None:
raise SyntaxError("Unknown operation %s" % oper)
ret = operation(vala, valb)
if oper in '+-*/' and int(ret) == ret:
ret = int(ret)
return ret | Perform operation
args:
vala (mixed): 1st value
valb (mixed): 2nd value
oper (str): operation
returns:
mixed | Below is the instruction that describes the task:
### Input:
Perform operation
args:
vala (mixed): 1st value
valb (mixed): 2nd value
oper (str): operation
returns:
mixed
### Response:
def operate(self, vala, valb, oper):
"""Perform operation
args:
vala (mixed): 1st value
valb (mixed): 2nd value
oper (str): operation
returns:
mixed
"""
operation = {
'+': operator.add,
'-': operator.sub,
'*': operator.mul,
'/': operator.truediv,
'=': operator.eq,
'>': operator.gt,
'<': operator.lt,
'>=': operator.ge,
'=<': operator.le,
}.get(oper)
if operation is None:
raise SyntaxError("Unknown operation %s" % oper)
ret = operation(vala, valb)
if oper in '+-*/' and int(ret) == ret:
ret = int(ret)
return ret |
def _twoByteStringToNum(bytestring, numberOfDecimals=0, signed=False):
"""Convert a two-byte string to a numerical value, possibly scaling it.
Args:
* bytestring (str): A string of length 2.
* numberOfDecimals (int): The number of decimals. Defaults to 0.
* signed (bol): Whether large positive values should be interpreted as negative values.
Returns:
The numerical value (int or float) calculated from the ``bytestring``.
Raises:
TypeError, ValueError
Use the parameter ``signed=True`` if converting a bytestring that can hold
negative values. Then upper range data will be automatically converted into
negative return values (two's complement).
Use ``numberOfDecimals=1`` to divide the received data by 10 before returning the value.
Similarly ``numberOfDecimals=2`` will divide the received data by 100 before returning the value.
The byte order is big-endian, meaning that the most significant byte is sent first.
For example:
A string ``\\x03\\x02`` (which has the length 2) corresponds to 0302 (hex) = 770 (dec). If
``numberOfDecimals = 1``, then this is converted to 77.0 (float).
"""
_checkString(bytestring, minlength=2, maxlength=2, description='bytestring')
_checkInt(numberOfDecimals, minvalue=0, description='number of decimals')
_checkBool(signed, description='signed parameter')
formatcode = '>' # Big-endian
if signed:
formatcode += 'h' # (Signed) short (2 bytes)
else:
formatcode += 'H' # Unsigned short (2 bytes)
fullregister = _unpack(formatcode, bytestring)
if numberOfDecimals == 0:
return fullregister
divisor = 10 ** numberOfDecimals
return fullregister / float(divisor) | Convert a two-byte string to a numerical value, possibly scaling it.
Args:
* bytestring (str): A string of length 2.
* numberOfDecimals (int): The number of decimals. Defaults to 0.
* signed (bol): Whether large positive values should be interpreted as negative values.
Returns:
The numerical value (int or float) calculated from the ``bytestring``.
Raises:
TypeError, ValueError
Use the parameter ``signed=True`` if converting a bytestring that can hold
negative values. Then upper range data will be automatically converted into
negative return values (two's complement).
Use ``numberOfDecimals=1`` to divide the received data by 10 before returning the value.
Similarly ``numberOfDecimals=2`` will divide the received data by 100 before returning the value.
The byte order is big-endian, meaning that the most significant byte is sent first.
For example:
A string ``\\x03\\x02`` (which has the length 2) corresponds to 0302 (hex) = 770 (dec). If
``numberOfDecimals = 1``, then this is converted to 77.0 (float). | Below is the instruction that describes the task:
### Input:
Convert a two-byte string to a numerical value, possibly scaling it.
Args:
* bytestring (str): A string of length 2.
* numberOfDecimals (int): The number of decimals. Defaults to 0.
* signed (bol): Whether large positive values should be interpreted as negative values.
Returns:
The numerical value (int or float) calculated from the ``bytestring``.
Raises:
TypeError, ValueError
Use the parameter ``signed=True`` if converting a bytestring that can hold
negative values. Then upper range data will be automatically converted into
negative return values (two's complement).
Use ``numberOfDecimals=1`` to divide the received data by 10 before returning the value.
Similarly ``numberOfDecimals=2`` will divide the received data by 100 before returning the value.
The byte order is big-endian, meaning that the most significant byte is sent first.
For example:
A string ``\\x03\\x02`` (which has the length 2) corresponds to 0302 (hex) = 770 (dec). If
``numberOfDecimals = 1``, then this is converted to 77.0 (float).
### Response:
def _twoByteStringToNum(bytestring, numberOfDecimals=0, signed=False):
"""Convert a two-byte string to a numerical value, possibly scaling it.
Args:
* bytestring (str): A string of length 2.
* numberOfDecimals (int): The number of decimals. Defaults to 0.
* signed (bol): Whether large positive values should be interpreted as negative values.
Returns:
The numerical value (int or float) calculated from the ``bytestring``.
Raises:
TypeError, ValueError
Use the parameter ``signed=True`` if converting a bytestring that can hold
negative values. Then upper range data will be automatically converted into
negative return values (two's complement).
Use ``numberOfDecimals=1`` to divide the received data by 10 before returning the value.
Similarly ``numberOfDecimals=2`` will divide the received data by 100 before returning the value.
The byte order is big-endian, meaning that the most significant byte is sent first.
For example:
A string ``\\x03\\x02`` (which has the length 2) corresponds to 0302 (hex) = 770 (dec). If
``numberOfDecimals = 1``, then this is converted to 77.0 (float).
"""
_checkString(bytestring, minlength=2, maxlength=2, description='bytestring')
_checkInt(numberOfDecimals, minvalue=0, description='number of decimals')
_checkBool(signed, description='signed parameter')
formatcode = '>' # Big-endian
if signed:
formatcode += 'h' # (Signed) short (2 bytes)
else:
formatcode += 'H' # Unsigned short (2 bytes)
fullregister = _unpack(formatcode, bytestring)
if numberOfDecimals == 0:
return fullregister
divisor = 10 ** numberOfDecimals
return fullregister / float(divisor) |
def generate_run_info():
"""
获取当前运行状态
"""
uptime = datetime.datetime.now() - datetime.datetime.fromtimestamp(glb.run_info.create_time())
memory_usage = glb.run_info.memory_info().rss
msg = '[当前时间] {now:%H:%M:%S}\n[运行时间] {uptime}\n[内存占用] {memory}\n[发送消息] {messages}'.format(
now=datetime.datetime.now(),
uptime=str(uptime).split('.')[0],
memory='{:.2f} MB'.format(memory_usage / 1024 ** 2),
messages=len(glb.wxbot.bot.messages)
)
return msg | 获取当前运行状态 | Below is the instruction that describes the task:
### Input:
获取当前运行状态
### Response:
def generate_run_info():
"""
获取当前运行状态
"""
uptime = datetime.datetime.now() - datetime.datetime.fromtimestamp(glb.run_info.create_time())
memory_usage = glb.run_info.memory_info().rss
msg = '[当前时间] {now:%H:%M:%S}\n[运行时间] {uptime}\n[内存占用] {memory}\n[发送消息] {messages}'.format(
now=datetime.datetime.now(),
uptime=str(uptime).split('.')[0],
memory='{:.2f} MB'.format(memory_usage / 1024 ** 2),
messages=len(glb.wxbot.bot.messages)
)
return msg |
def f_get(self, *args):
"""Returns items handled by the result.
If only a single name is given, a single data item is returned. If several names are
given, a list is returned. For integer inputs the result returns `resultname_X`.
If the result contains only a single entry you can call `f_get()` without arguments.
If you call `f_get()` and the result contains more than one element a ValueError is
thrown.
If the requested item(s) cannot be found an AttributeError is thrown.
:param args: strings-names or integers
:return: Single data item or tuple of data
Example:
>>> res = Result('supergroup.subgroup.myresult', comment='I am a neat example!' \
[1000,2000], {'a':'b','c':333}, hitchhiker='Arthur Dent')
>>> res.f_get('hitchhiker')
'Arthur Dent'
>>> res.f_get(0)
[1000,2000]
>>> res.f_get('hitchhiker', 'myresult')
('Arthur Dent', [1000,2000])
"""
if len(args) == 0:
if len(self._data) == 1:
return list(self._data.values())[0]
elif len(self._data) > 1:
raise ValueError('Your result `%s` contains more than one entry: '
'`%s` Please use >>f_get<< with one of these.' %
(self.v_full_name, str(list(self._data.keys()))))
else:
raise AttributeError('Your result `%s` is empty, cannot access data.' %
self.v_full_name)
result_list = []
for name in args:
name = self.f_translate_key(name)
if not name in self._data:
if name == 'data' and len(self._data) == 1:
return self._data[list(self._data.keys())[0]]
else:
raise AttributeError('`%s` is not part of your result `%s`.' %
(name, self.v_full_name))
result_list.append(self._data[name])
if len(args) == 1:
return result_list[0]
else:
return result_list | Returns items handled by the result.
If only a single name is given, a single data item is returned. If several names are
given, a list is returned. For integer inputs the result returns `resultname_X`.
If the result contains only a single entry you can call `f_get()` without arguments.
If you call `f_get()` and the result contains more than one element a ValueError is
thrown.
If the requested item(s) cannot be found an AttributeError is thrown.
:param args: strings-names or integers
:return: Single data item or tuple of data
Example:
>>> res = Result('supergroup.subgroup.myresult', comment='I am a neat example!' \
[1000,2000], {'a':'b','c':333}, hitchhiker='Arthur Dent')
>>> res.f_get('hitchhiker')
'Arthur Dent'
>>> res.f_get(0)
[1000,2000]
>>> res.f_get('hitchhiker', 'myresult')
('Arthur Dent', [1000,2000]) | Below is the instruction that describes the task:
### Input:
Returns items handled by the result.
If only a single name is given, a single data item is returned. If several names are
given, a list is returned. For integer inputs the result returns `resultname_X`.
If the result contains only a single entry you can call `f_get()` without arguments.
If you call `f_get()` and the result contains more than one element a ValueError is
thrown.
If the requested item(s) cannot be found an AttributeError is thrown.
:param args: strings-names or integers
:return: Single data item or tuple of data
Example:
>>> res = Result('supergroup.subgroup.myresult', comment='I am a neat example!' \
[1000,2000], {'a':'b','c':333}, hitchhiker='Arthur Dent')
>>> res.f_get('hitchhiker')
'Arthur Dent'
>>> res.f_get(0)
[1000,2000]
>>> res.f_get('hitchhiker', 'myresult')
('Arthur Dent', [1000,2000])
### Response:
def f_get(self, *args):
"""Returns items handled by the result.
If only a single name is given, a single data item is returned. If several names are
given, a list is returned. For integer inputs the result returns `resultname_X`.
If the result contains only a single entry you can call `f_get()` without arguments.
If you call `f_get()` and the result contains more than one element a ValueError is
thrown.
If the requested item(s) cannot be found an AttributeError is thrown.
:param args: strings-names or integers
:return: Single data item or tuple of data
Example:
>>> res = Result('supergroup.subgroup.myresult', comment='I am a neat example!' \
[1000,2000], {'a':'b','c':333}, hitchhiker='Arthur Dent')
>>> res.f_get('hitchhiker')
'Arthur Dent'
>>> res.f_get(0)
[1000,2000]
>>> res.f_get('hitchhiker', 'myresult')
('Arthur Dent', [1000,2000])
"""
if len(args) == 0:
if len(self._data) == 1:
return list(self._data.values())[0]
elif len(self._data) > 1:
raise ValueError('Your result `%s` contains more than one entry: '
'`%s` Please use >>f_get<< with one of these.' %
(self.v_full_name, str(list(self._data.keys()))))
else:
raise AttributeError('Your result `%s` is empty, cannot access data.' %
self.v_full_name)
result_list = []
for name in args:
name = self.f_translate_key(name)
if not name in self._data:
if name == 'data' and len(self._data) == 1:
return self._data[list(self._data.keys())[0]]
else:
raise AttributeError('`%s` is not part of your result `%s`.' %
(name, self.v_full_name))
result_list.append(self._data[name])
if len(args) == 1:
return result_list[0]
else:
return result_list |
def show_fabric_trunk_info_output_show_trunk_list_trunk_list_groups_trunk_list_group(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_fabric_trunk_info = ET.Element("show_fabric_trunk_info")
config = show_fabric_trunk_info
output = ET.SubElement(show_fabric_trunk_info, "output")
show_trunk_list = ET.SubElement(output, "show-trunk-list")
trunk_list_groups = ET.SubElement(show_trunk_list, "trunk-list-groups")
trunk_list_group = ET.SubElement(trunk_list_groups, "trunk-list-group")
trunk_list_group.text = kwargs.pop('trunk_list_group')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def show_fabric_trunk_info_output_show_trunk_list_trunk_list_groups_trunk_list_group(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_fabric_trunk_info = ET.Element("show_fabric_trunk_info")
config = show_fabric_trunk_info
output = ET.SubElement(show_fabric_trunk_info, "output")
show_trunk_list = ET.SubElement(output, "show-trunk-list")
trunk_list_groups = ET.SubElement(show_trunk_list, "trunk-list-groups")
trunk_list_group = ET.SubElement(trunk_list_groups, "trunk-list-group")
trunk_list_group.text = kwargs.pop('trunk_list_group')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def ReduceOpacity(im, opacity):
"""Reduces Opacity.
Returns an image with reduced opacity.
Taken from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/362879
"""
assert opacity >= 0 and opacity <= 1
if isalpha(im):
im = im.copy()
else:
im = im.convert('RGBA')
alpha = im.split()[3]
alpha = ImageEnhance.Brightness(alpha).enhance(opacity)
im.putalpha(alpha)
return im | Reduces Opacity.
Returns an image with reduced opacity.
    Taken from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/362879 | Below is the instruction that describes the task:
### Input:
Reduces Opacity.
Returns an image with reduced opacity.
Taken from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/362879
### Response:
def ReduceOpacity(im, opacity):
"""Reduces Opacity.
Returns an image with reduced opacity.
Taken from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/362879
"""
assert opacity >= 0 and opacity <= 1
if isalpha(im):
im = im.copy()
else:
im = im.convert('RGBA')
alpha = im.split()[3]
alpha = ImageEnhance.Brightness(alpha).enhance(opacity)
im.putalpha(alpha)
return im |
def first_parent(tag, nodename):
"""
Given a beautiful soup tag, look at its parents and return the first
tag name that matches nodename or the list nodename
"""
if nodename is not None and type(nodename) == str:
nodename = [nodename]
return first(list(filter(lambda tag: tag.name in nodename, tag.parents))) | Given a beautiful soup tag, look at its parents and return the first
    tag name that matches nodename or the list nodename | Below is the instruction that describes the task:
### Input:
Given a beautiful soup tag, look at its parents and return the first
tag name that matches nodename or the list nodename
### Response:
def first_parent(tag, nodename):
"""
Given a beautiful soup tag, look at its parents and return the first
tag name that matches nodename or the list nodename
"""
if nodename is not None and type(nodename) == str:
nodename = [nodename]
return first(list(filter(lambda tag: tag.name in nodename, tag.parents))) |
def database_exists(url):
"""Check if a database exists.
:param url: A SQLAlchemy engine URL.
Performs backend-specific testing to quickly determine if a database
exists on the server. ::
database_exists('postgres://postgres@localhost/name') #=> False
create_database('postgres://postgres@localhost/name')
database_exists('postgres://postgres@localhost/name') #=> True
Supports checking against a constructed URL as well. ::
engine = create_engine('postgres://postgres@localhost/name')
database_exists(engine.url) #=> False
create_database(engine.url)
database_exists(engine.url) #=> True
"""
url = copy(make_url(url))
database = url.database
if url.drivername.startswith('postgresql'):
url.database = 'template1'
else:
url.database = None
engine = sa.create_engine(url)
if engine.dialect.name == 'postgresql':
text = "SELECT 1 FROM pg_database WHERE datname='%s'" % database
return bool(engine.execute(text).scalar())
elif engine.dialect.name == 'mysql':
text = ("SELECT SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA "
"WHERE SCHEMA_NAME = '%s'" % database)
return bool(engine.execute(text).scalar())
elif engine.dialect.name == 'sqlite':
return database == ':memory:' or os.path.exists(database)
else:
text = 'SELECT 1'
try:
url.database = database
engine = sa.create_engine(url)
engine.execute(text)
return True
except (ProgrammingError, OperationalError):
return False | Check if a database exists.
:param url: A SQLAlchemy engine URL.
Performs backend-specific testing to quickly determine if a database
exists on the server. ::
database_exists('postgres://postgres@localhost/name') #=> False
create_database('postgres://postgres@localhost/name')
database_exists('postgres://postgres@localhost/name') #=> True
Supports checking against a constructed URL as well. ::
engine = create_engine('postgres://postgres@localhost/name')
database_exists(engine.url) #=> False
create_database(engine.url)
    database_exists(engine.url) #=> True | Below is the instruction that describes the task:
### Input:
Check if a database exists.
:param url: A SQLAlchemy engine URL.
Performs backend-specific testing to quickly determine if a database
exists on the server. ::
database_exists('postgres://postgres@localhost/name') #=> False
create_database('postgres://postgres@localhost/name')
database_exists('postgres://postgres@localhost/name') #=> True
Supports checking against a constructed URL as well. ::
engine = create_engine('postgres://postgres@localhost/name')
database_exists(engine.url) #=> False
create_database(engine.url)
database_exists(engine.url) #=> True
### Response:
def database_exists(url):
"""Check if a database exists.
:param url: A SQLAlchemy engine URL.
Performs backend-specific testing to quickly determine if a database
exists on the server. ::
database_exists('postgres://postgres@localhost/name') #=> False
create_database('postgres://postgres@localhost/name')
database_exists('postgres://postgres@localhost/name') #=> True
Supports checking against a constructed URL as well. ::
engine = create_engine('postgres://postgres@localhost/name')
database_exists(engine.url) #=> False
create_database(engine.url)
database_exists(engine.url) #=> True
"""
url = copy(make_url(url))
database = url.database
if url.drivername.startswith('postgresql'):
url.database = 'template1'
else:
url.database = None
engine = sa.create_engine(url)
if engine.dialect.name == 'postgresql':
text = "SELECT 1 FROM pg_database WHERE datname='%s'" % database
return bool(engine.execute(text).scalar())
elif engine.dialect.name == 'mysql':
text = ("SELECT SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA "
"WHERE SCHEMA_NAME = '%s'" % database)
return bool(engine.execute(text).scalar())
elif engine.dialect.name == 'sqlite':
return database == ':memory:' or os.path.exists(database)
else:
text = 'SELECT 1'
try:
url.database = database
engine = sa.create_engine(url)
engine.execute(text)
return True
except (ProgrammingError, OperationalError):
return False |
def _clear_timeouts(self, cache_timeout):
"""
Clear the cache of timed out results.
"""
for key in list(self.timeouts):
if timer() - self.timeouts[key] > cache_timeout:
del self.timeouts[key]
                del self.cache[key] | Clear the cache of timed out results. | Below is the instruction that describes the task:
### Input:
Clear the cache of timed out results.
### Response:
def _clear_timeouts(self, cache_timeout):
"""
Clear the cache of timed out results.
"""
for key in list(self.timeouts):
if timer() - self.timeouts[key] > cache_timeout:
del self.timeouts[key]
del self.cache[key] |
def _filter_exact(self, term, field_name, field_type, is_not):
"""
Returns a query that matches exactly the un-stemmed term
with positional order.
Assumes term is not a list.
"""
if field_type == 'text' and field_name not in (DJANGO_CT,):
term = '^ %s $' % term
query = self._phrase_query(term.split(), field_name, field_type)
else:
query = self._term_query(term, field_name, field_type, stemmed=False)
if is_not:
return xapian.Query(xapian.Query.OP_AND_NOT, self._all_query(), query)
else:
return query | Returns a query that matches exactly the un-stemmed term
with positional order.
Assumes term is not a list. | Below is the the instruction that describes the task:
### Input:
Returns a query that matches exactly the un-stemmed term
with positional order.
Assumes term is not a list.
### Response:
def _filter_exact(self, term, field_name, field_type, is_not):
"""
Returns a query that matches exactly the un-stemmed term
with positional order.
Assumes term is not a list.
"""
if field_type == 'text' and field_name not in (DJANGO_CT,):
term = '^ %s $' % term
query = self._phrase_query(term.split(), field_name, field_type)
else:
query = self._term_query(term, field_name, field_type, stemmed=False)
if is_not:
return xapian.Query(xapian.Query.OP_AND_NOT, self._all_query(), query)
else:
return query |
def interact_gridsearch_result_images(show_result_func, cfgdict_list,
cfglbl_list, cfgresult_list,
score_list=None, fnum=None, figtitle='',
unpack=False, max_plots=25, verbose=True,
precision=3, scorelbl='score',
onclick_func=None):
""" helper function for visualizing results of gridsearch """
assert callable(show_result_func), 'NEED FUNCTION GOT: %r' % (show_result_func,)
import utool as ut
import plottool as pt
from plottool import plot_helpers as ph
from plottool import interact_helpers as ih
if verbose:
print('Plotting gridsearch results figtitle=%r' % (figtitle,))
if score_list is None:
score_list = [None] * len(cfgdict_list)
else:
# sort by score if available
sortx_list = ut.list_argsort(score_list, reverse=True)
score_list = ut.take(score_list, sortx_list)
cfgdict_list = ut.take(cfgdict_list, sortx_list)
cfglbl_list = ut.take(cfglbl_list, sortx_list)
cfgresult_list = ut.take(cfgresult_list, sortx_list)
# Dont show too many results only the top few
score_list = ut.listclip(score_list, max_plots)
# Show the config results
fig = pt.figure(fnum=fnum)
# Get plots for each of the resutls
nRows, nCols = pt.get_square_row_cols(len(score_list), fix=True)
next_pnum = pt.make_pnum_nextgen(nRows, nCols)
for cfgdict, cfglbl, cfgresult, score in zip(cfgdict_list, cfglbl_list,
cfgresult_list,
score_list):
if score is not None:
cfglbl += '\n' + scorelbl + '=' + ut.repr2(score, precision=precision)
pnum = next_pnum()
try:
if unpack:
show_result_func(*cfgresult, fnum=fnum, pnum=pnum)
else:
show_result_func(cfgresult, fnum=fnum, pnum=pnum)
except Exception as ex:
if isinstance(cfgresult, tuple):
#print(ut.repr4(cfgresult))
print(ut.depth_profile(cfgresult))
print(ut.list_type_profile(cfgresult))
ut.printex(ex, 'error showing', keys=['cfgresult', 'fnum', 'pnum'])
raise
#pt.imshow(255 * cfgresult, fnum=fnum, pnum=next_pnum(), title=cfglbl)
ax = pt.gca()
pt.set_title(cfglbl, ax=ax) # , size)
ph.set_plotdat(ax, 'cfgdict', cfgdict)
ph.set_plotdat(ax, 'cfglbl', cfglbl)
ph.set_plotdat(ax, 'cfgresult', cfgresult)
# Define clicked callback
def on_clicked(event):
print('\n[pt] clicked gridsearch axes')
if event is None or event.xdata is None or event.inaxes is None:
print('out of axes')
pass
else:
ax = event.inaxes
plotdat_dict = ph.get_plotdat_dict(ax)
print(ut.repr4(plotdat_dict))
cfglbl = ph.get_plotdat(ax, 'cfglbl', None)
cfgdict = ph.get_plotdat(ax, 'cfgdict', {})
cfgresult = ph.get_plotdat(ax, 'cfgresult', {})
infostr_list = [
('cfglbl = %s' % (cfglbl,)),
'',
('cfgdict = ' + ut.repr4(cfgdict, sorted_=True)),
]
# Call a user defined function if given
if onclick_func is not None:
if unpack:
onclick_func(*cfgresult)
else:
onclick_func(cfgresult)
infostr = ut.msgblock('CLICKED', '\n'.join(infostr_list))
print(infostr)
# Connect callbacks
ih.connect_callback(fig, 'button_press_event', on_clicked)
    pt.set_figtitle(figtitle) | helper function for visualizing results of gridsearch | Below is the instruction that describes the task:
### Input:
helper function for visualizing results of gridsearch
### Response:
def interact_gridsearch_result_images(show_result_func, cfgdict_list,
cfglbl_list, cfgresult_list,
score_list=None, fnum=None, figtitle='',
unpack=False, max_plots=25, verbose=True,
precision=3, scorelbl='score',
onclick_func=None):
""" helper function for visualizing results of gridsearch """
assert callable(show_result_func), 'NEED FUNCTION GOT: %r' % (show_result_func,)
import utool as ut
import plottool as pt
from plottool import plot_helpers as ph
from plottool import interact_helpers as ih
if verbose:
print('Plotting gridsearch results figtitle=%r' % (figtitle,))
if score_list is None:
score_list = [None] * len(cfgdict_list)
else:
# sort by score if available
sortx_list = ut.list_argsort(score_list, reverse=True)
score_list = ut.take(score_list, sortx_list)
cfgdict_list = ut.take(cfgdict_list, sortx_list)
cfglbl_list = ut.take(cfglbl_list, sortx_list)
cfgresult_list = ut.take(cfgresult_list, sortx_list)
# Dont show too many results only the top few
score_list = ut.listclip(score_list, max_plots)
# Show the config results
fig = pt.figure(fnum=fnum)
# Get plots for each of the resutls
nRows, nCols = pt.get_square_row_cols(len(score_list), fix=True)
next_pnum = pt.make_pnum_nextgen(nRows, nCols)
for cfgdict, cfglbl, cfgresult, score in zip(cfgdict_list, cfglbl_list,
cfgresult_list,
score_list):
if score is not None:
cfglbl += '\n' + scorelbl + '=' + ut.repr2(score, precision=precision)
pnum = next_pnum()
try:
if unpack:
show_result_func(*cfgresult, fnum=fnum, pnum=pnum)
else:
show_result_func(cfgresult, fnum=fnum, pnum=pnum)
except Exception as ex:
if isinstance(cfgresult, tuple):
#print(ut.repr4(cfgresult))
print(ut.depth_profile(cfgresult))
print(ut.list_type_profile(cfgresult))
ut.printex(ex, 'error showing', keys=['cfgresult', 'fnum', 'pnum'])
raise
#pt.imshow(255 * cfgresult, fnum=fnum, pnum=next_pnum(), title=cfglbl)
ax = pt.gca()
pt.set_title(cfglbl, ax=ax) # , size)
ph.set_plotdat(ax, 'cfgdict', cfgdict)
ph.set_plotdat(ax, 'cfglbl', cfglbl)
ph.set_plotdat(ax, 'cfgresult', cfgresult)
# Define clicked callback
def on_clicked(event):
print('\n[pt] clicked gridsearch axes')
if event is None or event.xdata is None or event.inaxes is None:
print('out of axes')
pass
else:
ax = event.inaxes
plotdat_dict = ph.get_plotdat_dict(ax)
print(ut.repr4(plotdat_dict))
cfglbl = ph.get_plotdat(ax, 'cfglbl', None)
cfgdict = ph.get_plotdat(ax, 'cfgdict', {})
cfgresult = ph.get_plotdat(ax, 'cfgresult', {})
infostr_list = [
('cfglbl = %s' % (cfglbl,)),
'',
('cfgdict = ' + ut.repr4(cfgdict, sorted_=True)),
]
# Call a user defined function if given
if onclick_func is not None:
if unpack:
onclick_func(*cfgresult)
else:
onclick_func(cfgresult)
infostr = ut.msgblock('CLICKED', '\n'.join(infostr_list))
print(infostr)
# Connect callbacks
ih.connect_callback(fig, 'button_press_event', on_clicked)
pt.set_figtitle(figtitle) |
def _do_title_string(self,title_items,cycle):
'''
Create title string
Private method that creates a title string for a cycle plot
out of a list of title_items that are cycle attributes and can
be obtained with self.get
Parameters
----------
title_items : list
A list of cycle attributes.
cycle : scalar
The cycle for which the title string should be created.
Returns
-------
title_string: string
Title string that can be used to decorate plot.
'''
title_string=[]
form_str='%4.1F'
for item in title_items:
num=self.get(item,fname=cycle)
if num > 999 or num < 0.1:
num=log10(num)
prefix='log '
else:
prefix=''
title_string.append(prefix+item+'='+form_str%num)
tt=''
for thing in title_string:
tt = tt+thing+", "
return tt.rstrip(', ') | Create title string
Private method that creates a title string for a cycle plot
out of a list of title_items that are cycle attributes and can
be obtained with self.get
Parameters
----------
title_items : list
A list of cycle attributes.
cycle : scalar
The cycle for which the title string should be created.
Returns
-------
title_string: string
Title string that can be used to decorate plot. | Below is the the instruction that describes the task:
### Input:
Create title string
Private method that creates a title string for a cycle plot
out of a list of title_items that are cycle attributes and can
be obtained with self.get
Parameters
----------
title_items : list
A list of cycle attributes.
cycle : scalar
The cycle for which the title string should be created.
Returns
-------
title_string: string
Title string that can be used to decorate plot.
### Response:
def _do_title_string(self,title_items,cycle):
'''
Create title string
Private method that creates a title string for a cycle plot
out of a list of title_items that are cycle attributes and can
be obtained with self.get
Parameters
----------
title_items : list
A list of cycle attributes.
cycle : scalar
The cycle for which the title string should be created.
Returns
-------
title_string: string
Title string that can be used to decorate plot.
'''
title_string=[]
form_str='%4.1F'
for item in title_items:
num=self.get(item,fname=cycle)
if num > 999 or num < 0.1:
num=log10(num)
prefix='log '
else:
prefix=''
title_string.append(prefix+item+'='+form_str%num)
tt=''
for thing in title_string:
tt = tt+thing+", "
return tt.rstrip(', ') |
def saturation(self, color, *args):
""" Return the saturation value of a color
args:
color (str): color
raises:
ValueError
returns:
float
"""
if color:
h, l, s = self._hextohls(color)
return s * 100.0
raise ValueError('Illegal color values') | Return the saturation value of a color
args:
color (str): color
raises:
ValueError
returns:
float | Below is the the instruction that describes the task:
### Input:
Return the saturation value of a color
args:
color (str): color
raises:
ValueError
returns:
float
### Response:
def saturation(self, color, *args):
""" Return the saturation value of a color
args:
color (str): color
raises:
ValueError
returns:
float
"""
if color:
h, l, s = self._hextohls(color)
return s * 100.0
raise ValueError('Illegal color values') |
def normalize_datum(self, datum):
"""
Convert `datum` into something that umsgpack likes.
:param datum: something that we want to process with umsgpack
:return: a packable version of `datum`
:raises TypeError: if `datum` cannot be packed
This message is called by :meth:`.packb` to recursively normalize
an input value before passing it to :func:`umsgpack.packb`. Values
are normalized according to the following table.
+-------------------------------+-------------------------------+
| **Value** | **MsgPack Family** |
+-------------------------------+-------------------------------+
| :data:`None` | `nil byte`_ (0xC0) |
+-------------------------------+-------------------------------+
| :data:`True` | `true byte`_ (0xC3) |
+-------------------------------+-------------------------------+
| :data:`False` | `false byte`_ (0xC2) |
+-------------------------------+-------------------------------+
| :class:`int` | `integer family`_ |
+-------------------------------+-------------------------------+
| :class:`float` | `float family`_ |
+-------------------------------+-------------------------------+
| String | `str family`_ |
+-------------------------------+-------------------------------+
| :class:`bytes` | `bin family`_ |
+-------------------------------+-------------------------------+
| :class:`bytearray` | `bin family`_ |
+-------------------------------+-------------------------------+
| :class:`memoryview` | `bin family`_ |
+-------------------------------+-------------------------------+
| :class:`collections.Sequence` | `array family`_ |
+-------------------------------+-------------------------------+
| :class:`collections.Set` | `array family`_ |
+-------------------------------+-------------------------------+
| :class:`collections.Mapping` | `map family`_ |
+-------------------------------+-------------------------------+
| :class:`uuid.UUID` | Converted to String |
+-------------------------------+-------------------------------+
.. _nil byte: https://github.com/msgpack/msgpack/blob/
0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#formats-nil
.. _true byte: https://github.com/msgpack/msgpack/blob/
0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#bool-format-family
.. _false byte: https://github.com/msgpack/msgpack/blob/
0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#bool-format-family
.. _integer family: https://github.com/msgpack/msgpack/blob/
0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#int-format-family
.. _float family: https://github.com/msgpack/msgpack/blob/
0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#float-format-family
.. _str family: https://github.com/msgpack/msgpack/blob/
0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#str-format-family
.. _array family: https://github.com/msgpack/msgpack/blob/
0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#array-format-family
.. _map family: https://github.com/msgpack/msgpack/blob/
0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md
#mapping-format-family
.. _bin family: https://github.com/msgpack/msgpack/blob/
0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#bin-format-family
"""
if datum is None:
return datum
if isinstance(datum, self.PACKABLE_TYPES):
return datum
if isinstance(datum, uuid.UUID):
datum = str(datum)
if isinstance(datum, bytearray):
datum = bytes(datum)
if isinstance(datum, memoryview):
datum = datum.tobytes()
if hasattr(datum, 'isoformat'):
datum = datum.isoformat()
if isinstance(datum, (bytes, str)):
return datum
if isinstance(datum, (collections.Sequence, collections.Set)):
return [self.normalize_datum(item) for item in datum]
if isinstance(datum, collections.Mapping):
out = {}
for k, v in datum.items():
out[k] = self.normalize_datum(v)
return out
raise TypeError(
'{} is not msgpackable'.format(datum.__class__.__name__)) | Convert `datum` into something that umsgpack likes.
:param datum: something that we want to process with umsgpack
:return: a packable version of `datum`
:raises TypeError: if `datum` cannot be packed
This message is called by :meth:`.packb` to recursively normalize
an input value before passing it to :func:`umsgpack.packb`. Values
are normalized according to the following table.
+-------------------------------+-------------------------------+
| **Value** | **MsgPack Family** |
+-------------------------------+-------------------------------+
| :data:`None` | `nil byte`_ (0xC0) |
+-------------------------------+-------------------------------+
| :data:`True` | `true byte`_ (0xC3) |
+-------------------------------+-------------------------------+
| :data:`False` | `false byte`_ (0xC2) |
+-------------------------------+-------------------------------+
| :class:`int` | `integer family`_ |
+-------------------------------+-------------------------------+
| :class:`float` | `float family`_ |
+-------------------------------+-------------------------------+
| String | `str family`_ |
+-------------------------------+-------------------------------+
| :class:`bytes` | `bin family`_ |
+-------------------------------+-------------------------------+
| :class:`bytearray` | `bin family`_ |
+-------------------------------+-------------------------------+
| :class:`memoryview` | `bin family`_ |
+-------------------------------+-------------------------------+
| :class:`collections.Sequence` | `array family`_ |
+-------------------------------+-------------------------------+
| :class:`collections.Set` | `array family`_ |
+-------------------------------+-------------------------------+
| :class:`collections.Mapping` | `map family`_ |
+-------------------------------+-------------------------------+
| :class:`uuid.UUID` | Converted to String |
+-------------------------------+-------------------------------+
.. _nil byte: https://github.com/msgpack/msgpack/blob/
0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#formats-nil
.. _true byte: https://github.com/msgpack/msgpack/blob/
0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#bool-format-family
.. _false byte: https://github.com/msgpack/msgpack/blob/
0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#bool-format-family
.. _integer family: https://github.com/msgpack/msgpack/blob/
0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#int-format-family
.. _float family: https://github.com/msgpack/msgpack/blob/
0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#float-format-family
.. _str family: https://github.com/msgpack/msgpack/blob/
0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#str-format-family
.. _array family: https://github.com/msgpack/msgpack/blob/
0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#array-format-family
.. _map family: https://github.com/msgpack/msgpack/blob/
0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md
#mapping-format-family
.. _bin family: https://github.com/msgpack/msgpack/blob/
0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#bin-format-family | Below is the the instruction that describes the task:
### Input:
Convert `datum` into something that umsgpack likes.
:param datum: something that we want to process with umsgpack
:return: a packable version of `datum`
:raises TypeError: if `datum` cannot be packed
This message is called by :meth:`.packb` to recursively normalize
an input value before passing it to :func:`umsgpack.packb`. Values
are normalized according to the following table.
+-------------------------------+-------------------------------+
| **Value** | **MsgPack Family** |
+-------------------------------+-------------------------------+
| :data:`None` | `nil byte`_ (0xC0) |
+-------------------------------+-------------------------------+
| :data:`True` | `true byte`_ (0xC3) |
+-------------------------------+-------------------------------+
| :data:`False` | `false byte`_ (0xC2) |
+-------------------------------+-------------------------------+
| :class:`int` | `integer family`_ |
+-------------------------------+-------------------------------+
| :class:`float` | `float family`_ |
+-------------------------------+-------------------------------+
| String | `str family`_ |
+-------------------------------+-------------------------------+
| :class:`bytes` | `bin family`_ |
+-------------------------------+-------------------------------+
| :class:`bytearray` | `bin family`_ |
+-------------------------------+-------------------------------+
| :class:`memoryview` | `bin family`_ |
+-------------------------------+-------------------------------+
| :class:`collections.Sequence` | `array family`_ |
+-------------------------------+-------------------------------+
| :class:`collections.Set` | `array family`_ |
+-------------------------------+-------------------------------+
| :class:`collections.Mapping` | `map family`_ |
+-------------------------------+-------------------------------+
| :class:`uuid.UUID` | Converted to String |
+-------------------------------+-------------------------------+
.. _nil byte: https://github.com/msgpack/msgpack/blob/
0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#formats-nil
.. _true byte: https://github.com/msgpack/msgpack/blob/
0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#bool-format-family
.. _false byte: https://github.com/msgpack/msgpack/blob/
0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#bool-format-family
.. _integer family: https://github.com/msgpack/msgpack/blob/
0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#int-format-family
.. _float family: https://github.com/msgpack/msgpack/blob/
0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#float-format-family
.. _str family: https://github.com/msgpack/msgpack/blob/
0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#str-format-family
.. _array family: https://github.com/msgpack/msgpack/blob/
0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#array-format-family
.. _map family: https://github.com/msgpack/msgpack/blob/
0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md
#mapping-format-family
.. _bin family: https://github.com/msgpack/msgpack/blob/
0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#bin-format-family
### Response:
def normalize_datum(self, datum):
"""
Convert `datum` into something that umsgpack likes.
:param datum: something that we want to process with umsgpack
:return: a packable version of `datum`
:raises TypeError: if `datum` cannot be packed
This message is called by :meth:`.packb` to recursively normalize
an input value before passing it to :func:`umsgpack.packb`. Values
are normalized according to the following table.
+-------------------------------+-------------------------------+
| **Value** | **MsgPack Family** |
+-------------------------------+-------------------------------+
| :data:`None` | `nil byte`_ (0xC0) |
+-------------------------------+-------------------------------+
| :data:`True` | `true byte`_ (0xC3) |
+-------------------------------+-------------------------------+
| :data:`False` | `false byte`_ (0xC2) |
+-------------------------------+-------------------------------+
| :class:`int` | `integer family`_ |
+-------------------------------+-------------------------------+
| :class:`float` | `float family`_ |
+-------------------------------+-------------------------------+
| String | `str family`_ |
+-------------------------------+-------------------------------+
| :class:`bytes` | `bin family`_ |
+-------------------------------+-------------------------------+
| :class:`bytearray` | `bin family`_ |
+-------------------------------+-------------------------------+
| :class:`memoryview` | `bin family`_ |
+-------------------------------+-------------------------------+
| :class:`collections.Sequence` | `array family`_ |
+-------------------------------+-------------------------------+
| :class:`collections.Set` | `array family`_ |
+-------------------------------+-------------------------------+
| :class:`collections.Mapping` | `map family`_ |
+-------------------------------+-------------------------------+
| :class:`uuid.UUID` | Converted to String |
+-------------------------------+-------------------------------+
.. _nil byte: https://github.com/msgpack/msgpack/blob/
0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#formats-nil
.. _true byte: https://github.com/msgpack/msgpack/blob/
0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#bool-format-family
.. _false byte: https://github.com/msgpack/msgpack/blob/
0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#bool-format-family
.. _integer family: https://github.com/msgpack/msgpack/blob/
0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#int-format-family
.. _float family: https://github.com/msgpack/msgpack/blob/
0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#float-format-family
.. _str family: https://github.com/msgpack/msgpack/blob/
0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#str-format-family
.. _array family: https://github.com/msgpack/msgpack/blob/
0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#array-format-family
.. _map family: https://github.com/msgpack/msgpack/blob/
0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md
#mapping-format-family
.. _bin family: https://github.com/msgpack/msgpack/blob/
0b8f5ac67cdd130f4d4d4fe6afb839b989fdb86a/spec.md#bin-format-family
"""
if datum is None:
return datum
if isinstance(datum, self.PACKABLE_TYPES):
return datum
if isinstance(datum, uuid.UUID):
datum = str(datum)
if isinstance(datum, bytearray):
datum = bytes(datum)
if isinstance(datum, memoryview):
datum = datum.tobytes()
if hasattr(datum, 'isoformat'):
datum = datum.isoformat()
if isinstance(datum, (bytes, str)):
return datum
if isinstance(datum, (collections.Sequence, collections.Set)):
return [self.normalize_datum(item) for item in datum]
if isinstance(datum, collections.Mapping):
out = {}
for k, v in datum.items():
out[k] = self.normalize_datum(v)
return out
raise TypeError(
'{} is not msgpackable'.format(datum.__class__.__name__)) |
def interpolate(self, target, extent):
""" Move this vector towards the given towards the target by the given
extent. The extent should be between 0 and 1. """
target = cast_anything_to_vector(target)
self += extent * (target - self) | Move this vector towards the given towards the target by the given
extent. The extent should be between 0 and 1. | Below is the the instruction that describes the task:
### Input:
Move this vector towards the given towards the target by the given
extent. The extent should be between 0 and 1.
### Response:
def interpolate(self, target, extent):
""" Move this vector towards the given towards the target by the given
extent. The extent should be between 0 and 1. """
target = cast_anything_to_vector(target)
self += extent * (target - self) |
def nice(self):
"""Get or set process niceness (priority).
Deprecated, use get_nice() instead.
"""
msg = "this property is deprecated; use Process.get_nice() method instead"
warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
return self.get_nice() | Get or set process niceness (priority).
        Deprecated, use get_nice() instead. | Below is the instruction that describes the task:
### Input:
Get or set process niceness (priority).
Deprecated, use get_nice() instead.
### Response:
def nice(self):
"""Get or set process niceness (priority).
Deprecated, use get_nice() instead.
"""
msg = "this property is deprecated; use Process.get_nice() method instead"
warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
return self.get_nice() |
def read(self, id, table='reaction'):
""" Return an entire row of a table
Parameters
---------
id: int
row integer
table: str
'reaction', 'publication', 'publication_system', 'reaction_system'
"""
con = self.connection or self._connect()
self._initialize(con)
cur = con.cursor()
cur.execute('SELECT * FROM \n {} \n WHERE \n {}.id={}'.format(
table, table, id))
row = cur.fetchall()
if len(row) == 14: # Old schema
row = row.insert(5, 'None')
return row | Return an entire row of a table
Parameters
---------
id: int
row integer
table: str
'reaction', 'publication', 'publication_system', 'reaction_system' | Below is the the instruction that describes the task:
### Input:
Return an entire row of a table
Parameters
---------
id: int
row integer
table: str
'reaction', 'publication', 'publication_system', 'reaction_system'
### Response:
def read(self, id, table='reaction'):
""" Return an entire row of a table
Parameters
---------
id: int
row integer
table: str
'reaction', 'publication', 'publication_system', 'reaction_system'
"""
con = self.connection or self._connect()
self._initialize(con)
cur = con.cursor()
cur.execute('SELECT * FROM \n {} \n WHERE \n {}.id={}'.format(
table, table, id))
row = cur.fetchall()
if len(row) == 14: # Old schema
row = row.insert(5, 'None')
return row |
def validate_service(self, request, uid):
"""Validates the specs values from request for the service uid. Returns
a non-translated message if the validation failed."""
result = get_record_value(request, uid, 'result')
if not result:
# No result set for this service, dismiss
return None
if not api.is_floatable(result):
return "Expected result value must be numeric"
spec_min = get_record_value(request, uid, "min", result)
spec_max = get_record_value(request, uid, "max", result)
error = get_record_value(request, uid, "error", "0")
if not api.is_floatable(spec_min):
return "'Min' value must be numeric"
if not api.is_floatable(spec_max):
return "'Max' value must be numeric"
if api.to_float(spec_min) > api.to_float(result):
return "'Min' value must be below the expected result"
if api.to_float(spec_max) < api.to_float(result):
return "'Max' value must be above the expected result"
if not api.is_floatable(error) or 0.0 < api.to_float(error) > 100:
return "% Error must be between 0 and 100"
return None | Validates the specs values from request for the service uid. Returns
a non-translated message if the validation failed. | Below is the the instruction that describes the task:
### Input:
Validates the specs values from request for the service uid. Returns
a non-translated message if the validation failed.
### Response:
def validate_service(self, request, uid):
"""Validates the specs values from request for the service uid. Returns
a non-translated message if the validation failed."""
result = get_record_value(request, uid, 'result')
if not result:
# No result set for this service, dismiss
return None
if not api.is_floatable(result):
return "Expected result value must be numeric"
spec_min = get_record_value(request, uid, "min", result)
spec_max = get_record_value(request, uid, "max", result)
error = get_record_value(request, uid, "error", "0")
if not api.is_floatable(spec_min):
return "'Min' value must be numeric"
if not api.is_floatable(spec_max):
return "'Max' value must be numeric"
if api.to_float(spec_min) > api.to_float(result):
return "'Min' value must be below the expected result"
if api.to_float(spec_max) < api.to_float(result):
return "'Max' value must be above the expected result"
if not api.is_floatable(error) or 0.0 < api.to_float(error) > 100:
return "% Error must be between 0 and 100"
return None |
def cleanlines(self):
"""Cleaned code lines.
Implemented cleanups:
* eventually remove method version
* remove docstrings
* remove comments
* remove empty lines
* remove line brackes within brackets
* replace `modelutils` with nothing
* remove complete lines containing `fastaccess`
* replace shortcuts with complete references
"""
code = inspect.getsource(self.func)
code = '\n'.join(code.split('"""')[::2])
code = code.replace('modelutils.', '')
for (name, shortcut) in zip(self.collectornames,
self.collectorshortcuts):
code = code.replace('%s.' % shortcut, 'self.%s.' % name)
code = self.remove_linebreaks_within_equations(code)
lines = code.splitlines()
self.remove_imath_operators(lines)
lines[0] = 'def %s(self):' % self.funcname
lines = [l.split('#')[0] for l in lines]
lines = [l for l in lines if 'fastaccess' not in l]
lines = [l.rstrip() for l in lines if l.rstrip()]
return Lines(*lines) | Cleaned code lines.
Implemented cleanups:
* eventually remove method version
* remove docstrings
* remove comments
* remove empty lines
* remove line brackes within brackets
* replace `modelutils` with nothing
* remove complete lines containing `fastaccess`
* replace shortcuts with complete references | Below is the the instruction that describes the task:
### Input:
Cleaned code lines.
Implemented cleanups:
* eventually remove method version
* remove docstrings
* remove comments
* remove empty lines
* remove line brackes within brackets
* replace `modelutils` with nothing
* remove complete lines containing `fastaccess`
* replace shortcuts with complete references
### Response:
def cleanlines(self):
"""Cleaned code lines.
Implemented cleanups:
* eventually remove method version
* remove docstrings
* remove comments
* remove empty lines
* remove line brackes within brackets
* replace `modelutils` with nothing
* remove complete lines containing `fastaccess`
* replace shortcuts with complete references
"""
code = inspect.getsource(self.func)
code = '\n'.join(code.split('"""')[::2])
code = code.replace('modelutils.', '')
for (name, shortcut) in zip(self.collectornames,
self.collectorshortcuts):
code = code.replace('%s.' % shortcut, 'self.%s.' % name)
code = self.remove_linebreaks_within_equations(code)
lines = code.splitlines()
self.remove_imath_operators(lines)
lines[0] = 'def %s(self):' % self.funcname
lines = [l.split('#')[0] for l in lines]
lines = [l for l in lines if 'fastaccess' not in l]
lines = [l.rstrip() for l in lines if l.rstrip()]
return Lines(*lines) |
def _is_level_reference(self, key, axis=0):
"""
Test whether a key is a level reference for a given axis.
To be considered a level reference, `key` must be a string that:
- (axis=0): Matches the name of an index level and does NOT match
a column label.
- (axis=1): Matches the name of a column level and does NOT match
an index label.
Parameters
----------
key : str
Potential level name for the given axis
axis : int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
is_level : bool
"""
axis = self._get_axis_number(axis)
if self.ndim > 2:
raise NotImplementedError(
"_is_level_reference is not implemented for {type}"
.format(type=type(self)))
return (key is not None and
is_hashable(key) and
key in self.axes[axis].names and
not self._is_label_reference(key, axis=axis)) | Test whether a key is a level reference for a given axis.
To be considered a level reference, `key` must be a string that:
- (axis=0): Matches the name of an index level and does NOT match
a column label.
- (axis=1): Matches the name of a column level and does NOT match
an index label.
Parameters
----------
key : str
Potential level name for the given axis
axis : int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
is_level : bool | Below is the the instruction that describes the task:
### Input:
Test whether a key is a level reference for a given axis.
To be considered a level reference, `key` must be a string that:
- (axis=0): Matches the name of an index level and does NOT match
a column label.
- (axis=1): Matches the name of a column level and does NOT match
an index label.
Parameters
----------
key : str
Potential level name for the given axis
axis : int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
is_level : bool
### Response:
def _is_level_reference(self, key, axis=0):
"""
Test whether a key is a level reference for a given axis.
To be considered a level reference, `key` must be a string that:
- (axis=0): Matches the name of an index level and does NOT match
a column label.
- (axis=1): Matches the name of a column level and does NOT match
an index label.
Parameters
----------
key : str
Potential level name for the given axis
axis : int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
is_level : bool
"""
axis = self._get_axis_number(axis)
if self.ndim > 2:
raise NotImplementedError(
"_is_level_reference is not implemented for {type}"
.format(type=type(self)))
return (key is not None and
is_hashable(key) and
key in self.axes[axis].names and
not self._is_label_reference(key, axis=axis)) |
def make_pipeline(context):
"""
Create our pipeline.
"""
# Filter for primary share equities. IsPrimaryShare is a built-in filter.
primary_share = IsPrimaryShare()
# Equities listed as common stock (as opposed to, say, preferred stock).
# 'ST00000001' indicates common stock.
common_stock = morningstar.share_class_reference.security_type.latest.eq(
'ST00000001')
# Non-depositary receipts. Recall that the ~ operator inverts filters,
# turning Trues into Falses and vice versa
not_depositary = ~morningstar.share_class_reference.is_depositary_receipt.latest
# Equities not trading over-the-counter.
not_otc = ~morningstar.share_class_reference.exchange_id.latest.startswith(
'OTC')
# Not when-issued equities.
not_wi = ~morningstar.share_class_reference.symbol.latest.endswith('.WI')
# Equities without LP in their name, .matches does a match using a regular
# expression
not_lp_name = ~morningstar.company_reference.standard_name.latest.matches(
'.* L[. ]?P.?$')
# Equities with a null value in the limited_partnership Morningstar
# fundamental field.
not_lp_balance_sheet = morningstar.balance_sheet.limited_partnership.latest.isnull()
# Equities whose most recent Morningstar market cap is not null have
# fundamental data and therefore are not ETFs.
have_market_cap = morningstar.valuation.market_cap.latest.notnull()
# At least a certain price
price = USEquityPricing.close.latest
AtLeastPrice = (price >= context.MyLeastPrice)
AtMostPrice = (price <= context.MyMostPrice)
# Filter for stocks that pass all of our previous filters.
tradeable_stocks = (
primary_share
& common_stock
& not_depositary
& not_otc
& not_wi
& not_lp_name
& not_lp_balance_sheet
& have_market_cap
& AtLeastPrice
& AtMostPrice
)
LowVar = 6
HighVar = 40
log.info(
'''
Algorithm initialized variables:
context.MaxCandidates %s
LowVar %s
HighVar %s''' %
(context.MaxCandidates, LowVar, HighVar))
# High dollar volume filter.
base_universe = AverageDollarVolume(
window_length=20,
mask=tradeable_stocks
).percentile_between(LowVar, HighVar)
# Short close price average.
ShortAvg = SimpleMovingAverage(
inputs=[USEquityPricing.close],
window_length=3,
mask=base_universe
)
# Long close price average.
LongAvg = SimpleMovingAverage(
inputs=[USEquityPricing.close],
window_length=45,
mask=base_universe
)
percent_difference = (ShortAvg - LongAvg) / LongAvg
# Filter to select securities to long.
stocks_worst = percent_difference.bottom(context.MaxCandidates)
securities_to_trade = (stocks_worst)
return Pipeline(
columns={
'stocks_worst': stocks_worst
},
screen=(securities_to_trade),
) | Create our pipeline. | Below is the the instruction that describes the task:
### Input:
Create our pipeline.
### Response:
def make_pipeline(context):
"""
Create our pipeline.
"""
# Filter for primary share equities. IsPrimaryShare is a built-in filter.
primary_share = IsPrimaryShare()
# Equities listed as common stock (as opposed to, say, preferred stock).
# 'ST00000001' indicates common stock.
common_stock = morningstar.share_class_reference.security_type.latest.eq(
'ST00000001')
# Non-depositary receipts. Recall that the ~ operator inverts filters,
# turning Trues into Falses and vice versa
not_depositary = ~morningstar.share_class_reference.is_depositary_receipt.latest
# Equities not trading over-the-counter.
not_otc = ~morningstar.share_class_reference.exchange_id.latest.startswith(
'OTC')
# Not when-issued equities.
not_wi = ~morningstar.share_class_reference.symbol.latest.endswith('.WI')
# Equities without LP in their name, .matches does a match using a regular
# expression
not_lp_name = ~morningstar.company_reference.standard_name.latest.matches(
'.* L[. ]?P.?$')
# Equities with a null value in the limited_partnership Morningstar
# fundamental field.
not_lp_balance_sheet = morningstar.balance_sheet.limited_partnership.latest.isnull()
# Equities whose most recent Morningstar market cap is not null have
# fundamental data and therefore are not ETFs.
have_market_cap = morningstar.valuation.market_cap.latest.notnull()
# At least a certain price
price = USEquityPricing.close.latest
AtLeastPrice = (price >= context.MyLeastPrice)
AtMostPrice = (price <= context.MyMostPrice)
# Filter for stocks that pass all of our previous filters.
tradeable_stocks = (
primary_share
& common_stock
& not_depositary
& not_otc
& not_wi
& not_lp_name
& not_lp_balance_sheet
& have_market_cap
& AtLeastPrice
& AtMostPrice
)
LowVar = 6
HighVar = 40
log.info(
'''
Algorithm initialized variables:
context.MaxCandidates %s
LowVar %s
HighVar %s''' %
(context.MaxCandidates, LowVar, HighVar))
# High dollar volume filter.
base_universe = AverageDollarVolume(
window_length=20,
mask=tradeable_stocks
).percentile_between(LowVar, HighVar)
# Short close price average.
ShortAvg = SimpleMovingAverage(
inputs=[USEquityPricing.close],
window_length=3,
mask=base_universe
)
# Long close price average.
LongAvg = SimpleMovingAverage(
inputs=[USEquityPricing.close],
window_length=45,
mask=base_universe
)
percent_difference = (ShortAvg - LongAvg) / LongAvg
# Filter to select securities to long.
stocks_worst = percent_difference.bottom(context.MaxCandidates)
securities_to_trade = (stocks_worst)
return Pipeline(
columns={
'stocks_worst': stocks_worst
},
screen=(securities_to_trade),
) |
def tensors_equal(tensor1: torch.Tensor, tensor2: torch.Tensor, tolerance: float = 1e-12) -> bool:
"""
A check for tensor equality (by value). We make sure that the tensors have the same shape,
then check all of the entries in the tensor for equality. We additionally allow the input
tensors to be lists or dictionaries, where we then do the above check on every position in the
list / item in the dictionary. If we find objects that aren't tensors as we're doing that, we
just defer to their equality check.
This is kind of a catch-all method that's designed to make implementing ``__eq__`` methods
easier, in a way that's really only intended to be useful for tests.
"""
# pylint: disable=too-many-return-statements
if isinstance(tensor1, (list, tuple)):
if not isinstance(tensor2, (list, tuple)) or len(tensor1) != len(tensor2):
return False
return all([tensors_equal(t1, t2, tolerance) for t1, t2 in zip(tensor1, tensor2)])
elif isinstance(tensor1, dict):
if not isinstance(tensor2, dict):
return False
if tensor1.keys() != tensor2.keys():
return False
return all([tensors_equal(tensor1[key], tensor2[key], tolerance) for key in tensor1])
elif isinstance(tensor1, torch.Tensor):
if not isinstance(tensor2, torch.Tensor):
return False
if tensor1.size() != tensor2.size():
return False
return ((tensor1 - tensor2).abs().float() < tolerance).all()
else:
try:
return tensor1 == tensor2
except RuntimeError:
print(type(tensor1), type(tensor2))
raise | A check for tensor equality (by value). We make sure that the tensors have the same shape,
then check all of the entries in the tensor for equality. We additionally allow the input
tensors to be lists or dictionaries, where we then do the above check on every position in the
list / item in the dictionary. If we find objects that aren't tensors as we're doing that, we
just defer to their equality check.
This is kind of a catch-all method that's designed to make implementing ``__eq__`` methods
easier, in a way that's really only intended to be useful for tests. | Below is the the instruction that describes the task:
### Input:
A check for tensor equality (by value). We make sure that the tensors have the same shape,
then check all of the entries in the tensor for equality. We additionally allow the input
tensors to be lists or dictionaries, where we then do the above check on every position in the
list / item in the dictionary. If we find objects that aren't tensors as we're doing that, we
just defer to their equality check.
This is kind of a catch-all method that's designed to make implementing ``__eq__`` methods
easier, in a way that's really only intended to be useful for tests.
### Response:
def tensors_equal(tensor1: torch.Tensor, tensor2: torch.Tensor, tolerance: float = 1e-12) -> bool:
"""
A check for tensor equality (by value). We make sure that the tensors have the same shape,
then check all of the entries in the tensor for equality. We additionally allow the input
tensors to be lists or dictionaries, where we then do the above check on every position in the
list / item in the dictionary. If we find objects that aren't tensors as we're doing that, we
just defer to their equality check.
This is kind of a catch-all method that's designed to make implementing ``__eq__`` methods
easier, in a way that's really only intended to be useful for tests.
"""
# pylint: disable=too-many-return-statements
if isinstance(tensor1, (list, tuple)):
if not isinstance(tensor2, (list, tuple)) or len(tensor1) != len(tensor2):
return False
return all([tensors_equal(t1, t2, tolerance) for t1, t2 in zip(tensor1, tensor2)])
elif isinstance(tensor1, dict):
if not isinstance(tensor2, dict):
return False
if tensor1.keys() != tensor2.keys():
return False
return all([tensors_equal(tensor1[key], tensor2[key], tolerance) for key in tensor1])
elif isinstance(tensor1, torch.Tensor):
if not isinstance(tensor2, torch.Tensor):
return False
if tensor1.size() != tensor2.size():
return False
return ((tensor1 - tensor2).abs().float() < tolerance).all()
else:
try:
return tensor1 == tensor2
except RuntimeError:
print(type(tensor1), type(tensor2))
raise |
def QueryItems(self, database_or_Container_link, query, options=None, partition_key=None):
"""Queries documents in a collection.
:param str database_or_Container_link:
The link to the database when using partitioning, otherwise link to the document collection.
:param (str or dict) query:
:param dict options:
The request options for the request.
:param str partition_key:
Partition key for the query(default value None)
:return:
Query Iterable of Documents.
:rtype:
query_iterable.QueryIterable
"""
database_or_Container_link = base.TrimBeginningAndEndingSlashes(database_or_Container_link)
if options is None:
options = {}
if(base.IsDatabaseLink(database_or_Container_link)):
# Python doesn't have a good way of specifying an overloaded constructor, and this is how it's generally overloaded constructors are specified(by calling a @classmethod) and returning the 'self' instance
return query_iterable.QueryIterable.PartitioningQueryIterable(self, query, options, database_or_Container_link, partition_key)
else:
path = base.GetPathFromLink(database_or_Container_link, 'docs')
collection_id = base.GetResourceIdOrFullNameFromLink(database_or_Container_link)
def fetch_fn(options):
return self.__QueryFeed(path,
'docs',
collection_id,
lambda r: r['Documents'],
lambda _, b: b,
query,
options), self.last_response_headers
return query_iterable.QueryIterable(self, query, options, fetch_fn, database_or_Container_link) | Queries documents in a collection.
:param str database_or_Container_link:
The link to the database when using partitioning, otherwise link to the document collection.
:param (str or dict) query:
:param dict options:
The request options for the request.
:param str partition_key:
Partition key for the query(default value None)
:return:
Query Iterable of Documents.
:rtype:
query_iterable.QueryIterable | Below is the the instruction that describes the task:
### Input:
Queries documents in a collection.
:param str database_or_Container_link:
The link to the database when using partitioning, otherwise link to the document collection.
:param (str or dict) query:
:param dict options:
The request options for the request.
:param str partition_key:
Partition key for the query(default value None)
:return:
Query Iterable of Documents.
:rtype:
query_iterable.QueryIterable
### Response:
def QueryItems(self, database_or_Container_link, query, options=None, partition_key=None):
"""Queries documents in a collection.
:param str database_or_Container_link:
The link to the database when using partitioning, otherwise link to the document collection.
:param (str or dict) query:
:param dict options:
The request options for the request.
:param str partition_key:
Partition key for the query(default value None)
:return:
Query Iterable of Documents.
:rtype:
query_iterable.QueryIterable
"""
database_or_Container_link = base.TrimBeginningAndEndingSlashes(database_or_Container_link)
if options is None:
options = {}
if(base.IsDatabaseLink(database_or_Container_link)):
# Python doesn't have a good way of specifying an overloaded constructor, and this is how it's generally overloaded constructors are specified(by calling a @classmethod) and returning the 'self' instance
return query_iterable.QueryIterable.PartitioningQueryIterable(self, query, options, database_or_Container_link, partition_key)
else:
path = base.GetPathFromLink(database_or_Container_link, 'docs')
collection_id = base.GetResourceIdOrFullNameFromLink(database_or_Container_link)
def fetch_fn(options):
return self.__QueryFeed(path,
'docs',
collection_id,
lambda r: r['Documents'],
lambda _, b: b,
query,
options), self.last_response_headers
return query_iterable.QueryIterable(self, query, options, fetch_fn, database_or_Container_link) |
def _store(self):
"""Returns a storage dictionary understood by the storage service.
Sparse matrices are extracted similar to the :class:`~pypet.parameter.SparseParameter` and
marked with the identifier `__spsp__`.
"""
store_dict = {}
for key in self._data:
val = self._data[key]
if SparseParameter._is_supported_matrix(val):
data_list, name_list, hash_tuple = SparseParameter._serialize_matrix(val)
rename_list = ['%s%s%s' % (key, SparseParameter.IDENTIFIER, name)
for name in name_list]
is_dia = int(len(rename_list) == 4)
store_dict[key + SparseResult.IDENTIFIER + 'is_dia'] = is_dia
for idx, name in enumerate(rename_list):
store_dict[name] = data_list[idx]
else:
store_dict[key] = val
return store_dict | Returns a storage dictionary understood by the storage service.
Sparse matrices are extracted similar to the :class:`~pypet.parameter.SparseParameter` and
marked with the identifier `__spsp__`. | Below is the the instruction that describes the task:
### Input:
Returns a storage dictionary understood by the storage service.
Sparse matrices are extracted similar to the :class:`~pypet.parameter.SparseParameter` and
marked with the identifier `__spsp__`.
### Response:
def _store(self):
"""Returns a storage dictionary understood by the storage service.
Sparse matrices are extracted similar to the :class:`~pypet.parameter.SparseParameter` and
marked with the identifier `__spsp__`.
"""
store_dict = {}
for key in self._data:
val = self._data[key]
if SparseParameter._is_supported_matrix(val):
data_list, name_list, hash_tuple = SparseParameter._serialize_matrix(val)
rename_list = ['%s%s%s' % (key, SparseParameter.IDENTIFIER, name)
for name in name_list]
is_dia = int(len(rename_list) == 4)
store_dict[key + SparseResult.IDENTIFIER + 'is_dia'] = is_dia
for idx, name in enumerate(rename_list):
store_dict[name] = data_list[idx]
else:
store_dict[key] = val
return store_dict |
def _process_pth(path, base, file_name):
"""Process a ``.pth`` file similar to site.addpackage(...)."""
pth_path = os.path.abspath(os.path.join(base, file_name))
# This is for `exec` below, as some packages (e.g. virtualenvwrapper)
# assume that `site.addpackage` is running them.
sitedir = os.path.dirname(base)
# Only process this once.
if pth_path in _processed_pths:
return
_processed_pths.add(pth_path)
log.log(1, '_process_pth(..., %r, %r)', base, file_name)
try:
fh = open(pth_path)
except IOError as e:
log.log(1, '_process_path IOError %s' % e)
return
for line in open(pth_path):
line = line.strip()
# Blanks and comments.
if not line or line.startswith('#'):
continue
# Execs.
if line.startswith('import'):
# Sorry easy-install: you break our environment.
if file_name == 'easy-install.pth' and 'sys.__plen' in line:
continue
log.log(1, '_process_pth exec %s' % line)
exec line
continue
# Replace "{platform_spec}" to allow per-platform paths.
line = line.format(
platform_spec=basic_platform_spec,
basic_platform_spec=basic_platform_spec,
extended_platform_spec=extended_platform_spec,
)
path.add(os.path.join(base, line)) | Process a ``.pth`` file similar to site.addpackage(...). | Below is the the instruction that describes the task:
### Input:
Process a ``.pth`` file similar to site.addpackage(...).
### Response:
def _process_pth(path, base, file_name):
"""Process a ``.pth`` file similar to site.addpackage(...)."""
pth_path = os.path.abspath(os.path.join(base, file_name))
# This is for `exec` below, as some packages (e.g. virtualenvwrapper)
# assume that `site.addpackage` is running them.
sitedir = os.path.dirname(base)
# Only process this once.
if pth_path in _processed_pths:
return
_processed_pths.add(pth_path)
log.log(1, '_process_pth(..., %r, %r)', base, file_name)
try:
fh = open(pth_path)
except IOError as e:
log.log(1, '_process_path IOError %s' % e)
return
for line in open(pth_path):
line = line.strip()
# Blanks and comments.
if not line or line.startswith('#'):
continue
# Execs.
if line.startswith('import'):
# Sorry easy-install: you break our environment.
if file_name == 'easy-install.pth' and 'sys.__plen' in line:
continue
log.log(1, '_process_pth exec %s' % line)
exec line
continue
# Replace "{platform_spec}" to allow per-platform paths.
line = line.format(
platform_spec=basic_platform_spec,
basic_platform_spec=basic_platform_spec,
extended_platform_spec=extended_platform_spec,
)
path.add(os.path.join(base, line)) |
def setLoggedMetrics(self, metricNames):
""" Tell the writer which metrics should be written
Parameters:
-----------------------------------------------------------------------
metricsNames: A list of metric lables to be written
"""
if metricNames is None:
self.__metricNames = set([])
else:
self.__metricNames = set(metricNames) | Tell the writer which metrics should be written
Parameters:
-----------------------------------------------------------------------
metricsNames: A list of metric lables to be written | Below is the the instruction that describes the task:
### Input:
Tell the writer which metrics should be written
Parameters:
-----------------------------------------------------------------------
metricsNames: A list of metric lables to be written
### Response:
def setLoggedMetrics(self, metricNames):
""" Tell the writer which metrics should be written
Parameters:
-----------------------------------------------------------------------
metricsNames: A list of metric lables to be written
"""
if metricNames is None:
self.__metricNames = set([])
else:
self.__metricNames = set(metricNames) |
def read_initializer_configuration(self, name, **kwargs): # noqa: E501
"""read_initializer_configuration # noqa: E501
read the specified InitializerConfiguration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_initializer_configuration(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the InitializerConfiguration (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1alpha1InitializerConfiguration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_initializer_configuration_with_http_info(name, **kwargs) # noqa: E501
else:
(data) = self.read_initializer_configuration_with_http_info(name, **kwargs) # noqa: E501
return data | read_initializer_configuration # noqa: E501
read the specified InitializerConfiguration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_initializer_configuration(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the InitializerConfiguration (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1alpha1InitializerConfiguration
If the method is called asynchronously,
returns the request thread. | Below is the the instruction that describes the task:
### Input:
read_initializer_configuration # noqa: E501
read the specified InitializerConfiguration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_initializer_configuration(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the InitializerConfiguration (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1alpha1InitializerConfiguration
If the method is called asynchronously,
returns the request thread.
### Response:
def read_initializer_configuration(self, name, **kwargs): # noqa: E501
"""read_initializer_configuration # noqa: E501
read the specified InitializerConfiguration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_initializer_configuration(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the InitializerConfiguration (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1alpha1InitializerConfiguration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_initializer_configuration_with_http_info(name, **kwargs) # noqa: E501
else:
(data) = self.read_initializer_configuration_with_http_info(name, **kwargs) # noqa: E501
return data |
def _handle_message(self, tags, source, command, target, msg):
"""Construct the correct events and handle them
:param tags: the tags of the message
:type tags: :class:`list` of :class:`message.Tag`
:param source: the sender of the message
:type source: :class:`str`
:param command: the event type
:type command: :class:`str`
:param target: the target of the message
:type target: :class:`str`
:param msg: the content
:type msg: :class:`str`
:returns: None
:rtype: None
:raises: None
"""
if isinstance(msg, tuple):
if command in ["privmsg", "pubmsg"]:
command = "ctcp"
else:
command = "ctcpreply"
msg = list(msg)
log.debug("tags: %s, command: %s, source: %s, target: %s, "
"arguments: %s", tags, command, source, target, msg)
event = Event3(command, source, target, msg, tags=tags)
self._handle_event(event)
if command == "ctcp" and msg[0] == "ACTION":
event = Event3("action", source, target, msg[1:], tags=tags)
self._handle_event(event)
else:
log.debug("tags: %s, command: %s, source: %s, target: %s, "
"arguments: %s", tags, command, source, target, [msg])
event = Event3(command, source, target, [msg], tags=tags)
self._handle_event(event) | Construct the correct events and handle them
:param tags: the tags of the message
:type tags: :class:`list` of :class:`message.Tag`
:param source: the sender of the message
:type source: :class:`str`
:param command: the event type
:type command: :class:`str`
:param target: the target of the message
:type target: :class:`str`
:param msg: the content
:type msg: :class:`str`
:returns: None
:rtype: None
:raises: None | Below is the the instruction that describes the task:
### Input:
Construct the correct events and handle them
:param tags: the tags of the message
:type tags: :class:`list` of :class:`message.Tag`
:param source: the sender of the message
:type source: :class:`str`
:param command: the event type
:type command: :class:`str`
:param target: the target of the message
:type target: :class:`str`
:param msg: the content
:type msg: :class:`str`
:returns: None
:rtype: None
:raises: None
### Response:
def _handle_message(self, tags, source, command, target, msg):
"""Construct the correct events and handle them
:param tags: the tags of the message
:type tags: :class:`list` of :class:`message.Tag`
:param source: the sender of the message
:type source: :class:`str`
:param command: the event type
:type command: :class:`str`
:param target: the target of the message
:type target: :class:`str`
:param msg: the content
:type msg: :class:`str`
:returns: None
:rtype: None
:raises: None
"""
if isinstance(msg, tuple):
if command in ["privmsg", "pubmsg"]:
command = "ctcp"
else:
command = "ctcpreply"
msg = list(msg)
log.debug("tags: %s, command: %s, source: %s, target: %s, "
"arguments: %s", tags, command, source, target, msg)
event = Event3(command, source, target, msg, tags=tags)
self._handle_event(event)
if command == "ctcp" and msg[0] == "ACTION":
event = Event3("action", source, target, msg[1:], tags=tags)
self._handle_event(event)
else:
log.debug("tags: %s, command: %s, source: %s, target: %s, "
"arguments: %s", tags, command, source, target, [msg])
event = Event3(command, source, target, [msg], tags=tags)
self._handle_event(event) |
def ok_cred_def_id(token: str, issuer_did: str = None) -> bool:
"""
Whether input token looks like a valid credential definition identifier from input issuer DID (default any); i.e.,
<issuer-did>:3:CL:<schema-seq-no>:<cred-def-id-tag> for protocol >= 1.4, or
<issuer-did>:3:CL:<schema-seq-no> for protocol == 1.3.
:param token: candidate string
:param issuer_did: issuer DID to match, if specified
:return: whether input token looks like a valid credential definition identifier
"""
cd_id_m = re.match('([{}]{{21,22}}):3:CL:[1-9][0-9]*(:.+)?$'.format(B58), token or '')
return bool(cd_id_m) and ((not issuer_did) or cd_id_m.group(1) == issuer_did) | Whether input token looks like a valid credential definition identifier from input issuer DID (default any); i.e.,
<issuer-did>:3:CL:<schema-seq-no>:<cred-def-id-tag> for protocol >= 1.4, or
<issuer-did>:3:CL:<schema-seq-no> for protocol == 1.3.
:param token: candidate string
:param issuer_did: issuer DID to match, if specified
:return: whether input token looks like a valid credential definition identifier | Below is the the instruction that describes the task:
### Input:
Whether input token looks like a valid credential definition identifier from input issuer DID (default any); i.e.,
<issuer-did>:3:CL:<schema-seq-no>:<cred-def-id-tag> for protocol >= 1.4, or
<issuer-did>:3:CL:<schema-seq-no> for protocol == 1.3.
:param token: candidate string
:param issuer_did: issuer DID to match, if specified
:return: whether input token looks like a valid credential definition identifier
### Response:
def ok_cred_def_id(token: str, issuer_did: str = None) -> bool:
"""
Whether input token looks like a valid credential definition identifier from input issuer DID (default any); i.e.,
<issuer-did>:3:CL:<schema-seq-no>:<cred-def-id-tag> for protocol >= 1.4, or
<issuer-did>:3:CL:<schema-seq-no> for protocol == 1.3.
:param token: candidate string
:param issuer_did: issuer DID to match, if specified
:return: whether input token looks like a valid credential definition identifier
"""
cd_id_m = re.match('([{}]{{21,22}}):3:CL:[1-9][0-9]*(:.+)?$'.format(B58), token or '')
return bool(cd_id_m) and ((not issuer_did) or cd_id_m.group(1) == issuer_did) |
def supports_gzip(self, context):
"""
Looks at the RequestContext object and determines if the client
supports gzip encoded content. If the client does, we will send them
to the gzipped version of files that are allowed to be compressed.
Clients without gzip support will be served the original media.
"""
if 'request' in context and client.supports_gzip():
enc = context['request'].META.get('HTTP_ACCEPT_ENCODING', '')
return 'gzip' in enc and msettings['SERVE_REMOTE']
return False | Looks at the RequestContext object and determines if the client
supports gzip encoded content. If the client does, we will send them
to the gzipped version of files that are allowed to be compressed.
Clients without gzip support will be served the original media. | Below is the the instruction that describes the task:
### Input:
Looks at the RequestContext object and determines if the client
supports gzip encoded content. If the client does, we will send them
to the gzipped version of files that are allowed to be compressed.
Clients without gzip support will be served the original media.
### Response:
def supports_gzip(self, context):
"""
Looks at the RequestContext object and determines if the client
supports gzip encoded content. If the client does, we will send them
to the gzipped version of files that are allowed to be compressed.
Clients without gzip support will be served the original media.
"""
if 'request' in context and client.supports_gzip():
enc = context['request'].META.get('HTTP_ACCEPT_ENCODING', '')
return 'gzip' in enc and msettings['SERVE_REMOTE']
return False |
def normpath(path: Optional[str]) -> Optional[str]:
""" Normalizes the path, returns None if the argument is None """
if path is not None:
return os.path.normpath(path) | Normalizes the path, returns None if the argument is None | Below is the the instruction that describes the task:
### Input:
Normalizes the path, returns None if the argument is None
### Response:
def normpath(path: Optional[str]) -> Optional[str]:
""" Normalizes the path, returns None if the argument is None """
if path is not None:
return os.path.normpath(path) |
def effect_repertoire(self, mechanism, purview):
"""Return the effect repertoire of a mechanism over a purview.
Args:
mechanism (tuple[int]): The mechanism for which to calculate the
effect repertoire.
purview (tuple[int]): The purview over which to calculate the
effect repertoire.
Returns:
np.ndarray: The effect repertoire of the mechanism over the
purview.
.. note::
The returned repertoire is a distribution over purview node states,
not the states of the whole network.
"""
# If the purview is empty, the distribution is empty, so return the
# multiplicative identity.
if not purview:
return np.array([1.0])
# Use a frozenset so the arguments to `_single_node_effect_repertoire`
# can be hashed and cached.
mechanism = frozenset(mechanism)
# Preallocate the repertoire with the proper shape, so that
# probabilities are broadcasted appropriately.
joint = np.ones(repertoire_shape(purview, self.tpm_size))
# The effect repertoire is the product of the effect repertoires of the
# individual nodes.
return joint * functools.reduce(
np.multiply, [self._single_node_effect_repertoire(mechanism, p)
for p in purview]
) | Return the effect repertoire of a mechanism over a purview.
Args:
mechanism (tuple[int]): The mechanism for which to calculate the
effect repertoire.
purview (tuple[int]): The purview over which to calculate the
effect repertoire.
Returns:
np.ndarray: The effect repertoire of the mechanism over the
purview.
.. note::
The returned repertoire is a distribution over purview node states,
not the states of the whole network. | Below is the the instruction that describes the task:
### Input:
Return the effect repertoire of a mechanism over a purview.
Args:
mechanism (tuple[int]): The mechanism for which to calculate the
effect repertoire.
purview (tuple[int]): The purview over which to calculate the
effect repertoire.
Returns:
np.ndarray: The effect repertoire of the mechanism over the
purview.
.. note::
The returned repertoire is a distribution over purview node states,
not the states of the whole network.
### Response:
def effect_repertoire(self, mechanism, purview):
"""Return the effect repertoire of a mechanism over a purview.
Args:
mechanism (tuple[int]): The mechanism for which to calculate the
effect repertoire.
purview (tuple[int]): The purview over which to calculate the
effect repertoire.
Returns:
np.ndarray: The effect repertoire of the mechanism over the
purview.
.. note::
The returned repertoire is a distribution over purview node states,
not the states of the whole network.
"""
# If the purview is empty, the distribution is empty, so return the
# multiplicative identity.
if not purview:
return np.array([1.0])
# Use a frozenset so the arguments to `_single_node_effect_repertoire`
# can be hashed and cached.
mechanism = frozenset(mechanism)
# Preallocate the repertoire with the proper shape, so that
# probabilities are broadcasted appropriately.
joint = np.ones(repertoire_shape(purview, self.tpm_size))
# The effect repertoire is the product of the effect repertoires of the
# individual nodes.
return joint * functools.reduce(
np.multiply, [self._single_node_effect_repertoire(mechanism, p)
for p in purview]
) |
def wait_for_interrupt(self, check_interval=1.0, max_time=None):
"""Run the event loop until we receive a ctrl-c interrupt or max_time passes.
This method will wake up every 1 second by default to check for any
interrupt signals or if the maximum runtime has expired. This can be
set lower for testing purpose to reduce latency but in production
settings, this can cause increased CPU usage so 1 second is an
appropriate value.
Args:
check_interval (float): How often to wake up and check for
a SIGTERM. Defaults to 1s. Setting this faster is useful
for unit testing. Cannot be < 0.01 s.
max_time (float): Stop the event loop after max_time seconds.
This is useful for testing purposes. Defaults to None,
which means run forever until interrupt.
"""
self.start()
wait = max(check_interval, 0.01)
accum = 0
try:
while max_time is None or accum < max_time:
try:
time.sleep(wait)
except IOError:
pass # IOError comes when this call is interrupted in a signal handler
accum += wait
except KeyboardInterrupt:
pass | Run the event loop until we receive a ctrl-c interrupt or max_time passes.
This method will wake up every 1 second by default to check for any
interrupt signals or if the maximum runtime has expired. This can be
set lower for testing purpose to reduce latency but in production
settings, this can cause increased CPU usage so 1 second is an
appropriate value.
Args:
check_interval (float): How often to wake up and check for
a SIGTERM. Defaults to 1s. Setting this faster is useful
for unit testing. Cannot be < 0.01 s.
max_time (float): Stop the event loop after max_time seconds.
This is useful for testing purposes. Defaults to None,
which means run forever until interrupt. | Below is the the instruction that describes the task:
### Input:
Run the event loop until we receive a ctrl-c interrupt or max_time passes.
This method will wake up every 1 second by default to check for any
interrupt signals or if the maximum runtime has expired. This can be
set lower for testing purpose to reduce latency but in production
settings, this can cause increased CPU usage so 1 second is an
appropriate value.
Args:
check_interval (float): How often to wake up and check for
a SIGTERM. Defaults to 1s. Setting this faster is useful
for unit testing. Cannot be < 0.01 s.
max_time (float): Stop the event loop after max_time seconds.
This is useful for testing purposes. Defaults to None,
which means run forever until interrupt.
### Response:
def wait_for_interrupt(self, check_interval=1.0, max_time=None):
"""Run the event loop until we receive a ctrl-c interrupt or max_time passes.
This method will wake up every 1 second by default to check for any
interrupt signals or if the maximum runtime has expired. This can be
set lower for testing purpose to reduce latency but in production
settings, this can cause increased CPU usage so 1 second is an
appropriate value.
Args:
check_interval (float): How often to wake up and check for
a SIGTERM. Defaults to 1s. Setting this faster is useful
for unit testing. Cannot be < 0.01 s.
max_time (float): Stop the event loop after max_time seconds.
This is useful for testing purposes. Defaults to None,
which means run forever until interrupt.
"""
self.start()
wait = max(check_interval, 0.01)
accum = 0
try:
while max_time is None or accum < max_time:
try:
time.sleep(wait)
except IOError:
pass # IOError comes when this call is interrupted in a signal handler
accum += wait
except KeyboardInterrupt:
pass |
def get_marathon_task(
task_name,
inactive=False,
completed=False
):
""" Get a dictionary describing a named marathon task
"""
return get_service_task('marathon', task_name, inactive, completed) | Get a dictionary describing a named marathon task | Below is the the instruction that describes the task:
### Input:
Get a dictionary describing a named marathon task
### Response:
def get_marathon_task(
task_name,
inactive=False,
completed=False
):
""" Get a dictionary describing a named marathon task
"""
return get_service_task('marathon', task_name, inactive, completed) |
def new_output_file_opt(self, opt, name):
""" Add an option and return a new file handle
"""
fil = File(name)
self.add_output_opt(opt, fil)
return fil | Add an option and return a new file handle | Below is the the instruction that describes the task:
### Input:
Add an option and return a new file handle
### Response:
def new_output_file_opt(self, opt, name):
""" Add an option and return a new file handle
"""
fil = File(name)
self.add_output_opt(opt, fil)
return fil |
def start_web_site_network_trace_operation(
self, resource_group_name, name, duration_in_seconds=None, max_frame_length=None, sas_url=None, custom_headers=None, raw=False, polling=True, **operation_config):
"""Start capturing network packets for the site.
Start capturing network packets for the site.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param name: The name of the web app.
:type name: str
:param duration_in_seconds: The duration to keep capturing in seconds.
:type duration_in_seconds: int
:param max_frame_length: The maximum frame length in bytes (Optional).
:type max_frame_length: int
:param sas_url: The Blob URL to store capture file.
:type sas_url: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns list or
ClientRawResponse<list> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[list[~azure.mgmt.web.models.NetworkTrace]]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[list[~azure.mgmt.web.models.NetworkTrace]]]
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
raw_result = self._start_web_site_network_trace_operation_initial(
resource_group_name=resource_group_name,
name=name,
duration_in_seconds=duration_in_seconds,
max_frame_length=max_frame_length,
sas_url=sas_url,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('[NetworkTrace]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) | Start capturing network packets for the site.
Start capturing network packets for the site.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param name: The name of the web app.
:type name: str
:param duration_in_seconds: The duration to keep capturing in seconds.
:type duration_in_seconds: int
:param max_frame_length: The maximum frame length in bytes (Optional).
:type max_frame_length: int
:param sas_url: The Blob URL to store capture file.
:type sas_url: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns list or
ClientRawResponse<list> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[list[~azure.mgmt.web.models.NetworkTrace]]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[list[~azure.mgmt.web.models.NetworkTrace]]]
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>` | Below is the the instruction that describes the task:
### Input:
Start capturing network packets for the site.
Start capturing network packets for the site.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param name: The name of the web app.
:type name: str
:param duration_in_seconds: The duration to keep capturing in seconds.
:type duration_in_seconds: int
:param max_frame_length: The maximum frame length in bytes (Optional).
:type max_frame_length: int
:param sas_url: The Blob URL to store capture file.
:type sas_url: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns list or
ClientRawResponse<list> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[list[~azure.mgmt.web.models.NetworkTrace]]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[list[~azure.mgmt.web.models.NetworkTrace]]]
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
### Response:
def start_web_site_network_trace_operation(
self, resource_group_name, name, duration_in_seconds=None, max_frame_length=None, sas_url=None, custom_headers=None, raw=False, polling=True, **operation_config):
"""Start capturing network packets for the site.
Start capturing network packets for the site.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param name: The name of the web app.
:type name: str
:param duration_in_seconds: The duration to keep capturing in seconds.
:type duration_in_seconds: int
:param max_frame_length: The maximum frame length in bytes (Optional).
:type max_frame_length: int
:param sas_url: The Blob URL to store capture file.
:type sas_url: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns list or
ClientRawResponse<list> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[list[~azure.mgmt.web.models.NetworkTrace]]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[list[~azure.mgmt.web.models.NetworkTrace]]]
:raises:
:class:`DefaultErrorResponseException<azure.mgmt.web.models.DefaultErrorResponseException>`
"""
raw_result = self._start_web_site_network_trace_operation_initial(
resource_group_name=resource_group_name,
name=name,
duration_in_seconds=duration_in_seconds,
max_frame_length=max_frame_length,
sas_url=sas_url,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('[NetworkTrace]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) |
def create_ingress_rule(self, app, rule):
"""Create a normalized ingress rule.
Args:
app (str): Application name
rule (dict or int): Allowed Security Group ports and protocols.
Returns:
dict: Contains app, start_port, end_port, protocol, cross_account_env and cross_account_vpc_id
"""
if isinstance(rule, dict):
# Advanced
start_port = rule.get('start_port')
end_port = rule.get('end_port')
protocol = rule.get('protocol', 'tcp')
requested_cross_account = rule.get('env', self.env)
if self.env == requested_cross_account:
# We are trying to use cross-account security group settings within the same account
# We should not allow this.
cross_account_env = None
cross_account_vpc_id = None
else:
cross_account_env = requested_cross_account
cross_account_vpc_id = get_vpc_id(cross_account_env, self.region)
else:
start_port = rule
end_port = rule
protocol = 'tcp'
cross_account_env = None
cross_account_vpc_id = None
created_rule = {
'app': app,
'start_port': start_port,
'end_port': end_port,
'protocol': protocol,
'cross_account_env': cross_account_env,
'cross_account_vpc_id': cross_account_vpc_id
}
self.log.debug('Normalized ingress rule: %s', created_rule)
return created_rule | Create a normalized ingress rule.
Args:
app (str): Application name
rule (dict or int): Allowed Security Group ports and protocols.
Returns:
dict: Contains app, start_port, end_port, protocol, cross_account_env and cross_account_vpc_id | Below is the the instruction that describes the task:
### Input:
Create a normalized ingress rule.
Args:
app (str): Application name
rule (dict or int): Allowed Security Group ports and protocols.
Returns:
dict: Contains app, start_port, end_port, protocol, cross_account_env and cross_account_vpc_id
### Response:
def create_ingress_rule(self, app, rule):
"""Create a normalized ingress rule.
Args:
app (str): Application name
rule (dict or int): Allowed Security Group ports and protocols.
Returns:
dict: Contains app, start_port, end_port, protocol, cross_account_env and cross_account_vpc_id
"""
if isinstance(rule, dict):
# Advanced
start_port = rule.get('start_port')
end_port = rule.get('end_port')
protocol = rule.get('protocol', 'tcp')
requested_cross_account = rule.get('env', self.env)
if self.env == requested_cross_account:
# We are trying to use cross-account security group settings within the same account
# We should not allow this.
cross_account_env = None
cross_account_vpc_id = None
else:
cross_account_env = requested_cross_account
cross_account_vpc_id = get_vpc_id(cross_account_env, self.region)
else:
start_port = rule
end_port = rule
protocol = 'tcp'
cross_account_env = None
cross_account_vpc_id = None
created_rule = {
'app': app,
'start_port': start_port,
'end_port': end_port,
'protocol': protocol,
'cross_account_env': cross_account_env,
'cross_account_vpc_id': cross_account_vpc_id
}
self.log.debug('Normalized ingress rule: %s', created_rule)
return created_rule |
def log(self, n=None, template=None):
"""
Run the repository log command
Returns:
str: output of log command (``bzr log -l <n>``)
"""
cmd = ['bzr', 'log']
if n:
cmd.append('-l%d' % n)
return self.sh(cmd, shell=False) | Run the repository log command
Returns:
str: output of log command (``bzr log -l <n>``) | Below is the the instruction that describes the task:
### Input:
Run the repository log command
Returns:
str: output of log command (``bzr log -l <n>``)
### Response:
def log(self, n=None, template=None):
"""
Run the repository log command
Returns:
str: output of log command (``bzr log -l <n>``)
"""
cmd = ['bzr', 'log']
if n:
cmd.append('-l%d' % n)
return self.sh(cmd, shell=False) |
def run_jobs(delete_completed=False, ignore_errors=False, now=None):
"""Run scheduled jobs.
You may specify a date to be treated as the current time.
"""
if ScheduledJob.objects.filter(status='running'):
raise ValueError('jobs in progress found; aborting')
if now is None:
now = datetime.datetime.now()
expire_jobs(now)
schedule_sticky_jobs()
start_scheduled_jobs(now, delete_completed, ignore_errors) | Run scheduled jobs.
You may specify a date to be treated as the current time. | Below is the the instruction that describes the task:
### Input:
Run scheduled jobs.
You may specify a date to be treated as the current time.
### Response:
def run_jobs(delete_completed=False, ignore_errors=False, now=None):
"""Run scheduled jobs.
You may specify a date to be treated as the current time.
"""
if ScheduledJob.objects.filter(status='running'):
raise ValueError('jobs in progress found; aborting')
if now is None:
now = datetime.datetime.now()
expire_jobs(now)
schedule_sticky_jobs()
start_scheduled_jobs(now, delete_completed, ignore_errors) |
def get_state_transition_function(self):
"""
Returns the transition of the state variables as nested dict in the
case of table type parameter and a nested structure in case of
decision diagram parameter
Example
--------
>>> reader = PomdpXReader('Test_PomdpX.xml')
>>> reader.get_state_transition_function()
[{'Var': 'rover_1',
'Parent': ['action_rover', 'rover_0'],
'Type': 'TBL',
'Parameter': [{'Instance': ['amw', 's0', 's2'],
'ProbTable': ['1.0']},
{'Instance': ['amw', 's1', 's0'],
'ProbTable': ['1.0']},
...
]
}]
"""
state_transition_function = []
for variable in self.network.findall('StateTransitionFunction'):
for var in variable.findall('CondProb'):
cond_prob = defaultdict(list)
cond_prob['Var'] = var.find('Var').text
cond_prob['Parent'] = var.find('Parent').text.split()
if not var.find('Parameter').get('type'):
cond_prob['Type'] = 'TBL'
else:
cond_prob['Type'] = var.find('Parameter').get('type')
cond_prob['Parameter'] = self.get_parameter(var)
state_transition_function.append(cond_prob)
return state_transition_function | Returns the transition of the state variables as nested dict in the
case of table type parameter and a nested structure in case of
decision diagram parameter
Example
--------
>>> reader = PomdpXReader('Test_PomdpX.xml')
>>> reader.get_state_transition_function()
[{'Var': 'rover_1',
'Parent': ['action_rover', 'rover_0'],
'Type': 'TBL',
'Parameter': [{'Instance': ['amw', 's0', 's2'],
'ProbTable': ['1.0']},
{'Instance': ['amw', 's1', 's0'],
'ProbTable': ['1.0']},
...
]
}] | Below is the the instruction that describes the task:
### Input:
Returns the transition of the state variables as nested dict in the
case of table type parameter and a nested structure in case of
decision diagram parameter
Example
--------
>>> reader = PomdpXReader('Test_PomdpX.xml')
>>> reader.get_state_transition_function()
[{'Var': 'rover_1',
'Parent': ['action_rover', 'rover_0'],
'Type': 'TBL',
'Parameter': [{'Instance': ['amw', 's0', 's2'],
'ProbTable': ['1.0']},
{'Instance': ['amw', 's1', 's0'],
'ProbTable': ['1.0']},
...
]
}]
### Response:
def get_state_transition_function(self):
"""
Returns the transition of the state variables as nested dict in the
case of table type parameter and a nested structure in case of
decision diagram parameter
Example
--------
>>> reader = PomdpXReader('Test_PomdpX.xml')
>>> reader.get_state_transition_function()
[{'Var': 'rover_1',
'Parent': ['action_rover', 'rover_0'],
'Type': 'TBL',
'Parameter': [{'Instance': ['amw', 's0', 's2'],
'ProbTable': ['1.0']},
{'Instance': ['amw', 's1', 's0'],
'ProbTable': ['1.0']},
...
]
}]
"""
state_transition_function = []
for variable in self.network.findall('StateTransitionFunction'):
for var in variable.findall('CondProb'):
cond_prob = defaultdict(list)
cond_prob['Var'] = var.find('Var').text
cond_prob['Parent'] = var.find('Parent').text.split()
if not var.find('Parameter').get('type'):
cond_prob['Type'] = 'TBL'
else:
cond_prob['Type'] = var.find('Parameter').get('type')
cond_prob['Parameter'] = self.get_parameter(var)
state_transition_function.append(cond_prob)
return state_transition_function |
def send_line(self, line, parse_result=True):
"""Submit a raw line of text to the VW instance, returning a
VWResult() object.
If 'parse_result' is False, ignore the result and return None.
"""
self.vw_process.sendline(line) # Send line, along with newline
result = self._get_response(parse_result=parse_result)
return result | Submit a raw line of text to the VW instance, returning a
VWResult() object.
If 'parse_result' is False, ignore the result and return None. | Below is the the instruction that describes the task:
### Input:
Submit a raw line of text to the VW instance, returning a
VWResult() object.
If 'parse_result' is False, ignore the result and return None.
### Response:
def send_line(self, line, parse_result=True):
"""Submit a raw line of text to the VW instance, returning a
VWResult() object.
If 'parse_result' is False, ignore the result and return None.
"""
self.vw_process.sendline(line) # Send line, along with newline
result = self._get_response(parse_result=parse_result)
return result |
def send_command_return(self, obj, command, *arguments):
""" Send command and wait for single line output. """
index_command = obj._build_index_command(command, *arguments)
return obj._extract_return(command, self.chassis_list[obj.chassis].sendQuery(index_command)) | Send command and wait for single line output. | Below is the the instruction that describes the task:
### Input:
Send command and wait for single line output.
### Response:
def send_command_return(self, obj, command, *arguments):
""" Send command and wait for single line output. """
index_command = obj._build_index_command(command, *arguments)
return obj._extract_return(command, self.chassis_list[obj.chassis].sendQuery(index_command)) |
def remove_channel(self, channel, *, verbose=True):
"""Remove channel from data.
Parameters
----------
channel : int or str
Channel index or name to remove.
verbose : boolean (optional)
Toggle talkback. Default is True.
"""
channel_index = wt_kit.get_index(self.channel_names, channel)
new = list(self.channel_names)
name = new.pop(channel_index)
del self[name]
self.channel_names = new
if verbose:
print("channel {0} removed".format(name)) | Remove channel from data.
Parameters
----------
channel : int or str
Channel index or name to remove.
verbose : boolean (optional)
Toggle talkback. Default is True. | Below is the instruction that describes the task:
### Input:
Remove channel from data.
Parameters
----------
channel : int or str
Channel index or name to remove.
verbose : boolean (optional)
Toggle talkback. Default is True.
### Response:
def remove_channel(self, channel, *, verbose=True):
"""Remove channel from data.
Parameters
----------
channel : int or str
Channel index or name to remove.
verbose : boolean (optional)
Toggle talkback. Default is True.
"""
channel_index = wt_kit.get_index(self.channel_names, channel)
new = list(self.channel_names)
name = new.pop(channel_index)
del self[name]
self.channel_names = new
if verbose:
print("channel {0} removed".format(name)) |
def set_data_dir(path=None, store=None):
"""
Set the top level directory pysat uses to look for data and reload.
Parameters
----------
path : string
valid path to directory pysat uses to look for data
store : bool
if True, store data directory for future runs
"""
import sys
import os
import pysat
if sys.version_info[0] >= 3:
if sys.version_info[1] < 4:
import imp
re_load = imp.reload
else:
import importlib
re_load = importlib.reload
else:
re_load = reload
if store is None:
store = True
if os.path.isdir(path):
if store:
with open(os.path.join(os.path.expanduser('~'), '.pysat',
'data_path.txt'), 'w') as f:
f.write(path)
pysat.data_dir = path
pysat._files = re_load(pysat._files)
pysat._instrument = re_load(pysat._instrument)
else:
raise ValueError('Path %s does not lead to a valid directory.' % path) | Set the top level directory pysat uses to look for data and reload.
Parameters
----------
path : string
valid path to directory pysat uses to look for data
store : bool
if True, store data directory for future runs | Below is the instruction that describes the task:
### Input:
Set the top level directory pysat uses to look for data and reload.
Parameters
----------
path : string
valid path to directory pysat uses to look for data
store : bool
if True, store data directory for future runs
### Response:
def set_data_dir(path=None, store=None):
"""
Set the top level directory pysat uses to look for data and reload.
Parameters
----------
path : string
valid path to directory pysat uses to look for data
store : bool
if True, store data directory for future runs
"""
import sys
import os
import pysat
if sys.version_info[0] >= 3:
if sys.version_info[1] < 4:
import imp
re_load = imp.reload
else:
import importlib
re_load = importlib.reload
else:
re_load = reload
if store is None:
store = True
if os.path.isdir(path):
if store:
with open(os.path.join(os.path.expanduser('~'), '.pysat',
'data_path.txt'), 'w') as f:
f.write(path)
pysat.data_dir = path
pysat._files = re_load(pysat._files)
pysat._instrument = re_load(pysat._instrument)
else:
raise ValueError('Path %s does not lead to a valid directory.' % path) |
def closenessScores(self, expValues, actValues, fractional=True):
""" See the function description in base.py
"""
expValue = expValues[0]
actValue = actValues[0]
if self.periodic:
expValue = expValue % self.maxval
actValue = actValue % self.maxval
err = abs(expValue - actValue)
if self.periodic:
err = min(err, self.maxval - err)
if fractional:
pctErr = float(err) / (self.maxval - self.minval)
pctErr = min(1.0, pctErr)
closeness = 1.0 - pctErr
else:
closeness = err
return numpy.array([closeness]) | See the function description in base.py | Below is the instruction that describes the task:
### Input:
See the function description in base.py
### Response:
def closenessScores(self, expValues, actValues, fractional=True):
""" See the function description in base.py
"""
expValue = expValues[0]
actValue = actValues[0]
if self.periodic:
expValue = expValue % self.maxval
actValue = actValue % self.maxval
err = abs(expValue - actValue)
if self.periodic:
err = min(err, self.maxval - err)
if fractional:
pctErr = float(err) / (self.maxval - self.minval)
pctErr = min(1.0, pctErr)
closeness = 1.0 - pctErr
else:
closeness = err
return numpy.array([closeness]) |
def create_api_deployment(self):
"""Create API deployment of ENV name."""
try:
self.client.create_deployment(restApiId=self.api_id, stageName=self.env)
self.log.info('Created a deployment resource.')
except botocore.exceptions.ClientError as error:
error_code = error.response['Error']['Code']
if error_code == 'TooManyRequestsException':
self.log.debug('Retrying. We have hit api limit.')
else:
self.log.debug('Retrying. We received %s.', error_code) | Create API deployment of ENV name. | Below is the instruction that describes the task:
### Input:
Create API deployment of ENV name.
### Response:
def create_api_deployment(self):
"""Create API deployment of ENV name."""
try:
self.client.create_deployment(restApiId=self.api_id, stageName=self.env)
self.log.info('Created a deployment resource.')
except botocore.exceptions.ClientError as error:
error_code = error.response['Error']['Code']
if error_code == 'TooManyRequestsException':
self.log.debug('Retrying. We have hit api limit.')
else:
self.log.debug('Retrying. We received %s.', error_code) |
def update_file_ext(filename, ext='txt', sep='.'):
r"""Force the file or path str to end with the indicated extension
Note: a dot (".") is assumed to delimit the extension
>>> from __future__ import unicode_literals
>>> update_file_ext('/home/hobs/extremofile', 'bac')
'/home/hobs/extremofile.bac'
>>> update_file_ext('/home/hobs/piano.file/', 'music')
'/home/hobs/piano.file/.music'
>>> update_file_ext('/home/ninja.hobs/Anglofile', '.uk')
'/home/ninja.hobs/Anglofile.uk'
>>> update_file_ext('/home/ninja-corsi/audio', 'file', sep='-')
'/home/ninja-corsi/audio-file'
"""
path, filename = os.path.split(filename)
if ext and ext[0] == sep:
ext = ext[1:]
return os.path.join(path, sep.join(filename.split(sep)[:-1 if filename.count(sep) > 1 else 1] + [ext])) | r"""Force the file or path str to end with the indicated extension
Note: a dot (".") is assumed to delimit the extension
>>> from __future__ import unicode_literals
>>> update_file_ext('/home/hobs/extremofile', 'bac')
'/home/hobs/extremofile.bac'
>>> update_file_ext('/home/hobs/piano.file/', 'music')
'/home/hobs/piano.file/.music'
>>> update_file_ext('/home/ninja.hobs/Anglofile', '.uk')
'/home/ninja.hobs/Anglofile.uk'
>>> update_file_ext('/home/ninja-corsi/audio', 'file', sep='-')
'/home/ninja-corsi/audio-file' | Below is the instruction that describes the task:
### Input:
r"""Force the file or path str to end with the indicated extension
Note: a dot (".") is assumed to delimit the extension
>>> from __future__ import unicode_literals
>>> update_file_ext('/home/hobs/extremofile', 'bac')
'/home/hobs/extremofile.bac'
>>> update_file_ext('/home/hobs/piano.file/', 'music')
'/home/hobs/piano.file/.music'
>>> update_file_ext('/home/ninja.hobs/Anglofile', '.uk')
'/home/ninja.hobs/Anglofile.uk'
>>> update_file_ext('/home/ninja-corsi/audio', 'file', sep='-')
'/home/ninja-corsi/audio-file'
### Response:
def update_file_ext(filename, ext='txt', sep='.'):
r"""Force the file or path str to end with the indicated extension
Note: a dot (".") is assumed to delimit the extension
>>> from __future__ import unicode_literals
>>> update_file_ext('/home/hobs/extremofile', 'bac')
'/home/hobs/extremofile.bac'
>>> update_file_ext('/home/hobs/piano.file/', 'music')
'/home/hobs/piano.file/.music'
>>> update_file_ext('/home/ninja.hobs/Anglofile', '.uk')
'/home/ninja.hobs/Anglofile.uk'
>>> update_file_ext('/home/ninja-corsi/audio', 'file', sep='-')
'/home/ninja-corsi/audio-file'
"""
path, filename = os.path.split(filename)
if ext and ext[0] == sep:
ext = ext[1:]
return os.path.join(path, sep.join(filename.split(sep)[:-1 if filename.count(sep) > 1 else 1] + [ext])) |
def handle_event(self,
event_handler,
event_name,
user_args,
event_timeout=None,
cond=None,
cond_timeout=None):
"""Handle events that don't have registered handlers
In a new thread, poll one event of specified type from its queue and
execute its handler. If no such event exists, the thread waits until
one appears.
Args:
event_handler: Handler for the event, which should take at least
one argument - the event json object.
event_name: Name of the event to be handled.
user_args: User arguments for the handler; to be passed in after
the event json.
event_timeout: Number of seconds to wait for the event to come.
cond: A condition to wait on before executing the handler. Should
be a threading.Event object.
cond_timeout: Number of seconds to wait before the condition times
out. Never times out if None.
Returns:
A concurrent.Future object associated with the handler.
If blocking call worker.result() is triggered, the handler
needs to return something to unblock.
"""
worker = self.executor.submit(self._handle, event_handler, event_name,
user_args, event_timeout, cond,
cond_timeout)
return worker | Handle events that don't have registered handlers
In a new thread, poll one event of specified type from its queue and
execute its handler. If no such event exists, the thread waits until
one appears.
Args:
event_handler: Handler for the event, which should take at least
one argument - the event json object.
event_name: Name of the event to be handled.
user_args: User arguments for the handler; to be passed in after
the event json.
event_timeout: Number of seconds to wait for the event to come.
cond: A condition to wait on before executing the handler. Should
be a threading.Event object.
cond_timeout: Number of seconds to wait before the condition times
out. Never times out if None.
Returns:
A concurrent.Future object associated with the handler.
If blocking call worker.result() is triggered, the handler
needs to return something to unblock. | Below is the instruction that describes the task:
### Input:
Handle events that don't have registered handlers
In a new thread, poll one event of specified type from its queue and
execute its handler. If no such event exists, the thread waits until
one appears.
Args:
event_handler: Handler for the event, which should take at least
one argument - the event json object.
event_name: Name of the event to be handled.
user_args: User arguments for the handler; to be passed in after
the event json.
event_timeout: Number of seconds to wait for the event to come.
cond: A condition to wait on before executing the handler. Should
be a threading.Event object.
cond_timeout: Number of seconds to wait before the condition times
out. Never times out if None.
Returns:
A concurrent.Future object associated with the handler.
If blocking call worker.result() is triggered, the handler
needs to return something to unblock.
### Response:
def handle_event(self,
event_handler,
event_name,
user_args,
event_timeout=None,
cond=None,
cond_timeout=None):
"""Handle events that don't have registered handlers
In a new thread, poll one event of specified type from its queue and
execute its handler. If no such event exists, the thread waits until
one appears.
Args:
event_handler: Handler for the event, which should take at least
one argument - the event json object.
event_name: Name of the event to be handled.
user_args: User arguments for the handler; to be passed in after
the event json.
event_timeout: Number of seconds to wait for the event to come.
cond: A condition to wait on before executing the handler. Should
be a threading.Event object.
cond_timeout: Number of seconds to wait before the condition times
out. Never times out if None.
Returns:
A concurrent.Future object associated with the handler.
If blocking call worker.result() is triggered, the handler
needs to return something to unblock.
"""
worker = self.executor.submit(self._handle, event_handler, event_name,
user_args, event_timeout, cond,
cond_timeout)
return worker |
def close(self):
'''Close the underlying file.
Sets data attribute .closed to True. A closed file cannot be used for
further I/O operations. close() may be called more than once without
error. Some kinds of file objects (for example, opened by popen())
may return an exit status upon closing.'''
if self._mode in _allowed_write and self._valid is None:
self._finalize_write()
result = self._fp.close()
self._closed = True
return result | Close the underlying file.
Sets data attribute .closed to True. A closed file cannot be used for
further I/O operations. close() may be called more than once without
error. Some kinds of file objects (for example, opened by popen())
may return an exit status upon closing. | Below is the instruction that describes the task:
### Input:
Close the underlying file.
Sets data attribute .closed to True. A closed file cannot be used for
further I/O operations. close() may be called more than once without
error. Some kinds of file objects (for example, opened by popen())
may return an exit status upon closing.
### Response:
def close(self):
'''Close the underlying file.
Sets data attribute .closed to True. A closed file cannot be used for
further I/O operations. close() may be called more than once without
error. Some kinds of file objects (for example, opened by popen())
may return an exit status upon closing.'''
if self._mode in _allowed_write and self._valid is None:
self._finalize_write()
result = self._fp.close()
self._closed = True
return result |
def matches_sample(output, target, threshold, is_correct, actual_output):
"""
Check if a sample with the given network output, target output, and threshold
is the classification (is_correct, actual_output) like true positive or false negative
"""
return (bool(output > threshold) == bool(target)) == is_correct and actual_output == bool(output > threshold) | Check if a sample with the given network output, target output, and threshold
is the classification (is_correct, actual_output) like true positive or false negative | Below is the instruction that describes the task:
### Input:
Check if a sample with the given network output, target output, and threshold
is the classification (is_correct, actual_output) like true positive or false negative
### Response:
def matches_sample(output, target, threshold, is_correct, actual_output):
"""
Check if a sample with the given network output, target output, and threshold
is the classification (is_correct, actual_output) like true positive or false negative
"""
return (bool(output > threshold) == bool(target)) == is_correct and actual_output == bool(output > threshold) |
def create_exchange(self):
"""
Creates MQ exchange for this channel
Needs to be defined only once.
"""
mq_channel = self._connect_mq()
mq_channel.exchange_declare(exchange=self.code_name,
exchange_type='fanout',
durable=True) | Creates MQ exchange for this channel
Needs to be defined only once. | Below is the instruction that describes the task:
### Input:
Creates MQ exchange for this channel
Needs to be defined only once.
### Response:
def create_exchange(self):
"""
Creates MQ exchange for this channel
Needs to be defined only once.
"""
mq_channel = self._connect_mq()
mq_channel.exchange_declare(exchange=self.code_name,
exchange_type='fanout',
durable=True) |
def phi_progress(pst,logger=None,filename=None,**kwargs):
""" make plot of phi vs number of model runs - requires
available pestpp .iobj file
Parameters
----------
pst : pyemu.Pst
logger : Logger
if None, a generic one is created. Default is None
filename : str
PDF filename to save figures to. If None, figures are returned. Default is None
kwargs : dict
optional keyword args to pass to plotting functions
"""
if logger is None:
logger = Logger('Default_Loggger.log', echo=False)
logger.log("plot phi_progress")
iobj_file = pst.filename.replace(".pst",".iobj")
if not os.path.exists(iobj_file):
logger.lraise("couldn't find iobj file {0}".format(iobj_file))
df = pd.read_csv(iobj_file)
if "ax" in kwargs:
ax = kwargs["ax"]
else:
fig = plt.figure(figsize=figsize)
ax = plt.subplot(1,1,1)
ax.plot(df.model_runs_completed,df.total_phi,marker='.')
ax.set_xlabel("model runs")
ax.set_ylabel("$\phi$")
ax.grid()
if filename is not None:
plt.savefig(filename)
logger.log("plot phi_progress")
return ax | make plot of phi vs number of model runs - requires
available pestpp .iobj file
Parameters
----------
pst : pyemu.Pst
logger : Logger
if None, a generic one is created. Default is None
filename : str
PDF filename to save figures to. If None, figures are returned. Default is None
kwargs : dict
optional keyword args to pass to plotting functions | Below is the instruction that describes the task:
### Input:
make plot of phi vs number of model runs - requires
available pestpp .iobj file
Parameters
----------
pst : pyemu.Pst
logger : Logger
if None, a generic one is created. Default is None
filename : str
PDF filename to save figures to. If None, figures are returned. Default is None
kwargs : dict
optional keyword args to pass to plotting functions
### Response:
def phi_progress(pst,logger=None,filename=None,**kwargs):
""" make plot of phi vs number of model runs - requires
available pestpp .iobj file
Parameters
----------
pst : pyemu.Pst
logger : Logger
if None, a generic one is created. Default is None
filename : str
PDF filename to save figures to. If None, figures are returned. Default is None
kwargs : dict
optional keyword args to pass to plotting functions
"""
if logger is None:
logger = Logger('Default_Loggger.log', echo=False)
logger.log("plot phi_progress")
iobj_file = pst.filename.replace(".pst",".iobj")
if not os.path.exists(iobj_file):
logger.lraise("couldn't find iobj file {0}".format(iobj_file))
df = pd.read_csv(iobj_file)
if "ax" in kwargs:
ax = kwargs["ax"]
else:
fig = plt.figure(figsize=figsize)
ax = plt.subplot(1,1,1)
ax.plot(df.model_runs_completed,df.total_phi,marker='.')
ax.set_xlabel("model runs")
ax.set_ylabel("$\phi$")
ax.grid()
if filename is not None:
plt.savefig(filename)
logger.log("plot phi_progress")
return ax |
def is_valid(self, request_data, raise_exceptions=False):
"""
Checks if the Logout Request received is valid
:param request_data: Request Data
:type request_data: dict
:param raise_exceptions: Whether to return false on failure or raise an exception
:type raise_exceptions: Boolean
:return: If the Logout Request is or not valid
:rtype: boolean
"""
self.__error = None
lowercase_urlencoding = False
try:
dom = fromstring(self.__logout_request, forbid_dtd=True)
idp_data = self.__settings.get_idp_data()
idp_entity_id = idp_data['entityId']
if 'get_data' in request_data.keys():
get_data = request_data['get_data']
else:
get_data = {}
if 'lowercase_urlencoding' in request_data.keys():
lowercase_urlencoding = request_data['lowercase_urlencoding']
if self.__settings.is_strict():
res = OneLogin_Saml2_Utils.validate_xml(dom, 'saml-schema-protocol-2.0.xsd', self.__settings.is_debug_active())
if not isinstance(res, Document):
raise OneLogin_Saml2_ValidationError(
'Invalid SAML Logout Request. Not match the saml-schema-protocol-2.0.xsd',
OneLogin_Saml2_ValidationError.INVALID_XML_FORMAT
)
security = self.__settings.get_security_data()
current_url = OneLogin_Saml2_Utils.get_self_url_no_query(request_data)
# Check NotOnOrAfter
if dom.get('NotOnOrAfter', None):
na = OneLogin_Saml2_Utils.parse_SAML_to_time(dom.get('NotOnOrAfter'))
if na <= OneLogin_Saml2_Utils.now():
raise OneLogin_Saml2_ValidationError(
'Could not validate timestamp: expired. Check system clock.',
OneLogin_Saml2_ValidationError.RESPONSE_EXPIRED
)
# Check destination
if dom.get('Destination', None):
destination = dom.get('Destination')
if destination != '':
if current_url not in destination:
raise Exception(
'The LogoutRequest was received at '
'%(currentURL)s instead of %(destination)s' %
{
'currentURL': current_url,
'destination': destination,
},
OneLogin_Saml2_ValidationError.WRONG_DESTINATION
)
# Check issuer
issuer = OneLogin_Saml2_Logout_Request.get_issuer(dom)
if issuer is not None and issuer != idp_entity_id:
raise OneLogin_Saml2_ValidationError(
'Invalid issuer in the Logout Request (expected %(idpEntityId)s, got %(issuer)s)' %
{
'idpEntityId': idp_entity_id,
'issuer': issuer
},
OneLogin_Saml2_ValidationError.WRONG_ISSUER
)
if security['wantMessagesSigned']:
if 'Signature' not in get_data:
raise OneLogin_Saml2_ValidationError(
'The Message of the Logout Request is not signed and the SP require it',
OneLogin_Saml2_ValidationError.NO_SIGNED_MESSAGE
)
if 'Signature' in get_data:
if 'SigAlg' not in get_data:
sign_alg = OneLogin_Saml2_Constants.RSA_SHA1
else:
sign_alg = get_data['SigAlg']
signed_query = 'SAMLRequest=%s' % OneLogin_Saml2_Utils.get_encoded_parameter(get_data, 'SAMLRequest', lowercase_urlencoding=lowercase_urlencoding)
if 'RelayState' in get_data:
signed_query = '%s&RelayState=%s' % (signed_query, OneLogin_Saml2_Utils.get_encoded_parameter(get_data, 'RelayState', lowercase_urlencoding=lowercase_urlencoding))
signed_query = '%s&SigAlg=%s' % (signed_query, OneLogin_Saml2_Utils.get_encoded_parameter(get_data, 'SigAlg', OneLogin_Saml2_Constants.RSA_SHA1, lowercase_urlencoding=lowercase_urlencoding))
exists_x509cert = 'x509cert' in idp_data and idp_data['x509cert']
exists_multix509sign = 'x509certMulti' in idp_data and \
'signing' in idp_data['x509certMulti'] and \
idp_data['x509certMulti']['signing']
if not (exists_x509cert or exists_multix509sign):
raise OneLogin_Saml2_Error(
'In order to validate the sign on the Logout Request, the x509cert of the IdP is required',
OneLogin_Saml2_Error.CERT_NOT_FOUND
)
if exists_multix509sign:
for cert in idp_data['x509certMulti']['signing']:
if OneLogin_Saml2_Utils.validate_binary_sign(signed_query, b64decode(get_data['Signature']), cert, sign_alg):
return True
raise OneLogin_Saml2_ValidationError(
'Signature validation failed. Logout Request rejected',
OneLogin_Saml2_ValidationError.INVALID_SIGNATURE
)
else:
cert = idp_data['x509cert']
if not OneLogin_Saml2_Utils.validate_binary_sign(signed_query, b64decode(get_data['Signature']), cert, sign_alg):
raise OneLogin_Saml2_ValidationError(
'Signature validation failed. Logout Request rejected',
OneLogin_Saml2_ValidationError.INVALID_SIGNATURE
)
return True
except Exception as err:
# pylint: disable=R0801sign_alg
self.__error = err.__str__()
debug = self.__settings.is_debug_active()
if debug:
print(err.__str__())
if raise_exceptions:
raise err
return False | Checks if the Logout Request received is valid
:param request_data: Request Data
:type request_data: dict
:param raise_exceptions: Whether to return false on failure or raise an exception
:type raise_exceptions: Boolean
:return: If the Logout Request is or not valid
:rtype: boolean | Below is the instruction that describes the task:
### Input:
Checks if the Logout Request received is valid
:param request_data: Request Data
:type request_data: dict
:param raise_exceptions: Whether to return false on failure or raise an exception
:type raise_exceptions: Boolean
:return: If the Logout Request is or not valid
:rtype: boolean
### Response:
def is_valid(self, request_data, raise_exceptions=False):
"""
Checks if the Logout Request received is valid
:param request_data: Request Data
:type request_data: dict
:param raise_exceptions: Whether to return false on failure or raise an exception
:type raise_exceptions: Boolean
:return: If the Logout Request is or not valid
:rtype: boolean
"""
self.__error = None
lowercase_urlencoding = False
try:
dom = fromstring(self.__logout_request, forbid_dtd=True)
idp_data = self.__settings.get_idp_data()
idp_entity_id = idp_data['entityId']
if 'get_data' in request_data.keys():
get_data = request_data['get_data']
else:
get_data = {}
if 'lowercase_urlencoding' in request_data.keys():
lowercase_urlencoding = request_data['lowercase_urlencoding']
if self.__settings.is_strict():
res = OneLogin_Saml2_Utils.validate_xml(dom, 'saml-schema-protocol-2.0.xsd', self.__settings.is_debug_active())
if not isinstance(res, Document):
raise OneLogin_Saml2_ValidationError(
'Invalid SAML Logout Request. Not match the saml-schema-protocol-2.0.xsd',
OneLogin_Saml2_ValidationError.INVALID_XML_FORMAT
)
security = self.__settings.get_security_data()
current_url = OneLogin_Saml2_Utils.get_self_url_no_query(request_data)
# Check NotOnOrAfter
if dom.get('NotOnOrAfter', None):
na = OneLogin_Saml2_Utils.parse_SAML_to_time(dom.get('NotOnOrAfter'))
if na <= OneLogin_Saml2_Utils.now():
raise OneLogin_Saml2_ValidationError(
'Could not validate timestamp: expired. Check system clock.',
OneLogin_Saml2_ValidationError.RESPONSE_EXPIRED
)
# Check destination
if dom.get('Destination', None):
destination = dom.get('Destination')
if destination != '':
if current_url not in destination:
raise Exception(
'The LogoutRequest was received at '
'%(currentURL)s instead of %(destination)s' %
{
'currentURL': current_url,
'destination': destination,
},
OneLogin_Saml2_ValidationError.WRONG_DESTINATION
)
# Check issuer
issuer = OneLogin_Saml2_Logout_Request.get_issuer(dom)
if issuer is not None and issuer != idp_entity_id:
raise OneLogin_Saml2_ValidationError(
'Invalid issuer in the Logout Request (expected %(idpEntityId)s, got %(issuer)s)' %
{
'idpEntityId': idp_entity_id,
'issuer': issuer
},
OneLogin_Saml2_ValidationError.WRONG_ISSUER
)
if security['wantMessagesSigned']:
if 'Signature' not in get_data:
raise OneLogin_Saml2_ValidationError(
'The Message of the Logout Request is not signed and the SP require it',
OneLogin_Saml2_ValidationError.NO_SIGNED_MESSAGE
)
if 'Signature' in get_data:
if 'SigAlg' not in get_data:
sign_alg = OneLogin_Saml2_Constants.RSA_SHA1
else:
sign_alg = get_data['SigAlg']
signed_query = 'SAMLRequest=%s' % OneLogin_Saml2_Utils.get_encoded_parameter(get_data, 'SAMLRequest', lowercase_urlencoding=lowercase_urlencoding)
if 'RelayState' in get_data:
signed_query = '%s&RelayState=%s' % (signed_query, OneLogin_Saml2_Utils.get_encoded_parameter(get_data, 'RelayState', lowercase_urlencoding=lowercase_urlencoding))
signed_query = '%s&SigAlg=%s' % (signed_query, OneLogin_Saml2_Utils.get_encoded_parameter(get_data, 'SigAlg', OneLogin_Saml2_Constants.RSA_SHA1, lowercase_urlencoding=lowercase_urlencoding))
exists_x509cert = 'x509cert' in idp_data and idp_data['x509cert']
exists_multix509sign = 'x509certMulti' in idp_data and \
'signing' in idp_data['x509certMulti'] and \
idp_data['x509certMulti']['signing']
if not (exists_x509cert or exists_multix509sign):
raise OneLogin_Saml2_Error(
'In order to validate the sign on the Logout Request, the x509cert of the IdP is required',
OneLogin_Saml2_Error.CERT_NOT_FOUND
)
if exists_multix509sign:
for cert in idp_data['x509certMulti']['signing']:
if OneLogin_Saml2_Utils.validate_binary_sign(signed_query, b64decode(get_data['Signature']), cert, sign_alg):
return True
raise OneLogin_Saml2_ValidationError(
'Signature validation failed. Logout Request rejected',
OneLogin_Saml2_ValidationError.INVALID_SIGNATURE
)
else:
cert = idp_data['x509cert']
if not OneLogin_Saml2_Utils.validate_binary_sign(signed_query, b64decode(get_data['Signature']), cert, sign_alg):
raise OneLogin_Saml2_ValidationError(
'Signature validation failed. Logout Request rejected',
OneLogin_Saml2_ValidationError.INVALID_SIGNATURE
)
return True
except Exception as err:
# pylint: disable=R0801sign_alg
self.__error = err.__str__()
debug = self.__settings.is_debug_active()
if debug:
print(err.__str__())
if raise_exceptions:
raise err
return False |
def logdet(self):
"""
Implements log|K| = - log|D| + n⋅log|C₁|.
Returns
-------
logdet : float
Log-determinant of K.
"""
self._init_svd()
return -log(self._De).sum() + self.G.shape[0] * self.C1.logdet() | Implements log|K| = - log|D| + n⋅log|C₁|.
Returns
-------
logdet : float
Log-determinant of K. | Below is the instruction that describes the task:
### Input:
Implements log|K| = - log|D| + n⋅log|C₁|.
Returns
-------
logdet : float
Log-determinant of K.
### Response:
def logdet(self):
"""
Implements log|K| = - log|D| + n⋅log|C₁|.
Returns
-------
logdet : float
Log-determinant of K.
"""
self._init_svd()
return -log(self._De).sum() + self.G.shape[0] * self.C1.logdet() |
def _setup_ipc(self):
'''
Setup the IPC pub and sub.
Subscript to the listener IPC
and publish to the device specific IPC.
'''
log.debug('Setting up the server IPC puller to receive from the listener')
self.ctx = zmq.Context()
# subscribe to listener
self.sub = self.ctx.socket(zmq.PULL)
self.sub.bind(LST_IPC_URL)
try:
self.sub.setsockopt(zmq.HWM, self.opts['hwm'])
# zmq 2
except AttributeError:
# zmq 3
self.sub.setsockopt(zmq.RCVHWM, self.opts['hwm'])
# device publishers
log.debug('Creating the router ICP on the server')
self.pub = self.ctx.socket(zmq.ROUTER)
self.pub.bind(DEV_IPC_URL)
try:
self.pub.setsockopt(zmq.HWM, self.opts['hwm'])
# zmq 2
except AttributeError:
# zmq 3
self.pub.setsockopt(zmq.SNDHWM, self.opts['hwm'])
# Pipe to the publishers
self.publisher_pub = self.ctx.socket(zmq.PUB)
self.publisher_pub.connect(PUB_PX_IPC_URL)
try:
self.publisher_pub.setsockopt(zmq.HWM, self.opts['hwm'])
# zmq 2
except AttributeError:
# zmq 3
self.publisher_pub.setsockopt(zmq.SNDHWM, self.opts['hwm']) | Setup the IPC pub and sub.
Subscript to the listener IPC
and publish to the device specific IPC. | Below is the instruction that describes the task:
### Input:
Setup the IPC pub and sub.
Subscript to the listener IPC
and publish to the device specific IPC.
### Response:
def _setup_ipc(self):
'''
Setup the IPC pub and sub.
Subscript to the listener IPC
and publish to the device specific IPC.
'''
log.debug('Setting up the server IPC puller to receive from the listener')
self.ctx = zmq.Context()
# subscribe to listener
self.sub = self.ctx.socket(zmq.PULL)
self.sub.bind(LST_IPC_URL)
try:
self.sub.setsockopt(zmq.HWM, self.opts['hwm'])
# zmq 2
except AttributeError:
# zmq 3
self.sub.setsockopt(zmq.RCVHWM, self.opts['hwm'])
# device publishers
log.debug('Creating the router ICP on the server')
self.pub = self.ctx.socket(zmq.ROUTER)
self.pub.bind(DEV_IPC_URL)
try:
self.pub.setsockopt(zmq.HWM, self.opts['hwm'])
# zmq 2
except AttributeError:
# zmq 3
self.pub.setsockopt(zmq.SNDHWM, self.opts['hwm'])
# Pipe to the publishers
self.publisher_pub = self.ctx.socket(zmq.PUB)
self.publisher_pub.connect(PUB_PX_IPC_URL)
try:
self.publisher_pub.setsockopt(zmq.HWM, self.opts['hwm'])
# zmq 2
except AttributeError:
# zmq 3
self.publisher_pub.setsockopt(zmq.SNDHWM, self.opts['hwm']) |
def fit_isochrone(orbit, m0=2E11, b0=1., minimize_kwargs=None):
r"""
Fit the toy Isochrone potential to the sum of the energy residuals relative
to the mean energy by minimizing the function
.. math::
f(m,b) = \sum_i (\frac{1}{2}v_i^2 + \Phi_{\rm iso}(x_i\,|\,m,b) - <E>)^2
TODO: This should fail if the Hamiltonian associated with the orbit has
a frame other than StaticFrame
Parameters
----------
orbit : `~gala.dynamics.Orbit`
m0 : numeric (optional)
Initial mass guess.
b0 : numeric (optional)
Initial b guess.
minimize_kwargs : dict (optional)
Keyword arguments to pass through to `scipy.optimize.minimize`.
Returns
-------
m : float
Best-fit scale mass for the Isochrone potential.
b : float
Best-fit core radius for the Isochrone potential.
"""
pot = orbit.hamiltonian.potential
if pot is None:
raise ValueError("The orbit object must have an associated potential")
w = np.squeeze(orbit.w(pot.units))
if w.ndim > 2:
raise ValueError("Input orbit object must be a single orbit.")
def f(p, w):
logm, logb = p
potential = IsochronePotential(m=np.exp(logm), b=np.exp(logb),
units=pot.units)
H = (potential.value(w[:3]).decompose(pot.units).value +
0.5*np.sum(w[3:]**2, axis=0))
return np.sum(np.squeeze(H - np.mean(H))**2)
logm0 = np.log(m0)
logb0 = np.log(b0)
if minimize_kwargs is None:
minimize_kwargs = dict()
minimize_kwargs['x0'] = np.array([logm0, logb0])
minimize_kwargs['method'] = minimize_kwargs.get('method', 'Nelder-Mead')
res = minimize(f, args=(w,), **minimize_kwargs)
if not res.success:
raise ValueError("Failed to fit toy potential to orbit.")
logm, logb = np.abs(res.x)
m = np.exp(logm)
b = np.exp(logb)
return IsochronePotential(m=m, b=b, units=pot.units) | r"""
Fit the toy Isochrone potential to the sum of the energy residuals relative
to the mean energy by minimizing the function
.. math::
f(m,b) = \sum_i (\frac{1}{2}v_i^2 + \Phi_{\rm iso}(x_i\,|\,m,b) - <E>)^2
TODO: This should fail if the Hamiltonian associated with the orbit has
a frame other than StaticFrame
Parameters
----------
orbit : `~gala.dynamics.Orbit`
m0 : numeric (optional)
Initial mass guess.
b0 : numeric (optional)
Initial b guess.
minimize_kwargs : dict (optional)
Keyword arguments to pass through to `scipy.optimize.minimize`.
Returns
-------
m : float
Best-fit scale mass for the Isochrone potential.
b : float
Best-fit core radius for the Isochrone potential. | Below is the the instruction that describes the task:
### Input:
r"""
Fit the toy Isochrone potential to the sum of the energy residuals relative
to the mean energy by minimizing the function
.. math::
f(m,b) = \sum_i (\frac{1}{2}v_i^2 + \Phi_{\rm iso}(x_i\,|\,m,b) - <E>)^2
TODO: This should fail if the Hamiltonian associated with the orbit has
a frame other than StaticFrame
Parameters
----------
orbit : `~gala.dynamics.Orbit`
m0 : numeric (optional)
Initial mass guess.
b0 : numeric (optional)
Initial b guess.
minimize_kwargs : dict (optional)
Keyword arguments to pass through to `scipy.optimize.minimize`.
Returns
-------
m : float
Best-fit scale mass for the Isochrone potential.
b : float
Best-fit core radius for the Isochrone potential.
### Response:
def fit_isochrone(orbit, m0=2E11, b0=1., minimize_kwargs=None):
r"""
Fit the toy Isochrone potential to the sum of the energy residuals relative
to the mean energy by minimizing the function
.. math::
f(m,b) = \sum_i (\frac{1}{2}v_i^2 + \Phi_{\rm iso}(x_i\,|\,m,b) - <E>)^2
TODO: This should fail if the Hamiltonian associated with the orbit has
a frame other than StaticFrame
Parameters
----------
orbit : `~gala.dynamics.Orbit`
m0 : numeric (optional)
Initial mass guess.
b0 : numeric (optional)
Initial b guess.
minimize_kwargs : dict (optional)
Keyword arguments to pass through to `scipy.optimize.minimize`.
Returns
-------
m : float
Best-fit scale mass for the Isochrone potential.
b : float
Best-fit core radius for the Isochrone potential.
"""
pot = orbit.hamiltonian.potential
if pot is None:
raise ValueError("The orbit object must have an associated potential")
w = np.squeeze(orbit.w(pot.units))
if w.ndim > 2:
raise ValueError("Input orbit object must be a single orbit.")
def f(p, w):
logm, logb = p
potential = IsochronePotential(m=np.exp(logm), b=np.exp(logb),
units=pot.units)
H = (potential.value(w[:3]).decompose(pot.units).value +
0.5*np.sum(w[3:]**2, axis=0))
return np.sum(np.squeeze(H - np.mean(H))**2)
logm0 = np.log(m0)
logb0 = np.log(b0)
if minimize_kwargs is None:
minimize_kwargs = dict()
minimize_kwargs['x0'] = np.array([logm0, logb0])
minimize_kwargs['method'] = minimize_kwargs.get('method', 'Nelder-Mead')
res = minimize(f, args=(w,), **minimize_kwargs)
if not res.success:
raise ValueError("Failed to fit toy potential to orbit.")
logm, logb = np.abs(res.x)
m = np.exp(logm)
b = np.exp(logb)
return IsochronePotential(m=m, b=b, units=pot.units) |
def _MapVowels(cls, string, also_p=False):
"""
Return a copy of ``string`` where characters that exist as keys in
cls._VOWELS have been replaced with the corresponding value. If
also_p is True, this function will also change capital P characters
into a Hebrew character Qof.
"""
return cls._PSUB_RE.sub(cls.Repl, string) | Return a copy of ``string`` where characters that exist as keys in
cls._VOWELS have been replaced with the corresponding value. If
also_p is True, this function will also change capital P characters
into a Hebrew character Qof. | Below is the the instruction that describes the task:
### Input:
Return a copy of ``string`` where characters that exist as keys in
cls._VOWELS have been replaced with the corresponding value. If
also_p is True, this function will also change capital P characters
into a Hebrew character Qof.
### Response:
def _MapVowels(cls, string, also_p=False):
"""
Return a copy of ``string`` where characters that exist as keys in
cls._VOWELS have been replaced with the corresponding value. If
also_p is True, this function will also change capital P characters
into a Hebrew character Qof.
"""
return cls._PSUB_RE.sub(cls.Repl, string) |
def _compute_acq(self, x):
"""
Computes the GP-Lower Confidence Bound
"""
m, s = self.model.predict(x)
f_acqu = -m + self.exploration_weight * s
return f_acqu | Computes the GP-Lower Confidence Bound | Below is the the instruction that describes the task:
### Input:
Computes the GP-Lower Confidence Bound
### Response:
def _compute_acq(self, x):
"""
Computes the GP-Lower Confidence Bound
"""
m, s = self.model.predict(x)
f_acqu = -m + self.exploration_weight * s
return f_acqu |
def perfcounters(infile):
"""
Get a complete list of all measurements.
Args:
infile: The filestream containing all likwid output.
Returns:
A list of all measurements extracted from likwid's file stream.
"""
measurements = []
with open(infile, 'r') as in_file:
read_struct(in_file)
for region_struct in read_structs(in_file):
region = region_struct["1"][1]
core_info = region_struct["Region Info"]
measurements += \
get_measurements(region, core_info, region_struct)
for table_struct in read_tables(in_file):
core_info = None
if "Event" in table_struct:
offset = 1
core_info = table_struct["Event"][offset:]
measurements += get_measurements(region, core_info,
table_struct, offset)
elif "Metric" in table_struct:
core_info = table_struct["Metric"]
measurements += get_measurements(region, core_info,
table_struct)
return measurements | Get a complete list of all measurements.
Args:
infile: The filestream containing all likwid output.
Returns:
A list of all measurements extracted from likwid's file stream. | Below is the the instruction that describes the task:
### Input:
Get a complete list of all measurements.
Args:
infile: The filestream containing all likwid output.
Returns:
A list of all measurements extracted from likwid's file stream.
### Response:
def perfcounters(infile):
"""
Get a complete list of all measurements.
Args:
infile: The filestream containing all likwid output.
Returns:
A list of all measurements extracted from likwid's file stream.
"""
measurements = []
with open(infile, 'r') as in_file:
read_struct(in_file)
for region_struct in read_structs(in_file):
region = region_struct["1"][1]
core_info = region_struct["Region Info"]
measurements += \
get_measurements(region, core_info, region_struct)
for table_struct in read_tables(in_file):
core_info = None
if "Event" in table_struct:
offset = 1
core_info = table_struct["Event"][offset:]
measurements += get_measurements(region, core_info,
table_struct, offset)
elif "Metric" in table_struct:
core_info = table_struct["Metric"]
measurements += get_measurements(region, core_info,
table_struct)
return measurements |
def indent(indent_str=None):
"""
An example indentation ruleset.
"""
def indentation_rule():
inst = Indentator(indent_str)
return {'layout_handlers': {
Indent: inst.layout_handler_indent,
Dedent: inst.layout_handler_dedent,
Newline: inst.layout_handler_newline,
OptionalNewline: inst.layout_handler_newline_optional,
OpenBlock: layout_handler_openbrace,
CloseBlock: layout_handler_closebrace,
EndStatement: layout_handler_semicolon,
}}
return indentation_rule | An example indentation ruleset. | Below is the the instruction that describes the task:
### Input:
An example indentation ruleset.
### Response:
def indent(indent_str=None):
"""
An example indentation ruleset.
"""
def indentation_rule():
inst = Indentator(indent_str)
return {'layout_handlers': {
Indent: inst.layout_handler_indent,
Dedent: inst.layout_handler_dedent,
Newline: inst.layout_handler_newline,
OptionalNewline: inst.layout_handler_newline_optional,
OpenBlock: layout_handler_openbrace,
CloseBlock: layout_handler_closebrace,
EndStatement: layout_handler_semicolon,
}}
return indentation_rule |
def process_groupchat_message(self,stanza):
"""
Process <message type="groupchat"/> received from the room.
:Parameters:
- `stanza`: the stanza received.
:Types:
- `stanza`: `Message`
"""
fr=stanza.get_from()
user=self.get_user(fr,True)
s=stanza.get_subject()
if s:
self.subject=s
self.handler.subject_changed(user,stanza)
else:
self.handler.message_received(user,stanza) | Process <message type="groupchat"/> received from the room.
:Parameters:
- `stanza`: the stanza received.
:Types:
- `stanza`: `Message` | Below is the the instruction that describes the task:
### Input:
Process <message type="groupchat"/> received from the room.
:Parameters:
- `stanza`: the stanza received.
:Types:
- `stanza`: `Message`
### Response:
def process_groupchat_message(self,stanza):
"""
Process <message type="groupchat"/> received from the room.
:Parameters:
- `stanza`: the stanza received.
:Types:
- `stanza`: `Message`
"""
fr=stanza.get_from()
user=self.get_user(fr,True)
s=stanza.get_subject()
if s:
self.subject=s
self.handler.subject_changed(user,stanza)
else:
self.handler.message_received(user,stanza) |
def LDRD(cpu, dest1, dest2, src, offset=None):
"""Loads double width data from memory."""
assert dest1.type == 'register'
assert dest2.type == 'register'
assert src.type == 'memory'
mem1 = cpu.read_int(src.address(), 32)
mem2 = cpu.read_int(src.address() + 4, 32)
writeback = cpu._compute_writeback(src, offset)
dest1.write(mem1)
dest2.write(mem2)
cpu._cs_hack_ldr_str_writeback(src, offset, writeback) | Loads double width data from memory. | Below is the the instruction that describes the task:
### Input:
Loads double width data from memory.
### Response:
def LDRD(cpu, dest1, dest2, src, offset=None):
"""Loads double width data from memory."""
assert dest1.type == 'register'
assert dest2.type == 'register'
assert src.type == 'memory'
mem1 = cpu.read_int(src.address(), 32)
mem2 = cpu.read_int(src.address() + 4, 32)
writeback = cpu._compute_writeback(src, offset)
dest1.write(mem1)
dest2.write(mem2)
cpu._cs_hack_ldr_str_writeback(src, offset, writeback) |
def GetAllUsers(self, pagination_size=10):
"""Gets all user info from Gitkit server.
Args:
pagination_size: int, how many users should be returned per request.
The account info are retrieved in pagination.
Yields:
A generator to iterate all users.
"""
next_page_token, accounts = self.rpc_helper.DownloadAccount(
None, pagination_size)
while accounts:
for account in accounts:
yield GitkitUser.FromApiResponse(account)
next_page_token, accounts = self.rpc_helper.DownloadAccount(
next_page_token, pagination_size) | Gets all user info from Gitkit server.
Args:
pagination_size: int, how many users should be returned per request.
The account info are retrieved in pagination.
Yields:
A generator to iterate all users. | Below is the the instruction that describes the task:
### Input:
Gets all user info from Gitkit server.
Args:
pagination_size: int, how many users should be returned per request.
The account info are retrieved in pagination.
Yields:
A generator to iterate all users.
### Response:
def GetAllUsers(self, pagination_size=10):
"""Gets all user info from Gitkit server.
Args:
pagination_size: int, how many users should be returned per request.
The account info are retrieved in pagination.
Yields:
A generator to iterate all users.
"""
next_page_token, accounts = self.rpc_helper.DownloadAccount(
None, pagination_size)
while accounts:
for account in accounts:
yield GitkitUser.FromApiResponse(account)
next_page_token, accounts = self.rpc_helper.DownloadAccount(
next_page_token, pagination_size) |
def is_url(string, allowed_schemes=None):
"""
Check if a string is a valid url.
:param string: String to check.
:param allowed_schemes: List of valid schemes ('http', 'https', 'ftp'...). Default to None (any scheme is valid).
:return: True if url, false otherwise
:rtype: bool
"""
if not is_full_string(string):
return False
valid = bool(URL_RE.search(string))
if allowed_schemes:
return valid and any([string.startswith(s) for s in allowed_schemes])
return valid | Check if a string is a valid url.
:param string: String to check.
:param allowed_schemes: List of valid schemes ('http', 'https', 'ftp'...). Default to None (any scheme is valid).
:return: True if url, false otherwise
:rtype: bool | Below is the the instruction that describes the task:
### Input:
Check if a string is a valid url.
:param string: String to check.
:param allowed_schemes: List of valid schemes ('http', 'https', 'ftp'...). Default to None (any scheme is valid).
:return: True if url, false otherwise
:rtype: bool
### Response:
def is_url(string, allowed_schemes=None):
"""
Check if a string is a valid url.
:param string: String to check.
:param allowed_schemes: List of valid schemes ('http', 'https', 'ftp'...). Default to None (any scheme is valid).
:return: True if url, false otherwise
:rtype: bool
"""
if not is_full_string(string):
return False
valid = bool(URL_RE.search(string))
if allowed_schemes:
return valid and any([string.startswith(s) for s in allowed_schemes])
return valid |
def alt40fms(msg):
"""Selected altitude, FMS
Args:
msg (String): 28 bytes hexadecimal message (BDS40) string
Returns:
int: altitude in feet
"""
d = hex2bin(data(msg))
if d[13] == '0':
return None
alt = bin2int(d[14:26]) * 16 # ft
return alt | Selected altitude, FMS
Args:
msg (String): 28 bytes hexadecimal message (BDS40) string
Returns:
int: altitude in feet | Below is the the instruction that describes the task:
### Input:
Selected altitude, FMS
Args:
msg (String): 28 bytes hexadecimal message (BDS40) string
Returns:
int: altitude in feet
### Response:
def alt40fms(msg):
"""Selected altitude, FMS
Args:
msg (String): 28 bytes hexadecimal message (BDS40) string
Returns:
int: altitude in feet
"""
d = hex2bin(data(msg))
if d[13] == '0':
return None
alt = bin2int(d[14:26]) * 16 # ft
return alt |
def p_mp_setQualifier(p):
"""mp_setQualifier : qualifierDeclaration"""
qualdecl = p[1]
ns = p.parser.handle.default_namespace
if p.parser.verbose:
p.parser.log(
_format("Setting qualifier {0!A}", qualdecl.name))
try:
p.parser.handle.SetQualifier(qualdecl)
except CIMError as ce:
if ce.status_code == CIM_ERR_INVALID_NAMESPACE:
if p.parser.verbose:
p.parser.log(
_format("Creating namespace {0!A}", ns))
p.parser.server.create_namespace(ns)
if p.parser.verbose:
p.parser.log(
_format("Setting qualifier {0!A}", qualdecl.name))
p.parser.handle.SetQualifier(qualdecl)
elif ce.status_code == CIM_ERR_NOT_SUPPORTED:
if p.parser.verbose:
p.parser.log(
_format("Qualifier {0!A} already exists. Deleting...",
qualdecl.name))
p.parser.handle.DeleteQualifier(qualdecl.name)
if p.parser.verbose:
p.parser.log(
_format("Setting qualifier {0!A}", qualdecl.name))
p.parser.handle.SetQualifier(qualdecl)
else:
ce.file_line = (p.parser.file, p.lexer.lineno)
raise
p.parser.qualcache[ns][qualdecl.name] = qualdecl | mp_setQualifier : qualifierDeclaration | Below is the the instruction that describes the task:
### Input:
mp_setQualifier : qualifierDeclaration
### Response:
def p_mp_setQualifier(p):
"""mp_setQualifier : qualifierDeclaration"""
qualdecl = p[1]
ns = p.parser.handle.default_namespace
if p.parser.verbose:
p.parser.log(
_format("Setting qualifier {0!A}", qualdecl.name))
try:
p.parser.handle.SetQualifier(qualdecl)
except CIMError as ce:
if ce.status_code == CIM_ERR_INVALID_NAMESPACE:
if p.parser.verbose:
p.parser.log(
_format("Creating namespace {0!A}", ns))
p.parser.server.create_namespace(ns)
if p.parser.verbose:
p.parser.log(
_format("Setting qualifier {0!A}", qualdecl.name))
p.parser.handle.SetQualifier(qualdecl)
elif ce.status_code == CIM_ERR_NOT_SUPPORTED:
if p.parser.verbose:
p.parser.log(
_format("Qualifier {0!A} already exists. Deleting...",
qualdecl.name))
p.parser.handle.DeleteQualifier(qualdecl.name)
if p.parser.verbose:
p.parser.log(
_format("Setting qualifier {0!A}", qualdecl.name))
p.parser.handle.SetQualifier(qualdecl)
else:
ce.file_line = (p.parser.file, p.lexer.lineno)
raise
p.parser.qualcache[ns][qualdecl.name] = qualdecl |
def reindex_submitted_analyses(portal):
"""Reindex submitted analyses
"""
logger.info("Reindex submitted analyses")
brains = api.search({}, "bika_analysis_catalog")
total = len(brains)
logger.info("Processing {} analyses".format(total))
for num, brain in enumerate(brains):
# skip analyses which have an analyst
if brain.getAnalyst:
continue
# reindex analyses which have no annalyst set, but a result
if brain.getResult not in ["", None]:
analysis = brain.getObject()
analysis.reindexObject()
if num > 0 and num % 5000 == 0:
logger.info("Commiting reindexed analyses {}/{} ..."
.format(num, total))
transaction.commit() | Reindex submitted analyses | Below is the the instruction that describes the task:
### Input:
Reindex submitted analyses
### Response:
def reindex_submitted_analyses(portal):
"""Reindex submitted analyses
"""
logger.info("Reindex submitted analyses")
brains = api.search({}, "bika_analysis_catalog")
total = len(brains)
logger.info("Processing {} analyses".format(total))
for num, brain in enumerate(brains):
# skip analyses which have an analyst
if brain.getAnalyst:
continue
# reindex analyses which have no annalyst set, but a result
if brain.getResult not in ["", None]:
analysis = brain.getObject()
analysis.reindexObject()
if num > 0 and num % 5000 == 0:
logger.info("Commiting reindexed analyses {}/{} ..."
.format(num, total))
transaction.commit() |
def is_path_python_module(thepath):
"""
Given a path, find out of the path is a python module or is inside
a python module.
"""
thepath = path.normpath(thepath)
if path.isfile(thepath):
base, ext = path.splitext(thepath)
if ext in _py_suffixes:
return True
return False
if path.isdir(thepath):
for suffix in _py_suffixes:
if path.isfile(path.join(thepath, '__init__%s' % suffix)):
return True
return False | Given a path, find out of the path is a python module or is inside
a python module. | Below is the the instruction that describes the task:
### Input:
Given a path, find out of the path is a python module or is inside
a python module.
### Response:
def is_path_python_module(thepath):
"""
Given a path, find out of the path is a python module or is inside
a python module.
"""
thepath = path.normpath(thepath)
if path.isfile(thepath):
base, ext = path.splitext(thepath)
if ext in _py_suffixes:
return True
return False
if path.isdir(thepath):
for suffix in _py_suffixes:
if path.isfile(path.join(thepath, '__init__%s' % suffix)):
return True
return False |
def has_data_file(self, sha1hash): # type: (str) -> bool
"""Confirms the presence of the given file in the RO."""
folder = os.path.join(self.folder, DATA, sha1hash[0:2])
hash_path = os.path.join(folder, sha1hash)
return os.path.isfile(hash_path) | Confirms the presence of the given file in the RO. | Below is the the instruction that describes the task:
### Input:
Confirms the presence of the given file in the RO.
### Response:
def has_data_file(self, sha1hash): # type: (str) -> bool
"""Confirms the presence of the given file in the RO."""
folder = os.path.join(self.folder, DATA, sha1hash[0:2])
hash_path = os.path.join(folder, sha1hash)
return os.path.isfile(hash_path) |
def query_paths(
service_config: Dict[str, Any],
our_address: Address,
privkey: bytes,
current_block_number: BlockNumber,
token_network_address: Union[TokenNetworkAddress, TokenNetworkID],
route_from: InitiatorAddress,
route_to: TargetAddress,
value: PaymentAmount,
) -> List[Dict[str, Any]]:
""" Query paths from the PFS.
Send a request to the /paths endpoint of the PFS specified in service_config, and
retry in case of a failed request if it makes sense.
"""
max_paths = service_config['pathfinding_max_paths']
url = service_config['pathfinding_service_address']
payload = {
'from': to_checksum_address(route_from),
'to': to_checksum_address(route_to),
'value': value,
'max_paths': max_paths,
}
offered_fee = service_config.get('pathfinding_fee', service_config['pathfinding_max_fee'])
scrap_existing_iou = False
for retries in reversed(range(MAX_PATHS_QUERY_ATTEMPTS)):
payload['iou'] = create_current_iou(
config=service_config,
token_network_address=token_network_address,
our_address=our_address,
privkey=privkey,
block_number=current_block_number,
offered_fee=offered_fee,
scrap_existing_iou=scrap_existing_iou,
)
try:
return post_pfs_paths(
url=url,
token_network_address=token_network_address,
payload=payload,
)
except ServiceRequestIOURejected as error:
code = error.error_code
if retries == 0 or code in (PFSError.WRONG_IOU_RECIPIENT, PFSError.DEPOSIT_TOO_LOW):
raise
elif code in (PFSError.IOU_ALREADY_CLAIMED, PFSError.IOU_EXPIRED_TOO_EARLY):
scrap_existing_iou = True
elif code == PFSError.INSUFFICIENT_SERVICE_PAYMENT:
if offered_fee < service_config['pathfinding_max_fee']:
offered_fee = service_config['pathfinding_max_fee']
# TODO: Query the PFS for the fee here instead of using the max fee
else:
raise
log.info(f'PFS rejected our IOU, reason: {error}. Attempting again.')
# If we got no results after MAX_PATHS_QUERY_ATTEMPTS return empty list of paths
return list() | Query paths from the PFS.
Send a request to the /paths endpoint of the PFS specified in service_config, and
retry in case of a failed request if it makes sense. | Below is the the instruction that describes the task:
### Input:
Query paths from the PFS.
Send a request to the /paths endpoint of the PFS specified in service_config, and
retry in case of a failed request if it makes sense.
### Response:
def query_paths(
service_config: Dict[str, Any],
our_address: Address,
privkey: bytes,
current_block_number: BlockNumber,
token_network_address: Union[TokenNetworkAddress, TokenNetworkID],
route_from: InitiatorAddress,
route_to: TargetAddress,
value: PaymentAmount,
) -> List[Dict[str, Any]]:
""" Query paths from the PFS.
Send a request to the /paths endpoint of the PFS specified in service_config, and
retry in case of a failed request if it makes sense.
"""
max_paths = service_config['pathfinding_max_paths']
url = service_config['pathfinding_service_address']
payload = {
'from': to_checksum_address(route_from),
'to': to_checksum_address(route_to),
'value': value,
'max_paths': max_paths,
}
offered_fee = service_config.get('pathfinding_fee', service_config['pathfinding_max_fee'])
scrap_existing_iou = False
for retries in reversed(range(MAX_PATHS_QUERY_ATTEMPTS)):
payload['iou'] = create_current_iou(
config=service_config,
token_network_address=token_network_address,
our_address=our_address,
privkey=privkey,
block_number=current_block_number,
offered_fee=offered_fee,
scrap_existing_iou=scrap_existing_iou,
)
try:
return post_pfs_paths(
url=url,
token_network_address=token_network_address,
payload=payload,
)
except ServiceRequestIOURejected as error:
code = error.error_code
if retries == 0 or code in (PFSError.WRONG_IOU_RECIPIENT, PFSError.DEPOSIT_TOO_LOW):
raise
elif code in (PFSError.IOU_ALREADY_CLAIMED, PFSError.IOU_EXPIRED_TOO_EARLY):
scrap_existing_iou = True
elif code == PFSError.INSUFFICIENT_SERVICE_PAYMENT:
if offered_fee < service_config['pathfinding_max_fee']:
offered_fee = service_config['pathfinding_max_fee']
# TODO: Query the PFS for the fee here instead of using the max fee
else:
raise
log.info(f'PFS rejected our IOU, reason: {error}. Attempting again.')
# If we got no results after MAX_PATHS_QUERY_ATTEMPTS return empty list of paths
return list() |
def get_by_name_from(self, root: Account, name: str) -> List[Account]:
""" Searches child accounts by name, starting from the given account """
result = []
if root.name == name:
result.append(root)
for child in root.children:
child_results = self.get_by_name_from(child, name)
result += child_results
return result | Searches child accounts by name, starting from the given account | Below is the the instruction that describes the task:
### Input:
Searches child accounts by name, starting from the given account
### Response:
def get_by_name_from(self, root: Account, name: str) -> List[Account]:
""" Searches child accounts by name, starting from the given account """
result = []
if root.name == name:
result.append(root)
for child in root.children:
child_results = self.get_by_name_from(child, name)
result += child_results
return result |
def get_repr(self, obj, referent=None):
"""Return an HTML tree block describing the given object."""
objtype = type(obj)
typename = str(objtype.__module__) + "." + objtype.__name__
prettytype = typename.replace("__builtin__.", "")
name = getattr(obj, "__name__", "")
if name:
prettytype = "%s %r" % (prettytype, name)
key = ""
if referent:
key = self.get_refkey(obj, referent)
url = reverse('dowser_trace_object', args=(
typename,
id(obj)
))
return ('<a class="objectid" href="%s">%s</a> '
'<span class="typename">%s</span>%s<br />'
'<span class="repr">%s</span>'
% (url, id(obj), prettytype, key, get_repr(obj, 100))
) | Return an HTML tree block describing the given object. | Below is the the instruction that describes the task:
### Input:
Return an HTML tree block describing the given object.
### Response:
def get_repr(self, obj, referent=None):
"""Return an HTML tree block describing the given object."""
objtype = type(obj)
typename = str(objtype.__module__) + "." + objtype.__name__
prettytype = typename.replace("__builtin__.", "")
name = getattr(obj, "__name__", "")
if name:
prettytype = "%s %r" % (prettytype, name)
key = ""
if referent:
key = self.get_refkey(obj, referent)
url = reverse('dowser_trace_object', args=(
typename,
id(obj)
))
return ('<a class="objectid" href="%s">%s</a> '
'<span class="typename">%s</span>%s<br />'
'<span class="repr">%s</span>'
% (url, id(obj), prettytype, key, get_repr(obj, 100))
) |
def export(self, export_auto_config=False):
"""
Export the cluster template for the given cluster. ccluster must have host
templates defined. It cluster does not have host templates defined it will
export host templates based on roles assignment.
@param export_auto_config: Also export auto configured configs
@return: Return cluster template
@since: API v12
"""
return self._get("export", ApiClusterTemplate, False,
params=dict(exportAutoConfig=export_auto_config), api_version=12) | Export the cluster template for the given cluster. ccluster must have host
templates defined. It cluster does not have host templates defined it will
export host templates based on roles assignment.
@param export_auto_config: Also export auto configured configs
@return: Return cluster template
@since: API v12 | Below is the the instruction that describes the task:
### Input:
Export the cluster template for the given cluster. ccluster must have host
templates defined. It cluster does not have host templates defined it will
export host templates based on roles assignment.
@param export_auto_config: Also export auto configured configs
@return: Return cluster template
@since: API v12
### Response:
def export(self, export_auto_config=False):
"""
Export the cluster template for the given cluster. ccluster must have host
templates defined. It cluster does not have host templates defined it will
export host templates based on roles assignment.
@param export_auto_config: Also export auto configured configs
@return: Return cluster template
@since: API v12
"""
return self._get("export", ApiClusterTemplate, False,
params=dict(exportAutoConfig=export_auto_config), api_version=12) |
def _grab_history(self):
"""Calculate the needed history/changelog changes
Every history heading looks like '1.0 b4 (1972-12-25)'. Extract them,
check if the first one matches the version and whether it has a the
current date.
"""
default_location = None
config = self.setup_cfg.config
if config and config.has_option('zest.releaser', 'history_file'):
default_location = config.get('zest.releaser', 'history_file')
history_file = self.vcs.history_file(location=default_location)
if not history_file:
logger.warn("No history file found")
self.data['history_lines'] = None
self.data['history_file'] = None
return
logger.debug("Checking %s", history_file)
history_lines = open(history_file).read().split('\n')
# ^^^ TODO: .readlines()?
headings = utils.extract_headings_from_history(history_lines)
if not len(headings):
logger.error("No detectable version heading in the history "
"file %s", history_file)
sys.exit()
good_heading = self.data['history_header'] % self.data
# ^^^ history_header is a string with %(abc)s replacements.
line = headings[0]['line']
previous = history_lines[line]
history_lines[line] = good_heading
logger.debug("Set heading from %r to %r.", previous, good_heading)
history_lines[line + 1] = utils.fix_rst_heading(
heading=good_heading,
below=history_lines[line + 1])
logger.debug("Set line below heading to %r",
history_lines[line + 1])
self.data['history_lines'] = history_lines
self.data['history_file'] = history_file | Calculate the needed history/changelog changes
Every history heading looks like '1.0 b4 (1972-12-25)'. Extract them,
check if the first one matches the version and whether it has a the
current date. | Below is the the instruction that describes the task:
### Input:
Calculate the needed history/changelog changes
Every history heading looks like '1.0 b4 (1972-12-25)'. Extract them,
check if the first one matches the version and whether it has a the
current date.
### Response:
def _grab_history(self):
"""Calculate the needed history/changelog changes
Every history heading looks like '1.0 b4 (1972-12-25)'. Extract them,
check if the first one matches the version and whether it has a the
current date.
"""
default_location = None
config = self.setup_cfg.config
if config and config.has_option('zest.releaser', 'history_file'):
default_location = config.get('zest.releaser', 'history_file')
history_file = self.vcs.history_file(location=default_location)
if not history_file:
logger.warn("No history file found")
self.data['history_lines'] = None
self.data['history_file'] = None
return
logger.debug("Checking %s", history_file)
history_lines = open(history_file).read().split('\n')
# ^^^ TODO: .readlines()?
headings = utils.extract_headings_from_history(history_lines)
if not len(headings):
logger.error("No detectable version heading in the history "
"file %s", history_file)
sys.exit()
good_heading = self.data['history_header'] % self.data
# ^^^ history_header is a string with %(abc)s replacements.
line = headings[0]['line']
previous = history_lines[line]
history_lines[line] = good_heading
logger.debug("Set heading from %r to %r.", previous, good_heading)
history_lines[line + 1] = utils.fix_rst_heading(
heading=good_heading,
below=history_lines[line + 1])
logger.debug("Set line below heading to %r",
history_lines[line + 1])
self.data['history_lines'] = history_lines
self.data['history_file'] = history_file |
def matches(self, other):
"""Do a loose equivalency test suitable for comparing page names.
*other* can be any string-like object, including :class:`.Wikicode`, or
an iterable of these. This operation is symmetric; both sides are
adjusted. Specifically, whitespace and markup is stripped and the first
letter's case is normalized. Typical usage is
``if template.name.matches("stub"): ...``.
"""
cmp = lambda a, b: (a[0].upper() + a[1:] == b[0].upper() + b[1:]
if a and b else a == b)
this = self.strip_code().strip()
if isinstance(other, (str, bytes, Wikicode, Node)):
that = parse_anything(other).strip_code().strip()
return cmp(this, that)
for obj in other:
that = parse_anything(obj).strip_code().strip()
if cmp(this, that):
return True
return False | Do a loose equivalency test suitable for comparing page names.
*other* can be any string-like object, including :class:`.Wikicode`, or
an iterable of these. This operation is symmetric; both sides are
adjusted. Specifically, whitespace and markup is stripped and the first
letter's case is normalized. Typical usage is
``if template.name.matches("stub"): ...``. | Below is the the instruction that describes the task:
### Input:
Do a loose equivalency test suitable for comparing page names.
*other* can be any string-like object, including :class:`.Wikicode`, or
an iterable of these. This operation is symmetric; both sides are
adjusted. Specifically, whitespace and markup is stripped and the first
letter's case is normalized. Typical usage is
``if template.name.matches("stub"): ...``.
### Response:
def matches(self, other):
"""Do a loose equivalency test suitable for comparing page names.
*other* can be any string-like object, including :class:`.Wikicode`, or
an iterable of these. This operation is symmetric; both sides are
adjusted. Specifically, whitespace and markup is stripped and the first
letter's case is normalized. Typical usage is
``if template.name.matches("stub"): ...``.
"""
cmp = lambda a, b: (a[0].upper() + a[1:] == b[0].upper() + b[1:]
if a and b else a == b)
this = self.strip_code().strip()
if isinstance(other, (str, bytes, Wikicode, Node)):
that = parse_anything(other).strip_code().strip()
return cmp(this, that)
for obj in other:
that = parse_anything(obj).strip_code().strip()
if cmp(this, that):
return True
return False |
def evaluate_perceptron(ctx, model, corpus):
"""Evaluate performance of Averaged Perceptron POS Tagger."""
click.echo('chemdataextractor.pos.evaluate')
if corpus == 'wsj':
evaluation = wsj_evaluation
sents = list(evaluation.tagged_sents())
for i, wsj_sent in enumerate(sents):
sents[i] = [t for t in wsj_sent if not t[1] == u'-NONE-']
elif corpus == 'genia':
evaluation = genia_evaluation
sents = list(evaluation.tagged_sents())
# Translate GENIA bracket tags
for i, genia_sent in enumerate(sents):
for j, (token, tag) in enumerate(genia_sent):
if tag == u'(':
sents[i][j] = (token, u'-LRB-')
elif tag == u')':
sents[i][j] = (token, u'-RRB-')
else:
raise click.ClickException('Invalid corpus')
tagger = ChemApPosTagger(model=model)
accuracy = tagger.evaluate(sents)
click.echo('%s on %s: %s' % (model, evaluation, accuracy)) | Evaluate performance of Averaged Perceptron POS Tagger. | Below is the the instruction that describes the task:
### Input:
Evaluate performance of Averaged Perceptron POS Tagger.
### Response:
def evaluate_perceptron(ctx, model, corpus):
"""Evaluate performance of Averaged Perceptron POS Tagger."""
click.echo('chemdataextractor.pos.evaluate')
if corpus == 'wsj':
evaluation = wsj_evaluation
sents = list(evaluation.tagged_sents())
for i, wsj_sent in enumerate(sents):
sents[i] = [t for t in wsj_sent if not t[1] == u'-NONE-']
elif corpus == 'genia':
evaluation = genia_evaluation
sents = list(evaluation.tagged_sents())
# Translate GENIA bracket tags
for i, genia_sent in enumerate(sents):
for j, (token, tag) in enumerate(genia_sent):
if tag == u'(':
sents[i][j] = (token, u'-LRB-')
elif tag == u')':
sents[i][j] = (token, u'-RRB-')
else:
raise click.ClickException('Invalid corpus')
tagger = ChemApPosTagger(model=model)
accuracy = tagger.evaluate(sents)
click.echo('%s on %s: %s' % (model, evaluation, accuracy)) |
def _fix_pooling(pool_type, inputs, new_attr):
"""onnx pooling operator supports asymmetrical padding
Adding pad operator before pooling in mxnet to work with onnx"""
stride = new_attr.get('stride')
kernel = new_attr.get('kernel')
padding = new_attr.get('pad')
p_value = new_attr.get('p_value')
# Adding default stride.
if stride is None:
stride = (1,) * len(kernel)
# Add padding attr if not provided.
if padding is None:
padding = (0,) * len(kernel) * 2
# Mxnet Pad operator supports only 4D/5D tensors.
# For 1D case, these are the steps:
# Step 1. Add extra dummy dimension to make it 4D. Adding to axis = 2
# Step 2. Apply padding to this changed tensor
# Step 3. Remove the extra dimension added in step 1.
if len(kernel) == 1:
dummy_axis = 2
# setting 0 padding to the new dim to be added.
padding = (0, padding[0], 0, padding[1])
pad_width = (0, 0, 0, 0) + _pad_sequence_fix(padding, kernel_dim=2)
# Step 1.
curr_sym = symbol.expand_dims(inputs[0], axis=dummy_axis)
# Step 2. Common for all tensor sizes
new_pad_op = symbol.pad(curr_sym, mode='edge', pad_width=pad_width)
# Step 3: Removing extra dim added.
new_pad_op = symbol.split(new_pad_op, axis=dummy_axis, num_outputs=1, squeeze_axis=1)
else:
# For 2D/3D cases:
# Apply padding
pad_width = (0, 0, 0, 0) + _pad_sequence_fix(padding, kernel_dim=len(kernel))
curr_sym = inputs[0]
if pool_type == 'max':
# For max pool : mode = 'edge', we should replicate the
# edge values to pad, so that we only include input data values
# for calculating 'max'
new_pad_op = symbol.pad(curr_sym, mode='edge', pad_width=pad_width)
else:
# For avg pool, we should add 'zeros' for padding so mode='constant'
new_pad_op = symbol.pad(curr_sym, mode='constant', pad_width=pad_width)
# Apply pooling without pads.
if pool_type == 'lp':
new_pooling_op = symbol.Pooling(new_pad_op, pool_type=pool_type, stride=stride, kernel=kernel, p_value=p_value)
else:
new_pooling_op = symbol.Pooling(new_pad_op, pool_type=pool_type, stride=stride, kernel=kernel)
return new_pooling_op | onnx pooling operator supports asymmetrical padding
Adding pad operator before pooling in mxnet to work with onnx | Below is the the instruction that describes the task:
### Input:
onnx pooling operator supports asymmetrical padding
Adding pad operator before pooling in mxnet to work with onnx
### Response:
def _fix_pooling(pool_type, inputs, new_attr):
"""onnx pooling operator supports asymmetrical padding
Adding pad operator before pooling in mxnet to work with onnx"""
stride = new_attr.get('stride')
kernel = new_attr.get('kernel')
padding = new_attr.get('pad')
p_value = new_attr.get('p_value')
# Adding default stride.
if stride is None:
stride = (1,) * len(kernel)
# Add padding attr if not provided.
if padding is None:
padding = (0,) * len(kernel) * 2
# Mxnet Pad operator supports only 4D/5D tensors.
# For 1D case, these are the steps:
# Step 1. Add extra dummy dimension to make it 4D. Adding to axis = 2
# Step 2. Apply padding to this changed tensor
# Step 3. Remove the extra dimension added in step 1.
if len(kernel) == 1:
dummy_axis = 2
# setting 0 padding to the new dim to be added.
padding = (0, padding[0], 0, padding[1])
pad_width = (0, 0, 0, 0) + _pad_sequence_fix(padding, kernel_dim=2)
# Step 1.
curr_sym = symbol.expand_dims(inputs[0], axis=dummy_axis)
# Step 2. Common for all tensor sizes
new_pad_op = symbol.pad(curr_sym, mode='edge', pad_width=pad_width)
# Step 3: Removing extra dim added.
new_pad_op = symbol.split(new_pad_op, axis=dummy_axis, num_outputs=1, squeeze_axis=1)
else:
# For 2D/3D cases:
# Apply padding
pad_width = (0, 0, 0, 0) + _pad_sequence_fix(padding, kernel_dim=len(kernel))
curr_sym = inputs[0]
if pool_type == 'max':
# For max pool : mode = 'edge', we should replicate the
# edge values to pad, so that we only include input data values
# for calculating 'max'
new_pad_op = symbol.pad(curr_sym, mode='edge', pad_width=pad_width)
else:
# For avg pool, we should add 'zeros' for padding so mode='constant'
new_pad_op = symbol.pad(curr_sym, mode='constant', pad_width=pad_width)
# Apply pooling without pads.
if pool_type == 'lp':
new_pooling_op = symbol.Pooling(new_pad_op, pool_type=pool_type, stride=stride, kernel=kernel, p_value=p_value)
else:
new_pooling_op = symbol.Pooling(new_pad_op, pool_type=pool_type, stride=stride, kernel=kernel)
return new_pooling_op |
def high_density_objs(row_obj: CityInfo) -> dict:
"""Color rows with extremely high population density red."""
opts = dict()
if float(pop_density(row_obj)) > EXTREMELY_HIGH_POULATION_DENSITY:
opts[tf.TableFormatter.ROW_OPT_TEXT_COLOR] = tf.TableColors.TEXT_COLOR_RED
return opts | Color rows with extremely high population density red. | Below is the the instruction that describes the task:
### Input:
Color rows with extremely high population density red.
### Response:
def high_density_objs(row_obj: CityInfo) -> dict:
"""Color rows with extremely high population density red."""
opts = dict()
if float(pop_density(row_obj)) > EXTREMELY_HIGH_POULATION_DENSITY:
opts[tf.TableFormatter.ROW_OPT_TEXT_COLOR] = tf.TableColors.TEXT_COLOR_RED
return opts |
def _render_children(self, contexts, partials):
"""Render the children tokens"""
ret = []
for child in self.children:
ret.append(child._render(contexts, partials))
return EMPTYSTRING.join(ret) | Render the children tokens | Below is the the instruction that describes the task:
### Input:
Render the children tokens
### Response:
def _render_children(self, contexts, partials):
"""Render the children tokens"""
ret = []
for child in self.children:
ret.append(child._render(contexts, partials))
return EMPTYSTRING.join(ret) |
def calc_qv_v1(self):
"""Calculate the discharge of both forelands after Manning-Strickler.
Required control parameters:
|EKV|
|SKV|
|Gef|
Required flux sequence:
|AV|
|UV|
Calculated flux sequence:
|lstream_fluxes.QV|
Examples:
For appropriate strictly positive values:
>>> from hydpy.models.lstream import *
>>> parameterstep()
>>> ekv(2.0)
>>> skv(50.0)
>>> gef(0.01)
>>> fluxes.av = 3.0
>>> fluxes.uv = 7.0
>>> model.calc_qv_v1()
>>> fluxes.qv
qv(17.053102, 17.053102)
For zero or negative values of the flown through surface or
the wetted perimeter:
>>> fluxes.av = -1.0, 3.0
>>> fluxes.uv = 7.0, 0.0
>>> model.calc_qv_v1()
>>> fluxes.qv
qv(0.0, 0.0)
"""
con = self.parameters.control.fastaccess
flu = self.sequences.fluxes.fastaccess
for i in range(2):
if (flu.av[i] > 0.) and (flu.uv[i] > 0.):
flu.qv[i] = (con.ekv[i]*con.skv[i] *
flu.av[i]**(5./3.)/flu.uv[i]**(2./3.)*con.gef**.5)
else:
flu.qv[i] = 0. | Calculate the discharge of both forelands after Manning-Strickler.
Required control parameters:
|EKV|
|SKV|
|Gef|
Required flux sequence:
|AV|
|UV|
Calculated flux sequence:
|lstream_fluxes.QV|
Examples:
For appropriate strictly positive values:
>>> from hydpy.models.lstream import *
>>> parameterstep()
>>> ekv(2.0)
>>> skv(50.0)
>>> gef(0.01)
>>> fluxes.av = 3.0
>>> fluxes.uv = 7.0
>>> model.calc_qv_v1()
>>> fluxes.qv
qv(17.053102, 17.053102)
For zero or negative values of the flown through surface or
the wetted perimeter:
>>> fluxes.av = -1.0, 3.0
>>> fluxes.uv = 7.0, 0.0
>>> model.calc_qv_v1()
>>> fluxes.qv
qv(0.0, 0.0) | Below is the the instruction that describes the task:
### Input:
Calculate the discharge of both forelands after Manning-Strickler.
Required control parameters:
|EKV|
|SKV|
|Gef|
Required flux sequence:
|AV|
|UV|
Calculated flux sequence:
|lstream_fluxes.QV|
Examples:
For appropriate strictly positive values:
>>> from hydpy.models.lstream import *
>>> parameterstep()
>>> ekv(2.0)
>>> skv(50.0)
>>> gef(0.01)
>>> fluxes.av = 3.0
>>> fluxes.uv = 7.0
>>> model.calc_qv_v1()
>>> fluxes.qv
qv(17.053102, 17.053102)
For zero or negative values of the flown through surface or
the wetted perimeter:
>>> fluxes.av = -1.0, 3.0
>>> fluxes.uv = 7.0, 0.0
>>> model.calc_qv_v1()
>>> fluxes.qv
qv(0.0, 0.0)
### Response:
def calc_qv_v1(self):
"""Calculate the discharge of both forelands after Manning-Strickler.
Required control parameters:
|EKV|
|SKV|
|Gef|
Required flux sequence:
|AV|
|UV|
Calculated flux sequence:
|lstream_fluxes.QV|
Examples:
For appropriate strictly positive values:
>>> from hydpy.models.lstream import *
>>> parameterstep()
>>> ekv(2.0)
>>> skv(50.0)
>>> gef(0.01)
>>> fluxes.av = 3.0
>>> fluxes.uv = 7.0
>>> model.calc_qv_v1()
>>> fluxes.qv
qv(17.053102, 17.053102)
For zero or negative values of the flown through surface or
the wetted perimeter:
>>> fluxes.av = -1.0, 3.0
>>> fluxes.uv = 7.0, 0.0
>>> model.calc_qv_v1()
>>> fluxes.qv
qv(0.0, 0.0)
"""
con = self.parameters.control.fastaccess
flu = self.sequences.fluxes.fastaccess
for i in range(2):
if (flu.av[i] > 0.) and (flu.uv[i] > 0.):
flu.qv[i] = (con.ekv[i]*con.skv[i] *
flu.av[i]**(5./3.)/flu.uv[i]**(2./3.)*con.gef**.5)
else:
flu.qv[i] = 0. |
def sgc_game(k):
"""
Return a NormalFormGame instance of the 2-player game introduced by
Sandholm, Gilpin, and Conitzer (2005), which has a unique Nash
equilibrium, where each player plays half of the actions with
positive probabilities. Payoffs are normalized so that the minimum
and the maximum payoffs are 0 and 1, respectively.
Parameters
----------
k : scalar(int)
Positive integer determining the number of actions. The returned
game will have `4*k-1` actions for each player.
Returns
-------
g : NormalFormGame
Examples
--------
>>> g = sgc_game(2)
>>> g.players[0]
Player([[ 0.75, 0.5 , 1. , 0.5 , 0.5 , 0.5 , 0.5 ],
[ 1. , 0.75, 0.5 , 0.5 , 0.5 , 0.5 , 0.5 ],
[ 0.5 , 1. , 0.75, 0.5 , 0.5 , 0.5 , 0.5 ],
[ 0. , 0. , 0. , 0.75, 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0.75, 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0.75, 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0.75]])
>>> g.players[1]
Player([[ 0.75, 0.5 , 1. , 0.5 , 0.5 , 0.5 , 0.5 ],
[ 1. , 0.75, 0.5 , 0.5 , 0.5 , 0.5 , 0.5 ],
[ 0.5 , 1. , 0.75, 0.5 , 0.5 , 0.5 , 0.5 ],
[ 0. , 0. , 0. , 0. , 0.75, 0. , 0. ],
[ 0. , 0. , 0. , 0.75, 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0.75],
[ 0. , 0. , 0. , 0. , 0. , 0.75, 0. ]])
"""
payoff_arrays = tuple(np.empty((4*k-1, 4*k-1)) for i in range(2))
_populate_sgc_payoff_arrays(payoff_arrays)
g = NormalFormGame(
[Player(payoff_array) for payoff_array in payoff_arrays]
)
return g | Return a NormalFormGame instance of the 2-player game introduced by
Sandholm, Gilpin, and Conitzer (2005), which has a unique Nash
equilibrium, where each player plays half of the actions with
positive probabilities. Payoffs are normalized so that the minimum
and the maximum payoffs are 0 and 1, respectively.
Parameters
----------
k : scalar(int)
Positive integer determining the number of actions. The returned
game will have `4*k-1` actions for each player.
Returns
-------
g : NormalFormGame
Examples
--------
>>> g = sgc_game(2)
>>> g.players[0]
Player([[ 0.75, 0.5 , 1. , 0.5 , 0.5 , 0.5 , 0.5 ],
[ 1. , 0.75, 0.5 , 0.5 , 0.5 , 0.5 , 0.5 ],
[ 0.5 , 1. , 0.75, 0.5 , 0.5 , 0.5 , 0.5 ],
[ 0. , 0. , 0. , 0.75, 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0.75, 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0.75, 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0.75]])
>>> g.players[1]
Player([[ 0.75, 0.5 , 1. , 0.5 , 0.5 , 0.5 , 0.5 ],
[ 1. , 0.75, 0.5 , 0.5 , 0.5 , 0.5 , 0.5 ],
[ 0.5 , 1. , 0.75, 0.5 , 0.5 , 0.5 , 0.5 ],
[ 0. , 0. , 0. , 0. , 0.75, 0. , 0. ],
[ 0. , 0. , 0. , 0.75, 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0.75],
[ 0. , 0. , 0. , 0. , 0. , 0.75, 0. ]]) | Below is the the instruction that describes the task:
### Input:
Return a NormalFormGame instance of the 2-player game introduced by
Sandholm, Gilpin, and Conitzer (2005), which has a unique Nash
equilibrium, where each player plays half of the actions with
positive probabilities. Payoffs are normalized so that the minimum
and the maximum payoffs are 0 and 1, respectively.
Parameters
----------
k : scalar(int)
Positive integer determining the number of actions. The returned
game will have `4*k-1` actions for each player.
Returns
-------
g : NormalFormGame
Examples
--------
>>> g = sgc_game(2)
>>> g.players[0]
Player([[ 0.75, 0.5 , 1. , 0.5 , 0.5 , 0.5 , 0.5 ],
[ 1. , 0.75, 0.5 , 0.5 , 0.5 , 0.5 , 0.5 ],
[ 0.5 , 1. , 0.75, 0.5 , 0.5 , 0.5 , 0.5 ],
[ 0. , 0. , 0. , 0.75, 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0.75, 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0.75, 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0.75]])
>>> g.players[1]
Player([[ 0.75, 0.5 , 1. , 0.5 , 0.5 , 0.5 , 0.5 ],
[ 1. , 0.75, 0.5 , 0.5 , 0.5 , 0.5 , 0.5 ],
[ 0.5 , 1. , 0.75, 0.5 , 0.5 , 0.5 , 0.5 ],
[ 0. , 0. , 0. , 0. , 0.75, 0. , 0. ],
[ 0. , 0. , 0. , 0.75, 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0.75],
[ 0. , 0. , 0. , 0. , 0. , 0.75, 0. ]])
### Response:
def sgc_game(k):
"""
Return a NormalFormGame instance of the 2-player game introduced by
Sandholm, Gilpin, and Conitzer (2005), which has a unique Nash
equilibrium, where each player plays half of the actions with
positive probabilities. Payoffs are normalized so that the minimum
and the maximum payoffs are 0 and 1, respectively.
Parameters
----------
k : scalar(int)
Positive integer determining the number of actions. The returned
game will have `4*k-1` actions for each player.
Returns
-------
g : NormalFormGame
Examples
--------
>>> g = sgc_game(2)
>>> g.players[0]
Player([[ 0.75, 0.5 , 1. , 0.5 , 0.5 , 0.5 , 0.5 ],
[ 1. , 0.75, 0.5 , 0.5 , 0.5 , 0.5 , 0.5 ],
[ 0.5 , 1. , 0.75, 0.5 , 0.5 , 0.5 , 0.5 ],
[ 0. , 0. , 0. , 0.75, 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0.75, 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0.75, 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0.75]])
>>> g.players[1]
Player([[ 0.75, 0.5 , 1. , 0.5 , 0.5 , 0.5 , 0.5 ],
[ 1. , 0.75, 0.5 , 0.5 , 0.5 , 0.5 , 0.5 ],
[ 0.5 , 1. , 0.75, 0.5 , 0.5 , 0.5 , 0.5 ],
[ 0. , 0. , 0. , 0. , 0.75, 0. , 0. ],
[ 0. , 0. , 0. , 0.75, 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0.75],
[ 0. , 0. , 0. , 0. , 0. , 0.75, 0. ]])
"""
payoff_arrays = tuple(np.empty((4*k-1, 4*k-1)) for i in range(2))
_populate_sgc_payoff_arrays(payoff_arrays)
g = NormalFormGame(
[Player(payoff_array) for payoff_array in payoff_arrays]
)
return g |
def update_author_min_max_date(min_date, max_date, target_author, author_field="author_uuid"):
"""
Get the query to update demography_min_date and demography_max_date of a given author
:param min_date: new demography_min_date
:param max_date: new demography_max_date
:param target_author: target author to be updated
:param author_field: author field
:return: the query to be executed to update demography data of an author
"""
es_query = '''
{
"script": {
"source":
"ctx._source.demography_min_date = params.min_date;ctx._source.demography_max_date = params.max_date;",
"lang": "painless",
"params": {
"min_date": "%s",
"max_date": "%s"
}
},
"query": {
"term": {
"%s": "%s"
}
}
}
''' % (min_date, max_date, author_field, target_author)
return es_query | Get the query to update demography_min_date and demography_max_date of a given author
:param min_date: new demography_min_date
:param max_date: new demography_max_date
:param target_author: target author to be updated
:param author_field: author field
:return: the query to be executed to update demography data of an author | Below is the the instruction that describes the task:
### Input:
Get the query to update demography_min_date and demography_max_date of a given author
:param min_date: new demography_min_date
:param max_date: new demography_max_date
:param target_author: target author to be updated
:param author_field: author field
:return: the query to be executed to update demography data of an author
### Response:
def update_author_min_max_date(min_date, max_date, target_author, author_field="author_uuid"):
"""
Get the query to update demography_min_date and demography_max_date of a given author
:param min_date: new demography_min_date
:param max_date: new demography_max_date
:param target_author: target author to be updated
:param author_field: author field
:return: the query to be executed to update demography data of an author
"""
es_query = '''
{
"script": {
"source":
"ctx._source.demography_min_date = params.min_date;ctx._source.demography_max_date = params.max_date;",
"lang": "painless",
"params": {
"min_date": "%s",
"max_date": "%s"
}
},
"query": {
"term": {
"%s": "%s"
}
}
}
''' % (min_date, max_date, author_field, target_author)
return es_query |
def find_funcdef(source):
"""Find the first FuncDef ast object in source"""
try:
module_node = compile(
source, "<string>", mode="exec", flags=ast.PyCF_ONLY_AST
)
except SyntaxError:
return find_funcdef(fix_lamdaline(source))
for node in ast.walk(module_node):
if isinstance(node, ast.FunctionDef) or isinstance(node, ast.Lambda):
return node
raise ValueError("function definition not found") | Find the first FuncDef ast object in source | Below is the the instruction that describes the task:
### Input:
Find the first FuncDef ast object in source
### Response:
def find_funcdef(source):
"""Find the first FuncDef ast object in source"""
try:
module_node = compile(
source, "<string>", mode="exec", flags=ast.PyCF_ONLY_AST
)
except SyntaxError:
return find_funcdef(fix_lamdaline(source))
for node in ast.walk(module_node):
if isinstance(node, ast.FunctionDef) or isinstance(node, ast.Lambda):
return node
raise ValueError("function definition not found") |
def _got_srv(self, addrs):
"""Handle SRV lookup result.
:Parameters:
- `addrs`: properly sorted list of (hostname, port) tuples
"""
with self.lock:
if not addrs:
self._dst_service = None
if self._dst_port:
self._dst_nameports = [(self._dst_name, self._dst_port)]
else:
self._dst_nameports = []
self._set_state("aborted")
raise DNSError("Could not resolve SRV for service {0!r}"
" on host {1!r} and fallback port number not given"
.format(self._dst_service, self._dst_name))
elif addrs == [(".", 0)]:
self._dst_nameports = []
self._set_state("aborted")
raise DNSError("Service {0!r} not available on host {1!r}"
.format(self._dst_service, self._dst_name))
else:
self._dst_nameports = addrs
self._set_state("resolve-hostname") | Handle SRV lookup result.
:Parameters:
- `addrs`: properly sorted list of (hostname, port) tuples | Below is the the instruction that describes the task:
### Input:
Handle SRV lookup result.
:Parameters:
- `addrs`: properly sorted list of (hostname, port) tuples
### Response:
def _got_srv(self, addrs):
"""Handle SRV lookup result.
:Parameters:
- `addrs`: properly sorted list of (hostname, port) tuples
"""
with self.lock:
if not addrs:
self._dst_service = None
if self._dst_port:
self._dst_nameports = [(self._dst_name, self._dst_port)]
else:
self._dst_nameports = []
self._set_state("aborted")
raise DNSError("Could not resolve SRV for service {0!r}"
" on host {1!r} and fallback port number not given"
.format(self._dst_service, self._dst_name))
elif addrs == [(".", 0)]:
self._dst_nameports = []
self._set_state("aborted")
raise DNSError("Service {0!r} not available on host {1!r}"
.format(self._dst_service, self._dst_name))
else:
self._dst_nameports = addrs
self._set_state("resolve-hostname") |
def _file_lists(self, load, form):
'''
Return a dict containing the file lists for files and dirs
'''
if 'env' in load:
# "env" is not supported; Use "saltenv".
load.pop('env')
if not os.path.isdir(self.file_list_cachedir):
try:
os.makedirs(self.file_list_cachedir)
except os.error:
log.error('Unable to make cachedir %s', self.file_list_cachedir)
return []
list_cache = salt.utils.path.join(
self.file_list_cachedir,
'{0}.p'.format(load['saltenv'].replace(os.path.sep, '_|-'))
)
w_lock = salt.utils.path.join(
self.file_list_cachedir,
'.{0}.w'.format(load['saltenv'].replace(os.path.sep, '_|-'))
)
cache_match, refresh_cache, save_cache = \
salt.fileserver.check_file_list_cache(
self.opts, form, list_cache, w_lock
)
if cache_match is not None:
return cache_match
if refresh_cache:
ret = {'files': set(), 'symlinks': {}, 'dirs': set()}
if salt.utils.stringutils.is_hex(load['saltenv']) \
or load['saltenv'] in self.envs():
for repo in self.remotes:
repo_files, repo_symlinks = repo.file_list(load['saltenv'])
ret['files'].update(repo_files)
ret['symlinks'].update(repo_symlinks)
ret['dirs'].update(repo.dir_list(load['saltenv']))
ret['files'] = sorted(ret['files'])
ret['dirs'] = sorted(ret['dirs'])
if save_cache:
salt.fileserver.write_file_list_cache(
self.opts, ret, list_cache, w_lock
)
# NOTE: symlinks are organized in a dict instead of a list, however
# the 'symlinks' key will be defined above so it will never get to
# the default value in the call to ret.get() below.
return ret.get(form, [])
# Shouldn't get here, but if we do, this prevents a TypeError
return {} if form == 'symlinks' else [] | Return a dict containing the file lists for files and dirs | Below is the the instruction that describes the task:
### Input:
Return a dict containing the file lists for files and dirs
### Response:
def _file_lists(self, load, form):
'''
Return a dict containing the file lists for files and dirs
'''
if 'env' in load:
# "env" is not supported; Use "saltenv".
load.pop('env')
if not os.path.isdir(self.file_list_cachedir):
try:
os.makedirs(self.file_list_cachedir)
except os.error:
log.error('Unable to make cachedir %s', self.file_list_cachedir)
return []
list_cache = salt.utils.path.join(
self.file_list_cachedir,
'{0}.p'.format(load['saltenv'].replace(os.path.sep, '_|-'))
)
w_lock = salt.utils.path.join(
self.file_list_cachedir,
'.{0}.w'.format(load['saltenv'].replace(os.path.sep, '_|-'))
)
cache_match, refresh_cache, save_cache = \
salt.fileserver.check_file_list_cache(
self.opts, form, list_cache, w_lock
)
if cache_match is not None:
return cache_match
if refresh_cache:
ret = {'files': set(), 'symlinks': {}, 'dirs': set()}
if salt.utils.stringutils.is_hex(load['saltenv']) \
or load['saltenv'] in self.envs():
for repo in self.remotes:
repo_files, repo_symlinks = repo.file_list(load['saltenv'])
ret['files'].update(repo_files)
ret['symlinks'].update(repo_symlinks)
ret['dirs'].update(repo.dir_list(load['saltenv']))
ret['files'] = sorted(ret['files'])
ret['dirs'] = sorted(ret['dirs'])
if save_cache:
salt.fileserver.write_file_list_cache(
self.opts, ret, list_cache, w_lock
)
# NOTE: symlinks are organized in a dict instead of a list, however
# the 'symlinks' key will be defined above so it will never get to
# the default value in the call to ret.get() below.
return ret.get(form, [])
# Shouldn't get here, but if we do, this prevents a TypeError
return {} if form == 'symlinks' else [] |
def __spawn_new_requests(self):
"""Spawn new requests until the max threads option value is reached.
Note:
If no new requests were spawned and there are no requests in progress
the crawler will stop crawling.
"""
self.__should_spawn_new_requests = False
in_progress_count = len(self.queue.get_all(QueueItem.STATUS_IN_PROGRESS))
while in_progress_count < self.__options.performance.max_threads:
if self.__spawn_new_request():
in_progress_count += 1
else:
break
if in_progress_count == 0:
self.__crawler_stop() | Spawn new requests until the max threads option value is reached.
Note:
If no new requests were spawned and there are no requests in progress
the crawler will stop crawling. | Below is the the instruction that describes the task:
### Input:
Spawn new requests until the max threads option value is reached.
Note:
If no new requests were spawned and there are no requests in progress
the crawler will stop crawling.
### Response:
def __spawn_new_requests(self):
"""Spawn new requests until the max threads option value is reached.
Note:
If no new requests were spawned and there are no requests in progress
the crawler will stop crawling.
"""
self.__should_spawn_new_requests = False
in_progress_count = len(self.queue.get_all(QueueItem.STATUS_IN_PROGRESS))
while in_progress_count < self.__options.performance.max_threads:
if self.__spawn_new_request():
in_progress_count += 1
else:
break
if in_progress_count == 0:
self.__crawler_stop() |
def get_all_projects(url, token, top=HARD_CODED_TOP):
"""
Returns a list of all projects with their collection info from the server. Currently limited functionality to only return the first 1000 projects.
#TODO refactor to add multiple calls to api to retrieve all projects if more exist beyond top.
"""
project_list = []
tfs_client = create_tfs_core_client(url, token)
collections = tfs_client.get_project_collections(top=top)
for collection in collections:
collection_client = create_tfs_core_client('{url}/{collection_name}'.format(url=url, collection_name=collection.name), token)
logger.debug('Retrieving Projects for Project Collection: {collection_name}'.format(collection_name=collection.name))
# Retrieves all projects in the project collection
projects = collection_client.get_projects(top=HARD_CODED_TOP)
# get_projects only gets the project references, have to call get_project_history_entries to get last update info for projects
# Only calling this once per collection as its an expensive API call, wil refactor later if there is a better API call to use
collection_history_list = collection_client.get_project_history_entries()
for project in projects:
# get_projects only gets team project ref objects,
# have to call get_project to get the team project object which includes the TFS Web Url for the project
logger.debug('Retrieving Team Project for Project: {project_name}'.format(project_name=project.name))
projectInfo = collection_client.get_project(project.id, True, True)
tfsProject = TFSProject(projectInfo, collection)
logger.debug('Retrieving Last Updated and Created Info for Project: {project_name}'.format(project_name=project.name))
tfsProject.projectLastUpdateInfo = get_project_last_update_time(collection_history_list, project.id)
tfsProject.projectCreateInfo = get_project_create_time(collection_history_list, project.id)
project_list.append(tfsProject)
return project_list | Returns a list of all projects with their collection info from the server. Currently limited functionality to only return the first 1000 projects.
#TODO refactor to add multiple calls to api to retrieve all projects if more exist beyond top. | Below is the the instruction that describes the task:
### Input:
Returns a list of all projects with their collection info from the server. Currently limited functionality to only return the first 1000 projects.
#TODO refactor to add multiple calls to api to retrieve all projects if more exist beyond top.
### Response:
def get_all_projects(url, token, top=HARD_CODED_TOP):
"""
Returns a list of all projects with their collection info from the server. Currently limited functionality to only return the first 1000 projects.
#TODO refactor to add multiple calls to api to retrieve all projects if more exist beyond top.
"""
project_list = []
tfs_client = create_tfs_core_client(url, token)
collections = tfs_client.get_project_collections(top=top)
for collection in collections:
collection_client = create_tfs_core_client('{url}/{collection_name}'.format(url=url, collection_name=collection.name), token)
logger.debug('Retrieving Projects for Project Collection: {collection_name}'.format(collection_name=collection.name))
# Retrieves all projects in the project collection
projects = collection_client.get_projects(top=HARD_CODED_TOP)
# get_projects only gets the project references, have to call get_project_history_entries to get last update info for projects
# Only calling this once per collection as its an expensive API call, wil refactor later if there is a better API call to use
collection_history_list = collection_client.get_project_history_entries()
for project in projects:
# get_projects only gets team project ref objects,
# have to call get_project to get the team project object which includes the TFS Web Url for the project
logger.debug('Retrieving Team Project for Project: {project_name}'.format(project_name=project.name))
projectInfo = collection_client.get_project(project.id, True, True)
tfsProject = TFSProject(projectInfo, collection)
logger.debug('Retrieving Last Updated and Created Info for Project: {project_name}'.format(project_name=project.name))
tfsProject.projectLastUpdateInfo = get_project_last_update_time(collection_history_list, project.id)
tfsProject.projectCreateInfo = get_project_create_time(collection_history_list, project.id)
project_list.append(tfsProject)
return project_list |
def sort_by_decreasing_count(self):
"""Return a **new** `Vocab` object that is ordered by decreasing count.
The word at index 1 will be most common, the word at index 2 will be
next most common, and so on.
:return: A new vocabulary sorted by decreasing count.
NOTE: UNK will remain at index 0, regardless of its frequency.
"""
words = [w for w, ct in self._counts.most_common()]
v = self.subset(words)
return v | Return a **new** `Vocab` object that is ordered by decreasing count.
The word at index 1 will be most common, the word at index 2 will be
next most common, and so on.
:return: A new vocabulary sorted by decreasing count.
NOTE: UNK will remain at index 0, regardless of its frequency. | Below is the instruction that describes the task:
### Input:
Return a **new** `Vocab` object that is ordered by decreasing count.
The word at index 1 will be most common, the word at index 2 will be
next most common, and so on.
:return: A new vocabulary sorted by decreasing count.
NOTE: UNK will remain at index 0, regardless of its frequency.
### Response:
def sort_by_decreasing_count(self):
    """Build a new `Vocab` ordered by descending word frequency.

    After sorting, index 1 holds the most common word, index 2 the next
    most common, and so on.

    :return: A new vocabulary sorted by decreasing count.
    NOTE: UNK will remain at index 0, regardless of its frequency.
    """
    # most_common() already yields (word, count) pairs in descending order.
    ordered_words = [word for word, _ in self._counts.most_common()]
    return self.subset(ordered_words)
def showPanelMenu(self, panel, point=None):
"""
Creates the panel menu for this view widget. If no point is supplied,\
then the current cursor position will be used.
:param panel | <XViewPanel>
point | <QPoint> || None
"""
if not self._panelMenu:
self._panelMenu = XViewPanelMenu(self)
if point is None:
point = QtGui.QCursor.pos()
self._panelMenu.setCurrentPanel(panel)
self._panelMenu.exec_(point) | Creates the panel menu for this view widget. If no point is supplied,\
then the current cursor position will be used.
:param panel | <XViewPanel>
point | <QPoint> || None | Below is the instruction that describes the task:
### Input:
Creates the panel menu for this view widget. If no point is supplied,\
then the current cursor position will be used.
:param panel | <XViewPanel>
point | <QPoint> || None
### Response:
def showPanelMenu(self, panel, point=None):
    """
    Displays the panel menu for this view widget, creating it on first
    use. When no point is given, the menu pops up at the mouse cursor.

    :param panel | <XViewPanel>
           point | <QPoint> || None
    """
    # Lazily instantiate the shared menu the first time it is needed.
    if not self._panelMenu:
        self._panelMenu = XViewPanelMenu(self)
    menu = self._panelMenu
    menu.setCurrentPanel(panel)
    # Default to the current cursor position when no point was supplied.
    menu.exec_(point if point is not None else QtGui.QCursor.pos())
def set_path(self, path):
'''Sets the listitem's path'''
self._path = path
return self._listitem.setPath(path) | Sets the listitem's path | Below is the instruction that describes the task:
### Input:
Sets the listitem's path
### Response:
def set_path(self, path):
    '''Set the wrapped listitem's path, remembering it locally.'''
    # Keep our own copy before delegating to the underlying listitem.
    self._path = path
    result = self._listitem.setPath(path)
    return result
def _create_body(self, name, label=None, cidr=None):
"""
Used to create the dict required to create a network. Accepts either
'label' or 'name' as the keyword parameter for the label attribute.
"""
label = label or name
body = {"network": {
"label": label,
"cidr": cidr,
}}
return body | Used to create the dict required to create a network. Accepts either
'label' or 'name' as the keyword parameter for the label attribute. | Below is the instruction that describes the task:
### Input:
Used to create the dict required to create a network. Accepts either
'label' or 'name' as the keyword parameter for the label attribute.
### Response:
def _create_body(self, name, label=None, cidr=None):
    """
    Build the request dict for creating a network. Accepts either
    'label' or 'name' as the keyword parameter for the label attribute;
    a truthy `label` takes precedence over `name`.
    """
    network = {
        "label": label or name,
        "cidr": cidr,
    }
    return {"network": network}
def __unget_service_from_factory(self, bundle, reference, service=None):
# type: (Any, ServiceReference, Any) -> bool
"""
Removes the usage of a a service factory or a prototype
service factory by a bundle
:param bundle: The bundle that used the service
:param reference: A service reference
:param service: Service instance (for prototype factories)
:return: True if the bundle usage has been removed
"""
try:
factory, svc_reg = self.__svc_factories[reference]
except KeyError:
# Unknown service reference
return False
# Check the per-bundle usage counter
try:
counter = self.__factory_usage[bundle]
except KeyError:
# Unknown reference to a factory
return False
else:
if counter.unget_service(factory, svc_reg, service):
try:
# No more dependency
reference.unused_by(bundle)
# All references have been taken away: clean up
if not self.__factory_usage[bundle].is_used():
del self.__factory_usage[bundle]
# Remove the service reference from the bundle
imports = self.__bundle_imports[bundle]
del imports[reference]
except KeyError:
# Unknown reference
return False
else:
# Clean up
if not imports:
del self.__bundle_imports[bundle]
return True | Removes the usage of a service factory or a prototype
service factory by a bundle
:param bundle: The bundle that used the service
:param reference: A service reference
:param service: Service instance (for prototype factories)
:return: True if the bundle usage has been removed | Below is the instruction that describes the task:
### Input:
Removes the usage of a service factory or a prototype
service factory by a bundle
:param bundle: The bundle that used the service
:param reference: A service reference
:param service: Service instance (for prototype factories)
:return: True if the bundle usage has been removed
### Response:
def __unget_service_from_factory(self, bundle, reference, service=None):
    # type: (Any, ServiceReference, Any) -> bool
    """
    Removes the usage of a service factory or a prototype
    service factory by a bundle

    :param bundle: The bundle that used the service
    :param reference: A service reference
    :param service: Service instance (for prototype factories)
    :return: True if the bundle usage has been removed
    """
    # Resolve the factory and its registration for this reference.
    try:
        factory, svc_reg = self.__svc_factories[reference]
    except KeyError:
        # Unknown service reference
        return False

    # Check the per-bundle usage counter
    try:
        counter = self.__factory_usage[bundle]
    except KeyError:
        # Unknown reference to a factory
        return False
    else:
        if counter.unget_service(factory, svc_reg, service):
            # Counter reports the last usage was released: drop bookkeeping.
            try:
                # No more dependency
                reference.unused_by(bundle)

                # All references have been taken away: clean up
                if not self.__factory_usage[bundle].is_used():
                    del self.__factory_usage[bundle]

                # Remove the service reference from the bundle
                imports = self.__bundle_imports[bundle]
                del imports[reference]
            except KeyError:
                # Unknown reference
                return False
            else:
                # Clean up the bundle's import map once it is empty.
                if not imports:
                    del self.__bundle_imports[bundle]

    # NOTE(review): returns True even when unget_service() reported the
    # service still in use (cleanup skipped) — confirm this is intended.
    return True
def plot_density(population, # pylint: disable=too-many-arguments, too-many-locals
bins=100, new_fig=True, subplot=111, levels=None, plane='xy',
colorlabel='Nodes per unit area', labelfontsize=16,
color_map='Reds', no_colorbar=False, threshold=0.01,
neurite_type=NeuriteType.basal_dendrite, **kwargs):
'''Plots the 2d histogram of the center
coordinates of segments in the selected plane.
'''
fig, ax = common.get_figure(new_fig=new_fig, subplot=subplot)
H1, xedges1, yedges1 = extract_density(population, plane=plane, bins=bins,
neurite_type=neurite_type)
mask = H1 < threshold # mask = H1==0
H2 = np.ma.masked_array(H1, mask)
getattr(plt.cm, color_map).set_bad(color='white', alpha=None)
plots = ax.contourf((xedges1[:-1] + xedges1[1:]) / 2,
(yedges1[:-1] + yedges1[1:]) / 2,
np.transpose(H2), # / np.max(H2),
cmap=getattr(plt.cm, color_map), levels=levels)
if not no_colorbar:
cbar = plt.colorbar(plots)
cbar.ax.set_ylabel(colorlabel, fontsize=labelfontsize)
kwargs['title'] = kwargs.get('title', '')
kwargs['xlabel'] = kwargs.get('xlabel', plane[0])
kwargs['ylabel'] = kwargs.get('ylabel', plane[1])
return common.plot_style(fig=fig, ax=ax, **kwargs) | Plots the 2d histogram of the center
coordinates of segments in the selected plane. | Below is the instruction that describes the task:
### Input:
Plots the 2d histogram of the center
coordinates of segments in the selected plane.
### Response:
def plot_density(population,  # pylint: disable=too-many-arguments, too-many-locals
                 bins=100, new_fig=True, subplot=111, levels=None, plane='xy',
                 colorlabel='Nodes per unit area', labelfontsize=16,
                 color_map='Reds', no_colorbar=False, threshold=0.01,
                 neurite_type=NeuriteType.basal_dendrite, **kwargs):
    '''Draw a filled-contour 2d histogram of segment center coordinates
    in the selected plane.

    Bins whose density falls below `threshold` are masked out and drawn
    in white.
    '''
    fig, ax = common.get_figure(new_fig=new_fig, subplot=subplot)
    density, xedges, yedges = extract_density(population, plane=plane, bins=bins,
                                              neurite_type=neurite_type)
    # Hide low-density bins so empty regions stay white.
    masked = np.ma.masked_array(density, density < threshold)
    cmap = getattr(plt.cm, color_map)
    cmap.set_bad(color='white', alpha=None)
    # Contour at the bin centers rather than the bin edges.
    xcenters = (xedges[:-1] + xedges[1:]) / 2
    ycenters = (yedges[:-1] + yedges[1:]) / 2
    plots = ax.contourf(xcenters, ycenters, np.transpose(masked),
                        cmap=cmap, levels=levels)
    if not no_colorbar:
        cbar = plt.colorbar(plots)
        cbar.ax.set_ylabel(colorlabel, fontsize=labelfontsize)
    kwargs.setdefault('title', '')
    kwargs.setdefault('xlabel', plane[0])
    kwargs.setdefault('ylabel', plane[1])
    return common.plot_style(fig=fig, ax=ax, **kwargs)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.