code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def shoebot_example(**shoebot_kwargs):
    """
    Decorator to run some code in a bot instance.

    The wrapped function receives a freshly created bot as its only
    argument and its output is rendered to ``/tmp/shoebot-<name>.png``.
    Failures are reported on stdout instead of being raised, so a
    missing or broken shoebot install does not abort the caller.
    """
    def decorator(f):
        def run():
            # Imported lazily so importing this module never requires
            # shoebot itself; see the linked issue for background.
            from shoebot import ShoebotInstallError # https://github.com/shoebot/shoebot/issues/206
            print(" Shoebot - %s:" % f.__name__.replace("_", " "))
            try:
                import shoebot
                outputfile = "/tmp/shoebot-%s.png" % f.__name__
                bot = shoebot.create_bot(outputfile=outputfile)
                f(bot)
                bot.finish()
                print(' [passed] : %s' % outputfile)
                print('')
            except ShoebotInstallError as e:
                # Shoebot present but unusable (e.g. missing system deps).
                print(' [failed]', e.args[0])
                print('')
            except Exception:
                # Any other bot failure: print the traceback and continue.
                # NOTE(review): relies on a module-level `traceback` import
                # not visible in this block — confirm it exists.
                print(' [failed] - traceback:')
                for line in traceback.format_exc().splitlines():
                    print(' %s' % line)
                print('')
        return run
return decorator | Decorator to run some code in a bot instance. | Below is the the instruction that describes the task:
### Input:
Decorator to run some code in a bot instance.
### Response:
def shoebot_example(**shoebot_kwargs):
"""
Decorator to run some code in a bot instance.
"""
def decorator(f):
def run():
from shoebot import ShoebotInstallError # https://github.com/shoebot/shoebot/issues/206
print(" Shoebot - %s:" % f.__name__.replace("_", " "))
try:
import shoebot
outputfile = "/tmp/shoebot-%s.png" % f.__name__
bot = shoebot.create_bot(outputfile=outputfile)
f(bot)
bot.finish()
print(' [passed] : %s' % outputfile)
print('')
except ShoebotInstallError as e:
print(' [failed]', e.args[0])
print('')
except Exception:
print(' [failed] - traceback:')
for line in traceback.format_exc().splitlines():
print(' %s' % line)
print('')
return run
return decorator |
def list_group_members(self, name, url_prefix, auth, session, send_opts):
    """Get the members of a group (does not include maintainers).

    Args:
        name (string): Name of group to query.
        url_prefix (string): Protocol + host such as https://api.theboss.io
        auth (string): Token to send in the request header.
        session (requests.Session): HTTP session to use for request.
        send_opts (dictionary): Additional arguments to pass to session.send().

    Returns:
        (list[string]): List of member names.

    Raises:
        requests.HTTPError on failure.
    """
    # Build the GET request via the shared request factory, then send it
    # through the caller-supplied session so auth/retries are reused.
    req = self.get_group_members_request(
        'GET', 'application/json', url_prefix, auth, name)
    prep = session.prepare_request(req)
    resp = session.send(prep, **send_opts)
    if resp.status_code == 200:
        resp_json = resp.json()
        return resp_json['members']
    # Non-200: surface the failure with enough context to debug.
    msg = ('Failed getting members of group {}, got HTTP response: ({}) - {}'.format(
        name, resp.status_code, resp.text))
raise HTTPError(msg, request = req, response = resp) | Get the members of a group (does not include maintainers).
Args:
name (string): Name of group to query.
url_prefix (string): Protocol + host such as https://api.theboss.io
auth (string): Token to send in the request header.
session (requests.Session): HTTP session to use for request.
send_opts (dictionary): Additional arguments to pass to session.send().
Returns:
(list[string]): List of member names.
Raises:
requests.HTTPError on failure. | Below is the instruction that describes the task:
### Input:
Get the members of a group (does not include maintainers).
Args:
name (string): Name of group to query.
url_prefix (string): Protocol + host such as https://api.theboss.io
auth (string): Token to send in the request header.
session (requests.Session): HTTP session to use for request.
send_opts (dictionary): Additional arguments to pass to session.send().
Returns:
(list[string]): List of member names.
Raises:
requests.HTTPError on failure.
### Response:
def list_group_members(self, name, url_prefix, auth, session, send_opts):
"""Get the members of a group (does not include maintainers).
Args:
name (string): Name of group to query.
url_prefix (string): Protocol + host such as https://api.theboss.io
auth (string): Token to send in the request header.
session (requests.Session): HTTP session to use for request.
send_opts (dictionary): Additional arguments to pass to session.send().
Returns:
(list[string]): List of member names.
Raises:
requests.HTTPError on failure.
"""
req = self.get_group_members_request(
'GET', 'application/json', url_prefix, auth, name)
prep = session.prepare_request(req)
resp = session.send(prep, **send_opts)
if resp.status_code == 200:
resp_json = resp.json()
return resp_json['members']
msg = ('Failed getting members of group {}, got HTTP response: ({}) - {}'.format(
name, resp.status_code, resp.text))
raise HTTPError(msg, request = req, response = resp) |
def _vpc_config(self):
    """Get VPC config.

    Returns:
        dict: ``{'SubnetIds': [...], 'SecurityGroupIds': [...]}``. Both
        lists are empty when VPC integration is disabled, which signals
        Lambda to run the function outside a VPC.
    """
    if self.vpc_enabled:
        # Internal-purpose subnets for this env/region plus the security
        # groups resolved for this function.
        subnets = get_subnets(env=self.env, region=self.region, purpose='internal')['subnet_ids'][self.region]
        security_groups = self._get_sg_ids()
        vpc_config = {'SubnetIds': subnets, 'SecurityGroupIds': security_groups}
    else:
        vpc_config = {'SubnetIds': [], 'SecurityGroupIds': []}
    LOG.debug("Lambda VPC config setup: %s", vpc_config)
return vpc_config | Get VPC config. | Below is the the instruction that describes the task:
### Input:
Get VPC config.
### Response:
def _vpc_config(self):
"""Get VPC config."""
if self.vpc_enabled:
subnets = get_subnets(env=self.env, region=self.region, purpose='internal')['subnet_ids'][self.region]
security_groups = self._get_sg_ids()
vpc_config = {'SubnetIds': subnets, 'SecurityGroupIds': security_groups}
else:
vpc_config = {'SubnetIds': [], 'SecurityGroupIds': []}
LOG.debug("Lambda VPC config setup: %s", vpc_config)
return vpc_config |
def spheres_intersect(ar, aR, br, bR):
    """Return whether or not two spheres intersect each other.

    Compares the squared centre distance against the squared sum of the
    radii, avoiding a square root.

    Parameters
    ----------
    ar, br: array-like, shape (n,) in n dimensions
        Coordinates of the centres of the spheres `a` and `b`.
    aR, bR: float
        Radii of the spheres `a` and `b`.

    Returns
    -------
    intersecting: boolean
        True if the spheres intersect.
    """
return vector.vector_mag_sq(ar - br) < (aR + bR) ** 2 | Return whether or not two spheres intersect each other.
Parameters
----------
ar, br: array-like, shape (n,) in n dimensions
Coordinates of the centres of the spheres `a` and `b`.
aR, bR: float
Radiuses of the spheres `a` and `b`.
Returns
-------
intersecting: boolean
True if the spheres intersect. | Below is the instruction that describes the task:
### Input:
Return whether or not two spheres intersect each other.
Parameters
----------
ar, br: array-like, shape (n,) in n dimensions
Coordinates of the centres of the spheres `a` and `b`.
aR, bR: float
Radiuses of the spheres `a` and `b`.
Returns
-------
intersecting: boolean
True if the spheres intersect.
### Response:
def spheres_intersect(ar, aR, br, bR):
"""Return whether or not two spheres intersect each other.
Parameters
----------
ar, br: array-like, shape (n,) in n dimensions
Coordinates of the centres of the spheres `a` and `b`.
aR, bR: float
Radiuses of the spheres `a` and `b`.
Returns
-------
intersecting: boolean
True if the spheres intersect.
"""
return vector.vector_mag_sq(ar - br) < (aR + bR) ** 2 |
def _dereference(cls, documents, references):
    """Dereference one or more documents.

    For each path in ``references`` whose projection carries a ``$ref``
    frame class, the referenced documents are fetched in a single query
    and spliced into ``documents`` in place of their ids. ``documents``
    is modified in place.
    """
    # Dereference each reference
    for path, projection in references.items():
        # Check there is a $ref in the projection, else skip it
        if '$ref' not in projection:
            continue
        # Collect Ids of documents to dereference
        ids = set()
        for document in documents:
            value = cls._path_to_value(path, document)
            if not value:
                continue
            # A path may hold a single id, a list of ids, or a mapping
            # of keys to ids.
            if isinstance(value, list):
                ids.update(value)
            elif isinstance(value, dict):
                ids.update(value.values())
            else:
                ids.add(value)
        # Find the referenced documents (one query for all ids);
        # `pop` so the remaining projection applies to the referenced frames.
        ref = projection.pop('$ref')
        frames = ref.many(
            {'_id': {'$in': list(ids)}},
            projection=projection
        )
        frames = {f._id: f for f in frames}
        # Add dereferenced frames to the document
        for document in documents:
            value = cls._path_to_value(path, document)
            if not value:
                continue
            if isinstance(value, list):
                # List of references; unresolved ids are dropped.
                value = [frames[id] for id in value if id in frames]
            elif isinstance(value, dict):
                # Dictionary of references; unresolved ids become None.
                value = {key: frames.get(id) for key, id in value.items()}
            else:
                value = frames.get(value, None)
            # Walk to the parent of the final key so the dereferenced
            # value can be assigned in place.
            child_document = document
            keys = cls._path_to_keys(path)
            for key in keys[:-1]:
                child_document = child_document[key]
child_document[keys[-1]] = value | Dereference one or more documents | Below is the the instruction that describes the task:
### Input:
Dereference one or more documents
### Response:
def _dereference(cls, documents, references):
"""Dereference one or more documents"""
# Dereference each reference
for path, projection in references.items():
# Check there is a $ref in the projection, else skip it
if '$ref' not in projection:
continue
# Collect Ids of documents to dereference
ids = set()
for document in documents:
value = cls._path_to_value(path, document)
if not value:
continue
if isinstance(value, list):
ids.update(value)
elif isinstance(value, dict):
ids.update(value.values())
else:
ids.add(value)
# Find the referenced documents
ref = projection.pop('$ref')
frames = ref.many(
{'_id': {'$in': list(ids)}},
projection=projection
)
frames = {f._id: f for f in frames}
# Add dereferenced frames to the document
for document in documents:
value = cls._path_to_value(path, document)
if not value:
continue
if isinstance(value, list):
# List of references
value = [frames[id] for id in value if id in frames]
elif isinstance(value, dict):
# Dictionary of references
value = {key: frames.get(id) for key, id in value.items()}
else:
value = frames.get(value, None)
child_document = document
keys = cls._path_to_keys(path)
for key in keys[:-1]:
child_document = child_document[key]
child_document[keys[-1]] = value |
def service_start(name):
    '''
    Start a "service" on the ssh server

    :param name: service name, appended verbatim to the remote command.
    :return: parsed fields from the remote command's output.

    .. versionadded:: 2015.8.2
    '''
    # NOTE(review): `name` is interpolated unescaped into the remote
    # command line — confirm callers only pass trusted service names.
    cmd = 'start ' + name
    # Send the command to execute
    out, err = DETAILS['server'].sendline(cmd)
    # "scrape" the output and return the right fields as a dict
return parse(out) | Start a "service" on the ssh server
.. versionadded:: 2015.8.2 | Below is the the instruction that describes the task:
### Input:
Start a "service" on the ssh server
.. versionadded:: 2015.8.2
### Response:
def service_start(name):
'''
Start a "service" on the ssh server
.. versionadded:: 2015.8.2
'''
cmd = 'start ' + name
# Send the command to execute
out, err = DETAILS['server'].sendline(cmd)
# "scrape" the output and return the right fields as a dict
return parse(out) |
def SA_torispheroidal(D, fd, fk):
    r'''Calculates surface area of a torispherical head according to [1]_.

    The head is treated as a spherical dish plus a toroidal knuckle,
    with the equations adapted for a full head:

    .. math::
        SA = S_1 + S_2

    where :math:`S_1` covers the dish up to the dish/knuckle junction
    and :math:`S_2` the knuckle between the junction and the head top.

    Parameters
    ----------
    D : float
        Diameter of the main cylindrical section, [m]
    fd : float
        Dish-radius parameter = f; fD = dish radius [1/m]
    fk : float
        knuckle-radius parameter = k; kD = knuckle radius [1/m]

    Returns
    -------
    SA : float
        Surface area [m^2]

    Examples
    --------
    Example from [1]_.

    >>> SA_torispheroidal(D=2.54, fd=1.039370079, fk=0.062362205)
    6.00394283477063

    References
    ----------
    .. [1] Honeywell. "Calculate Surface Areas and Cross-sectional Areas in
       Vessels with Dished Heads".
       https://www.honeywellprocess.com/library/marketing/whitepapers/WP-VesselsWithDishedHeads-UniSimDesign.pdf
       Whitepaper. 2014.
    '''
    # Normalized heights of the dish/knuckle junction (a1) and of the
    # head top (a2), both as fractions of the diameter.
    a1 = fd*(1.0 - (1.0 - ((0.5 - fk)/(fd - fk))**2)**0.5)
    a2 = fd - (fd*fd - 2.0*fd*fk + fk - 0.25)**0.5
    # Spherical-dish contribution, up to the junction.
    S1 = 2.0*pi*D*D*fd*a1
    # Toroidal-knuckle contribution; the asin term at the head top
    # vanishes (its argument is zero), leaving only the junction term.
    arc = -asin((a1 - a2)/fk)
    S2 = 2.0*pi*D*D*fk*(a2 - a1 + (0.5 - fk)*arc)
    return S1 + S2
Somewhat involved. Equations are adapted to be used for a full head.
.. math::
SA = S_1 + S_2
.. math::
S_1 = 2\pi D^2 f_d \alpha
.. math::
S_2 = 2\pi D^2 f_k\left(\alpha - \alpha_1 + (0.5 - f_k)\left(\sin^{-1}
\left(\frac{\alpha-\alpha_2}{f_k}\right) - \sin^{-1}\left(\frac{
\alpha_1-\alpha_2}{f_k}\right)\right)\right)
.. math::
\alpha_1 = f_d\left(1 - \sqrt{1 - \left(\frac{0.5 - f_k}{f_d-f_k}
\right)^2}\right)
.. math::
\alpha_2 = f_d - \sqrt{f_d^2 - 2f_d f_k + f_k - 0.25}
.. math::
\alpha = \frac{a}{D_i}
Parameters
----------
D : float
Diameter of the main cylindrical section, [m]
fd : float
Dish-radius parameter = f; fD = dish radius [1/m]
fk : float
knuckle-radius parameter = k; kD = knuckle radius [1/m]
Returns
-------
SA : float
Surface area [m^2]
Examples
--------
Example from [1]_.
>>> SA_torispheroidal(D=2.54, fd=1.039370079, fk=0.062362205)
6.00394283477063
References
----------
.. [1] Honeywell. "Calculate Surface Areas and Cross-sectional Areas in
Vessels with Dished Heads". https://www.honeywellprocess.com/library/marketing/whitepapers/WP-VesselsWithDishedHeads-UniSimDesign.pdf
Whitepaper. 2014. | Below is the instruction that describes the task:
### Input:
r'''Calculates surface area of a torispherical head according to [1]_.
Somewhat involved. Equations are adapted to be used for a full head.
.. math::
SA = S_1 + S_2
.. math::
S_1 = 2\pi D^2 f_d \alpha
.. math::
S_2 = 2\pi D^2 f_k\left(\alpha - \alpha_1 + (0.5 - f_k)\left(\sin^{-1}
\left(\frac{\alpha-\alpha_2}{f_k}\right) - \sin^{-1}\left(\frac{
\alpha_1-\alpha_2}{f_k}\right)\right)\right)
.. math::
\alpha_1 = f_d\left(1 - \sqrt{1 - \left(\frac{0.5 - f_k}{f_d-f_k}
\right)^2}\right)
.. math::
\alpha_2 = f_d - \sqrt{f_d^2 - 2f_d f_k + f_k - 0.25}
.. math::
\alpha = \frac{a}{D_i}
Parameters
----------
D : float
Diameter of the main cylindrical section, [m]
fd : float
Dish-radius parameter = f; fD = dish radius [1/m]
fk : float
knuckle-radius parameter = k; kD = knuckle radius [1/m]
Returns
-------
SA : float
Surface area [m^2]
Examples
--------
Example from [1]_.
>>> SA_torispheroidal(D=2.54, fd=1.039370079, fk=0.062362205)
6.00394283477063
References
----------
.. [1] Honeywell. "Calculate Surface Areas and Cross-sectional Areas in
Vessels with Dished Heads". https://www.honeywellprocess.com/library/marketing/whitepapers/WP-VesselsWithDishedHeads-UniSimDesign.pdf
Whitepaper. 2014.
### Response:
def SA_torispheroidal(D, fd, fk):
r'''Calculates surface area of a torispherical head according to [1]_.
Somewhat involved. Equations are adapted to be used for a full head.
.. math::
SA = S_1 + S_2
.. math::
S_1 = 2\pi D^2 f_d \alpha
.. math::
S_2 = 2\pi D^2 f_k\left(\alpha - \alpha_1 + (0.5 - f_k)\left(\sin^{-1}
\left(\frac{\alpha-\alpha_2}{f_k}\right) - \sin^{-1}\left(\frac{
\alpha_1-\alpha_2}{f_k}\right)\right)\right)
.. math::
\alpha_1 = f_d\left(1 - \sqrt{1 - \left(\frac{0.5 - f_k}{f_d-f_k}
\right)^2}\right)
.. math::
\alpha_2 = f_d - \sqrt{f_d^2 - 2f_d f_k + f_k - 0.25}
.. math::
\alpha = \frac{a}{D_i}
Parameters
----------
D : float
Diameter of the main cylindrical section, [m]
fd : float
Dish-radius parameter = f; fD = dish radius [1/m]
fk : float
knuckle-radius parameter = k; kD = knuckle radius [1/m]
Returns
-------
SA : float
Surface area [m^2]
Examples
--------
Example from [1]_.
>>> SA_torispheroidal(D=2.54, fd=1.039370079, fk=0.062362205)
6.00394283477063
References
----------
.. [1] Honeywell. "Calculate Surface Areas and Cross-sectional Areas in
Vessels with Dished Heads". https://www.honeywellprocess.com/library/marketing/whitepapers/WP-VesselsWithDishedHeads-UniSimDesign.pdf
Whitepaper. 2014.
'''
alpha_1 = fd*(1 - (1 - ((0.5-fk)/(fd-fk))**2)**0.5)
alpha_2 = fd - (fd**2 - 2*fd*fk + fk - 0.25)**0.5
alpha = alpha_1 # Up to top of dome
S1 = 2*pi*D**2*fd*alpha_1
alpha = alpha_2 # up to top of torus
S2_sub = asin((alpha-alpha_2)/fk) - asin((alpha_1-alpha_2)/fk)
S2 = 2*pi*D**2*fk*(alpha - alpha_1 + (0.5-fk)*S2_sub)
return S1 + S2 |
def dictfetchall(cursor: Cursor) -> List[Dict[str, Any]]:
    """
    Return all rows from a cursor as a list of :class:`OrderedDict` objects.

    Args:
        cursor: the cursor

    Returns:
        a list (one item per row) of :class:`OrderedDict` objects whose keys are
        column names and whose values are the row values
    """
    # Column names are resolved once and zipped against every row, so
    # each dict preserves the SELECT's column order.
    columns = get_fieldnames_from_cursor(cursor)
    return [
        OrderedDict(zip(columns, row))
        for row in cursor.fetchall()
] | Return all rows from a cursor as a list of :class:`OrderedDict` objects.
Args:
cursor: the cursor
Returns:
a list (one item per row) of :class:`OrderedDict` objects whose key are
column names and whose values are the row values | Below is the the instruction that describes the task:
### Input:
Return all rows from a cursor as a list of :class:`OrderedDict` objects.
Args:
cursor: the cursor
Returns:
a list (one item per row) of :class:`OrderedDict` objects whose key are
column names and whose values are the row values
### Response:
def dictfetchall(cursor: Cursor) -> List[Dict[str, Any]]:
"""
Return all rows from a cursor as a list of :class:`OrderedDict` objects.
Args:
cursor: the cursor
Returns:
a list (one item per row) of :class:`OrderedDict` objects whose key are
column names and whose values are the row values
"""
columns = get_fieldnames_from_cursor(cursor)
return [
OrderedDict(zip(columns, row))
for row in cursor.fetchall()
] |
def _get_word_end_cursor(self, position):
    """ Find the end of the word to the right the given position. If a
    sequence of non-word characters precedes the first word, skip over
    them. (This emulates the behavior of bash, emacs, etc.)

    :param position: document offset to start scanning from.
    :return: a text cursor positioned just past the end of that word.
    """
    document = self._control.document()
    end = self._get_end_cursor().position()
    # First skip any run of non-word characters...
    while position < end and \
            not is_letter_or_number(document.characterAt(position)):
        position += 1
    # ...then consume the word itself.
    while position < end and \
            is_letter_or_number(document.characterAt(position)):
        position += 1
    cursor = self._control.textCursor()
    cursor.setPosition(position)
return cursor | Find the end of the word to the right the given position. If a
sequence of non-word characters precedes the first word, skip over
them. (This emulates the behavior of bash, emacs, etc.) | Below is the instruction that describes the task:
### Input:
Find the end of the word to the right the given position. If a
sequence of non-word characters precedes the first word, skip over
them. (This emulates the behavior of bash, emacs, etc.)
### Response:
def _get_word_end_cursor(self, position):
""" Find the end of the word to the right the given position. If a
sequence of non-word characters precedes the first word, skip over
them. (This emulates the behavior of bash, emacs, etc.)
"""
document = self._control.document()
end = self._get_end_cursor().position()
while position < end and \
not is_letter_or_number(document.characterAt(position)):
position += 1
while position < end and \
is_letter_or_number(document.characterAt(position)):
position += 1
cursor = self._control.textCursor()
cursor.setPosition(position)
return cursor |
def init(opts):
    '''
    This function gets called when the proxy starts up. For
    login
    the protocol and port are cached.

    :param opts: proxy-minion options; the ``proxy`` section (merged
        with pillar) must satisfy ``EsxclusterProxySchema``.
    :return: ``False`` if credentials could not be retrieved for the
        ``userpass`` mechanism (``True`` otherwise, returned past the
        end of this excerpt).
    '''
    log.debug('Initting esxcluster proxy module in process %s', os.getpid())
    log.debug('Validating esxcluster proxy input')
    schema = EsxclusterProxySchema.serialize()
    log.trace('schema = %s', schema)
    # Minion config wins over pillar for conflicting keys.
    proxy_conf = merge(opts.get('proxy', {}), __pillar__.get('proxy', {}))
    log.trace('proxy_conf = %s', proxy_conf)
    try:
        jsonschema.validate(proxy_conf, schema)
    except jsonschema.exceptions.ValidationError as exc:
        raise salt.exceptions.InvalidConfigError(exc)
    # Save mandatory fields in cache
    for key in ('vcenter', 'datacenter', 'cluster', 'mechanism'):
        DETAILS[key] = proxy_conf[key]
    # Additional validation
    if DETAILS['mechanism'] == 'userpass':
        if 'username' not in proxy_conf:
            raise salt.exceptions.InvalidConfigError(
                'Mechanism is set to \'userpass\', but no '
                '\'username\' key found in proxy config.')
        if 'passwords' not in proxy_conf:
            raise salt.exceptions.InvalidConfigError(
                'Mechanism is set to \'userpass\', but no '
                '\'passwords\' key found in proxy config.')
        for key in ('username', 'passwords'):
            DETAILS[key] = proxy_conf[key]
    else:
        # Any non-'userpass' mechanism is treated as SSPI here;
        # presumably the schema restricts mechanism to these two values.
        if 'domain' not in proxy_conf:
            raise salt.exceptions.InvalidConfigError(
                'Mechanism is set to \'sspi\', but no '
                '\'domain\' key found in proxy config.')
        if 'principal' not in proxy_conf:
            raise salt.exceptions.InvalidConfigError(
                'Mechanism is set to \'sspi\', but no '
                '\'principal\' key found in proxy config.')
        for key in ('domain', 'principal'):
            DETAILS[key] = proxy_conf[key]
    # Save optional
    DETAILS['protocol'] = proxy_conf.get('protocol')
    DETAILS['port'] = proxy_conf.get('port')
    # Test connection
    if DETAILS['mechanism'] == 'userpass':
        # Get the correct login details
        # NOTE(review): 'mehchanism' typo in the runtime log message
        # below — fix separately; left byte-identical here.
        log.debug('Retrieving credentials and testing vCenter connection for '
                  'mehchanism \'userpass\'')
        try:
            # NOTE(review): `username` is unused after unpacking —
            # confirm this is intentional.
            username, password = find_credentials()
            DETAILS['password'] = password
        except salt.exceptions.SaltSystemExit as err:
            log.critical('Error: %s', err)
            return False
return True | This function gets called when the proxy starts up. For
login
the protocol and port are cached. | Below is the instruction that describes the task:
### Input:
This function gets called when the proxy starts up. For
login
the protocol and port are cached.
### Response:
def init(opts):
'''
This function gets called when the proxy starts up. For
login
the protocol and port are cached.
'''
log.debug('Initting esxcluster proxy module in process %s', os.getpid())
log.debug('Validating esxcluster proxy input')
schema = EsxclusterProxySchema.serialize()
log.trace('schema = %s', schema)
proxy_conf = merge(opts.get('proxy', {}), __pillar__.get('proxy', {}))
log.trace('proxy_conf = %s', proxy_conf)
try:
jsonschema.validate(proxy_conf, schema)
except jsonschema.exceptions.ValidationError as exc:
raise salt.exceptions.InvalidConfigError(exc)
# Save mandatory fields in cache
for key in ('vcenter', 'datacenter', 'cluster', 'mechanism'):
DETAILS[key] = proxy_conf[key]
# Additional validation
if DETAILS['mechanism'] == 'userpass':
if 'username' not in proxy_conf:
raise salt.exceptions.InvalidConfigError(
'Mechanism is set to \'userpass\', but no '
'\'username\' key found in proxy config.')
if 'passwords' not in proxy_conf:
raise salt.exceptions.InvalidConfigError(
'Mechanism is set to \'userpass\', but no '
'\'passwords\' key found in proxy config.')
for key in ('username', 'passwords'):
DETAILS[key] = proxy_conf[key]
else:
if 'domain' not in proxy_conf:
raise salt.exceptions.InvalidConfigError(
'Mechanism is set to \'sspi\', but no '
'\'domain\' key found in proxy config.')
if 'principal' not in proxy_conf:
raise salt.exceptions.InvalidConfigError(
'Mechanism is set to \'sspi\', but no '
'\'principal\' key found in proxy config.')
for key in ('domain', 'principal'):
DETAILS[key] = proxy_conf[key]
# Save optional
DETAILS['protocol'] = proxy_conf.get('protocol')
DETAILS['port'] = proxy_conf.get('port')
# Test connection
if DETAILS['mechanism'] == 'userpass':
# Get the correct login details
log.debug('Retrieving credentials and testing vCenter connection for '
'mehchanism \'userpass\'')
try:
username, password = find_credentials()
DETAILS['password'] = password
except salt.exceptions.SaltSystemExit as err:
log.critical('Error: %s', err)
return False
return True |
def fullname(self) -> str:
    """
    Description of the process.

    Format: ``Process <procnum>/<nprocs> (<name>)``, with
    `` (PID=<pid>)`` appended while the process is running.
    """
    label = "Process {}/{} ({})".format(self.procnum, self.nprocs,
                                        self.details.name)
    if not self.running:
        return label
    return label + " (PID={})".format(self.process.pid)
### Input:
Description of the process.
### Response:
def fullname(self) -> str:
"""
Description of the process.
"""
fullname = "Process {}/{} ({})".format(self.procnum, self.nprocs,
self.details.name)
if self.running:
fullname += " (PID={})".format(self.process.pid)
return fullname |
def confirmation(self, pdu):
    """Packets coming up the stack are APDU's.

    Applies the device-communication-control (DCC) filter, decodes the
    raw PDU into a typed APDU, then routes it to the matching client or
    server transaction. Transactions are matched by the
    (invokeID, source address) pair; a new ServerSSM is created for
    confirmed requests with no existing transaction.
    """
    if _debug: StateMachineAccessPoint._debug("confirmation %r", pdu)
    # check device communication control
    if self.dccEnableDisable == 'enable':
        if _debug: StateMachineAccessPoint._debug(" - communications enabled")
    elif self.dccEnableDisable == 'disable':
        # While disabled, only DCC requests, reinitialize-device, and
        # Who-Is are allowed through; everything else is dropped.
        if (pdu.apduType == 0) and (pdu.apduService == 17):
            if _debug: StateMachineAccessPoint._debug(" - continue with DCC request")
        elif (pdu.apduType == 0) and (pdu.apduService == 20):
            if _debug: StateMachineAccessPoint._debug(" - continue with reinitialize device")
        elif (pdu.apduType == 1) and (pdu.apduService == 8):
            if _debug: StateMachineAccessPoint._debug(" - continue with Who-Is")
        else:
            if _debug: StateMachineAccessPoint._debug(" - not a Who-Is, dropped")
            return
    elif self.dccEnableDisable == 'disableInitiation':
        # Inbound traffic is still processed; only initiation is off.
        if _debug: StateMachineAccessPoint._debug(" - initiation disabled")
    # make a more focused interpretation
    atype = apdu_types.get(pdu.apduType)
    if not atype:
        StateMachineAccessPoint._warning(" - unknown apduType: %r", pdu.apduType)
        return
    # decode it
    apdu = atype()
    apdu.decode(pdu)
    if _debug: StateMachineAccessPoint._debug(" - apdu: %r", apdu)
    if isinstance(apdu, ConfirmedRequestPDU):
        # find duplicates of this request
        for tr in self.serverTransactions:
            if (apdu.apduInvokeID == tr.invokeID) and (apdu.pduSource == tr.pdu_address):
                break
        else:
            # build a server transaction
            tr = ServerSSM(self, apdu.pduSource)
            # add it to our transactions to track it
            self.serverTransactions.append(tr)
        # let it run with the apdu
        tr.indication(apdu)
    elif isinstance(apdu, UnconfirmedRequestPDU):
        # deliver directly to the application
        self.sap_request(apdu)
    elif isinstance(apdu, SimpleAckPDU) \
            or isinstance(apdu, ComplexAckPDU) \
            or isinstance(apdu, ErrorPDU) \
            or isinstance(apdu, RejectPDU):
        # find the client transaction this is acking; silently drop the
        # APDU when no matching transaction exists.
        for tr in self.clientTransactions:
            if (apdu.apduInvokeID == tr.invokeID) and (apdu.pduSource == tr.pdu_address):
                break
        else:
            return
        # send the packet on to the transaction
        tr.confirmation(apdu)
    elif isinstance(apdu, AbortPDU):
        # find the transaction being aborted; apduSrv tells us whether
        # a server (our client transaction) or a client sent the abort.
        if apdu.apduSrv:
            for tr in self.clientTransactions:
                if (apdu.apduInvokeID == tr.invokeID) and (apdu.pduSource == tr.pdu_address):
                    break
            else:
                return
            # send the packet on to the transaction
            tr.confirmation(apdu)
        else:
            for tr in self.serverTransactions:
                if (apdu.apduInvokeID == tr.invokeID) and (apdu.pduSource == tr.pdu_address):
                    break
            else:
                return
            # send the packet on to the transaction
            tr.indication(apdu)
    elif isinstance(apdu, SegmentAckPDU):
        # find the transaction being acknowledged, same direction logic
        # as AbortPDU above.
        if apdu.apduSrv:
            for tr in self.clientTransactions:
                if (apdu.apduInvokeID == tr.invokeID) and (apdu.pduSource == tr.pdu_address):
                    break
            else:
                return
            # send the packet on to the transaction
            tr.confirmation(apdu)
        else:
            for tr in self.serverTransactions:
                if (apdu.apduInvokeID == tr.invokeID) and (apdu.pduSource == tr.pdu_address):
                    break
            else:
                return
            # send the packet on to the transaction
            tr.indication(apdu)
    else:
raise RuntimeError("invalid APDU (8)") | Packets coming up the stack are APDU's. | Below is the the instruction that describes the task:
### Input:
Packets coming up the stack are APDU's.
### Response:
def confirmation(self, pdu):
"""Packets coming up the stack are APDU's."""
if _debug: StateMachineAccessPoint._debug("confirmation %r", pdu)
# check device communication control
if self.dccEnableDisable == 'enable':
if _debug: StateMachineAccessPoint._debug(" - communications enabled")
elif self.dccEnableDisable == 'disable':
if (pdu.apduType == 0) and (pdu.apduService == 17):
if _debug: StateMachineAccessPoint._debug(" - continue with DCC request")
elif (pdu.apduType == 0) and (pdu.apduService == 20):
if _debug: StateMachineAccessPoint._debug(" - continue with reinitialize device")
elif (pdu.apduType == 1) and (pdu.apduService == 8):
if _debug: StateMachineAccessPoint._debug(" - continue with Who-Is")
else:
if _debug: StateMachineAccessPoint._debug(" - not a Who-Is, dropped")
return
elif self.dccEnableDisable == 'disableInitiation':
if _debug: StateMachineAccessPoint._debug(" - initiation disabled")
# make a more focused interpretation
atype = apdu_types.get(pdu.apduType)
if not atype:
StateMachineAccessPoint._warning(" - unknown apduType: %r", pdu.apduType)
return
# decode it
apdu = atype()
apdu.decode(pdu)
if _debug: StateMachineAccessPoint._debug(" - apdu: %r", apdu)
if isinstance(apdu, ConfirmedRequestPDU):
# find duplicates of this request
for tr in self.serverTransactions:
if (apdu.apduInvokeID == tr.invokeID) and (apdu.pduSource == tr.pdu_address):
break
else:
# build a server transaction
tr = ServerSSM(self, apdu.pduSource)
# add it to our transactions to track it
self.serverTransactions.append(tr)
# let it run with the apdu
tr.indication(apdu)
elif isinstance(apdu, UnconfirmedRequestPDU):
# deliver directly to the application
self.sap_request(apdu)
elif isinstance(apdu, SimpleAckPDU) \
or isinstance(apdu, ComplexAckPDU) \
or isinstance(apdu, ErrorPDU) \
or isinstance(apdu, RejectPDU):
# find the client transaction this is acking
for tr in self.clientTransactions:
if (apdu.apduInvokeID == tr.invokeID) and (apdu.pduSource == tr.pdu_address):
break
else:
return
# send the packet on to the transaction
tr.confirmation(apdu)
elif isinstance(apdu, AbortPDU):
# find the transaction being aborted
if apdu.apduSrv:
for tr in self.clientTransactions:
if (apdu.apduInvokeID == tr.invokeID) and (apdu.pduSource == tr.pdu_address):
break
else:
return
# send the packet on to the transaction
tr.confirmation(apdu)
else:
for tr in self.serverTransactions:
if (apdu.apduInvokeID == tr.invokeID) and (apdu.pduSource == tr.pdu_address):
break
else:
return
# send the packet on to the transaction
tr.indication(apdu)
elif isinstance(apdu, SegmentAckPDU):
# find the transaction being aborted
if apdu.apduSrv:
for tr in self.clientTransactions:
if (apdu.apduInvokeID == tr.invokeID) and (apdu.pduSource == tr.pdu_address):
break
else:
return
# send the packet on to the transaction
tr.confirmation(apdu)
else:
for tr in self.serverTransactions:
if (apdu.apduInvokeID == tr.invokeID) and (apdu.pduSource == tr.pdu_address):
break
else:
return
# send the packet on to the transaction
tr.indication(apdu)
else:
raise RuntimeError("invalid APDU (8)") |
def seconds_to_hms_verbose(t):
    """
    Convert a duration in seconds to a verbose string such as
    ``'2 hours 8 minutes 30 seconds'``.

    Args:
        t: duration in seconds (int or float; fractions are truncated).

    Returns:
        str: human-readable duration. Zero-valued units are omitted, and
        a zero duration yields ``'0 seconds'``.
    """
    hours = int(t / 3600)
    mins = int((t / 60) % 60)
    secs = int(t % 60)
    parts = []
    for value, unit in ((hours, 'hour'), (mins, 'minute'), (secs, 'second')):
        if value > 0:
            # Fixed: the original concatenated int + str ('hours + " hour"'),
            # raising TypeError for any nonzero component.
            parts.append('%d %s%s' % (value, unit, 's' if value > 1 else ''))
    # Original returned whitespace for t == 0; return a sensible string.
    return ' '.join(parts) if parts else '0 seconds'
### Input:
Converts seconds float to 'H hours 8 minutes, 30 seconds' format
### Response:
def seconds_to_hms_verbose(t):
"""
Converts seconds float to 'H hours 8 minutes, 30 seconds' format
"""
hours = int((t / 3600))
mins = int((t / 60) % 60)
secs = int(t % 60)
return ' '.join([
(hours + ' hour' + ('s' if hours > 1 else '')) if hours > 0 else '',
(mins + ' minute' + ('s' if mins > 1 else '')) if mins > 0 else '',
(secs + ' second' + ('s' if secs > 1 else '')) if secs > 0 else ''
]) |
def handle(self, sock, read_data, path, headers):
    "Just waits, and checks for other actions to replace us"
    # Poll until either another action type is configured for this host
    # or the overall timeout elapses.
    for i in range(self.timeout // self.check_interval):
        # Sleep first
        eventlet.sleep(self.check_interval)
        # Check for another action
        action = self.balancer.resolve_host(self.host)
        if not isinstance(action, Spin):
            # Hand the connection to the replacement action.
            return action.handle(sock, read_data, path, headers)
    # OK, nothing happened, so give up.
    action = Static(self.balancer, self.host, self.matched_host, type="timeout")
return action.handle(sock, read_data, path, headers) | Just waits, and checks for other actions to replace us | Below is the the instruction that describes the task:
### Input:
Just waits, and checks for other actions to replace us
### Response:
def handle(self, sock, read_data, path, headers):
    """Sleep in short intervals, watching for another action to replace us.

    If the balancer resolves the host to something other than a Spin before
    the timeout elapses, delegate to that action; otherwise fall back to a
    Static "timeout" page.
    """
    attempts = self.timeout // self.check_interval
    for _ in range(attempts):
        # Sleep first, then poll for a replacement action.
        eventlet.sleep(self.check_interval)
        resolved = self.balancer.resolve_host(self.host)
        if not isinstance(resolved, Spin):
            return resolved.handle(sock, read_data, path, headers)
    # Nothing replaced us within the timeout; give up with a static page.
    fallback = Static(self.balancer, self.host, self.matched_host, type="timeout")
    return fallback.handle(sock, read_data, path, headers)
def nanvl(col1, col2):
"""Returns col1 if it is not NaN, or col2 if col1 is NaN.
Both inputs should be floating point columns (:class:`DoubleType` or :class:`FloatType`).
>>> df = spark.createDataFrame([(1.0, float('nan')), (float('nan'), 2.0)], ("a", "b"))
>>> df.select(nanvl("a", "b").alias("r1"), nanvl(df.a, df.b).alias("r2")).collect()
[Row(r1=1.0, r2=1.0), Row(r1=2.0, r2=2.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.nanvl(_to_java_column(col1), _to_java_column(col2))) | Returns col1 if it is not NaN, or col2 if col1 is NaN.
Both inputs should be floating point columns (:class:`DoubleType` or :class:`FloatType`).
>>> df = spark.createDataFrame([(1.0, float('nan')), (float('nan'), 2.0)], ("a", "b"))
>>> df.select(nanvl("a", "b").alias("r1"), nanvl(df.a, df.b).alias("r2")).collect()
[Row(r1=1.0, r2=1.0), Row(r1=2.0, r2=2.0)] | Below is the instruction that describes the task:
### Input:
Returns col1 if it is not NaN, or col2 if col1 is NaN.
Both inputs should be floating point columns (:class:`DoubleType` or :class:`FloatType`).
>>> df = spark.createDataFrame([(1.0, float('nan')), (float('nan'), 2.0)], ("a", "b"))
>>> df.select(nanvl("a", "b").alias("r1"), nanvl(df.a, df.b).alias("r2")).collect()
[Row(r1=1.0, r2=1.0), Row(r1=2.0, r2=2.0)]
### Response:
def nanvl(col1, col2):
    """Returns col1 if it is not NaN, or col2 if col1 is NaN.
    Both inputs should be floating point columns (:class:`DoubleType` or :class:`FloatType`).
    >>> df = spark.createDataFrame([(1.0, float('nan')), (float('nan'), 2.0)], ("a", "b"))
    >>> df.select(nanvl("a", "b").alias("r1"), nanvl(df.a, df.b).alias("r2")).collect()
    [Row(r1=1.0, r2=1.0), Row(r1=2.0, r2=2.0)]
    """
    sc = SparkContext._active_spark_context
    # Delegate to the JVM-side implementation and wrap the Java column.
    jcol = sc._jvm.functions.nanvl(_to_java_column(col1), _to_java_column(col2))
    return Column(jcol)
def get_info_by_tail_number(self, tail_number, page=1, limit=100):
"""Fetch the details of a particular aircraft by its tail number.
This method can be used to get the details of a particular aircraft by its tail number.
Details include the serial number, age etc along with links to the images of the aircraft.
It checks the user authentication and returns the data accordingly.
Args:
tail_number (str): The tail number, e.g. VT-ANL
page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data
limit (int): Optional limit on number of records returned
Returns:
A list of dicts with the data; one dict for each row of data from flightradar24
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_info_by_flight_number('VT-ANL')
f.get_info_by_flight_number('VT-ANL',page=1,limit=10)
"""
url = REG_BASE.format(tail_number, str(self.AUTH_TOKEN), page, limit)
return self._fr24.get_aircraft_data(url) | Fetch the details of a particular aircraft by its tail number.
This method can be used to get the details of a particular aircraft by its tail number.
Details include the serial number, age etc along with links to the images of the aircraft.
It checks the user authentication and returns the data accordingly.
Args:
tail_number (str): The tail number, e.g. VT-ANL
page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data
limit (int): Optional limit on number of records returned
Returns:
A list of dicts with the data; one dict for each row of data from flightradar24
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_info_by_flight_number('VT-ANL')
f.get_info_by_flight_number('VT-ANL',page=1,limit=10) | Below is the instruction that describes the task:
### Input:
Fetch the details of a particular aircraft by its tail number.
This method can be used to get the details of a particular aircraft by its tail number.
Details include the serial number, age etc along with links to the images of the aircraft.
It checks the user authentication and returns the data accordingly.
Args:
tail_number (str): The tail number, e.g. VT-ANL
page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data
limit (int): Optional limit on number of records returned
Returns:
A list of dicts with the data; one dict for each row of data from flightradar24
Example::
from pyflightdata import FlightData
f=FlightData()
#optional login
f.login(myemail,mypassword)
f.get_info_by_flight_number('VT-ANL')
f.get_info_by_flight_number('VT-ANL',page=1,limit=10)
### Response:
def get_info_by_tail_number(self, tail_number, page=1, limit=100):
    """Fetch the details of a particular aircraft by its tail number.
    This method can be used to get the details of a particular aircraft by its tail number.
    Details include the serial number, age etc along with links to the images of the aircraft.
    It checks the user authentication and returns the data accordingly.
    Args:
        tail_number (str): The tail number, e.g. VT-ANL
        page (int): Optional page number; for users who are on a plan with flightradar24 they can pass in higher page numbers to get more data
        limit (int): Optional limit on number of records returned
    Returns:
        A list of dicts with the data; one dict for each row of data from flightradar24
    Example::
        from pyflightdata import FlightData
        f=FlightData()
        #optional login
        f.login(myemail,mypassword)
        f.get_info_by_tail_number('VT-ANL')
        f.get_info_by_tail_number('VT-ANL',page=1,limit=10)
    """
    # Build the registration-lookup URL; AUTH_TOKEN is empty for anonymous use.
    url = REG_BASE.format(tail_number, str(self.AUTH_TOKEN), page, limit)
    return self._fr24.get_aircraft_data(url)
def _expand_nbest_translation(translation: Translation) -> List[Translation]:
"""
Expand nbest translations in a single Translation object to one Translation
object per nbest translation.
:param translation: A Translation object.
:return: A list of Translation objects.
"""
nbest_list = [] # type = List[Translation]
for target_ids, attention_matrix, score in zip(translation.nbest_translations.target_ids_list,
translation.nbest_translations.attention_matrices,
translation.nbest_translations.scores):
nbest_list.append(Translation(target_ids, attention_matrix, score, translation.beam_histories,
estimated_reference_length=translation.estimated_reference_length))
return nbest_list | Expand nbest translations in a single Translation object to one Translation
object per nbest translation.
:param translation: A Translation object.
:return: A list of Translation objects. | Below is the instruction that describes the task:
### Input:
Expand nbest translations in a single Translation object to one Translation
object per nbest translation.
:param translation: A Translation object.
:return: A list of Translation objects.
### Response:
def _expand_nbest_translation(translation: Translation) -> List[Translation]:
    """
    Expand the nbest translations held by a single Translation object into
    one Translation object per nbest entry.
    :param translation: A Translation object.
    :return: A list of Translation objects.
    """
    nbest = translation.nbest_translations
    return [Translation(target_ids, attention_matrix, score,
                        translation.beam_histories,
                        estimated_reference_length=translation.estimated_reference_length)
            for target_ids, attention_matrix, score in zip(nbest.target_ids_list,
                                                           nbest.attention_matrices,
                                                           nbest.scores)]
def create_logger(name):
"""
Create a logger object with the given name.
If this is the first time that we call this method, then initialize the
formatter.
"""
base = logging.getLogger("cleverhans")
if len(base.handlers) == 0:
ch = logging.StreamHandler()
formatter = logging.Formatter('[%(levelname)s %(asctime)s %(name)s] ' +
'%(message)s')
ch.setFormatter(formatter)
base.addHandler(ch)
return base | Create a logger object with the given name.
If this is the first time that we call this method, then initialize the
formatter. | Below is the instruction that describes the task:
### Input:
Create a logger object with the given name.
If this is the first time that we call this method, then initialize the
formatter.
### Response:
def create_logger(name):
    """
    Create a logger object with the given name.
    If this is the first time that we call this method, then initialize the
    formatter.
    """
    logger = logging.getLogger("cleverhans")
    if not logger.handlers:
        # Attach the project-wide stream handler exactly once.
        handler = logging.StreamHandler()
        handler.setFormatter(
            logging.Formatter('[%(levelname)s %(asctime)s %(name)s] %(message)s'))
        logger.addHandler(handler)
    return logger
def _folder_item_uncertainty(self, analysis_brain, item):
"""Fills the analysis' uncertainty to the item passed in.
:param analysis_brain: Brain that represents an analysis
:param item: analysis' dictionary counterpart that represents a row
"""
item["Uncertainty"] = ""
if not self.has_permission(ViewResults, analysis_brain):
return
result = analysis_brain.getResult
obj = self.get_object(analysis_brain)
formatted = format_uncertainty(obj, result, decimalmark=self.dmk,
sciformat=int(self.scinot))
if formatted:
item["Uncertainty"] = formatted
else:
item["Uncertainty"] = obj.getUncertainty(result)
if self.is_uncertainty_edition_allowed(analysis_brain):
item["allow_edit"].append("Uncertainty") | Fills the analysis' uncertainty to the item passed in.
:param analysis_brain: Brain that represents an analysis
:param item: analysis' dictionary counterpart that represents a row | Below is the instruction that describes the task:
### Input:
Fills the analysis' uncertainty to the item passed in.
:param analysis_brain: Brain that represents an analysis
:param item: analysis' dictionary counterpart that represents a row
### Response:
def _folder_item_uncertainty(self, analysis_brain, item):
    """Fill the uncertainty of the analysis into the given row item.
    :param analysis_brain: Brain that represents an analysis
    :param item: analysis' dictionary counterpart that represents a row
    """
    item["Uncertainty"] = ""
    # Users without ViewResults permission never see uncertainty values.
    if not self.has_permission(ViewResults, analysis_brain):
        return
    result = analysis_brain.getResult
    analysis = self.get_object(analysis_brain)
    formatted = format_uncertainty(analysis, result, decimalmark=self.dmk,
                                   sciformat=int(self.scinot))
    # Fall back to the raw uncertainty when no formatted value is available.
    item["Uncertainty"] = formatted or analysis.getUncertainty(result)
    if self.is_uncertainty_edition_allowed(analysis_brain):
        item["allow_edit"].append("Uncertainty")
def load(self, *args, **kwargs):
""" Imports metadata by the use of HTTP GET.
If the fingerprint is known the file will be checked for
compliance before it is imported.
"""
response = self.http.send(self.url)
if response.status_code == 200:
_txt = response.content
return self.parse_and_check_signature(_txt)
else:
logger.info("Response status: %s", response.status_code)
raise SourceNotFound(self.url) | Imports metadata by the use of HTTP GET.
If the fingerprint is known the file will be checked for
compliance before it is imported. | Below is the instruction that describes the task:
### Input:
Imports metadata by the use of HTTP GET.
If the fingerprint is known the file will be checked for
compliance before it is imported.
### Response:
def load(self, *args, **kwargs):
    """ Imports metadata by the use of HTTP GET.
    If the fingerprint is known the file will be checked for
    compliance before it is imported.
    """
    response = self.http.send(self.url)
    if response.status_code != 200:
        # Anything but a 200 means the metadata source is unavailable.
        logger.info("Response status: %s", response.status_code)
        raise SourceNotFound(self.url)
    return self.parse_and_check_signature(response.content)
def string(self, writesize=None):
'''
Looks like a file handle
'''
if not self.finished:
self.finished = True
return self.content
return '' | Looks like a file handle | Below is the instruction that describes the task:
### Input:
Looks like a file handle
### Response:
def string(self, writesize=None):
    '''
    Looks like a file handle: yields the whole content on the first call,
    then empty strings afterwards.
    '''
    if self.finished:
        return ''
    self.finished = True
    return self.content
def are_equivalent(*args, **kwargs):
"""Indicate if arguments passed to this function are equivalent.
.. hint::
This checker operates recursively on the members contained within iterables
and :class:`dict <python:dict>` objects.
.. caution::
If you only pass one argument to this checker - even if it is an iterable -
the checker will *always* return ``True``.
To evaluate members of an iterable for equivalence, you should instead
unpack the iterable into the function like so:
.. code-block:: python
obj = [1, 1, 1, 2]
result = are_equivalent(*obj)
# Will return ``False`` by unpacking and evaluating the iterable's members
result = are_equivalent(obj)
# Will always return True
:param args: One or more values, passed as positional arguments.
:returns: ``True`` if ``args`` are equivalent, and ``False`` if not.
:rtype: :class:`bool <python:bool>`
:raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates
keyword parameters passed to the underlying validator
"""
if len(args) == 1:
return True
first_item = args[0]
for item in args[1:]:
if type(item) != type(first_item): # pylint: disable=C0123
return False
if isinstance(item, dict):
if not are_dicts_equivalent(item, first_item):
return False
elif hasattr(item, '__iter__') and not isinstance(item, (str, bytes, dict)):
if len(item) != len(first_item):
return False
for value in item:
if value not in first_item:
return False
for value in first_item:
if value not in item:
return False
else:
if item != first_item:
return False
return True | Indicate if arguments passed to this function are equivalent.
.. hint::
This checker operates recursively on the members contained within iterables
and :class:`dict <python:dict>` objects.
.. caution::
If you only pass one argument to this checker - even if it is an iterable -
the checker will *always* return ``True``.
To evaluate members of an iterable for equivalence, you should instead
unpack the iterable into the function like so:
.. code-block:: python
obj = [1, 1, 1, 2]
result = are_equivalent(*obj)
# Will return ``False`` by unpacking and evaluating the iterable's members
result = are_equivalent(obj)
# Will always return True
:param args: One or more values, passed as positional arguments.
:returns: ``True`` if ``args`` are equivalent, and ``False`` if not.
:rtype: :class:`bool <python:bool>`
:raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates
keyword parameters passed to the underlying validator | Below is the instruction that describes the task:
### Input:
Indicate if arguments passed to this function are equivalent.
.. hint::
This checker operates recursively on the members contained within iterables
and :class:`dict <python:dict>` objects.
.. caution::
If you only pass one argument to this checker - even if it is an iterable -
the checker will *always* return ``True``.
To evaluate members of an iterable for equivalence, you should instead
unpack the iterable into the function like so:
.. code-block:: python
obj = [1, 1, 1, 2]
result = are_equivalent(*obj)
# Will return ``False`` by unpacking and evaluating the iterable's members
result = are_equivalent(obj)
# Will always return True
:param args: One or more values, passed as positional arguments.
:returns: ``True`` if ``args`` are equivalent, and ``False`` if not.
:rtype: :class:`bool <python:bool>`
:raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates
keyword parameters passed to the underlying validator
### Response:
def are_equivalent(*args, **kwargs):
    """Indicate if arguments passed to this function are equivalent.
    .. hint::
      This checker operates recursively on the members contained within iterables
      and :class:`dict <python:dict>` objects.
    .. caution::
      If you only pass one argument to this checker - even if it is an iterable -
      the checker will *always* return ``True``.
      To evaluate members of an iterable for equivalence, you should instead
      unpack the iterable into the function like so:
      .. code-block:: python
        obj = [1, 1, 1, 2]
        result = are_equivalent(*obj)
        # Will return ``False`` by unpacking and evaluating the iterable's members
        result = are_equivalent(obj)
        # Will always return True
    :param args: One or more values, passed as positional arguments.
    :returns: ``True`` if ``args`` are equivalent, and ``False`` if not.
    :rtype: :class:`bool <python:bool>`
    :raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates
      keyword parameters passed to the underlying validator
    """
    # Zero or one argument is trivially equivalent. The `< 2` guard also
    # fixes an IndexError the previous `== 1` check allowed when the
    # function was called with no arguments at all.
    if len(args) < 2:
        return True
    first_item = args[0]
    for item in args[1:]:
        # Values of different types are never equivalent.
        if type(item) != type(first_item):  # pylint: disable=C0123
            return False
        if isinstance(item, dict):
            if not are_dicts_equivalent(item, first_item):
                return False
        elif hasattr(item, '__iter__') and not isinstance(item, (str, bytes, dict)):
            # Iterables are equivalent when they have the same length and
            # mutual membership; element order is deliberately ignored.
            if len(item) != len(first_item):
                return False
            for value in item:
                if value not in first_item:
                    return False
            for value in first_item:
                if value not in item:
                    return False
        else:
            if item != first_item:
                return False
    return True
def check_if_song_name(self,html):
'''
Returns true if user entered artist or movie name
'''
soup=BeautifulSoup(html)
a_list=soup.findAll('a','touch')
#print a_list
text=[str(x) for x in a_list]
text=''.join(text)
text=text.lower()
string1='download in 48 kbps'
string2='download in 128 kbps'
string3='download in 320 kbps'
href=''
if string3 in text:
#print 'Downloading in 320 kbps'
href=a_list[2].get('href')
elif string2 in text:
#print 'Downloading in 128 kbps'
href=a_list[1].get('href')
elif string1 in text:
#print 'Downloading in 48 kbps'
href=a_list[0].get('href')
else:
return (True,'nothing')
return (False,href) | Returns true if user entered artist or movie name | Below is the instruction that describes the task:
### Input:
Returns true if user entered artist or movie name
### Response:
def check_if_song_name(self, html):
    '''
    Returns true if user entered artist or movie name
    '''
    soup = BeautifulSoup(html)
    a_list = soup.findAll('a', 'touch')
    combined = ''.join(str(anchor) for anchor in a_list).lower()
    # Pick the best available bitrate link, highest quality first; the
    # anchor index corresponds to the bitrate position on the page.
    quality_markers = (
        (2, 'download in 320 kbps'),
        (1, 'download in 128 kbps'),
        (0, 'download in 48 kbps'),
    )
    for index, marker in quality_markers:
        if marker in combined:
            return (False, a_list[index].get('href'))
    # No download links found: the query was an artist/movie name.
    return (True, 'nothing')
def get_enroll(self):
"""Returns new enroll seed"""
devices = [DeviceRegistration.wrap(device) for device in self.__get_u2f_devices()]
enroll = start_register(self.__appid, devices)
enroll['status'] = 'ok'
session['_u2f_enroll_'] = enroll.json
return enroll | Returns new enroll seed | Below is the instruction that describes the task:
### Input:
Returns new enroll seed
### Response:
def get_enroll(self):
    """Returns new enroll seed"""
    registered = [DeviceRegistration.wrap(device)
                  for device in self.__get_u2f_devices()]
    enroll = start_register(self.__appid, registered)
    enroll['status'] = 'ok'
    # Stash the challenge in the session so the verification step can find it.
    session['_u2f_enroll_'] = enroll.json
    return enroll
def create_mysql_oursql(username, password, host, port, database, **kwargs): # pragma: no cover
"""
create an engine connected to a mysql database using oursql.
"""
return create_engine(
_create_mysql_oursql(username, password, host, port, database),
**kwargs
) | create an engine connected to a mysql database using oursql. | Below is the instruction that describes the task:
### Input:
create an engine connected to a mysql database using oursql.
### Response:
def create_mysql_oursql(username, password, host, port, database, **kwargs):  # pragma: no cover
    """
    create an engine connected to a mysql database using oursql.
    """
    # Build the connection URL first, then hand any extra engine options through.
    url = _create_mysql_oursql(username, password, host, port, database)
    return create_engine(url, **kwargs)
def field_schema(
field: Field,
*,
by_alias: bool = True,
model_name_map: Dict[Type['main.BaseModel'], str],
ref_prefix: Optional[str] = None,
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""
Process a Pydantic field and return a tuple with a JSON Schema for it as the first item.
Also return a dictionary of definitions with models as keys and their schemas as values. If the passed field
is a model and has sub-models, and those sub-models don't have overrides (as ``title``, ``default``, etc), they
will be included in the definitions and referenced in the schema instead of included recursively.
:param field: a Pydantic ``Field``
:param by_alias: use the defined alias (if any) in the returned schema
:param model_name_map: used to generate the JSON Schema references to other models included in the definitions
:param ref_prefix: the JSON Pointer prefix to use for references to other schemas, if None, the default of
#/definitions/ will be used
:return: tuple of the schema for this field and additional definitions
"""
ref_prefix = ref_prefix or default_prefix
schema_overrides = False
schema = cast('Schema', field.schema)
s = dict(title=schema.title or field.alias.title())
if schema.title:
schema_overrides = True
if schema.description:
s['description'] = schema.description
schema_overrides = True
if not field.required and field.default is not None:
s['default'] = encode_default(field.default)
schema_overrides = True
validation_schema = get_field_schema_validations(field)
if validation_schema:
s.update(validation_schema)
schema_overrides = True
f_schema, f_definitions = field_type_schema(
field,
by_alias=by_alias,
model_name_map=model_name_map,
schema_overrides=schema_overrides,
ref_prefix=ref_prefix,
)
# $ref will only be returned when there are no schema_overrides
if '$ref' in f_schema:
return f_schema, f_definitions
else:
s.update(f_schema)
return s, f_definitions | Process a Pydantic field and return a tuple with a JSON Schema for it as the first item.
Also return a dictionary of definitions with models as keys and their schemas as values. If the passed field
is a model and has sub-models, and those sub-models don't have overrides (as ``title``, ``default``, etc), they
will be included in the definitions and referenced in the schema instead of included recursively.
:param field: a Pydantic ``Field``
:param by_alias: use the defined alias (if any) in the returned schema
:param model_name_map: used to generate the JSON Schema references to other models included in the definitions
:param ref_prefix: the JSON Pointer prefix to use for references to other schemas, if None, the default of
#/definitions/ will be used
:return: tuple of the schema for this field and additional definitions | Below is the instruction that describes the task:
### Input:
Process a Pydantic field and return a tuple with a JSON Schema for it as the first item.
Also return a dictionary of definitions with models as keys and their schemas as values. If the passed field
is a model and has sub-models, and those sub-models don't have overrides (as ``title``, ``default``, etc), they
will be included in the definitions and referenced in the schema instead of included recursively.
:param field: a Pydantic ``Field``
:param by_alias: use the defined alias (if any) in the returned schema
:param model_name_map: used to generate the JSON Schema references to other models included in the definitions
:param ref_prefix: the JSON Pointer prefix to use for references to other schemas, if None, the default of
#/definitions/ will be used
:return: tuple of the schema for this field and additional definitions
### Response:
def field_schema(
    field: Field,
    *,
    by_alias: bool = True,
    model_name_map: Dict[Type['main.BaseModel'], str],
    ref_prefix: Optional[str] = None,
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
    """
    Process a Pydantic field and return a tuple with a JSON Schema for it as the first item.
    Also return a dictionary of definitions with models as keys and their schemas as values. If the passed field
    is a model and has sub-models, and those sub-models don't have overrides (as ``title``, ``default``, etc), they
    will be included in the definitions and referenced in the schema instead of included recursively.
    :param field: a Pydantic ``Field``
    :param by_alias: use the defined alias (if any) in the returned schema
    :param model_name_map: used to generate the JSON Schema references to other models included in the definitions
    :param ref_prefix: the JSON Pointer prefix to use for references to other schemas, if None, the default of
      #/definitions/ will be used
    :return: tuple of the schema for this field and additional definitions
    """
    ref_prefix = ref_prefix or default_prefix
    # Track whether the field carries any customisation (title, description,
    # default, validation constraints). If it does, a bare $ref cannot
    # represent it and the extra keys must be merged into the schema.
    schema_overrides = False
    schema = cast('Schema', field.schema)
    # A title is always emitted; fall back to the title-cased field alias.
    s = dict(title=schema.title or field.alias.title())
    if schema.title:
        schema_overrides = True
    if schema.description:
        s['description'] = schema.description
        schema_overrides = True
    # Only non-required fields with an explicit default get a 'default' key.
    if not field.required and field.default is not None:
        s['default'] = encode_default(field.default)
        schema_overrides = True
    validation_schema = get_field_schema_validations(field)
    if validation_schema:
        s.update(validation_schema)
        schema_overrides = True
    f_schema, f_definitions = field_type_schema(
        field,
        by_alias=by_alias,
        model_name_map=model_name_map,
        schema_overrides=schema_overrides,
        ref_prefix=ref_prefix,
    )
    # $ref will only be returned when there are no schema_overrides
    if '$ref' in f_schema:
        return f_schema, f_definitions
    else:
        s.update(f_schema)
        return s, f_definitions
def write_offsets_to_file(cls, json_file_name, consumer_offsets_data):
"""Save built consumer-offsets data to given json file."""
# Save consumer-offsets to file
with open(json_file_name, "w") as json_file:
try:
json.dump(consumer_offsets_data, json_file)
except ValueError:
print("Error: Invalid json data {data}".format(data=consumer_offsets_data))
raise
print("Consumer offset data saved in json-file {file}".format(file=json_file_name)) | Save built consumer-offsets data to given json file. | Below is the instruction that describes the task:
### Input:
Save built consumer-offsets data to given json file.
### Response:
def write_offsets_to_file(cls, json_file_name, consumer_offsets_data):
    """Save built consumer-offsets data to given json file.

    :param json_file_name: path of the json file to (over)write
    :param consumer_offsets_data: json-serializable offsets structure
    :raises TypeError: re-raised when the data contains unserializable objects
    :raises ValueError: re-raised for e.g. circular references in the data
    """
    # Save consumer-offsets to file
    with open(json_file_name, "w") as json_file:
        try:
            json.dump(consumer_offsets_data, json_file)
        except (TypeError, ValueError):
            # json.dump raises TypeError for unserializable objects and
            # ValueError for circular structures; the old code only caught
            # ValueError and so missed the common unserializable case.
            print("Error: Invalid json data {data}".format(data=consumer_offsets_data))
            raise
    print("Consumer offset data saved in json-file {file}".format(file=json_file_name))
def run_periodfinding(simbasedir,
pfmethods=('gls','pdm','bls'),
pfkwargs=({},{},{'startp':1.0,'maxtransitduration':0.3}),
getblssnr=False,
sigclip=5.0,
nperiodworkers=10,
ncontrolworkers=4,
liststartindex=None,
listmaxobjects=None):
'''This runs periodfinding using several period-finders on a collection of
fake LCs.
As a rough benchmark, 25000 fake LCs with 10000--50000 points per LC take
about 26 days in total to run on an invocation of this function using
GLS+PDM+BLS and 10 periodworkers and 4 controlworkers (so all 40 'cores') on
a 2 x Xeon E5-2660v3 machine.
Parameters
----------
pfmethods : sequence of str
This is used to specify which periodfinders to run. These must be in the
`lcproc.periodsearch.PFMETHODS` dict.
pfkwargs : sequence of dict
This is used to provide optional kwargs to the period-finders.
getblssnr : bool
If this is True, will run BLS SNR calculations for each object and
magcol. This takes a while to run, so it's disabled (False) by default.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
nperiodworkers : int
This is the number of parallel period-finding worker processes to use.
ncontrolworkers : int
This is the number of parallel period-finding control workers to
use. Each control worker will launch `nperiodworkers` worker processes.
liststartindex : int
The starting index of processing. This refers to the filename list
generated by running `glob.glob` on the fake LCs in `simbasedir`.
maxobjects : int
The maximum number of objects to process in this run. Use this with
`liststartindex` to effectively distribute working on a large list of
input light curves over several sessions or machines.
Returns
-------
str
The path to the output summary pickle produced by
`lcproc.periodsearch.parallel_pf`
'''
# get the info from the simbasedir
with open(os.path.join(simbasedir, 'fakelcs-info.pkl'),'rb') as infd:
siminfo = pickle.load(infd)
lcfpaths = siminfo['lcfpath']
pfdir = os.path.join(simbasedir,'periodfinding')
# get the column defs for the fakelcs
timecols = siminfo['timecols']
magcols = siminfo['magcols']
errcols = siminfo['errcols']
# register the fakelc pklc as a custom lcproc format
# now we should be able to use all lcproc functions correctly
fakelc_formatkey = 'fake-%s' % siminfo['lcformat']
lcproc.register_lcformat(
fakelc_formatkey,
'*-fakelc.pkl',
timecols,
magcols,
errcols,
'astrobase.lcproc',
'_read_pklc',
magsarefluxes=siminfo['magsarefluxes']
)
if liststartindex:
lcfpaths = lcfpaths[liststartindex:]
if listmaxobjects:
lcfpaths = lcfpaths[:listmaxobjects]
pfinfo = periodsearch.parallel_pf(lcfpaths,
pfdir,
lcformat=fakelc_formatkey,
pfmethods=pfmethods,
pfkwargs=pfkwargs,
getblssnr=getblssnr,
sigclip=sigclip,
nperiodworkers=nperiodworkers,
ncontrolworkers=ncontrolworkers)
with open(os.path.join(simbasedir,
'fakelc-periodsearch.pkl'),'wb') as outfd:
pickle.dump(pfinfo, outfd, pickle.HIGHEST_PROTOCOL)
return os.path.join(simbasedir,'fakelc-periodsearch.pkl') | This runs periodfinding using several period-finders on a collection of
fake LCs.
As a rough benchmark, 25000 fake LCs with 10000--50000 points per LC take
about 26 days in total to run on an invocation of this function using
GLS+PDM+BLS and 10 periodworkers and 4 controlworkers (so all 40 'cores') on
a 2 x Xeon E5-2660v3 machine.
Parameters
----------
pfmethods : sequence of str
This is used to specify which periodfinders to run. These must be in the
`lcproc.periodsearch.PFMETHODS` dict.
pfkwargs : sequence of dict
This is used to provide optional kwargs to the period-finders.
getblssnr : bool
If this is True, will run BLS SNR calculations for each object and
magcol. This takes a while to run, so it's disabled (False) by default.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
nperiodworkers : int
This is the number of parallel period-finding worker processes to use.
ncontrolworkers : int
This is the number of parallel period-finding control workers to
use. Each control worker will launch `nperiodworkers` worker processes.
liststartindex : int
The starting index of processing. This refers to the filename list
generated by running `glob.glob` on the fake LCs in `simbasedir`.
maxobjects : int
The maximum number of objects to process in this run. Use this with
`liststartindex` to effectively distribute working on a large list of
input light curves over several sessions or machines.
Returns
-------
str
The path to the output summary pickle produced by
`lcproc.periodsearch.parallel_pf` | Below is the instruction that describes the task:
### Input:
This runs periodfinding using several period-finders on a collection of
fake LCs.
As a rough benchmark, 25000 fake LCs with 10000--50000 points per LC take
about 26 days in total to run on an invocation of this function using
GLS+PDM+BLS and 10 periodworkers and 4 controlworkers (so all 40 'cores') on
a 2 x Xeon E5-2660v3 machine.
Parameters
----------
pfmethods : sequence of str
This is used to specify which periodfinders to run. These must be in the
`lcproc.periodsearch.PFMETHODS` dict.
pfkwargs : sequence of dict
This is used to provide optional kwargs to the period-finders.
getblssnr : bool
If this is True, will run BLS SNR calculations for each object and
magcol. This takes a while to run, so it's disabled (False) by default.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
nperiodworkers : int
This is the number of parallel period-finding worker processes to use.
ncontrolworkers : int
This is the number of parallel period-finding control workers to
use. Each control worker will launch `nperiodworkers` worker processes.
liststartindex : int
The starting index of processing. This refers to the filename list
generated by running `glob.glob` on the fake LCs in `simbasedir`.
maxobjects : int
The maximum number of objects to process in this run. Use this with
`liststartindex` to effectively distribute working on a large list of
input light curves over several sessions or machines.
Returns
-------
str
The path to the output summary pickle produced by
`lcproc.periodsearch.parallel_pf`
### Response:
def run_periodfinding(simbasedir,
pfmethods=('gls','pdm','bls'),
pfkwargs=({},{},{'startp':1.0,'maxtransitduration':0.3}),
getblssnr=False,
sigclip=5.0,
nperiodworkers=10,
ncontrolworkers=4,
liststartindex=None,
listmaxobjects=None):
'''This runs periodfinding using several period-finders on a collection of
fake LCs.
As a rough benchmark, 25000 fake LCs with 10000--50000 points per LC take
about 26 days in total to run on an invocation of this function using
GLS+PDM+BLS and 10 periodworkers and 4 controlworkers (so all 40 'cores') on
a 2 x Xeon E5-2660v3 machine.
Parameters
----------
pfmethods : sequence of str
This is used to specify which periodfinders to run. These must be in the
`lcproc.periodsearch.PFMETHODS` dict.
pfkwargs : sequence of dict
This is used to provide optional kwargs to the period-finders.
getblssnr : bool
If this is True, will run BLS SNR calculations for each object and
magcol. This takes a while to run, so it's disabled (False) by default.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
nperiodworkers : int
This is the number of parallel period-finding worker processes to use.
ncontrolworkers : int
This is the number of parallel period-finding control workers to
use. Each control worker will launch `nperiodworkers` worker processes.
liststartindex : int
The starting index of processing. This refers to the filename list
generated by running `glob.glob` on the fake LCs in `simbasedir`.
maxobjects : int
The maximum number of objects to process in this run. Use this with
`liststartindex` to effectively distribute working on a large list of
input light curves over several sessions or machines.
Returns
-------
str
The path to the output summary pickle produced by
`lcproc.periodsearch.parallel_pf`
'''
# get the info from the simbasedir
with open(os.path.join(simbasedir, 'fakelcs-info.pkl'),'rb') as infd:
siminfo = pickle.load(infd)
lcfpaths = siminfo['lcfpath']
pfdir = os.path.join(simbasedir,'periodfinding')
# get the column defs for the fakelcs
timecols = siminfo['timecols']
magcols = siminfo['magcols']
errcols = siminfo['errcols']
# register the fakelc pklc as a custom lcproc format
# now we should be able to use all lcproc functions correctly
fakelc_formatkey = 'fake-%s' % siminfo['lcformat']
lcproc.register_lcformat(
fakelc_formatkey,
'*-fakelc.pkl',
timecols,
magcols,
errcols,
'astrobase.lcproc',
'_read_pklc',
magsarefluxes=siminfo['magsarefluxes']
)
if liststartindex:
lcfpaths = lcfpaths[liststartindex:]
if listmaxobjects:
lcfpaths = lcfpaths[:listmaxobjects]
pfinfo = periodsearch.parallel_pf(lcfpaths,
pfdir,
lcformat=fakelc_formatkey,
pfmethods=pfmethods,
pfkwargs=pfkwargs,
getblssnr=getblssnr,
sigclip=sigclip,
nperiodworkers=nperiodworkers,
ncontrolworkers=ncontrolworkers)
with open(os.path.join(simbasedir,
'fakelc-periodsearch.pkl'),'wb') as outfd:
pickle.dump(pfinfo, outfd, pickle.HIGHEST_PROTOCOL)
return os.path.join(simbasedir,'fakelc-periodsearch.pkl') |
def version(rest):
"Get the version of pmxbot or one of its plugins"
pkg = rest.strip() or 'pmxbot'
if pkg.lower() == 'python':
return sys.version.split()[0]
return importlib_metadata.version(pkg) | Get the version of pmxbot or one of its plugins | Below is the instruction that describes the task:
### Input:
Get the version of pmxbot or one of its plugins
### Response:
def version(rest):
"Get the version of pmxbot or one of its plugins"
pkg = rest.strip() or 'pmxbot'
if pkg.lower() == 'python':
return sys.version.split()[0]
return importlib_metadata.version(pkg) |
def pos3(self):
''' Use pos-sc1-sc2 as POS '''
parts = [self.pos]
if self.sc1 and self.sc1 != '*':
parts.append(self.sc1)
if self.sc2 and self.sc2 != '*':
parts.append(self.sc2)
return '-'.join(parts) | Use pos-sc1-sc2 as POS | Below is the instruction that describes the task:
### Input:
Use pos-sc1-sc2 as POS
### Response:
def pos3(self):
''' Use pos-sc1-sc2 as POS '''
parts = [self.pos]
if self.sc1 and self.sc1 != '*':
parts.append(self.sc1)
if self.sc2 and self.sc2 != '*':
parts.append(self.sc2)
return '-'.join(parts) |
def start_search(self):
"""
Start the Gateway Search Request and return the address information
:rtype: (string,int)
:return: a tuple(string(IP),int(Port) when found or None when
timeout occurs
"""
self._asyncio_loop = asyncio.get_event_loop()
# Creating Broadcast Receiver
coroutine_listen = self._asyncio_loop.create_datagram_endpoint(
lambda: self.KNXSearchBroadcastReceiverProtocol(
self._process_response,
self._timeout_handling,
self._timeout,
self._asyncio_loop
), local_addr=(self._broadcast_ip_address, 0)
)
self._listener_transport, listener_protocol = \
self._asyncio_loop.run_until_complete(coroutine_listen)
# We are ready to fire the broadcast message
coroutine_broadcaster = self._asyncio_loop.create_datagram_endpoint(
lambda: self.KNXSearchBroadcastProtocol(
self._asyncio_loop,
self._listener_transport.get_extra_info('sockname')
[1]),
remote_addr=(self._broadcast_address, self._broadcast_port))
self._broadcaster_transport, broadcast_protocol = \
self._asyncio_loop.run_until_complete(coroutine_broadcaster)
# Waiting for all Broadcast receive or timeout
self._asyncio_loop.run_forever()
# Got Response or Timeout
if self._resolved_gateway_ip_address is None and \
self._resolved_gateway_ip_port is None:
LOGGER.debug("Gateway not found!")
return None
else:
LOGGER.debug("Gateway found at %s:%s",
self._resolved_gateway_ip_address,
self._resolved_gateway_ip_port)
return self._resolved_gateway_ip_address, \
self._resolved_gateway_ip_port | Start the Gateway Search Request and return the address information
:rtype: (string,int)
:return: a tuple(string(IP),int(Port) when found or None when
timeout occurs | Below is the instruction that describes the task:
### Input:
Start the Gateway Search Request and return the address information
:rtype: (string,int)
:return: a tuple(string(IP),int(Port) when found or None when
timeout occurs
### Response:
def start_search(self):
"""
Start the Gateway Search Request and return the address information
:rtype: (string,int)
:return: a tuple(string(IP),int(Port) when found or None when
timeout occurs
"""
self._asyncio_loop = asyncio.get_event_loop()
# Creating Broadcast Receiver
coroutine_listen = self._asyncio_loop.create_datagram_endpoint(
lambda: self.KNXSearchBroadcastReceiverProtocol(
self._process_response,
self._timeout_handling,
self._timeout,
self._asyncio_loop
), local_addr=(self._broadcast_ip_address, 0)
)
self._listener_transport, listener_protocol = \
self._asyncio_loop.run_until_complete(coroutine_listen)
# We are ready to fire the broadcast message
coroutine_broadcaster = self._asyncio_loop.create_datagram_endpoint(
lambda: self.KNXSearchBroadcastProtocol(
self._asyncio_loop,
self._listener_transport.get_extra_info('sockname')
[1]),
remote_addr=(self._broadcast_address, self._broadcast_port))
self._broadcaster_transport, broadcast_protocol = \
self._asyncio_loop.run_until_complete(coroutine_broadcaster)
# Waiting for all Broadcast receive or timeout
self._asyncio_loop.run_forever()
# Got Response or Timeout
if self._resolved_gateway_ip_address is None and \
self._resolved_gateway_ip_port is None:
LOGGER.debug("Gateway not found!")
return None
else:
LOGGER.debug("Gateway found at %s:%s",
self._resolved_gateway_ip_address,
self._resolved_gateway_ip_port)
return self._resolved_gateway_ip_address, \
self._resolved_gateway_ip_port |
def scale_points(df_points, scale=INKSCAPE_PPmm.magnitude, inplace=False):
'''
Translate points such that bounding box is anchored at (0, 0) and scale
``x`` and ``y`` columns of input frame by specified :data:`scale`.
Parameters
----------
df_points : pandas.DataFrame
Table of ``x``/``y`` point positions.
Must have at least the following columns:
- ``x``: x-coordinate
- ``y``: y-coordinate
scale : float, optional
Factor to scale points by.
By default, scale to millimeters based on Inkscape default of 90
pixels-per-inch.
scale : float, optional
Factor to scale points by.
in_place : bool, optional
If ``True``, input frame will be modified.
Otherwise, the scaled points are written to a new frame, leaving the
input frame unmodified.
Returns
-------
pandas.DataFrame
Input frame with the points translated such that bounding box is
anchored at (0, 0) and ``x`` and ``y`` values scaled by specified
:data:`scale`.
'''
if not inplace:
df_points = df_points.copy()
# Offset device, such that all coordinates are >= 0.
df_points.x -= df_points.x.min()
df_points.y -= df_points.y.min()
# Scale path coordinates.
df_points.x /= scale
df_points.y /= scale
return df_points | Translate points such that bounding box is anchored at (0, 0) and scale
``x`` and ``y`` columns of input frame by specified :data:`scale`.
Parameters
----------
df_points : pandas.DataFrame
Table of ``x``/``y`` point positions.
Must have at least the following columns:
- ``x``: x-coordinate
- ``y``: y-coordinate
scale : float, optional
Factor to scale points by.
By default, scale to millimeters based on Inkscape default of 90
pixels-per-inch.
scale : float, optional
Factor to scale points by.
in_place : bool, optional
If ``True``, input frame will be modified.
Otherwise, the scaled points are written to a new frame, leaving the
input frame unmodified.
Returns
-------
pandas.DataFrame
Input frame with the points translated such that bounding box is
anchored at (0, 0) and ``x`` and ``y`` values scaled by specified
    :data:`scale`. | Below is the instruction that describes the task:
### Input:
Translate points such that bounding box is anchored at (0, 0) and scale
``x`` and ``y`` columns of input frame by specified :data:`scale`.
Parameters
----------
df_points : pandas.DataFrame
Table of ``x``/``y`` point positions.
Must have at least the following columns:
- ``x``: x-coordinate
- ``y``: y-coordinate
scale : float, optional
Factor to scale points by.
By default, scale to millimeters based on Inkscape default of 90
pixels-per-inch.
scale : float, optional
Factor to scale points by.
in_place : bool, optional
If ``True``, input frame will be modified.
Otherwise, the scaled points are written to a new frame, leaving the
input frame unmodified.
Returns
-------
pandas.DataFrame
Input frame with the points translated such that bounding box is
anchored at (0, 0) and ``x`` and ``y`` values scaled by specified
:data:`scale`.
### Response:
def scale_points(df_points, scale=INKSCAPE_PPmm.magnitude, inplace=False):
'''
Translate points such that bounding box is anchored at (0, 0) and scale
``x`` and ``y`` columns of input frame by specified :data:`scale`.
Parameters
----------
df_points : pandas.DataFrame
Table of ``x``/``y`` point positions.
Must have at least the following columns:
- ``x``: x-coordinate
- ``y``: y-coordinate
scale : float, optional
Factor to scale points by.
By default, scale to millimeters based on Inkscape default of 90
pixels-per-inch.
scale : float, optional
Factor to scale points by.
in_place : bool, optional
If ``True``, input frame will be modified.
Otherwise, the scaled points are written to a new frame, leaving the
input frame unmodified.
Returns
-------
pandas.DataFrame
Input frame with the points translated such that bounding box is
anchored at (0, 0) and ``x`` and ``y`` values scaled by specified
:data:`scale`.
'''
if not inplace:
df_points = df_points.copy()
# Offset device, such that all coordinates are >= 0.
df_points.x -= df_points.x.min()
df_points.y -= df_points.y.min()
# Scale path coordinates.
df_points.x /= scale
df_points.y /= scale
return df_points |
def evaluate_http_conditionals(dav_res, last_modified, entitytag, environ):
"""Handle 'If-...:' headers (but not 'If:' header).
If-Match
@see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.24
Only perform the action if the client supplied entity matches the
same entity on the server. This is mainly for methods like
PUT to only update a resource if it has not been modified since the
user last updated it.
If-Match: "737060cd8c284d8af7ad3082f209582d"
If-Modified-Since
@see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.25
Allows a 304 Not Modified to be returned if content is unchanged
If-Modified-Since: Sat, 29 Oct 1994 19:43:31 GMT
If-None-Match
@see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.26
Allows a 304 Not Modified to be returned if content is unchanged,
see HTTP ETag
If-None-Match: "737060cd8c284d8af7ad3082f209582d"
If-Unmodified-Since
@see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.28
Only send the response if the entity has not been modified since a
specific time.
"""
if not dav_res:
return
# Conditions
# An HTTP/1.1 origin server, upon receiving a conditional request that includes both a
# Last-Modified date (e.g., in an If-Modified-Since or If-Unmodified-Since header field) and
# one or more entity tags (e.g., in an If-Match, If-None-Match, or If-Range header field) as
# cache validators, MUST NOT return a response status of 304 (Not Modified) unless doing so
# is consistent with all of the conditional header fields in the request.
if "HTTP_IF_MATCH" in environ and dav_res.support_etag():
ifmatchlist = environ["HTTP_IF_MATCH"].split(",")
for ifmatchtag in ifmatchlist:
ifmatchtag = ifmatchtag.strip(' "\t')
if ifmatchtag == entitytag or ifmatchtag == "*":
break
raise DAVError(HTTP_PRECONDITION_FAILED, "If-Match header condition failed")
# TODO: after the refactoring
ifModifiedSinceFailed = False
if "HTTP_IF_MODIFIED_SINCE" in environ and dav_res.support_modified():
ifmodtime = parse_time_string(environ["HTTP_IF_MODIFIED_SINCE"])
if ifmodtime and ifmodtime > last_modified:
ifModifiedSinceFailed = True
# If-None-Match
# If none of the entity tags match, then the server MAY perform the requested method as if the
# If-None-Match header field did not exist, but MUST also ignore any If-Modified-Since header
# field (s) in the request. That is, if no entity tags match, then the server MUST NOT return
# a 304 (Not Modified) response.
ignoreIfModifiedSince = False
if "HTTP_IF_NONE_MATCH" in environ and dav_res.support_etag():
ifmatchlist = environ["HTTP_IF_NONE_MATCH"].split(",")
for ifmatchtag in ifmatchlist:
ifmatchtag = ifmatchtag.strip(' "\t')
if ifmatchtag == entitytag or ifmatchtag == "*":
                # ETag matched. If it's a GET request and we don't have a
# conflicting If-Modified header, we return NOT_MODIFIED
if (
environ["REQUEST_METHOD"] in ("GET", "HEAD")
and not ifModifiedSinceFailed
):
raise DAVError(HTTP_NOT_MODIFIED, "If-None-Match header failed")
raise DAVError(
HTTP_PRECONDITION_FAILED, "If-None-Match header condition failed"
)
ignoreIfModifiedSince = True
if "HTTP_IF_UNMODIFIED_SINCE" in environ and dav_res.support_modified():
ifunmodtime = parse_time_string(environ["HTTP_IF_UNMODIFIED_SINCE"])
if ifunmodtime and ifunmodtime <= last_modified:
raise DAVError(
HTTP_PRECONDITION_FAILED, "If-Unmodified-Since header condition failed"
)
if ifModifiedSinceFailed and not ignoreIfModifiedSince:
raise DAVError(HTTP_NOT_MODIFIED, "If-Modified-Since header condition failed")
return | Handle 'If-...:' headers (but not 'If:' header).
If-Match
@see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.24
Only perform the action if the client supplied entity matches the
same entity on the server. This is mainly for methods like
PUT to only update a resource if it has not been modified since the
user last updated it.
If-Match: "737060cd8c284d8af7ad3082f209582d"
If-Modified-Since
@see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.25
Allows a 304 Not Modified to be returned if content is unchanged
If-Modified-Since: Sat, 29 Oct 1994 19:43:31 GMT
If-None-Match
@see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.26
Allows a 304 Not Modified to be returned if content is unchanged,
see HTTP ETag
If-None-Match: "737060cd8c284d8af7ad3082f209582d"
If-Unmodified-Since
@see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.28
Only send the response if the entity has not been modified since a
    specific time. | Below is the instruction that describes the task:
### Input:
Handle 'If-...:' headers (but not 'If:' header).
If-Match
@see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.24
Only perform the action if the client supplied entity matches the
same entity on the server. This is mainly for methods like
PUT to only update a resource if it has not been modified since the
user last updated it.
If-Match: "737060cd8c284d8af7ad3082f209582d"
If-Modified-Since
@see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.25
Allows a 304 Not Modified to be returned if content is unchanged
If-Modified-Since: Sat, 29 Oct 1994 19:43:31 GMT
If-None-Match
@see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.26
Allows a 304 Not Modified to be returned if content is unchanged,
see HTTP ETag
If-None-Match: "737060cd8c284d8af7ad3082f209582d"
If-Unmodified-Since
@see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.28
Only send the response if the entity has not been modified since a
specific time.
### Response:
def evaluate_http_conditionals(dav_res, last_modified, entitytag, environ):
"""Handle 'If-...:' headers (but not 'If:' header).
If-Match
@see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.24
Only perform the action if the client supplied entity matches the
same entity on the server. This is mainly for methods like
PUT to only update a resource if it has not been modified since the
user last updated it.
If-Match: "737060cd8c284d8af7ad3082f209582d"
If-Modified-Since
@see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.25
Allows a 304 Not Modified to be returned if content is unchanged
If-Modified-Since: Sat, 29 Oct 1994 19:43:31 GMT
If-None-Match
@see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.26
Allows a 304 Not Modified to be returned if content is unchanged,
see HTTP ETag
If-None-Match: "737060cd8c284d8af7ad3082f209582d"
If-Unmodified-Since
@see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.28
Only send the response if the entity has not been modified since a
specific time.
"""
if not dav_res:
return
# Conditions
# An HTTP/1.1 origin server, upon receiving a conditional request that includes both a
# Last-Modified date (e.g., in an If-Modified-Since or If-Unmodified-Since header field) and
# one or more entity tags (e.g., in an If-Match, If-None-Match, or If-Range header field) as
# cache validators, MUST NOT return a response status of 304 (Not Modified) unless doing so
# is consistent with all of the conditional header fields in the request.
if "HTTP_IF_MATCH" in environ and dav_res.support_etag():
ifmatchlist = environ["HTTP_IF_MATCH"].split(",")
for ifmatchtag in ifmatchlist:
ifmatchtag = ifmatchtag.strip(' "\t')
if ifmatchtag == entitytag or ifmatchtag == "*":
break
raise DAVError(HTTP_PRECONDITION_FAILED, "If-Match header condition failed")
# TODO: after the refactoring
ifModifiedSinceFailed = False
if "HTTP_IF_MODIFIED_SINCE" in environ and dav_res.support_modified():
ifmodtime = parse_time_string(environ["HTTP_IF_MODIFIED_SINCE"])
if ifmodtime and ifmodtime > last_modified:
ifModifiedSinceFailed = True
# If-None-Match
# If none of the entity tags match, then the server MAY perform the requested method as if the
# If-None-Match header field did not exist, but MUST also ignore any If-Modified-Since header
# field (s) in the request. That is, if no entity tags match, then the server MUST NOT return
# a 304 (Not Modified) response.
ignoreIfModifiedSince = False
if "HTTP_IF_NONE_MATCH" in environ and dav_res.support_etag():
ifmatchlist = environ["HTTP_IF_NONE_MATCH"].split(",")
for ifmatchtag in ifmatchlist:
ifmatchtag = ifmatchtag.strip(' "\t')
if ifmatchtag == entitytag or ifmatchtag == "*":
                # ETag matched. If it's a GET request and we don't have a
# conflicting If-Modified header, we return NOT_MODIFIED
if (
environ["REQUEST_METHOD"] in ("GET", "HEAD")
and not ifModifiedSinceFailed
):
raise DAVError(HTTP_NOT_MODIFIED, "If-None-Match header failed")
raise DAVError(
HTTP_PRECONDITION_FAILED, "If-None-Match header condition failed"
)
ignoreIfModifiedSince = True
if "HTTP_IF_UNMODIFIED_SINCE" in environ and dav_res.support_modified():
ifunmodtime = parse_time_string(environ["HTTP_IF_UNMODIFIED_SINCE"])
if ifunmodtime and ifunmodtime <= last_modified:
raise DAVError(
HTTP_PRECONDITION_FAILED, "If-Unmodified-Since header condition failed"
)
if ifModifiedSinceFailed and not ignoreIfModifiedSince:
raise DAVError(HTTP_NOT_MODIFIED, "If-Modified-Since header condition failed")
return |
def check_for_git_repo(url):
"""Check if a url points to a git repository."""
u = parse.urlparse(url)
is_git = False
if os.path.splitext(u.path)[1] == '.git':
is_git = True
elif u.scheme in ('', 'file'):
from git import InvalidGitRepositoryError, Repo
try:
Repo(u.path, search_parent_directories=True)
is_git = True
except InvalidGitRepositoryError:
is_git = False
    return is_git | Check if a url points to a git repository. | Below is the instruction that describes the task:
### Input:
Check if a url points to a git repository.
### Response:
def check_for_git_repo(url):
"""Check if a url points to a git repository."""
u = parse.urlparse(url)
is_git = False
if os.path.splitext(u.path)[1] == '.git':
is_git = True
elif u.scheme in ('', 'file'):
from git import InvalidGitRepositoryError, Repo
try:
Repo(u.path, search_parent_directories=True)
is_git = True
except InvalidGitRepositoryError:
is_git = False
return is_git |
def find_objects(self, ObjectClass, **kwargs):
""" Retrieve all objects of type ``ObjectClass``,
matching the filters specified in ``**kwargs`` -- case sensitive.
"""
print('dynamo.find_objects(%s, %s)' % (ObjectClass, str(kwargs)))
query = self.db.engine.query(ObjectClass)
for field_name, field_value in kwargs.items():
# Make sure that ObjectClass has a 'field_name' property
field = getattr(ObjectClass, field_name, None)
if field is None:
raise KeyError("DynamoDBAdapter.find_objects(): Class '%s' has no field '%s'." % (ObjectClass, field_name))
# Add a case sensitive filter to the query
query = query.filter(field == field_value)
# Execute query
return query.all(desc=True) | Retrieve all objects of type ``ObjectClass``,
        matching the filters specified in ``**kwargs`` -- case sensitive. | Below is the instruction that describes the task:
### Input:
Retrieve all objects of type ``ObjectClass``,
matching the filters specified in ``**kwargs`` -- case sensitive.
### Response:
def find_objects(self, ObjectClass, **kwargs):
""" Retrieve all objects of type ``ObjectClass``,
matching the filters specified in ``**kwargs`` -- case sensitive.
"""
print('dynamo.find_objects(%s, %s)' % (ObjectClass, str(kwargs)))
query = self.db.engine.query(ObjectClass)
for field_name, field_value in kwargs.items():
# Make sure that ObjectClass has a 'field_name' property
field = getattr(ObjectClass, field_name, None)
if field is None:
raise KeyError("DynamoDBAdapter.find_objects(): Class '%s' has no field '%s'." % (ObjectClass, field_name))
# Add a case sensitive filter to the query
query = query.filter(field == field_value)
# Execute query
return query.all(desc=True) |
def uvw2enu(u: float, v: float, w: float,
lat0: float, lon0: float, deg: bool = True) -> Tuple[float, float, float]:
"""
Parameters
----------
u : float or numpy.ndarray of float
v : float or numpy.ndarray of float
w : float or numpy.ndarray of float
Results
-------
East : float or numpy.ndarray of float
target east ENU coordinate (meters)
North : float or numpy.ndarray of float
target north ENU coordinate (meters)
Up : float or numpy.ndarray of float
target up ENU coordinate (meters)
"""
if deg:
lat0 = radians(lat0)
lon0 = radians(lon0)
t = cos(lon0) * u + sin(lon0) * v
East = -sin(lon0) * u + cos(lon0) * v
Up = cos(lat0) * t + sin(lat0) * w
North = -sin(lat0) * t + cos(lat0) * w
return East, North, Up | Parameters
----------
u : float or numpy.ndarray of float
v : float or numpy.ndarray of float
w : float or numpy.ndarray of float
Results
-------
East : float or numpy.ndarray of float
target east ENU coordinate (meters)
North : float or numpy.ndarray of float
target north ENU coordinate (meters)
Up : float or numpy.ndarray of float
        target up ENU coordinate (meters) | Below is the instruction that describes the task:
### Input:
Parameters
----------
u : float or numpy.ndarray of float
v : float or numpy.ndarray of float
w : float or numpy.ndarray of float
Results
-------
East : float or numpy.ndarray of float
target east ENU coordinate (meters)
North : float or numpy.ndarray of float
target north ENU coordinate (meters)
Up : float or numpy.ndarray of float
target up ENU coordinate (meters)
### Response:
def uvw2enu(u: float, v: float, w: float,
lat0: float, lon0: float, deg: bool = True) -> Tuple[float, float, float]:
"""
Parameters
----------
u : float or numpy.ndarray of float
v : float or numpy.ndarray of float
w : float or numpy.ndarray of float
Results
-------
East : float or numpy.ndarray of float
target east ENU coordinate (meters)
North : float or numpy.ndarray of float
target north ENU coordinate (meters)
Up : float or numpy.ndarray of float
target up ENU coordinate (meters)
"""
if deg:
lat0 = radians(lat0)
lon0 = radians(lon0)
t = cos(lon0) * u + sin(lon0) * v
East = -sin(lon0) * u + cos(lon0) * v
Up = cos(lat0) * t + sin(lat0) * w
North = -sin(lat0) * t + cos(lat0) * w
return East, North, Up |
def check_table(
problems: List,
table: str,
df: DataFrame,
condition,
message: str,
type_: str = "error",
) -> List:
"""
Check the given GTFS table for the given problem condition.
Parameters
----------
problems : list
A four-tuple containing
1. A problem type (string) equal to ``'error'`` or ``'warning'``;
``'error'`` means the GTFS is violated;
``'warning'`` means there is a problem but it is not a
GTFS violation
2. A message (string) that describes the problem
3. A GTFS table name, e.g. ``'routes'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a GTFS table
df : DataFrame
The GTFS table corresponding to ``table``
condition : boolean expression
One involving ``df``, e.g.`df['route_id'].map(is_valid_str)``
message : string
Problem message, e.g. ``'Invalid route_id'``
type_ : string
``'error'`` or ``'warning'`` indicating the type of problem
encountered
Returns
-------
list
The ``problems`` list extended as follows.
        Record the indices of ``df`` that satisfy the condition.
If the list of indices is nonempty, append to the
problems the item ``[type_, message, table, indices]``;
otherwise do not append anything.
"""
indices = df.loc[condition].index.tolist()
if indices:
problems.append([type_, message, table, indices])
return problems | Check the given GTFS table for the given problem condition.
Parameters
----------
problems : list
A four-tuple containing
1. A problem type (string) equal to ``'error'`` or ``'warning'``;
``'error'`` means the GTFS is violated;
``'warning'`` means there is a problem but it is not a
GTFS violation
2. A message (string) that describes the problem
3. A GTFS table name, e.g. ``'routes'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a GTFS table
df : DataFrame
The GTFS table corresponding to ``table``
condition : boolean expression
One involving ``df``, e.g.`df['route_id'].map(is_valid_str)``
message : string
Problem message, e.g. ``'Invalid route_id'``
type_ : string
``'error'`` or ``'warning'`` indicating the type of problem
encountered
Returns
-------
list
The ``problems`` list extended as follows.
        Record the indices of ``df`` that satisfy the condition.
If the list of indices is nonempty, append to the
problems the item ``[type_, message, table, indices]``;
        otherwise do not append anything. | Below is the instruction that describes the task:
### Input:
Check the given GTFS table for the given problem condition.
Parameters
----------
problems : list
A four-tuple containing
1. A problem type (string) equal to ``'error'`` or ``'warning'``;
``'error'`` means the GTFS is violated;
``'warning'`` means there is a problem but it is not a
GTFS violation
2. A message (string) that describes the problem
3. A GTFS table name, e.g. ``'routes'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a GTFS table
df : DataFrame
The GTFS table corresponding to ``table``
condition : boolean expression
One involving ``df``, e.g.`df['route_id'].map(is_valid_str)``
message : string
Problem message, e.g. ``'Invalid route_id'``
type_ : string
``'error'`` or ``'warning'`` indicating the type of problem
encountered
Returns
-------
list
The ``problems`` list extended as follows.
        Record the indices of ``df`` that satisfy the condition.
If the list of indices is nonempty, append to the
problems the item ``[type_, message, table, indices]``;
otherwise do not append anything.
### Response:
def check_table(
problems: List,
table: str,
df: DataFrame,
condition,
message: str,
type_: str = "error",
) -> List:
"""
Check the given GTFS table for the given problem condition.
Parameters
----------
problems : list
A four-tuple containing
1. A problem type (string) equal to ``'error'`` or ``'warning'``;
``'error'`` means the GTFS is violated;
``'warning'`` means there is a problem but it is not a
GTFS violation
2. A message (string) that describes the problem
3. A GTFS table name, e.g. ``'routes'``, in which the problem
occurs
4. A list of rows (integers) of the table's DataFrame where the
problem occurs
table : string
Name of a GTFS table
df : DataFrame
The GTFS table corresponding to ``table``
condition : boolean expression
One involving ``df``, e.g.`df['route_id'].map(is_valid_str)``
message : string
Problem message, e.g. ``'Invalid route_id'``
type_ : string
``'error'`` or ``'warning'`` indicating the type of problem
encountered
Returns
-------
list
The ``problems`` list extended as follows.
        Record the indices of ``df`` that satisfy the condition.
If the list of indices is nonempty, append to the
problems the item ``[type_, message, table, indices]``;
otherwise do not append anything.
"""
indices = df.loc[condition].index.tolist()
if indices:
problems.append([type_, message, table, indices])
return problems |
def reads(self, s, **_):
"""Read a notebook represented as text"""
if self.fmt.get('format_name') == 'pandoc':
return md_to_notebook(s)
lines = s.splitlines()
cells = []
metadata, jupyter_md, header_cell, pos = header_to_metadata_and_cell(lines,
self.implementation.header_prefix,
self.implementation.extension)
default_language = default_language_from_metadata_and_ext(metadata, self.implementation.extension)
self.update_fmt_with_notebook_options(metadata)
if header_cell:
cells.append(header_cell)
lines = lines[pos:]
if self.implementation.format_name and self.implementation.format_name.startswith('sphinx'):
cells.append(new_code_cell(source='%matplotlib inline'))
cell_metadata = set()
while lines:
reader = self.implementation.cell_reader_class(self.fmt, default_language)
cell, pos = reader.read(lines)
cells.append(cell)
cell_metadata.update(cell.metadata.keys())
if pos <= 0:
raise Exception('Blocked at lines ' + '\n'.join(lines[:6])) # pragma: no cover
lines = lines[pos:]
update_metadata_filters(metadata, jupyter_md, cell_metadata)
set_main_and_cell_language(metadata, cells, self.implementation.extension)
if self.implementation.format_name and self.implementation.format_name.startswith('sphinx'):
filtered_cells = []
for i, cell in enumerate(cells):
if cell.source == '' and i > 0 and i + 1 < len(cells) \
and cells[i - 1].cell_type != 'markdown' and cells[i + 1].cell_type != 'markdown':
continue
filtered_cells.append(cell)
cells = filtered_cells
    return new_notebook(cells=cells, metadata=metadata) | Read a notebook represented as text | Below is the instruction that describes the task:
### Input:
Read a notebook represented as text
### Response:
def reads(self, s, **_):
"""Read a notebook represented as text"""
if self.fmt.get('format_name') == 'pandoc':
return md_to_notebook(s)
lines = s.splitlines()
cells = []
metadata, jupyter_md, header_cell, pos = header_to_metadata_and_cell(lines,
self.implementation.header_prefix,
self.implementation.extension)
default_language = default_language_from_metadata_and_ext(metadata, self.implementation.extension)
self.update_fmt_with_notebook_options(metadata)
if header_cell:
cells.append(header_cell)
lines = lines[pos:]
if self.implementation.format_name and self.implementation.format_name.startswith('sphinx'):
cells.append(new_code_cell(source='%matplotlib inline'))
cell_metadata = set()
while lines:
reader = self.implementation.cell_reader_class(self.fmt, default_language)
cell, pos = reader.read(lines)
cells.append(cell)
cell_metadata.update(cell.metadata.keys())
if pos <= 0:
raise Exception('Blocked at lines ' + '\n'.join(lines[:6])) # pragma: no cover
lines = lines[pos:]
update_metadata_filters(metadata, jupyter_md, cell_metadata)
set_main_and_cell_language(metadata, cells, self.implementation.extension)
if self.implementation.format_name and self.implementation.format_name.startswith('sphinx'):
filtered_cells = []
for i, cell in enumerate(cells):
if cell.source == '' and i > 0 and i + 1 < len(cells) \
and cells[i - 1].cell_type != 'markdown' and cells[i + 1].cell_type != 'markdown':
continue
filtered_cells.append(cell)
cells = filtered_cells
return new_notebook(cells=cells, metadata=metadata) |
def register_printer(self, printer_class):
"""
:param printer_class: Class inheriting from `AbstractPrinter`.
"""
self._check_common_things('printer', printer_class, AbstractPrinter, self._printers)
instance = printer_class(self, logger_printer)
    self._printers.append(instance) | :param printer_class: Class inheriting from `AbstractPrinter`. | Below is the instruction that describes the task:
### Input:
:param printer_class: Class inheriting from `AbstractPrinter`.
### Response:
def register_printer(self, printer_class):
"""
:param printer_class: Class inheriting from `AbstractPrinter`.
"""
self._check_common_things('printer', printer_class, AbstractPrinter, self._printers)
instance = printer_class(self, logger_printer)
self._printers.append(instance) |
def load_scripts():
'''Import all of the modules named in REGISTERED_SCRIPTS'''
# Add scrypture package package to the path before importing
# so everything can import everything else regardless of package
scrypture_dir = os.path.realpath(
os.path.abspath(
os.path.split(
inspect.getfile( inspect.currentframe() ))[0]))
if scrypture_dir not in sys.path:
sys.path.insert(0, scrypture_dir)
# Load list of registered scripts
registered_scripts = app.config['REGISTERED_SCRIPTS']
for script in registered_scripts:
try:
s = import_module('.'+script,
package=os.path.split(app.config['SCRIPTS_DIR'])[-1])
s.package = s.__name__.split('.')[1]
#remove package from script name:
script_name = script.split('.')[-1]
registered_modules[script_name] = s
except Exception as e:
logging.warning('Could not import ' + \
str(script)+': '+str(e.message))
logging.debug(str(traceback.format_exc()))
            continue | Import all of the modules named in REGISTERED_SCRIPTS | Below is the instruction that describes the task:
### Input:
Import all of the modules named in REGISTERED_SCRIPTS
### Response:
def load_scripts():
'''Import all of the modules named in REGISTERED_SCRIPTS'''
# Add scrypture package package to the path before importing
# so everything can import everything else regardless of package
scrypture_dir = os.path.realpath(
os.path.abspath(
os.path.split(
inspect.getfile( inspect.currentframe() ))[0]))
if scrypture_dir not in sys.path:
sys.path.insert(0, scrypture_dir)
# Load list of registered scripts
registered_scripts = app.config['REGISTERED_SCRIPTS']
for script in registered_scripts:
try:
s = import_module('.'+script,
package=os.path.split(app.config['SCRIPTS_DIR'])[-1])
s.package = s.__name__.split('.')[1]
#remove package from script name:
script_name = script.split('.')[-1]
registered_modules[script_name] = s
except Exception as e:
logging.warning('Could not import ' + \
str(script)+': '+str(e.message))
logging.debug(str(traceback.format_exc()))
continue |
def update(self, campaign_id, schedule, nick=None):
'''xxxxx.xxxxx.campaign.schedule.update
===================================
更新一个推广计划的分时折扣设置'''
request = TOPRequest('xxxxx.xxxxx.campaign.schedule.update')
request['campaign_id'] = campaign_id
request['schedule'] = schedule
if nick!=None: request['nick'] = nick
self.create(self.execute(request), fields=['success','result','success','result_code','result_message'], models={'result':CampaignSchedule})
return self.result | xxxxx.xxxxx.campaign.schedule.update
===================================
更新一个推广计划的分时折扣设置 | Below is the instruction that describes the task:
### Input:
xxxxx.xxxxx.campaign.schedule.update
===================================
更新一个推广计划的分时折扣设置
### Response:
def update(self, campaign_id, schedule, nick=None):
'''xxxxx.xxxxx.campaign.schedule.update
===================================
更新一个推广计划的分时折扣设置'''
request = TOPRequest('xxxxx.xxxxx.campaign.schedule.update')
request['campaign_id'] = campaign_id
request['schedule'] = schedule
if nick!=None: request['nick'] = nick
self.create(self.execute(request), fields=['success','result','success','result_code','result_message'], models={'result':CampaignSchedule})
return self.result |
def from_sym_2_tri(symm):
"""convert a 2D symmetric matrix to an upper
triangular matrix in 1D format
Parameters
----------
symm : 2D array
Symmetric matrix
Returns
-------
tri: 1D array
Contains elements of upper triangular matrix
"""
inds = np.triu_indices_from(symm)
tri = symm[inds]
return tri | convert a 2D symmetric matrix to an upper
triangular matrix in 1D format
Parameters
----------
symm : 2D array
Symmetric matrix
Returns
-------
tri: 1D array
    Contains elements of upper triangular matrix | Below is the instruction that describes the task:
### Input:
convert a 2D symmetric matrix to an upper
triangular matrix in 1D format
Parameters
----------
symm : 2D array
Symmetric matrix
Returns
-------
tri: 1D array
Contains elements of upper triangular matrix
### Response:
def from_sym_2_tri(symm):
"""convert a 2D symmetric matrix to an upper
triangular matrix in 1D format
Parameters
----------
symm : 2D array
Symmetric matrix
Returns
-------
tri: 1D array
Contains elements of upper triangular matrix
"""
inds = np.triu_indices_from(symm)
tri = symm[inds]
return tri |
def allowed_info_messages(*info_messages):
"""
Decorator ignoring defined info messages at the end of test method. As
param use what
:py:meth:`~.WebdriverWrapperInfoMixin.get_info_messages`
returns.
.. versionadded:: 2.0
"""
def wrapper(func):
setattr(func, ALLOWED_INFO_MESSAGES, info_messages)
return func
return wrapper | Decorator ignoring defined info messages at the end of test method. As
param use what
:py:meth:`~.WebdriverWrapperInfoMixin.get_info_messages`
returns.
.. versionadded:: 2.0 | Below is the instruction that describes the task:
### Input:
Decorator ignoring defined info messages at the end of test method. As
param use what
:py:meth:`~.WebdriverWrapperInfoMixin.get_info_messages`
returns.
.. versionadded:: 2.0
### Response:
def allowed_info_messages(*info_messages):
"""
Decorator ignoring defined info messages at the end of test method. As
param use what
:py:meth:`~.WebdriverWrapperInfoMixin.get_info_messages`
returns.
.. versionadded:: 2.0
"""
def wrapper(func):
setattr(func, ALLOWED_INFO_MESSAGES, info_messages)
return func
return wrapper |
def _set_route_target(self, v, load=False):
"""
Setter method for route_target, mapped from YANG variable /rbridge_id/vrf/address_family/ipv6/unicast/route_target (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_route_target is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_route_target() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("action target_community",route_target.route_target, yang_name="route-target", rest_name="route-target", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='action target-community', extensions={u'tailf-common': {u'info': u'Configure Target VPN Extended Communities', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-incomplete-command': None, u'callpoint': u'VrfRtAfIpv6Ucast'}}), is_container='list', yang_name="route-target", rest_name="route-target", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Target VPN Extended Communities', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-incomplete-command': None, u'callpoint': u'VrfRtAfIpv6Ucast'}}, namespace='urn:brocade.com:mgmt:brocade-vrf', defining_module='brocade-vrf', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """route_target must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("action target_community",route_target.route_target, yang_name="route-target", rest_name="route-target", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='action target-community', extensions={u'tailf-common': {u'info': u'Configure Target VPN Extended Communities', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-incomplete-command': None, u'callpoint': u'VrfRtAfIpv6Ucast'}}), is_container='list', yang_name="route-target", rest_name="route-target", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Target VPN Extended Communities', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-incomplete-command': None, u'callpoint': u'VrfRtAfIpv6Ucast'}}, namespace='urn:brocade.com:mgmt:brocade-vrf', defining_module='brocade-vrf', yang_type='list', is_config=True)""",
})
self.__route_target = t
if hasattr(self, '_set'):
self._set() | Setter method for route_target, mapped from YANG variable /rbridge_id/vrf/address_family/ipv6/unicast/route_target (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_route_target is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_route_target() directly. | Below is the instruction that describes the task:
### Input:
Setter method for route_target, mapped from YANG variable /rbridge_id/vrf/address_family/ipv6/unicast/route_target (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_route_target is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_route_target() directly.
### Response:
def _set_route_target(self, v, load=False):
"""
Setter method for route_target, mapped from YANG variable /rbridge_id/vrf/address_family/ipv6/unicast/route_target (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_route_target is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_route_target() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("action target_community",route_target.route_target, yang_name="route-target", rest_name="route-target", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='action target-community', extensions={u'tailf-common': {u'info': u'Configure Target VPN Extended Communities', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-incomplete-command': None, u'callpoint': u'VrfRtAfIpv6Ucast'}}), is_container='list', yang_name="route-target", rest_name="route-target", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Target VPN Extended Communities', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-incomplete-command': None, u'callpoint': u'VrfRtAfIpv6Ucast'}}, namespace='urn:brocade.com:mgmt:brocade-vrf', defining_module='brocade-vrf', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """route_target must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("action target_community",route_target.route_target, yang_name="route-target", rest_name="route-target", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='action target-community', extensions={u'tailf-common': {u'info': u'Configure Target VPN Extended Communities', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-incomplete-command': None, u'callpoint': u'VrfRtAfIpv6Ucast'}}), is_container='list', yang_name="route-target", rest_name="route-target", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Target VPN Extended Communities', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-incomplete-command': None, u'callpoint': u'VrfRtAfIpv6Ucast'}}, namespace='urn:brocade.com:mgmt:brocade-vrf', defining_module='brocade-vrf', yang_type='list', is_config=True)""",
})
self.__route_target = t
if hasattr(self, '_set'):
self._set() |
def cmd_devop(self, args):
'''device operations'''
usage = "Usage: devop <read|write> <spi|i2c> name bus address"
if len(args) < 5:
print(usage)
return
if args[1] == 'spi':
bustype = mavutil.mavlink.DEVICE_OP_BUSTYPE_SPI
elif args[1] == 'i2c':
bustype = mavutil.mavlink.DEVICE_OP_BUSTYPE_I2C
else:
print(usage)
if args[0] == 'read':
self.devop_read(args[2:], bustype)
elif args[0] == 'write':
self.devop_write(args[2:], bustype)
else:
        print(usage) | device operations | Below is the instruction that describes the task:
### Input:
device operations
### Response:
def cmd_devop(self, args):
'''device operations'''
usage = "Usage: devop <read|write> <spi|i2c> name bus address"
if len(args) < 5:
print(usage)
return
if args[1] == 'spi':
bustype = mavutil.mavlink.DEVICE_OP_BUSTYPE_SPI
elif args[1] == 'i2c':
bustype = mavutil.mavlink.DEVICE_OP_BUSTYPE_I2C
else:
print(usage)
if args[0] == 'read':
self.devop_read(args[2:], bustype)
elif args[0] == 'write':
self.devop_write(args[2:], bustype)
else:
print(usage) |
def rename(self, arr, new_name=True):
"""
Rename an array to find a name that isn't already in the list
Parameters
----------
arr: InteractiveBase
A :class:`InteractiveArray` or :class:`InteractiveList` instance
whose name shall be checked
new_name: bool or str
If False, and the ``arr_name`` attribute of the new array is
already in the list, a ValueError is raised.
If True and the ``arr_name`` attribute of the new array is not
already in the list, the name is not changed. Otherwise, if the
array name is already in use, `new_name` is set to 'arr{0}'.
If not True, this will be used for renaming (if the array name of
`arr` is in use or not). ``'{0}'`` is replaced by a counter
Returns
-------
InteractiveBase
`arr` with changed ``arr_name`` attribute
bool or None
True, if the array has been renamed, False if not and None if the
array is already in the list
Raises
------
ValueError
If it was impossible to find a name that isn't already in the list
ValueError
If `new_name` is False and the array is already in the list"""
name_in_me = arr.psy.arr_name in self.arr_names
if not name_in_me:
return arr, False
elif name_in_me and not self._contains_array(arr):
if new_name is False:
raise ValueError(
"Array name %s is already in use! Set the `new_name` "
"parameter to None for renaming!" % arr.psy.arr_name)
elif new_name is True:
new_name = new_name if isstring(new_name) else 'arr{0}'
arr.psy.arr_name = self.next_available_name(new_name)
return arr, True
return arr, None | Rename an array to find a name that isn't already in the list
Parameters
----------
arr: InteractiveBase
A :class:`InteractiveArray` or :class:`InteractiveList` instance
whose name shall be checked
new_name: bool or str
If False, and the ``arr_name`` attribute of the new array is
already in the list, a ValueError is raised.
If True and the ``arr_name`` attribute of the new array is not
already in the list, the name is not changed. Otherwise, if the
array name is already in use, `new_name` is set to 'arr{0}'.
If not True, this will be used for renaming (if the array name of
`arr` is in use or not). ``'{0}'`` is replaced by a counter
Returns
-------
InteractiveBase
`arr` with changed ``arr_name`` attribute
bool or None
True, if the array has been renamed, False if not and None if the
array is already in the list
Raises
------
ValueError
If it was impossible to find a name that isn't already in the list
ValueError
    If `new_name` is False and the array is already in the list | Below is the instruction that describes the task:
### Input:
Rename an array to find a name that isn't already in the list
Parameters
----------
arr: InteractiveBase
A :class:`InteractiveArray` or :class:`InteractiveList` instance
whose name shall be checked
new_name: bool or str
If False, and the ``arr_name`` attribute of the new array is
already in the list, a ValueError is raised.
If True and the ``arr_name`` attribute of the new array is not
already in the list, the name is not changed. Otherwise, if the
array name is already in use, `new_name` is set to 'arr{0}'.
If not True, this will be used for renaming (if the array name of
`arr` is in use or not). ``'{0}'`` is replaced by a counter
Returns
-------
InteractiveBase
`arr` with changed ``arr_name`` attribute
bool or None
True, if the array has been renamed, False if not and None if the
array is already in the list
Raises
------
ValueError
If it was impossible to find a name that isn't already in the list
ValueError
If `new_name` is False and the array is already in the list
### Response:
def rename(self, arr, new_name=True):
"""
Rename an array to find a name that isn't already in the list
Parameters
----------
arr: InteractiveBase
A :class:`InteractiveArray` or :class:`InteractiveList` instance
whose name shall be checked
new_name: bool or str
If False, and the ``arr_name`` attribute of the new array is
already in the list, a ValueError is raised.
If True and the ``arr_name`` attribute of the new array is not
already in the list, the name is not changed. Otherwise, if the
array name is already in use, `new_name` is set to 'arr{0}'.
If not True, this will be used for renaming (if the array name of
`arr` is in use or not). ``'{0}'`` is replaced by a counter
Returns
-------
InteractiveBase
`arr` with changed ``arr_name`` attribute
bool or None
True, if the array has been renamed, False if not and None if the
array is already in the list
Raises
------
ValueError
If it was impossible to find a name that isn't already in the list
ValueError
If `new_name` is False and the array is already in the list"""
name_in_me = arr.psy.arr_name in self.arr_names
if not name_in_me:
return arr, False
elif name_in_me and not self._contains_array(arr):
if new_name is False:
raise ValueError(
"Array name %s is already in use! Set the `new_name` "
"parameter to None for renaming!" % arr.psy.arr_name)
elif new_name is True:
new_name = new_name if isstring(new_name) else 'arr{0}'
arr.psy.arr_name = self.next_available_name(new_name)
return arr, True
return arr, None |
def handle_update(self, action, params):
"""Handle the specified action on this component."""
_LOGGER.debug('Keypad: "%s" Handling "%s" Action: %s Params: %s"' % (
self._keypad.name, self.name, action, params))
    return False | Handle the specified action on this component. | Below is the instruction that describes the task:
### Input:
Handle the specified action on this component.
### Response:
def handle_update(self, action, params):
"""Handle the specified action on this component."""
_LOGGER.debug('Keypad: "%s" Handling "%s" Action: %s Params: %s"' % (
self._keypad.name, self.name, action, params))
return False |
def set_label(self, label, lang=None):
"""Sets the `label` metadata property on your Thing/Point. Only one label is allowed per language, so any
other labels in this language are removed before adding this one
Raises `ValueError` containing an error message if the parameters fail validation
`label` (mandatory) (string) the new text of the label
`lang` (optional) (string) The two-character ISO 639-1 language code to use for your label.
None means use the default language for your agent.
See [Config](./Config.m.html#IoticAgent.IOT.Config.Config.__init__)
"""
label = Validation.label_check_convert(label)
lang = Validation.lang_check_convert(lang, default=self._default_lang)
# remove any other labels with this language before adding
self.delete_label(lang)
subj = self._get_uuid_uriref()
self._graph.add((subj, self._labelPredicate, Literal(label, lang))) | Sets the `label` metadata property on your Thing/Point. Only one label is allowed per language, so any
other labels in this language are removed before adding this one
Raises `ValueError` containing an error message if the parameters fail validation
`label` (mandatory) (string) the new text of the label
`lang` (optional) (string) The two-character ISO 639-1 language code to use for your label.
None means use the default language for your agent.
See [Config](./Config.m.html#IoticAgent.IOT.Config.Config.__init__) | Below is the instruction that describes the task:
### Input:
Sets the `label` metadata property on your Thing/Point. Only one label is allowed per language, so any
other labels in this language are removed before adding this one
Raises `ValueError` containing an error message if the parameters fail validation
`label` (mandatory) (string) the new text of the label
`lang` (optional) (string) The two-character ISO 639-1 language code to use for your label.
None means use the default language for your agent.
See [Config](./Config.m.html#IoticAgent.IOT.Config.Config.__init__)
### Response:
def set_label(self, label, lang=None):
"""Sets the `label` metadata property on your Thing/Point. Only one label is allowed per language, so any
other labels in this language are removed before adding this one
Raises `ValueError` containing an error message if the parameters fail validation
`label` (mandatory) (string) the new text of the label
`lang` (optional) (string) The two-character ISO 639-1 language code to use for your label.
None means use the default language for your agent.
See [Config](./Config.m.html#IoticAgent.IOT.Config.Config.__init__)
"""
label = Validation.label_check_convert(label)
lang = Validation.lang_check_convert(lang, default=self._default_lang)
# remove any other labels with this language before adding
self.delete_label(lang)
subj = self._get_uuid_uriref()
self._graph.add((subj, self._labelPredicate, Literal(label, lang))) |
def resources_gc_prefix(options, policy_config, policy_collection):
"""Garbage collect old custodian policies based on prefix.
We attempt to introspect to find the event sources for a policy
but without the old configuration this is implicit.
"""
# Classify policies by region
policy_regions = {}
for p in policy_collection:
if p.execution_mode == 'poll':
continue
policy_regions.setdefault(p.options.region, []).append(p)
regions = get_gc_regions(options.regions)
for r in regions:
region_gc(options, r, policy_config, policy_regions.get(r, [])) | Garbage collect old custodian policies based on prefix.
We attempt to introspect to find the event sources for a policy
but without the old configuration this is implicit. | Below is the instruction that describes the task:
### Input:
Garbage collect old custodian policies based on prefix.
We attempt to introspect to find the event sources for a policy
but without the old configuration this is implicit.
### Response:
def resources_gc_prefix(options, policy_config, policy_collection):
"""Garbage collect old custodian policies based on prefix.
We attempt to introspect to find the event sources for a policy
but without the old configuration this is implicit.
"""
# Classify policies by region
policy_regions = {}
for p in policy_collection:
if p.execution_mode == 'poll':
continue
policy_regions.setdefault(p.options.region, []).append(p)
regions = get_gc_regions(options.regions)
for r in regions:
region_gc(options, r, policy_config, policy_regions.get(r, [])) |
def _merge_tops_same(self, tops):
'''
For each saltenv, only consider the top file from that saltenv. All
sections matching a given saltenv, which appear in a different
saltenv's top file, will be ignored.
'''
top = DefaultOrderedDict(OrderedDict)
for cenv, ctops in six.iteritems(tops):
if all([x == {} for x in ctops]):
# No top file found in this env, check the default_top
default_top = self.opts['default_top']
fallback_tops = tops.get(default_top, [])
if all([x == {} for x in fallback_tops]):
# Nothing in the fallback top file
log.error(
'The \'%s\' saltenv has no top file, and the fallback '
'saltenv specified by default_top (%s) also has no '
'top file', cenv, default_top
)
continue
for ctop in fallback_tops:
for saltenv, targets in six.iteritems(ctop):
if saltenv != cenv:
continue
log.debug(
'The \'%s\' saltenv has no top file, using the '
'default_top saltenv (%s)', cenv, default_top
)
for tgt in targets:
top[saltenv][tgt] = ctop[saltenv][tgt]
break
else:
log.error(
'The \'%s\' saltenv has no top file, and no '
'matches were found in the top file for the '
'default_top saltenv (%s)', cenv, default_top
)
continue
else:
for ctop in ctops:
for saltenv, targets in six.iteritems(ctop):
if saltenv == 'include':
continue
elif saltenv != cenv:
log.debug(
'Section for saltenv \'%s\' in the \'%s\' '
'saltenv\'s top file will be ignored, as the '
'top_file_merging_strategy is set to \'same\' '
'and the saltenvs do not match',
saltenv, cenv
)
continue
try:
for tgt in targets:
top[saltenv][tgt] = ctop[saltenv][tgt]
except TypeError:
raise SaltRenderError('Unable to render top file. No targets found.')
return top | For each saltenv, only consider the top file from that saltenv. All
sections matching a given saltenv, which appear in a different
saltenv's top file, will be ignored. | Below is the instruction that describes the task:
### Input:
For each saltenv, only consider the top file from that saltenv. All
sections matching a given saltenv, which appear in a different
saltenv's top file, will be ignored.
### Response:
def _merge_tops_same(self, tops):
'''
For each saltenv, only consider the top file from that saltenv. All
sections matching a given saltenv, which appear in a different
saltenv's top file, will be ignored.
'''
top = DefaultOrderedDict(OrderedDict)
for cenv, ctops in six.iteritems(tops):
if all([x == {} for x in ctops]):
# No top file found in this env, check the default_top
default_top = self.opts['default_top']
fallback_tops = tops.get(default_top, [])
if all([x == {} for x in fallback_tops]):
# Nothing in the fallback top file
log.error(
'The \'%s\' saltenv has no top file, and the fallback '
'saltenv specified by default_top (%s) also has no '
'top file', cenv, default_top
)
continue
for ctop in fallback_tops:
for saltenv, targets in six.iteritems(ctop):
if saltenv != cenv:
continue
log.debug(
'The \'%s\' saltenv has no top file, using the '
'default_top saltenv (%s)', cenv, default_top
)
for tgt in targets:
top[saltenv][tgt] = ctop[saltenv][tgt]
break
else:
log.error(
'The \'%s\' saltenv has no top file, and no '
'matches were found in the top file for the '
'default_top saltenv (%s)', cenv, default_top
)
continue
else:
for ctop in ctops:
for saltenv, targets in six.iteritems(ctop):
if saltenv == 'include':
continue
elif saltenv != cenv:
log.debug(
'Section for saltenv \'%s\' in the \'%s\' '
'saltenv\'s top file will be ignored, as the '
'top_file_merging_strategy is set to \'same\' '
'and the saltenvs do not match',
saltenv, cenv
)
continue
try:
for tgt in targets:
top[saltenv][tgt] = ctop[saltenv][tgt]
except TypeError:
raise SaltRenderError('Unable to render top file. No targets found.')
return top |
def packtar(tarfile, files, srcdir):
""" Pack the given files into a tar, setting cwd = srcdir"""
nullfd = open(os.devnull, "w")
tarfile = cygpath(os.path.abspath(tarfile))
log.debug("pack tar %s from folder %s with files ", tarfile, srcdir)
log.debug(files)
try:
check_call([TAR, '-czf', tarfile] + files, cwd=srcdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error packing tar file %s to %s", tarfile, srcdir)
raise
    nullfd.close() | Pack the given files into a tar, setting cwd = srcdir | Below is the instruction that describes the task:
### Input:
Pack the given files into a tar, setting cwd = srcdir
### Response:
def packtar(tarfile, files, srcdir):
""" Pack the given files into a tar, setting cwd = srcdir"""
nullfd = open(os.devnull, "w")
tarfile = cygpath(os.path.abspath(tarfile))
log.debug("pack tar %s from folder %s with files ", tarfile, srcdir)
log.debug(files)
try:
check_call([TAR, '-czf', tarfile] + files, cwd=srcdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error packing tar file %s to %s", tarfile, srcdir)
raise
nullfd.close() |
def estimate_library_complexity(df, algorithm="RNA-seq"):
"""
estimate library complexity from the number of reads vs.
number of unique start sites. returns "NA" if there are
not enough data points to fit the line
"""
DEFAULT_CUTOFFS = {"RNA-seq": (0.25, 0.40)}
cutoffs = DEFAULT_CUTOFFS[algorithm]
if len(df) < 5:
return {"unique_starts_per_read": 'nan',
"complexity": "NA"}
model = sm.ols(formula="starts ~ reads", data=df)
fitted = model.fit()
slope = fitted.params["reads"]
if slope <= cutoffs[0]:
complexity = "LOW"
elif slope <= cutoffs[1]:
complexity = "MEDIUM"
else:
complexity = "HIGH"
# for now don't return the complexity flag
return {"Unique Starts Per Read": float(slope)} | estimate library complexity from the number of reads vs.
number of unique start sites. returns "NA" if there are
not enough data points to fit the line | Below is the instruction that describes the task:
### Input:
estimate library complexity from the number of reads vs.
number of unique start sites. returns "NA" if there are
not enough data points to fit the line
### Response:
def estimate_library_complexity(df, algorithm="RNA-seq"):
"""
estimate library complexity from the number of reads vs.
number of unique start sites. returns "NA" if there are
not enough data points to fit the line
"""
DEFAULT_CUTOFFS = {"RNA-seq": (0.25, 0.40)}
cutoffs = DEFAULT_CUTOFFS[algorithm]
if len(df) < 5:
return {"unique_starts_per_read": 'nan',
"complexity": "NA"}
model = sm.ols(formula="starts ~ reads", data=df)
fitted = model.fit()
slope = fitted.params["reads"]
if slope <= cutoffs[0]:
complexity = "LOW"
elif slope <= cutoffs[1]:
complexity = "MEDIUM"
else:
complexity = "HIGH"
# for now don't return the complexity flag
return {"Unique Starts Per Read": float(slope)} |
def _parselog(self, r):
"""
Parse bazaar log file format
Args:
r (str): bzr revision identifier
Yields:
dict: dict of (attr, value) pairs
::
$ bzr log -l1
------------------------------------------------------------
revno: 1
committer: ubuntu <ubuntu@ubuntu-desktop>
branch nick: ubuntu-desktop /etc repository
timestamp: Wed 2011-10-12 01:16:55 -0500
message:
Initial commit
"""
def __parselog(entry):
"""
Parse bazaar log file format
Args:
entry (str): log message string
Yields:
tuple: (attrname, value)
"""
bufname = None
buf = deque()
print(entry)
if entry == ['']:
return
for l in itersplit(entry, '\n'):
if not l:
continue
mobj = self.logrgx.match(l)
if not mobj:
# " - Log message"
buf.append(self._logmessage_transform(l))
if mobj:
mobjlen = len(mobj.groups())
if mobjlen == 2:
# "attr: value"
attr, value = mobj.groups()
if attr == 'message':
bufname = 'desc'
else:
attr = self.field_trans.get(attr, attr)
yield (self.field_trans.get(attr, attr), value)
else:
raise Exception()
if bufname is not None:
if len(buf):
buf.pop()
len(buf) > 1 and buf.popleft()
yield (bufname, '\n'.join(buf))
return
kwargs = dict(__parselog(r)) # FIXME
if kwargs:
if 'tags' not in kwargs:
kwargs['tags'] = tuple()
else:
kwargs['tags'].split(' ') # TODO
if 'branchnick' not in kwargs:
kwargs['branchnick'] = None
try:
yield kwargs # TODO
# return self._tuple(**kwargs)
except:
log.error(r)
log.error(kwargs)
raise
else:
log.error("failed to parse: %r" % r) | Parse bazaar log file format
Args:
r (str): bzr revision identifier
Yields:
dict: dict of (attr, value) pairs
::
$ bzr log -l1
------------------------------------------------------------
revno: 1
committer: ubuntu <ubuntu@ubuntu-desktop>
branch nick: ubuntu-desktop /etc repository
timestamp: Wed 2011-10-12 01:16:55 -0500
message:
Initial commit | Below is the the instruction that describes the task:
### Input:
Parse bazaar log file format
Args:
r (str): bzr revision identifier
Yields:
dict: dict of (attr, value) pairs
::
$ bzr log -l1
------------------------------------------------------------
revno: 1
committer: ubuntu <ubuntu@ubuntu-desktop>
branch nick: ubuntu-desktop /etc repository
timestamp: Wed 2011-10-12 01:16:55 -0500
message:
Initial commit
### Response:
def _parselog(self, r):
"""
Parse bazaar log file format
Args:
r (str): bzr revision identifier
Yields:
dict: dict of (attr, value) pairs
::
$ bzr log -l1
------------------------------------------------------------
revno: 1
committer: ubuntu <ubuntu@ubuntu-desktop>
branch nick: ubuntu-desktop /etc repository
timestamp: Wed 2011-10-12 01:16:55 -0500
message:
Initial commit
"""
def __parselog(entry):
"""
Parse bazaar log file format
Args:
entry (str): log message string
Yields:
tuple: (attrname, value)
"""
bufname = None
buf = deque()
print(entry)
if entry == ['']:
return
for l in itersplit(entry, '\n'):
if not l:
continue
mobj = self.logrgx.match(l)
if not mobj:
# " - Log message"
buf.append(self._logmessage_transform(l))
if mobj:
mobjlen = len(mobj.groups())
if mobjlen == 2:
# "attr: value"
attr, value = mobj.groups()
if attr == 'message':
bufname = 'desc'
else:
attr = self.field_trans.get(attr, attr)
yield (self.field_trans.get(attr, attr), value)
else:
raise Exception()
if bufname is not None:
if len(buf):
buf.pop()
len(buf) > 1 and buf.popleft()
yield (bufname, '\n'.join(buf))
return
kwargs = dict(__parselog(r)) # FIXME
if kwargs:
if 'tags' not in kwargs:
kwargs['tags'] = tuple()
else:
kwargs['tags'].split(' ') # TODO
if 'branchnick' not in kwargs:
kwargs['branchnick'] = None
try:
yield kwargs # TODO
# return self._tuple(**kwargs)
except:
log.error(r)
log.error(kwargs)
raise
else:
log.error("failed to parse: %r" % r) |
def add_after(self):
"""Returns a builder inserting a new block after the current block"""
idx = self._container.structure.index(self)
return BlockBuilder(self._container, idx+1) | Returns a builder inserting a new block after the current block | Below is the the instruction that describes the task:
### Input:
Returns a builder inserting a new block after the current block
### Response:
def add_after(self):
"""Returns a builder inserting a new block after the current block"""
idx = self._container.structure.index(self)
return BlockBuilder(self._container, idx+1) |
def plot_predict(self, h=5, past_values=20, intervals=True, oos_data=None, **kwargs):
""" Makes forecast with the estimated model
Parameters
----------
h : int (default : 5)
How many steps ahead would you like to forecast?
past_values : int (default : 20)
How many past observations to show on the forecast graph?
intervals : Boolean
Would you like to show prediction intervals for the forecast?
oos_data : pd.DataFrame
Data for the variables to be used out of sample (ys can be NaNs)
Returns
----------
- Plot of the forecast
"""
import matplotlib.pyplot as plt
import seaborn as sns
figsize = kwargs.get('figsize',(10,7))
if self.latent_variables.estimated is False:
raise Exception("No latent variables estimated!")
else:
# Sort/manipulate the out-of-sample data
_, X_oos = dmatrices(self.formula, oos_data)
X_oos = np.array([X_oos])[0]
X_pred = X_oos[:h]
date_index = self.shift_dates(h)
if self.latent_variables.estimation_method in ['M-H']:
sim_vector = np.zeros([15000,h])
for n in range(0, 15000):
t_z = self.draw_latent_variables(nsims=1).T[0]
_, Y, _, coefficients = self._model(t_z)
coefficients_star = coefficients.T[-1]
theta_pred = np.dot(np.array([coefficients_star]), X_pred.T)[0]
t_z = np.array([self.latent_variables.z_list[k].prior.transform(t_z[k]) for k in range(t_z.shape[0])])
model_scale, model_shape, model_skewness = self._get_scale_and_shape(t_z)
sim_vector[n,:] = self.family.draw_variable(self.link(theta_pred), model_scale, model_shape, model_skewness, theta_pred.shape[0])
mean_values = np.append(Y, self.link(np.array([np.mean(i) for i in sim_vector.T])))
else:
# Retrieve data, dates and (transformed) latent variables
_, Y, _, coefficients = self._model(self.latent_variables.get_z_values())
coefficients_star = coefficients.T[-1]
theta_pred = np.dot(np.array([coefficients_star]), X_pred.T)[0]
t_z = self.transform_z()
sim_vector = np.zeros([15000,h])
mean_values = np.append(Y, self.link(theta_pred))
model_scale, model_shape, model_skewness = self._get_scale_and_shape(t_z)
if self.model_name2 == "Skewt":
m1 = (np.sqrt(model_shape)*sp.gamma((model_shape-1.0)/2.0))/(np.sqrt(np.pi)*sp.gamma(model_shape/2.0))
mean_values += (model_skewness - (1.0/model_skewness))*model_scale*m1
for n in range(0,15000):
sim_vector[n,:] = self.family.draw_variable(self.link(theta_pred),model_scale,model_shape,model_skewness,theta_pred.shape[0])
sim_vector = sim_vector.T
error_bars = []
for pre in range(5,100,5):
error_bars.append(np.insert([np.percentile(i,pre) for i in sim_vector], 0, mean_values[-h-1]))
forecasted_values = mean_values[-h-1:]
plot_values = mean_values[-h-past_values:]
plot_index = date_index[-h-past_values:]
plt.figure(figsize=figsize)
if intervals == True:
alpha =[0.15*i/float(100) for i in range(50,12,-2)]
for count in range(9):
plt.fill_between(date_index[-h-1:], error_bars[count], error_bars[-count],
alpha=alpha[count])
plt.plot(plot_index,plot_values)
plt.title("Forecast for " + self.data_name)
plt.xlabel("Time")
plt.ylabel(self.data_name)
plt.show() | Makes forecast with the estimated model
Parameters
----------
h : int (default : 5)
How many steps ahead would you like to forecast?
past_values : int (default : 20)
How many past observations to show on the forecast graph?
intervals : Boolean
Would you like to show prediction intervals for the forecast?
oos_data : pd.DataFrame
Data for the variables to be used out of sample (ys can be NaNs)
Returns
----------
- Plot of the forecast | Below is the the instruction that describes the task:
### Input:
Makes forecast with the estimated model
Parameters
----------
h : int (default : 5)
How many steps ahead would you like to forecast?
past_values : int (default : 20)
How many past observations to show on the forecast graph?
intervals : Boolean
Would you like to show prediction intervals for the forecast?
oos_data : pd.DataFrame
Data for the variables to be used out of sample (ys can be NaNs)
Returns
----------
- Plot of the forecast
### Response:
def plot_predict(self, h=5, past_values=20, intervals=True, oos_data=None, **kwargs):
""" Makes forecast with the estimated model
Parameters
----------
h : int (default : 5)
How many steps ahead would you like to forecast?
past_values : int (default : 20)
How many past observations to show on the forecast graph?
intervals : Boolean
Would you like to show prediction intervals for the forecast?
oos_data : pd.DataFrame
Data for the variables to be used out of sample (ys can be NaNs)
Returns
----------
- Plot of the forecast
"""
import matplotlib.pyplot as plt
import seaborn as sns
figsize = kwargs.get('figsize',(10,7))
if self.latent_variables.estimated is False:
raise Exception("No latent variables estimated!")
else:
# Sort/manipulate the out-of-sample data
_, X_oos = dmatrices(self.formula, oos_data)
X_oos = np.array([X_oos])[0]
X_pred = X_oos[:h]
date_index = self.shift_dates(h)
if self.latent_variables.estimation_method in ['M-H']:
sim_vector = np.zeros([15000,h])
for n in range(0, 15000):
t_z = self.draw_latent_variables(nsims=1).T[0]
_, Y, _, coefficients = self._model(t_z)
coefficients_star = coefficients.T[-1]
theta_pred = np.dot(np.array([coefficients_star]), X_pred.T)[0]
t_z = np.array([self.latent_variables.z_list[k].prior.transform(t_z[k]) for k in range(t_z.shape[0])])
model_scale, model_shape, model_skewness = self._get_scale_and_shape(t_z)
sim_vector[n,:] = self.family.draw_variable(self.link(theta_pred), model_scale, model_shape, model_skewness, theta_pred.shape[0])
mean_values = np.append(Y, self.link(np.array([np.mean(i) for i in sim_vector.T])))
else:
# Retrieve data, dates and (transformed) latent variables
_, Y, _, coefficients = self._model(self.latent_variables.get_z_values())
coefficients_star = coefficients.T[-1]
theta_pred = np.dot(np.array([coefficients_star]), X_pred.T)[0]
t_z = self.transform_z()
sim_vector = np.zeros([15000,h])
mean_values = np.append(Y, self.link(theta_pred))
model_scale, model_shape, model_skewness = self._get_scale_and_shape(t_z)
if self.model_name2 == "Skewt":
m1 = (np.sqrt(model_shape)*sp.gamma((model_shape-1.0)/2.0))/(np.sqrt(np.pi)*sp.gamma(model_shape/2.0))
mean_values += (model_skewness - (1.0/model_skewness))*model_scale*m1
for n in range(0,15000):
sim_vector[n,:] = self.family.draw_variable(self.link(theta_pred),model_scale,model_shape,model_skewness,theta_pred.shape[0])
sim_vector = sim_vector.T
error_bars = []
for pre in range(5,100,5):
error_bars.append(np.insert([np.percentile(i,pre) for i in sim_vector], 0, mean_values[-h-1]))
forecasted_values = mean_values[-h-1:]
plot_values = mean_values[-h-past_values:]
plot_index = date_index[-h-past_values:]
plt.figure(figsize=figsize)
if intervals == True:
alpha =[0.15*i/float(100) for i in range(50,12,-2)]
for count in range(9):
plt.fill_between(date_index[-h-1:], error_bars[count], error_bars[-count],
alpha=alpha[count])
plt.plot(plot_index,plot_values)
plt.title("Forecast for " + self.data_name)
plt.xlabel("Time")
plt.ylabel(self.data_name)
plt.show() |
def setData(self, index, value, role=Qt.EditRole):
"""
Reimplements the :meth:`QAbstractItemModel.setData` method.
:param index: Index.
:type index: QModelIndex
:param value: Value.
:type value: QVariant
:param role: Role.
:type role: int
:return: Method success.
:rtype: bool
"""
if not index.isValid():
return False
node = self.get_node(index)
if role == Qt.DisplayRole or role == Qt.EditRole:
value = foundations.strings.to_string(value.toString())
roles = {Qt.DisplayRole: value, Qt.EditRole: value}
else:
roles = {role: value}
if index.column() == 0:
if (node and hasattr(node, "roles")):
node.roles.update(roles)
node.name = value
else:
attribute = self.get_attribute(node, index.column())
if (attribute and hasattr(attribute, "roles")):
attribute.roles.update(roles)
attribute.value = value
self.dataChanged.emit(index, index)
return True | Reimplements the :meth:`QAbstractItemModel.setData` method.
:param index: Index.
:type index: QModelIndex
:param value: Value.
:type value: QVariant
:param role: Role.
:type role: int
:return: Method success.
:rtype: bool | Below is the the instruction that describes the task:
### Input:
Reimplements the :meth:`QAbstractItemModel.setData` method.
:param index: Index.
:type index: QModelIndex
:param value: Value.
:type value: QVariant
:param role: Role.
:type role: int
:return: Method success.
:rtype: bool
### Response:
def setData(self, index, value, role=Qt.EditRole):
"""
Reimplements the :meth:`QAbstractItemModel.setData` method.
:param index: Index.
:type index: QModelIndex
:param value: Value.
:type value: QVariant
:param role: Role.
:type role: int
:return: Method success.
:rtype: bool
"""
if not index.isValid():
return False
node = self.get_node(index)
if role == Qt.DisplayRole or role == Qt.EditRole:
value = foundations.strings.to_string(value.toString())
roles = {Qt.DisplayRole: value, Qt.EditRole: value}
else:
roles = {role: value}
if index.column() == 0:
if (node and hasattr(node, "roles")):
node.roles.update(roles)
node.name = value
else:
attribute = self.get_attribute(node, index.column())
if (attribute and hasattr(attribute, "roles")):
attribute.roles.update(roles)
attribute.value = value
self.dataChanged.emit(index, index)
return True |
def correct_db_restart(self):
"""Ensure DB is consistent after unexpected restarts. """
LOG.info("Checking consistency of DB")
# Any Segments allocated that's not in Network or FW DB, release it
seg_netid_dict = self.service_segs.get_seg_netid_src(fw_const.FW_CONST)
vlan_netid_dict = self.service_vlans.get_seg_netid_src(
fw_const.FW_CONST)
for netid in seg_netid_dict:
net = self.get_network(netid)
fw_net = self.get_fw_by_netid(netid)
if not net or not fw_net:
if netid in vlan_netid_dict:
vlan_net = vlan_netid_dict[netid]
else:
vlan_net = None
self.delete_os_nwk_db(netid, seg_netid_dict[netid], vlan_net)
LOG.info("Allocated segment for net %s not in DB "
"returning", net)
return
# Any VLANs allocated that's not in Network or FW DB, release it
# For Virtual case, this list will be empty
for netid in vlan_netid_dict:
net = self.get_network(netid)
fw_net = self.get_fw_by_netid(netid)
if not net or not fw_net:
if netid in seg_netid_dict:
vlan_net = seg_netid_dict[netid]
else:
vlan_net = None
self.delete_os_nwk_db(netid, vlan_net, vlan_netid_dict[netid])
LOG.info("Allocated vlan for net %s not in DB returning",
net)
return
# Release all IP's from DB that has no NetID or SubnetID
self.service_in_ip.release_subnet_no_netid()
self.service_out_ip.release_subnet_no_netid()
# It leaves out following possibilities not covered by above.
# 1. Crash can happen just after creating FWID in DB (for init state)
# 2. Crash can happen after 1 + IP address allocation
# 3. Crash can happen after 2 + create OS network
# IP address allocated will be freed as above.
# Only OS network will remain for case 3.
# Also, create that FW DB entry only if that FWID didn't exist.
# Delete all dummy networks created for dummy router from OS if it's
# ID is not in NetDB
# Delete all dummy routers and its associated networks/subnetfrom OS
# if it's ID is not in FWDB
fw_dict = self.get_all_fw_db()
for fw_id in fw_dict:
rtr_nwk = fw_id[0:4] + fw_const.DUMMY_SERVICE_NWK + (
fw_id[len(fw_id) - 4:])
net_list = self.os_helper.get_network_by_name(rtr_nwk)
# TODO(padkrish) Come back to finish this. Not sure of this.
# The router interface should be deleted first and then the network
# Try using show_router
for net in net_list:
# Check for if it's there in NetDB
net_db_item = self.get_network(net.get('id'))
if not net_db_item:
self.os_helper.delete_network_all_subnets(net.get('id'))
LOG.info("Router Network %s not in DB, returning",
net.get('id'))
return
rtr_name = fw_id[0:4] + fw_const.DUMMY_SERVICE_RTR + (
fw_id[len(fw_id) - 4:])
rtr_list = self.os_helper.get_rtr_by_name(rtr_name)
for rtr in rtr_list:
fw_db_item = self.get_fw_by_rtrid(rtr.get('id'))
if not fw_db_item:
# There should be only one
if not net_list:
LOG.error("net_list len is 0, router net not "
"found")
return
fw_type = fw_dict[fw_id].get('fw_type')
if fw_type == fw_const.FW_TENANT_EDGE:
rtr_net = net_list[0]
rtr_subnet_lt = (
self.os_helper.get_subnets_for_net(rtr_net))
if rtr_subnet_lt is None:
LOG.error("router subnet not found for "
"net %s", rtr_net)
return
rtr_subnet_id = rtr_subnet_lt[0].get('id')
LOG.info("Deleted dummy router network %s",
rtr.get('id'))
ret = self.delete_os_dummy_rtr_nwk(rtr.get('id'),
rtr_net.get('id'),
rtr_subnet_id)
return ret
LOG.info("Done Checking consistency of DB, no issues") | Ensure DB is consistent after unexpected restarts. | Below is the the instruction that describes the task:
### Input:
Ensure DB is consistent after unexpected restarts.
### Response:
def correct_db_restart(self):
"""Ensure DB is consistent after unexpected restarts. """
LOG.info("Checking consistency of DB")
# Any Segments allocated that's not in Network or FW DB, release it
seg_netid_dict = self.service_segs.get_seg_netid_src(fw_const.FW_CONST)
vlan_netid_dict = self.service_vlans.get_seg_netid_src(
fw_const.FW_CONST)
for netid in seg_netid_dict:
net = self.get_network(netid)
fw_net = self.get_fw_by_netid(netid)
if not net or not fw_net:
if netid in vlan_netid_dict:
vlan_net = vlan_netid_dict[netid]
else:
vlan_net = None
self.delete_os_nwk_db(netid, seg_netid_dict[netid], vlan_net)
LOG.info("Allocated segment for net %s not in DB "
"returning", net)
return
# Any VLANs allocated that's not in Network or FW DB, release it
# For Virtual case, this list will be empty
for netid in vlan_netid_dict:
net = self.get_network(netid)
fw_net = self.get_fw_by_netid(netid)
if not net or not fw_net:
if netid in seg_netid_dict:
vlan_net = seg_netid_dict[netid]
else:
vlan_net = None
self.delete_os_nwk_db(netid, vlan_net, vlan_netid_dict[netid])
LOG.info("Allocated vlan for net %s not in DB returning",
net)
return
# Release all IP's from DB that has no NetID or SubnetID
self.service_in_ip.release_subnet_no_netid()
self.service_out_ip.release_subnet_no_netid()
# It leaves out following possibilities not covered by above.
# 1. Crash can happen just after creating FWID in DB (for init state)
# 2. Crash can happen after 1 + IP address allocation
# 3. Crash can happen after 2 + create OS network
# IP address allocated will be freed as above.
# Only OS network will remain for case 3.
# Also, create that FW DB entry only if that FWID didn't exist.
# Delete all dummy networks created for dummy router from OS if it's
# ID is not in NetDB
# Delete all dummy routers and its associated networks/subnetfrom OS
# if it's ID is not in FWDB
fw_dict = self.get_all_fw_db()
for fw_id in fw_dict:
rtr_nwk = fw_id[0:4] + fw_const.DUMMY_SERVICE_NWK + (
fw_id[len(fw_id) - 4:])
net_list = self.os_helper.get_network_by_name(rtr_nwk)
# TODO(padkrish) Come back to finish this. Not sure of this.
# The router interface should be deleted first and then the network
# Try using show_router
for net in net_list:
# Check for if it's there in NetDB
net_db_item = self.get_network(net.get('id'))
if not net_db_item:
self.os_helper.delete_network_all_subnets(net.get('id'))
LOG.info("Router Network %s not in DB, returning",
net.get('id'))
return
rtr_name = fw_id[0:4] + fw_const.DUMMY_SERVICE_RTR + (
fw_id[len(fw_id) - 4:])
rtr_list = self.os_helper.get_rtr_by_name(rtr_name)
for rtr in rtr_list:
fw_db_item = self.get_fw_by_rtrid(rtr.get('id'))
if not fw_db_item:
# There should be only one
if not net_list:
LOG.error("net_list len is 0, router net not "
"found")
return
fw_type = fw_dict[fw_id].get('fw_type')
if fw_type == fw_const.FW_TENANT_EDGE:
rtr_net = net_list[0]
rtr_subnet_lt = (
self.os_helper.get_subnets_for_net(rtr_net))
if rtr_subnet_lt is None:
LOG.error("router subnet not found for "
"net %s", rtr_net)
return
rtr_subnet_id = rtr_subnet_lt[0].get('id')
LOG.info("Deleted dummy router network %s",
rtr.get('id'))
ret = self.delete_os_dummy_rtr_nwk(rtr.get('id'),
rtr_net.get('id'),
rtr_subnet_id)
return ret
LOG.info("Done Checking consistency of DB, no issues") |
def _get_encrypted_masterpassword(self):
""" Obtain the encrypted masterkey
.. note:: The encrypted masterkey is checksummed, so that we can
figure out that a provided password is correct or not. The
checksum is only 4 bytes long!
"""
if not self.unlocked():
raise WalletLocked
aes = AESCipher(self.password)
return "{}${}".format(
self._derive_checksum(self.masterkey), aes.encrypt(self.masterkey)
) | Obtain the encrypted masterkey
.. note:: The encrypted masterkey is checksummed, so that we can
figure out that a provided password is correct or not. The
checksum is only 4 bytes long! | Below is the the instruction that describes the task:
### Input:
Obtain the encrypted masterkey
.. note:: The encrypted masterkey is checksummed, so that we can
figure out that a provided password is correct or not. The
checksum is only 4 bytes long!
### Response:
def _get_encrypted_masterpassword(self):
""" Obtain the encrypted masterkey
.. note:: The encrypted masterkey is checksummed, so that we can
figure out that a provided password is correct or not. The
checksum is only 4 bytes long!
"""
if not self.unlocked():
raise WalletLocked
aes = AESCipher(self.password)
return "{}${}".format(
self._derive_checksum(self.masterkey), aes.encrypt(self.masterkey)
) |
def refresh(self, index=None):
"""Refresh tabwidget"""
if index is None:
index = self.get_stack_index()
# Set current editor
if self.get_stack_count():
index = self.get_stack_index()
finfo = self.data[index]
editor = finfo.editor
editor.setFocus()
self._refresh_outlineexplorer(index, update=False)
self.__refresh_statusbar(index)
self.__refresh_readonly(index)
self.__check_file_status(index)
self.__modify_stack_title()
self.update_plugin_title.emit()
else:
editor = None
# Update the modification-state-dependent parameters
self.modification_changed()
# Update FindReplace binding
self.find_widget.set_editor(editor, refresh=False) | Refresh tabwidget | Below is the the instruction that describes the task:
### Input:
Refresh tabwidget
### Response:
def refresh(self, index=None):
"""Refresh tabwidget"""
if index is None:
index = self.get_stack_index()
# Set current editor
if self.get_stack_count():
index = self.get_stack_index()
finfo = self.data[index]
editor = finfo.editor
editor.setFocus()
self._refresh_outlineexplorer(index, update=False)
self.__refresh_statusbar(index)
self.__refresh_readonly(index)
self.__check_file_status(index)
self.__modify_stack_title()
self.update_plugin_title.emit()
else:
editor = None
# Update the modification-state-dependent parameters
self.modification_changed()
# Update FindReplace binding
self.find_widget.set_editor(editor, refresh=False) |
def process_children(self, node, elem, module, path, omit=[]):
"""Proceed with all children of `node`."""
for ch in node.i_children:
if ch not in omit and (ch.i_config or self.doctype == "data"):
self.node_handler.get(ch.keyword, self.ignore)(
ch, elem, module, path) | Proceed with all children of `node`. | Below is the the instruction that describes the task:
### Input:
Proceed with all children of `node`.
### Response:
def process_children(self, node, elem, module, path, omit=[]):
"""Proceed with all children of `node`."""
for ch in node.i_children:
if ch not in omit and (ch.i_config or self.doctype == "data"):
self.node_handler.get(ch.keyword, self.ignore)(
ch, elem, module, path) |
def get(cls, *args, **kwargs):
"""Create and return a serializable Report object, retrieved from cache if possible"""
from indico_piwik.plugin import PiwikPlugin
if not PiwikPlugin.settings.get('cache_enabled'):
return cls(*args, **kwargs).to_serializable()
cache = GenericCache('Piwik.Report')
key = u'{}-{}-{}'.format(cls.__name__, args, kwargs)
report = cache.get(key)
if not report:
report = cls(*args, **kwargs)
cache.set(key, report, PiwikPlugin.settings.get('cache_ttl'))
return report.to_serializable() | Create and return a serializable Report object, retrieved from cache if possible | Below is the the instruction that describes the task:
### Input:
Create and return a serializable Report object, retrieved from cache if possible
### Response:
def get(cls, *args, **kwargs):
"""Create and return a serializable Report object, retrieved from cache if possible"""
from indico_piwik.plugin import PiwikPlugin
if not PiwikPlugin.settings.get('cache_enabled'):
return cls(*args, **kwargs).to_serializable()
cache = GenericCache('Piwik.Report')
key = u'{}-{}-{}'.format(cls.__name__, args, kwargs)
report = cache.get(key)
if not report:
report = cls(*args, **kwargs)
cache.set(key, report, PiwikPlugin.settings.get('cache_ttl'))
return report.to_serializable() |
def get_word_saliency(topic_word_distrib, doc_topic_distrib, doc_lengths):
"""
Calculate word saliency according to Chuang et al. 2012.
saliency(w) = p(w) * distinctiveness(w)
J. Chuang, C. Manning, J. Heer 2012: "Termite: Visualization Techniques for Assessing Textual Topic Models"
"""
p_t = get_marginal_topic_distrib(doc_topic_distrib, doc_lengths)
p_w = get_marginal_word_distrib(topic_word_distrib, p_t)
return p_w * get_word_distinctiveness(topic_word_distrib, p_t) | Calculate word saliency according to Chuang et al. 2012.
saliency(w) = p(w) * distinctiveness(w)
J. Chuang, C. Manning, J. Heer 2012: "Termite: Visualization Techniques for Assessing Textual Topic Models" | Below is the the instruction that describes the task:
### Input:
Calculate word saliency according to Chuang et al. 2012.
saliency(w) = p(w) * distinctiveness(w)
J. Chuang, C. Manning, J. Heer 2012: "Termite: Visualization Techniques for Assessing Textual Topic Models"
### Response:
def get_word_saliency(topic_word_distrib, doc_topic_distrib, doc_lengths):
"""
Calculate word saliency according to Chuang et al. 2012.
saliency(w) = p(w) * distinctiveness(w)
J. Chuang, C. Manning, J. Heer 2012: "Termite: Visualization Techniques for Assessing Textual Topic Models"
"""
p_t = get_marginal_topic_distrib(doc_topic_distrib, doc_lengths)
p_w = get_marginal_word_distrib(topic_word_distrib, p_t)
return p_w * get_word_distinctiveness(topic_word_distrib, p_t) |
def p44(msg):
"""Static pressure.
Args:
msg (String): 28 bytes hexadecimal message string
Returns:
int: static pressure in hPa
"""
d = hex2bin(data(msg))
if d[34] == '0':
return None
p = bin2int(d[35:46]) # hPa
return p | Static pressure.
Args:
msg (String): 28 bytes hexadecimal message string
Returns:
int: static pressure in hPa | Below is the the instruction that describes the task:
### Input:
Static pressure.
Args:
msg (String): 28 bytes hexadecimal message string
Returns:
int: static pressure in hPa
### Response:
def p44(msg):
"""Static pressure.
Args:
msg (String): 28 bytes hexadecimal message string
Returns:
int: static pressure in hPa
"""
d = hex2bin(data(msg))
if d[34] == '0':
return None
p = bin2int(d[35:46]) # hPa
return p |
def outputStandard(self, extended=False):
"""
Standard, multi-line output display
"""
successfulResponses = len(
[
True for rsp in self.results if rsp['success']
]
)
sys.stdout.write(""" - RESULTS
I asked {num_servers} servers for {rec_type} records related to {domain},
{success_responses} responded with records and {error_responses} gave errors
Here are the results;\n\n\n""".format(
num_servers=len(self.serverList),
rec_type=self.recordType,
domain=self.domain,
success_responses=successfulResponses,
error_responses=len(self.serverList) - successfulResponses
))
errors = []
for rsp in self.resultsColated:
out = []
if extended:
out.append("The following servers\n")
out.append("\n".join([
" - {0} ({1} - {2})".
format(s['ip'], s['provider'], s['country'])
for s in rsp['servers']]))
out.append("\nresponded with;\n")
else:
out.append("{num_servers} servers responded with;\n".format(
num_servers=len(rsp['servers']))
)
out.append(
"\n".join(rsp['results'])
)
out.append("\n\n")
if rsp['success']:
sys.stdout.write("".join(out))
else:
errors.append("".join(out))
sys.stdout.write("\n\nAnd here are the errors;\n\n\n")
sys.stdout.write("".join(errors)) | Standard, multi-line output display | Below is the the instruction that describes the task:
### Input:
Standard, multi-line output display
### Response:
def outputStandard(self, extended=False):
"""
Standard, multi-line output display
"""
successfulResponses = len(
[
True for rsp in self.results if rsp['success']
]
)
sys.stdout.write(""" - RESULTS
I asked {num_servers} servers for {rec_type} records related to {domain},
{success_responses} responded with records and {error_responses} gave errors
Here are the results;\n\n\n""".format(
num_servers=len(self.serverList),
rec_type=self.recordType,
domain=self.domain,
success_responses=successfulResponses,
error_responses=len(self.serverList) - successfulResponses
))
errors = []
for rsp in self.resultsColated:
out = []
if extended:
out.append("The following servers\n")
out.append("\n".join([
" - {0} ({1} - {2})".
format(s['ip'], s['provider'], s['country'])
for s in rsp['servers']]))
out.append("\nresponded with;\n")
else:
out.append("{num_servers} servers responded with;\n".format(
num_servers=len(rsp['servers']))
)
out.append(
"\n".join(rsp['results'])
)
out.append("\n\n")
if rsp['success']:
sys.stdout.write("".join(out))
else:
errors.append("".join(out))
sys.stdout.write("\n\nAnd here are the errors;\n\n\n")
sys.stdout.write("".join(errors)) |
def solve_sdr(prob, *args, **kwargs):
"""Solve the SDP relaxation.
"""
# lifted variables and semidefinite constraint
X = cvx.Semidef(prob.n + 1)
W = prob.f0.homogeneous_form()
rel_obj = cvx.Minimize(cvx.sum_entries(cvx.mul_elemwise(W, X)))
rel_constr = [X[-1, -1] == 1]
for f in prob.fs:
W = f.homogeneous_form()
lhs = cvx.sum_entries(cvx.mul_elemwise(W, X))
if f.relop == '==':
rel_constr.append(lhs == 0)
else:
rel_constr.append(lhs <= 0)
rel_prob = cvx.Problem(rel_obj, rel_constr)
rel_prob.solve(*args, **kwargs)
if rel_prob.status not in [cvx.OPTIMAL, cvx.OPTIMAL_INACCURATE]:
raise Exception("Relaxation problem status: %s" % rel_prob.status)
return X.value, rel_prob.value | Solve the SDP relaxation. | Below is the the instruction that describes the task:
### Input:
Solve the SDP relaxation.
### Response:
def solve_sdr(prob, *args, **kwargs):
"""Solve the SDP relaxation.
"""
# lifted variables and semidefinite constraint
X = cvx.Semidef(prob.n + 1)
W = prob.f0.homogeneous_form()
rel_obj = cvx.Minimize(cvx.sum_entries(cvx.mul_elemwise(W, X)))
rel_constr = [X[-1, -1] == 1]
for f in prob.fs:
W = f.homogeneous_form()
lhs = cvx.sum_entries(cvx.mul_elemwise(W, X))
if f.relop == '==':
rel_constr.append(lhs == 0)
else:
rel_constr.append(lhs <= 0)
rel_prob = cvx.Problem(rel_obj, rel_constr)
rel_prob.solve(*args, **kwargs)
if rel_prob.status not in [cvx.OPTIMAL, cvx.OPTIMAL_INACCURATE]:
raise Exception("Relaxation problem status: %s" % rel_prob.status)
return X.value, rel_prob.value |
def check_match(self, name):
"""
Check if a release version matches any of the specificed patterns.
Parameters
==========
name: str
Release name
Returns
=======
bool:
True if it matches, False otherwise.
"""
return any(pattern.match(name) for pattern in self.patterns) | Check if a release version matches any of the specificed patterns.
Parameters
==========
name: str
Release name
Returns
=======
bool:
True if it matches, False otherwise. | Below is the the instruction that describes the task:
### Input:
Check if a release version matches any of the specificed patterns.
Parameters
==========
name: str
Release name
Returns
=======
bool:
True if it matches, False otherwise.
### Response:
def check_match(self, name):
"""
Check if a release version matches any of the specificed patterns.
Parameters
==========
name: str
Release name
Returns
=======
bool:
True if it matches, False otherwise.
"""
return any(pattern.match(name) for pattern in self.patterns) |
def protected_resource_view(scopes=None):
"""
View decorator. The client accesses protected resources by presenting the
access token to the resource server.
https://tools.ietf.org/html/rfc6749#section-7
"""
if scopes is None:
scopes = []
def wrapper(view):
def view_wrapper(request, *args, **kwargs):
access_token = extract_access_token(request)
try:
try:
kwargs['token'] = Token.objects.get(access_token=access_token)
except Token.DoesNotExist:
logger.debug('[UserInfo] Token does not exist: %s', access_token)
raise BearerTokenError('invalid_token')
if kwargs['token'].has_expired():
logger.debug('[UserInfo] Token has expired: %s', access_token)
raise BearerTokenError('invalid_token')
if not set(scopes).issubset(set(kwargs['token'].scope)):
logger.debug('[UserInfo] Missing openid scope.')
raise BearerTokenError('insufficient_scope')
except BearerTokenError as error:
response = HttpResponse(status=error.status)
response['WWW-Authenticate'] = 'error="{0}", error_description="{1}"'.format(
error.code, error.description)
return response
return view(request, *args, **kwargs)
return view_wrapper
return wrapper | View decorator. The client accesses protected resources by presenting the
access token to the resource server.
https://tools.ietf.org/html/rfc6749#section-7 | Below is the the instruction that describes the task:
### Input:
View decorator. The client accesses protected resources by presenting the
access token to the resource server.
https://tools.ietf.org/html/rfc6749#section-7
### Response:
def protected_resource_view(scopes=None):
    """
    View decorator. The client accesses protected resources by presenting the
    access token to the resource server.
    https://tools.ietf.org/html/rfc6749#section-7

    :param scopes: list of scope strings the token must carry; defaults to
        no required scopes.

    The wrapped view receives the resolved token object via the ``token``
    keyword argument.  On failure, an HTTP response carrying a
    ``WWW-Authenticate`` header (RFC 6750 error style) is returned instead
    of calling the view.
    """
    if scopes is None:
        scopes = []

    def wrapper(view):
        def view_wrapper(request, *args, **kwargs):
            access_token = extract_access_token(request)
            try:
                try:
                    # Look up the token; a miss is reported the same way as
                    # an expired token ('invalid_token').
                    kwargs['token'] = Token.objects.get(access_token=access_token)
                except Token.DoesNotExist:
                    logger.debug('[UserInfo] Token does not exist: %s', access_token)
                    raise BearerTokenError('invalid_token')
                if kwargs['token'].has_expired():
                    logger.debug('[UserInfo] Token has expired: %s', access_token)
                    raise BearerTokenError('invalid_token')
                # Every required scope must be present on the token.
                if not set(scopes).issubset(set(kwargs['token'].scope)):
                    logger.debug('[UserInfo] Missing openid scope.')
                    raise BearerTokenError('insufficient_scope')
            except BearerTokenError as error:
                # Translate the bearer-token failure into a bare HTTP
                # response; the view itself is never invoked.
                response = HttpResponse(status=error.status)
                response['WWW-Authenticate'] = 'error="{0}", error_description="{1}"'.format(
                    error.code, error.description)
                return response
            return view(request, *args, **kwargs)
        return view_wrapper
    return wrapper
def find_by_name(name):
"""
Find and return a format by name.
:param name: A string describing the name of the format.
"""
for format in FORMATS:
if name == format.name:
return format
raise UnknownFormat('No format found with name "%s"' % name) | Find and return a format by name.
:param name: A string describing the name of the format. | Below is the the instruction that describes the task:
### Input:
Find and return a format by name.
:param name: A string describing the name of the format.
### Response:
def find_by_name(name):
    """
    Find and return a format by name.

    :param name: A string describing the name of the format.
    :returns: The first entry in ``FORMATS`` whose ``name`` matches.
    :raises UnknownFormat: If no registered format has the given name.
    """
    # Local renamed from ``format`` to avoid shadowing the builtin.
    for fmt in FORMATS:
        if name == fmt.name:
            return fmt
    raise UnknownFormat('No format found with name "%s"' % name)
def image_meta_set(image_id=None,
name=None,
profile=None,
**kwargs): # pylint: disable=C0103
'''
Sets a key=value pair in the metadata for an image (nova image-meta set)
CLI Examples:
.. code-block:: bash
salt '*' nova.image_meta_set 6f52b2ff-0b31-4d84-8fd1-af45b84824f6 cheese=gruyere
salt '*' nova.image_meta_set name=myimage salad=pasta beans=baked
'''
conn = _auth(profile, **kwargs)
return conn.image_meta_set(
image_id,
name,
**kwargs
) | Sets a key=value pair in the metadata for an image (nova image-meta set)
CLI Examples:
.. code-block:: bash
salt '*' nova.image_meta_set 6f52b2ff-0b31-4d84-8fd1-af45b84824f6 cheese=gruyere
salt '*' nova.image_meta_set name=myimage salad=pasta beans=baked | Below is the the instruction that describes the task:
### Input:
Sets a key=value pair in the metadata for an image (nova image-meta set)
CLI Examples:
.. code-block:: bash
salt '*' nova.image_meta_set 6f52b2ff-0b31-4d84-8fd1-af45b84824f6 cheese=gruyere
salt '*' nova.image_meta_set name=myimage salad=pasta beans=baked
### Response:
def image_meta_set(image_id=None,
                   name=None,
                   profile=None,
                   **kwargs):  # pylint: disable=C0103
    '''
    Sets a key=value pair in the metadata for an image (nova image-meta set)

    CLI Examples:

    .. code-block:: bash

        salt '*' nova.image_meta_set 6f52b2ff-0b31-4d84-8fd1-af45b84824f6 cheese=gruyere
        salt '*' nova.image_meta_set name=myimage salad=pasta beans=baked
    '''
    # ``profile`` selects the auth profile used to build the connection.
    # NOTE(review): **kwargs is forwarded to both _auth() and
    # conn.image_meta_set() -- it appears to carry connection options and
    # the metadata key=value pairs together; confirm the connection object
    # is expected to separate them itself.
    conn = _auth(profile, **kwargs)
    return conn.image_meta_set(
        image_id,
        name,
        **kwargs
    )
def validate_signature(self, signedtext, cert_file, cert_type, node_name, node_id, id_attr):
"""
Validate signature on XML document.
The parameters actually used in this CryptoBackend
implementation are :
:param signedtext: The signed XML data as string
:param cert_file: xmlsec key_spec string(), filename,
'pkcs11://' URI or PEM data
:param cert_type: string, must be 'pem' for now
:returns: True on successful validation, False otherwise
"""
if cert_type != 'pem':
raise Unsupported('Only PEM certs supported here')
import xmlsec
xml = xmlsec.parse_xml(signedtext)
try:
return xmlsec.verify(xml, cert_file)
except xmlsec.XMLSigException:
return False | Validate signature on XML document.
The parameters actually used in this CryptoBackend
implementation are :
:param signedtext: The signed XML data as string
:param cert_file: xmlsec key_spec string(), filename,
'pkcs11://' URI or PEM data
:param cert_type: string, must be 'pem' for now
:returns: True on successful validation, False otherwise | Below is the the instruction that describes the task:
### Input:
Validate signature on XML document.
The parameters actually used in this CryptoBackend
implementation are :
:param signedtext: The signed XML data as string
:param cert_file: xmlsec key_spec string(), filename,
'pkcs11://' URI or PEM data
:param cert_type: string, must be 'pem' for now
:returns: True on successful validation, False otherwise
### Response:
def validate_signature(self, signedtext, cert_file, cert_type, node_name, node_id, id_attr):
    """
    Validate signature on XML document.

    The parameters actually used in this CryptoBackend
    implementation are :

    :param signedtext: The signed XML data as string
    :param cert_file: xmlsec key_spec string(), filename,
        'pkcs11://' URI or PEM data
    :param cert_type: string, must be 'pem' for now
    :returns: True on successful validation, False otherwise
    :raises Unsupported: if ``cert_type`` is anything other than 'pem'

    ``node_name``, ``node_id`` and ``id_attr`` are accepted for interface
    compatibility but are not used by this backend.
    """
    if cert_type != 'pem':
        raise Unsupported('Only PEM certs supported here')
    # Imported lazily so the module stays importable without the optional
    # ``xmlsec`` dependency.
    import xmlsec
    xml = xmlsec.parse_xml(signedtext)
    try:
        return xmlsec.verify(xml, cert_file)
    except xmlsec.XMLSigException:
        # A failed signature check means "invalid", not an error.
        return False
def _consume(self):
""" Consume commands from the queue.
The command is repeated according to the configured value.
Wait after each command is sent.
The bridge socket is a shared resource. It must only
be used by one thread at a time. Note that this can and
will delay commands if multiple groups are attempting
to communicate at the same time on the same bridge.
"""
while not self.is_closed:
# Get command from queue.
msg = self._command_queue.get()
# Closed
if msg is None:
return
# Use the lock so we are sure is_ready is not changed during execution
# and the socket is not in use
with self._lock:
# Check if bridge is ready
if self.is_ready:
(command, reps, wait) = msg
# Select group if a different group is currently selected.
if command.select and self._selected_number != command.group_number:
if self._send_raw(command.select_command.get_bytes(self)):
self._selected_number = command.group_number
time.sleep(SELECT_WAIT)
else:
# Stop sending on socket error
self.is_ready = False
# Repeat command as necessary.
for _ in range(reps):
if self.is_ready:
if self._send_raw(command.get_bytes(self)):
time.sleep(wait)
else:
# Stop sending on socket error
self.is_ready = False
# Wait if bridge is not ready, we're only reading is_ready, no lock needed
if not self.is_ready and not self.is_closed:
# For older bridges, always try again, there's no keep-alive thread
if self.version < 6:
# Give the reconnect some time
time.sleep(RECONNECT_TIME)
self.is_ready = True | Consume commands from the queue.
The command is repeated according to the configured value.
Wait after each command is sent.
The bridge socket is a shared resource. It must only
be used by one thread at a time. Note that this can and
will delay commands if multiple groups are attempting
to communicate at the same time on the same bridge. | Below is the the instruction that describes the task:
### Input:
Consume commands from the queue.
The command is repeated according to the configured value.
Wait after each command is sent.
The bridge socket is a shared resource. It must only
be used by one thread at a time. Note that this can and
will delay commands if multiple groups are attempting
to communicate at the same time on the same bridge.
### Response:
def _consume(self):
    """ Consume commands from the queue.

    The command is repeated according to the configured value.
    Wait after each command is sent.

    The bridge socket is a shared resource. It must only
    be used by one thread at a time. Note that this can and
    will delay commands if multiple groups are attempting
    to communicate at the same time on the same bridge.

    Runs until ``self.is_closed`` is set or ``None`` is pushed onto the
    queue as a shutdown sentinel.
    """
    while not self.is_closed:
        # Get command from queue (blocking).
        msg = self._command_queue.get()
        # Closed: None is the shutdown sentinel.
        if msg is None:
            return
        # Use the lock so we are sure is_ready is not changed during execution
        # and the socket is not in use
        with self._lock:
            # Check if bridge is ready
            if self.is_ready:
                (command, reps, wait) = msg
                # Select group if a different group is currently selected.
                if command.select and self._selected_number != command.group_number:
                    if self._send_raw(command.select_command.get_bytes(self)):
                        self._selected_number = command.group_number
                        time.sleep(SELECT_WAIT)
                    else:
                        # Stop sending on socket error
                        self.is_ready = False
                # Repeat command as necessary; re-check is_ready each pass
                # since a send failure can flip it mid-loop.
                for _ in range(reps):
                    if self.is_ready:
                        if self._send_raw(command.get_bytes(self)):
                            time.sleep(wait)
                        else:
                            # Stop sending on socket error
                            self.is_ready = False
        # Wait if bridge is not ready, we're only reading is_ready, no lock needed
        if not self.is_ready and not self.is_closed:
            # For older bridges, always try again, there's no keep-alive thread
            if self.version < 6:
                # Give the reconnect some time
                time.sleep(RECONNECT_TIME)
                self.is_ready = True
def isnumeric(obj):
'''
Return true if obj is a numeric value
'''
from decimal import Decimal
if type(obj) == Decimal:
return True
else:
try:
float(obj)
except:
return False
return True | Return true if obj is a numeric value | Below is the the instruction that describes the task:
### Input:
Return true if obj is a numeric value
### Response:
def isnumeric(obj):
    '''
    Return True if obj is a numeric value.

    A value counts as numeric when it is a :class:`decimal.Decimal`
    (including subclasses) or when :func:`float` can convert it
    (ints, floats, bools, numeric strings, ...).

    :param obj: Any object.
    :returns: bool
    '''
    from decimal import Decimal
    # isinstance() instead of ``type(obj) == Decimal`` so Decimal
    # subclasses are recognized too.
    if isinstance(obj, Decimal):
        return True
    try:
        # float() raises TypeError for non-castable types (None, lists, ...)
        # and ValueError for non-numeric strings; anything else is a real
        # bug and should propagate instead of being swallowed by the old
        # bare ``except:`` (which even caught KeyboardInterrupt).
        float(obj)
    except (TypeError, ValueError):
        return False
    return True
def _combine_results(self, match_as_dict):
'''Combine results from different parsed parts:
we look for non-empty results in values like
'postal_code_b' or 'postal_code_c' and store
them as main value.
So 'postal_code_b':'123456'
becomes:
'postal_code' :'123456'
'''
keys = []
vals = []
for k, v in six.iteritems(match_as_dict):
if k[-2:] in '_a_b_c_d_e_f_g_h_i_j_k_l_m':
if v:
# strip last 2 chars: '..._b' -> '...'
keys.append(k[:-2])
vals.append(v)
else:
if k not in keys:
keys.append(k)
vals.append(v)
return dict(zip(keys, vals)) | Combine results from different parsed parts:
we look for non-empty results in values like
'postal_code_b' or 'postal_code_c' and store
them as main value.
So 'postal_code_b':'123456'
becomes:
'postal_code' :'123456' | Below is the the instruction that describes the task:
### Input:
Combine results from different parsed parts:
we look for non-empty results in values like
'postal_code_b' or 'postal_code_c' and store
them as main value.
So 'postal_code_b':'123456'
becomes:
'postal_code' :'123456'
### Response:
def _combine_results(self, match_as_dict):
'''Combine results from different parsed parts:
we look for non-empty results in values like
'postal_code_b' or 'postal_code_c' and store
them as main value.
So 'postal_code_b':'123456'
becomes:
'postal_code' :'123456'
'''
keys = []
vals = []
for k, v in six.iteritems(match_as_dict):
if k[-2:] in '_a_b_c_d_e_f_g_h_i_j_k_l_m':
if v:
# strip last 2 chars: '..._b' -> '...'
keys.append(k[:-2])
vals.append(v)
else:
if k not in keys:
keys.append(k)
vals.append(v)
return dict(zip(keys, vals)) |
def generate_view_data(self):
"""Generate the views."""
self.view_data['version'] = '{} {}'.format('Glances', __version__)
self.view_data['psutil_version'] = ' with psutil {}'.format(psutil_version)
try:
self.view_data['configuration_file'] = 'Configuration file: {}'.format(self.config.loaded_config_file)
except AttributeError:
pass
msg_col = ' {0:1} {1:35}'
msg_col2 = ' {0:1} {1:35}'
self.view_data['sort_auto'] = msg_col.format('a', 'Sort processes automatically')
self.view_data['sort_network'] = msg_col2.format('b', 'Bytes or bits for network I/O')
self.view_data['sort_cpu'] = msg_col.format('c', 'Sort processes by CPU%')
self.view_data['show_hide_alert'] = msg_col2.format('l', 'Show/hide alert logs')
self.view_data['sort_mem'] = msg_col.format('m', 'Sort processes by MEM%')
self.view_data['sort_user'] = msg_col.format('u', 'Sort processes by USER')
self.view_data['delete_warning_alerts'] = msg_col2.format('w', 'Delete warning alerts')
self.view_data['sort_proc'] = msg_col.format('p', 'Sort processes by name')
self.view_data['delete_warning_critical_alerts'] = msg_col2.format('x', 'Delete warning and critical alerts')
self.view_data['sort_io'] = msg_col.format('i', 'Sort processes by I/O rate')
self.view_data['percpu'] = msg_col2.format('1', 'Global CPU or per-CPU stats')
self.view_data['sort_cpu_times'] = msg_col.format('t', 'Sort processes by TIME')
self.view_data['show_hide_help'] = msg_col2.format('h', 'Show/hide this help screen')
self.view_data['show_hide_diskio'] = msg_col.format('d', 'Show/hide disk I/O stats')
self.view_data['show_hide_irq'] = msg_col2.format('Q', 'Show/hide IRQ stats')
self.view_data['view_network_io_combination'] = msg_col2.format('T', 'View network I/O as combination')
self.view_data['show_hide_filesystem'] = msg_col.format('f', 'Show/hide filesystem stats')
self.view_data['view_cumulative_network'] = msg_col2.format('U', 'View cumulative network I/O')
self.view_data['show_hide_network'] = msg_col.format('n', 'Show/hide network stats')
self.view_data['show_hide_filesytem_freespace'] = msg_col2.format('F', 'Show filesystem free space')
self.view_data['show_hide_sensors'] = msg_col.format('s', 'Show/hide sensors stats')
self.view_data['generate_graphs'] = msg_col2.format('g', 'Generate graphs for current history')
self.view_data['show_hide_left_sidebar'] = msg_col.format('2', 'Show/hide left sidebar')
self.view_data['reset_history'] = msg_col2.format('r', 'Reset history')
self.view_data['enable_disable_process_stats'] = msg_col.format('z', 'Enable/disable processes stats')
self.view_data['quit'] = msg_col2.format('q', 'Quit (Esc and Ctrl-C also work)')
self.view_data['enable_disable_top_extends_stats'] = msg_col.format('e', 'Enable/disable top extended stats')
self.view_data['enable_disable_short_processname'] = msg_col.format('/', 'Enable/disable short processes name')
self.view_data['enable_disable_irix'] = msg_col.format('0', 'Enable/disable Irix process CPU')
self.view_data['enable_disable_docker'] = msg_col2.format('D', 'Enable/disable Docker stats')
self.view_data['enable_disable_quick_look'] = msg_col.format('3', 'Enable/disable quick look plugin')
self.view_data['show_hide_ip'] = msg_col2.format('I', 'Show/hide IP module')
self.view_data['diskio_iops'] = msg_col2.format('B', 'Count/rate for Disk I/O')
self.view_data['show_hide_top_menu'] = msg_col2.format('5', 'Show/hide top menu (QL, CPU, MEM, SWAP and LOAD)')
self.view_data['enable_disable_gpu'] = msg_col.format('G', 'Enable/disable gpu plugin')
self.view_data['enable_disable_mean_gpu'] = msg_col2.format('6', 'Enable/disable mean gpu')
self.view_data['edit_pattern_filter'] = 'ENTER: Edit the process filter pattern' | Generate the views. | Below is the the instruction that describes the task:
### Input:
Generate the views.
### Response:
def generate_view_data(self):
    """Generate the views.

    Populates ``self.view_data`` with pre-formatted help strings keyed by
    the action they describe; the front-end only renders these entries.
    """
    self.view_data['version'] = '{} {}'.format('Glances', __version__)
    self.view_data['psutil_version'] = ' with psutil {}'.format(psutil_version)
    try:
        self.view_data['configuration_file'] = 'Configuration file: {}'.format(self.config.loaded_config_file)
    except AttributeError:
        # No configuration file was loaded; skip the entry.
        pass
    # NOTE(review): msg_col and msg_col2 are identical format strings
    # (1-char key + 35-char description) -- presumably one per help column;
    # confirm before merging them.
    msg_col = ' {0:1} {1:35}'
    msg_col2 = ' {0:1} {1:35}'
    self.view_data['sort_auto'] = msg_col.format('a', 'Sort processes automatically')
    self.view_data['sort_network'] = msg_col2.format('b', 'Bytes or bits for network I/O')
    self.view_data['sort_cpu'] = msg_col.format('c', 'Sort processes by CPU%')
    self.view_data['show_hide_alert'] = msg_col2.format('l', 'Show/hide alert logs')
    self.view_data['sort_mem'] = msg_col.format('m', 'Sort processes by MEM%')
    self.view_data['sort_user'] = msg_col.format('u', 'Sort processes by USER')
    self.view_data['delete_warning_alerts'] = msg_col2.format('w', 'Delete warning alerts')
    self.view_data['sort_proc'] = msg_col.format('p', 'Sort processes by name')
    self.view_data['delete_warning_critical_alerts'] = msg_col2.format('x', 'Delete warning and critical alerts')
    self.view_data['sort_io'] = msg_col.format('i', 'Sort processes by I/O rate')
    self.view_data['percpu'] = msg_col2.format('1', 'Global CPU or per-CPU stats')
    self.view_data['sort_cpu_times'] = msg_col.format('t', 'Sort processes by TIME')
    self.view_data['show_hide_help'] = msg_col2.format('h', 'Show/hide this help screen')
    self.view_data['show_hide_diskio'] = msg_col.format('d', 'Show/hide disk I/O stats')
    self.view_data['show_hide_irq'] = msg_col2.format('Q', 'Show/hide IRQ stats')
    self.view_data['view_network_io_combination'] = msg_col2.format('T', 'View network I/O as combination')
    self.view_data['show_hide_filesystem'] = msg_col.format('f', 'Show/hide filesystem stats')
    self.view_data['view_cumulative_network'] = msg_col2.format('U', 'View cumulative network I/O')
    self.view_data['show_hide_network'] = msg_col.format('n', 'Show/hide network stats')
    # NOTE(review): key spelled 'filesytem' (sic) -- kept as-is because
    # renderers look it up by this exact key.
    self.view_data['show_hide_filesytem_freespace'] = msg_col2.format('F', 'Show filesystem free space')
    self.view_data['show_hide_sensors'] = msg_col.format('s', 'Show/hide sensors stats')
    self.view_data['generate_graphs'] = msg_col2.format('g', 'Generate graphs for current history')
    self.view_data['show_hide_left_sidebar'] = msg_col.format('2', 'Show/hide left sidebar')
    self.view_data['reset_history'] = msg_col2.format('r', 'Reset history')
    self.view_data['enable_disable_process_stats'] = msg_col.format('z', 'Enable/disable processes stats')
    self.view_data['quit'] = msg_col2.format('q', 'Quit (Esc and Ctrl-C also work)')
    self.view_data['enable_disable_top_extends_stats'] = msg_col.format('e', 'Enable/disable top extended stats')
    self.view_data['enable_disable_short_processname'] = msg_col.format('/', 'Enable/disable short processes name')
    self.view_data['enable_disable_irix'] = msg_col.format('0', 'Enable/disable Irix process CPU')
    self.view_data['enable_disable_docker'] = msg_col2.format('D', 'Enable/disable Docker stats')
    self.view_data['enable_disable_quick_look'] = msg_col.format('3', 'Enable/disable quick look plugin')
    self.view_data['show_hide_ip'] = msg_col2.format('I', 'Show/hide IP module')
    self.view_data['diskio_iops'] = msg_col2.format('B', 'Count/rate for Disk I/O')
    self.view_data['show_hide_top_menu'] = msg_col2.format('5', 'Show/hide top menu (QL, CPU, MEM, SWAP and LOAD)')
    self.view_data['enable_disable_gpu'] = msg_col.format('G', 'Enable/disable gpu plugin')
    self.view_data['enable_disable_mean_gpu'] = msg_col2.format('6', 'Enable/disable mean gpu')
    self.view_data['edit_pattern_filter'] = 'ENTER: Edit the process filter pattern'
def import_file(
src,
file_name,
imported_format=ImportedFormat.BOTH,
progress_from=0.0,
progress_to=None,
):
"""Import file to working directory.
:param src: Source file path or URL
:param file_name: Source file name
:param imported_format: Import file format (extracted, compressed or both)
:param progress_from: Initial progress value
:param progress_to: Final progress value
:return: Destination file path (if extracted and compressed, extracted path given)
"""
if progress_to is not None:
if not isinstance(progress_from, float) or not isinstance(progress_to, float):
raise ValueError("Progress_from and progress_to must be float")
if progress_from < 0 or progress_from > 1:
raise ValueError("Progress_from must be between 0 and 1")
if progress_to < 0 or progress_to > 1:
raise ValueError("Progress_to must be between 0 and 1")
if progress_from >= progress_to:
raise ValueError("Progress_to must be higher than progress_from")
print("Importing and compressing {}...".format(file_name))
def importGz():
"""Import gzipped file.
The file_name must have .gz extension.
"""
if imported_format != ImportedFormat.COMPRESSED: # Extracted file required
with open(file_name[:-3], 'wb') as f_out, gzip.open(src, 'rb') as f_in:
try:
shutil.copyfileobj(f_in, f_out, CHUNK_SIZE)
except zlib.error:
raise ValueError("Invalid gzip file format: {}".format(file_name))
else: # Extracted file not-required
# Verify the compressed file.
with gzip.open(src, 'rb') as f:
try:
while f.read(CHUNK_SIZE) != b'':
pass
except zlib.error:
raise ValueError("Invalid gzip file format: {}".format(file_name))
if imported_format != ImportedFormat.EXTRACTED: # Compressed file required
try:
shutil.copyfile(src, file_name)
except shutil.SameFileError:
pass # Skip copy of downloaded files
if imported_format == ImportedFormat.COMPRESSED:
return file_name
else:
return file_name[:-3]
def import7z():
"""Import compressed file in various formats.
Supported extensions: .bz2, .zip, .rar, .7z, .tar.gz, and .tar.bz2.
"""
extracted_name, _ = os.path.splitext(file_name)
destination_name = extracted_name
temp_dir = 'temp_{}'.format(extracted_name)
cmd = '7z x -y -o{} {}'.format(shlex.quote(temp_dir), shlex.quote(src))
try:
subprocess.check_call(cmd, shell=True)
except subprocess.CalledProcessError as err:
if err.returncode == 2:
raise ValueError("Failed to extract file: {}".format(file_name))
else:
raise
paths = os.listdir(temp_dir)
if len(paths) == 1 and os.path.isfile(os.path.join(temp_dir, paths[0])):
# Single file in archive.
temp_file = os.path.join(temp_dir, paths[0])
if imported_format != ImportedFormat.EXTRACTED: # Compressed file required
with open(temp_file, 'rb') as f_in, gzip.open(
extracted_name + '.gz', 'wb'
) as f_out:
shutil.copyfileobj(f_in, f_out, CHUNK_SIZE)
if imported_format != ImportedFormat.COMPRESSED: # Extracted file required
shutil.move(temp_file, './{}'.format(extracted_name))
if extracted_name.endswith('.tar'):
with tarfile.open(extracted_name) as tar:
tar.extractall()
os.remove(extracted_name)
destination_name, _ = os.path.splitext(extracted_name)
else:
destination_name = extracted_name + '.gz'
else:
# Directory or several files in archive.
if imported_format != ImportedFormat.EXTRACTED: # Compressed file required
with tarfile.open(extracted_name + '.tar.gz', 'w:gz') as tar:
for fname in glob.glob(os.path.join(temp_dir, '*')):
tar.add(fname, os.path.basename(fname))
if imported_format != ImportedFormat.COMPRESSED: # Extracted file required
for path in os.listdir(temp_dir):
shutil.move(os.path.join(temp_dir, path), './{}'.format(path))
else:
destination_name = extracted_name + '.tar.gz'
shutil.rmtree(temp_dir)
return destination_name
def importUncompressed():
"""Import uncompressed file."""
if imported_format != ImportedFormat.EXTRACTED: # Compressed file required
with open(src, 'rb') as f_in, gzip.open(file_name + '.gz', 'wb') as f_out:
shutil.copyfileobj(f_in, f_out, CHUNK_SIZE)
if imported_format != ImportedFormat.COMPRESSED: # Extracted file required
try:
shutil.copyfile(src, file_name)
except shutil.SameFileError:
pass # Skip copy of downloaded files
return (
file_name + '.gz'
if imported_format == ImportedFormat.COMPRESSED
else file_name
)
# Large file download from Google Drive requires cookie and token.
try:
response = None
if re.match(
r'^https://drive.google.com/[-A-Za-z0-9\+&@#/%?=~_|!:,.;]*[-A-Za-z0-9\+&@#/%=~_|]$',
src,
):
session = requests.Session()
response = session.get(src, stream=True)
token = None
for key, value in response.cookies.items():
if key.startswith('download_warning'):
token = value
break
if token is not None:
params = {'confirm': token}
response = session.get(src, params=params, stream=True)
elif re.match(
r'^(https?|ftp)://[-A-Za-z0-9\+&@#/%?=~_|!:,.;]*[-A-Za-z0-9\+&@#/%=~_|]$',
src,
):
response = requests.get(src, stream=True)
except requests.exceptions.ConnectionError:
raise requests.exceptions.ConnectionError("Could not connect to {}".format(src))
if response:
with open(file_name, 'wb') as f:
total = response.headers.get('content-length')
total = float(total) if total else None
downloaded = 0
current_progress = 0
for content in response.iter_content(chunk_size=CHUNK_SIZE):
f.write(content)
if total is not None and progress_to is not None:
downloaded += len(content)
progress_span = progress_to - progress_from
next_progress = progress_from + progress_span * downloaded / total
next_progress = round(next_progress, 2)
if next_progress > current_progress:
print(progress(next_progress))
current_progress = next_progress
# Check if a temporary file exists.
if not os.path.isfile(file_name):
raise ValueError("Downloaded file not found {}".format(file_name))
src = file_name
else:
if not os.path.isfile(src):
raise ValueError("Source file not found {}".format(src))
# Decide which import should be used.
if re.search(r'\.(bz2|zip|rar|7z|tgz|tar\.gz|tar\.bz2)$', file_name):
destination_file_name = import7z()
elif file_name.endswith('.gz'):
destination_file_name = importGz()
else:
destination_file_name = importUncompressed()
if progress_to is not None:
print(progress(progress_to))
return destination_file_name | Import file to working directory.
:param src: Source file path or URL
:param file_name: Source file name
:param imported_format: Import file format (extracted, compressed or both)
:param progress_from: Initial progress value
:param progress_to: Final progress value
:return: Destination file path (if extracted and compressed, extracted path given) | Below is the the instruction that describes the task:
### Input:
Import file to working directory.
:param src: Source file path or URL
:param file_name: Source file name
:param imported_format: Import file format (extracted, compressed or both)
:param progress_from: Initial progress value
:param progress_to: Final progress value
:return: Destination file path (if extracted and compressed, extracted path given)
### Response:
def import_file(
    src,
    file_name,
    imported_format=ImportedFormat.BOTH,
    progress_from=0.0,
    progress_to=None,
):
    """Import file to working directory.

    :param src: Source file path or URL
    :param file_name: Source file name
    :param imported_format: Import file format (extracted, compressed or both)
    :param progress_from: Initial progress value
    :param progress_to: Final progress value
    :return: Destination file path (if extracted and compressed, extracted path given)
    :raises ValueError: on invalid progress bounds, a missing source file,
        or a corrupt/unextractable archive.
    """
    # Validate the progress window only when progress reporting is requested.
    if progress_to is not None:
        if not isinstance(progress_from, float) or not isinstance(progress_to, float):
            raise ValueError("Progress_from and progress_to must be float")
        if progress_from < 0 or progress_from > 1:
            raise ValueError("Progress_from must be between 0 and 1")
        if progress_to < 0 or progress_to > 1:
            raise ValueError("Progress_to must be between 0 and 1")
        if progress_from >= progress_to:
            raise ValueError("Progress_to must be higher than progress_from")

    print("Importing and compressing {}...".format(file_name))

    def importGz():
        """Import gzipped file.

        The file_name must have .gz extension.
        """
        if imported_format != ImportedFormat.COMPRESSED:  # Extracted file required
            with open(file_name[:-3], 'wb') as f_out, gzip.open(src, 'rb') as f_in:
                try:
                    shutil.copyfileobj(f_in, f_out, CHUNK_SIZE)
                except zlib.error:
                    raise ValueError("Invalid gzip file format: {}".format(file_name))
        else:  # Extracted file not-required
            # Verify the compressed file by reading it through to the end.
            with gzip.open(src, 'rb') as f:
                try:
                    while f.read(CHUNK_SIZE) != b'':
                        pass
                except zlib.error:
                    raise ValueError("Invalid gzip file format: {}".format(file_name))

        if imported_format != ImportedFormat.EXTRACTED:  # Compressed file required
            try:
                shutil.copyfile(src, file_name)
            except shutil.SameFileError:
                pass  # Skip copy of downloaded files

        if imported_format == ImportedFormat.COMPRESSED:
            return file_name
        else:
            return file_name[:-3]

    def import7z():
        """Import compressed file in various formats.

        Supported extensions: .bz2, .zip, .rar, .7z, .tar.gz, and .tar.bz2.
        """
        extracted_name, _ = os.path.splitext(file_name)
        destination_name = extracted_name
        temp_dir = 'temp_{}'.format(extracted_name)

        # NOTE(review): shell=True with shlex.quote'd arguments; quoting makes
        # this safe, but a list argv with shell=False would be more robust.
        cmd = '7z x -y -o{} {}'.format(shlex.quote(temp_dir), shlex.quote(src))
        try:
            subprocess.check_call(cmd, shell=True)
        except subprocess.CalledProcessError as err:
            # 7z exit code 2 means a fatal extraction error.
            if err.returncode == 2:
                raise ValueError("Failed to extract file: {}".format(file_name))
            else:
                raise

        paths = os.listdir(temp_dir)
        if len(paths) == 1 and os.path.isfile(os.path.join(temp_dir, paths[0])):
            # Single file in archive.
            temp_file = os.path.join(temp_dir, paths[0])

            if imported_format != ImportedFormat.EXTRACTED:  # Compressed file required
                with open(temp_file, 'rb') as f_in, gzip.open(
                    extracted_name + '.gz', 'wb'
                ) as f_out:
                    shutil.copyfileobj(f_in, f_out, CHUNK_SIZE)

            if imported_format != ImportedFormat.COMPRESSED:  # Extracted file required
                shutil.move(temp_file, './{}'.format(extracted_name))

                # .tar.gz / .tar.bz2 leave a bare .tar after the first
                # decompression step; unpack it and drop the intermediate.
                if extracted_name.endswith('.tar'):
                    with tarfile.open(extracted_name) as tar:
                        tar.extractall()

                    os.remove(extracted_name)
                    destination_name, _ = os.path.splitext(extracted_name)
            else:
                destination_name = extracted_name + '.gz'
        else:
            # Directory or several files in archive.
            if imported_format != ImportedFormat.EXTRACTED:  # Compressed file required
                with tarfile.open(extracted_name + '.tar.gz', 'w:gz') as tar:
                    for fname in glob.glob(os.path.join(temp_dir, '*')):
                        tar.add(fname, os.path.basename(fname))

            if imported_format != ImportedFormat.COMPRESSED:  # Extracted file required
                for path in os.listdir(temp_dir):
                    shutil.move(os.path.join(temp_dir, path), './{}'.format(path))
            else:
                destination_name = extracted_name + '.tar.gz'

        shutil.rmtree(temp_dir)
        return destination_name

    def importUncompressed():
        """Import uncompressed file."""
        if imported_format != ImportedFormat.EXTRACTED:  # Compressed file required
            with open(src, 'rb') as f_in, gzip.open(file_name + '.gz', 'wb') as f_out:
                shutil.copyfileobj(f_in, f_out, CHUNK_SIZE)

        if imported_format != ImportedFormat.COMPRESSED:  # Extracted file required
            try:
                shutil.copyfile(src, file_name)
            except shutil.SameFileError:
                pass  # Skip copy of downloaded files

        return (
            file_name + '.gz'
            if imported_format == ImportedFormat.COMPRESSED
            else file_name
        )

    # Large file download from Google Drive requires cookie and token.
    try:
        response = None
        if re.match(
            r'^https://drive.google.com/[-A-Za-z0-9\+&@#/%?=~_|!:,.;]*[-A-Za-z0-9\+&@#/%=~_|]$',
            src,
        ):
            session = requests.Session()
            response = session.get(src, stream=True)

            # Google Drive answers big downloads with a confirmation page;
            # the confirm token is handed back in a cookie.
            token = None
            for key, value in response.cookies.items():
                if key.startswith('download_warning'):
                    token = value
                    break

            if token is not None:
                params = {'confirm': token}
                response = session.get(src, params=params, stream=True)

        elif re.match(
            r'^(https?|ftp)://[-A-Za-z0-9\+&@#/%?=~_|!:,.;]*[-A-Za-z0-9\+&@#/%=~_|]$',
            src,
        ):
            response = requests.get(src, stream=True)
    except requests.exceptions.ConnectionError:
        raise requests.exceptions.ConnectionError("Could not connect to {}".format(src))

    if response:
        # Stream the download to disk, reporting progress if requested.
        with open(file_name, 'wb') as f:
            total = response.headers.get('content-length')
            total = float(total) if total else None
            downloaded = 0
            current_progress = 0
            for content in response.iter_content(chunk_size=CHUNK_SIZE):
                f.write(content)
                if total is not None and progress_to is not None:
                    downloaded += len(content)
                    progress_span = progress_to - progress_from
                    next_progress = progress_from + progress_span * downloaded / total
                    next_progress = round(next_progress, 2)
                    # Only emit when the rounded value actually advances.
                    if next_progress > current_progress:
                        print(progress(next_progress))
                        current_progress = next_progress

        # Check if a temporary file exists.
        if not os.path.isfile(file_name):
            raise ValueError("Downloaded file not found {}".format(file_name))
        src = file_name
    else:
        if not os.path.isfile(src):
            raise ValueError("Source file not found {}".format(src))

    # Decide which import should be used, based on the file extension.
    if re.search(r'\.(bz2|zip|rar|7z|tgz|tar\.gz|tar\.bz2)$', file_name):
        destination_file_name = import7z()
    elif file_name.endswith('.gz'):
        destination_file_name = importGz()
    else:
        destination_file_name = importUncompressed()

    if progress_to is not None:
        print(progress(progress_to))

    return destination_file_name
def go_to_marker(self, row, col, table_type):
"""Move to point in time marked by the marker.
Parameters
----------
row : QtCore.int
column : QtCore.int
table_type : str
'dataset' table or 'annot' table, it works on either
"""
if table_type == 'dataset':
marker_time = self.idx_marker.property('start')[row]
marker_end_time = self.idx_marker.property('end')[row]
else:
marker_time = self.idx_annot_list.property('start')[row]
marker_end_time = self.idx_annot_list.property('end')[row]
window_length = self.parent.value('window_length')
if self.parent.traces.action['centre_event'].isChecked():
window_start = (marker_time + marker_end_time - window_length) / 2
else:
window_start = floor(marker_time / window_length) * window_length
self.parent.overview.update_position(window_start)
if table_type == 'annot':
for annot in self.parent.traces.idx_annot:
if annot.marker.x() == marker_time:
self.parent.traces.highlight_event(annot)
break | Move to point in time marked by the marker.
Parameters
----------
row : QtCore.int
column : QtCore.int
table_type : str
'dataset' table or 'annot' table, it works on either | Below is the instruction that describes the task:
### Input:
Move to point in time marked by the marker.
Parameters
----------
row : QtCore.int
column : QtCore.int
table_type : str
'dataset' table or 'annot' table, it works on either
### Response:
def go_to_marker(self, row, col, table_type):
"""Move to point in time marked by the marker.
Parameters
----------
row : QtCore.int
column : QtCore.int
table_type : str
'dataset' table or 'annot' table, it works on either
"""
if table_type == 'dataset':
marker_time = self.idx_marker.property('start')[row]
marker_end_time = self.idx_marker.property('end')[row]
else:
marker_time = self.idx_annot_list.property('start')[row]
marker_end_time = self.idx_annot_list.property('end')[row]
window_length = self.parent.value('window_length')
if self.parent.traces.action['centre_event'].isChecked():
window_start = (marker_time + marker_end_time - window_length) / 2
else:
window_start = floor(marker_time / window_length) * window_length
self.parent.overview.update_position(window_start)
if table_type == 'annot':
for annot in self.parent.traces.idx_annot:
if annot.marker.x() == marker_time:
self.parent.traces.highlight_event(annot)
break |
def postinit(self, left=None, ops=None):
"""Do some setup after initialisation.
:param left: The value at the left being applied to a comparison
operator.
:type left: NodeNG or None
:param ops: The remainder of the operators
and their relevant right hand value.
:type ops: list(tuple(str, NodeNG)) or None
"""
self.left = left
self.ops = ops | Do some setup after initialisation.
:param left: The value at the left being applied to a comparison
operator.
:type left: NodeNG or None
:param ops: The remainder of the operators
and their relevant right hand value.
:type ops: list(tuple(str, NodeNG)) or None | Below is the instruction that describes the task:
### Input:
Do some setup after initialisation.
:param left: The value at the left being applied to a comparison
operator.
:type left: NodeNG or None
:param ops: The remainder of the operators
and their relevant right hand value.
:type ops: list(tuple(str, NodeNG)) or None
### Response:
def postinit(self, left=None, ops=None):
"""Do some setup after initialisation.
:param left: The value at the left being applied to a comparison
operator.
:type left: NodeNG or None
:param ops: The remainder of the operators
and their relevant right hand value.
:type ops: list(tuple(str, NodeNG)) or None
"""
self.left = left
self.ops = ops |
def accept_reject_or_neither(self, url, parent_page=None):
'''
Returns `True` (accepted), `False` (rejected), or `None` (no decision).
`None` usually means rejected, unless `max_hops_off` comes into play.
'''
if not isinstance(url, urlcanon.ParsedUrl):
url = urlcanon.semantic(url)
if not url.scheme in (b'http', b'https'):
# XXX doesn't belong here maybe (where? worker ignores unknown
# schemes?)
return False
try_parent_urls = []
if parent_page:
try_parent_urls.append(urlcanon.semantic(parent_page.url))
if parent_page.redirect_url:
try_parent_urls.append(
urlcanon.semantic(parent_page.redirect_url))
# enforce max_hops
if (parent_page and "max_hops" in self.scope
and parent_page.hops_from_seed >= self.scope["max_hops"]):
return False
# enforce reject rules
if "blocks" in self.scope:
for block_rule in self.scope["blocks"]:
rule = urlcanon.MatchRule(**block_rule)
if try_parent_urls:
for parent_url in try_parent_urls:
if rule.applies(url, parent_url):
return False
else:
if rule.applies(url):
return False
# honor accept rules
for accept_rule in self.scope["accepts"]:
rule = urlcanon.MatchRule(**accept_rule)
if try_parent_urls:
for parent_url in try_parent_urls:
if rule.applies(url, parent_url):
return True
else:
if rule.applies(url):
return True
# no decision if we reach here
return None | Returns `True` (accepted), `False` (rejected), or `None` (no decision).
`None` usually means rejected, unless `max_hops_off` comes into play. | Below is the instruction that describes the task:
### Input:
Returns `True` (accepted), `False` (rejected), or `None` (no decision).
`None` usually means rejected, unless `max_hops_off` comes into play.
### Response:
def accept_reject_or_neither(self, url, parent_page=None):
'''
Returns `True` (accepted), `False` (rejected), or `None` (no decision).
`None` usually means rejected, unless `max_hops_off` comes into play.
'''
if not isinstance(url, urlcanon.ParsedUrl):
url = urlcanon.semantic(url)
if not url.scheme in (b'http', b'https'):
# XXX doesn't belong here maybe (where? worker ignores unknown
# schemes?)
return False
try_parent_urls = []
if parent_page:
try_parent_urls.append(urlcanon.semantic(parent_page.url))
if parent_page.redirect_url:
try_parent_urls.append(
urlcanon.semantic(parent_page.redirect_url))
# enforce max_hops
if (parent_page and "max_hops" in self.scope
and parent_page.hops_from_seed >= self.scope["max_hops"]):
return False
# enforce reject rules
if "blocks" in self.scope:
for block_rule in self.scope["blocks"]:
rule = urlcanon.MatchRule(**block_rule)
if try_parent_urls:
for parent_url in try_parent_urls:
if rule.applies(url, parent_url):
return False
else:
if rule.applies(url):
return False
# honor accept rules
for accept_rule in self.scope["accepts"]:
rule = urlcanon.MatchRule(**accept_rule)
if try_parent_urls:
for parent_url in try_parent_urls:
if rule.applies(url, parent_url):
return True
else:
if rule.applies(url):
return True
# no decision if we reach here
return None |
def normalize_exception(self, space):
"""Normalize the OperationError. In other words, fix w_type and/or
w_value to make sure that the __class__ of w_value is exactly w_type.
"""
#
# This method covers all ways in which the Python statement
# "raise X, Y" can produce a valid exception type and instance.
#
# In the following table, 'Class' means a subclass of BaseException
# and 'inst' is an instance of either 'Class' or a subclass of it.
#
# The flow object space only deals with non-advanced case.
#
# input (w_type, w_value)... becomes... advanced case?
# ---------------------------------------------------------------------
# (Class, None) (Class, Class()) no
# (Class, inst) (inst.__class__, inst) no
# (Class, tuple) (Class, Class(*tuple)) yes
# (Class, x) (Class, Class(x)) no
# (inst, None) (inst.__class__, inst) no
#
w_type = self.w_type
w_value = self.get_w_value(space)
if space.exception_is_valid_obj_as_class_w(w_type):
# this is for all cases of the form (Class, something)
if space.is_w(w_value, space.w_None):
# raise Type: we assume we have to instantiate Type
w_value = space.call_function(w_type)
w_type = self._exception_getclass(space, w_value)
else:
w_valuetype = space.exception_getclass(w_value)
if space.exception_issubclass_w(w_valuetype, w_type):
# raise Type, Instance: let etype be the exact type of value
w_type = w_valuetype
else:
if space.isinstance_w(w_value, space.w_tuple):
# raise Type, tuple: assume the tuple contains the
# constructor args
w_value = space.call(w_type, w_value)
else:
# raise Type, X: assume X is the constructor argument
w_value = space.call_function(w_type, w_value)
w_type = self._exception_getclass(space, w_value)
if self.w_cause:
# ensure w_cause is of a valid type
if space.is_none(self.w_cause):
pass
else:
self._exception_getclass(space, self.w_cause, "exception causes")
space.setattr(w_value, space.wrap("__cause__"), self.w_cause)
if self._application_traceback:
from pypy.interpreter.pytraceback import PyTraceback
from pypy.module.exceptions.interp_exceptions import W_BaseException
tb = self._application_traceback
if (isinstance(w_value, W_BaseException) and
isinstance(tb, PyTraceback)):
# traceback hasn't escaped yet
w_value.w_traceback = tb
else:
# traceback has escaped
space.setattr(w_value, space.wrap("__traceback__"),
space.wrap(self.get_traceback()))
else:
# the only case left here is (inst, None), from a 'raise inst'.
w_inst = w_type
w_instclass = self._exception_getclass(space, w_inst)
if not space.is_w(w_value, space.w_None):
raise OperationError(space.w_TypeError,
space.wrap("instance exception may not "
"have a separate value"))
w_value = w_inst
w_type = w_instclass
self.w_type = w_type
self._w_value = w_value | Normalize the OperationError. In other words, fix w_type and/or
w_value to make sure that the __class__ of w_value is exactly w_type. | Below is the instruction that describes the task:
### Input:
Normalize the OperationError. In other words, fix w_type and/or
w_value to make sure that the __class__ of w_value is exactly w_type.
### Response:
def normalize_exception(self, space):
"""Normalize the OperationError. In other words, fix w_type and/or
w_value to make sure that the __class__ of w_value is exactly w_type.
"""
#
# This method covers all ways in which the Python statement
# "raise X, Y" can produce a valid exception type and instance.
#
# In the following table, 'Class' means a subclass of BaseException
# and 'inst' is an instance of either 'Class' or a subclass of it.
#
# The flow object space only deals with non-advanced case.
#
# input (w_type, w_value)... becomes... advanced case?
# ---------------------------------------------------------------------
# (Class, None) (Class, Class()) no
# (Class, inst) (inst.__class__, inst) no
# (Class, tuple) (Class, Class(*tuple)) yes
# (Class, x) (Class, Class(x)) no
# (inst, None) (inst.__class__, inst) no
#
w_type = self.w_type
w_value = self.get_w_value(space)
if space.exception_is_valid_obj_as_class_w(w_type):
# this is for all cases of the form (Class, something)
if space.is_w(w_value, space.w_None):
# raise Type: we assume we have to instantiate Type
w_value = space.call_function(w_type)
w_type = self._exception_getclass(space, w_value)
else:
w_valuetype = space.exception_getclass(w_value)
if space.exception_issubclass_w(w_valuetype, w_type):
# raise Type, Instance: let etype be the exact type of value
w_type = w_valuetype
else:
if space.isinstance_w(w_value, space.w_tuple):
# raise Type, tuple: assume the tuple contains the
# constructor args
w_value = space.call(w_type, w_value)
else:
# raise Type, X: assume X is the constructor argument
w_value = space.call_function(w_type, w_value)
w_type = self._exception_getclass(space, w_value)
if self.w_cause:
# ensure w_cause is of a valid type
if space.is_none(self.w_cause):
pass
else:
self._exception_getclass(space, self.w_cause, "exception causes")
space.setattr(w_value, space.wrap("__cause__"), self.w_cause)
if self._application_traceback:
from pypy.interpreter.pytraceback import PyTraceback
from pypy.module.exceptions.interp_exceptions import W_BaseException
tb = self._application_traceback
if (isinstance(w_value, W_BaseException) and
isinstance(tb, PyTraceback)):
# traceback hasn't escaped yet
w_value.w_traceback = tb
else:
# traceback has escaped
space.setattr(w_value, space.wrap("__traceback__"),
space.wrap(self.get_traceback()))
else:
# the only case left here is (inst, None), from a 'raise inst'.
w_inst = w_type
w_instclass = self._exception_getclass(space, w_inst)
if not space.is_w(w_value, space.w_None):
raise OperationError(space.w_TypeError,
space.wrap("instance exception may not "
"have a separate value"))
w_value = w_inst
w_type = w_instclass
self.w_type = w_type
self._w_value = w_value |
def cycle_canceling(self, display):
'''
API:
cycle_canceling(self, display)
Description:
Solves minimum cost feasible flow problem using cycle canceling
algorithm. Returns True when an optimal solution is found, returns
False otherwise. 'flow' attribute values of arcs should be
considered as junk when returned False.
Input:
display: Display method.
Pre:
(1) Arcs should have 'capacity' and 'cost' attribute.
(2) Nodes should have 'demand' attribute, this value should be
positive if the node is a supply node, negative if it is demand
node and 0 if it is transhipment node.
(3) graph should not have node 's' and 't'.
Post:
Changes 'flow' attributes of arcs.
Return:
Returns True when an optimal solution is found, returns False
otherwise.
'''
# find a feasible solution to flow problem
if not self.find_feasible_flow():
return False
# create residual graph
residual_g = self.create_residual_graph()
# identify a negative cycle in residual graph
ncycle = residual_g.get_negative_cycle()
# loop while residual graph has a negative cycle
while ncycle is not None:
# find capacity of cycle
cap = residual_g.find_cycle_capacity(ncycle)
# augment capacity amount along the cycle
self.augment_cycle(cap, ncycle)
# create residual graph
residual_g = self.create_residual_graph()
# identify next negative cycle
ncycle = residual_g.get_negative_cycle()
return True | API:
cycle_canceling(self, display)
Description:
Solves minimum cost feasible flow problem using cycle canceling
algorithm. Returns True when an optimal solution is found, returns
False otherwise. 'flow' attribute values of arcs should be
considered as junk when returned False.
Input:
display: Display method.
Pre:
(1) Arcs should have 'capacity' and 'cost' attribute.
(2) Nodes should have 'demand' attribute, this value should be
positive if the node is a supply node, negative if it is demand
node and 0 if it is transhipment node.
(3) graph should not have node 's' and 't'.
Post:
Changes 'flow' attributes of arcs.
Return:
Returns True when an optimal solution is found, returns False
otherwise. | Below is the instruction that describes the task:
### Input:
API:
cycle_canceling(self, display)
Description:
Solves minimum cost feasible flow problem using cycle canceling
algorithm. Returns True when an optimal solution is found, returns
False otherwise. 'flow' attribute values of arcs should be
considered as junk when returned False.
Input:
display: Display method.
Pre:
(1) Arcs should have 'capacity' and 'cost' attribute.
(2) Nodes should have 'demand' attribute, this value should be
positive if the node is a supply node, negative if it is demand
node and 0 if it is transhipment node.
(3) graph should not have node 's' and 't'.
Post:
Changes 'flow' attributes of arcs.
Return:
Returns True when an optimal solution is found, returns False
otherwise.
### Response:
def cycle_canceling(self, display):
'''
API:
cycle_canceling(self, display)
Description:
Solves minimum cost feasible flow problem using cycle canceling
algorithm. Returns True when an optimal solution is found, returns
False otherwise. 'flow' attribute values of arcs should be
considered as junk when returned False.
Input:
display: Display method.
Pre:
(1) Arcs should have 'capacity' and 'cost' attribute.
(2) Nodes should have 'demand' attribute, this value should be
positive if the node is a supply node, negative if it is demand
node and 0 if it is transhipment node.
(3) graph should not have node 's' and 't'.
Post:
Changes 'flow' attributes of arcs.
Return:
Returns True when an optimal solution is found, returns False
otherwise.
'''
# find a feasible solution to flow problem
if not self.find_feasible_flow():
return False
# create residual graph
residual_g = self.create_residual_graph()
# identify a negative cycle in residual graph
ncycle = residual_g.get_negative_cycle()
# loop while residual graph has a negative cycle
while ncycle is not None:
# find capacity of cycle
cap = residual_g.find_cycle_capacity(ncycle)
# augment capacity amount along the cycle
self.augment_cycle(cap, ncycle)
# create residual graph
residual_g = self.create_residual_graph()
# identify next negative cycle
ncycle = residual_g.get_negative_cycle()
return True |
def unchunk(self):
"""
Convert a chunked array back into a full array with (key,value) pairs
where key is a tuple of indices, and value is an ndarray.
"""
plan, padding, vshape, split = self.plan, self.padding, self.vshape, self.split
nchunks = self.getnumber(plan, vshape)
full_shape = concatenate((nchunks, plan))
n = len(vshape)
perm = concatenate(list(zip(range(n), range(n, 2*n))))
if self.uniform:
def _unchunk(it):
ordered = sorted(it, key=lambda kv: kv[0][split:])
keys, values = zip(*ordered)
yield keys[0][:split], asarray(values).reshape(full_shape).transpose(perm).reshape(vshape)
else:
def _unchunk(it):
ordered = sorted(it, key=lambda kv: kv[0][split:])
keys, values = zip(*ordered)
k_chks = [k[split:] for k in keys]
arr = empty(nchunks, dtype='object')
for (i, d) in zip(k_chks, values):
arr[i] = d
yield keys[0][:split], allstack(arr.tolist())
# remove padding
if self.padded:
removepad = self.removepad
rdd = self._rdd.map(lambda kv: (kv[0], removepad(kv[0][split:], kv[1], nchunks, padding, axes=range(n))))
else:
rdd = self._rdd
# skip partitionBy if there is not actually any chunking
if array_equal(self.plan, self.vshape):
rdd = rdd.map(lambda kv: (kv[0][:split], kv[1]))
ordered = self._ordered
else:
ranges = self.kshape
npartitions = int(prod(ranges))
if len(self.kshape) == 0:
partitioner = lambda k: 0
else:
partitioner = lambda k: ravel_multi_index(k[:split], ranges)
rdd = rdd.partitionBy(numPartitions=npartitions, partitionFunc=partitioner).mapPartitions(_unchunk)
ordered = True
if array_equal(self.vshape, [1]):
rdd = rdd.mapValues(lambda v: squeeze(v))
newshape = self.shape[:-1]
else:
newshape = self.shape
return BoltArraySpark(rdd, shape=newshape, split=self._split,
dtype=self.dtype, ordered=ordered) | Convert a chunked array back into a full array with (key,value) pairs
where key is a tuple of indices, and value is an ndarray. | Below is the instruction that describes the task:
### Input:
Convert a chunked array back into a full array with (key,value) pairs
where key is a tuple of indices, and value is an ndarray.
### Response:
def unchunk(self):
"""
Convert a chunked array back into a full array with (key,value) pairs
where key is a tuple of indices, and value is an ndarray.
"""
plan, padding, vshape, split = self.plan, self.padding, self.vshape, self.split
nchunks = self.getnumber(plan, vshape)
full_shape = concatenate((nchunks, plan))
n = len(vshape)
perm = concatenate(list(zip(range(n), range(n, 2*n))))
if self.uniform:
def _unchunk(it):
ordered = sorted(it, key=lambda kv: kv[0][split:])
keys, values = zip(*ordered)
yield keys[0][:split], asarray(values).reshape(full_shape).transpose(perm).reshape(vshape)
else:
def _unchunk(it):
ordered = sorted(it, key=lambda kv: kv[0][split:])
keys, values = zip(*ordered)
k_chks = [k[split:] for k in keys]
arr = empty(nchunks, dtype='object')
for (i, d) in zip(k_chks, values):
arr[i] = d
yield keys[0][:split], allstack(arr.tolist())
# remove padding
if self.padded:
removepad = self.removepad
rdd = self._rdd.map(lambda kv: (kv[0], removepad(kv[0][split:], kv[1], nchunks, padding, axes=range(n))))
else:
rdd = self._rdd
# skip partitionBy if there is not actually any chunking
if array_equal(self.plan, self.vshape):
rdd = rdd.map(lambda kv: (kv[0][:split], kv[1]))
ordered = self._ordered
else:
ranges = self.kshape
npartitions = int(prod(ranges))
if len(self.kshape) == 0:
partitioner = lambda k: 0
else:
partitioner = lambda k: ravel_multi_index(k[:split], ranges)
rdd = rdd.partitionBy(numPartitions=npartitions, partitionFunc=partitioner).mapPartitions(_unchunk)
ordered = True
if array_equal(self.vshape, [1]):
rdd = rdd.mapValues(lambda v: squeeze(v))
newshape = self.shape[:-1]
else:
newshape = self.shape
return BoltArraySpark(rdd, shape=newshape, split=self._split,
dtype=self.dtype, ordered=ordered) |
def moment_inertia(self):
"""
The analytic inertia tensor of the sphere primitive.
Returns
----------
tensor: (3,3) float, 3D inertia tensor
"""
tensor = inertia.sphere_inertia(mass=self.volume,
radius=self.primitive.radius)
return tensor | The analytic inertia tensor of the sphere primitive.
Returns
----------
tensor: (3,3) float, 3D inertia tensor | Below is the instruction that describes the task:
### Input:
The analytic inertia tensor of the sphere primitive.
Returns
----------
tensor: (3,3) float, 3D inertia tensor
### Response:
def moment_inertia(self):
"""
The analytic inertia tensor of the sphere primitive.
Returns
----------
tensor: (3,3) float, 3D inertia tensor
"""
tensor = inertia.sphere_inertia(mass=self.volume,
radius=self.primitive.radius)
return tensor |
def add_ignored(self, ignored):
"""Add ignored text to the node. This will add the length of the ignored text to the node's
consumed property.
"""
if ignored:
if self.ignored:
self.ignored = ignored + self.ignored
else:
self.ignored = ignored
self.consumed += len(ignored) | Add ignored text to the node. This will add the length of the ignored text to the node's
consumed property. | Below is the instruction that describes the task:
### Input:
Add ignored text to the node. This will add the length of the ignored text to the node's
consumed property.
### Response:
def add_ignored(self, ignored):
"""Add ignored text to the node. This will add the length of the ignored text to the node's
consumed property.
"""
if ignored:
if self.ignored:
self.ignored = ignored + self.ignored
else:
self.ignored = ignored
self.consumed += len(ignored) |
def get_datetime_at_period_ix(self, ix):
"""
Get the datetime at a given period.
:param period: The index of the period.
:returns: The datetime.
"""
if self.timestep_period_duration == TimePeriod.millisecond:
return self.start_datetime + timedelta(milliseconds=ix)
elif self.timestep_period_duration == TimePeriod.second:
return self.start_datetime + timedelta(seconds=ix)
elif self.timestep_period_duration == TimePeriod.minute:
return self.start_datetime + timedelta(minutes=ix)
elif self.timestep_period_duration == TimePeriod.hour:
return self.start_datetime + timedelta(hours=ix)
elif self.timestep_period_duration == TimePeriod.day:
return self.start_datetime + relativedelta(days=ix)
elif self.timestep_period_duration == TimePeriod.week:
return self.start_datetime + relativedelta(days=ix*7)
elif self.timestep_period_duration == TimePeriod.month:
return self.start_datetime + relativedelta(months=ix)
elif self.timestep_period_duration == TimePeriod.year:
return self.start_datetime + relativedelta(years=ix) | Get the datetime at a given period.
:param period: The index of the period.
:returns: The datetime. | Below is the instruction that describes the task:
### Input:
Get the datetime at a given period.
:param period: The index of the period.
:returns: The datetime.
### Response:
def get_datetime_at_period_ix(self, ix):
"""
Get the datetime at a given period.
:param period: The index of the period.
:returns: The datetime.
"""
if self.timestep_period_duration == TimePeriod.millisecond:
return self.start_datetime + timedelta(milliseconds=ix)
elif self.timestep_period_duration == TimePeriod.second:
return self.start_datetime + timedelta(seconds=ix)
elif self.timestep_period_duration == TimePeriod.minute:
return self.start_datetime + timedelta(minutes=ix)
elif self.timestep_period_duration == TimePeriod.hour:
return self.start_datetime + timedelta(hours=ix)
elif self.timestep_period_duration == TimePeriod.day:
return self.start_datetime + relativedelta(days=ix)
elif self.timestep_period_duration == TimePeriod.week:
return self.start_datetime + relativedelta(days=ix*7)
elif self.timestep_period_duration == TimePeriod.month:
return self.start_datetime + relativedelta(months=ix)
elif self.timestep_period_duration == TimePeriod.year:
return self.start_datetime + relativedelta(years=ix) |
def visit_GeneratorExp(self, node: AST, dfltChaining: bool = True) -> str:
"""Return `node`s representation as generator expression."""
return f"({self.visit(node.elt)} " \
           f"{' '.join(self.visit(gen) for gen in node.generators)})" | Return `node`s representation as generator expression. | Below is the instruction that describes the task:
### Input:
Return `node`s representation as generator expression.
### Response:
def visit_GeneratorExp(self, node: AST, dfltChaining: bool = True) -> str:
"""Return `node`s representation as generator expression."""
return f"({self.visit(node.elt)} " \
f"{' '.join(self.visit(gen) for gen in node.generators)})" |
def zbar_function(fname, restype, *args):
"""Returns a foreign function exported by `zbar`.
Args:
fname (:obj:`str`): Name of the exported function as string.
restype (:obj:): Return type - one of the `ctypes` primitive C data
types.
*args: Arguments - a sequence of `ctypes` primitive C data types.
Returns:
cddl.CFunctionType: A wrapper around the function.
"""
prototype = CFUNCTYPE(restype, *args)
return prototype((fname, load_libzbar())) | Returns a foreign function exported by `zbar`.
Args:
fname (:obj:`str`): Name of the exported function as string.
restype (:obj:): Return type - one of the `ctypes` primitive C data
types.
*args: Arguments - a sequence of `ctypes` primitive C data types.
Returns:
cddl.CFunctionType: A wrapper around the function. | Below is the instruction that describes the task:
### Input:
Returns a foreign function exported by `zbar`.
Args:
fname (:obj:`str`): Name of the exported function as string.
restype (:obj:): Return type - one of the `ctypes` primitive C data
types.
*args: Arguments - a sequence of `ctypes` primitive C data types.
Returns:
cddl.CFunctionType: A wrapper around the function.
### Response:
def zbar_function(fname, restype, *args):
"""Returns a foreign function exported by `zbar`.
Args:
fname (:obj:`str`): Name of the exported function as string.
restype (:obj:): Return type - one of the `ctypes` primitive C data
types.
*args: Arguments - a sequence of `ctypes` primitive C data types.
Returns:
cddl.CFunctionType: A wrapper around the function.
"""
prototype = CFUNCTYPE(restype, *args)
return prototype((fname, load_libzbar())) |
def deterministic_shuffle(list_, seed=0, rng=None):
r"""
Args:
list_ (list):
seed (int):
Returns:
list: list_
CommandLine:
python -m utool.util_numpy --test-deterministic_shuffle
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_numpy import * # NOQA
>>> list_ = [1, 2, 3, 4, 5, 6]
>>> seed = 1
>>> list_ = deterministic_shuffle(list_, seed)
>>> result = str(list_)
>>> print(result)
[3, 2, 5, 1, 4, 6]
"""
rng = ensure_rng(seed if rng is None else rng)
rng.shuffle(list_)
return list_ | r"""
Args:
list_ (list):
seed (int):
Returns:
list: list_
CommandLine:
python -m utool.util_numpy --test-deterministic_shuffle
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_numpy import * # NOQA
>>> list_ = [1, 2, 3, 4, 5, 6]
>>> seed = 1
>>> list_ = deterministic_shuffle(list_, seed)
>>> result = str(list_)
>>> print(result)
[3, 2, 5, 1, 4, 6] | Below is the instruction that describes the task:
### Input:
r"""
Args:
list_ (list):
seed (int):
Returns:
list: list_
CommandLine:
python -m utool.util_numpy --test-deterministic_shuffle
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_numpy import * # NOQA
>>> list_ = [1, 2, 3, 4, 5, 6]
>>> seed = 1
>>> list_ = deterministic_shuffle(list_, seed)
>>> result = str(list_)
>>> print(result)
[3, 2, 5, 1, 4, 6]
### Response:
def deterministic_shuffle(list_, seed=0, rng=None):
r"""
Args:
list_ (list):
seed (int):
Returns:
list: list_
CommandLine:
python -m utool.util_numpy --test-deterministic_shuffle
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_numpy import * # NOQA
>>> list_ = [1, 2, 3, 4, 5, 6]
>>> seed = 1
>>> list_ = deterministic_shuffle(list_, seed)
>>> result = str(list_)
>>> print(result)
[3, 2, 5, 1, 4, 6]
"""
rng = ensure_rng(seed if rng is None else rng)
rng.shuffle(list_)
return list_ |
def create_track_token(request):
"""Returns ``TrackToken``.
``TrackToken' contains request and user making changes.
It can be passed to ``TrackedModel.save`` instead of ``request``.
It is intended to be used when passing ``request`` is not possible
e.g. when ``TrackedModel.save`` will be called from celery task.
"""
from tracked_model.models import RequestInfo
request_pk = RequestInfo.create_or_get_from_request(request).pk
user_pk = None
if request.user.is_authenticated():
user_pk = request.user.pk
return TrackToken(request_pk=request_pk, user_pk=user_pk) | Returns ``TrackToken``.
``TrackToken' contains request and user making changes.
It can be passed to ``TrackedModel.save`` instead of ``request``.
It is intended to be used when passing ``request`` is not possible
e.g. when ``TrackedModel.save`` will be called from celery task. | Below is the instruction that describes the task:
### Input:
Returns ``TrackToken``.
``TrackToken' contains request and user making changes.
It can be passed to ``TrackedModel.save`` instead of ``request``.
It is intended to be used when passing ``request`` is not possible
e.g. when ``TrackedModel.save`` will be called from celery task.
### Response:
def create_track_token(request):
"""Returns ``TrackToken``.
``TrackToken' contains request and user making changes.
It can be passed to ``TrackedModel.save`` instead of ``request``.
It is intended to be used when passing ``request`` is not possible
e.g. when ``TrackedModel.save`` will be called from celery task.
"""
from tracked_model.models import RequestInfo
request_pk = RequestInfo.create_or_get_from_request(request).pk
user_pk = None
if request.user.is_authenticated():
user_pk = request.user.pk
return TrackToken(request_pk=request_pk, user_pk=user_pk) |
def quit(self, reason=''):
"""
Sends a QUIT message, closes the connection and -
ends Lurklib's main loop.
Optional arguments:
* reason='' - Reason for quitting.
"""
with self.lock:
self.keep_going = False
self._quit(reason)
self._socket.shutdown(self._m_socket.SHUT_RDWR)
self._socket.close() | Sends a QUIT message, closes the connection and -
ends Lurklib's main loop.
Optional arguments:
* reason='' - Reason for quitting. | Below is the instruction that describes the task:
### Input:
Sends a QUIT message, closes the connection and -
ends Lurklib's main loop.
Optional arguments:
* reason='' - Reason for quitting.
### Response:
def quit(self, reason=''):
"""
Sends a QUIT message, closes the connection and -
ends Lurklib's main loop.
Optional arguments:
* reason='' - Reason for quitting.
"""
with self.lock:
self.keep_going = False
self._quit(reason)
self._socket.shutdown(self._m_socket.SHUT_RDWR)
self._socket.close() |
def create_namespaced_resource_quota(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_resource_quota # noqa: E501
create a ResourceQuota # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_resource_quota(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1ResourceQuota body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1ResourceQuota
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_resource_quota_with_http_info(namespace, body, **kwargs) # noqa: E501
else:
(data) = self.create_namespaced_resource_quota_with_http_info(namespace, body, **kwargs) # noqa: E501
return data | create_namespaced_resource_quota # noqa: E501
create a ResourceQuota # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_resource_quota(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1ResourceQuota body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1ResourceQuota
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
create_namespaced_resource_quota # noqa: E501
create a ResourceQuota # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_resource_quota(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1ResourceQuota body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1ResourceQuota
If the method is called asynchronously,
returns the request thread.
### Response:
def create_namespaced_resource_quota(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_resource_quota # noqa: E501
create a ResourceQuota # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_resource_quota(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1ResourceQuota body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1ResourceQuota
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_resource_quota_with_http_info(namespace, body, **kwargs) # noqa: E501
else:
(data) = self.create_namespaced_resource_quota_with_http_info(namespace, body, **kwargs) # noqa: E501
return data |
def get_query_builder(self, *args, **kwargs):
"""
Return the query builder class instance that should be used to
build the query which is passed to the search engine backend.
"""
query_builder = self.get_query_builder_class()
return query_builder(*args, **kwargs) | Return the query builder class instance that should be used to
build the query which is passed to the search engine backend. | Below is the instruction that describes the task:
### Input:
Return the query builder class instance that should be used to
build the query which is passed to the search engine backend.
### Response:
def get_query_builder(self, *args, **kwargs):
"""
Return the query builder class instance that should be used to
build the query which is passed to the search engine backend.
"""
query_builder = self.get_query_builder_class()
return query_builder(*args, **kwargs) |
def _send_splunk(event, index_override=None, sourcetype_override=None):
'''
Send the results to Splunk.
Requires the Splunk HTTP Event Collector running on port 8088.
This is available on Splunk Enterprise version 6.3 or higher.
'''
# Get Splunk Options
opts = _get_options()
log.info(str('Options: %s'), # future lint: disable=blacklisted-function
salt.utils.json.dumps(opts))
http_event_collector_key = opts['token']
http_event_collector_host = opts['indexer']
# Set up the collector
splunk_event = http_event_collector(http_event_collector_key, http_event_collector_host)
# init the payload
payload = {}
# Set up the event metadata
if index_override is None:
payload.update({"index": opts['index']})
else:
payload.update({"index": index_override})
if sourcetype_override is None:
payload.update({"sourcetype": opts['sourcetype']})
else:
payload.update({"index": sourcetype_override})
# Add the event
payload.update({"event": event})
log.info(str('Payload: %s'), # future lint: disable=blacklisted-function
salt.utils.json.dumps(payload))
# Fire it off
splunk_event.sendEvent(payload)
return True | Send the results to Splunk.
Requires the Splunk HTTP Event Collector running on port 8088.
This is available on Splunk Enterprise version 6.3 or higher. | Below is the instruction that describes the task:
### Input:
Send the results to Splunk.
Requires the Splunk HTTP Event Collector running on port 8088.
This is available on Splunk Enterprise version 6.3 or higher.
### Response:
def _send_splunk(event, index_override=None, sourcetype_override=None):
'''
Send the results to Splunk.
Requires the Splunk HTTP Event Collector running on port 8088.
This is available on Splunk Enterprise version 6.3 or higher.
'''
# Get Splunk Options
opts = _get_options()
log.info(str('Options: %s'), # future lint: disable=blacklisted-function
salt.utils.json.dumps(opts))
http_event_collector_key = opts['token']
http_event_collector_host = opts['indexer']
# Set up the collector
splunk_event = http_event_collector(http_event_collector_key, http_event_collector_host)
# init the payload
payload = {}
# Set up the event metadata
if index_override is None:
payload.update({"index": opts['index']})
else:
payload.update({"index": index_override})
if sourcetype_override is None:
payload.update({"sourcetype": opts['sourcetype']})
else:
payload.update({"index": sourcetype_override})
# Add the event
payload.update({"event": event})
log.info(str('Payload: %s'), # future lint: disable=blacklisted-function
salt.utils.json.dumps(payload))
# Fire it off
splunk_event.sendEvent(payload)
return True |
def get_actions(self, params):
"""Send a HTTP request to the satellite (GET /_checks)
Get actions from the scheduler.
Un-serialize data received.
:param params: the request parameters
:type params: str
:return: Actions list on success, [] on failure
:rtype: list
"""
res = self.con.get('_checks', params, wait=True)
logger.debug("Got checks to execute from %s: %s", self.name, res)
return unserialize(res, True) | Send a HTTP request to the satellite (GET /_checks)
Get actions from the scheduler.
Un-serialize data received.
:param params: the request parameters
:type params: str
:return: Actions list on success, [] on failure
:rtype: list | Below is the instruction that describes the task:
### Input:
Send a HTTP request to the satellite (GET /_checks)
Get actions from the scheduler.
Un-serialize data received.
:param params: the request parameters
:type params: str
:return: Actions list on success, [] on failure
:rtype: list
### Response:
def get_actions(self, params):
"""Send a HTTP request to the satellite (GET /_checks)
Get actions from the scheduler.
Un-serialize data received.
:param params: the request parameters
:type params: str
:return: Actions list on success, [] on failure
:rtype: list
"""
res = self.con.get('_checks', params, wait=True)
logger.debug("Got checks to execute from %s: %s", self.name, res)
return unserialize(res, True) |
def get_rsc_list_2(self, rsc_clz_list=None):
"""get the list of resource list to collect based on clz list
:param rsc_clz_list: the list of classes to collect
:return: filtered list of resource list,
like [VNXLunList(), VNXDiskList()]
"""
rsc_list_2 = self._default_rsc_list_with_perf_stats()
if rsc_clz_list is None:
rsc_clz_list = ResourceList.get_rsc_clz_list(rsc_list_2)
return [rsc_list
for rsc_list in rsc_list_2
if rsc_list.get_resource_class() in rsc_clz_list] | get the list of resource list to collect based on clz list
:param rsc_clz_list: the list of classes to collect
:return: filtered list of resource list,
like [VNXLunList(), VNXDiskList()] | Below is the instruction that describes the task:
### Input:
get the list of resource list to collect based on clz list
:param rsc_clz_list: the list of classes to collect
:return: filtered list of resource list,
like [VNXLunList(), VNXDiskList()]
### Response:
def get_rsc_list_2(self, rsc_clz_list=None):
"""get the list of resource list to collect based on clz list
:param rsc_clz_list: the list of classes to collect
:return: filtered list of resource list,
like [VNXLunList(), VNXDiskList()]
"""
rsc_list_2 = self._default_rsc_list_with_perf_stats()
if rsc_clz_list is None:
rsc_clz_list = ResourceList.get_rsc_clz_list(rsc_list_2)
return [rsc_list
for rsc_list in rsc_list_2
if rsc_list.get_resource_class() in rsc_clz_list] |
def hash(self, raw: Any) -> str:
"""
Returns the hex digest of a HMAC-encoded version of the input.
"""
with MultiTimerContext(timer, TIMING_HASH):
raw_bytes = str(raw).encode('utf-8')
hmac_obj = hmac.new(key=self.key_bytes, msg=raw_bytes,
digestmod=self.digestmod)
return hmac_obj.hexdigest() | Returns the hex digest of a HMAC-encoded version of the input. | Below is the the instruction that describes the task:
### Input:
Returns the hex digest of a HMAC-encoded version of the input.
### Response:
def hash(self, raw: Any) -> str:
"""
Returns the hex digest of a HMAC-encoded version of the input.
"""
with MultiTimerContext(timer, TIMING_HASH):
raw_bytes = str(raw).encode('utf-8')
hmac_obj = hmac.new(key=self.key_bytes, msg=raw_bytes,
digestmod=self.digestmod)
return hmac_obj.hexdigest() |
def verify_report(self, device_id, root, data, signature, **kwargs):
"""Verify a buffer of report data on behalf of a device.
Args:
device_id (int): The id of the device that we should encrypt for
root (int): The root key type that should be used to generate the report
data (bytearray): The data that we should verify
signature (bytearray): The signature attached to data that we should verify
**kwargs: There are additional specific keyword args that are required
depending on the root key used. Typically, you must specify
- report_id (int): The report id
- sent_timestamp (int): The sent timestamp of the report
These two bits of information are used to construct the per report
signing and encryption key from the specific root key type.
Returns:
dict: The result of the verification process must always be a bool under the
'verified' key, however additional keys may be present depending on the
signature method used.
Raises:
NotFoundError: If the auth provider is not able to verify the data due to
an error. If the data is simply not valid, then the function returns
normally.
"""
report_key = self._verify_derive_key(device_id, root, **kwargs)
message_hash = hashlib.sha256(data).digest()
hmac_calc = hmac.new(report_key, message_hash, hashlib.sha256)
result = bytearray(hmac_calc.digest())
if len(signature) == 0:
verified = False
elif len(signature) > len(result):
verified = False
elif len(signature) < len(result):
trunc_result = result[:len(signature)]
verified = hmac.compare_digest(signature, trunc_result)
else:
verified = hmac.compare_digest(signature, result)
return {'verified': verified, 'bit_length': 8*len(signature)} | Verify a buffer of report data on behalf of a device.
Args:
device_id (int): The id of the device that we should encrypt for
root (int): The root key type that should be used to generate the report
data (bytearray): The data that we should verify
signature (bytearray): The signature attached to data that we should verify
**kwargs: There are additional specific keyword args that are required
depending on the root key used. Typically, you must specify
- report_id (int): The report id
- sent_timestamp (int): The sent timestamp of the report
These two bits of information are used to construct the per report
signing and encryption key from the specific root key type.
Returns:
dict: The result of the verification process must always be a bool under the
'verified' key, however additional keys may be present depending on the
signature method used.
Raises:
NotFoundError: If the auth provider is not able to verify the data due to
an error. If the data is simply not valid, then the function returns
normally. | Below is the instruction that describes the task:
### Input:
Verify a buffer of report data on behalf of a device.
Args:
device_id (int): The id of the device that we should encrypt for
root (int): The root key type that should be used to generate the report
data (bytearray): The data that we should verify
signature (bytearray): The signature attached to data that we should verify
**kwargs: There are additional specific keyword args that are required
depending on the root key used. Typically, you must specify
- report_id (int): The report id
- sent_timestamp (int): The sent timestamp of the report
These two bits of information are used to construct the per report
signing and encryption key from the specific root key type.
Returns:
dict: The result of the verification process must always be a bool under the
'verified' key, however additional keys may be present depending on the
signature method used.
Raises:
NotFoundError: If the auth provider is not able to verify the data due to
an error. If the data is simply not valid, then the function returns
normally.
### Response:
def verify_report(self, device_id, root, data, signature, **kwargs):
"""Verify a buffer of report data on behalf of a device.
Args:
device_id (int): The id of the device that we should encrypt for
root (int): The root key type that should be used to generate the report
data (bytearray): The data that we should verify
signature (bytearray): The signature attached to data that we should verify
**kwargs: There are additional specific keyword args that are required
depending on the root key used. Typically, you must specify
- report_id (int): The report id
- sent_timestamp (int): The sent timestamp of the report
These two bits of information are used to construct the per report
signing and encryption key from the specific root key type.
Returns:
dict: The result of the verification process must always be a bool under the
'verified' key, however additional keys may be present depending on the
signature method used.
Raises:
NotFoundError: If the auth provider is not able to verify the data due to
an error. If the data is simply not valid, then the function returns
normally.
"""
report_key = self._verify_derive_key(device_id, root, **kwargs)
message_hash = hashlib.sha256(data).digest()
hmac_calc = hmac.new(report_key, message_hash, hashlib.sha256)
result = bytearray(hmac_calc.digest())
if len(signature) == 0:
verified = False
elif len(signature) > len(result):
verified = False
elif len(signature) < len(result):
trunc_result = result[:len(signature)]
verified = hmac.compare_digest(signature, trunc_result)
else:
verified = hmac.compare_digest(signature, result)
return {'verified': verified, 'bit_length': 8*len(signature)} |
def _create_add_petabencana_layer_action(self):
"""Create action for import OSM Dialog."""
icon = resources_path('img', 'icons', 'add-petabencana-layer.svg')
self.action_add_petabencana_layer = QAction(
QIcon(icon),
self.tr('Add PetaBencana Flood Layer'),
self.iface.mainWindow())
self.action_add_petabencana_layer.setStatusTip(self.tr(
'Add PetaBencana Flood Layer'))
self.action_add_petabencana_layer.setWhatsThis(self.tr(
'Use this to add a PetaBencana layer to your map. '
'It needs internet access to function.'))
self.action_add_petabencana_layer.triggered.connect(
self.add_petabencana_layer)
self.add_action(
self.action_add_petabencana_layer,
add_to_toolbar=self.full_toolbar) | Create action for import OSM Dialog. | Below is the the instruction that describes the task:
### Input:
Create action for import OSM Dialog.
### Response:
def _create_add_petabencana_layer_action(self):
"""Create action for import OSM Dialog."""
icon = resources_path('img', 'icons', 'add-petabencana-layer.svg')
self.action_add_petabencana_layer = QAction(
QIcon(icon),
self.tr('Add PetaBencana Flood Layer'),
self.iface.mainWindow())
self.action_add_petabencana_layer.setStatusTip(self.tr(
'Add PetaBencana Flood Layer'))
self.action_add_petabencana_layer.setWhatsThis(self.tr(
'Use this to add a PetaBencana layer to your map. '
'It needs internet access to function.'))
self.action_add_petabencana_layer.triggered.connect(
self.add_petabencana_layer)
self.add_action(
self.action_add_petabencana_layer,
add_to_toolbar=self.full_toolbar) |
def iteritems(self):
"""Iterator across all the non-duplicate keys and their values.
Only yields the first key of duplicates.
"""
keys_yielded = set()
for k, v in self._pairs:
if k not in keys_yielded:
keys_yielded.add(k)
yield k, v | Iterator across all the non-duplicate keys and their values.
Only yields the first key of duplicates. | Below is the instruction that describes the task:
### Input:
Iterator across all the non-duplicate keys and their values.
Only yields the first key of duplicates.
### Response:
def iteritems(self):
"""Iterator across all the non-duplicate keys and their values.
Only yields the first key of duplicates.
"""
keys_yielded = set()
for k, v in self._pairs:
if k not in keys_yielded:
keys_yielded.add(k)
yield k, v |
def cmd_shodan(ip, no_cache, verbose, output):
"""Simple shodan API client.
Prints the JSON result of a shodan query.
Example:
\b
$ habu.shodan 8.8.8.8
{
"hostnames": [
"google-public-dns-a.google.com"
],
"country_code": "US",
"org": "Google",
"data": [
{
"isp": "Google",
"transport": "udp",
"data": "Recursion: enabled",
"asn": "AS15169",
"port": 53,
"hostnames": [
"google-public-dns-a.google.com"
]
}
],
"ports": [
53
]
}
"""
habucfg = loadcfg()
if 'SHODAN_APIKEY' not in habucfg:
print('You must provide a shodan apikey. Use the ~/.habu.json file (variable SHODAN_APIKEY), or export the variable HABU_SHODAN_APIKEY')
print('Get your API key from https://www.shodan.io/')
sys.exit(1)
if verbose:
logging.basicConfig(level=logging.INFO, format='%(message)s')
data = shodan_get_result(ip, habucfg['SHODAN_APIKEY'], no_cache, verbose)
output.write(json.dumps(data, indent=4))
output.write('\n') | Simple shodan API client.
Prints the JSON result of a shodan query.
Example:
\b
$ habu.shodan 8.8.8.8
{
"hostnames": [
"google-public-dns-a.google.com"
],
"country_code": "US",
"org": "Google",
"data": [
{
"isp": "Google",
"transport": "udp",
"data": "Recursion: enabled",
"asn": "AS15169",
"port": 53,
"hostnames": [
"google-public-dns-a.google.com"
]
}
],
"ports": [
53
]
} | Below is the instruction that describes the task:
### Input:
Simple shodan API client.
Prints the JSON result of a shodan query.
Example:
\b
$ habu.shodan 8.8.8.8
{
"hostnames": [
"google-public-dns-a.google.com"
],
"country_code": "US",
"org": "Google",
"data": [
{
"isp": "Google",
"transport": "udp",
"data": "Recursion: enabled",
"asn": "AS15169",
"port": 53,
"hostnames": [
"google-public-dns-a.google.com"
]
}
],
"ports": [
53
]
}
### Response:
def cmd_shodan(ip, no_cache, verbose, output):
"""Simple shodan API client.
Prints the JSON result of a shodan query.
Example:
\b
$ habu.shodan 8.8.8.8
{
"hostnames": [
"google-public-dns-a.google.com"
],
"country_code": "US",
"org": "Google",
"data": [
{
"isp": "Google",
"transport": "udp",
"data": "Recursion: enabled",
"asn": "AS15169",
"port": 53,
"hostnames": [
"google-public-dns-a.google.com"
]
}
],
"ports": [
53
]
}
"""
habucfg = loadcfg()
if 'SHODAN_APIKEY' not in habucfg:
print('You must provide a shodan apikey. Use the ~/.habu.json file (variable SHODAN_APIKEY), or export the variable HABU_SHODAN_APIKEY')
print('Get your API key from https://www.shodan.io/')
sys.exit(1)
if verbose:
logging.basicConfig(level=logging.INFO, format='%(message)s')
data = shodan_get_result(ip, habucfg['SHODAN_APIKEY'], no_cache, verbose)
output.write(json.dumps(data, indent=4))
output.write('\n') |
def all(self):
"""
Returns all the values joined together.
:return <int>
"""
out = 0
for key, value in self.items():
out |= value
return out | Returns all the values joined together.
:return <int> | Below is the instruction that describes the task:
### Input:
Returns all the values joined together.
:return <int>
### Response:
def all(self):
"""
Returns all the values joined together.
:return <int>
"""
out = 0
for key, value in self.items():
out |= value
return out |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.