code (string, lengths 75–104k) | docstring (string, lengths 1–46.9k) | text (string, lengths 164–112k) |
|---|---|---|
def _fetch_access_token(self, url, data):
""" The real fetch access token """
logger.info('Fetching component access token')
res = self._http.post(
url=url,
data=data
)
try:
res.raise_for_status()
except requests.RequestException as reqe:
raise WeChatClientException(
errcode=None,
errmsg=None,
client=self,
request=reqe.request,
response=reqe.response
)
result = res.json()
if 'errcode' in result and result['errcode'] != 0:
raise WeChatClientException(
result['errcode'],
result['errmsg'],
client=self,
request=res.request,
response=res
)
expires_in = 7200
if 'expires_in' in result:
expires_in = result['expires_in']
self.session.set(
'component_access_token',
result['component_access_token'],
expires_in
)
self.expires_at = int(time.time()) + expires_in
    return result | The real fetch access token | Below is the instruction that describes the task:
### Input:
The real fetch access token
### Response:
def _fetch_access_token(self, url, data):
    """Fetch a component access token and cache it in the session.

    :param url: Endpoint to POST the token request to.
    :param data: Payload for the token request.
    :returns: The decoded JSON response from the API.
    :raises WeChatClientException: On transport errors or a non-zero
        API ``errcode``.
    """
    logger.info('Fetching component access token')
    res = self._http.post(url=url, data=data)
    try:
        res.raise_for_status()
    except requests.RequestException as reqe:
        # Surface HTTP-level failures as client exceptions.
        raise WeChatClientException(
            errcode=None,
            errmsg=None,
            client=self,
            request=reqe.request,
            response=reqe.response
        )
    result = res.json()
    if result.get('errcode', 0) != 0:
        # HTTP 200 but the API reported a logical error in the body.
        raise WeChatClientException(
            result['errcode'],
            result['errmsg'],
            client=self,
            request=res.request,
            response=res
        )
    # The API may omit expires_in; default to the usual two hours.
    expires_in = result.get('expires_in', 7200)
    self.session.set(
        'component_access_token',
        result['component_access_token'],
        expires_in
    )
    self.expires_at = int(time.time()) + expires_in
    return result
def tag_atoms_unique_ids(self, force=False):
""" Tags each Atom in the Assembly with its unique_id.
Notes
-----
The unique_id for each atom is a tuple (a double). `unique_id[0]`
is the unique_id for its parent `Monomer` (see `Monomer.unique_id`
for more information). `unique_id[1]` is the atom_type in the
`Assembly` as a string, e.g. 'CA', 'CD2'.
Parameters
----------
force : bool, optional
If True the tag will be run even if Atoms are already tagged.
If False, only runs if at least one Atom is not tagged.
"""
tagged = ['unique_id' in x.tags.keys() for x in self.get_atoms()]
if (not all(tagged)) or force:
for m in self.get_monomers():
for atom_type, atom in m.atoms.items():
atom.tags['unique_id'] = (m.unique_id, atom_type)
return | Tags each Atom in the Assembly with its unique_id.
Notes
-----
The unique_id for each atom is a tuple (a double). `unique_id[0]`
is the unique_id for its parent `Monomer` (see `Monomer.unique_id`
for more information). `unique_id[1]` is the atom_type in the
`Assembly` as a string, e.g. 'CA', 'CD2'.
Parameters
----------
force : bool, optional
If True the tag will be run even if Atoms are already tagged.
If False, only runs if at least one Atom is not tagged. | Below is the the instruction that describes the task:
### Input:
Tags each Atom in the Assembly with its unique_id.
Notes
-----
The unique_id for each atom is a tuple (a double). `unique_id[0]`
is the unique_id for its parent `Monomer` (see `Monomer.unique_id`
for more information). `unique_id[1]` is the atom_type in the
`Assembly` as a string, e.g. 'CA', 'CD2'.
Parameters
----------
force : bool, optional
If True the tag will be run even if Atoms are already tagged.
If False, only runs if at least one Atom is not tagged.
### Response:
def tag_atoms_unique_ids(self, force=False):
    """ Tags each Atom in the Assembly with its unique_id.
    Notes
    -----
    The unique_id for each atom is a tuple (a double). `unique_id[0]`
    is the unique_id for its parent `Monomer` (see `Monomer.unique_id`
    for more information). `unique_id[1]` is the atom_type in the
    `Assembly` as a string, e.g. 'CA', 'CD2'.
    Parameters
    ----------
    force : bool, optional
        If True the tag will be run even if Atoms are already tagged.
        If False, only runs if at least one Atom is not tagged.
    """
    # Skip the retagging pass when every atom already carries a tag,
    # unless the caller explicitly forces it.
    fully_tagged = all('unique_id' in atom.tags for atom in self.get_atoms())
    if force or not fully_tagged:
        for monomer in self.get_monomers():
            for atom_type, atom in monomer.atoms.items():
                atom.tags['unique_id'] = (monomer.unique_id, atom_type)
    return
def collect(self, file=sys.stderr):
"""Collect stats and print results to file
:param file: A writable file-like object
"""
cur = gcstats()
Ncur = len(cur)
if self.stats is not None and file is not None:
prev = self.stats
Nprev = self.ntypes # may be less than len(prev)
if Ncur != Nprev:
print("# Types %d -> %d" % (Nprev, Ncur), file=file)
Scur, Sprev, first = set(cur), set(prev), True
for T in Scur - Sprev: # new types
if first:
print('New Types', file=file)
first = False
print(' ', T, cur[T], file=file)
first = True
for T in Sprev - Scur: # collected types
if first:
print('Cleaned Types', file=file)
first = False
print(' ', T, -prev[T], file=file)
first = True
for T in Scur & Sprev:
if cur[T] == prev[T]:
continue
if first:
print('Known Types', file=file)
first = False
print(' ', T, cur[T], 'delta', cur[T] - prev[T], file=file)
else: # first call
print("All Types", file=file)
for T, C in cur.items():
print(' ', T, C, file=file)
self.stats, self.ntypes = cur, len(cur) | Collect stats and print results to file
:param file: A writable file-like object | Below is the the instruction that describes the task:
### Input:
Collect stats and print results to file
:param file: A writable file-like object
### Response:
def collect(self, file=sys.stderr):
    """Collect stats and print results to file
    :param file: A writable file-like object
    """
    current = gcstats()
    n_current = len(current)
    if self.stats is not None and file is not None:
        previous = self.stats
        n_previous = self.ntypes  # may be less than len(previous)
        if n_current != n_previous:
            print("# Types %d -> %d" % (n_previous, n_current), file=file)
        cur_keys, prev_keys = set(current), set(previous)

        def emit(header, rows):
            # Print the header once, and only if there is at least one row.
            shown = False
            for row in rows:
                if not shown:
                    print(header, file=file)
                    shown = True
                print(' ', *row, file=file)

        emit('New Types',
             ((T, current[T]) for T in cur_keys - prev_keys))
        emit('Cleaned Types',
             ((T, -previous[T]) for T in prev_keys - cur_keys))
        emit('Known Types',
             ((T, current[T], 'delta', current[T] - previous[T])
              for T in cur_keys & prev_keys if current[T] != previous[T]))
    else:  # first call: no baseline to diff against
        print("All Types", file=file)
        for T, C in current.items():
            print(' ', T, C, file=file)
    # Remember this snapshot as the baseline for the next call.
    self.stats, self.ntypes = current, n_current
def convertBits(self, sigOrVal, toType):
"""
Cast signed-unsigned, to int or bool
"""
if isinstance(sigOrVal, Value):
return convertBits__val(self, sigOrVal, toType)
elif isinstance(toType, HBool):
if self.bit_length() == 1:
v = 0 if sigOrVal._dtype.negated else 1
return sigOrVal._eq(self.getValueCls().fromPy(v, self))
elif isinstance(toType, Bits):
if self.bit_length() == toType.bit_length():
return sigOrVal._convSign(toType.signed)
elif toType == INT:
return Operator.withRes(AllOps.BitsToInt, [sigOrVal], toType)
return default_auto_cast_fn(self, sigOrVal, toType) | Cast signed-unsigned, to int or bool | Below is the the instruction that describes the task:
### Input:
Cast signed-unsigned, to int or bool
### Response:
def convertBits(self, sigOrVal, toType):
    """
    Cast signed-unsigned, to int or bool
    """
    # Concrete values are converted immediately, signals symbolically.
    if isinstance(sigOrVal, Value):
        return convertBits__val(self, sigOrVal, toType)

    if isinstance(toType, HBool):
        if self.bit_length() == 1:
            # Single bit -> bool: compare against the asserted level,
            # honouring a negated (active-low) signal.
            level = 0 if sigOrVal._dtype.negated else 1
            return sigOrVal._eq(self.getValueCls().fromPy(level, self))
    elif isinstance(toType, Bits):
        if self.bit_length() == toType.bit_length():
            # Same width: only the signedness changes.
            return sigOrVal._convSign(toType.signed)
    elif toType == INT:
        return Operator.withRes(AllOps.BitsToInt, [sigOrVal], toType)

    # Anything not handled above falls back to the generic cast.
    return default_auto_cast_fn(self, sigOrVal, toType)
def __checkSPKTimestamp(self):
"""
Check whether the SPK is too old and generate a new one in that case.
"""
if time.time() - self.__spk["timestamp"] > self.__spk_timeout:
self.__generateSPK() | Check whether the SPK is too old and generate a new one in that case. | Below is the the instruction that describes the task:
### Input:
Check whether the SPK is too old and generate a new one in that case.
### Response:
def __checkSPKTimestamp(self):
    """
    Check whether the SPK is too old and generate a new one in that case.
    """
    spk_age = time.time() - self.__spk["timestamp"]
    if spk_age > self.__spk_timeout:
        self.__generateSPK()
def _has_data(self):
"""Check if there is any data"""
return any([
len([
v for a in (s[0] if is_list_like(s) else [s])
for v in (a if is_list_like(a) else [a]) if v is not None
]) for s in self.raw_series
        ]) | Check if there is any data | Below is the instruction that describes the task:
### Input:
Check if there is any data
### Response:
def _has_data(self):
    """Check if there is any data"""
    # A series contributes data as soon as any of its values is not None.
    for series in self.raw_series:
        arrays = series[0] if is_list_like(series) else [series]
        for arr in arrays:
            values = arr if is_list_like(arr) else [arr]
            if any(v is not None for v in values):
                return True
    return False
def send_and_return_status(self,
send,
expect=None,
shutit_pexpect_child=None,
timeout=None,
fail_on_empty_before=True,
record_command=True,
exit_values=None,
echo=None,
escape=False,
retry=3,
note=None,
assume_gnu=True,
follow_on_commands=None,
loglevel=logging.INFO):
"""Returns true if a good exit code was received (usually 0)
"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
shutit_pexpect_session.send(ShutItSendSpec(shutit_pexpect_session,
send=send,
expect=expect,
timeout=timeout,
check_exit=False,
fail_on_empty_before=fail_on_empty_before,
record_command=record_command,
exit_values=exit_values,
echo=echo,
escape=escape,
retry=retry,
note=note,
assume_gnu=assume_gnu,
loglevel=loglevel,
follow_on_commands=follow_on_commands))
return shutit_pexpect_session.check_last_exit_values(send,
check_exit=True,
expect=expect,
exit_values=exit_values,
retry=retry,
retbool=True) | Returns true if a good exit code was received (usually 0) | Below is the the instruction that describes the task:
### Input:
Returns true if a good exit code was received (usually 0)
### Response:
def send_and_return_status(self,
                           send,
                           expect=None,
                           shutit_pexpect_child=None,
                           timeout=None,
                           fail_on_empty_before=True,
                           record_command=True,
                           exit_values=None,
                           echo=None,
                           escape=False,
                           retry=3,
                           note=None,
                           assume_gnu=True,
                           follow_on_commands=None,
                           loglevel=logging.INFO):
    """Returns true if a good exit code was received (usually 0)
    """
    shutit_global.shutit_global_object.yield_to_draw()
    # Fall back to the current session's child when none was supplied.
    child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
    session = self.get_shutit_pexpect_session_from_child(child)
    session.send(ShutItSendSpec(session,
                                send=send,
                                expect=expect,
                                timeout=timeout,
                                check_exit=False,  # exit status is checked separately below
                                fail_on_empty_before=fail_on_empty_before,
                                record_command=record_command,
                                exit_values=exit_values,
                                echo=echo,
                                escape=escape,
                                retry=retry,
                                note=note,
                                assume_gnu=assume_gnu,
                                loglevel=loglevel,
                                follow_on_commands=follow_on_commands))
    # retbool=True makes the exit-value check return a boolean instead
    # of raising, which is exactly this method's contract.
    return session.check_last_exit_values(send,
                                          check_exit=True,
                                          expect=expect,
                                          exit_values=exit_values,
                                          retry=retry,
                                          retbool=True)
def delete_tags(tags,
name=None,
group_id=None,
vpc_name=None,
vpc_id=None,
region=None,
key=None,
keyid=None,
profile=None):
'''
deletes tags from a security group
.. versionadded:: 2016.3.0
tags
a list of tags to remove
name
the name of the security group
group_id
the group id of the security group (in lie of a name/vpc combo)
vpc_name
the name of the vpc to search the named group for
vpc_id
the id of the vpc, in lieu of the vpc_name
region
the amazon region
key
amazon key
keyid
amazon keyid
profile
amazon profile
CLI example:
.. code-block:: bash
salt myminion boto_secgroup.delete_tags ['TAG_TO_DELETE1','TAG_TO_DELETE2'] security_group_name vpc_id=vpc-13435 profile=my_aws_profile
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
secgrp = _get_group(conn, name=name, vpc_id=vpc_id, vpc_name=vpc_name,
group_id=group_id, region=region, key=key, keyid=keyid,
profile=profile)
if secgrp:
if isinstance(tags, list):
tags_to_remove = {}
for tag in tags:
tags_to_remove[tag] = None
secgrp.remove_tags(tags_to_remove)
else:
msg = 'Tags must be a list of tagnames to remove from the security group'
raise SaltInvocationError(msg)
else:
msg = 'The security group could not be found'
raise SaltInvocationError(msg)
return True | deletes tags from a security group
.. versionadded:: 2016.3.0
tags
a list of tags to remove
name
the name of the security group
group_id
the group id of the security group (in lie of a name/vpc combo)
vpc_name
the name of the vpc to search the named group for
vpc_id
the id of the vpc, in lieu of the vpc_name
region
the amazon region
key
amazon key
keyid
amazon keyid
profile
amazon profile
CLI example:
.. code-block:: bash
salt myminion boto_secgroup.delete_tags ['TAG_TO_DELETE1','TAG_TO_DELETE2'] security_group_name vpc_id=vpc-13435 profile=my_aws_profile | Below is the the instruction that describes the task:
### Input:
deletes tags from a security group
.. versionadded:: 2016.3.0
tags
a list of tags to remove
name
the name of the security group
group_id
the group id of the security group (in lie of a name/vpc combo)
vpc_name
the name of the vpc to search the named group for
vpc_id
the id of the vpc, in lieu of the vpc_name
region
the amazon region
key
amazon key
keyid
amazon keyid
profile
amazon profile
CLI example:
.. code-block:: bash
salt myminion boto_secgroup.delete_tags ['TAG_TO_DELETE1','TAG_TO_DELETE2'] security_group_name vpc_id=vpc-13435 profile=my_aws_profile
### Response:
def delete_tags(tags,
                name=None,
                group_id=None,
                vpc_name=None,
                vpc_id=None,
                region=None,
                key=None,
                keyid=None,
                profile=None):
    '''
    deletes tags from a security group
    .. versionadded:: 2016.3.0
    tags
        a list of tags to remove
    name
        the name of the security group
    group_id
        the group id of the security group (in lieu of a name/vpc combo)
    vpc_name
        the name of the vpc to search the named group for
    vpc_id
        the id of the vpc, in lieu of the vpc_name
    region
        the amazon region
    key
        amazon key
    keyid
        amazon keyid
    profile
        amazon profile
    CLI example:
    .. code-block:: bash
        salt myminion boto_secgroup.delete_tags ['TAG_TO_DELETE1','TAG_TO_DELETE2'] security_group_name vpc_id=vpc-13435 profile=my_aws_profile
    '''
    # Validate the input before touching AWS: previously the type check
    # happened only after the connection and group lookup, spending a
    # network round trip on a call that was always going to fail.
    if not isinstance(tags, list):
        msg = 'Tags must be a list of tagnames to remove from the security group'
        raise SaltInvocationError(msg)
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    secgrp = _get_group(conn, name=name, vpc_id=vpc_id, vpc_name=vpc_name,
                        group_id=group_id, region=region, key=key, keyid=keyid,
                        profile=profile)
    if not secgrp:
        msg = 'The security group could not be found'
        raise SaltInvocationError(msg)
    # remove_tags expects a mapping of tag name -> value; None deletes
    # the tag regardless of its current value.
    secgrp.remove_tags({tag: None for tag in tags})
    return True
def _unrecognised(achr):
""" Handle unrecognised characters. """
if options['handleUnrecognised'] == UNRECOGNISED_ECHO:
return achr
elif options['handleUnrecognised'] == UNRECOGNISED_SUBSTITUTE:
return options['substituteChar']
else:
        raise KeyError(achr) | Handle unrecognised characters. | Below is the instruction that describes the task:
### Input:
Handle unrecognised characters.
### Response:
def _unrecognised(achr):
    """ Handle unrecognised characters. """
    mode = options['handleUnrecognised']
    if mode == UNRECOGNISED_ECHO:
        # Pass the character through unchanged.
        return achr
    if mode == UNRECOGNISED_SUBSTITUTE:
        return options['substituteChar']
    # Any other mode: treat the character as an error.
    raise KeyError(achr)
def supported_peptide_lengths(self):
"""
(minimum, maximum) lengths of peptides supported by *all models*,
inclusive.
Returns
-------
(int, int) tuple
"""
if 'supported_peptide_lengths' not in self._cache:
length_ranges = set(
network.supported_peptide_lengths
for network in self.neural_networks)
result = (
max(lower for (lower, upper) in length_ranges),
min(upper for (lower, upper) in length_ranges))
self._cache["supported_peptide_lengths"] = result
return self._cache["supported_peptide_lengths"] | (minimum, maximum) lengths of peptides supported by *all models*,
inclusive.
Returns
-------
(int, int) tuple | Below is the the instruction that describes the task:
### Input:
(minimum, maximum) lengths of peptides supported by *all models*,
inclusive.
Returns
-------
(int, int) tuple
### Response:
def supported_peptide_lengths(self):
    """
    (minimum, maximum) lengths of peptides supported by *all models*,
    inclusive.
    Returns
    -------
    (int, int) tuple
    """
    cache_key = 'supported_peptide_lengths'
    if cache_key not in self._cache:
        # Intersect the per-network ranges: the common range runs from
        # the largest lower bound to the smallest upper bound.
        ranges = {net.supported_peptide_lengths for net in self.neural_networks}
        lowers = [lo for (lo, hi) in ranges]
        uppers = [hi for (lo, hi) in ranges]
        self._cache[cache_key] = (max(lowers), min(uppers))
    return self._cache[cache_key]
def add_time_step(self, **create_time_step_kwargs):
"""Creates a time-step and appends it to the list.
Args:
**create_time_step_kwargs: Forwarded to
time_step.TimeStep.create_time_step.
"""
ts = time_step.TimeStep.create_time_step(**create_time_step_kwargs)
assert isinstance(ts, time_step.TimeStep)
self._time_steps.append(ts) | Creates a time-step and appends it to the list.
Args:
**create_time_step_kwargs: Forwarded to
time_step.TimeStep.create_time_step. | Below is the the instruction that describes the task:
### Input:
Creates a time-step and appends it to the list.
Args:
**create_time_step_kwargs: Forwarded to
time_step.TimeStep.create_time_step.
### Response:
def add_time_step(self, **create_time_step_kwargs):
    """Creates a time-step and appends it to the list.
    Args:
      **create_time_step_kwargs: Forwarded to
        time_step.TimeStep.create_time_step.
    """
    new_step = time_step.TimeStep.create_time_step(**create_time_step_kwargs)
    # Guard against a factory that returns the wrong type.
    assert isinstance(new_step, time_step.TimeStep)
    self._time_steps.append(new_step)
def _speak_none(self, element):
"""
No speak any content of element only.
:param element: The element.
:type element: hatemile.util.html.htmldomelement.HTMLDOMElement
"""
# pylint: disable=no-self-use
element.set_attribute('role', 'presentation')
element.set_attribute('aria-hidden', 'true')
element.set_attribute(AccessibleCSSImplementation.DATA_SPEAK, 'none') | No speak any content of element only.
:param element: The element.
:type element: hatemile.util.html.htmldomelement.HTMLDOMElement | Below is the the instruction that describes the task:
### Input:
No speak any content of element only.
:param element: The element.
:type element: hatemile.util.html.htmldomelement.HTMLDOMElement
### Response:
def _speak_none(self, element):
    """
    No speak any content of element only.
    :param element: The element.
    :type element: hatemile.util.html.htmldomelement.HTMLDOMElement
    """
    # pylint: disable=no-self-use
    # Hide the element from assistive technology and mark it as
    # non-spoken for the CSS speech module.
    silencing_attributes = (
        ('role', 'presentation'),
        ('aria-hidden', 'true'),
        (AccessibleCSSImplementation.DATA_SPEAK, 'none'),
    )
    for attribute, value in silencing_attributes:
        element.set_attribute(attribute, value)
def set_font(font, section='appearance', option='font'):
"""Set font"""
CONF.set(section, option+'/family', to_text_string(font.family()))
CONF.set(section, option+'/size', float(font.pointSize()))
CONF.set(section, option+'/italic', int(font.italic()))
CONF.set(section, option+'/bold', int(font.bold()))
    FONT_CACHE[(section, option)] = font | Set font | Below is the instruction that describes the task:
### Input:
Set font
### Response:
def set_font(font, section='appearance', option='font'):
    """Persist *font* to the config and cache it for later lookups."""
    prefix = option + '/'
    CONF.set(section, prefix + 'family', to_text_string(font.family()))
    CONF.set(section, prefix + 'size', float(font.pointSize()))
    # Booleans are stored as ints to match the config format.
    CONF.set(section, prefix + 'italic', int(font.italic()))
    CONF.set(section, prefix + 'bold', int(font.bold()))
    FONT_CACHE[(section, option)] = font
def find(self, func):
"""
Return the first value which passes a truth test.
Aliased as `detect`.
"""
self.ftmp = None
def test(value, index, list):
if func(value, index, list) is True:
self.ftmp = value
return True
self._clean.any(test)
return self._wrap(self.ftmp) | Return the first value which passes a truth test.
Aliased as `detect`. | Below is the the instruction that describes the task:
### Input:
Return the first value which passes a truth test.
Aliased as `detect`.
### Response:
def find(self, func):
    """
    Return the first value which passes a truth test.
    Aliased as `detect`.
    """
    # self.ftmp carries the match out of the callback (matches the
    # original implementation's observable state).
    self.ftmp = None

    def probe(value, index, collection):
        # Note: the predicate must return True itself, not merely a
        # truthy value, for the element to count as a match.
        if func(value, index, collection) is True:
            self.ftmp = value
            return True

    self._clean.any(probe)
    return self._wrap(self.ftmp)
def _get_term_object(filter_name,
term_name,
pillar_key='acl',
pillarenv=None,
saltenv=None,
merge_pillar=True,
**term_fields):
'''
Return an instance of the ``_Term`` class given the term options.
'''
log.debug('Generating config for term %s under filter %s', term_name, filter_name)
term = _Term()
term.name = term_name
term_opts = {}
if merge_pillar:
term_opts = get_term_pillar(filter_name,
term_name,
pillar_key=pillar_key,
saltenv=saltenv,
pillarenv=pillarenv)
log.debug('Merging with pillar data:')
log.debug(term_opts)
term_opts = _clean_term_opts(term_opts)
log.debug('Cleaning up pillar data:')
log.debug(term_opts)
log.debug('Received processing opts:')
log.debug(term_fields)
log.debug('Cleaning up processing opts:')
term_fields = _clean_term_opts(term_fields)
log.debug(term_fields)
log.debug('Final term opts:')
term_opts.update(term_fields)
log.debug(term_fields)
for field, value in six.iteritems(term_opts):
# setting the field attributes to the term instance of _Term
setattr(term, field, value)
log.debug('Term config:')
log.debug(six.text_type(term))
return term | Return an instance of the ``_Term`` class given the term options. | Below is the the instruction that describes the task:
### Input:
Return an instance of the ``_Term`` class given the term options.
### Response:
def _get_term_object(filter_name,
                     term_name,
                     pillar_key='acl',
                     pillarenv=None,
                     saltenv=None,
                     merge_pillar=True,
                     **term_fields):
    '''
    Return an instance of the ``_Term`` class given the term options.
    '''
    log.debug('Generating config for term %s under filter %s', term_name, filter_name)
    term = _Term()
    term.name = term_name
    term_opts = {}
    if merge_pillar:
        term_opts = get_term_pillar(filter_name,
                                    term_name,
                                    pillar_key=pillar_key,
                                    saltenv=saltenv,
                                    pillarenv=pillarenv)
        log.debug('Merging with pillar data:')
        log.debug(term_opts)
        term_opts = _clean_term_opts(term_opts)
        log.debug('Cleaning up pillar data:')
        log.debug(term_opts)
    log.debug('Received processing opts:')
    log.debug(term_fields)
    log.debug('Cleaning up processing opts:')
    term_fields = _clean_term_opts(term_fields)
    log.debug(term_fields)
    # Explicit keyword opts take precedence over pillar-derived ones.
    term_opts.update(term_fields)
    log.debug('Final term opts:')
    # BUG FIX: previously this logged ``term_fields``, hiding the merged
    # options under the 'Final term opts:' header; log ``term_opts``.
    log.debug(term_opts)
    for field, value in six.iteritems(term_opts):
        # setting the field attributes to the term instance of _Term
        setattr(term, field, value)
    log.debug('Term config:')
    log.debug(six.text_type(term))
    return term
def _pip_cmd(self, name=None, prefix=None):
"""Get pip location based on environment `name` or `prefix`."""
if (name and prefix) or not (name or prefix):
raise TypeError("conda pip: exactly one of 'name' ""or 'prefix' "
"required.")
if name and self.environment_exists(name=name):
prefix = self.get_prefix_envname(name)
if sys.platform == 'win32':
python = join(prefix, 'python.exe') # FIXME:
pip = join(prefix, 'pip.exe') # FIXME:
else:
python = join(prefix, 'bin/python')
pip = join(prefix, 'bin/pip')
cmd_list = [python, pip]
return cmd_list | Get pip location based on environment `name` or `prefix`. | Below is the the instruction that describes the task:
### Input:
Get pip location based on environment `name` or `prefix`.
### Response:
def _pip_cmd(self, name=None, prefix=None):
    """Get pip location based on environment `name` or `prefix`."""
    # Exactly one of the two selectors must be provided.
    if bool(name) == bool(prefix):
        raise TypeError("conda pip: exactly one of 'name' or 'prefix' "
                        "required.")
    if name and self.environment_exists(name=name):
        prefix = self.get_prefix_envname(name)
    if sys.platform == 'win32':
        executables = ('python.exe', 'pip.exe')  # FIXME:
    else:
        executables = ('bin/python', 'bin/pip')
    return [join(prefix, exe) for exe in executables]
def _get_supercell_size(self, s1, s2):
"""
Returns the supercell size, and whether the supercell should
be applied to s1. If fu == 1, s1_supercell is returned as
true, to avoid ambiguity.
"""
if self._supercell_size == 'num_sites':
fu = s2.num_sites / s1.num_sites
elif self._supercell_size == 'num_atoms':
fu = s2.composition.num_atoms / s1.composition.num_atoms
elif self._supercell_size == 'volume':
fu = s2.volume / s1.volume
else:
try:
el = get_el_sp(self._supercell_size)
fu = s2.composition[el] / s1.composition[el]
except:
raise ValueError('Invalid argument for supercell_size.')
if fu < 2/3:
return int(round(1/fu)), False
else:
return int(round(fu)), True | Returns the supercell size, and whether the supercell should
be applied to s1. If fu == 1, s1_supercell is returned as
true, to avoid ambiguity. | Below is the the instruction that describes the task:
### Input:
Returns the supercell size, and whether the supercell should
be applied to s1. If fu == 1, s1_supercell is returned as
true, to avoid ambiguity.
### Response:
def _get_supercell_size(self, s1, s2):
    """
    Returns the supercell size, and whether the supercell should
    be applied to s1. If fu == 1, s1_supercell is returned as
    true, to avoid ambiguity.
    """
    if self._supercell_size == 'num_sites':
        fu = s2.num_sites / s1.num_sites
    elif self._supercell_size == 'num_atoms':
        fu = s2.composition.num_atoms / s1.composition.num_atoms
    elif self._supercell_size == 'volume':
        fu = s2.volume / s1.volume
    else:
        # Fall back to interpreting supercell_size as an element/species
        # symbol and compare its abundance in the two structures.
        try:
            el = get_el_sp(self._supercell_size)
            fu = s2.composition[el] / s1.composition[el]
        except Exception:
            # BUG FIX: was a bare ``except:``, which also swallowed
            # KeyboardInterrupt/SystemExit. Catch Exception only.
            raise ValueError('Invalid argument for supercell_size.')
    # Below ~2/3 the ratio is effectively inverted: s1 is the larger
    # structure, so the supercell applies to s2 instead.
    if fu < 2 / 3:
        return int(round(1 / fu)), False
    else:
        return int(round(fu)), True
def render_trace(
self, trace_list=None, file=sys.stdout, render_cls=default_renderer(),
symbol_len=5, segment_size=5, segment_delim=' ', extra_line=True):
""" Render the trace to a file using unicode and ASCII escape sequences.
:param trace_list: A list of signals to be output in the specified order.
:param file: The place to write output, default to stdout.
:param render_cls: A class that translates traces into output bytes.
:param symbol_len: The "length" of each rendered cycle in characters.
:param segment_size: Traces are broken in the segments of this number of cycles.
:param segment_delim: The character to be output between segments.
:param extra_line: A Boolean to determin if we should print a blank line between signals.
The resulting output can be viewed directly on the terminal or looked
at with "more" or "less -R" which both should handle the ASCII escape
sequences used in rendering. render_trace takes the following optional
arguments.
"""
if _currently_in_ipython():
from IPython.display import display, HTML, Javascript # pylint: disable=import-error
from .inputoutput import trace_to_html
htmlstring = trace_to_html(self, trace_list=trace_list, sortkey=_trace_sort_key)
html_elem = HTML(htmlstring)
display(html_elem)
# print(htmlstring)
js_stuff = """
$.when(
$.getScript("https://cdnjs.cloudflare.com/ajax/libs/wavedrom/1.6.2/skins/default.js"),
$.getScript("https://cdnjs.cloudflare.com/ajax/libs/wavedrom/1.6.2/wavedrom.min.js"),
$.Deferred(function( deferred ){
$( deferred.resolve );
})).done(function(){
WaveDrom.ProcessAll();
});"""
display(Javascript(js_stuff))
else:
self.render_trace_to_text(
trace_list=trace_list, file=file, render_cls=render_cls,
symbol_len=symbol_len, segment_size=segment_size,
segment_delim=segment_delim, extra_line=extra_line) | Render the trace to a file using unicode and ASCII escape sequences.
:param trace_list: A list of signals to be output in the specified order.
:param file: The place to write output, default to stdout.
:param render_cls: A class that translates traces into output bytes.
:param symbol_len: The "length" of each rendered cycle in characters.
:param segment_size: Traces are broken in the segments of this number of cycles.
:param segment_delim: The character to be output between segments.
:param extra_line: A Boolean to determin if we should print a blank line between signals.
The resulting output can be viewed directly on the terminal or looked
at with "more" or "less -R" which both should handle the ASCII escape
sequences used in rendering. render_trace takes the following optional
arguments. | Below is the the instruction that describes the task:
### Input:
Render the trace to a file using unicode and ASCII escape sequences.
:param trace_list: A list of signals to be output in the specified order.
:param file: The place to write output, default to stdout.
:param render_cls: A class that translates traces into output bytes.
:param symbol_len: The "length" of each rendered cycle in characters.
:param segment_size: Traces are broken in the segments of this number of cycles.
:param segment_delim: The character to be output between segments.
:param extra_line: A Boolean to determin if we should print a blank line between signals.
The resulting output can be viewed directly on the terminal or looked
at with "more" or "less -R" which both should handle the ASCII escape
sequences used in rendering. render_trace takes the following optional
arguments.
### Response:
def render_trace(
        self, trace_list=None, file=sys.stdout, render_cls=default_renderer(),
        symbol_len=5, segment_size=5, segment_delim=' ', extra_line=True):
    """ Render the trace to a file using unicode and ASCII escape sequences.
    :param trace_list: A list of signals to be output in the specified order.
    :param file: The place to write output, default to stdout.
    :param render_cls: A class that translates traces into output bytes.
    :param symbol_len: The "length" of each rendered cycle in characters.
    :param segment_size: Traces are broken in the segments of this number of cycles.
    :param segment_delim: The character to be output between segments.
    :param extra_line: A Boolean to determin if we should print a blank line between signals.
    The resulting output can be viewed directly on the terminal or looked
    at with "more" or "less -R" which both should handle the ASCII escape
    sequences used in rendering. render_trace takes the following optional
    arguments.
    """
    # Inside IPython/Jupyter, render an interactive WaveDrom diagram
    # instead of terminal text. NOTE(review): the HTML path ignores
    # render_cls/symbol_len/segment_* — presumably intentional, since
    # WaveDrom does its own layout; confirm against callers.
    if _currently_in_ipython():
        from IPython.display import display, HTML, Javascript  # pylint: disable=import-error
        from .inputoutput import trace_to_html
        htmlstring = trace_to_html(self, trace_list=trace_list, sortkey=_trace_sort_key)
        html_elem = HTML(htmlstring)
        display(html_elem)
        # print(htmlstring)
        # Load the WaveDrom assets from a CDN, then process the markup
        # that trace_to_html emitted. Requires network access in the
        # notebook front-end.
        js_stuff = """
        $.when(
        $.getScript("https://cdnjs.cloudflare.com/ajax/libs/wavedrom/1.6.2/skins/default.js"),
        $.getScript("https://cdnjs.cloudflare.com/ajax/libs/wavedrom/1.6.2/wavedrom.min.js"),
        $.Deferred(function( deferred ){
        $( deferred.resolve );
        })).done(function(){
        WaveDrom.ProcessAll();
        });"""
        display(Javascript(js_stuff))
    else:
        # Plain-terminal path: delegate to the text renderer with all
        # formatting options forwarded unchanged.
        self.render_trace_to_text(
            trace_list=trace_list, file=file, render_cls=render_cls,
            symbol_len=symbol_len, segment_size=segment_size,
            segment_delim=segment_delim, extra_line=extra_line) |
def get_compiler(self, *args, **kwargs):
"""
Add the query time restriction limit at the last moment. Applying it
earlier (e.g. by adding a filter to the queryset) does not allow the
caching of related object to work (they are attached to a queryset;
filter() returns a new queryset).
"""
if self.querytime.active and \
(not hasattr(self, '_querytime_filter_added') or
not self._querytime_filter_added):
time = self.querytime.time
if time is None:
self.add_q(Q(version_end_date__isnull=True))
else:
self.add_q(
(Q(version_end_date__gt=time) |
Q(version_end_date__isnull=True)) &
Q(version_start_date__lte=time)
)
# Ensure applying these filters happens only a single time (even
# if it doesn't falsify the query, it's just not very comfortable
# to read)
self._querytime_filter_added = True
return super(VersionedQuery, self).get_compiler(*args, **kwargs) | Add the query time restriction limit at the last moment. Applying it
earlier (e.g. by adding a filter to the queryset) does not allow the
caching of related object to work (they are attached to a queryset;
filter() returns a new queryset). | Below is the the instruction that describes the task:
### Input:
Add the query time restriction limit at the last moment. Applying it
earlier (e.g. by adding a filter to the queryset) does not allow the
caching of related object to work (they are attached to a queryset;
filter() returns a new queryset).
### Response:
def get_compiler(self, *args, **kwargs):
"""
Add the query time restriction limit at the last moment. Applying it
earlier (e.g. by adding a filter to the queryset) does not allow the
caching of related object to work (they are attached to a queryset;
filter() returns a new queryset).
"""
if self.querytime.active and \
(not hasattr(self, '_querytime_filter_added') or
not self._querytime_filter_added):
time = self.querytime.time
if time is None:
self.add_q(Q(version_end_date__isnull=True))
else:
self.add_q(
(Q(version_end_date__gt=time) |
Q(version_end_date__isnull=True)) &
Q(version_start_date__lte=time)
)
# Ensure applying these filters happens only a single time (even
# if it doesn't falsify the query, it's just not very comfortable
# to read)
self._querytime_filter_added = True
return super(VersionedQuery, self).get_compiler(*args, **kwargs) |
def cmd_genobstacles(self, args):
'''genobstacles command parser'''
usage = "usage: genobstacles <start|stop|restart|clearall|status|set>"
if len(args) == 0:
print(usage)
return
if args[0] == "set":
gen_settings.command(args[1:])
elif args[0] == "start":
if self.have_home:
self.start()
else:
self.pending_start = True
elif args[0] == "stop":
self.stop()
self.pending_start = False
elif args[0] == "restart":
self.stop()
self.start()
elif args[0] == "status":
print(self.status())
elif args[0] == "remove":
latlon = self.module('map').click_position
if self.last_click is not None and self.last_click == latlon:
return
self.last_click = latlon
if latlon is not None:
closest = None
closest_distance = 1000
for a in self.aircraft:
dist = a.distance_from(latlon[0], latlon[1])
if dist < closest_distance:
closest_distance = dist
closest = a
if closest is not None:
self.aircraft.remove(closest)
else:
print("No obstacle found at click point")
elif args[0] == "dropcloud":
self.cmd_dropobject(Weather())
elif args[0] == "dropeagle":
self.cmd_dropobject(BirdOfPrey())
elif args[0] == "dropbird":
self.cmd_dropobject(BirdMigrating())
elif args[0] == "dropplane":
self.cmd_dropobject(Aircraft())
elif args[0] == "clearall":
self.clearall()
else:
print(usage) | genobstacles command parser | Below is the the instruction that describes the task:
### Input:
genobstacles command parser
### Response:
def cmd_genobstacles(self, args):
'''genobstacles command parser'''
usage = "usage: genobstacles <start|stop|restart|clearall|status|set>"
if len(args) == 0:
print(usage)
return
if args[0] == "set":
gen_settings.command(args[1:])
elif args[0] == "start":
if self.have_home:
self.start()
else:
self.pending_start = True
elif args[0] == "stop":
self.stop()
self.pending_start = False
elif args[0] == "restart":
self.stop()
self.start()
elif args[0] == "status":
print(self.status())
elif args[0] == "remove":
latlon = self.module('map').click_position
if self.last_click is not None and self.last_click == latlon:
return
self.last_click = latlon
if latlon is not None:
closest = None
closest_distance = 1000
for a in self.aircraft:
dist = a.distance_from(latlon[0], latlon[1])
if dist < closest_distance:
closest_distance = dist
closest = a
if closest is not None:
self.aircraft.remove(closest)
else:
print("No obstacle found at click point")
elif args[0] == "dropcloud":
self.cmd_dropobject(Weather())
elif args[0] == "dropeagle":
self.cmd_dropobject(BirdOfPrey())
elif args[0] == "dropbird":
self.cmd_dropobject(BirdMigrating())
elif args[0] == "dropplane":
self.cmd_dropobject(Aircraft())
elif args[0] == "clearall":
self.clearall()
else:
print(usage) |
def as_statements(lines: Iterator[str]) -> Iterator[str]:
"""Create an iterator that transforms lines into sql statements.
Statements within the lines must end with ";"
The last statement will be included even if it does not end in ';'
>>> list(as_statements(['select * from', '-- comments are filtered', 't;']))
['select * from t']
>>> list(as_statements(['a;', 'b', 'c;', 'd', ' ']))
['a', 'b c', 'd']
"""
lines = (l.strip() for l in lines if l)
lines = (l for l in lines if l and not l.startswith('--'))
parts = []
for line in lines:
parts.append(line.rstrip(';'))
if line.endswith(';'):
yield ' '.join(parts)
parts.clear()
if parts:
yield ' '.join(parts) | Create an iterator that transforms lines into sql statements.
Statements within the lines must end with ";"
The last statement will be included even if it does not end in ';'
>>> list(as_statements(['select * from', '-- comments are filtered', 't;']))
['select * from t']
>>> list(as_statements(['a;', 'b', 'c;', 'd', ' ']))
['a', 'b c', 'd'] | Below is the the instruction that describes the task:
### Input:
Create an iterator that transforms lines into sql statements.
Statements within the lines must end with ";"
The last statement will be included even if it does not end in ';'
>>> list(as_statements(['select * from', '-- comments are filtered', 't;']))
['select * from t']
>>> list(as_statements(['a;', 'b', 'c;', 'd', ' ']))
['a', 'b c', 'd']
### Response:
def as_statements(lines: Iterator[str]) -> Iterator[str]:
"""Create an iterator that transforms lines into sql statements.
Statements within the lines must end with ";"
The last statement will be included even if it does not end in ';'
>>> list(as_statements(['select * from', '-- comments are filtered', 't;']))
['select * from t']
>>> list(as_statements(['a;', 'b', 'c;', 'd', ' ']))
['a', 'b c', 'd']
"""
lines = (l.strip() for l in lines if l)
lines = (l for l in lines if l and not l.startswith('--'))
parts = []
for line in lines:
parts.append(line.rstrip(';'))
if line.endswith(';'):
yield ' '.join(parts)
parts.clear()
if parts:
yield ' '.join(parts) |
def node(self, title, **args):
"""draw a node
"""
self._stream.write('%snode: {title:"%s"' % (self._indent, title))
self._write_attributes(NODE_ATTRS, **args)
self._stream.write("}\n") | draw a node | Below is the the instruction that describes the task:
### Input:
draw a node
### Response:
def node(self, title, **args):
"""draw a node
"""
self._stream.write('%snode: {title:"%s"' % (self._indent, title))
self._write_attributes(NODE_ATTRS, **args)
self._stream.write("}\n") |
def adapt_persistent_instance(persistent_object, target_rest_class=None, attribute_filter=None):
"""
Adapts a single persistent instance to a REST model; at present this is a
common method for all persistent backends.
Refer to: https://groups.google.com/forum/#!topic/prestans-discuss/dO1yx8f60as
for discussion on this feature
"""
# try and get the adapter and the REST class for the persistent object
if target_rest_class is None:
adapter_instance = registry.get_adapter_for_persistent_model(persistent_object)
else:
if inspect.isclass(target_rest_class):
target_rest_class = target_rest_class()
adapter_instance = registry.get_adapter_for_persistent_model(persistent_object, target_rest_class)
# would raise an exception if the attribute_filter differs from the target_rest_class
if attribute_filter is not None and isinstance(attribute_filter, parser.AttributeFilter):
parser.AttributeFilter.from_model(target_rest_class).conforms_to_template_filter(attribute_filter)
# convert filter to immutable if it isn't already
if isinstance(attribute_filter, parser.AttributeFilter):
attribute_filter = attribute_filter.as_immutable()
return adapter_instance.adapt_persistent_to_rest(persistent_object, attribute_filter) | Adapts a single persistent instance to a REST model; at present this is a
common method for all persistent backends.
Refer to: https://groups.google.com/forum/#!topic/prestans-discuss/dO1yx8f60as
for discussion on this feature | Below is the the instruction that describes the task:
### Input:
Adapts a single persistent instance to a REST model; at present this is a
common method for all persistent backends.
Refer to: https://groups.google.com/forum/#!topic/prestans-discuss/dO1yx8f60as
for discussion on this feature
### Response:
def adapt_persistent_instance(persistent_object, target_rest_class=None, attribute_filter=None):
"""
Adapts a single persistent instance to a REST model; at present this is a
common method for all persistent backends.
Refer to: https://groups.google.com/forum/#!topic/prestans-discuss/dO1yx8f60as
for discussion on this feature
"""
# try and get the adapter and the REST class for the persistent object
if target_rest_class is None:
adapter_instance = registry.get_adapter_for_persistent_model(persistent_object)
else:
if inspect.isclass(target_rest_class):
target_rest_class = target_rest_class()
adapter_instance = registry.get_adapter_for_persistent_model(persistent_object, target_rest_class)
# would raise an exception if the attribute_filter differs from the target_rest_class
if attribute_filter is not None and isinstance(attribute_filter, parser.AttributeFilter):
parser.AttributeFilter.from_model(target_rest_class).conforms_to_template_filter(attribute_filter)
# convert filter to immutable if it isn't already
if isinstance(attribute_filter, parser.AttributeFilter):
attribute_filter = attribute_filter.as_immutable()
return adapter_instance.adapt_persistent_to_rest(persistent_object, attribute_filter) |
def _check_graphviz_available(output_format):
"""check if we need graphviz for different output format"""
try:
subprocess.call(["dot", "-V"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError:
print(
"The output format '%s' is currently not available.\n"
"Please install 'Graphviz' to have other output formats "
"than 'dot' or 'vcg'." % output_format
)
sys.exit(32) | check if we need graphviz for different output format | Below is the the instruction that describes the task:
### Input:
check if we need graphviz for different output format
### Response:
def _check_graphviz_available(output_format):
"""check if we need graphviz for different output format"""
try:
subprocess.call(["dot", "-V"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError:
print(
"The output format '%s' is currently not available.\n"
"Please install 'Graphviz' to have other output formats "
"than 'dot' or 'vcg'." % output_format
)
sys.exit(32) |
def fetch(self):
"""Lazily trigger download of the data when requested."""
if self._file_path is not None:
return self._file_path
temp_path = self.context.work_path
if self._content_hash is not None:
self._file_path = storage.load_file(self._content_hash,
temp_path=temp_path)
return self._file_path
if self.response is not None:
self._file_path = random_filename(temp_path)
content_hash = sha1()
with open(self._file_path, 'wb') as fh:
for chunk in self.response.iter_content(chunk_size=8192):
content_hash.update(chunk)
fh.write(chunk)
self._remove_file = True
chash = content_hash.hexdigest()
self._content_hash = storage.archive_file(self._file_path,
content_hash=chash)
if self.http.cache and self.ok:
self.context.set_tag(self.request_id, self.serialize())
self.retrieved_at = datetime.utcnow().isoformat()
return self._file_path | Lazily trigger download of the data when requested. | Below is the the instruction that describes the task:
### Input:
Lazily trigger download of the data when requested.
### Response:
def fetch(self):
"""Lazily trigger download of the data when requested."""
if self._file_path is not None:
return self._file_path
temp_path = self.context.work_path
if self._content_hash is not None:
self._file_path = storage.load_file(self._content_hash,
temp_path=temp_path)
return self._file_path
if self.response is not None:
self._file_path = random_filename(temp_path)
content_hash = sha1()
with open(self._file_path, 'wb') as fh:
for chunk in self.response.iter_content(chunk_size=8192):
content_hash.update(chunk)
fh.write(chunk)
self._remove_file = True
chash = content_hash.hexdigest()
self._content_hash = storage.archive_file(self._file_path,
content_hash=chash)
if self.http.cache and self.ok:
self.context.set_tag(self.request_id, self.serialize())
self.retrieved_at = datetime.utcnow().isoformat()
return self._file_path |
def parse_table_data(lines):
""""Parse list of lines from SOFT file into DataFrame.
Args:
lines (:obj:`Iterable`): Iterator over the lines.
Returns:
:obj:`pandas.DataFrame`: Table data.
"""
# filter lines that do not start with symbols
data = "\n".join([i.rstrip() for i in lines
if not i.startswith(("^", "!", "#")) and i.rstrip()])
if data:
return read_csv(StringIO(data), index_col=None, sep="\t")
else:
return DataFrame() | Parse list of lines from SOFT file into DataFrame.
Args:
lines (:obj:`Iterable`): Iterator over the lines.
Returns:
:obj:`pandas.DataFrame`: Table data. | Below is the the instruction that describes the task:
### Input:
Parse list of lines from SOFT file into DataFrame.
Args:
lines (:obj:`Iterable`): Iterator over the lines.
Returns:
:obj:`pandas.DataFrame`: Table data.
### Response:
def parse_table_data(lines):
""""Parse list of lines from SOFT file into DataFrame.
Args:
lines (:obj:`Iterable`): Iterator over the lines.
Returns:
:obj:`pandas.DataFrame`: Table data.
"""
# filter lines that do not start with symbols
data = "\n".join([i.rstrip() for i in lines
if not i.startswith(("^", "!", "#")) and i.rstrip()])
if data:
return read_csv(StringIO(data), index_col=None, sep="\t")
else:
return DataFrame() |
def start_file_logger(self, name, log_file_level=logging.DEBUG, log_file_path='./'):
"""Start file logging."""
log_file_path = os.path.expanduser(log_file_path) / '{}.log'.format(name)
logdir = log_file_path.parent
try:
logdir.mkdir(parents=True, exist_ok=True)
# If the log file exists, backs it up before creating a new file handler
if log_file_path.exists():
strtime = datetime.datetime.utcnow().strftime('%Y-%m-%d_%H:%M:%S')
shutil.move(log_file_path, log_file_path + '.' + strtime)
self.fh = TimedRotatingFileHandler(str(log_file_path), when='midnight', utc=True)
self.fh.suffix = '%Y-%m-%d_%H:%M:%S'
except (IOError, OSError) as ee:
warnings.warn('log file {0!r} could not be opened for writing: '
'{1}'.format(log_file_path, ee), RuntimeWarning)
else:
self.fh.setFormatter(fmt)
self.addHandler(self.fh)
self.fh.setLevel(log_file_level)
self.log_filename = log_file_path | Start file logging. | Below is the the instruction that describes the task:
### Input:
Start file logging.
### Response:
def start_file_logger(self, name, log_file_level=logging.DEBUG, log_file_path='./'):
"""Start file logging."""
log_file_path = os.path.expanduser(log_file_path) / '{}.log'.format(name)
logdir = log_file_path.parent
try:
logdir.mkdir(parents=True, exist_ok=True)
# If the log file exists, backs it up before creating a new file handler
if log_file_path.exists():
strtime = datetime.datetime.utcnow().strftime('%Y-%m-%d_%H:%M:%S')
shutil.move(log_file_path, log_file_path + '.' + strtime)
self.fh = TimedRotatingFileHandler(str(log_file_path), when='midnight', utc=True)
self.fh.suffix = '%Y-%m-%d_%H:%M:%S'
except (IOError, OSError) as ee:
warnings.warn('log file {0!r} could not be opened for writing: '
'{1}'.format(log_file_path, ee), RuntimeWarning)
else:
self.fh.setFormatter(fmt)
self.addHandler(self.fh)
self.fh.setLevel(log_file_level)
self.log_filename = log_file_path |
def _list_locators(self):
"""
Lists locators.
Returns:
generator of tuple: locator name str, locator header dict
"""
with _handle_azure_exception():
for share in self.client.list_shares():
yield share.name, self._model_to_dict(share) | Lists locators.
Returns:
generator of tuple: locator name str, locator header dict | Below is the the instruction that describes the task:
### Input:
Lists locators.
Returns:
generator of tuple: locator name str, locator header dict
### Response:
def _list_locators(self):
"""
Lists locators.
Returns:
generator of tuple: locator name str, locator header dict
"""
with _handle_azure_exception():
for share in self.client.list_shares():
yield share.name, self._model_to_dict(share) |
def _fix_list_dyn_func(list):
"""
This searches a list for dynamic function fragments, which may have been cut by generic searches for ",|;".
Example:
`link_a, [[copy('links', need_id)]]` this will be splitted in list of 3 parts:
#. link_a
#. [[copy('links'
#. need_id)]]
This function fixes the above list to the following:
#. link_a
#. [[copy('links', need_id)]]
:param list: list which may contain splitted function calls
:return: list of fixed elements
"""
open_func_string = False
new_list = []
for element in list:
if '[[' in element:
open_func_string = True
new_link = [element]
elif ']]' in element:
new_link.append(element)
open_func_string = False
element = ",".join(new_link)
new_list.append(element)
elif open_func_string:
new_link.append(element)
else:
new_list.append(element)
return new_list | This searches a list for dynamic function fragments, which may have been cut by generic searches for ",|;".
Example:
`link_a, [[copy('links', need_id)]]` this will be splitted in list of 3 parts:
#. link_a
#. [[copy('links'
#. need_id)]]
This function fixes the above list to the following:
#. link_a
#. [[copy('links', need_id)]]
:param list: list which may contain splitted function calls
:return: list of fixed elements | Below is the the instruction that describes the task:
### Input:
This searches a list for dynamic function fragments, which may have been cut by generic searches for ",|;".
Example:
`link_a, [[copy('links', need_id)]]` this will be splitted in list of 3 parts:
#. link_a
#. [[copy('links'
#. need_id)]]
This function fixes the above list to the following:
#. link_a
#. [[copy('links', need_id)]]
:param list: list which may contain splitted function calls
:return: list of fixed elements
### Response:
def _fix_list_dyn_func(list):
"""
This searches a list for dynamic function fragments, which may have been cut by generic searches for ",|;".
Example:
`link_a, [[copy('links', need_id)]]` this will be splitted in list of 3 parts:
#. link_a
#. [[copy('links'
#. need_id)]]
This function fixes the above list to the following:
#. link_a
#. [[copy('links', need_id)]]
:param list: list which may contain splitted function calls
:return: list of fixed elements
"""
open_func_string = False
new_list = []
for element in list:
if '[[' in element:
open_func_string = True
new_link = [element]
elif ']]' in element:
new_link.append(element)
open_func_string = False
element = ",".join(new_link)
new_list.append(element)
elif open_func_string:
new_link.append(element)
else:
new_list.append(element)
return new_list |
def taskotron_task_outcome(config, message, outcome=None):
""" Particular taskotron task outcome
With this rule, you can limit messages to only those of particular
`taskotron <https://taskotron.fedoraproject.org/>`_ task outcome.
You can specify several outcomes by separating them with a comma ',',
i.e.: ``PASSED,FAILED``.
The full list of supported outcomes can be found in the libtaskotron
`documentation <https://docs.qadevel.cloud.fedoraproject.org/
libtaskotron/latest/resultyaml.html#minimal-version>`_.
"""
# We only operate on taskotron messages, first off.
if not taskotron_result_new(config, message):
return False
if not outcome:
return False
outcomes = [item.strip().lower() for item in outcome.split(',')]
return message['msg']['result'].get('outcome').lower() in outcomes | Particular taskotron task outcome
With this rule, you can limit messages to only those of particular
`taskotron <https://taskotron.fedoraproject.org/>`_ task outcome.
You can specify several outcomes by separating them with a comma ',',
i.e.: ``PASSED,FAILED``.
The full list of supported outcomes can be found in the libtaskotron
`documentation <https://docs.qadevel.cloud.fedoraproject.org/
libtaskotron/latest/resultyaml.html#minimal-version>`_. | Below is the the instruction that describes the task:
### Input:
Particular taskotron task outcome
With this rule, you can limit messages to only those of particular
`taskotron <https://taskotron.fedoraproject.org/>`_ task outcome.
You can specify several outcomes by separating them with a comma ',',
i.e.: ``PASSED,FAILED``.
The full list of supported outcomes can be found in the libtaskotron
`documentation <https://docs.qadevel.cloud.fedoraproject.org/
libtaskotron/latest/resultyaml.html#minimal-version>`_.
### Response:
def taskotron_task_outcome(config, message, outcome=None):
""" Particular taskotron task outcome
With this rule, you can limit messages to only those of particular
`taskotron <https://taskotron.fedoraproject.org/>`_ task outcome.
You can specify several outcomes by separating them with a comma ',',
i.e.: ``PASSED,FAILED``.
The full list of supported outcomes can be found in the libtaskotron
`documentation <https://docs.qadevel.cloud.fedoraproject.org/
libtaskotron/latest/resultyaml.html#minimal-version>`_.
"""
# We only operate on taskotron messages, first off.
if not taskotron_result_new(config, message):
return False
if not outcome:
return False
outcomes = [item.strip().lower() for item in outcome.split(',')]
return message['msg']['result'].get('outcome').lower() in outcomes |
def _compute_ranks(X, winsorize=False, truncation=None, verbose=True):
"""
Transform each column into ranked data. Tied ranks are averaged.
Ranks can optionally be winsorized as described in Liu 2009 otherwise
this returns Tsukahara's scaled rank based Z-estimator.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
The data matrix where each column is a feature.
Row observations for each column will be replaced
by correponding rank.
winsorize: bool
Choose whether ranks should be winsorized (trimmed) or not. If True,
then ranks will be winsorized using the truncation parameter.
truncation: (float)
The default value is given by 1/(4 n^(1/4) * sqrt(pi log n)), where
n is the number of samples.
Returns
-------
Xrank
References
----------
Liu, Han, John Lafferty, and Larry Wasserman.
"The nonparanormal: Semiparametric estimation of high dimensional
undirected graphs."
Journal of Machine Learning Research 10.Oct (2009): 2295-2328.
"""
n_samples, n_features = X.shape
Xrank = np.zeros(shape=X.shape)
if winsorize:
if truncation is None:
truncation = 1 / (
4 * np.power(n_samples, 0.25) * np.sqrt(np.pi * np.log(n_samples))
)
elif truncation > 1:
truncation = np.min(1.0, truncation)
for col in np.arange(n_features):
Xrank[:, col] = rankdata(X[:, col], method="average")
Xrank[:, col] /= n_samples
if winsorize:
if n_samples > 100 * n_features:
Xrank[:, col] = n_samples * Xrank[:, col] / (n_samples + 1)
else:
lower_truncate = Xrank[:, col] <= truncation
upper_truncate = Xrank[:, col] > 1 - truncation
Xrank[lower_truncate, col] = truncation
Xrank[upper_truncate, col] = 1 - truncation
return Xrank | Transform each column into ranked data. Tied ranks are averaged.
Ranks can optionally be winsorized as described in Liu 2009 otherwise
this returns Tsukahara's scaled rank based Z-estimator.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
The data matrix where each column is a feature.
Row observations for each column will be replaced
by correponding rank.
winsorize: bool
Choose whether ranks should be winsorized (trimmed) or not. If True,
then ranks will be winsorized using the truncation parameter.
truncation: (float)
The default value is given by 1/(4 n^(1/4) * sqrt(pi log n)), where
n is the number of samples.
Returns
-------
Xrank
References
----------
Liu, Han, John Lafferty, and Larry Wasserman.
"The nonparanormal: Semiparametric estimation of high dimensional
undirected graphs."
Journal of Machine Learning Research 10.Oct (2009): 2295-2328. | Below is the the instruction that describes the task:
### Input:
Transform each column into ranked data. Tied ranks are averaged.
Ranks can optionally be winsorized as described in Liu 2009 otherwise
this returns Tsukahara's scaled rank based Z-estimator.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
The data matrix where each column is a feature.
Row observations for each column will be replaced
by correponding rank.
winsorize: bool
Choose whether ranks should be winsorized (trimmed) or not. If True,
then ranks will be winsorized using the truncation parameter.
truncation: (float)
The default value is given by 1/(4 n^(1/4) * sqrt(pi log n)), where
n is the number of samples.
Returns
-------
Xrank
References
----------
Liu, Han, John Lafferty, and Larry Wasserman.
"The nonparanormal: Semiparametric estimation of high dimensional
undirected graphs."
Journal of Machine Learning Research 10.Oct (2009): 2295-2328.
### Response:
def _compute_ranks(X, winsorize=False, truncation=None, verbose=True):
"""
Transform each column into ranked data. Tied ranks are averaged.
Ranks can optionally be winsorized as described in Liu 2009 otherwise
this returns Tsukahara's scaled rank based Z-estimator.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
The data matrix where each column is a feature.
Row observations for each column will be replaced
by correponding rank.
winsorize: bool
Choose whether ranks should be winsorized (trimmed) or not. If True,
then ranks will be winsorized using the truncation parameter.
truncation: (float)
The default value is given by 1/(4 n^(1/4) * sqrt(pi log n)), where
n is the number of samples.
Returns
-------
Xrank
References
----------
Liu, Han, John Lafferty, and Larry Wasserman.
"The nonparanormal: Semiparametric estimation of high dimensional
undirected graphs."
Journal of Machine Learning Research 10.Oct (2009): 2295-2328.
"""
n_samples, n_features = X.shape
Xrank = np.zeros(shape=X.shape)
if winsorize:
if truncation is None:
truncation = 1 / (
4 * np.power(n_samples, 0.25) * np.sqrt(np.pi * np.log(n_samples))
)
elif truncation > 1:
truncation = np.min(1.0, truncation)
for col in np.arange(n_features):
Xrank[:, col] = rankdata(X[:, col], method="average")
Xrank[:, col] /= n_samples
if winsorize:
if n_samples > 100 * n_features:
Xrank[:, col] = n_samples * Xrank[:, col] / (n_samples + 1)
else:
lower_truncate = Xrank[:, col] <= truncation
upper_truncate = Xrank[:, col] > 1 - truncation
Xrank[lower_truncate, col] = truncation
Xrank[upper_truncate, col] = 1 - truncation
return Xrank |
def is_numeric(self):
"""
Ensures :attr:`subject` is an int, float, or long.
"""
from decimal import Decimal
numeric_types = (int, float, long, Decimal) if USING_PYTHON2 else (int, float, Decimal) # noqa
if not isinstance(self._subject, numeric_types):
raise self._error_factory(_format("Expected {} to be numeric (int, float, long or Decimal)", self._subject)) | Ensures :attr:`subject` is an int, float, or long. | Below is the the instruction that describes the task:
### Input:
Ensures :attr:`subject` is an int, float, or long.
### Response:
def is_numeric(self):
"""
Ensures :attr:`subject` is an int, float, or long.
"""
from decimal import Decimal
numeric_types = (int, float, long, Decimal) if USING_PYTHON2 else (int, float, Decimal) # noqa
if not isinstance(self._subject, numeric_types):
raise self._error_factory(_format("Expected {} to be numeric (int, float, long or Decimal)", self._subject)) |
def has_a_conf(self, magic_hash=None): # pragma: no cover
"""Send a HTTP request to the satellite (GET /have_conf)
Used to know if the satellite has a conf
:param magic_hash: Config hash. Only used for HA arbiter communication
:type magic_hash: int
:return: Boolean indicating if the satellite has a (specific) configuration
:type: bool
"""
logger.debug("Have a configuration for %s, %s %s", self.name, self.alive, self.reachable)
self.have_conf = self.con.get('_have_conf', {'magic_hash': magic_hash})
return self.have_conf | Send a HTTP request to the satellite (GET /have_conf)
Used to know if the satellite has a conf
:param magic_hash: Config hash. Only used for HA arbiter communication
:type magic_hash: int
:return: Boolean indicating if the satellite has a (specific) configuration
:type: bool | Below is the the instruction that describes the task:
### Input:
Send a HTTP request to the satellite (GET /have_conf)
Used to know if the satellite has a conf
:param magic_hash: Config hash. Only used for HA arbiter communication
:type magic_hash: int
:return: Boolean indicating if the satellite has a (specific) configuration
:type: bool
### Response:
def has_a_conf(self, magic_hash=None): # pragma: no cover
"""Send a HTTP request to the satellite (GET /have_conf)
Used to know if the satellite has a conf
:param magic_hash: Config hash. Only used for HA arbiter communication
:type magic_hash: int
:return: Boolean indicating if the satellite has a (specific) configuration
:type: bool
"""
logger.debug("Have a configuration for %s, %s %s", self.name, self.alive, self.reachable)
self.have_conf = self.con.get('_have_conf', {'magic_hash': magic_hash})
return self.have_conf |
def get_calendars(self, calendar_id=None, body=None, params=None):
"""
`<>`_
:arg calendar_id: The ID of the calendar to fetch
:arg body: The from and size parameters optionally sent in the body
:arg from_: skips a number of calendars
:arg size: specifies a max number of calendars to get
"""
return self.transport.perform_request(
"GET", _make_path("_ml", "calendars", calendar_id), params=params, body=body
) | `<>`_
:arg calendar_id: The ID of the calendar to fetch
:arg body: The from and size parameters optionally sent in the body
:arg from_: skips a number of calendars
:arg size: specifies a max number of calendars to get | Below is the the instruction that describes the task:
### Input:
`<>`_
:arg calendar_id: The ID of the calendar to fetch
:arg body: The from and size parameters optionally sent in the body
:arg from_: skips a number of calendars
:arg size: specifies a max number of calendars to get
### Response:
def get_calendars(self, calendar_id=None, body=None, params=None):
"""
`<>`_
:arg calendar_id: The ID of the calendar to fetch
:arg body: The from and size parameters optionally sent in the body
:arg from_: skips a number of calendars
:arg size: specifies a max number of calendars to get
"""
return self.transport.perform_request(
"GET", _make_path("_ml", "calendars", calendar_id), params=params, body=body
) |
def hashi(self, key, replica=0):
"""Returns a ketama compatible hash from the given key.
"""
dh = self._listbytes(md5(str(key).encode('utf-8')).digest())
rd = replica * 4
return (
(dh[3 + rd] << 24) | (dh[2 + rd] << 16) |
(dh[1 + rd] << 8) | dh[0 + rd]) | Returns a ketama compatible hash from the given key. | Below is the the instruction that describes the task:
### Input:
Returns a ketama compatible hash from the given key.
### Response:
def hashi(self, key, replica=0):
"""Returns a ketama compatible hash from the given key.
"""
dh = self._listbytes(md5(str(key).encode('utf-8')).digest())
rd = replica * 4
return (
(dh[3 + rd] << 24) | (dh[2 + rd] << 16) |
(dh[1 + rd] << 8) | dh[0 + rd]) |
def nankurt(values, axis=None, skipna=True, mask=None):
"""
Compute the sample excess kurtosis
The statistic computed here is the adjusted Fisher-Pearson standardized
moment coefficient G2, computed directly from the second and fourth
central moment.
Parameters
----------
values : ndarray
axis: int, optional
skipna : bool, default True
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : float64
Unless input is a float array, in which case use the same
precision as the input array.
Examples
--------
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([1,np.nan, 1, 3, 2])
>>> nanops.nankurt(s)
-1.2892561983471076
"""
values = com.values_from_object(values)
if mask is None:
mask = isna(values)
if not is_float_dtype(values.dtype):
values = values.astype('f8')
count = _get_counts(mask, axis)
else:
count = _get_counts(mask, axis, dtype=values.dtype)
if skipna:
values = values.copy()
np.putmask(values, mask, 0)
mean = values.sum(axis, dtype=np.float64) / count
if axis is not None:
mean = np.expand_dims(mean, axis)
adjusted = values - mean
if skipna:
np.putmask(adjusted, mask, 0)
adjusted2 = adjusted ** 2
adjusted4 = adjusted2 ** 2
m2 = adjusted2.sum(axis, dtype=np.float64)
m4 = adjusted4.sum(axis, dtype=np.float64)
with np.errstate(invalid='ignore', divide='ignore'):
adj = 3 * (count - 1) ** 2 / ((count - 2) * (count - 3))
numer = count * (count + 1) * (count - 1) * m4
denom = (count - 2) * (count - 3) * m2 ** 2
# floating point error
#
# #18044 in _libs/windows.pyx calc_kurt follow this behavior
# to fix the fperr to treat denom <1e-14 as zero
numer = _zero_out_fperr(numer)
denom = _zero_out_fperr(denom)
if not isinstance(denom, np.ndarray):
# if ``denom`` is a scalar, check these corner cases first before
# doing division
if count < 4:
return np.nan
if denom == 0:
return 0
with np.errstate(invalid='ignore', divide='ignore'):
result = numer / denom - adj
dtype = values.dtype
if is_float_dtype(dtype):
result = result.astype(dtype)
if isinstance(result, np.ndarray):
result = np.where(denom == 0, 0, result)
result[count < 4] = np.nan
return result | Compute the sample excess kurtosis
The statistic computed here is the adjusted Fisher-Pearson standardized
moment coefficient G2, computed directly from the second and fourth
central moment.
Parameters
----------
values : ndarray
axis: int, optional
skipna : bool, default True
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : float64
Unless input is a float array, in which case use the same
precision as the input array.
Examples
--------
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([1,np.nan, 1, 3, 2])
>>> nanops.nankurt(s)
-1.2892561983471076 | Below is the instruction that describes the task:
### Input:
Compute the sample excess kurtosis
The statistic computed here is the adjusted Fisher-Pearson standardized
moment coefficient G2, computed directly from the second and fourth
central moment.
Parameters
----------
values : ndarray
axis: int, optional
skipna : bool, default True
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : float64
Unless input is a float array, in which case use the same
precision as the input array.
Examples
--------
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([1,np.nan, 1, 3, 2])
>>> nanops.nankurt(s)
-1.2892561983471076
### Response:
def nankurt(values, axis=None, skipna=True, mask=None):
"""
Compute the sample excess kurtosis
The statistic computed here is the adjusted Fisher-Pearson standardized
moment coefficient G2, computed directly from the second and fourth
central moment.
Parameters
----------
values : ndarray
axis: int, optional
skipna : bool, default True
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : float64
Unless input is a float array, in which case use the same
precision as the input array.
Examples
--------
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([1,np.nan, 1, 3, 2])
>>> nanops.nankurt(s)
-1.2892561983471076
"""
values = com.values_from_object(values)
if mask is None:
mask = isna(values)
if not is_float_dtype(values.dtype):
values = values.astype('f8')
count = _get_counts(mask, axis)
else:
count = _get_counts(mask, axis, dtype=values.dtype)
if skipna:
values = values.copy()
np.putmask(values, mask, 0)
mean = values.sum(axis, dtype=np.float64) / count
if axis is not None:
mean = np.expand_dims(mean, axis)
adjusted = values - mean
if skipna:
np.putmask(adjusted, mask, 0)
adjusted2 = adjusted ** 2
adjusted4 = adjusted2 ** 2
m2 = adjusted2.sum(axis, dtype=np.float64)
m4 = adjusted4.sum(axis, dtype=np.float64)
with np.errstate(invalid='ignore', divide='ignore'):
adj = 3 * (count - 1) ** 2 / ((count - 2) * (count - 3))
numer = count * (count + 1) * (count - 1) * m4
denom = (count - 2) * (count - 3) * m2 ** 2
# floating point error
#
# #18044 in _libs/windows.pyx calc_kurt follow this behavior
# to fix the fperr to treat denom <1e-14 as zero
numer = _zero_out_fperr(numer)
denom = _zero_out_fperr(denom)
if not isinstance(denom, np.ndarray):
# if ``denom`` is a scalar, check these corner cases first before
# doing division
if count < 4:
return np.nan
if denom == 0:
return 0
with np.errstate(invalid='ignore', divide='ignore'):
result = numer / denom - adj
dtype = values.dtype
if is_float_dtype(dtype):
result = result.astype(dtype)
if isinstance(result, np.ndarray):
result = np.where(denom == 0, 0, result)
result[count < 4] = np.nan
return result |
def kill(self):
"""Kill the child."""
self.process.kill()
self.set_status(self.S_ERROR, "status set to Error by task.kill")
    self._returncode = self.process.returncode | Kill the child. | Below is the instruction that describes the task:
### Input:
Kill the child.
### Response:
def kill(self):
"""Kill the child."""
self.process.kill()
self.set_status(self.S_ERROR, "status set to Error by task.kill")
self._returncode = self.process.returncode |
def decorate(func, caller, extras=()):
"""
decorate(func, caller) decorates a function using a caller.
If the caller is a generator function, the resulting function
will be a generator function.
"""
evaldict = dict(_call_=caller, _func_=func)
es = ''
for i, extra in enumerate(extras):
ex = '_e%d_' % i
evaldict[ex] = extra
es += ex + ', '
if '3.5' <= sys.version < '3.6':
# with Python 3.5 isgeneratorfunction returns True for all coroutines
# however we know that it is NOT possible to have a generator
# coroutine in python 3.5: PEP525 was not there yet
generatorcaller = isgeneratorfunction(
caller) and not iscoroutinefunction(caller)
else:
generatorcaller = isgeneratorfunction(caller)
if generatorcaller:
fun = FunctionMaker.create(
func, "for res in _call_(_func_, %s%%(shortsignature)s):\n"
" yield res" % es, evaldict, __wrapped__=func)
else:
fun = FunctionMaker.create(
func, "return _call_(_func_, %s%%(shortsignature)s)" % es,
evaldict, __wrapped__=func)
if hasattr(func, '__qualname__'):
fun.__qualname__ = func.__qualname__
return fun | decorate(func, caller) decorates a function using a caller.
If the caller is a generator function, the resulting function
will be a generator function. | Below is the instruction that describes the task:
### Input:
decorate(func, caller) decorates a function using a caller.
If the caller is a generator function, the resulting function
will be a generator function.
### Response:
def decorate(func, caller, extras=()):
"""
decorate(func, caller) decorates a function using a caller.
If the caller is a generator function, the resulting function
will be a generator function.
"""
evaldict = dict(_call_=caller, _func_=func)
es = ''
for i, extra in enumerate(extras):
ex = '_e%d_' % i
evaldict[ex] = extra
es += ex + ', '
if '3.5' <= sys.version < '3.6':
# with Python 3.5 isgeneratorfunction returns True for all coroutines
# however we know that it is NOT possible to have a generator
# coroutine in python 3.5: PEP525 was not there yet
generatorcaller = isgeneratorfunction(
caller) and not iscoroutinefunction(caller)
else:
generatorcaller = isgeneratorfunction(caller)
if generatorcaller:
fun = FunctionMaker.create(
func, "for res in _call_(_func_, %s%%(shortsignature)s):\n"
" yield res" % es, evaldict, __wrapped__=func)
else:
fun = FunctionMaker.create(
func, "return _call_(_func_, %s%%(shortsignature)s)" % es,
evaldict, __wrapped__=func)
if hasattr(func, '__qualname__'):
fun.__qualname__ = func.__qualname__
return fun |
def pods(self):
"""Return list of all Pod objects in result"""
# Return empty list if xml_tree is not defined (error Result object)
if not self.xml_tree:
return []
# Create a Pod object for every pod group in xml
    return [Pod(elem) for elem in self.xml_tree.findall('pod')] | Return list of all Pod objects in result | Below is the instruction that describes the task:
### Input:
Return list of all Pod objects in result
### Response:
def pods(self):
"""Return list of all Pod objects in result"""
# Return empty list if xml_tree is not defined (error Result object)
if not self.xml_tree:
return []
# Create a Pod object for every pod group in xml
return [Pod(elem) for elem in self.xml_tree.findall('pod')] |
def getMoonPhase(self):
""" Returns the phase of the moon. """
sun = self.getObject(const.SUN)
moon = self.getObject(const.MOON)
dist = angle.distance(sun.lon, moon.lon)
if dist < 90:
return const.MOON_FIRST_QUARTER
elif dist < 180:
return const.MOON_SECOND_QUARTER
elif dist < 270:
return const.MOON_THIRD_QUARTER
else:
        return const.MOON_LAST_QUARTER | Returns the phase of the moon. | Below is the instruction that describes the task:
### Input:
Returns the phase of the moon.
### Response:
def getMoonPhase(self):
""" Returns the phase of the moon. """
sun = self.getObject(const.SUN)
moon = self.getObject(const.MOON)
dist = angle.distance(sun.lon, moon.lon)
if dist < 90:
return const.MOON_FIRST_QUARTER
elif dist < 180:
return const.MOON_SECOND_QUARTER
elif dist < 270:
return const.MOON_THIRD_QUARTER
else:
return const.MOON_LAST_QUARTER |
def project_version(full_version):
"""
project_version context manager
"""
project_full_version=full_version
v = _parse_project_version(full_version)
name = project_name()
project_fullname = '-'.join([name,v])
    return _setenv(project_full_version=project_full_version, project_version=v,project_name=name,project_fullname=project_fullname) | project_version context manager | Below is the instruction that describes the task:
### Input:
project_version context manager
### Response:
def project_version(full_version):
"""
project_version context manager
"""
project_full_version=full_version
v = _parse_project_version(full_version)
name = project_name()
project_fullname = '-'.join([name,v])
return _setenv(project_full_version=project_full_version, project_version=v,project_name=name,project_fullname=project_fullname) |
def _get_command(classes):
"""Associates each command class with command depending on setup.cfg
"""
commands = {}
setup_file = os.path.join(
os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')),
'setup.cfg')
for line in open(setup_file, 'r'):
for cl in classes:
if cl in line:
commands[cl] = line.split(' = ')[0].strip().replace('_', ' ')
    return commands | Associates each command class with command depending on setup.cfg | Below is the instruction that describes the task:
### Input:
Associates each command class with command depending on setup.cfg
### Response:
def _get_command(classes):
"""Associates each command class with command depending on setup.cfg
"""
commands = {}
setup_file = os.path.join(
os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')),
'setup.cfg')
for line in open(setup_file, 'r'):
for cl in classes:
if cl in line:
commands[cl] = line.split(' = ')[0].strip().replace('_', ' ')
return commands |
def _import_plugins(self):
"""
Internal function, ensure all plugin packages are imported.
"""
if self.detected:
return
# In some cases, plugin scanning may start during a request.
# Make sure there is only one thread scanning for plugins.
self.scanLock.acquire()
if self.detected:
return # previous threaded released + completed
try:
import_apps_submodule("content_plugins")
self.detected = True
finally:
        self.scanLock.release() | Internal function, ensure all plugin packages are imported. | Below is the instruction that describes the task:
### Input:
Internal function, ensure all plugin packages are imported.
### Response:
def _import_plugins(self):
"""
Internal function, ensure all plugin packages are imported.
"""
if self.detected:
return
# In some cases, plugin scanning may start during a request.
# Make sure there is only one thread scanning for plugins.
self.scanLock.acquire()
if self.detected:
return # previous threaded released + completed
try:
import_apps_submodule("content_plugins")
self.detected = True
finally:
self.scanLock.release() |
def run_wrap(self, args):
""" Wrap some standard protocol around a command's run method. This
wrapper should generally never capture exceptions. It can look at
them and do things but prerun and postrun should always be symmetric.
Any exception suppression should happen in the `session.execute`. """
self.fire_event('prerun', args)
self.prerun(args)
try:
if self.session.allow_pager and self.use_pager:
desc = 'Command\: %s' % '-'.join(self.prog.split())
with paging.pager_redirect(desc, **self.get_pager_spec()):
result = self.run(args)
else:
result = self.run(args)
except (SystemExit, Exception) as e:
self.postrun(args, exc=e)
self.fire_event('postrun', args, exc=e)
raise e
else:
self.postrun(args, result=result)
self.fire_event('postrun', args, result=result)
return result | Wrap some standard protocol around a command's run method. This
wrapper should generally never capture exceptions. It can look at
them and do things but prerun and postrun should always be symmetric.
Any exception suppression should happen in the `session.execute`. | Below is the instruction that describes the task:
### Input:
Wrap some standard protocol around a command's run method. This
wrapper should generally never capture exceptions. It can look at
them and do things but prerun and postrun should always be symmetric.
Any exception suppression should happen in the `session.execute`.
### Response:
def run_wrap(self, args):
""" Wrap some standard protocol around a command's run method. This
wrapper should generally never capture exceptions. It can look at
them and do things but prerun and postrun should always be symmetric.
Any exception suppression should happen in the `session.execute`. """
self.fire_event('prerun', args)
self.prerun(args)
try:
if self.session.allow_pager and self.use_pager:
desc = 'Command\: %s' % '-'.join(self.prog.split())
with paging.pager_redirect(desc, **self.get_pager_spec()):
result = self.run(args)
else:
result = self.run(args)
except (SystemExit, Exception) as e:
self.postrun(args, exc=e)
self.fire_event('postrun', args, exc=e)
raise e
else:
self.postrun(args, result=result)
self.fire_event('postrun', args, result=result)
return result |
def draw_bar_chart():
"""
Uses sample code from http://matplotlib.org/1.2.1/examples/api/barchart_demo.html
"""
N = 5
menMeans = (20, 35, 30, 35, 27)
menStd = (2, 3, 4, 1, 2)
ind = numpy.arange(N)
width = 0.35
fig = pyplot.figure()
ax = fig.add_subplot(111)
rects1 = ax.bar(ind, menMeans, width, color='r', yerr=menStd)
womenMeans = (25, 32, 34, 20, 25)
womenStd = (3, 5, 2, 3, 3)
rects2 = ax.bar(ind+width, womenMeans, width, color='y', yerr=womenStd)
ax.set_ylabel('Scores')
ax.set_title('Scores by group and color')
ax.set_xticks(ind+width)
ax.set_xticklabels(('G1', 'G2', 'G3', 'G4', 'G5'))
ax.legend((rects1[0], rects2[0]), ('Red', 'Yellow'))
def autolabel(rects):
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x()+rect.get_width()/2., 1.05*height, '%d' % int(height),
ha='center', va='bottom')
autolabel(rects1)
autolabel(rects2)
    pyplot.savefig(bars_filename) | Uses sample code from http://matplotlib.org/1.2.1/examples/api/barchart_demo.html | Below is the instruction that describes the task:
### Input:
Uses sample code from http://matplotlib.org/1.2.1/examples/api/barchart_demo.html
### Response:
def draw_bar_chart():
"""
Uses sample code from http://matplotlib.org/1.2.1/examples/api/barchart_demo.html
"""
N = 5
menMeans = (20, 35, 30, 35, 27)
menStd = (2, 3, 4, 1, 2)
ind = numpy.arange(N)
width = 0.35
fig = pyplot.figure()
ax = fig.add_subplot(111)
rects1 = ax.bar(ind, menMeans, width, color='r', yerr=menStd)
womenMeans = (25, 32, 34, 20, 25)
womenStd = (3, 5, 2, 3, 3)
rects2 = ax.bar(ind+width, womenMeans, width, color='y', yerr=womenStd)
ax.set_ylabel('Scores')
ax.set_title('Scores by group and color')
ax.set_xticks(ind+width)
ax.set_xticklabels(('G1', 'G2', 'G3', 'G4', 'G5'))
ax.legend((rects1[0], rects2[0]), ('Red', 'Yellow'))
def autolabel(rects):
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x()+rect.get_width()/2., 1.05*height, '%d' % int(height),
ha='center', va='bottom')
autolabel(rects1)
autolabel(rects2)
pyplot.savefig(bars_filename) |
def _validate_open_params(**params):
"""
Validate the fql parameters and if invalid, generate exception
"""
if not params['FilterQueryLanguage'] and params['FilterQuery']:
raise CIMError(
CIM_ERR_INVALID_PARAMETER,
"FilterQuery without FilterQueryLanguage definition is "
"invalid")
if params['FilterQueryLanguage']:
if params['FilterQueryLanguage'] != 'DMTF:FQL':
raise CIMError(
CIM_ERR_QUERY_LANGUAGE_NOT_SUPPORTED,
_format("FilterQueryLanguage {0!A} not supported",
params['FilterQueryLanguage']))
ot = params['OperationTimeout']
if ot:
if not isinstance(ot, six.integer_types) or ot < 0 \
or ot > OPEN_MAX_TIMEOUT:
raise CIMError(
CIM_ERR_INVALID_PARAMETER,
_format("OperationTimeout {0!A }must be positive integer "
                "less than {1!A}", ot, OPEN_MAX_TIMEOUT)) | Validate the fql parameters and if invalid, generate exception | Below is the instruction that describes the task:
### Input:
Validate the fql parameters and if invalid, generate exception
### Response:
def _validate_open_params(**params):
"""
Validate the fql parameters and if invalid, generate exception
"""
if not params['FilterQueryLanguage'] and params['FilterQuery']:
raise CIMError(
CIM_ERR_INVALID_PARAMETER,
"FilterQuery without FilterQueryLanguage definition is "
"invalid")
if params['FilterQueryLanguage']:
if params['FilterQueryLanguage'] != 'DMTF:FQL':
raise CIMError(
CIM_ERR_QUERY_LANGUAGE_NOT_SUPPORTED,
_format("FilterQueryLanguage {0!A} not supported",
params['FilterQueryLanguage']))
ot = params['OperationTimeout']
if ot:
if not isinstance(ot, six.integer_types) or ot < 0 \
or ot > OPEN_MAX_TIMEOUT:
raise CIMError(
CIM_ERR_INVALID_PARAMETER,
_format("OperationTimeout {0!A }must be positive integer "
"less than {1!A}", ot, OPEN_MAX_TIMEOUT)) |
async def create_text_channel(self, name, *, overwrites=None, category=None, reason=None, **options):
"""|coro|
Creates a :class:`TextChannel` for the guild.
Note that you need the :attr:`~Permissions.manage_channels` permission
to create the channel.
The ``overwrites`` parameter can be used to create a 'secret'
channel upon creation. This parameter expects a :class:`dict` of
overwrites with the target (either a :class:`Member` or a :class:`Role`)
as the key and a :class:`PermissionOverwrite` as the value.
.. note::
Creating a channel of a specified position will not update the position of
other channels to follow suit. A follow-up call to :meth:`~TextChannel.edit`
will be required to update the position of the channel in the channel list.
Examples
----------
Creating a basic channel:
.. code-block:: python3
channel = await guild.create_text_channel('cool-channel')
Creating a "secret" channel:
.. code-block:: python3
overwrites = {
guild.default_role: discord.PermissionOverwrite(read_messages=False),
guild.me: discord.PermissionOverwrite(read_messages=True)
}
channel = await guild.create_text_channel('secret', overwrites=overwrites)
Parameters
-----------
name: :class:`str`
The channel's name.
overwrites
A :class:`dict` of target (either a role or a member) to
:class:`PermissionOverwrite` to apply upon creation of a channel.
Useful for creating secret channels.
category: Optional[:class:`CategoryChannel`]
The category to place the newly created channel under.
The permissions will be automatically synced to category if no
overwrites are provided.
position: :class:`int`
The position in the channel list. This is a number that starts
at 0. e.g. the top channel is position 0.
topic: Optional[:class:`str`]
The new channel's topic.
slowmode_delay: :class:`int`
Specifies the slowmode rate limit for user in this channel.
The maximum value possible is `120`.
nsfw: :class:`bool`
To mark the channel as NSFW or not.
reason: Optional[:class:`str`]
The reason for creating this channel. Shows up on the audit log.
Raises
-------
Forbidden
You do not have the proper permissions to create this channel.
HTTPException
Creating the channel failed.
InvalidArgument
The permission overwrite information is not in proper form.
Returns
-------
:class:`TextChannel`
The channel that was just created.
"""
data = await self._create_channel(name, overwrites, ChannelType.text, category, reason=reason, **options)
channel = TextChannel(state=self._state, guild=self, data=data)
# temporarily add to the cache
self._channels[channel.id] = channel
return channel | |coro|
Creates a :class:`TextChannel` for the guild.
Note that you need the :attr:`~Permissions.manage_channels` permission
to create the channel.
The ``overwrites`` parameter can be used to create a 'secret'
channel upon creation. This parameter expects a :class:`dict` of
overwrites with the target (either a :class:`Member` or a :class:`Role`)
as the key and a :class:`PermissionOverwrite` as the value.
.. note::
Creating a channel of a specified position will not update the position of
other channels to follow suit. A follow-up call to :meth:`~TextChannel.edit`
will be required to update the position of the channel in the channel list.
Examples
----------
Creating a basic channel:
.. code-block:: python3
channel = await guild.create_text_channel('cool-channel')
Creating a "secret" channel:
.. code-block:: python3
overwrites = {
guild.default_role: discord.PermissionOverwrite(read_messages=False),
guild.me: discord.PermissionOverwrite(read_messages=True)
}
channel = await guild.create_text_channel('secret', overwrites=overwrites)
Parameters
-----------
name: :class:`str`
The channel's name.
overwrites
A :class:`dict` of target (either a role or a member) to
:class:`PermissionOverwrite` to apply upon creation of a channel.
Useful for creating secret channels.
category: Optional[:class:`CategoryChannel`]
The category to place the newly created channel under.
The permissions will be automatically synced to category if no
overwrites are provided.
position: :class:`int`
The position in the channel list. This is a number that starts
at 0. e.g. the top channel is position 0.
topic: Optional[:class:`str`]
The new channel's topic.
slowmode_delay: :class:`int`
Specifies the slowmode rate limit for user in this channel.
The maximum value possible is `120`.
nsfw: :class:`bool`
To mark the channel as NSFW or not.
reason: Optional[:class:`str`]
The reason for creating this channel. Shows up on the audit log.
Raises
-------
Forbidden
You do not have the proper permissions to create this channel.
HTTPException
Creating the channel failed.
InvalidArgument
The permission overwrite information is not in proper form.
Returns
-------
:class:`TextChannel`
    The channel that was just created. | Below is the instruction that describes the task:
### Input:
|coro|
Creates a :class:`TextChannel` for the guild.
Note that you need the :attr:`~Permissions.manage_channels` permission
to create the channel.
The ``overwrites`` parameter can be used to create a 'secret'
channel upon creation. This parameter expects a :class:`dict` of
overwrites with the target (either a :class:`Member` or a :class:`Role`)
as the key and a :class:`PermissionOverwrite` as the value.
.. note::
Creating a channel of a specified position will not update the position of
other channels to follow suit. A follow-up call to :meth:`~TextChannel.edit`
will be required to update the position of the channel in the channel list.
Examples
----------
Creating a basic channel:
.. code-block:: python3
channel = await guild.create_text_channel('cool-channel')
Creating a "secret" channel:
.. code-block:: python3
overwrites = {
guild.default_role: discord.PermissionOverwrite(read_messages=False),
guild.me: discord.PermissionOverwrite(read_messages=True)
}
channel = await guild.create_text_channel('secret', overwrites=overwrites)
Parameters
-----------
name: :class:`str`
The channel's name.
overwrites
A :class:`dict` of target (either a role or a member) to
:class:`PermissionOverwrite` to apply upon creation of a channel.
Useful for creating secret channels.
category: Optional[:class:`CategoryChannel`]
The category to place the newly created channel under.
The permissions will be automatically synced to category if no
overwrites are provided.
position: :class:`int`
The position in the channel list. This is a number that starts
at 0. e.g. the top channel is position 0.
topic: Optional[:class:`str`]
The new channel's topic.
slowmode_delay: :class:`int`
Specifies the slowmode rate limit for user in this channel.
The maximum value possible is `120`.
nsfw: :class:`bool`
To mark the channel as NSFW or not.
reason: Optional[:class:`str`]
The reason for creating this channel. Shows up on the audit log.
Raises
-------
Forbidden
You do not have the proper permissions to create this channel.
HTTPException
Creating the channel failed.
InvalidArgument
The permission overwrite information is not in proper form.
Returns
-------
:class:`TextChannel`
The channel that was just created.
### Response:
async def create_text_channel(self, name, *, overwrites=None, category=None, reason=None, **options):
"""|coro|
Creates a :class:`TextChannel` for the guild.
Note that you need the :attr:`~Permissions.manage_channels` permission
to create the channel.
The ``overwrites`` parameter can be used to create a 'secret'
channel upon creation. This parameter expects a :class:`dict` of
overwrites with the target (either a :class:`Member` or a :class:`Role`)
as the key and a :class:`PermissionOverwrite` as the value.
.. note::
Creating a channel of a specified position will not update the position of
other channels to follow suit. A follow-up call to :meth:`~TextChannel.edit`
will be required to update the position of the channel in the channel list.
Examples
----------
Creating a basic channel:
.. code-block:: python3
channel = await guild.create_text_channel('cool-channel')
Creating a "secret" channel:
.. code-block:: python3
overwrites = {
guild.default_role: discord.PermissionOverwrite(read_messages=False),
guild.me: discord.PermissionOverwrite(read_messages=True)
}
channel = await guild.create_text_channel('secret', overwrites=overwrites)
Parameters
-----------
name: :class:`str`
The channel's name.
overwrites
A :class:`dict` of target (either a role or a member) to
:class:`PermissionOverwrite` to apply upon creation of a channel.
Useful for creating secret channels.
category: Optional[:class:`CategoryChannel`]
The category to place the newly created channel under.
The permissions will be automatically synced to category if no
overwrites are provided.
position: :class:`int`
The position in the channel list. This is a number that starts
at 0. e.g. the top channel is position 0.
topic: Optional[:class:`str`]
The new channel's topic.
slowmode_delay: :class:`int`
Specifies the slowmode rate limit for user in this channel.
The maximum value possible is `120`.
nsfw: :class:`bool`
To mark the channel as NSFW or not.
reason: Optional[:class:`str`]
The reason for creating this channel. Shows up on the audit log.
Raises
-------
Forbidden
You do not have the proper permissions to create this channel.
HTTPException
Creating the channel failed.
InvalidArgument
The permission overwrite information is not in proper form.
Returns
-------
:class:`TextChannel`
The channel that was just created.
"""
data = await self._create_channel(name, overwrites, ChannelType.text, category, reason=reason, **options)
channel = TextChannel(state=self._state, guild=self, data=data)
# temporarily add to the cache
self._channels[channel.id] = channel
return channel |
def find(cls, id='', user=None, project=None):
"""
Like :py:meth:`.PanoptesObject.find` but can also query by user and
project.
- **user** and **project** can be either a :py:class:`.User` and
:py:class:`.Project` instance respectively, or they can be given as
IDs. If either argument is given, the other is also required.
"""
if not id:
if not (user and project):
raise ValueError('Both user and project required')
if (
isinstance(user, User)
and isinstance(project, Project)
):
_user_id = user.id
_project_id = project.id
elif (
isinstance(user, (int, str,))
and isinstance(project, (int, str,))
):
_user_id = user
_project_id = project
else:
raise TypeError
id = cls.where(user_id=_user_id, project_id=_project_id).next().id
return super(ProjectPreferences, cls).find(id) | Like :py:meth:`.PanoptesObject.find` but can also query by user and
project.
- **user** and **project** can be either a :py:class:`.User` and
:py:class:`.Project` instance respectively, or they can be given as
  IDs. If either argument is given, the other is also required. | Below is the instruction that describes the task:
### Input:
Like :py:meth:`.PanoptesObject.find` but can also query by user and
project.
- **user** and **project** can be either a :py:class:`.User` and
:py:class:`.Project` instance respectively, or they can be given as
IDs. If either argument is given, the other is also required.
### Response:
def find(cls, id='', user=None, project=None):
"""
Like :py:meth:`.PanoptesObject.find` but can also query by user and
project.
- **user** and **project** can be either a :py:class:`.User` and
:py:class:`.Project` instance respectively, or they can be given as
IDs. If either argument is given, the other is also required.
"""
if not id:
if not (user and project):
raise ValueError('Both user and project required')
if (
isinstance(user, User)
and isinstance(project, Project)
):
_user_id = user.id
_project_id = project.id
elif (
isinstance(user, (int, str,))
and isinstance(project, (int, str,))
):
_user_id = user
_project_id = project
else:
raise TypeError
id = cls.where(user_id=_user_id, project_id=_project_id).next().id
return super(ProjectPreferences, cls).find(id) |
def _parsems(value):
"""Parse a I[.F] seconds value into (seconds, microseconds)."""
if "." not in value:
return int(value), 0
else:
i, f = value.split(".")
        return int(i), int(f.ljust(6, "0")[:6]) | Parse a I[.F] seconds value into (seconds, microseconds). | Below is the instruction that describes the task:
### Input:
Parse a I[.F] seconds value into (seconds, microseconds).
### Response:
def _parsems(value):
"""Parse a I[.F] seconds value into (seconds, microseconds)."""
if "." not in value:
return int(value), 0
else:
i, f = value.split(".")
return int(i), int(f.ljust(6, "0")[:6]) |
def add_zone_condition(self, droppable_id, zone_id, match=True):
"""stub"""
self.my_osid_object_form._my_map['zoneConditions'].append(
{'droppableId': droppable_id, 'zoneId': zone_id, 'match': match})
    self.my_osid_object_form._my_map['zoneConditions'].sort(key=lambda k: k['zoneId']) | stub | Below is the instruction that describes the task:
### Input:
stub
### Response:
def add_zone_condition(self, droppable_id, zone_id, match=True):
"""stub"""
self.my_osid_object_form._my_map['zoneConditions'].append(
{'droppableId': droppable_id, 'zoneId': zone_id, 'match': match})
self.my_osid_object_form._my_map['zoneConditions'].sort(key=lambda k: k['zoneId']) |
def _dump_to_text(self, with_stats):
"""
Dump the models into a list of strings. Each
string is a text representation of a tree.
Parameters
----------
with_stats : bool
If true, include node statistics in the output.
Returns
-------
out : SFrame
A table with two columns: feature, count,
ordered by 'count' in descending order.
"""
return tc.extensions._xgboost_dump_model(self.__proxy__, with_stats=with_stats, format='text') | Dump the models into a list of strings. Each
string is a text representation of a tree.
Parameters
----------
with_stats : bool
If true, include node statistics in the output.
Returns
-------
out : SFrame
A table with two columns: feature, count,
ordered by 'count' in descending order. | Below is the the instruction that describes the task:
### Input:
Dump the models into a list of strings. Each
string is a text representation of a tree.
Parameters
----------
with_stats : bool
If true, include node statistics in the output.
Returns
-------
out : SFrame
A table with two columns: feature, count,
ordered by 'count' in descending order.
### Response:
def _dump_to_text(self, with_stats):
"""
Dump the models into a list of strings. Each
string is a text representation of a tree.
Parameters
----------
with_stats : bool
If true, include node statistics in the output.
Returns
-------
out : SFrame
A table with two columns: feature, count,
ordered by 'count' in descending order.
"""
return tc.extensions._xgboost_dump_model(self.__proxy__, with_stats=with_stats, format='text') |
def _convert_1bit_array_to_byte_array(arr):
"""
Convert bit array to byte array.
:param arr: list
Bits as a list where each element is an integer of 0 or 1
Returns
-------
numpy.array
1D numpy array of type uint8
"""
# Padding if necessary
while len(arr) < 8 or len(arr) % 8:
arr.append(0)
arr = _np.array(arr, dtype='uint8')
bit_arr = []
idx = 0
# Iterate and combine 8-bits into a uint8
for arr_idx in range(int(len(arr) / 8)):
bit_arr.append(((arr[idx] << 7) & (1 << 7)) |
((arr[idx+1] << 6) & (1 << 6)) |
((arr[idx+2] << 5) & (1 << 5)) |
((arr[idx+3] << 4) & (1 << 4)) |
((arr[idx+4] << 3) & (1 << 3)) |
((arr[idx+5] << 2) & (1 << 2)) |
((arr[idx+6] << 1) & (1 << 1)) |
((arr[idx+7] << 0) & (1 << 0))
)
idx += 8
return _np.array(bit_arr, dtype='uint8') | Convert bit array to byte array.
:param arr: list
Bits as a list where each element is an integer of 0 or 1
Returns
-------
numpy.array
1D numpy array of type uint8 | Below is the the instruction that describes the task:
### Input:
Convert bit array to byte array.
:param arr: list
Bits as a list where each element is an integer of 0 or 1
Returns
-------
numpy.array
1D numpy array of type uint8
### Response:
def _convert_1bit_array_to_byte_array(arr):
"""
Convert bit array to byte array.
:param arr: list
Bits as a list where each element is an integer of 0 or 1
Returns
-------
numpy.array
1D numpy array of type uint8
"""
# Padding if necessary
while len(arr) < 8 or len(arr) % 8:
arr.append(0)
arr = _np.array(arr, dtype='uint8')
bit_arr = []
idx = 0
# Iterate and combine 8-bits into a uint8
for arr_idx in range(int(len(arr) / 8)):
bit_arr.append(((arr[idx] << 7) & (1 << 7)) |
((arr[idx+1] << 6) & (1 << 6)) |
((arr[idx+2] << 5) & (1 << 5)) |
((arr[idx+3] << 4) & (1 << 4)) |
((arr[idx+4] << 3) & (1 << 3)) |
((arr[idx+5] << 2) & (1 << 2)) |
((arr[idx+6] << 1) & (1 << 1)) |
((arr[idx+7] << 0) & (1 << 0))
)
idx += 8
return _np.array(bit_arr, dtype='uint8') |
def canWrite(variable):
"""
mention if an element can be written.
:param variable: the element to evaluate.
:type variable: Lifepo4weredEnum
:return: true when write access is available, otherwise false
:rtype: bool
:raises ValueError: if parameter value is not a member of Lifepo4weredEnum
"""
if variable not in variablesEnum:
raise ValueError('Use a lifepo4wered enum element as parameter.')
return lifepo4weredSO.access_lifepo4wered(variable.value, defines.ACCESS_WRITE) | mention if an element can be written.
:param variable: the element to evaluate.
:type variable: Lifepo4weredEnum
:return: true when write access is available, otherwise false
:rtype: bool
:raises ValueError: if parameter value is not a member of Lifepo4weredEnum | Below is the the instruction that describes the task:
### Input:
mention if an element can be written.
:param variable: the element to evaluate.
:type variable: Lifepo4weredEnum
:return: true when write access is available, otherwise false
:rtype: bool
:raises ValueError: if parameter value is not a member of Lifepo4weredEnum
### Response:
def canWrite(variable):
"""
mention if an element can be written.
:param variable: the element to evaluate.
:type variable: Lifepo4weredEnum
:return: true when write access is available, otherwise false
:rtype: bool
:raises ValueError: if parameter value is not a member of Lifepo4weredEnum
"""
if variable not in variablesEnum:
raise ValueError('Use a lifepo4wered enum element as parameter.')
return lifepo4weredSO.access_lifepo4wered(variable.value, defines.ACCESS_WRITE) |
def infer_issubclass(callnode, context=None):
"""Infer issubclass() calls
:param nodes.Call callnode: an `issubclass` call
:param InferenceContext: the context for the inference
:rtype nodes.Const: Boolean Const value of the `issubclass` call
:raises UseInferenceDefault: If the node cannot be inferred
"""
call = arguments.CallSite.from_call(callnode)
if call.keyword_arguments:
# issubclass doesn't support keyword arguments
raise UseInferenceDefault("TypeError: issubclass() takes no keyword arguments")
if len(call.positional_arguments) != 2:
raise UseInferenceDefault(
"Expected two arguments, got {count}".format(
count=len(call.positional_arguments)
)
)
# The left hand argument is the obj to be checked
obj_node, class_or_tuple_node = call.positional_arguments
try:
obj_type = next(obj_node.infer(context=context))
except InferenceError as exc:
raise UseInferenceDefault from exc
if not isinstance(obj_type, nodes.ClassDef):
raise UseInferenceDefault("TypeError: arg 1 must be class")
# The right hand argument is the class(es) that the given
# object is to be checked against.
try:
class_container = _class_or_tuple_to_container(
class_or_tuple_node, context=context
)
except InferenceError as exc:
raise UseInferenceDefault from exc
try:
issubclass_bool = helpers.object_issubclass(obj_type, class_container, context)
except AstroidTypeError as exc:
raise UseInferenceDefault("TypeError: " + str(exc)) from exc
except MroError as exc:
raise UseInferenceDefault from exc
return nodes.Const(issubclass_bool) | Infer issubclass() calls
:param nodes.Call callnode: an `issubclass` call
:param InferenceContext: the context for the inference
:rtype nodes.Const: Boolean Const value of the `issubclass` call
:raises UseInferenceDefault: If the node cannot be inferred | Below is the the instruction that describes the task:
### Input:
Infer issubclass() calls
:param nodes.Call callnode: an `issubclass` call
:param InferenceContext: the context for the inference
:rtype nodes.Const: Boolean Const value of the `issubclass` call
:raises UseInferenceDefault: If the node cannot be inferred
### Response:
def infer_issubclass(callnode, context=None):
"""Infer issubclass() calls
:param nodes.Call callnode: an `issubclass` call
:param InferenceContext: the context for the inference
:rtype nodes.Const: Boolean Const value of the `issubclass` call
:raises UseInferenceDefault: If the node cannot be inferred
"""
call = arguments.CallSite.from_call(callnode)
if call.keyword_arguments:
# issubclass doesn't support keyword arguments
raise UseInferenceDefault("TypeError: issubclass() takes no keyword arguments")
if len(call.positional_arguments) != 2:
raise UseInferenceDefault(
"Expected two arguments, got {count}".format(
count=len(call.positional_arguments)
)
)
# The left hand argument is the obj to be checked
obj_node, class_or_tuple_node = call.positional_arguments
try:
obj_type = next(obj_node.infer(context=context))
except InferenceError as exc:
raise UseInferenceDefault from exc
if not isinstance(obj_type, nodes.ClassDef):
raise UseInferenceDefault("TypeError: arg 1 must be class")
# The right hand argument is the class(es) that the given
# object is to be checked against.
try:
class_container = _class_or_tuple_to_container(
class_or_tuple_node, context=context
)
except InferenceError as exc:
raise UseInferenceDefault from exc
try:
issubclass_bool = helpers.object_issubclass(obj_type, class_container, context)
except AstroidTypeError as exc:
raise UseInferenceDefault("TypeError: " + str(exc)) from exc
except MroError as exc:
raise UseInferenceDefault from exc
return nodes.Const(issubclass_bool) |
def save_images(images, filenames, output_dir):
"""Saves images to the output directory.
Args:
images: array with minibatch of images
filenames: list of filenames without path
If number of file names in this list less than number of images in
the minibatch then only first len(filenames) images will be saved.
output_dir: directory where to save images
"""
for i, filename in enumerate(filenames):
# Images for inception classifier are normalized to be in [-1, 1] interval,
# so rescale them back to [0, 1].
with tf.gfile.Open(os.path.join(output_dir, filename), 'w') as f:
imsave(f, (images[i, :, :, :] + 1.0) * 0.5, format='png') | Saves images to the output directory.
Args:
images: array with minibatch of images
filenames: list of filenames without path
If number of file names in this list less than number of images in
the minibatch then only first len(filenames) images will be saved.
output_dir: directory where to save images | Below is the the instruction that describes the task:
### Input:
Saves images to the output directory.
Args:
images: array with minibatch of images
filenames: list of filenames without path
If number of file names in this list less than number of images in
the minibatch then only first len(filenames) images will be saved.
output_dir: directory where to save images
### Response:
def save_images(images, filenames, output_dir):
"""Saves images to the output directory.
Args:
images: array with minibatch of images
filenames: list of filenames without path
If number of file names in this list less than number of images in
the minibatch then only first len(filenames) images will be saved.
output_dir: directory where to save images
"""
for i, filename in enumerate(filenames):
# Images for inception classifier are normalized to be in [-1, 1] interval,
# so rescale them back to [0, 1].
with tf.gfile.Open(os.path.join(output_dir, filename), 'w') as f:
imsave(f, (images[i, :, :, :] + 1.0) * 0.5, format='png') |
def create_from_mesh_and_lines(cls, mesh, lines):
'''
Return a copy of mesh with line vertices and edges added.
mesh: A Mesh
lines: A list of Polyline or Lines objects.
'''
mesh_with_lines = mesh.copy()
mesh_with_lines.add_lines(lines)
return mesh_with_lines | Return a copy of mesh with line vertices and edges added.
mesh: A Mesh
lines: A list of Polyline or Lines objects. | Below is the the instruction that describes the task:
### Input:
Return a copy of mesh with line vertices and edges added.
mesh: A Mesh
lines: A list of Polyline or Lines objects.
### Response:
def create_from_mesh_and_lines(cls, mesh, lines):
'''
Return a copy of mesh with line vertices and edges added.
mesh: A Mesh
lines: A list of Polyline or Lines objects.
'''
mesh_with_lines = mesh.copy()
mesh_with_lines.add_lines(lines)
return mesh_with_lines |
def nn_getsockopt(socket, level, option, value):
"""retrieve a socket option
socket - socket number
level - option level
option - option
value - a writable byte buffer (e.g. a bytearray) which the option value
will be copied to
returns - number of bytes copied or on error nunber < 0
"""
if memoryview(value).readonly:
raise TypeError('Writable buffer is required')
size_t_size = ctypes.c_size_t(len(value))
rtn = _nn_getsockopt(socket, level, option, ctypes.addressof(value),
ctypes.byref(size_t_size))
return (rtn, size_t_size.value) | retrieve a socket option
socket - socket number
level - option level
option - option
value - a writable byte buffer (e.g. a bytearray) which the option value
will be copied to
returns - number of bytes copied or on error nunber < 0 | Below is the the instruction that describes the task:
### Input:
retrieve a socket option
socket - socket number
level - option level
option - option
value - a writable byte buffer (e.g. a bytearray) which the option value
will be copied to
returns - number of bytes copied or on error nunber < 0
### Response:
def nn_getsockopt(socket, level, option, value):
"""retrieve a socket option
socket - socket number
level - option level
option - option
value - a writable byte buffer (e.g. a bytearray) which the option value
will be copied to
returns - number of bytes copied or on error nunber < 0
"""
if memoryview(value).readonly:
raise TypeError('Writable buffer is required')
size_t_size = ctypes.c_size_t(len(value))
rtn = _nn_getsockopt(socket, level, option, ctypes.addressof(value),
ctypes.byref(size_t_size))
return (rtn, size_t_size.value) |
def delete_old_tickets(**kwargs):
"""
Delete tickets if they are over 2 days old
kwargs = ['raw', 'signal', 'instance', 'sender', 'created']
"""
sender = kwargs.get('sender', None)
now = datetime.now()
expire = datetime(now.year, now.month, now.day - 2)
sender.objects.filter(created__lt=expire).delete() | Delete tickets if they are over 2 days old
kwargs = ['raw', 'signal', 'instance', 'sender', 'created'] | Below is the the instruction that describes the task:
### Input:
Delete tickets if they are over 2 days old
kwargs = ['raw', 'signal', 'instance', 'sender', 'created']
### Response:
def delete_old_tickets(**kwargs):
"""
Delete tickets if they are over 2 days old
kwargs = ['raw', 'signal', 'instance', 'sender', 'created']
"""
sender = kwargs.get('sender', None)
now = datetime.now()
expire = datetime(now.year, now.month, now.day - 2)
sender.objects.filter(created__lt=expire).delete() |
def runInteraction(self, interaction, *args, **kwargs):
"""
Interact with the database and return the result.
:param interaction: <function> method with first argument is a <adbapi.Transaction> instance
:param args: additional positional arguments to be passed to interaction
:param kwargs: keyword arguments to be passed to interaction
:return: <defer>
"""
try:
return self._connectionPool.runInteraction(
interaction,
*args,
**kwargs
)
except:
d = defer.Deferred()
d.errback()
return d | Interact with the database and return the result.
:param interaction: <function> method with first argument is a <adbapi.Transaction> instance
:param args: additional positional arguments to be passed to interaction
:param kwargs: keyword arguments to be passed to interaction
:return: <defer> | Below is the the instruction that describes the task:
### Input:
Interact with the database and return the result.
:param interaction: <function> method with first argument is a <adbapi.Transaction> instance
:param args: additional positional arguments to be passed to interaction
:param kwargs: keyword arguments to be passed to interaction
:return: <defer>
### Response:
def runInteraction(self, interaction, *args, **kwargs):
"""
Interact with the database and return the result.
:param interaction: <function> method with first argument is a <adbapi.Transaction> instance
:param args: additional positional arguments to be passed to interaction
:param kwargs: keyword arguments to be passed to interaction
:return: <defer>
"""
try:
return self._connectionPool.runInteraction(
interaction,
*args,
**kwargs
)
except:
d = defer.Deferred()
d.errback()
return d |
def package_config(path, template='__config__.ini.TEMPLATE', config_name='__config__.ini', **params):
"""configure the module at the given path with a config template and file.
path = the filesystem path to the given module
template = the config template filename within that path
config_name = the config filename within that path
params = a dict containing config params, which are found in the template using %(key)s.
"""
config_fns = []
template_fns = rglob(path, template)
for template_fn in template_fns:
config_template = ConfigTemplate(fn=template_fn)
config = config_template.render(
fn=os.path.join(os.path.dirname(template_fn), config_name),
prompt=True, path=path, **params)
config.write()
config_fns.append(config.fn)
log.info('wrote %r' % config)
return config_fns | configure the module at the given path with a config template and file.
path = the filesystem path to the given module
template = the config template filename within that path
config_name = the config filename within that path
params = a dict containing config params, which are found in the template using %(key)s. | Below is the the instruction that describes the task:
### Input:
configure the module at the given path with a config template and file.
path = the filesystem path to the given module
template = the config template filename within that path
config_name = the config filename within that path
params = a dict containing config params, which are found in the template using %(key)s.
### Response:
def package_config(path, template='__config__.ini.TEMPLATE', config_name='__config__.ini', **params):
"""configure the module at the given path with a config template and file.
path = the filesystem path to the given module
template = the config template filename within that path
config_name = the config filename within that path
params = a dict containing config params, which are found in the template using %(key)s.
"""
config_fns = []
template_fns = rglob(path, template)
for template_fn in template_fns:
config_template = ConfigTemplate(fn=template_fn)
config = config_template.render(
fn=os.path.join(os.path.dirname(template_fn), config_name),
prompt=True, path=path, **params)
config.write()
config_fns.append(config.fn)
log.info('wrote %r' % config)
return config_fns |
def describe_enum(enum_definition):
"""Build descriptor for Enum class.
Args:
enum_definition: Enum class to provide descriptor for.
Returns:
Initialized EnumDescriptor instance describing the Enum class.
"""
enum_descriptor = EnumDescriptor()
enum_descriptor.name = enum_definition.definition_name().split('.')[-1]
values = []
for number in enum_definition.numbers():
value = enum_definition.lookup_by_number(number)
values.append(describe_enum_value(value))
if values:
enum_descriptor.values = values
return enum_descriptor | Build descriptor for Enum class.
Args:
enum_definition: Enum class to provide descriptor for.
Returns:
Initialized EnumDescriptor instance describing the Enum class. | Below is the the instruction that describes the task:
### Input:
Build descriptor for Enum class.
Args:
enum_definition: Enum class to provide descriptor for.
Returns:
Initialized EnumDescriptor instance describing the Enum class.
### Response:
def describe_enum(enum_definition):
"""Build descriptor for Enum class.
Args:
enum_definition: Enum class to provide descriptor for.
Returns:
Initialized EnumDescriptor instance describing the Enum class.
"""
enum_descriptor = EnumDescriptor()
enum_descriptor.name = enum_definition.definition_name().split('.')[-1]
values = []
for number in enum_definition.numbers():
value = enum_definition.lookup_by_number(number)
values.append(describe_enum_value(value))
if values:
enum_descriptor.values = values
return enum_descriptor |
def parse_nate_sims(path):
'''
parts0.dat) contains the id number, particle fraction (ignore) a, ecc, inc, long. asc., arg. per, and mean anomaly
for every particle in the simulation at t=0.
The second (parts3999.dat) contains the same info at t=3.999 Gyrs for these particles.
:return:
'''
zerostate = pandas.read_table(path + 'parts0.dat', delim_whitespace=True)
endstate = pandas.read_table(path + 'parts3999.dat', delim_whitespace=True)
# add perihelion
zerostate['q'] = zerostate['a'] * (1 - zerostate['e'])
endstate['q'] = endstate['a'] * (1 - endstate['e'])
return zerostate, endstate | parts0.dat) contains the id number, particle fraction (ignore) a, ecc, inc, long. asc., arg. per, and mean anomaly
for every particle in the simulation at t=0.
The second (parts3999.dat) contains the same info at t=3.999 Gyrs for these particles.
:return: | Below is the the instruction that describes the task:
### Input:
parts0.dat) contains the id number, particle fraction (ignore) a, ecc, inc, long. asc., arg. per, and mean anomaly
for every particle in the simulation at t=0.
The second (parts3999.dat) contains the same info at t=3.999 Gyrs for these particles.
:return:
### Response:
def parse_nate_sims(path):
'''
parts0.dat) contains the id number, particle fraction (ignore) a, ecc, inc, long. asc., arg. per, and mean anomaly
for every particle in the simulation at t=0.
The second (parts3999.dat) contains the same info at t=3.999 Gyrs for these particles.
:return:
'''
zerostate = pandas.read_table(path + 'parts0.dat', delim_whitespace=True)
endstate = pandas.read_table(path + 'parts3999.dat', delim_whitespace=True)
# add perihelion
zerostate['q'] = zerostate['a'] * (1 - zerostate['e'])
endstate['q'] = endstate['a'] * (1 - endstate['e'])
return zerostate, endstate |
def gps_date_time_encode(self, year, month, day, hour, min, sec, clockStat, visSat, useSat, GppGl, sigUsedMask, percentUsed):
'''
Pilot console PWM messges.
year : Year reported by Gps (uint8_t)
month : Month reported by Gps (uint8_t)
day : Day reported by Gps (uint8_t)
hour : Hour reported by Gps (uint8_t)
min : Min reported by Gps (uint8_t)
sec : Sec reported by Gps (uint8_t)
clockStat : Clock Status. See table 47 page 211 OEMStar Manual (uint8_t)
visSat : Visible satellites reported by Gps (uint8_t)
useSat : Used satellites in Solution (uint8_t)
GppGl : GPS+GLONASS satellites in Solution (uint8_t)
sigUsedMask : GPS and GLONASS usage mask (bit 0 GPS_used? bit_4 GLONASS_used?) (uint8_t)
percentUsed : Percent used GPS (uint8_t)
'''
return MAVLink_gps_date_time_message(year, month, day, hour, min, sec, clockStat, visSat, useSat, GppGl, sigUsedMask, percentUsed) | Pilot console PWM messges.
year : Year reported by Gps (uint8_t)
month : Month reported by Gps (uint8_t)
day : Day reported by Gps (uint8_t)
hour : Hour reported by Gps (uint8_t)
min : Min reported by Gps (uint8_t)
sec : Sec reported by Gps (uint8_t)
clockStat : Clock Status. See table 47 page 211 OEMStar Manual (uint8_t)
visSat : Visible satellites reported by Gps (uint8_t)
useSat : Used satellites in Solution (uint8_t)
GppGl : GPS+GLONASS satellites in Solution (uint8_t)
sigUsedMask : GPS and GLONASS usage mask (bit 0 GPS_used? bit_4 GLONASS_used?) (uint8_t)
percentUsed : Percent used GPS (uint8_t) | Below is the the instruction that describes the task:
### Input:
Pilot console PWM messges.
year : Year reported by Gps (uint8_t)
month : Month reported by Gps (uint8_t)
day : Day reported by Gps (uint8_t)
hour : Hour reported by Gps (uint8_t)
min : Min reported by Gps (uint8_t)
sec : Sec reported by Gps (uint8_t)
clockStat : Clock Status. See table 47 page 211 OEMStar Manual (uint8_t)
visSat : Visible satellites reported by Gps (uint8_t)
useSat : Used satellites in Solution (uint8_t)
GppGl : GPS+GLONASS satellites in Solution (uint8_t)
sigUsedMask : GPS and GLONASS usage mask (bit 0 GPS_used? bit_4 GLONASS_used?) (uint8_t)
percentUsed : Percent used GPS (uint8_t)
### Response:
def gps_date_time_encode(self, year, month, day, hour, min, sec, clockStat, visSat, useSat, GppGl, sigUsedMask, percentUsed):
'''
Pilot console PWM messges.
year : Year reported by Gps (uint8_t)
month : Month reported by Gps (uint8_t)
day : Day reported by Gps (uint8_t)
hour : Hour reported by Gps (uint8_t)
min : Min reported by Gps (uint8_t)
sec : Sec reported by Gps (uint8_t)
clockStat : Clock Status. See table 47 page 211 OEMStar Manual (uint8_t)
visSat : Visible satellites reported by Gps (uint8_t)
useSat : Used satellites in Solution (uint8_t)
GppGl : GPS+GLONASS satellites in Solution (uint8_t)
sigUsedMask : GPS and GLONASS usage mask (bit 0 GPS_used? bit_4 GLONASS_used?) (uint8_t)
percentUsed : Percent used GPS (uint8_t)
'''
return MAVLink_gps_date_time_message(year, month, day, hour, min, sec, clockStat, visSat, useSat, GppGl, sigUsedMask, percentUsed) |
def process_host(self, host_spec, name, line_idx=0):
"""
One host spec processing
:param host_spec:
:param name:
:param line_idx:
:return:
"""
try:
parts = host_spec.split(':', 1)
host = parts[0].strip()
port = parts[1] if len(parts) > 1 else 443
pem_cert = self.get_server_certificate(host, port)
if pem_cert:
sub = self.roca.process_pem_cert(pem_cert, name, line_idx)
return sub
except Exception as e:
logger.error('Error in file processing %s (%s) : %s' % (host_spec, name, e))
self.roca.trace_logger.log(e) | One host spec processing
:param host_spec:
:param name:
:param line_idx:
:return: | Below is the the instruction that describes the task:
### Input:
One host spec processing
:param host_spec:
:param name:
:param line_idx:
:return:
### Response:
def process_host(self, host_spec, name, line_idx=0):
"""
One host spec processing
:param host_spec:
:param name:
:param line_idx:
:return:
"""
try:
parts = host_spec.split(':', 1)
host = parts[0].strip()
port = parts[1] if len(parts) > 1 else 443
pem_cert = self.get_server_certificate(host, port)
if pem_cert:
sub = self.roca.process_pem_cert(pem_cert, name, line_idx)
return sub
except Exception as e:
logger.error('Error in file processing %s (%s) : %s' % (host_spec, name, e))
self.roca.trace_logger.log(e) |
def get_tx_identity_info(self, tx_ac):
"""returns features associated with a single transcript.
:param tx_ac: transcript accession with version (e.g., 'NM_199425.2')
:type tx_ac: str
# database output
-[ RECORD 1 ]--+-------------
tx_ac | NM_199425.2
alt_ac | NM_199425.2
alt_aln_method | transcript
cds_start_i | 283
cds_end_i | 1003
lengths | {707,79,410}
hgnc | VSX1
"""
rows = self._fetchall(self._queries['tx_identity_info'], [tx_ac])
if len(rows) == 0:
raise HGVSDataNotAvailableError(
"No transcript definition for (tx_ac={tx_ac})".format(tx_ac=tx_ac))
return rows[0] | returns features associated with a single transcript.
:param tx_ac: transcript accession with version (e.g., 'NM_199425.2')
:type tx_ac: str
# database output
-[ RECORD 1 ]--+-------------
tx_ac | NM_199425.2
alt_ac | NM_199425.2
alt_aln_method | transcript
cds_start_i | 283
cds_end_i | 1003
lengths | {707,79,410}
hgnc | VSX1 | Below is the the instruction that describes the task:
### Input:
returns features associated with a single transcript.
:param tx_ac: transcript accession with version (e.g., 'NM_199425.2')
:type tx_ac: str
# database output
-[ RECORD 1 ]--+-------------
tx_ac | NM_199425.2
alt_ac | NM_199425.2
alt_aln_method | transcript
cds_start_i | 283
cds_end_i | 1003
lengths | {707,79,410}
hgnc | VSX1
### Response:
def get_tx_identity_info(self, tx_ac):
"""returns features associated with a single transcript.
:param tx_ac: transcript accession with version (e.g., 'NM_199425.2')
:type tx_ac: str
# database output
-[ RECORD 1 ]--+-------------
tx_ac | NM_199425.2
alt_ac | NM_199425.2
alt_aln_method | transcript
cds_start_i | 283
cds_end_i | 1003
lengths | {707,79,410}
hgnc | VSX1
"""
rows = self._fetchall(self._queries['tx_identity_info'], [tx_ac])
if len(rows) == 0:
raise HGVSDataNotAvailableError(
"No transcript definition for (tx_ac={tx_ac})".format(tx_ac=tx_ac))
return rows[0] |
def append(self, sc):
"""
Add scale 'sc' and remove any previous
scales that cover the same aesthetics
"""
ae = sc.aesthetics[0]
cover_ae = self.find(ae)
if any(cover_ae):
warn(_TPL_DUPLICATE_SCALE.format(ae), PlotnineWarning)
idx = cover_ae.index(True)
self.pop(idx)
# super() does not work well with reloads
list.append(self, sc) | Add scale 'sc' and remove any previous
scales that cover the same aesthetics | Below is the the instruction that describes the task:
### Input:
Add scale 'sc' and remove any previous
scales that cover the same aesthetics
### Response:
def append(self, sc):
"""
Add scale 'sc' and remove any previous
scales that cover the same aesthetics
"""
ae = sc.aesthetics[0]
cover_ae = self.find(ae)
if any(cover_ae):
warn(_TPL_DUPLICATE_SCALE.format(ae), PlotnineWarning)
idx = cover_ae.index(True)
self.pop(idx)
# super() does not work well with reloads
list.append(self, sc) |
def new_address(self, sender=None, nonce=None):
"""Create a fresh 160bit address"""
if sender is not None and nonce is None:
nonce = self.get_nonce(sender)
new_address = self.calculate_new_address(sender, nonce)
if sender is None and new_address in self:
return self.new_address(sender, nonce)
return new_address | Create a fresh 160bit address | Below is the the instruction that describes the task:
### Input:
Create a fresh 160bit address
### Response:
def new_address(self, sender=None, nonce=None):
"""Create a fresh 160bit address"""
if sender is not None and nonce is None:
nonce = self.get_nonce(sender)
new_address = self.calculate_new_address(sender, nonce)
if sender is None and new_address in self:
return self.new_address(sender, nonce)
return new_address |
def parse_annotation_function(code, func_name):
"""Parse an annotation function.
Return the value of `name` keyword argument and the AST Call node.
func_name: expected function name
"""
expr = parse_annotation(code)
call = expr.value
assert type(call) is ast.Call, 'Annotation is not a function call'
assert type(call.func) is ast.Attribute, 'Unexpected annotation function'
assert type(call.func.value) is ast.Name, 'Invalid annotation function name'
assert call.func.value.id == 'nni', 'Annotation is not a NNI function'
assert call.func.attr == func_name, 'internal error #2'
assert len(call.keywords) == 1, 'Annotation function contains more than one keyword argument'
assert call.keywords[0].arg == 'name', 'Annotation keyword argument is not "name"'
name = call.keywords[0].value
return name, call | Parse an annotation function.
Return the value of `name` keyword argument and the AST Call node.
func_name: expected function name | Below is the the instruction that describes the task:
### Input:
Parse an annotation function.
Return the value of `name` keyword argument and the AST Call node.
func_name: expected function name
### Response:
def parse_annotation_function(code, func_name):
"""Parse an annotation function.
Return the value of `name` keyword argument and the AST Call node.
func_name: expected function name
"""
expr = parse_annotation(code)
call = expr.value
assert type(call) is ast.Call, 'Annotation is not a function call'
assert type(call.func) is ast.Attribute, 'Unexpected annotation function'
assert type(call.func.value) is ast.Name, 'Invalid annotation function name'
assert call.func.value.id == 'nni', 'Annotation is not a NNI function'
assert call.func.attr == func_name, 'internal error #2'
assert len(call.keywords) == 1, 'Annotation function contains more than one keyword argument'
assert call.keywords[0].arg == 'name', 'Annotation keyword argument is not "name"'
name = call.keywords[0].value
return name, call |
def sendNotification(snmpDispatcher, authData, transportTarget,
                     notifyType, *varBinds, **options):
    """Send SNMP notification.

    Based on passed parameters, prepares SNMP TRAP or INFORM
    notification (:RFC:`1905#section-4.2.6`) and schedules its
    transmission by I/O framework at a later point of time.

    Parameters
    ----------
    snmpDispatcher: :py:class:`~pysnmp.hlapi.v1arch.asyncore.SnmpDispatcher`
        Class instance representing asyncore-based asynchronous event loop and
        associated state information.

    authData: :py:class:`~pysnmp.hlapi.CommunityData` or :py:class:`~pysnmp.hlapi.UsmUserData`
        Class instance representing SNMP credentials.

    transportTarget: :py:class:`~pysnmp.hlapi.asyncore.UdpTransportTarget` or
        :py:class:`~pysnmp.hlapi.asyncore.Udp6TransportTarget`
        Class instance representing transport type along with SNMP peer address.

    notifyType: str
        Indicates type of notification to be sent. Recognized literal
        values are *trap* or *inform*.

    \*varBinds: :class:`tuple` of OID-value pairs or :py:class:`~pysnmp.smi.rfc1902.ObjectType` or :py:class:`~pysnmp.smi.rfc1902.NotificationType`
        One or more objects representing MIB variables to place
        into SNMP notification. It could be tuples of OID-values
        or :py:class:`~pysnmp.smi.rfc1902.ObjectType` class instances
        of :py:class:`~pysnmp.smi.rfc1902.NotificationType` objects.

        Besides user variable-bindings, SNMP Notification PDU requires at
        least two variable-bindings to be present:

        0. SNMPv2-MIB::sysUpTime.0 = <agent uptime>
        1. SNMPv2-SMI::snmpTrapOID.0 = <notification ID>

        When sending SNMPv1 TRAP, more variable-bindings could be present:

        2. SNMP-COMMUNITY-MIB::snmpTrapAddress.0 = <agent-IP>
        3. SNMP-COMMUNITY-MIB::snmpTrapCommunity.0 = <snmp-community-name>
        4. SNMP-COMMUNITY-MIB::snmpTrapEnterprise.0 = <enterprise-OID>

        If user does not supply some or any of the above variable-bindings or
        if they are at the wrong positions, the system will add/reorder the
        missing ones automatically.

        On top of that, some notification types imply including some additional
        variable-bindings providing additional details on the event being
        reported. Therefore it is generally easier to use
        :py:class:`~pysnmp.smi.rfc1902.NotificationType` object which will
        help adding relevant variable-bindings.

    Other Parameters
    ----------------
    \*\*options :
        Request options:

        * `lookupMib` - load MIB and resolve response MIB variables at
          the cost of slightly reduced performance. Default is `False`.
        * `cbFun` (callable) - user-supplied callable that is invoked
          to pass SNMP response data or error to user at a later point
          of time. Default is `None`.
        * `cbCtx` (object) - user-supplied object passing additional
          parameters to/from `cbFun`. Default is `None`.

    Note
    ----
    The `SnmpDispatcher` object may be expensive to create, therefore it is
    advised to maintain it for the lifecycle of the application/thread for
    as long as possible.

    Returns
    -------
    sendRequestHandle: int
        Unique request identifier. Can be used for matching received
        responses with ongoing *INFORM* requests. Returns `None` for
        *TRAP* notifications.

    Raises
    ------
    PySnmpError
        Or its derivative indicating that an error occurred while
        performing SNMP operation.

    Examples
    --------
    >>> from pysnmp.hlapi.v1arch.asyncore import *
    >>>
    >>> snmpDispatcher = SnmpDispatcher()
    >>>
    >>> sendNotification(
    >>>     snmpDispatcher,
    >>>     CommunityData('public'),
    >>>     UdpTransportTarget(('demo.snmplabs.com', 162)),
    >>>     'trap',
    >>>     NotificationType(ObjectIdentity('SNMPv2-MIB', 'coldStart')),
    >>>     lookupMib=True
    >>> )
    >>> snmpDispatcher.transportDispatcher.runDispatcher()
    """
    sysUpTime = v2c.apiTrapPDU.sysUpTime
    snmpTrapOID = v2c.apiTrapPDU.snmpTrapOID

    def _ensureVarBinds(varBinds):
        # Normalize var-binds in place so that position 0 is sysUpTime.0 and
        # position 1 is snmpTrapOID.0, as SNMPv2 notification PDUs require.

        # Add sysUpTime if not present already
        if not varBinds or varBinds[0][0] != sysUpTime:
            varBinds.insert(0, (v2c.ObjectIdentifier(sysUpTime), v2c.TimeTicks(0)))

        # Search for and reposition sysUpTime if it's elsewhere
        for idx, varBind in enumerate(varBinds[1:]):
            if varBind[0] == sysUpTime:
                varBinds[0] = varBind
                del varBinds[idx + 1]
                break

        if len(varBinds) < 2:
            raise error.PySnmpError('SNMP notification PDU requires '
                                    'SNMPv2-MIB::snmpTrapOID.0 to be present')

        # Search for and reposition snmpTrapOID if it's elsewhere
        for idx, varBind in enumerate(varBinds[2:]):
            if varBind[0] == snmpTrapOID:
                del varBinds[idx + 2]
                if varBinds[1][0] == snmpTrapOID:
                    varBinds[1] = varBind
                else:
                    varBinds.insert(1, varBind)
                break

        # Fail on missing snmpTrapOID
        if varBinds[1][0] != snmpTrapOID:
            raise error.PySnmpError('SNMP notification PDU requires '
                                    'SNMPv2-MIB::snmpTrapOID.0 to be present')

        return varBinds

    def _cbFun(snmpDispatcher, stateHandle, errorIndication, rspPdu, _cbCtx):
        # Internal response handler for INFORM. Closes over `cbFun`, `cbCtx`,
        # `lookupMib` and `reqPdu`, which are bound later in the enclosing
        # function body (before any response can arrive).
        if not cbFun:
            return

        if errorIndication:
            # Transport/engine-level failure: report with zeroed status fields.
            cbFun(errorIndication, v2c.Integer(0), v2c.Integer(0), None,
                  cbCtx=cbCtx, snmpDispatcher=snmpDispatcher, stateHandle=stateHandle)
            return

        errorStatus = v2c.apiTrapPDU.getErrorStatus(rspPdu)
        errorIndex = v2c.apiTrapPDU.getErrorIndex(rspPdu)

        varBinds = v2c.apiTrapPDU.getVarBinds(rspPdu)

        if lookupMib:
            varBinds = VB_PROCESSOR.unmakeVarBinds(snmpDispatcher.cache, varBinds)

        nextStateHandle = v2c.getNextRequestID()

        # If the user callback returns fresh var-binds, chain another
        # notification by reusing (and re-stamping) the request PDU.
        nextVarBinds = cbFun(errorIndication, errorStatus, errorIndex, varBinds,
                             cbCtx=cbCtx,
                             snmpDispatcher=snmpDispatcher,
                             stateHandle=stateHandle,
                             nextStateHandle=nextStateHandle)

        if not nextVarBinds:
            return

        v2c.apiTrapPDU.setRequestID(reqPdu, nextStateHandle)
        v2c.apiTrapPDU.setVarBinds(reqPdu, _ensureVarBinds(nextVarBinds))

        return snmpDispatcher.sendPdu(authData, transportTarget, reqPdu, cbFun=_cbFun)

    lookupMib, cbFun, cbCtx = [options.get(x) for x in ('lookupMib', 'cbFun', 'cbCtx')]

    if lookupMib:
        varBinds = VB_PROCESSOR.makeVarBinds(snmpDispatcher.cache, varBinds)

    if notifyType == 'trap':
        reqPdu = v2c.TrapPDU()
    else:
        reqPdu = v2c.InformRequestPDU()

    v2c.apiTrapPDU.setDefaults(reqPdu)
    v2c.apiTrapPDU.setVarBinds(reqPdu, varBinds)

    # Read back, normalize ordering/mandatory entries, then re-set.
    varBinds = v2c.apiTrapPDU.getVarBinds(reqPdu)

    v2c.apiTrapPDU.setVarBinds(reqPdu, _ensureVarBinds(varBinds))

    # mpModel 0 means SNMPv1: downgrade the v2c PDU per RFC 2576 rules.
    if authData.mpModel == 0:
        reqPdu = rfc2576.v2ToV1(reqPdu)

    return snmpDispatcher.sendPdu(authData, transportTarget, reqPdu, cbFun=_cbFun)
Based on passed parameters, prepares SNMP TRAP or INFORM
notification (:RFC:`1905#section-4.2.6`) and schedules its
transmission by I/O framework at a later point of time.
Parameters
----------
snmpDispatcher: :py:class:`~pysnmp.hlapi.v1arch.asyncore.SnmpDispatcher`
Class instance representing asyncore-based asynchronous event loop and
associated state information.
authData: :py:class:`~pysnmp.hlapi.CommunityData` or :py:class:`~pysnmp.hlapi.UsmUserData`
Class instance representing SNMP credentials.
transportTarget: :py:class:`~pysnmp.hlapi.asyncore.UdpTransportTarget` or
:py:class:`~pysnmp.hlapi.asyncore.Udp6TransportTarget`
Class instance representing transport type along with SNMP peer address.
notifyType: str
Indicates type of notification to be sent. Recognized literal
values are *trap* or *inform*.
\*varBinds: :class:`tuple` of OID-value pairs or :py:class:`~pysnmp.smi.rfc1902.ObjectType` or :py:class:`~pysnmp.smi.rfc1902.NotificationType`
One or more objects representing MIB variables to place
into SNMP notification. It could be tuples of OID-values
or :py:class:`~pysnmp.smi.rfc1902.ObjectType` class instances
of :py:class:`~pysnmp.smi.rfc1902.NotificationType` objects.
Besides user variable-bindings, SNMP Notification PDU requires at
least two variable-bindings to be present:
0. SNMPv2-MIB::sysUpTime.0 = <agent uptime>
1. SNMPv2-SMI::snmpTrapOID.0 = <notification ID>
When sending SNMPv1 TRAP, more variable-bindings could be present:
2. SNMP-COMMUNITY-MIB::snmpTrapAddress.0 = <agent-IP>
3. SNMP-COMMUNITY-MIB::snmpTrapCommunity.0 = <snmp-community-name>
4. SNMP-COMMUNITY-MIB::snmpTrapEnterprise.0 = <enterprise-OID>
If user does not supply some or any of the above variable-bindings or
if they are at the wrong positions, the system will add/reorder the
missing ones automatically.
On top of that, some notification types imply including some additional
variable-bindings providing additional details on the event being
reported. Therefore it is generally easier to use
:py:class:`~pysnmp.smi.rfc1902.NotificationType` object which will
help adding relevant variable-bindings.
Other Parameters
----------------
\*\*options :
Request options:
* `lookupMib` - load MIB and resolve response MIB variables at
the cost of slightly reduced performance. Default is `False`.
* `cbFun` (callable) - user-supplied callable that is invoked
to pass SNMP response data or error to user at a later point
of time. Default is `None`.
* `cbCtx` (object) - user-supplied object passing additional
parameters to/from `cbFun`. Default is `None`.
Note
----
The `SnmpDispatcher` object may be expensive to create, therefore it is
advised to maintain it for the lifecycle of the application/thread for
as long as possible.
Returns
-------
sendRequestHandle: int
Unique request identifier. Can be used for matching received
responses with ongoing *INFORM* requests. Returns `None` for
*TRAP* notifications.
Raises
------
PySnmpError
Or its derivative indicating that an error occurred while
performing SNMP operation.
Examples
--------
>>> from pysnmp.hlapi.v1arch.asyncore import *
>>>
>>> snmpDispatcher = SnmpDispatcher()
>>>
>>> sendNotification(
>>> snmpDispatcher,
>>> CommunityData('public'),
>>> UdpTransportTarget(('demo.snmplabs.com', 162)),
>>> 'trap',
>>> NotificationType(ObjectIdentity('SNMPv2-MIB', 'coldStart')),
>>> lookupMib=True
>>> )
>>> snmpDispatcher.transportDispatcher.runDispatcher() | Below is the instruction that describes the task:
### Input:
Send SNMP notification.
Based on passed parameters, prepares SNMP TRAP or INFORM
notification (:RFC:`1905#section-4.2.6`) and schedules its
transmission by I/O framework at a later point of time.
Parameters
----------
snmpDispatcher: :py:class:`~pysnmp.hlapi.v1arch.asyncore.SnmpDispatcher`
Class instance representing asyncore-based asynchronous event loop and
associated state information.
authData: :py:class:`~pysnmp.hlapi.CommunityData` or :py:class:`~pysnmp.hlapi.UsmUserData`
Class instance representing SNMP credentials.
transportTarget: :py:class:`~pysnmp.hlapi.asyncore.UdpTransportTarget` or
:py:class:`~pysnmp.hlapi.asyncore.Udp6TransportTarget`
Class instance representing transport type along with SNMP peer address.
notifyType: str
Indicates type of notification to be sent. Recognized literal
values are *trap* or *inform*.
\*varBinds: :class:`tuple` of OID-value pairs or :py:class:`~pysnmp.smi.rfc1902.ObjectType` or :py:class:`~pysnmp.smi.rfc1902.NotificationType`
One or more objects representing MIB variables to place
into SNMP notification. It could be tuples of OID-values
or :py:class:`~pysnmp.smi.rfc1902.ObjectType` class instances
of :py:class:`~pysnmp.smi.rfc1902.NotificationType` objects.
Besides user variable-bindings, SNMP Notification PDU requires at
least two variable-bindings to be present:
0. SNMPv2-MIB::sysUpTime.0 = <agent uptime>
1. SNMPv2-SMI::snmpTrapOID.0 = <notification ID>
When sending SNMPv1 TRAP, more variable-bindings could be present:
2. SNMP-COMMUNITY-MIB::snmpTrapAddress.0 = <agent-IP>
3. SNMP-COMMUNITY-MIB::snmpTrapCommunity.0 = <snmp-community-name>
4. SNMP-COMMUNITY-MIB::snmpTrapEnterprise.0 = <enterprise-OID>
If user does not supply some or any of the above variable-bindings or
if they are at the wrong positions, the system will add/reorder the
missing ones automatically.
On top of that, some notification types imply including some additional
variable-bindings providing additional details on the event being
reported. Therefore it is generally easier to use
:py:class:`~pysnmp.smi.rfc1902.NotificationType` object which will
help adding relevant variable-bindings.
Other Parameters
----------------
\*\*options :
Request options:
* `lookupMib` - load MIB and resolve response MIB variables at
the cost of slightly reduced performance. Default is `False`.
* `cbFun` (callable) - user-supplied callable that is invoked
to pass SNMP response data or error to user at a later point
of time. Default is `None`.
* `cbCtx` (object) - user-supplied object passing additional
parameters to/from `cbFun`. Default is `None`.
Note
----
The `SnmpDispatcher` object may be expensive to create, therefore it is
advised to maintain it for the lifecycle of the application/thread for
as long as possible.
Returns
-------
sendRequestHandle: int
Unique request identifier. Can be used for matching received
responses with ongoing *INFORM* requests. Returns `None` for
*TRAP* notifications.
Raises
------
PySnmpError
Or its derivative indicating that an error occurred while
performing SNMP operation.
Examples
--------
>>> from pysnmp.hlapi.v1arch.asyncore import *
>>>
>>> snmpDispatcher = SnmpDispatcher()
>>>
>>> sendNotification(
>>> snmpDispatcher,
>>> CommunityData('public'),
>>> UdpTransportTarget(('demo.snmplabs.com', 162)),
>>> 'trap',
>>> NotificationType(ObjectIdentity('SNMPv2-MIB', 'coldStart')),
>>> lookupMib=True
>>> )
>>> snmpDispatcher.transportDispatcher.runDispatcher()
### Response:
def sendNotification(snmpDispatcher, authData, transportTarget,
notifyType, *varBinds, **options):
"""Send SNMP notification.
Based on passed parameters, prepares SNMP TRAP or INFORM
notification (:RFC:`1905#section-4.2.6`) and schedules its
transmission by I/O framework at a later point of time.
Parameters
----------
snmpDispatcher: :py:class:`~pysnmp.hlapi.v1arch.asyncore.SnmpDispatcher`
Class instance representing asyncore-based asynchronous event loop and
associated state information.
authData: :py:class:`~pysnmp.hlapi.CommunityData` or :py:class:`~pysnmp.hlapi.UsmUserData`
Class instance representing SNMP credentials.
transportTarget: :py:class:`~pysnmp.hlapi.asyncore.UdpTransportTarget` or
:py:class:`~pysnmp.hlapi.asyncore.Udp6TransportTarget`
Class instance representing transport type along with SNMP peer address.
notifyType: str
Indicates type of notification to be sent. Recognized literal
values are *trap* or *inform*.
\*varBinds: :class:`tuple` of OID-value pairs or :py:class:`~pysnmp.smi.rfc1902.ObjectType` or :py:class:`~pysnmp.smi.rfc1902.NotificationType`
One or more objects representing MIB variables to place
into SNMP notification. It could be tuples of OID-values
or :py:class:`~pysnmp.smi.rfc1902.ObjectType` class instances
of :py:class:`~pysnmp.smi.rfc1902.NotificationType` objects.
Besides user variable-bindings, SNMP Notification PDU requires at
least two variable-bindings to be present:
0. SNMPv2-MIB::sysUpTime.0 = <agent uptime>
1. SNMPv2-SMI::snmpTrapOID.0 = <notification ID>
When sending SNMPv1 TRAP, more variable-bindings could be present:
2. SNMP-COMMUNITY-MIB::snmpTrapAddress.0 = <agent-IP>
3. SNMP-COMMUNITY-MIB::snmpTrapCommunity.0 = <snmp-community-name>
4. SNMP-COMMUNITY-MIB::snmpTrapEnterprise.0 = <enterprise-OID>
If user does not supply some or any of the above variable-bindings or
if they are at the wrong positions, the system will add/reorder the
missing ones automatically.
On top of that, some notification types imply including some additional
variable-bindings providing additional details on the event being
reported. Therefore it is generally easier to use
:py:class:`~pysnmp.smi.rfc1902.NotificationType` object which will
help adding relevant variable-bindings.
Other Parameters
----------------
\*\*options :
Request options:
* `lookupMib` - load MIB and resolve response MIB variables at
the cost of slightly reduced performance. Default is `False`.
* `cbFun` (callable) - user-supplied callable that is invoked
to pass SNMP response data or error to user at a later point
of time. Default is `None`.
* `cbCtx` (object) - user-supplied object passing additional
parameters to/from `cbFun`. Default is `None`.
Note
----
The `SnmpDispatcher` object may be expensive to create, therefore it is
advised to maintain it for the lifecycle of the application/thread for
as long as possible.
Returns
-------
sendRequestHandle: int
Unique request identifier. Can be used for matching received
responses with ongoing *INFORM* requests. Returns `None` for
*TRAP* notifications.
Raises
------
PySnmpError
Or its derivative indicating that an error occurred while
performing SNMP operation.
Examples
--------
>>> from pysnmp.hlapi.v1arch.asyncore import *
>>>
>>> snmpDispatcher = SnmpDispatcher()
>>>
>>> sendNotification(
>>> snmpDispatcher,
>>> CommunityData('public'),
>>> UdpTransportTarget(('demo.snmplabs.com', 162)),
>>> 'trap',
>>> NotificationType(ObjectIdentity('SNMPv2-MIB', 'coldStart')),
>>> lookupMib=True
>>> )
>>> snmpDispatcher.transportDispatcher.runDispatcher()
"""
sysUpTime = v2c.apiTrapPDU.sysUpTime
snmpTrapOID = v2c.apiTrapPDU.snmpTrapOID
def _ensureVarBinds(varBinds):
# Add sysUpTime if not present already
if not varBinds or varBinds[0][0] != sysUpTime:
varBinds.insert(0, (v2c.ObjectIdentifier(sysUpTime), v2c.TimeTicks(0)))
# Search for and reposition sysUpTime if it's elsewhere
for idx, varBind in enumerate(varBinds[1:]):
if varBind[0] == sysUpTime:
varBinds[0] = varBind
del varBinds[idx + 1]
break
if len(varBinds) < 2:
raise error.PySnmpError('SNMP notification PDU requires '
'SNMPv2-MIB::snmpTrapOID.0 to be present')
# Search for and reposition snmpTrapOID if it's elsewhere
for idx, varBind in enumerate(varBinds[2:]):
if varBind[0] == snmpTrapOID:
del varBinds[idx + 2]
if varBinds[1][0] == snmpTrapOID:
varBinds[1] = varBind
else:
varBinds.insert(1, varBind)
break
# Fail on missing snmpTrapOID
if varBinds[1][0] != snmpTrapOID:
raise error.PySnmpError('SNMP notification PDU requires '
'SNMPv2-MIB::snmpTrapOID.0 to be present')
return varBinds
def _cbFun(snmpDispatcher, stateHandle, errorIndication, rspPdu, _cbCtx):
if not cbFun:
return
if errorIndication:
cbFun(errorIndication, v2c.Integer(0), v2c.Integer(0), None,
cbCtx=cbCtx, snmpDispatcher=snmpDispatcher, stateHandle=stateHandle)
return
errorStatus = v2c.apiTrapPDU.getErrorStatus(rspPdu)
errorIndex = v2c.apiTrapPDU.getErrorIndex(rspPdu)
varBinds = v2c.apiTrapPDU.getVarBinds(rspPdu)
if lookupMib:
varBinds = VB_PROCESSOR.unmakeVarBinds(snmpDispatcher.cache, varBinds)
nextStateHandle = v2c.getNextRequestID()
nextVarBinds = cbFun(errorIndication, errorStatus, errorIndex, varBinds,
cbCtx=cbCtx,
snmpDispatcher=snmpDispatcher,
stateHandle=stateHandle,
nextStateHandle=nextStateHandle)
if not nextVarBinds:
return
v2c.apiTrapPDU.setRequestID(reqPdu, nextStateHandle)
v2c.apiTrapPDU.setVarBinds(reqPdu, _ensureVarBinds(nextVarBinds))
return snmpDispatcher.sendPdu(authData, transportTarget, reqPdu, cbFun=_cbFun)
lookupMib, cbFun, cbCtx = [options.get(x) for x in ('lookupMib', 'cbFun', 'cbCtx')]
if lookupMib:
varBinds = VB_PROCESSOR.makeVarBinds(snmpDispatcher.cache, varBinds)
if notifyType == 'trap':
reqPdu = v2c.TrapPDU()
else:
reqPdu = v2c.InformRequestPDU()
v2c.apiTrapPDU.setDefaults(reqPdu)
v2c.apiTrapPDU.setVarBinds(reqPdu, varBinds)
varBinds = v2c.apiTrapPDU.getVarBinds(reqPdu)
v2c.apiTrapPDU.setVarBinds(reqPdu, _ensureVarBinds(varBinds))
if authData.mpModel == 0:
reqPdu = rfc2576.v2ToV1(reqPdu)
return snmpDispatcher.sendPdu(authData, transportTarget, reqPdu, cbFun=_cbFun) |
def click(self, target=None, modifiers=""):
    """ Moves the cursor to the target location and clicks the default mouse button.

    ``target`` may be a Pattern, string (image/text to find), Match, Region,
    or Location; when omitted, the last match (or this region itself) is used.
    ``modifiers`` is a modifier-key string (e.g. "CTRL") held during the click;
    empty string means no modifiers.

    Raises TypeError for unsupported target types.
    """
    if target is None:
        target = self._lastMatch or self  # Whichever one is not None
    if isinstance(target, (Pattern, basestring)):
        # Patterns and strings both go through find() to locate the target.
        target_location = self.find(target).getTarget()
    elif isinstance(target, Match):
        target_location = target.getTarget()
    elif isinstance(target, Region):
        target_location = target.getCenter()
    elif isinstance(target, Location):
        target_location = target
    else:
        raise TypeError("click expected Pattern, String, Match, Region, or Location object")
    if modifiers != "":
        keyboard.keyDown(modifiers)
    Mouse.moveSpeed(target_location, Settings.MoveMouseDelay)
    time.sleep(0.1)  # For responsiveness
    if Settings.ClickDelay > 0:
        time.sleep(min(1.0, Settings.ClickDelay))
        Settings.ClickDelay = 0.0  # ClickDelay is a one-shot setting
    Mouse.click()
    time.sleep(0.1)
    # Bug fix: was `if modifiers != 0:`, which is always True for the default
    # "" and released modifiers that were never pressed. Mirror the keyDown guard.
    if modifiers != "":
        keyboard.keyUp(modifiers)
    Debug.history("Clicked at {}".format(target_location))
### Input:
Moves the cursor to the target location and clicks the default mouse button.
### Response:
def click(self, target=None, modifiers=""):
""" Moves the cursor to the target location and clicks the default mouse button. """
if target is None:
target = self._lastMatch or self # Whichever one is not None
target_location = None
if isinstance(target, Pattern):
target_location = self.find(target).getTarget()
elif isinstance(target, basestring):
target_location = self.find(target).getTarget()
elif isinstance(target, Match):
target_location = target.getTarget()
elif isinstance(target, Region):
target_location = target.getCenter()
elif isinstance(target, Location):
target_location = target
else:
raise TypeError("click expected Pattern, String, Match, Region, or Location object")
if modifiers != "":
keyboard.keyDown(modifiers)
Mouse.moveSpeed(target_location, Settings.MoveMouseDelay)
time.sleep(0.1) # For responsiveness
if Settings.ClickDelay > 0:
time.sleep(min(1.0, Settings.ClickDelay))
Settings.ClickDelay = 0.0
Mouse.click()
time.sleep(0.1)
if modifiers != 0:
keyboard.keyUp(modifiers)
Debug.history("Clicked at {}".format(target_location)) |
def sizes(x):
    """Get a structure of sizes for a structure of nested arrays."""
    def element_size(item):
        # Objects without a usable `.size` contribute 0.
        try:
            return item.size
        except Exception:  # pylint: disable=broad-except
            return 0

    return nested_map(x, element_size)
### Input:
Get a structure of sizes for a structure of nested arrays.
### Response:
def sizes(x):
"""Get a structure of sizes for a structure of nested arrays."""
def size(x):
try:
return x.size
except Exception: # pylint: disable=broad-except
return 0
return nested_map(x, size) |
def start_task(self, task_tag, skip_unresolved=False):
    """ Check dependency for the given task_tag and start task. For dependency checking see
    :meth:`.WTaskDependencyRegistryStorage.dependency_check`. If task is already started then it must be
    stopped before it will be started again.

    :param task_tag: task to start. Any required dependencies will be started automatically.
    :param skip_unresolved: flag controls this method behaviour for tasks that could not be found. \
    When False, method will raise an exception if task tag was set in dependency and the related task \
    wasn't found in registry. When True that unresolvable task will be omitted
    :return: None
    """
    # Already running -- nothing to do; tasks are not restarted implicitly.
    if self.started_tasks(task_registry_id=task_tag) is not None:
        return
    task_cls = self.tasks_by_tag(task_tag)
    if task_cls is None:
        raise RuntimeError("Task '%s' wasn't found" % task_tag)
    # Validate the whole dependency graph up-front so we fail before
    # starting anything.
    self.dependency_check(task_cls, skip_unresolved=skip_unresolved)

    def start_dependency(start_task_cls):
        # Depth-first start: each not-yet-running dependency first, then
        # the task itself is started and recorded.
        # NOTE(review): no cycle guard here -- a circular dependency would
        # recurse forever; presumably dependency_check() rejects cycles. TODO confirm.
        for dependency in start_task_cls.__dependency__:
            if self.started_tasks(task_registry_id=dependency) is not None:
                continue  # this dependency is already running
            dependent_task = self.tasks_by_tag(dependency)
            if dependent_task is not None:
                start_dependency(dependent_task)
        self.__started.append(start_task_cls.start_dependent_task())

    start_dependency(task_cls)
:meth:`.WTaskDependencyRegistryStorage.dependency_check`. If task is already started then it must be
stopped before it will be started again.
:param task_tag: task to start. Any required dependencies will be started automatically.
:param skip_unresolved: flag controls this method behaviour for tasks that could not be found. \
When False, method will raise an exception if task tag was set in dependency and the related task \
wasn't found in registry. When True that unresolvable task will be omitted
:return: None | Below is the instruction that describes the task:
### Input:
Check dependency for the given task_tag and start task. For dependency checking see
:meth:`.WTaskDependencyRegistryStorage.dependency_check`. If task is already started then it must be
stopped before it will be started again.
:param task_tag: task to start. Any required dependencies will be started automatically.
:param skip_unresolved: flag controls this method behaviour for tasks that could not be found. \
When False, method will raise an exception if task tag was set in dependency and the related task \
wasn't found in registry. When True that unresolvable task will be omitted
:return: None
### Response:
def start_task(self, task_tag, skip_unresolved=False):
""" Check dependency for the given task_tag and start task. For dependency checking see
:meth:`.WTaskDependencyRegistryStorage.dependency_check`. If task is already started then it must be
stopped before it will be started again.
:param task_tag: task to start. Any required dependencies will be started automatically.
:param skip_unresolved: flag controls this method behaviour for tasks that could not be found. \
When False, method will raise an exception if task tag was set in dependency and the related task \
wasn't found in registry. When True that unresolvable task will be omitted
:return: None
"""
if self.started_tasks(task_registry_id=task_tag) is not None:
return
task_cls = self.tasks_by_tag(task_tag)
if task_cls is None:
raise RuntimeError("Task '%s' wasn't found" % task_tag)
self.dependency_check(task_cls, skip_unresolved=skip_unresolved)
def start_dependency(start_task_cls):
for dependency in start_task_cls.__dependency__:
if self.started_tasks(task_registry_id=dependency) is not None:
continue
dependent_task = self.tasks_by_tag(dependency)
if dependent_task is not None:
start_dependency(dependent_task)
self.__started.append(start_task_cls.start_dependent_task())
start_dependency(task_cls) |
def distances(self, points):
    """
    Computes the distances from the plane to each of the points. Positive distances are on the side of the
    normal of the plane while negative distances are on the other side

    :param points: Points for which distances are computed
    :return: Distances from the plane to the points (positive values on the side of the normal to the plane,
        negative values on the other side)
    """
    normal = self.normal_vector
    offset = self.d
    result = []
    for point in points:
        # Signed distance: projection onto the normal plus the plane offset.
        result.append(np.dot(normal, point) + offset)
    return result
normal of the plane while negative distances are on the other side
:param points: Points for which distances are computed
:return: Distances from the plane to the points (positive values on the side of the normal to the plane,
negative values on the other side) | Below is the instruction that describes the task:
### Input:
Computes the distances from the plane to each of the points. Positive distances are on the side of the
normal of the plane while negative distances are on the other side
:param points: Points for which distances are computed
:return: Distances from the plane to the points (positive values on the side of the normal to the plane,
negative values on the other side)
### Response:
def distances(self, points):
"""
Computes the distances from the plane to each of the points. Positive distances are on the side of the
normal of the plane while negative distances are on the other side
:param points: Points for which distances are computed
:return: Distances from the plane to the points (positive values on the side of the normal to the plane,
negative values on the other side)
"""
return [np.dot(self.normal_vector, pp) + self.d for pp in points] |
def validate_query_params(self):
    """Ensure no unsupported query params were used."""
    permitted = set(self.get_filters().keys())
    permitted.update(self.get_always_allowed_arguments())
    unsupported = set(self.request.query_params.keys()) - permitted
    if not unsupported:
        return
    msg = 'Unsupported parameter(s): {}. Please use a combination of: {}.'.format(
        ', '.join(unsupported),
        ', '.join(permitted),
    )
    # Attach a non-field error so the form reports the bad parameters.
    self.form.add_error(field=None, error=ParseError(msg))
### Input:
Ensure no unsupported query params were used.
### Response:
def validate_query_params(self):
"""Ensure no unsupported query params were used."""
allowed_params = set(self.get_filters().keys())
allowed_params.update(self.get_always_allowed_arguments())
unallowed = set(self.request.query_params.keys()) - allowed_params
if unallowed:
msg = 'Unsupported parameter(s): {}. Please use a combination of: {}.'.format(
', '.join(unallowed),
', '.join(allowed_params),
)
self.form.add_error(field=None, error=ParseError(msg)) |
def get_child(self, name: YangIdentifier,
              ns: YangIdentifier = None) -> Optional[SchemaNode]:
    """Return receiver's schema child.

    Args:
        name: Child's name.
        ns: Child's namespace (= `self.ns` if absent).
    """
    namespace = ns if ns else self.ns
    deferred = []
    for child in self.children:
        if child.name is None:
            # Unnamed children are transparent containers: search them last.
            deferred.append(child)
        elif child.name == name and child.ns == namespace:
            return child
    for child in deferred:
        found = child.get_child(name, namespace)
        if found is not None:
            return found
Args:
name: Child's name.
ns: Child's namespace (= `self.ns` if absent). | Below is the instruction that describes the task:
### Input:
Return receiver's schema child.
Args:
name: Child's name.
ns: Child's namespace (= `self.ns` if absent).
### Response:
def get_child(self, name: YangIdentifier,
ns: YangIdentifier = None) -> Optional[SchemaNode]:
"""Return receiver's schema child.
Args:
name: Child's name.
ns: Child's namespace (= `self.ns` if absent).
"""
ns = ns if ns else self.ns
todo = []
for child in self.children:
if child.name is None:
todo.append(child)
elif child.name == name and child.ns == ns:
return child
for c in todo:
grandchild = c.get_child(name, ns)
if grandchild is not None:
return grandchild |
def _generate_html(self):
    """
    Generate the HTML for the specified graphs.

    :return:
    :rtype:
    """
    logger.debug('Generating templated HTML')
    jinja_env = Environment(
        loader=PackageLoader('pypi_download_stats', 'templates'),
        extensions=['jinja2.ext.loopcontrols'])
    # Register the custom template filters used by base.html.
    for filter_name, filter_func in (
            ('format_date_long', filter_format_date_long),
            ('format_date_ymd', filter_format_date_ymd),
            ('data_columns', filter_data_columns)):
        jinja_env.filters[filter_name] = filter_func
    template = jinja_env.get_template('base.html')
    logger.debug('Rendering template')
    context = {
        'project': self.project_name,
        'cache_date': self._stats.as_of_datetime,
        'user': getuser(),
        'host': platform_node(),
        'version': VERSION,
        'proj_url': PROJECT_URL,
        'graphs': self._graphs,
        'graph_keys': self.GRAPH_KEYS,
        'resources': Resources(mode='inline').render(),
        'badges': self._badges,
    }
    html = template.render(**context)
    logger.debug('Template rendered')
    return html
:return:
:rtype: | Below is the instruction that describes the task:
### Input:
Generate the HTML for the specified graphs.
:return:
:rtype:
### Response:
def _generate_html(self):
"""
Generate the HTML for the specified graphs.
:return:
:rtype:
"""
logger.debug('Generating templated HTML')
env = Environment(
loader=PackageLoader('pypi_download_stats', 'templates'),
extensions=['jinja2.ext.loopcontrols'])
env.filters['format_date_long'] = filter_format_date_long
env.filters['format_date_ymd'] = filter_format_date_ymd
env.filters['data_columns'] = filter_data_columns
template = env.get_template('base.html')
logger.debug('Rendering template')
html = template.render(
project=self.project_name,
cache_date=self._stats.as_of_datetime,
user=getuser(),
host=platform_node(),
version=VERSION,
proj_url=PROJECT_URL,
graphs=self._graphs,
graph_keys=self.GRAPH_KEYS,
resources=Resources(mode='inline').render(),
badges=self._badges
)
logger.debug('Template rendered')
return html |
def run_mq2(plugin, folder, lod_threshold=None, session=None,
outputfolder=None):
""" Run the plugin. """
qtls_file = 'qtls.csv'
matrix_file = 'qtls_matrix.csv'
map_file = 'map.csv'
map_qtl_file = 'map_with_qtls.csv'
qtls_mk_file = 'qtls_with_mk.csv'
map_chart_file = 'MapChart.map'
if outputfolder: # pragma: no cover
qtls_file = '%s/%s' % (outputfolder, qtls_file)
qtls_mk_file = '%s/%s' % (outputfolder, qtls_mk_file)
matrix_file = '%s/%s' % (outputfolder, matrix_file)
map_file = '%s/%s' % (outputfolder, map_file)
map_qtl_file = '%s/%s' % (outputfolder, map_qtl_file)
map_chart_file = '%s/%s' % (outputfolder, map_chart_file)
LOG.debug('Call the plugin to create the map, qtls and matrix files')
if folder and os.path.isdir(folder):
plugin.convert_inputfiles(folder=folder, session=session,
lod_threshold=lod_threshold,
qtls_file=qtls_file,
matrix_file=matrix_file,
map_file=map_file)
else:
plugin.convert_inputfiles(inputfile=folder, session=session,
lod_threshold=lod_threshold,
qtls_file=qtls_file,
matrix_file=matrix_file,
map_file=map_file)
LOG.debug('Add the number of QTLs found on the matrix')
_append_count_to_matrix(matrix_file, lod_threshold)
LOG.debug('Append the closest marker to the peak')
add_marker_to_qtls(qtls_file, map_file, outputfile=qtls_mk_file)
LOG.debug('Put the number of QTLs found on each marker of the map')
add_qtl_to_map(qtls_mk_file, map_file, outputfile=map_qtl_file)
LOG.debug('Generate the mapchart file')
flanking_markers = generate_map_chart_file(
matrix_file, lod_threshold, map_chart_file=map_chart_file)
LOG.debug('Append flanking markers to qtl list')
flanking_markers = append_flanking_markers(
qtls_mk_file, flanking_markers)
if folder and os.path.isdir(folder) and os.path.exists(folder):
shutil.rmtree(folder)
return 0 | Run the plugin. | Below is the the instruction that describes the task:
### Input:
Run the plugin.
### Response:
def run_mq2(plugin, folder, lod_threshold=None, session=None,
outputfolder=None):
""" Run the plugin. """
qtls_file = 'qtls.csv'
matrix_file = 'qtls_matrix.csv'
map_file = 'map.csv'
map_qtl_file = 'map_with_qtls.csv'
qtls_mk_file = 'qtls_with_mk.csv'
map_chart_file = 'MapChart.map'
if outputfolder: # pragma: no cover
qtls_file = '%s/%s' % (outputfolder, qtls_file)
qtls_mk_file = '%s/%s' % (outputfolder, qtls_mk_file)
matrix_file = '%s/%s' % (outputfolder, matrix_file)
map_file = '%s/%s' % (outputfolder, map_file)
map_qtl_file = '%s/%s' % (outputfolder, map_qtl_file)
map_chart_file = '%s/%s' % (outputfolder, map_chart_file)
LOG.debug('Call the plugin to create the map, qtls and matrix files')
if folder and os.path.isdir(folder):
plugin.convert_inputfiles(folder=folder, session=session,
lod_threshold=lod_threshold,
qtls_file=qtls_file,
matrix_file=matrix_file,
map_file=map_file)
else:
plugin.convert_inputfiles(inputfile=folder, session=session,
lod_threshold=lod_threshold,
qtls_file=qtls_file,
matrix_file=matrix_file,
map_file=map_file)
LOG.debug('Add the number of QTLs found on the matrix')
_append_count_to_matrix(matrix_file, lod_threshold)
LOG.debug('Append the closest marker to the peak')
add_marker_to_qtls(qtls_file, map_file, outputfile=qtls_mk_file)
LOG.debug('Put the number of QTLs found on each marker of the map')
add_qtl_to_map(qtls_mk_file, map_file, outputfile=map_qtl_file)
LOG.debug('Generate the mapchart file')
flanking_markers = generate_map_chart_file(
matrix_file, lod_threshold, map_chart_file=map_chart_file)
LOG.debug('Append flanking markers to qtl list')
flanking_markers = append_flanking_markers(
qtls_mk_file, flanking_markers)
if folder and os.path.isdir(folder) and os.path.exists(folder):
shutil.rmtree(folder)
return 0 |
def create_csv(filename, csv_data, mode="w"):
"""
Create a CSV file with the given data and store it in the
file with the given name.
:param filename: name of the file to store the data in
:pram csv_data: the data to be stored in the file
:param mode: the mode in which we have to open the file. It can
be 'w', 'a', etc. Default is 'w'
"""
with open(filename, mode) as f:
csv_data.replace("_", r"\_")
f.write(csv_data) | Create a CSV file with the given data and store it in the
file with the given name.
:param filename: name of the file to store the data in
:pram csv_data: the data to be stored in the file
:param mode: the mode in which we have to open the file. It can
be 'w', 'a', etc. Default is 'w' | Below is the the instruction that describes the task:
### Input:
Create a CSV file with the given data and store it in the
file with the given name.
:param filename: name of the file to store the data in
:pram csv_data: the data to be stored in the file
:param mode: the mode in which we have to open the file. It can
be 'w', 'a', etc. Default is 'w'
### Response:
def create_csv(filename, csv_data, mode="w"):
"""
Create a CSV file with the given data and store it in the
file with the given name.
:param filename: name of the file to store the data in
:pram csv_data: the data to be stored in the file
:param mode: the mode in which we have to open the file. It can
be 'w', 'a', etc. Default is 'w'
"""
with open(filename, mode) as f:
csv_data.replace("_", r"\_")
f.write(csv_data) |
def clip_out_of_image(self):
"""
Clip off all parts from all bounding boxes that are outside of the image.
Returns
-------
imgaug.BoundingBoxesOnImage
Bounding boxes, clipped to fall within the image dimensions.
"""
bbs_cut = [bb.clip_out_of_image(self.shape)
for bb in self.bounding_boxes if bb.is_partly_within_image(self.shape)]
return BoundingBoxesOnImage(bbs_cut, shape=self.shape) | Clip off all parts from all bounding boxes that are outside of the image.
Returns
-------
imgaug.BoundingBoxesOnImage
Bounding boxes, clipped to fall within the image dimensions. | Below is the the instruction that describes the task:
### Input:
Clip off all parts from all bounding boxes that are outside of the image.
Returns
-------
imgaug.BoundingBoxesOnImage
Bounding boxes, clipped to fall within the image dimensions.
### Response:
def clip_out_of_image(self):
"""
Clip off all parts from all bounding boxes that are outside of the image.
Returns
-------
imgaug.BoundingBoxesOnImage
Bounding boxes, clipped to fall within the image dimensions.
"""
bbs_cut = [bb.clip_out_of_image(self.shape)
for bb in self.bounding_boxes if bb.is_partly_within_image(self.shape)]
return BoundingBoxesOnImage(bbs_cut, shape=self.shape) |
def get_template_dirs():
"""Return a set of all template directories."""
temp_glob = rel_to_cwd('templates', '**', 'templates', 'config.yaml')
temp_groups = glob(temp_glob)
temp_groups = [get_parent_dir(path, 2) for path in temp_groups]
return set(temp_groups) | Return a set of all template directories. | Below is the the instruction that describes the task:
### Input:
Return a set of all template directories.
### Response:
def get_template_dirs():
"""Return a set of all template directories."""
temp_glob = rel_to_cwd('templates', '**', 'templates', 'config.yaml')
temp_groups = glob(temp_glob)
temp_groups = [get_parent_dir(path, 2) for path in temp_groups]
return set(temp_groups) |
def format_py3o_val(value):
"""
format a value to fit py3o's context
* Handle linebreaks
"""
value = force_unicode(value)
value = escape(value)
value = value.replace(u'\n', u'<text:line-break/>')
return Markup(value) | format a value to fit py3o's context
* Handle linebreaks | Below is the the instruction that describes the task:
### Input:
format a value to fit py3o's context
* Handle linebreaks
### Response:
def format_py3o_val(value):
"""
format a value to fit py3o's context
* Handle linebreaks
"""
value = force_unicode(value)
value = escape(value)
value = value.replace(u'\n', u'<text:line-break/>')
return Markup(value) |
def iter_pth_paths(filename):
"""Given a .pth file, extract and yield all inner paths without honoring imports. This shadows
python's site.py behavior, which is invoked at interpreter startup."""
try:
f = open(filename, 'rU') # noqa
except IOError:
return
dirname = os.path.dirname(filename)
known_paths = set()
with f:
for line in f:
line = line.rstrip()
if not line or line.startswith('#'):
continue
elif line.startswith(('import ', 'import\t')):
try:
exec_function(line, globals_map={})
continue
except Exception:
# NB: import lines are routinely abused with extra code appended using `;` so the class of
# exceptions that might be raised in broader than ImportError. As such we cacth broadly
# here.
# Defer error handling to the higher level site.py logic invoked at startup.
return
else:
extras_dir, extras_dir_case_insensitive = makepath(dirname, line)
if extras_dir_case_insensitive not in known_paths and os.path.exists(extras_dir):
yield extras_dir
known_paths.add(extras_dir_case_insensitive) | Given a .pth file, extract and yield all inner paths without honoring imports. This shadows
python's site.py behavior, which is invoked at interpreter startup. | Below is the the instruction that describes the task:
### Input:
Given a .pth file, extract and yield all inner paths without honoring imports. This shadows
python's site.py behavior, which is invoked at interpreter startup.
### Response:
def iter_pth_paths(filename):
"""Given a .pth file, extract and yield all inner paths without honoring imports. This shadows
python's site.py behavior, which is invoked at interpreter startup."""
try:
f = open(filename, 'rU') # noqa
except IOError:
return
dirname = os.path.dirname(filename)
known_paths = set()
with f:
for line in f:
line = line.rstrip()
if not line or line.startswith('#'):
continue
elif line.startswith(('import ', 'import\t')):
try:
exec_function(line, globals_map={})
continue
except Exception:
# NB: import lines are routinely abused with extra code appended using `;` so the class of
# exceptions that might be raised in broader than ImportError. As such we cacth broadly
# here.
# Defer error handling to the higher level site.py logic invoked at startup.
return
else:
extras_dir, extras_dir_case_insensitive = makepath(dirname, line)
if extras_dir_case_insensitive not in known_paths and os.path.exists(extras_dir):
yield extras_dir
known_paths.add(extras_dir_case_insensitive) |
def ensure_provisioning(
table_name, key_name,
num_consec_read_checks,
num_consec_write_checks):
""" Ensure that provisioning is correct
:type table_name: str
:param table_name: Name of the DynamoDB table
:type key_name: str
:param key_name: Configuration option key name
:type num_consec_read_checks: int
:param num_consec_read_checks: How many consecutive checks have we had
:type num_consec_write_checks: int
:param num_consec_write_checks: How many consecutive checks have we had
:returns: (int, int) -- num_consec_read_checks, num_consec_write_checks
"""
if get_global_option('circuit_breaker_url') or get_table_option(
key_name, 'circuit_breaker_url'):
if circuit_breaker.is_open(table_name, key_name):
logger.warning('Circuit breaker is OPEN!')
return (0, 0)
# Handle throughput alarm checks
__ensure_provisioning_alarm(table_name, key_name)
try:
read_update_needed, updated_read_units, num_consec_read_checks = \
__ensure_provisioning_reads(
table_name,
key_name,
num_consec_read_checks)
write_update_needed, updated_write_units, num_consec_write_checks = \
__ensure_provisioning_writes(
table_name,
key_name,
num_consec_write_checks)
if read_update_needed:
num_consec_read_checks = 0
if write_update_needed:
num_consec_write_checks = 0
# Handle throughput updates
if read_update_needed or write_update_needed:
logger.info(
'{0} - Changing provisioning to {1:d} '
'read units and {2:d} write units'.format(
table_name,
int(updated_read_units),
int(updated_write_units)))
__update_throughput(
table_name,
key_name,
updated_read_units,
updated_write_units)
else:
logger.info('{0} - No need to change provisioning'.format(
table_name))
except JSONResponseError:
raise
except BotoServerError:
raise
return num_consec_read_checks, num_consec_write_checks | Ensure that provisioning is correct
:type table_name: str
:param table_name: Name of the DynamoDB table
:type key_name: str
:param key_name: Configuration option key name
:type num_consec_read_checks: int
:param num_consec_read_checks: How many consecutive checks have we had
:type num_consec_write_checks: int
:param num_consec_write_checks: How many consecutive checks have we had
:returns: (int, int) -- num_consec_read_checks, num_consec_write_checks | Below is the the instruction that describes the task:
### Input:
Ensure that provisioning is correct
:type table_name: str
:param table_name: Name of the DynamoDB table
:type key_name: str
:param key_name: Configuration option key name
:type num_consec_read_checks: int
:param num_consec_read_checks: How many consecutive checks have we had
:type num_consec_write_checks: int
:param num_consec_write_checks: How many consecutive checks have we had
:returns: (int, int) -- num_consec_read_checks, num_consec_write_checks
### Response:
def ensure_provisioning(
table_name, key_name,
num_consec_read_checks,
num_consec_write_checks):
""" Ensure that provisioning is correct
:type table_name: str
:param table_name: Name of the DynamoDB table
:type key_name: str
:param key_name: Configuration option key name
:type num_consec_read_checks: int
:param num_consec_read_checks: How many consecutive checks have we had
:type num_consec_write_checks: int
:param num_consec_write_checks: How many consecutive checks have we had
:returns: (int, int) -- num_consec_read_checks, num_consec_write_checks
"""
if get_global_option('circuit_breaker_url') or get_table_option(
key_name, 'circuit_breaker_url'):
if circuit_breaker.is_open(table_name, key_name):
logger.warning('Circuit breaker is OPEN!')
return (0, 0)
# Handle throughput alarm checks
__ensure_provisioning_alarm(table_name, key_name)
try:
read_update_needed, updated_read_units, num_consec_read_checks = \
__ensure_provisioning_reads(
table_name,
key_name,
num_consec_read_checks)
write_update_needed, updated_write_units, num_consec_write_checks = \
__ensure_provisioning_writes(
table_name,
key_name,
num_consec_write_checks)
if read_update_needed:
num_consec_read_checks = 0
if write_update_needed:
num_consec_write_checks = 0
# Handle throughput updates
if read_update_needed or write_update_needed:
logger.info(
'{0} - Changing provisioning to {1:d} '
'read units and {2:d} write units'.format(
table_name,
int(updated_read_units),
int(updated_write_units)))
__update_throughput(
table_name,
key_name,
updated_read_units,
updated_write_units)
else:
logger.info('{0} - No need to change provisioning'.format(
table_name))
except JSONResponseError:
raise
except BotoServerError:
raise
return num_consec_read_checks, num_consec_write_checks |
def remove_port(uri):
"""Remove the port number from a URI
:param uri: full URI that Twilio requested on your server
:returns: full URI without a port number
:rtype: str
"""
new_netloc = uri.netloc.split(':')[0]
new_uri = uri._replace(netloc=new_netloc)
return new_uri.geturl() | Remove the port number from a URI
:param uri: full URI that Twilio requested on your server
:returns: full URI without a port number
:rtype: str | Below is the the instruction that describes the task:
### Input:
Remove the port number from a URI
:param uri: full URI that Twilio requested on your server
:returns: full URI without a port number
:rtype: str
### Response:
def remove_port(uri):
"""Remove the port number from a URI
:param uri: full URI that Twilio requested on your server
:returns: full URI without a port number
:rtype: str
"""
new_netloc = uri.netloc.split(':')[0]
new_uri = uri._replace(netloc=new_netloc)
return new_uri.geturl() |
def store(self, addr, length=1, non_temporal=False):
"""
Store one or more adresses.
:param addr: byte address of store location
:param length: All address from addr until addr+length (exclusive) are
stored (default: 1)
:param non_temporal: if True, no write-allocate will be issued, but cacheline will be zeroed
"""
if non_temporal:
raise ValueError("non_temporal stores are not yet supported")
if addr is None:
return
elif not isinstance(addr, Iterable):
self.first_level.store(addr, length=length)
else:
self.first_level.iterstore(addr, length=length) | Store one or more adresses.
:param addr: byte address of store location
:param length: All address from addr until addr+length (exclusive) are
stored (default: 1)
:param non_temporal: if True, no write-allocate will be issued, but cacheline will be zeroed | Below is the the instruction that describes the task:
### Input:
Store one or more adresses.
:param addr: byte address of store location
:param length: All address from addr until addr+length (exclusive) are
stored (default: 1)
:param non_temporal: if True, no write-allocate will be issued, but cacheline will be zeroed
### Response:
def store(self, addr, length=1, non_temporal=False):
"""
Store one or more adresses.
:param addr: byte address of store location
:param length: All address from addr until addr+length (exclusive) are
stored (default: 1)
:param non_temporal: if True, no write-allocate will be issued, but cacheline will be zeroed
"""
if non_temporal:
raise ValueError("non_temporal stores are not yet supported")
if addr is None:
return
elif not isinstance(addr, Iterable):
self.first_level.store(addr, length=length)
else:
self.first_level.iterstore(addr, length=length) |
def fmt(self, value):
""" Sets self.fmt, with some extra help for plain format strings. """
if isinstance(value, str):
value = value.split(self.join_str)
if not (value and isinstance(value, (list, tuple))):
raise TypeError(
' '.join((
'Expecting str or list/tuple of formats {!r}.',
'Got: ({}) {!r}'
)).format(
self.default_format,
type(value).__name__,
value,
))
self._fmt = value | Sets self.fmt, with some extra help for plain format strings. | Below is the the instruction that describes the task:
### Input:
Sets self.fmt, with some extra help for plain format strings.
### Response:
def fmt(self, value):
""" Sets self.fmt, with some extra help for plain format strings. """
if isinstance(value, str):
value = value.split(self.join_str)
if not (value and isinstance(value, (list, tuple))):
raise TypeError(
' '.join((
'Expecting str or list/tuple of formats {!r}.',
'Got: ({}) {!r}'
)).format(
self.default_format,
type(value).__name__,
value,
))
self._fmt = value |
def dendrite_filter(n):
'''Select only dendrites'''
return n.type == NeuriteType.basal_dendrite or n.type == NeuriteType.apical_dendrite | Select only dendrites | Below is the the instruction that describes the task:
### Input:
Select only dendrites
### Response:
def dendrite_filter(n):
'''Select only dendrites'''
return n.type == NeuriteType.basal_dendrite or n.type == NeuriteType.apical_dendrite |
def conn_aws(cred, crid):
"""Establish connection to AWS service."""
driver = get_driver(Provider.EC2)
try:
aws_obj = driver(cred['aws_access_key_id'],
cred['aws_secret_access_key'],
region=cred['aws_default_region'])
except SSLError as e:
abort_err("\r SSL Error with AWS: {}".format(e))
except InvalidCredsError as e:
abort_err("\r Error with AWS Credentials: {}".format(e))
return {crid: aws_obj} | Establish connection to AWS service. | Below is the the instruction that describes the task:
### Input:
Establish connection to AWS service.
### Response:
def conn_aws(cred, crid):
"""Establish connection to AWS service."""
driver = get_driver(Provider.EC2)
try:
aws_obj = driver(cred['aws_access_key_id'],
cred['aws_secret_access_key'],
region=cred['aws_default_region'])
except SSLError as e:
abort_err("\r SSL Error with AWS: {}".format(e))
except InvalidCredsError as e:
abort_err("\r Error with AWS Credentials: {}".format(e))
return {crid: aws_obj} |
def get_ilvl(li, w_namespace):
"""
The ilvl on an li tag tells the li tag at what level of indentation this
tag is at. This is used to determine if the li tag needs to be nested or
not.
"""
ilvls = li.xpath('.//w:ilvl', namespaces=li.nsmap)
if len(ilvls) == 0:
return -1
return int(ilvls[0].get('%sval' % w_namespace)) | The ilvl on an li tag tells the li tag at what level of indentation this
tag is at. This is used to determine if the li tag needs to be nested or
not. | Below is the the instruction that describes the task:
### Input:
The ilvl on an li tag tells the li tag at what level of indentation this
tag is at. This is used to determine if the li tag needs to be nested or
not.
### Response:
def get_ilvl(li, w_namespace):
"""
The ilvl on an li tag tells the li tag at what level of indentation this
tag is at. This is used to determine if the li tag needs to be nested or
not.
"""
ilvls = li.xpath('.//w:ilvl', namespaces=li.nsmap)
if len(ilvls) == 0:
return -1
return int(ilvls[0].get('%sval' % w_namespace)) |
def realForm(self, req, tag):
"""
Render L{liveForm}.
"""
self.liveForm.setFragmentParent(self)
return self.liveForm | Render L{liveForm}. | Below is the the instruction that describes the task:
### Input:
Render L{liveForm}.
### Response:
def realForm(self, req, tag):
"""
Render L{liveForm}.
"""
self.liveForm.setFragmentParent(self)
return self.liveForm |
def load_info(self):
''' Get info from logged account '''
headers = {"Content-type": "application/x-www-form-urlencoded","Accept": "text/plain",'Referer': 'http://'+self.domain+'/login.phtml',"User-Agent": user_agent}
req = self.session.get('http://'+self.domain+'/team_news.phtml',headers=headers).content
soup = BeautifulSoup(req)
self.title = soup.title.string
estado = soup.find('div',{'id':'content'}).find('div',{'id':'manager'}).string
if estado:
print estado.strip()
return
[s.extract() for s in soup('strong')]
if (soup.find('div',{'id':'userid'}) != None):
self.myid = soup.find('div',{'id':'userid'}).p.text.strip()[2:]
self.money = int(soup.find('div',{'id':'manager_money'}).p.text.strip().replace(".","")[:-2])
self.teamvalue = int(soup.find('div',{'id':'teamvalue'}).p.text.strip().replace(".","")[:-2])
self.community_id = soup.find('link')['href'][24:]
self.username = soup.find('div',{'id':'username'}).p.a.text | Get info from logged account | Below is the the instruction that describes the task:
### Input:
Get info from logged account
### Response:
def load_info(self):
''' Get info from logged account '''
headers = {"Content-type": "application/x-www-form-urlencoded","Accept": "text/plain",'Referer': 'http://'+self.domain+'/login.phtml',"User-Agent": user_agent}
req = self.session.get('http://'+self.domain+'/team_news.phtml',headers=headers).content
soup = BeautifulSoup(req)
self.title = soup.title.string
estado = soup.find('div',{'id':'content'}).find('div',{'id':'manager'}).string
if estado:
print estado.strip()
return
[s.extract() for s in soup('strong')]
if (soup.find('div',{'id':'userid'}) != None):
self.myid = soup.find('div',{'id':'userid'}).p.text.strip()[2:]
self.money = int(soup.find('div',{'id':'manager_money'}).p.text.strip().replace(".","")[:-2])
self.teamvalue = int(soup.find('div',{'id':'teamvalue'}).p.text.strip().replace(".","")[:-2])
self.community_id = soup.find('link')['href'][24:]
self.username = soup.find('div',{'id':'username'}).p.a.text |
def sort_languages(self, order=Qt.AscendingOrder):
"""
Sorts the Model languages.
:param order: Order. ( Qt.SortOrder )
"""
self.beginResetModel()
self.__languages = sorted(self.__languages, key=lambda x: (x.name), reverse=order)
self.endResetModel() | Sorts the Model languages.
:param order: Order. ( Qt.SortOrder ) | Below is the the instruction that describes the task:
### Input:
Sorts the Model languages.
:param order: Order. ( Qt.SortOrder )
### Response:
def sort_languages(self, order=Qt.AscendingOrder):
"""
Sorts the Model languages.
:param order: Order. ( Qt.SortOrder )
"""
self.beginResetModel()
self.__languages = sorted(self.__languages, key=lambda x: (x.name), reverse=order)
self.endResetModel() |
def make_controller(cls, config, session, left_menu_items=None):
"""New CRUD controllers using the admin configuration can be created using this."""
m = config.model
Controller = config.defaultCrudRestController
class ModelController(Controller):
model = m
table = config.table_type(session)
table_filler = config.table_filler_type(session)
new_form = config.new_form_type(session)
new_filler = config.new_filler_type(session)
edit_form = config.edit_form_type(session)
edit_filler = config.edit_filler_type(session)
allow_only = config.allow_only
if hasattr(config.layout, 'crud_resources'):
resources = config.layout.crud_resources
def _before(self, *args, **kw):
super(self.__class__, self)._before(*args, **kw)
tmpl_context.make_pager_args = make_pager_args
if request.response_type not in ('application/json',):
default_renderer = AdminController._get_default_renderer()
for action in ('get_all', 'new', 'edit'):
for template in config.layout.crud_templates.get(action, []):
if template.startswith(default_renderer):
override_template(getattr(self, action), template)
return ModelController(session, left_menu_items) | New CRUD controllers using the admin configuration can be created using this. | Below is the the instruction that describes the task:
### Input:
New CRUD controllers using the admin configuration can be created using this.
### Response:
def make_controller(cls, config, session, left_menu_items=None):
"""New CRUD controllers using the admin configuration can be created using this."""
m = config.model
Controller = config.defaultCrudRestController
class ModelController(Controller):
model = m
table = config.table_type(session)
table_filler = config.table_filler_type(session)
new_form = config.new_form_type(session)
new_filler = config.new_filler_type(session)
edit_form = config.edit_form_type(session)
edit_filler = config.edit_filler_type(session)
allow_only = config.allow_only
if hasattr(config.layout, 'crud_resources'):
resources = config.layout.crud_resources
def _before(self, *args, **kw):
super(self.__class__, self)._before(*args, **kw)
tmpl_context.make_pager_args = make_pager_args
if request.response_type not in ('application/json',):
default_renderer = AdminController._get_default_renderer()
for action in ('get_all', 'new', 'edit'):
for template in config.layout.crud_templates.get(action, []):
if template.startswith(default_renderer):
override_template(getattr(self, action), template)
return ModelController(session, left_menu_items) |
def _process_removed_port(self, device):
"""Process the removed ports."""
LOG.debug("Trying to remove the port %r", device)
self._update_port_status_cache(device, device_bound=False)
self._port_unbound(device, vnic_deleted=True)
LOG.debug("The port was successfully removed.")
self._removed_ports.discard(device) | Process the removed ports. | Below is the the instruction that describes the task:
### Input:
Process the removed ports.
### Response:
def _process_removed_port(self, device):
"""Process the removed ports."""
LOG.debug("Trying to remove the port %r", device)
self._update_port_status_cache(device, device_bound=False)
self._port_unbound(device, vnic_deleted=True)
LOG.debug("The port was successfully removed.")
self._removed_ports.discard(device) |
def _generate_edges(self, edge_prob):
"""Generate a random tree-structured dependency graph based on a
specified edge probability.
Also create helper data struct mapping child -> parent.
"""
self.E, self.parent = [], {}
for i in range(self.m):
if random() < edge_prob and i > 0:
p_i = choice(i)
self.E.append((p_i, i))
self.parent[i] = p_i | Generate a random tree-structured dependency graph based on a
specified edge probability.
Also create helper data struct mapping child -> parent. | Below is the the instruction that describes the task:
### Input:
Generate a random tree-structured dependency graph based on a
specified edge probability.
Also create helper data struct mapping child -> parent.
### Response:
def _generate_edges(self, edge_prob):
"""Generate a random tree-structured dependency graph based on a
specified edge probability.
Also create helper data struct mapping child -> parent.
"""
self.E, self.parent = [], {}
for i in range(self.m):
if random() < edge_prob and i > 0:
p_i = choice(i)
self.E.append((p_i, i))
self.parent[i] = p_i |
def parse_memory(memory):
"""
Parses a string representing memory and returns
an integer # of bytes.
:param memory:
:return:
"""
memory = str(memory)
if 'None' in memory:
return 2147483648 # toil's default
try:
import re
raw_mem_split = re.split('([a-zA-Z]+)', memory)
mem_split = []
for r in raw_mem_split:
if r:
mem_split.append(r.replace(' ', ''))
if len(mem_split) == 1:
return int(memory)
if len(mem_split) == 2:
num = mem_split[0]
unit = mem_split[1]
return int(float(num) * return_bytes(unit))
else:
raise RuntimeError('Memory parsing failed: {}'.format(memory))
except:
return 2147483648 | Parses a string representing memory and returns
an integer # of bytes.
:param memory:
:return: | Below is the the instruction that describes the task:
### Input:
Parses a string representing memory and returns
an integer # of bytes.
:param memory:
:return:
### Response:
def parse_memory(memory):
"""
Parses a string representing memory and returns
an integer # of bytes.
:param memory:
:return:
"""
memory = str(memory)
if 'None' in memory:
return 2147483648 # toil's default
try:
import re
raw_mem_split = re.split('([a-zA-Z]+)', memory)
mem_split = []
for r in raw_mem_split:
if r:
mem_split.append(r.replace(' ', ''))
if len(mem_split) == 1:
return int(memory)
if len(mem_split) == 2:
num = mem_split[0]
unit = mem_split[1]
return int(float(num) * return_bytes(unit))
else:
raise RuntimeError('Memory parsing failed: {}'.format(memory))
except:
return 2147483648 |
def validate_one_format(jupytext_format):
"""Validate extension and options for the given format"""
if not isinstance(jupytext_format, dict):
raise JupytextFormatError('Jupytext format should be a dictionary')
for key in jupytext_format:
if key not in _VALID_FORMAT_INFO + _VALID_FORMAT_OPTIONS:
raise JupytextFormatError("Unknown format option '{}' - should be one of '{}'".format(
key, "', '".join(_VALID_FORMAT_OPTIONS)))
value = jupytext_format[key]
if key in _BINARY_FORMAT_OPTIONS:
if not isinstance(value, bool):
raise JupytextFormatError("Format option '{}' should be a bool, not '{}'".format(key, str(value)))
if 'extension' not in jupytext_format:
raise JupytextFormatError('Missing format extension')
ext = jupytext_format['extension']
if ext not in NOTEBOOK_EXTENSIONS + ['.auto']:
raise JupytextFormatError("Extension '{}' is not a notebook extension. Please use one of '{}'.".format(
ext, "', '".join(NOTEBOOK_EXTENSIONS + ['.auto'])))
return jupytext_format | Validate extension and options for the given format | Below is the the instruction that describes the task:
### Input:
Validate extension and options for the given format
### Response:
def validate_one_format(jupytext_format):
"""Validate extension and options for the given format"""
if not isinstance(jupytext_format, dict):
raise JupytextFormatError('Jupytext format should be a dictionary')
for key in jupytext_format:
if key not in _VALID_FORMAT_INFO + _VALID_FORMAT_OPTIONS:
raise JupytextFormatError("Unknown format option '{}' - should be one of '{}'".format(
key, "', '".join(_VALID_FORMAT_OPTIONS)))
value = jupytext_format[key]
if key in _BINARY_FORMAT_OPTIONS:
if not isinstance(value, bool):
raise JupytextFormatError("Format option '{}' should be a bool, not '{}'".format(key, str(value)))
if 'extension' not in jupytext_format:
raise JupytextFormatError('Missing format extension')
ext = jupytext_format['extension']
if ext not in NOTEBOOK_EXTENSIONS + ['.auto']:
raise JupytextFormatError("Extension '{}' is not a notebook extension. Please use one of '{}'.".format(
ext, "', '".join(NOTEBOOK_EXTENSIONS + ['.auto'])))
return jupytext_format |
def as_boxes(self, solid=False):
"""
A rough Trimesh representation of the voxels with a box
for each filled voxel.
Parameters
-----------
solid: bool, if True return boxes for sparse_solid
Returns
---------
mesh: Trimesh object made up of one box per filled cell.
"""
if solid:
filled = self.sparse_solid
else:
filled = self.sparse_surface
# center points of voxels
centers = indices_to_points(indices=filled,
pitch=self.pitch,
origin=self.origin)
mesh = multibox(centers=centers, pitch=self.pitch)
return mesh | A rough Trimesh representation of the voxels with a box
for each filled voxel.
Parameters
-----------
solid: bool, if True return boxes for sparse_solid
Returns
---------
mesh: Trimesh object made up of one box per filled cell. | Below is the instruction that describes the task:
### Input:
A rough Trimesh representation of the voxels with a box
for each filled voxel.
Parameters
-----------
solid: bool, if True return boxes for sparse_solid
Returns
---------
mesh: Trimesh object made up of one box per filled cell.
### Response:
def as_boxes(self, solid=False):
"""
A rough Trimesh representation of the voxels with a box
for each filled voxel.
Parameters
-----------
solid: bool, if True return boxes for sparse_solid
Returns
---------
mesh: Trimesh object made up of one box per filled cell.
"""
if solid:
filled = self.sparse_solid
else:
filled = self.sparse_surface
# center points of voxels
centers = indices_to_points(indices=filled,
pitch=self.pitch,
origin=self.origin)
mesh = multibox(centers=centers, pitch=self.pitch)
return mesh |
def data(self):
"""Form data."""
d = super(CommunityForm, self).data
d.pop('csrf_token', None)
return d | Form data. | Below is the the instruction that describes the task:
### Input:
Form data.
### Response:
def data(self):
"""Form data."""
d = super(CommunityForm, self).data
d.pop('csrf_token', None)
return d |
def __str_cleanup(line):
"""
Remove the unnecessary characters in the line that we don't want
:param str line:
:return str:
"""
if '#' in line:
line = line.replace("#", "")
line = line.strip()
if '-----------' in line:
line = ''
return line | Remove the unnecessary characters in the line that we don't want
:param str line:
:return str: | Below is the instruction that describes the task:
### Input:
Remove the unnecessary characters in the line that we don't want
:param str line:
:return str:
### Response:
def __str_cleanup(line):
"""
Remove the unnecessary characters in the line that we don't want
:param str line:
:return str:
"""
if '#' in line:
line = line.replace("#", "")
line = line.strip()
if '-----------' in line:
line = ''
return line |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.