code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def run(self):
'''
Enter into the server loop
'''
salt.utils.process.appendproctitle(self.__class__.__name__)
# instantiate some classes inside our new process
self.event = salt.utils.event.get_event(
self.opts['__role'],
self.opts['sock_dir'],
self.opts['transport'],
opts=self.opts,
listen=True)
self.wrap = ReactWrap(self.opts)
for data in self.event.iter_events(full=True):
# skip all events fired by ourselves
if data['data'].get('user') == self.wrap.event_user:
continue
# NOTE: these events must contain the masters key in order to be accepted
# see salt.runners.reactor for the requesting interface
if 'salt/reactors/manage' in data['tag']:
master_key = salt.utils.master.get_master_key('root', self.opts)
if data['data'].get('key') != master_key:
log.error('received salt/reactors/manage event without matching master_key. discarding')
continue
if data['tag'].endswith('salt/reactors/manage/is_leader'):
self.event.fire_event({'result': self.is_leader,
'user': self.wrap.event_user},
'salt/reactors/manage/leader/value')
if data['tag'].endswith('salt/reactors/manage/set_leader'):
# we only want to register events from the local master
if data['data'].get('id') == self.opts['id']:
self.is_leader = data['data']['value']
self.event.fire_event({'result': self.is_leader,
'user': self.wrap.event_user},
'salt/reactors/manage/leader/value')
if data['tag'].endswith('salt/reactors/manage/add'):
_data = data['data']
res = self.add_reactor(_data['event'], _data['reactors'])
self.event.fire_event({'reactors': self.list_all(),
'result': res,
'user': self.wrap.event_user},
'salt/reactors/manage/add-complete')
elif data['tag'].endswith('salt/reactors/manage/delete'):
_data = data['data']
res = self.delete_reactor(_data['event'])
self.event.fire_event({'reactors': self.list_all(),
'result': res,
'user': self.wrap.event_user},
'salt/reactors/manage/delete-complete')
elif data['tag'].endswith('salt/reactors/manage/list'):
self.event.fire_event({'reactors': self.list_all(),
'user': self.wrap.event_user},
'salt/reactors/manage/list-results')
# do not handle any reactions if not leader in cluster
if not self.is_leader:
continue
else:
reactors = self.list_reactors(data['tag'])
if not reactors:
continue
chunks = self.reactions(data['tag'], data['data'], reactors)
if chunks:
if self.opts['master_stats']:
_data = data['data']
start = time.time()
try:
self.call_reactions(chunks)
except SystemExit:
log.warning('Exit ignored by reactor')
if self.opts['master_stats']:
stats = salt.utils.event.update_stats(self.stats, start, _data)
self._post_stats(stats) | Enter into the server loop | Below is the the instruction that describes the task:
### Input:
Enter into the server loop
### Response:
def run(self):
    '''
    Enter into the server loop.

    Binds to the salt event bus, then processes events forever:
    reactor-management events (leader queries, add/delete/list of
    reactor mappings) are handled first, and every other event is
    matched against the configured reactors and dispatched.
    '''
    salt.utils.process.appendproctitle(self.__class__.__name__)
    # instantiate some classes inside our new process
    self.event = salt.utils.event.get_event(
        self.opts['__role'],
        self.opts['sock_dir'],
        self.opts['transport'],
        opts=self.opts,
        listen=True)
    self.wrap = ReactWrap(self.opts)
    for data in self.event.iter_events(full=True):
        # skip all events fired by ourselves
        if data['data'].get('user') == self.wrap.event_user:
            continue
        # NOTE: these events must contain the masters key in order to be accepted
        # see salt.runners.reactor for the requesting interface
        if 'salt/reactors/manage' in data['tag']:
            master_key = salt.utils.master.get_master_key('root', self.opts)
            if data['data'].get('key') != master_key:
                log.error('received salt/reactors/manage event without matching master_key. discarding')
                continue
        # management events: report/update leadership, mutate the reactor map
        if data['tag'].endswith('salt/reactors/manage/is_leader'):
            self.event.fire_event({'result': self.is_leader,
                                   'user': self.wrap.event_user},
                                  'salt/reactors/manage/leader/value')
        if data['tag'].endswith('salt/reactors/manage/set_leader'):
            # we only want to register events from the local master
            if data['data'].get('id') == self.opts['id']:
                self.is_leader = data['data']['value']
            # always answer with the (possibly just-updated) leadership value
            self.event.fire_event({'result': self.is_leader,
                                   'user': self.wrap.event_user},
                                  'salt/reactors/manage/leader/value')
        if data['tag'].endswith('salt/reactors/manage/add'):
            _data = data['data']
            res = self.add_reactor(_data['event'], _data['reactors'])
            self.event.fire_event({'reactors': self.list_all(),
                                   'result': res,
                                   'user': self.wrap.event_user},
                                  'salt/reactors/manage/add-complete')
        elif data['tag'].endswith('salt/reactors/manage/delete'):
            _data = data['data']
            res = self.delete_reactor(_data['event'])
            self.event.fire_event({'reactors': self.list_all(),
                                   'result': res,
                                   'user': self.wrap.event_user},
                                  'salt/reactors/manage/delete-complete')
        elif data['tag'].endswith('salt/reactors/manage/list'):
            self.event.fire_event({'reactors': self.list_all(),
                                   'user': self.wrap.event_user},
                                  'salt/reactors/manage/list-results')
        # do not handle any reactions if not leader in cluster
        if not self.is_leader:
            continue
        else:
            reactors = self.list_reactors(data['tag'])
            if not reactors:
                continue
            chunks = self.reactions(data['tag'], data['data'], reactors)
            if chunks:
                # optionally time the reaction dispatch for master stats
                if self.opts['master_stats']:
                    _data = data['data']
                    start = time.time()
                try:
                    self.call_reactions(chunks)
                except SystemExit:
                    # a reactor must never be able to kill this loop
                    log.warning('Exit ignored by reactor')
                if self.opts['master_stats']:
                    stats = salt.utils.event.update_stats(self.stats, start, _data)
                    self._post_stats(stats)
def needs_packages(*packages):
'''Decorator: ensure that packages are installed on host given by fabric
argument `-H` (local or remote).
'''
def real_decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
non_installed = _non_installed(packages)
if non_installed:
what_for = 'in order to run this task'
install_packages(non_installed, what_for=what_for)
return func(*args, **kwargs)
return wrapper
return real_decorator | Decorator: ensure that packages are installed on host given by fabric
argument `-H` (local or remote). | Below is the the instruction that describes the task:
### Input:
Decorator: ensure that packages are installed on host given by fabric
argument `-H` (local or remote).
### Response:
def needs_packages(*packages):
    '''Decorator: ensure that packages are installed on host given by fabric
    argument `-H` (local or remote).

    Any of ``packages`` not yet present are installed before the wrapped
    task runs; the task's return value is passed through unchanged.
    '''
    def real_decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # install only what is actually missing on the target host
            missing = _non_installed(packages)
            if missing:
                install_packages(missing, what_for='in order to run this task')
            return func(*args, **kwargs)
        return wrapper
    return real_decorator
def watch(directory=None, auto_clear=False, extensions=[]):
"""Starts a server to render the specified file or directory containing a README."""
if directory and not os.path.isdir(directory):
raise ValueError('Directory not found: ' + directory)
directory = os.path.abspath(directory)
# Initial run
event_handler = ChangeHandler(directory, auto_clear, extensions)
event_handler.run()
# Setup watchdog
observer = Observer()
observer.schedule(event_handler, path=directory, recursive=True)
observer.start()
# Watch and run tests until interrupted by user
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join() | Starts a server to render the specified file or directory containing a README. | Below is the instruction that describes the task:
### Input:
Starts a server to render the specified file or directory containing a README.
### Response:
def watch(directory=None, auto_clear=False, extensions=None):
    """Starts a server to render the specified file or directory containing a README.

    Watches ``directory`` recursively with watchdog, triggering the
    ChangeHandler on every change, and blocks until interrupted (Ctrl-C).

    :param directory: path to watch; raises ValueError when given but missing.
    :param auto_clear: forwarded to ChangeHandler (presumably clears output
        between runs -- TODO confirm against ChangeHandler).
    :param extensions: optional list of file extensions to react to;
        defaults to an empty list.
    """
    # Fix: the original declared ``extensions=[]`` -- a mutable default
    # argument shared across all calls. Default to None and normalize.
    if extensions is None:
        extensions = []
    if directory and not os.path.isdir(directory):
        raise ValueError('Directory not found: ' + directory)
    # NOTE(review): directory=None falls through to abspath(None), which
    # raises TypeError -- confirm callers always supply a path.
    directory = os.path.abspath(directory)
    # Initial run
    event_handler = ChangeHandler(directory, auto_clear, extensions)
    event_handler.run()
    # Setup watchdog
    observer = Observer()
    observer.schedule(event_handler, path=directory, recursive=True)
    observer.start()
    # Watch and run tests until interrupted by user
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
def addr(val, default_port=8000, defualt_host='localhost'):
"""
Convert a string of format host[:port] into Addr(host, port).
>>> addr('0:80')
Addr(host='0', port=80)
>>> addr('127.0.0.1:80')
Addr(host='127.0.0.1', port=80)
>>> addr('0.0.0.0', default_port=8000)
Addr(host='0.0.0.0', port=8000)
"""
import re
import socket
match = re.match(r'\A(?P<host>.*?)(:(?P<port>(\d+|\w+)))?\Z', val)
if match is None:
raise argparse.ArgumentTypeError(
'%r is not a valid host[:port] address.' % val
)
host, port = match.group('host', 'port')
if not host:
host = defualt_host
if not port:
port = default_port
elif port.isdigit():
port = int(port)
else:
port = socket.getservbyname(port)
return Addr(host, port) | Convert a string of format host[:port] into Addr(host, port).
>>> addr('0:80')
Addr(host='0', port=80)
>>> addr('127.0.0.1:80')
Addr(host='127.0.0.1', port=80)
>>> addr('0.0.0.0', default_port=8000)
Addr(host='0.0.0.0', port=8000) | Below is the the instruction that describes the task:
### Input:
Convert a string of format host[:port] into Addr(host, port).
>>> addr('0:80')
Addr(host='0', port=80)
>>> addr('127.0.0.1:80')
Addr(host='127.0.0.1', port=80)
>>> addr('0.0.0.0', default_port=8000)
Addr(host='0.0.0.0', port=8000)
### Response:
def addr(val, default_port=8000, defualt_host='localhost'):
    """
    Convert a string of format host[:port] into Addr(host, port).
    >>> addr('0:80')
    Addr(host='0', port=80)
    >>> addr('127.0.0.1:80')
    Addr(host='127.0.0.1', port=80)
    >>> addr('0.0.0.0', default_port=8000)
    Addr(host='0.0.0.0', port=8000)
    """
    # NOTE(review): the parameter name 'defualt_host' is misspelled, but
    # renaming it would break callers passing it as a keyword -- left as-is.
    import re
    import socket
    match = re.match(r'\A(?P<host>.*?)(:(?P<port>(\d+|\w+)))?\Z', val)
    if match is None:
        raise argparse.ArgumentTypeError(
            '%r is not a valid host[:port] address.' % val
        )
    host = match.group('host') or defualt_host
    port = match.group('port')
    if not port:
        return Addr(host, default_port)
    if port.isdigit():
        return Addr(host, int(port))
    # named service (e.g. 'http') -> numeric port via the services database
    return Addr(host, socket.getservbyname(port))
def _create_pbuilders(env, runas='root'):
'''
Create the .pbuilder family of files in user's home directory
env
A list or dictionary of environment variables to be set prior to execution.
Example:
.. code-block:: yaml
- env:
- DEB_BUILD_OPTIONS: 'nocheck'
.. warning::
The above illustrates a common PyYAML pitfall, that **yes**,
**no**, **on**, **off**, **true**, and **false** are all loaded as
boolean ``True`` and ``False`` values, and must be enclosed in
quotes to be used as strings. More info on this (and other) PyYAML
idiosyncrasies can be found :ref:`here <yaml-idiosyncrasies>`.
runas : root
.. versionadded:: fluorine
User to create the files and directories
.. note::
Ensure the user has correct permissions to any files and
directories which are to be utilized.
'''
home = os.path.expanduser('~{0}'.format(runas))
pbuilderrc = os.path.join(home, '.pbuilderrc')
if not os.path.isfile(pbuilderrc):
raise SaltInvocationError(
'pbuilderrc environment is incorrectly setup'
)
env_overrides = _get_build_env(env)
if env_overrides and not env_overrides.isspace():
with salt.utils.files.fopen(pbuilderrc, 'a') as fow:
fow.write(salt.utils.stringutils.to_str(env_overrides))
cmd = "chown {0}:{0} {1}".format(runas, pbuilderrc)
retrc = __salt__['cmd.retcode'](cmd, runas='root')
if retrc != 0:
raise SaltInvocationError(
"Create pbuilderrc in home directory failed with return error \'{0}\', "
"check logs for further details".format(
retrc)
) | Create the .pbuilder family of files in user's home directory
env
A list or dictionary of environment variables to be set prior to execution.
Example:
.. code-block:: yaml
- env:
- DEB_BUILD_OPTIONS: 'nocheck'
.. warning::
The above illustrates a common PyYAML pitfall, that **yes**,
**no**, **on**, **off**, **true**, and **false** are all loaded as
boolean ``True`` and ``False`` values, and must be enclosed in
quotes to be used as strings. More info on this (and other) PyYAML
idiosyncrasies can be found :ref:`here <yaml-idiosyncrasies>`.
runas : root
.. versionadded:: fluorine
User to create the files and directories
.. note::
Ensure the user has correct permissions to any files and
directories which are to be utilized. | Below is the the instruction that describes the task:
### Input:
Create the .pbuilder family of files in user's home directory
env
A list or dictionary of environment variables to be set prior to execution.
Example:
.. code-block:: yaml
- env:
- DEB_BUILD_OPTIONS: 'nocheck'
.. warning::
The above illustrates a common PyYAML pitfall, that **yes**,
**no**, **on**, **off**, **true**, and **false** are all loaded as
boolean ``True`` and ``False`` values, and must be enclosed in
quotes to be used as strings. More info on this (and other) PyYAML
idiosyncrasies can be found :ref:`here <yaml-idiosyncrasies>`.
runas : root
.. versionadded:: fluorine
User to create the files and directories
.. note::
Ensure the user has correct permissions to any files and
directories which are to be utilized.
### Response:
def _create_pbuilders(env, runas='root'):
    '''
    Create the .pbuilder family of files in user's home directory
    env
        A list or dictionary of environment variables to be set prior to execution.
        Example:
        .. code-block:: yaml
            - env:
                - DEB_BUILD_OPTIONS: 'nocheck'
        .. warning::
            The above illustrates a common PyYAML pitfall, that **yes**,
            **no**, **on**, **off**, **true**, and **false** are all loaded as
            boolean ``True`` and ``False`` values, and must be enclosed in
            quotes to be used as strings. More info on this (and other) PyYAML
            idiosyncrasies can be found :ref:`here <yaml-idiosyncrasies>`.
    runas : root
        .. versionadded:: fluorine
        User to create the files and directories
        .. note::
            Ensure the user has correct permissions to any files and
            directories which are to be utilized.
    '''
    home = os.path.expanduser('~{0}'.format(runas))
    pbuilderrc = os.path.join(home, '.pbuilderrc')
    # the base .pbuilderrc must already exist (laid down elsewhere);
    # this function only appends environment overrides to it
    if not os.path.isfile(pbuilderrc):
        raise SaltInvocationError(
            'pbuilderrc environment is incorrectly setup'
        )
    env_overrides = _get_build_env(env)
    # append only when there is real (non-whitespace) content to add
    if env_overrides and not env_overrides.isspace():
        with salt.utils.files.fopen(pbuilderrc, 'a') as fow:
            fow.write(salt.utils.stringutils.to_str(env_overrides))
    # hand ownership of the rc file to the build user (runas)
    cmd = "chown {0}:{0} {1}".format(runas, pbuilderrc)
    retrc = __salt__['cmd.retcode'](cmd, runas='root')
    if retrc != 0:
        raise SaltInvocationError(
            "Create pbuilderrc in home directory failed with return error \'{0}\', "
            "check logs for further details".format(
                retrc)
        )
def transcribe(decoder, audio_file, libdir=None):
""" Decode streaming audio data from raw binary file on disk. """
decoder = get_decoder()
decoder.start_utt()
stream = open(audio_file, 'rb')
while True:
buf = stream.read(1024)
if buf:
decoder.process_raw(buf, False, False)
else:
break
decoder.end_utt()
return evaluate_results(decoder) | Decode streaming audio data from raw binary file on disk. | Below is the the instruction that describes the task:
### Input:
Decode streaming audio data from raw binary file on disk.
### Response:
def transcribe(decoder, audio_file, libdir=None):
    """Decode streaming audio data from raw binary file on disk.

    :param decoder: a speech decoder exposing start_utt/process_raw/end_utt;
        when None, one is created via get_decoder(). (Fix: the original
        unconditionally discarded the passed-in decoder.)
    :param audio_file: path to the raw binary audio file.
    :param libdir: unused; kept for backward compatibility with callers.
    :return: whatever evaluate_results(decoder) produces.
    """
    if decoder is None:
        decoder = get_decoder()
    decoder.start_utt()
    # Fix: context-manage the file so the handle is closed even if
    # process_raw raises (the original leaked the open file object).
    with open(audio_file, 'rb') as stream:
        # feed the decoder fixed-size chunks until EOF
        while True:
            buf = stream.read(1024)
            if not buf:
                break
            decoder.process_raw(buf, False, False)
    decoder.end_utt()
    return evaluate_results(decoder)
def get_local_admins():
'''
Show all local administrator accounts.
CLI Example:
.. code-block:: bash
salt '*' panos.get_local_admins
'''
admin_list = get_users_config()
response = []
if 'users' not in admin_list['result']:
return response
if isinstance(admin_list['result']['users']['entry'], list):
for entry in admin_list['result']['users']['entry']:
response.append(entry['name'])
else:
response.append(admin_list['result']['users']['entry']['name'])
return response | Show all local administrator accounts.
CLI Example:
.. code-block:: bash
salt '*' panos.get_local_admins | Below is the the instruction that describes the task:
### Input:
Show all local administrator accounts.
CLI Example:
.. code-block:: bash
salt '*' panos.get_local_admins
### Response:
def get_local_admins():
    '''
    Show all local administrator accounts.
    CLI Example:
    .. code-block:: bash
        salt '*' panos.get_local_admins
    '''
    result = get_users_config()['result']
    # no users section at all -> nothing to report
    if 'users' not in result:
        return []
    entries = result['users']['entry']
    # a single account comes back as a bare mapping rather than a list
    if not isinstance(entries, list):
        entries = [entries]
    return [entry['name'] for entry in entries]
def add_sample(a_float, dist):
"""Adds `a_float` to `dist`, updating its existing buckets.
Args:
a_float (float): a new value
dist (:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`):
the Distribution being updated
Raises:
ValueError: if `dist` does not have known bucket options defined
ValueError: if there are not enough bucket count fields in `dist`
"""
dist_type, _ = _detect_bucket_option(dist)
if dist_type == u'exponentialBuckets':
_update_general_statistics(a_float, dist)
_update_exponential_bucket_count(a_float, dist)
elif dist_type == u'linearBuckets':
_update_general_statistics(a_float, dist)
_update_linear_bucket_count(a_float, dist)
elif dist_type == u'explicitBuckets':
_update_general_statistics(a_float, dist)
_update_explicit_bucket_count(a_float, dist)
else:
_logger.error(u'Could not determine bucket option type for %s', dist)
raise ValueError(u'Unknown bucket option type') | Adds `a_float` to `dist`, updating its existing buckets.
Args:
a_float (float): a new value
dist (:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`):
the Distribution being updated
Raises:
ValueError: if `dist` does not have known bucket options defined
ValueError: if there are not enough bucket count fields in `dist` | Below is the the instruction that describes the task:
### Input:
Adds `a_float` to `dist`, updating its existing buckets.
Args:
a_float (float): a new value
dist (:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`):
the Distribution being updated
Raises:
ValueError: if `dist` does not have known bucket options defined
ValueError: if there are not enough bucket count fields in `dist`
### Response:
def add_sample(a_float, dist):
    """Adds `a_float` to `dist`, updating its existing buckets.

    Args:
        a_float (float): a new value
        dist (:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`):
            the Distribution being updated

    Raises:
        ValueError: if `dist` does not have known bucket options defined
        ValueError: if there are not enough bucket count fields in `dist`
    """
    dist_type, _ = _detect_bucket_option(dist)
    # dispatch table: bucket-option kind -> bucket-count updater
    updaters = {
        u'exponentialBuckets': _update_exponential_bucket_count,
        u'linearBuckets': _update_linear_bucket_count,
        u'explicitBuckets': _update_explicit_bucket_count,
    }
    update_buckets = updaters.get(dist_type)
    if update_buckets is None:
        _logger.error(u'Could not determine bucket option type for %s', dist)
        raise ValueError(u'Unknown bucket option type')
    # general statistics (count/mean/min/max helpers) first, then the
    # bucket counts for the specific option kind
    _update_general_statistics(a_float, dist)
    update_buckets(a_float, dist)
def get(self, key, default=None, type_=None):
"""
Return the last data value for the passed key. If key doesn't exist
or value is an empty list, return `default`.
"""
try:
rv = self[key]
except KeyError:
return default
if type_ is not None:
try:
rv = type_(rv)
except ValueError:
rv = default
return rv | Return the last data value for the passed key. If key doesn't exist
or value is an empty list, return `default`. | Below is the the instruction that describes the task:
### Input:
Return the last data value for the passed key. If key doesn't exist
or value is an empty list, return `default`.
### Response:
def get(self, key, default=None, type_=None):
    """
    Return the last data value for the passed key. If key doesn't exist
    or value is an empty list, return `default`.

    When ``type_`` is given the value is coerced through it; a failed
    coercion (ValueError) also yields ``default``.
    """
    try:
        value = self[key]
    except KeyError:
        return default
    if type_ is None:
        return value
    try:
        return type_(value)
    except ValueError:
        return default
def check_bounds(self):
"""Make sure the camera is not outside if its legal range."""
if not (self.camera_bounds == None):
if self.__pan.X < self.camera_bounds.Left:
self.__pan[0] = self.camera_bounds.Left
if self.__pan.X > self.camera_bounds.Right:
self.__pan[0] = self.camera_bounds.Right
if self.__pan.Y < self.camera_bounds.Top:
self.__pan[1] = self.camera_bounds.Top
if self.__pan.Y > self.camera_bounds.Bottom:
self.__pan[1] = self.camera_bounds.Bottom | Make sure the camera is not outside of its legal range. | Below is the instruction that describes the task:
### Input:
Make sure the camera is not outside of its legal range.
### Response:
def check_bounds(self):
    """Clamp the camera's pan position so it stays inside ``camera_bounds``.

    A ``camera_bounds`` of None means the camera is unbounded and nothing
    is done. Assumes the pan vector exposes ``.X``/``.Y`` for reads and
    ``[0]``/``[1]`` for writes, and the bounds expose
    Left/Right/Top/Bottom -- TODO confirm against the pan/bounds types.
    """
    # Fix: compare against None with 'is' (the original used '== None',
    # which can invoke an arbitrary __eq__ on the bounds object).
    if self.camera_bounds is not None:
        if self.__pan.X < self.camera_bounds.Left:
            self.__pan[0] = self.camera_bounds.Left
        if self.__pan.X > self.camera_bounds.Right:
            self.__pan[0] = self.camera_bounds.Right
        if self.__pan.Y < self.camera_bounds.Top:
            self.__pan[1] = self.camera_bounds.Top
        if self.__pan.Y > self.camera_bounds.Bottom:
            self.__pan[1] = self.camera_bounds.Bottom
def table_top_abs(self):
"""Returns the absolute position of table top"""
table_height = np.array([0, 0, self.table_full_size[2]])
return string_to_array(self.floor.get("pos")) + table_height | Returns the absolute position of table top | Below is the the instruction that describes the task:
### Input:
Returns the absolute position of table top
### Response:
def table_top_abs(self):
    """Returns the absolute position of table top.

    Computed as the floor's position plus the table's height
    (``table_full_size[2]``) along the z axis.
    """
    floor_pos = string_to_array(self.floor.get("pos"))
    return floor_pos + np.array([0, 0, self.table_full_size[2]])
def take_step(self,
state: StateType,
max_actions: int = None,
allowed_actions: List[Set] = None) -> List[StateType]:
"""
The main method in the ``TransitionFunction`` API. This function defines the computation
done at each step of decoding and returns a ranked list of next states.
The input state is `grouped`, to allow for efficient computation, but the output states
should all have a ``group_size`` of 1, to make things easier on the decoding algorithm.
They will get regrouped later as needed.
Because of the way we handle grouping in the decoder states, constructing a new state is
actually a relatively expensive operation. If you know a priori that only some of the
states will be needed (either because you have a set of gold action sequences, or you have
a fixed beam size), passing that information into this function will keep us from
constructing more states than we need, which will greatly speed up your computation.
IMPORTANT: This method `must` returns states already sorted by their score, otherwise
``BeamSearch`` and other methods will break. For efficiency, we do not perform an
additional sort in those methods.
ALSO IMPORTANT: When ``allowed_actions`` is given and ``max_actions`` is not, we assume you
want to evaluate all possible states and do not need any sorting (e.g., this is true for
maximum marginal likelihood training that does not use a beam search). In this case, we
may skip the sorting step for efficiency reasons.
Parameters
----------
state : ``State``
The current state of the decoder, which we will take a step `from`. We may be grouping
together computation for several states here. Because we can have several states for
each instance in the original batch being evaluated at the same time, we use
``group_size`` for this kind of batching, and ``batch_size`` for the `original` batch
in ``model.forward.``
max_actions : ``int``, optional
If you know that you will only need a certain number of states out of this (e.g., in a
beam search), you can pass in the max number of actions that you need, and we will only
construct that many states (for each `batch` instance - `not` for each `group`
instance!). This can save a whole lot of computation if you have an action space
that's much larger than your beam size.
allowed_actions : ``List[Set]``, optional
If the ``DecoderTrainer`` has constraints on which actions need to be evaluated (e.g.,
maximum marginal likelihood only needs to evaluate action sequences in a given set),
you can pass those constraints here, to avoid constructing state objects unnecessarily.
If there are no constraints from the trainer, passing a value of ``None`` here will
allow all actions to be considered.
This is a list because it is `batched` - every instance in the batch has a set of
allowed actions. Note that the size of this list is the ``group_size`` in the
``State``, `not` the ``batch_size`` of ``model.forward``. The training algorithm needs
to convert from the `batched` allowed action sequences that it has to a `grouped`
allowed action sequence list.
Returns
-------
next_states : ``List[State]``
A list of next states, ordered by score.
"""
raise NotImplementedError | The main method in the ``TransitionFunction`` API. This function defines the computation
done at each step of decoding and returns a ranked list of next states.
The input state is `grouped`, to allow for efficient computation, but the output states
should all have a ``group_size`` of 1, to make things easier on the decoding algorithm.
They will get regrouped later as needed.
Because of the way we handle grouping in the decoder states, constructing a new state is
actually a relatively expensive operation. If you know a priori that only some of the
states will be needed (either because you have a set of gold action sequences, or you have
a fixed beam size), passing that information into this function will keep us from
constructing more states than we need, which will greatly speed up your computation.
IMPORTANT: This method `must` returns states already sorted by their score, otherwise
``BeamSearch`` and other methods will break. For efficiency, we do not perform an
additional sort in those methods.
ALSO IMPORTANT: When ``allowed_actions`` is given and ``max_actions`` is not, we assume you
want to evaluate all possible states and do not need any sorting (e.g., this is true for
maximum marginal likelihood training that does not use a beam search). In this case, we
may skip the sorting step for efficiency reasons.
Parameters
----------
state : ``State``
The current state of the decoder, which we will take a step `from`. We may be grouping
together computation for several states here. Because we can have several states for
each instance in the original batch being evaluated at the same time, we use
``group_size`` for this kind of batching, and ``batch_size`` for the `original` batch
in ``model.forward.``
max_actions : ``int``, optional
If you know that you will only need a certain number of states out of this (e.g., in a
beam search), you can pass in the max number of actions that you need, and we will only
construct that many states (for each `batch` instance - `not` for each `group`
instance!). This can save a whole lot of computation if you have an action space
that's much larger than your beam size.
allowed_actions : ``List[Set]``, optional
If the ``DecoderTrainer`` has constraints on which actions need to be evaluated (e.g.,
maximum marginal likelihood only needs to evaluate action sequences in a given set),
you can pass those constraints here, to avoid constructing state objects unnecessarily.
If there are no constraints from the trainer, passing a value of ``None`` here will
allow all actions to be considered.
This is a list because it is `batched` - every instance in the batch has a set of
allowed actions. Note that the size of this list is the ``group_size`` in the
``State``, `not` the ``batch_size`` of ``model.forward``. The training algorithm needs
to convert from the `batched` allowed action sequences that it has to a `grouped`
allowed action sequence list.
Returns
-------
next_states : ``List[State]``
A list of next states, ordered by score. | Below is the the instruction that describes the task:
### Input:
The main method in the ``TransitionFunction`` API. This function defines the computation
done at each step of decoding and returns a ranked list of next states.
The input state is `grouped`, to allow for efficient computation, but the output states
should all have a ``group_size`` of 1, to make things easier on the decoding algorithm.
They will get regrouped later as needed.
Because of the way we handle grouping in the decoder states, constructing a new state is
actually a relatively expensive operation. If you know a priori that only some of the
states will be needed (either because you have a set of gold action sequences, or you have
a fixed beam size), passing that information into this function will keep us from
constructing more states than we need, which will greatly speed up your computation.
IMPORTANT: This method `must` returns states already sorted by their score, otherwise
``BeamSearch`` and other methods will break. For efficiency, we do not perform an
additional sort in those methods.
ALSO IMPORTANT: When ``allowed_actions`` is given and ``max_actions`` is not, we assume you
want to evaluate all possible states and do not need any sorting (e.g., this is true for
maximum marginal likelihood training that does not use a beam search). In this case, we
may skip the sorting step for efficiency reasons.
Parameters
----------
state : ``State``
The current state of the decoder, which we will take a step `from`. We may be grouping
together computation for several states here. Because we can have several states for
each instance in the original batch being evaluated at the same time, we use
``group_size`` for this kind of batching, and ``batch_size`` for the `original` batch
in ``model.forward.``
max_actions : ``int``, optional
If you know that you will only need a certain number of states out of this (e.g., in a
beam search), you can pass in the max number of actions that you need, and we will only
construct that many states (for each `batch` instance - `not` for each `group`
instance!). This can save a whole lot of computation if you have an action space
that's much larger than your beam size.
allowed_actions : ``List[Set]``, optional
If the ``DecoderTrainer`` has constraints on which actions need to be evaluated (e.g.,
maximum marginal likelihood only needs to evaluate action sequences in a given set),
you can pass those constraints here, to avoid constructing state objects unnecessarily.
If there are no constraints from the trainer, passing a value of ``None`` here will
allow all actions to be considered.
This is a list because it is `batched` - every instance in the batch has a set of
allowed actions. Note that the size of this list is the ``group_size`` in the
``State``, `not` the ``batch_size`` of ``model.forward``. The training algorithm needs
to convert from the `batched` allowed action sequences that it has to a `grouped`
allowed action sequence list.
Returns
-------
next_states : ``List[State]``
A list of next states, ordered by score.
### Response:
def take_step(self,
state: StateType,
max_actions: int = None,
allowed_actions: List[Set] = None) -> List[StateType]:
"""
The main method in the ``TransitionFunction`` API. This function defines the computation
done at each step of decoding and returns a ranked list of next states.
The input state is `grouped`, to allow for efficient computation, but the output states
should all have a ``group_size`` of 1, to make things easier on the decoding algorithm.
They will get regrouped later as needed.
Because of the way we handle grouping in the decoder states, constructing a new state is
actually a relatively expensive operation. If you know a priori that only some of the
states will be needed (either because you have a set of gold action sequences, or you have
a fixed beam size), passing that information into this function will keep us from
constructing more states than we need, which will greatly speed up your computation.
IMPORTANT: This method `must` returns states already sorted by their score, otherwise
``BeamSearch`` and other methods will break. For efficiency, we do not perform an
additional sort in those methods.
ALSO IMPORTANT: When ``allowed_actions`` is given and ``max_actions`` is not, we assume you
want to evaluate all possible states and do not need any sorting (e.g., this is true for
maximum marginal likelihood training that does not use a beam search). In this case, we
may skip the sorting step for efficiency reasons.
Parameters
----------
state : ``State``
The current state of the decoder, which we will take a step `from`. We may be grouping
together computation for several states here. Because we can have several states for
each instance in the original batch being evaluated at the same time, we use
``group_size`` for this kind of batching, and ``batch_size`` for the `original` batch
in ``model.forward.``
max_actions : ``int``, optional
If you know that you will only need a certain number of states out of this (e.g., in a
beam search), you can pass in the max number of actions that you need, and we will only
construct that many states (for each `batch` instance - `not` for each `group`
instance!). This can save a whole lot of computation if you have an action space
that's much larger than your beam size.
allowed_actions : ``List[Set]``, optional
If the ``DecoderTrainer`` has constraints on which actions need to be evaluated (e.g.,
maximum marginal likelihood only needs to evaluate action sequences in a given set),
you can pass those constraints here, to avoid constructing state objects unnecessarily.
If there are no constraints from the trainer, passing a value of ``None`` here will
allow all actions to be considered.
This is a list because it is `batched` - every instance in the batch has a set of
allowed actions. Note that the size of this list is the ``group_size`` in the
``State``, `not` the ``batch_size`` of ``model.forward``. The training algorithm needs
to convert from the `batched` allowed action sequences that it has to a `grouped`
allowed action sequence list.
Returns
-------
next_states : ``List[State]``
A list of next states, ordered by score.
"""
raise NotImplementedError |
def split_edge(self, vertex1, vertex2, multicolor, guidance=None, sorted_guidance=False,
account_for_colors_multiplicity_in_guidance=True, key=None):
""" Splits an edge in current :class:`BreakpointGraph` most similar to supplied data (if no unique identifier ``key`` is provided) with respect to supplied guidance.
Proxies a call to :meth:`BreakpointGraph._BreakpointGraph__split_bgedge` method.
:param vertex1: a first vertex out of two the edge to be split is incident to
:type vertex1: any python hashable object. :class:`bg.vertex.BGVertex` is expected
:param vertex2: a second vertex out of two the edge to be split is incident to
:type vertex2: any python hashable object. :class:`bg.vertex.BGVertex` is expected
:param multicolor: a multi-color to find most suitable edge to be split
:type multicolor: :class:`bg.multicolor.Multicolor`
:param duplication_splitting: flag (**not** currently implemented) for a splitting of color-based splitting to take into account multiplicity of respective colors
:type duplication_splitting: ``Boolean``
:param key: unique identifier of edge to be split
:type key: any python object. ``int`` is expected
:return: ``None``, performs inplace changes
"""
self.__split_bgedge(bgedge=BGEdge(vertex1=vertex1, vertex2=vertex2, multicolor=multicolor), guidance=guidance,
sorted_guidance=sorted_guidance,
account_for_colors_multiplicity_in_guidance=account_for_colors_multiplicity_in_guidance,
key=key) | Splits an edge in current :class:`BreakpointGraph` most similar to supplied data (if no unique identifier ``key`` is provided) with respect to supplied guidance.
Proxies a call to :meth:`BreakpointGraph._BreakpointGraph__split_bgedge` method.
:param vertex1: a first vertex out of two the edge to be split is incident to
:type vertex1: any python hashable object. :class:`bg.vertex.BGVertex` is expected
:param vertex2: a second vertex out of two the edge to be split is incident to
:type vertex2: any python hashable object. :class:`bg.vertex.BGVertex` is expected
:param multicolor: a multi-color to find most suitable edge to be split
:type multicolor: :class:`bg.multicolor.Multicolor`
:param duplication_splitting: flag (**not** currently implemented) for a splitting of color-based splitting to take into account multiplicity of respective colors
:type duplication_splitting: ``Boolean``
:param key: unique identifier of edge to be split
:type key: any python object. ``int`` is expected
:return: ``None``, performs inplace changes | Below is the instruction that describes the task:
### Input:
Splits an edge in current :class:`BreakpointGraph` most similar to supplied data (if no unique identifier ``key`` is provided) with respect to supplied guidance.
Proxies a call to :meth:`BreakpointGraph._BreakpointGraph__split_bgedge` method.
:param vertex1: a first vertex out of two the edge to be split is incident to
:type vertex1: any python hashable object. :class:`bg.vertex.BGVertex` is expected
:param vertex2: a second vertex out of two the edge to be split is incident to
:type vertex2: any python hashable object. :class:`bg.vertex.BGVertex` is expected
:param multicolor: a multi-color to find most suitable edge to be split
:type multicolor: :class:`bg.multicolor.Multicolor`
:param duplication_splitting: flag (**not** currently implemented) for a splitting of color-based splitting to take into account multiplicity of respective colors
:type duplication_splitting: ``Boolean``
:param key: unique identifier of edge to be split
:type key: any python object. ``int`` is expected
:return: ``None``, performs inplace changes
### Response:
def split_edge(self, vertex1, vertex2, multicolor, guidance=None, sorted_guidance=False,
account_for_colors_multiplicity_in_guidance=True, key=None):
""" Splits an edge in current :class:`BreakpointGraph` most similar to supplied data (if no unique identifier ``key`` is provided) with respect to supplied guidance.
Proxies a call to :meth:`BreakpointGraph._BreakpointGraph__split_bgedge` method.
:param vertex1: a first vertex out of two the edge to be split is incident to
:type vertex1: any python hashable object. :class:`bg.vertex.BGVertex` is expected
:param vertex2: a second vertex out of two the edge to be split is incident to
:type vertex2: any python hashable object. :class:`bg.vertex.BGVertex` is expected
:param multicolor: a multi-color to find most suitable edge to be split
:type multicolor: :class:`bg.multicolor.Multicolor`
:param duplication_splitting: flag (**not** currently implemented) for a splitting of color-based splitting to take into account multiplicity of respective colors
:type duplication_splitting: ``Boolean``
:param key: unique identifier of edge to be split
:type key: any python object. ``int`` is expected
:return: ``None``, performs inplace changes
"""
self.__split_bgedge(bgedge=BGEdge(vertex1=vertex1, vertex2=vertex2, multicolor=multicolor), guidance=guidance,
sorted_guidance=sorted_guidance,
account_for_colors_multiplicity_in_guidance=account_for_colors_multiplicity_in_guidance,
key=key) |
def drain_OD(q_plant, T, depth_end, SDR):
"""Return the nominal diameter of the entrance tank drain pipe. Depth at the
end of the flocculator is used for headloss and length calculation inputs in
the diam_pipe calculation.
Parameters
----------
q_plant: float
Plant flow rate
T: float
Design temperature
depth_end: float
The depth of water at the end of the flocculator
SDR: float
Standard dimension ratio
Returns
-------
float
?
Examples
--------
>>> from aguaclara.play import*
??
"""
nu = pc.viscosity_kinematic(T)
K_minor = con.PIPE_ENTRANCE_K_MINOR + con.PIPE_EXIT_K_MINOR + con.EL90_K_MINOR
drain_ID = pc.diam_pipe(q_plant, depth_end, depth_end, nu, mat.PVC_PIPE_ROUGH, K_minor)
drain_ND = pipe.SDR_available_ND(drain_ID, SDR)
return pipe.OD(drain_ND).magnitude | Return the nominal diameter of the entrance tank drain pipe. Depth at the
end of the flocculator is used for headloss and length calculation inputs in
the diam_pipe calculation.
Parameters
----------
q_plant: float
Plant flow rate
T: float
Design temperature
depth_end: float
The depth of water at the end of the flocculator
SDR: float
Standard dimension ratio
Returns
-------
float
?
Examples
--------
>>> from aguaclara.play import*
?? | Below is the instruction that describes the task:
### Input:
Return the nominal diameter of the entrance tank drain pipe. Depth at the
end of the flocculator is used for headloss and length calculation inputs in
the diam_pipe calculation.
Parameters
----------
q_plant: float
Plant flow rate
T: float
Design temperature
depth_end: float
The depth of water at the end of the flocculator
SDR: float
Standard dimension ratio
Returns
-------
float
?
Examples
--------
>>> from aguaclara.play import*
??
### Response:
def drain_OD(q_plant, T, depth_end, SDR):
"""Return the nominal diameter of the entrance tank drain pipe. Depth at the
end of the flocculator is used for headloss and length calculation inputs in
the diam_pipe calculation.
Parameters
----------
q_plant: float
Plant flow rate
T: float
Design temperature
depth_end: float
The depth of water at the end of the flocculator
SDR: float
Standard dimension ratio
Returns
-------
float
?
Examples
--------
>>> from aguaclara.play import*
??
"""
nu = pc.viscosity_kinematic(T)
K_minor = con.PIPE_ENTRANCE_K_MINOR + con.PIPE_EXIT_K_MINOR + con.EL90_K_MINOR
drain_ID = pc.diam_pipe(q_plant, depth_end, depth_end, nu, mat.PVC_PIPE_ROUGH, K_minor)
drain_ND = pipe.SDR_available_ND(drain_ID, SDR)
return pipe.OD(drain_ND).magnitude |
def _nest_variable(v, check_records=False):
"""Nest a variable when moving from scattered back to consolidated.
check_records -- avoid re-nesting a record input if it comes from a previous
step and is already nested, don't need to re-array.
"""
if (check_records and is_cwl_record(v) and len(v["id"].split("/")) > 1 and
v.get("type", {}).get("type") == "array"):
return v
else:
v = copy.deepcopy(v)
v["type"] = {"type": "array", "items": v["type"]}
return v | Nest a variable when moving from scattered back to consolidated.
check_records -- avoid re-nesting a record input if it comes from a previous
step and is already nested, don't need to re-array. | Below is the instruction that describes the task:
### Input:
Nest a variable when moving from scattered back to consolidated.
check_records -- avoid re-nesting a record input if it comes from a previous
step and is already nested, don't need to re-array.
### Response:
def _nest_variable(v, check_records=False):
"""Nest a variable when moving from scattered back to consolidated.
check_records -- avoid re-nesting a record input if it comes from a previous
step and is already nested, don't need to re-array.
"""
if (check_records and is_cwl_record(v) and len(v["id"].split("/")) > 1 and
v.get("type", {}).get("type") == "array"):
return v
else:
v = copy.deepcopy(v)
v["type"] = {"type": "array", "items": v["type"]}
return v |
def get(self, bounce_type=None, inactive=None, email_filter=None,
message_id=None, count=None, offset=None, api_key=None,
secure=None, test=None, **request_args):
'''Builds query string params from inputs. It handles offset and
count defaults and validation.
:param bounce_type: The type of bounces retrieve. See `bounce_types`
for a list of types, or read the Postmark API docs. Defaults to
`None`.
:param inactive: If `True`, retrieves inactive bounces only.
Defaults to `None`.
:param email_filter: A string to filter emails by.
Defaults to `None`.
:param message_id: Retrieve a bounce for a single message's ID.
Defaults to `None`.
:param count: The number of bounces to retrieve in this request.
Defaults to 25 if `message_id` is not provided.
:param offset: The page offset for bounces to retrieve. Defaults to 0
if `message_id` is not provided.
:param api_key: Your Postmark API key. Defaults to `self.api_key`.
:param secure: Use the https scheme for Postmark API.
Defaults to `self.secure`.
:params test: Use the Postmark test API. Defaults to `self.test`.
:rtype: :class:`BouncesResponse`
'''
params = self._construct_params(bounce_type=bounce_type,
inactive=inactive,
email_filter=email_filter,
message_id=message_id,
count=count,
offset=offset)
url = self._get_api_url(secure=secure)
headers = self._get_headers(api_key=api_key, test=test,
request_args=request_args)
response = self._request(url, headers=headers, params=params,
**request_args)
return response | Builds query string params from inputs. It handles offset and
count defaults and validation.
:param bounce_type: The type of bounces retrieve. See `bounce_types`
for a list of types, or read the Postmark API docs. Defaults to
`None`.
:param inactive: If `True`, retrieves inactive bounces only.
Defaults to `None`.
:param email_filter: A string to filter emails by.
Defaults to `None`.
:param message_id: Retrieve a bounce for a single message's ID.
Defaults to `None`.
:param count: The number of bounces to retrieve in this request.
Defaults to 25 if `message_id` is not provided.
:param offset: The page offset for bounces to retrieve. Defaults to 0
if `message_id` is not provided.
:param api_key: Your Postmark API key. Defaults to `self.api_key`.
:param secure: Use the https scheme for Postmark API.
Defaults to `self.secure`.
:params test: Use the Postmark test API. Defaults to `self.test`.
:rtype: :class:`BouncesResponse` | Below is the instruction that describes the task:
### Input:
Builds query string params from inputs. It handles offset and
count defaults and validation.
:param bounce_type: The type of bounces retrieve. See `bounce_types`
for a list of types, or read the Postmark API docs. Defaults to
`None`.
:param inactive: If `True`, retrieves inactive bounces only.
Defaults to `None`.
:param email_filter: A string to filter emails by.
Defaults to `None`.
:param message_id: Retrieve a bounce for a single message's ID.
Defaults to `None`.
:param count: The number of bounces to retrieve in this request.
Defaults to 25 if `message_id` is not provided.
:param offset: The page offset for bounces to retrieve. Defaults to 0
if `message_id` is not provided.
:param api_key: Your Postmark API key. Defaults to `self.api_key`.
:param secure: Use the https scheme for Postmark API.
Defaults to `self.secure`.
:params test: Use the Postmark test API. Defaults to `self.test`.
:rtype: :class:`BouncesResponse`
### Response:
def get(self, bounce_type=None, inactive=None, email_filter=None,
message_id=None, count=None, offset=None, api_key=None,
secure=None, test=None, **request_args):
'''Builds query string params from inputs. It handles offset and
count defaults and validation.
:param bounce_type: The type of bounces retrieve. See `bounce_types`
for a list of types, or read the Postmark API docs. Defaults to
`None`.
:param inactive: If `True`, retrieves inactive bounces only.
Defaults to `None`.
:param email_filter: A string to filter emails by.
Defaults to `None`.
:param message_id: Retrieve a bounce for a single message's ID.
Defaults to `None`.
:param count: The number of bounces to retrieve in this request.
Defaults to 25 if `message_id` is not provided.
:param offset: The page offset for bounces to retrieve. Defaults to 0
if `message_id` is not provided.
:param api_key: Your Postmark API key. Defaults to `self.api_key`.
:param secure: Use the https scheme for Postmark API.
Defaults to `self.secure`.
:params test: Use the Postmark test API. Defaults to `self.test`.
:rtype: :class:`BouncesResponse`
'''
params = self._construct_params(bounce_type=bounce_type,
inactive=inactive,
email_filter=email_filter,
message_id=message_id,
count=count,
offset=offset)
url = self._get_api_url(secure=secure)
headers = self._get_headers(api_key=api_key, test=test,
request_args=request_args)
response = self._request(url, headers=headers, params=params,
**request_args)
return response |
def _parse_forces(line, lines):
"""Parse the forces block, including individual terms (e.g. Hubbard)"""
units = line.split()[4].rstrip(":")
next(lines)
newline = next(lines)
total = []; non_local = []; ionic = []; local = []; core_correction = []
hubbard = []; scf = []; types = []
while (not "The non-local contrib." in newline) and len(newline.split()) > 0:
if "=" in newline:
total.append([float(x) for x in newline.partition("=")[2].split()])
types.append(int(newline.split()[3]))
newline = next(lines)
if len(newline.split()) > 0:
while not "The ionic contribution" in newline:
if "=" in newline:
non_local.append([float(x) for x in newline.partition("=")[2].split()])
newline = next(lines)
while not "The local contribution" in newline:
if "=" in newline:
ionic.append([float(x) for x in newline.partition("=")[2].split()])
newline = next(lines)
while not "The core correction contribution" in newline:
if "=" in newline:
local.append([float(x) for x in newline.partition("=")[2].split()])
newline = next(lines)
while not "The Hubbard contrib." in newline:
if "=" in newline:
core_correction.append([float(x) for x in newline.partition("=")[2].split()])
newline = next(lines)
while not "The SCF correction term" in newline:
if "=" in newline:
hubbard.append([float(x) for x in newline.partition("=")[2].split()])
newline = next(lines)
while len(newline.split()) > 0:
if "=" in newline:
scf.append([float(x) for x in newline.partition("=")[2].split()])
newline = next(lines)
newline = next(lines)
total_force = float(newline.split()[3])
total_scf = float(newline.split()[8])
return {
"force units": units,
"total force": total_force,
"total SCF correction": total_scf,
"forces": total,
"non-local contribution to forces": non_local,
"ionic contribution to forces": ionic,
"local contribution to forces": local,
"core corrections to forces": core_correction,
"Hubbard contribution to forces": hubbard,
"SCF correction term to forces": scf,
"Atomic species index for forces": types
} | Parse the forces block, including individual terms (e.g. Hubbard) | Below is the instruction that describes the task:
### Input:
Parse the forces block, including individual terms (e.g. Hubbard)
### Response:
def _parse_forces(line, lines):
"""Parse the forces block, including individual terms (e.g. Hubbard)"""
units = line.split()[4].rstrip(":")
next(lines)
newline = next(lines)
total = []; non_local = []; ionic = []; local = []; core_correction = []
hubbard = []; scf = []; types = []
while (not "The non-local contrib." in newline) and len(newline.split()) > 0:
if "=" in newline:
total.append([float(x) for x in newline.partition("=")[2].split()])
types.append(int(newline.split()[3]))
newline = next(lines)
if len(newline.split()) > 0:
while not "The ionic contribution" in newline:
if "=" in newline:
non_local.append([float(x) for x in newline.partition("=")[2].split()])
newline = next(lines)
while not "The local contribution" in newline:
if "=" in newline:
ionic.append([float(x) for x in newline.partition("=")[2].split()])
newline = next(lines)
while not "The core correction contribution" in newline:
if "=" in newline:
local.append([float(x) for x in newline.partition("=")[2].split()])
newline = next(lines)
while not "The Hubbard contrib." in newline:
if "=" in newline:
core_correction.append([float(x) for x in newline.partition("=")[2].split()])
newline = next(lines)
while not "The SCF correction term" in newline:
if "=" in newline:
hubbard.append([float(x) for x in newline.partition("=")[2].split()])
newline = next(lines)
while len(newline.split()) > 0:
if "=" in newline:
scf.append([float(x) for x in newline.partition("=")[2].split()])
newline = next(lines)
newline = next(lines)
total_force = float(newline.split()[3])
total_scf = float(newline.split()[8])
return {
"force units": units,
"total force": total_force,
"total SCF correction": total_scf,
"forces": total,
"non-local contribution to forces": non_local,
"ionic contribution to forces": ionic,
"local contribution to forces": local,
"core corrections to forces": core_correction,
"Hubbard contribution to forces": hubbard,
"SCF correction term to forces": scf,
"Atomic species index for forces": types
} |
def nearest_bins(self, bins, even=False, pow2=False):
"""Return nearest number of FFT bins (even or power of two)"""
if pow2:
bins_log2 = math.log(bins, 2)
if bins_log2 % 1 != 0:
bins = 2**math.ceil(bins_log2)
logger.warning('number of FFT bins should be power of two, changing to {}'.format(bins))
elif even:
if bins % 2 != 0:
bins = math.ceil(bins / 2) * 2
logger.warning('number of FFT bins should be even, changing to {}'.format(bins))
return bins | Return nearest number of FFT bins (even or power of two) | Below is the instruction that describes the task:
### Input:
Return nearest number of FFT bins (even or power of two)
### Response:
def nearest_bins(self, bins, even=False, pow2=False):
"""Return nearest number of FFT bins (even or power of two)"""
if pow2:
bins_log2 = math.log(bins, 2)
if bins_log2 % 1 != 0:
bins = 2**math.ceil(bins_log2)
logger.warning('number of FFT bins should be power of two, changing to {}'.format(bins))
elif even:
if bins % 2 != 0:
bins = math.ceil(bins / 2) * 2
logger.warning('number of FFT bins should be even, changing to {}'.format(bins))
return bins |
def status(self, start, end, sources=None):
""" Check the data coverage in the Historics archive for a given interval.
Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsstatus
:param start: Unix timestamp for the start time
:type start: int
:param end: Unix timestamp for the start time
:type end: int
:param sources: list of data sources to include.
:type sources: list
:return: dict of REST API output with headers attached
:rtype: :class:`~datasift.request.DictResponse`
:raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
"""
params = {'start': start, 'end': end}
if sources:
params['sources'] = ','.join(sources)
return self.request.get('status', params=params) | Check the data coverage in the Historics archive for a given interval.
Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsstatus
:param start: Unix timestamp for the start time
:type start: int
:param end: Unix timestamp for the start time
:type end: int
:param sources: list of data sources to include.
:type sources: list
:return: dict of REST API output with headers attached
:rtype: :class:`~datasift.request.DictResponse`
:raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` | Below is the instruction that describes the task:
### Input:
Check the data coverage in the Historics archive for a given interval.
Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsstatus
:param start: Unix timestamp for the start time
:type start: int
:param end: Unix timestamp for the start time
:type end: int
:param sources: list of data sources to include.
:type sources: list
:return: dict of REST API output with headers attached
:rtype: :class:`~datasift.request.DictResponse`
:raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
### Response:
def status(self, start, end, sources=None):
""" Check the data coverage in the Historics archive for a given interval.
Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsstatus
:param start: Unix timestamp for the start time
:type start: int
:param end: Unix timestamp for the start time
:type end: int
:param sources: list of data sources to include.
:type sources: list
:return: dict of REST API output with headers attached
:rtype: :class:`~datasift.request.DictResponse`
:raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
"""
params = {'start': start, 'end': end}
if sources:
params['sources'] = ','.join(sources)
return self.request.get('status', params=params) |
def update_groups_for_user(self, user: User, state: State = None):
"""
Update the Group memberships for the given users state
:param user: User to update for
:param state: State to update user for
:return:
"""
if state is None:
state = user.profile.state
for config in self.filter(states=state):
# grant user new groups for their state
config.update_group_membership_for_user(user)
for config in self.exclude(states=state):
# ensure user does not have groups from previous state
config.remove_user_from_alliance_groups(user)
config.remove_user_from_corp_groups(user) | Update the Group memberships for the given users state
:param user: User to update for
:param state: State to update user for
:return: | Below is the instruction that describes the task:
### Input:
Update the Group memberships for the given users state
:param user: User to update for
:param state: State to update user for
:return:
### Response:
def update_groups_for_user(self, user: User, state: State = None):
"""
Update the Group memberships for the given users state
:param user: User to update for
:param state: State to update user for
:return:
"""
if state is None:
state = user.profile.state
for config in self.filter(states=state):
# grant user new groups for their state
config.update_group_membership_for_user(user)
for config in self.exclude(states=state):
# ensure user does not have groups from previous state
config.remove_user_from_alliance_groups(user)
config.remove_user_from_corp_groups(user) |
def console():
" Enter point "
autocomplete()
config = settings.MakesiteParser()
config.read([
settings.BASECONFIG, settings.HOMECONFIG,
op.join(settings.MAKESITE_HOME or '', settings.CFGNAME),
op.join(op.curdir, settings.CFGNAME),
])
argv = []
alias = dict(config.items('alias'))
names = alias.keys()
for arg in sys.argv[1:]:
if arg in names:
argv += alias[arg].split()
continue
argv.append(arg)
main(argv) | Enter point | Below is the instruction that describes the task:
### Input:
Enter point
### Response:
def console():
" Enter point "
autocomplete()
config = settings.MakesiteParser()
config.read([
settings.BASECONFIG, settings.HOMECONFIG,
op.join(settings.MAKESITE_HOME or '', settings.CFGNAME),
op.join(op.curdir, settings.CFGNAME),
])
argv = []
alias = dict(config.items('alias'))
names = alias.keys()
for arg in sys.argv[1:]:
if arg in names:
argv += alias[arg].split()
continue
argv.append(arg)
main(argv) |
def get_SCAT_box(slope, x_mean, y_mean, beta_threshold = .1):
"""
takes in data and returns information about SCAT box:
the largest possible x_value, the largest possible y_value,
and functions for the two bounding lines of the box
"""
# if beta_threshold is -999, that means null
if beta_threshold == -999:
beta_threshold = .1
slope_err_threshold = abs(slope) * beta_threshold
x, y = x_mean, y_mean
# get lines that pass through mass center, with opposite slope
slope1 = slope + (2* slope_err_threshold)
line1_y_int = y - (slope1 * x)
line1_x_int = -1 * (old_div(line1_y_int, slope1))
slope2 = slope - (2 * slope_err_threshold)
line2_y_int = y - (slope2 * x)
line2_x_int = -1 * (old_div(line2_y_int, slope2))
# l1_y_int and l2_x_int form the bottom line of the box
# l2_y_int and l1_x_int form the top line of the box
# print "_diagonal line1:", (0, line2_y_int), (line2_x_int, 0), (x, y)
# print "_diagonal line2:", (0, line1_y_int), (line1_x_int, 0), (x, y)
# print "_bottom line:", [(0, line1_y_int), (line2_x_int, 0)]
# print "_top line:", [(0, line2_y_int), (line1_x_int, 0)]
low_bound = [(0, line1_y_int), (line2_x_int, 0)]
high_bound = [(0, line2_y_int), (line1_x_int, 0)]
x_max = high_bound[1][0]#
y_max = high_bound[0][1]
# function for low_bound
low_slope = old_div((low_bound[0][1] - low_bound[1][1]), (low_bound[0][0] - low_bound[1][0])) #
low_y_int = low_bound[0][1]
def low_bound(x):
y = low_slope * x + low_y_int
return y
# function for high_bound
high_slope = old_div((high_bound[0][1] - high_bound[1][1]), (high_bound[0][0] - high_bound[1][0])) # y_0-y_1/x_0-x_1
high_y_int = high_bound[0][1]
def high_bound(x):
y = high_slope * x + high_y_int
return y
high_line = [high_y_int, high_slope]
low_line = [low_y_int, low_slope]
return low_bound, high_bound, x_max, y_max, low_line, high_line | takes in data and returns information about SCAT box:
the largest possible x_value, the largest possible y_value,
and functions for the two bounding lines of the box | Below is the instruction that describes the task:
### Input:
takes in data and returns information about SCAT box:
the largest possible x_value, the largest possible y_value,
and functions for the two bounding lines of the box
### Response:
def get_SCAT_box(slope, x_mean, y_mean, beta_threshold = .1):
"""
takes in data and returns information about SCAT box:
the largest possible x_value, the largest possible y_value,
and functions for the two bounding lines of the box
"""
# if beta_threshold is -999, that means null
if beta_threshold == -999:
beta_threshold = .1
slope_err_threshold = abs(slope) * beta_threshold
x, y = x_mean, y_mean
# get lines that pass through mass center, with opposite slope
slope1 = slope + (2* slope_err_threshold)
line1_y_int = y - (slope1 * x)
line1_x_int = -1 * (old_div(line1_y_int, slope1))
slope2 = slope - (2 * slope_err_threshold)
line2_y_int = y - (slope2 * x)
line2_x_int = -1 * (old_div(line2_y_int, slope2))
# l1_y_int and l2_x_int form the bottom line of the box
# l2_y_int and l1_x_int form the top line of the box
# print "_diagonal line1:", (0, line2_y_int), (line2_x_int, 0), (x, y)
# print "_diagonal line2:", (0, line1_y_int), (line1_x_int, 0), (x, y)
# print "_bottom line:", [(0, line1_y_int), (line2_x_int, 0)]
# print "_top line:", [(0, line2_y_int), (line1_x_int, 0)]
low_bound = [(0, line1_y_int), (line2_x_int, 0)]
high_bound = [(0, line2_y_int), (line1_x_int, 0)]
x_max = high_bound[1][0]#
y_max = high_bound[0][1]
# function for low_bound
low_slope = old_div((low_bound[0][1] - low_bound[1][1]), (low_bound[0][0] - low_bound[1][0])) #
low_y_int = low_bound[0][1]
def low_bound(x):
y = low_slope * x + low_y_int
return y
# function for high_bound
high_slope = old_div((high_bound[0][1] - high_bound[1][1]), (high_bound[0][0] - high_bound[1][0])) # y_0-y_1/x_0-x_1
high_y_int = high_bound[0][1]
def high_bound(x):
y = high_slope * x + high_y_int
return y
high_line = [high_y_int, high_slope]
low_line = [low_y_int, low_slope]
return low_bound, high_bound, x_max, y_max, low_line, high_line |
def CDQ(cpu):
"""
EDX:EAX = sign-extend of EAX
"""
cpu.EDX = Operators.EXTRACT(Operators.SEXTEND(cpu.EAX, 32, 64), 32, 32) | EDX:EAX = sign-extend of EAX | Below is the instruction that describes the task:
### Input:
EDX:EAX = sign-extend of EAX
### Response:
def CDQ(cpu):
"""
EDX:EAX = sign-extend of EAX
"""
cpu.EDX = Operators.EXTRACT(Operators.SEXTEND(cpu.EAX, 32, 64), 32, 32) |
def bind(self, event_name, callback, *args, **kwargs):
"""Bind an event to a callback
:param event_name: The name of the event to bind to.
:type event_name: str
:param callback: The callback to notify of this event.
"""
self.event_callbacks[event_name].append((callback, args, kwargs)) | Bind an event to a callback
:param event_name: The name of the event to bind to.
:type event_name: str
:param callback: The callback to notify of this event. | Below is the the instruction that describes the task:
### Input:
Bind an event to a callback
:param event_name: The name of the event to bind to.
:type event_name: str
:param callback: The callback to notify of this event.
### Response:
def bind(self, event_name, callback, *args, **kwargs):
"""Bind an event to a callback
:param event_name: The name of the event to bind to.
:type event_name: str
:param callback: The callback to notify of this event.
"""
self.event_callbacks[event_name].append((callback, args, kwargs)) |
def plot_history_alive(model, t, transactions, datetime_col, freq="D", start_date=None, ax=None, **kwargs):
"""
Draw a graph showing the probability of being alive for a customer in time.
Parameters
----------
model: lifetimes model
A fitted lifetimes model.
t: int
the number of time units since the birth we want to draw the p_alive
transactions: pandas DataFrame
DataFrame containing the transactions history of the customer_id
datetime_col: str
The column in the transactions that denotes the datetime the purchase was made
freq: str, optional
Default 'D' for days. Other examples= 'W' for weekly
start_date: datetime, optional
Limit xaxis to start date
ax: matplotlib.AxesSubplot, optional
Using user axes
kwargs
Passed into the matplotlib.pyplot.plot command.
Returns
-------
axes: matplotlib.AxesSubplot
"""
from matplotlib import pyplot as plt
if start_date is None:
start_date = min(transactions[datetime_col])
if ax is None:
ax = plt.subplot(111)
# Get purchasing history of user
customer_history = transactions[[datetime_col]].copy()
customer_history.index = pd.DatetimeIndex(customer_history[datetime_col])
# Add transactions column
customer_history["transactions"] = 1
customer_history = customer_history.resample(freq).sum()
# plot alive_path
path = calculate_alive_path(model, transactions, datetime_col, t, freq)
path_dates = pd.date_range(start=min(transactions[datetime_col]), periods=len(path), freq=freq)
plt.plot(path_dates, path, "-", label="P_alive")
# plot buying dates
payment_dates = customer_history[customer_history["transactions"] >= 1].index
plt.vlines(payment_dates.values, ymin=0, ymax=1, colors="r", linestyles="dashed", label="purchases")
plt.ylim(0, 1.0)
plt.yticks(np.arange(0, 1.1, 0.1))
plt.xlim(start_date, path_dates[-1])
plt.legend(loc=3)
plt.ylabel("P_alive")
plt.title("History of P_alive")
return ax | Draw a graph showing the probability of being alive for a customer in time.
Parameters
----------
model: lifetimes model
A fitted lifetimes model.
t: int
the number of time units since the birth we want to draw the p_alive
transactions: pandas DataFrame
DataFrame containing the transactions history of the customer_id
datetime_col: str
The column in the transactions that denotes the datetime the purchase was made
freq: str, optional
Default 'D' for days. Other examples= 'W' for weekly
start_date: datetime, optional
Limit xaxis to start date
ax: matplotlib.AxesSubplot, optional
Using user axes
kwargs
Passed into the matplotlib.pyplot.plot command.
Returns
-------
axes: matplotlib.AxesSubplot | Below is the the instruction that describes the task:
### Input:
Draw a graph showing the probability of being alive for a customer in time.
Parameters
----------
model: lifetimes model
A fitted lifetimes model.
t: int
the number of time units since the birth we want to draw the p_alive
transactions: pandas DataFrame
DataFrame containing the transactions history of the customer_id
datetime_col: str
The column in the transactions that denotes the datetime the purchase was made
freq: str, optional
Default 'D' for days. Other examples= 'W' for weekly
start_date: datetime, optional
Limit xaxis to start date
ax: matplotlib.AxesSubplot, optional
Using user axes
kwargs
Passed into the matplotlib.pyplot.plot command.
Returns
-------
axes: matplotlib.AxesSubplot
### Response:
def plot_history_alive(model, t, transactions, datetime_col, freq="D", start_date=None, ax=None, **kwargs):
"""
Draw a graph showing the probability of being alive for a customer in time.
Parameters
----------
model: lifetimes model
A fitted lifetimes model.
t: int
the number of time units since the birth we want to draw the p_alive
transactions: pandas DataFrame
DataFrame containing the transactions history of the customer_id
datetime_col: str
The column in the transactions that denotes the datetime the purchase was made
freq: str, optional
Default 'D' for days. Other examples= 'W' for weekly
start_date: datetime, optional
Limit xaxis to start date
ax: matplotlib.AxesSubplot, optional
Using user axes
kwargs
Passed into the matplotlib.pyplot.plot command.
Returns
-------
axes: matplotlib.AxesSubplot
"""
from matplotlib import pyplot as plt
if start_date is None:
start_date = min(transactions[datetime_col])
if ax is None:
ax = plt.subplot(111)
# Get purchasing history of user
customer_history = transactions[[datetime_col]].copy()
customer_history.index = pd.DatetimeIndex(customer_history[datetime_col])
# Add transactions column
customer_history["transactions"] = 1
customer_history = customer_history.resample(freq).sum()
# plot alive_path
path = calculate_alive_path(model, transactions, datetime_col, t, freq)
path_dates = pd.date_range(start=min(transactions[datetime_col]), periods=len(path), freq=freq)
plt.plot(path_dates, path, "-", label="P_alive")
# plot buying dates
payment_dates = customer_history[customer_history["transactions"] >= 1].index
plt.vlines(payment_dates.values, ymin=0, ymax=1, colors="r", linestyles="dashed", label="purchases")
plt.ylim(0, 1.0)
plt.yticks(np.arange(0, 1.1, 0.1))
plt.xlim(start_date, path_dates[-1])
plt.legend(loc=3)
plt.ylabel("P_alive")
plt.title("History of P_alive")
return ax |
def zonalstats(features, raster, all_touched, band, categorical,
indent, info, nodata, prefix, stats, sequence, use_rs):
'''zonalstats generates summary statistics of geospatial raster datasets
based on vector features.
The input arguments to zonalstats should be valid GeoJSON Features. (see cligj)
The output GeoJSON will be mostly unchanged but have additional properties per feature
describing the summary statistics (min, max, mean, etc.) of the underlying raster dataset.
The raster is specified by the required -r/--raster argument.
Example, calculate rainfall stats for each state and output to file:
\b
rio zonalstats states.geojson -r rainfall.tif > mean_rainfall_by_state.geojson
'''
if info:
logging.basicConfig(level=logging.INFO)
if stats is not None:
stats = stats.split(" ")
if 'all' in [x.lower() for x in stats]:
stats = "ALL"
zonal_results = gen_zonal_stats(
features,
raster,
all_touched=all_touched,
band=band,
categorical=categorical,
nodata=nodata,
stats=stats,
prefix=prefix,
geojson_out=True)
if sequence:
for feature in zonal_results:
if use_rs:
click.echo(b'\x1e', nl=False)
click.echo(json.dumps(feature))
else:
click.echo(json.dumps(
{'type': 'FeatureCollection',
'features': list(zonal_results)})) | zonalstats generates summary statistics of geospatial raster datasets
based on vector features.
The input arguments to zonalstats should be valid GeoJSON Features. (see cligj)
The output GeoJSON will be mostly unchanged but have additional properties per feature
describing the summary statistics (min, max, mean, etc.) of the underlying raster dataset.
The raster is specified by the required -r/--raster argument.
Example, calculate rainfall stats for each state and output to file:
\b
rio zonalstats states.geojson -r rainfall.tif > mean_rainfall_by_state.geojson | Below is the the instruction that describes the task:
### Input:
zonalstats generates summary statistics of geospatial raster datasets
based on vector features.
The input arguments to zonalstats should be valid GeoJSON Features. (see cligj)
The output GeoJSON will be mostly unchanged but have additional properties per feature
describing the summary statistics (min, max, mean, etc.) of the underlying raster dataset.
The raster is specified by the required -r/--raster argument.
Example, calculate rainfall stats for each state and output to file:
\b
rio zonalstats states.geojson -r rainfall.tif > mean_rainfall_by_state.geojson
### Response:
def zonalstats(features, raster, all_touched, band, categorical,
indent, info, nodata, prefix, stats, sequence, use_rs):
'''zonalstats generates summary statistics of geospatial raster datasets
based on vector features.
The input arguments to zonalstats should be valid GeoJSON Features. (see cligj)
The output GeoJSON will be mostly unchanged but have additional properties per feature
describing the summary statistics (min, max, mean, etc.) of the underlying raster dataset.
The raster is specified by the required -r/--raster argument.
Example, calculate rainfall stats for each state and output to file:
\b
rio zonalstats states.geojson -r rainfall.tif > mean_rainfall_by_state.geojson
'''
if info:
logging.basicConfig(level=logging.INFO)
if stats is not None:
stats = stats.split(" ")
if 'all' in [x.lower() for x in stats]:
stats = "ALL"
zonal_results = gen_zonal_stats(
features,
raster,
all_touched=all_touched,
band=band,
categorical=categorical,
nodata=nodata,
stats=stats,
prefix=prefix,
geojson_out=True)
if sequence:
for feature in zonal_results:
if use_rs:
click.echo(b'\x1e', nl=False)
click.echo(json.dumps(feature))
else:
click.echo(json.dumps(
{'type': 'FeatureCollection',
'features': list(zonal_results)})) |
def setCurrentMode( self, mode ):
"""
Sets the current mode for this dialog to the inputed mode.
:param mode | <XPopupWidget.Mode>
"""
if ( self._currentMode == mode ):
return
self._currentMode = mode
self.updateModeSettings() | Sets the current mode for this dialog to the inputed mode.
:param mode | <XPopupWidget.Mode> | Below is the the instruction that describes the task:
### Input:
Sets the current mode for this dialog to the inputed mode.
:param mode | <XPopupWidget.Mode>
### Response:
def setCurrentMode( self, mode ):
"""
Sets the current mode for this dialog to the inputed mode.
:param mode | <XPopupWidget.Mode>
"""
if ( self._currentMode == mode ):
return
self._currentMode = mode
self.updateModeSettings() |
def remove_volatile(type_):
"""removes volatile from the type definition
If type is not volatile type, it will be returned as is
"""
nake_type = remove_alias(type_)
if not is_volatile(nake_type):
return type_
else:
if isinstance(nake_type, cpptypes.array_t):
is_c = is_const(nake_type)
if is_c:
base_type_ = nake_type.base.base.base
else:
base_type_ = nake_type.base.base
result_type = base_type_
if is_c:
result_type = cpptypes.const_t(result_type)
return cpptypes.array_t(result_type, nake_type.size)
return nake_type.base | removes volatile from the type definition
If type is not volatile type, it will be returned as is | Below is the the instruction that describes the task:
### Input:
removes volatile from the type definition
If type is not volatile type, it will be returned as is
### Response:
def remove_volatile(type_):
"""removes volatile from the type definition
If type is not volatile type, it will be returned as is
"""
nake_type = remove_alias(type_)
if not is_volatile(nake_type):
return type_
else:
if isinstance(nake_type, cpptypes.array_t):
is_c = is_const(nake_type)
if is_c:
base_type_ = nake_type.base.base.base
else:
base_type_ = nake_type.base.base
result_type = base_type_
if is_c:
result_type = cpptypes.const_t(result_type)
return cpptypes.array_t(result_type, nake_type.size)
return nake_type.base |
def get_assessment_notification_session(self, assessment_receiver):
"""Gets the notification session for notifications pertaining to assessment changes.
arg: assessment_receiver
(osid.assessment.AssessmentReceiver): the assessment
receiver interface
return: (osid.assessment.AssessmentNotificationSession) - an
``AssessmentNotificationSession``
raise: NullArgument - ``assessment_receiver`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_assessment_notification()``
is ``false``
*compliance: optional -- This method must be implemented if
``supports_assessment_notification()`` is ``true``.*
"""
if not self.supports_assessment_notification():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.ItemNotificationSession(runtime=self._runtime, receiver=assessment_receiver) | Gets the notification session for notifications pertaining to assessment changes.
arg: assessment_receiver
(osid.assessment.AssessmentReceiver): the assessment
receiver interface
return: (osid.assessment.AssessmentNotificationSession) - an
``AssessmentNotificationSession``
raise: NullArgument - ``assessment_receiver`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_assessment_notification()``
is ``false``
*compliance: optional -- This method must be implemented if
``supports_assessment_notification()`` is ``true``.* | Below is the the instruction that describes the task:
### Input:
Gets the notification session for notifications pertaining to assessment changes.
arg: assessment_receiver
(osid.assessment.AssessmentReceiver): the assessment
receiver interface
return: (osid.assessment.AssessmentNotificationSession) - an
``AssessmentNotificationSession``
raise: NullArgument - ``assessment_receiver`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_assessment_notification()``
is ``false``
*compliance: optional -- This method must be implemented if
``supports_assessment_notification()`` is ``true``.*
### Response:
def get_assessment_notification_session(self, assessment_receiver):
"""Gets the notification session for notifications pertaining to assessment changes.
arg: assessment_receiver
(osid.assessment.AssessmentReceiver): the assessment
receiver interface
return: (osid.assessment.AssessmentNotificationSession) - an
``AssessmentNotificationSession``
raise: NullArgument - ``assessment_receiver`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_assessment_notification()``
is ``false``
*compliance: optional -- This method must be implemented if
``supports_assessment_notification()`` is ``true``.*
"""
if not self.supports_assessment_notification():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.ItemNotificationSession(runtime=self._runtime, receiver=assessment_receiver) |
def get_resources(cls):
"""Returns Ext Resources."""
controller = RoutesController(directory.get_plugin())
return [extensions.ResourceExtension(
Routes.get_alias(),
controller)] | Returns Ext Resources. | Below is the the instruction that describes the task:
### Input:
Returns Ext Resources.
### Response:
def get_resources(cls):
"""Returns Ext Resources."""
controller = RoutesController(directory.get_plugin())
return [extensions.ResourceExtension(
Routes.get_alias(),
controller)] |
def update_nanopubstore_start_dt(url: str, start_dt: str):
"""Add nanopubstore start_dt to belapi.state_mgmt collection
Args:
url: url of nanopubstore
start_dt: datetime of last query against nanopubstore for new ID's
"""
hostname = urllib.parse.urlsplit(url)[1]
start_dates_doc = state_mgmt.get(start_dates_doc_key)
if not start_dates_doc:
start_dates_doc = {
"_key": start_dates_doc_key,
"start_dates": [{"nanopubstore": hostname, "start_dt": start_dt}],
}
state_mgmt.insert(start_dates_doc)
else:
for idx, start_date in enumerate(start_dates_doc["start_dates"]):
if start_date["nanopubstore"] == hostname:
start_dates_doc["start_dates"][idx]["start_dt"] = start_dt
break
else:
start_dates_doc["start_dates"].append(
{"nanopubstore": hostname, "start_dt": start_dt}
)
state_mgmt.replace(start_dates_doc) | Add nanopubstore start_dt to belapi.state_mgmt collection
Args:
url: url of nanopubstore
start_dt: datetime of last query against nanopubstore for new ID's | Below is the the instruction that describes the task:
### Input:
Add nanopubstore start_dt to belapi.state_mgmt collection
Args:
url: url of nanopubstore
start_dt: datetime of last query against nanopubstore for new ID's
### Response:
def update_nanopubstore_start_dt(url: str, start_dt: str):
"""Add nanopubstore start_dt to belapi.state_mgmt collection
Args:
url: url of nanopubstore
start_dt: datetime of last query against nanopubstore for new ID's
"""
hostname = urllib.parse.urlsplit(url)[1]
start_dates_doc = state_mgmt.get(start_dates_doc_key)
if not start_dates_doc:
start_dates_doc = {
"_key": start_dates_doc_key,
"start_dates": [{"nanopubstore": hostname, "start_dt": start_dt}],
}
state_mgmt.insert(start_dates_doc)
else:
for idx, start_date in enumerate(start_dates_doc["start_dates"]):
if start_date["nanopubstore"] == hostname:
start_dates_doc["start_dates"][idx]["start_dt"] = start_dt
break
else:
start_dates_doc["start_dates"].append(
{"nanopubstore": hostname, "start_dt": start_dt}
)
state_mgmt.replace(start_dates_doc) |
def llcs(s1, s2):
'''length of the longest common sequence
This implementation takes O(len(s1) * len(s2)) time and
O(min(len(s1), len(s2))) space.
Use only with short strings.
>>> llcs('a.b.cd','!a!b!c!!!d!')
4
'''
m, n = len(s1), len(s2)
if m < n: # ensure n <= m, to use O(min(n,m)) space
m, n = n, m
s1, s2 = s2, s1
l = [0] * (n+1)
for i in range(m):
p = 0
for j in range(n):
t = 1 if s1[i] == s2[j] else 0
p, l[j+1] = l[j+1], max(p+t, l[j], l[j+1])
return l[n] | length of the longest common sequence
This implementation takes O(len(s1) * len(s2)) time and
O(min(len(s1), len(s2))) space.
Use only with short strings.
>>> llcs('a.b.cd','!a!b!c!!!d!')
4 | Below is the the instruction that describes the task:
### Input:
length of the longest common sequence
This implementation takes O(len(s1) * len(s2)) time and
O(min(len(s1), len(s2))) space.
Use only with short strings.
>>> llcs('a.b.cd','!a!b!c!!!d!')
4
### Response:
def llcs(s1, s2):
'''length of the longest common sequence
This implementation takes O(len(s1) * len(s2)) time and
O(min(len(s1), len(s2))) space.
Use only with short strings.
>>> llcs('a.b.cd','!a!b!c!!!d!')
4
'''
m, n = len(s1), len(s2)
if m < n: # ensure n <= m, to use O(min(n,m)) space
m, n = n, m
s1, s2 = s2, s1
l = [0] * (n+1)
for i in range(m):
p = 0
for j in range(n):
t = 1 if s1[i] == s2[j] else 0
p, l[j+1] = l[j+1], max(p+t, l[j], l[j+1])
return l[n] |
def execute_command(self, request, command, frame):
"""Execute a command in a console."""
return Response(frame.console.eval(command), mimetype="text/html") | Execute a command in a console. | Below is the the instruction that describes the task:
### Input:
Execute a command in a console.
### Response:
def execute_command(self, request, command, frame):
"""Execute a command in a console."""
return Response(frame.console.eval(command), mimetype="text/html") |
def valid_sequences(self):
"""Returns list"""
valid_sets = [[x] for x in self.possible_items if x['left'] == 0]
change = True
niter = 200
while change and niter > 0:
change = False
niter -=1
for possible in sorted(self.possible_items, key=lambda x:x['left']):
for current_valid in valid_sets[:]:
if possible['left'] == current_valid[-1]['right']:
if current_valid + [possible] not in valid_sets:
if current_valid[-1]['left'] != current_valid[-1]['right'] or possible['left'] != possible['right']: #avoids Null insertion twice
valid_sets.append(current_valid + [possible])
change = True
if not niter:
raise Exception('too many iterations')
return valid_sets | Returns list | Below is the the instruction that describes the task:
### Input:
Returns list
### Response:
def valid_sequences(self):
"""Returns list"""
valid_sets = [[x] for x in self.possible_items if x['left'] == 0]
change = True
niter = 200
while change and niter > 0:
change = False
niter -=1
for possible in sorted(self.possible_items, key=lambda x:x['left']):
for current_valid in valid_sets[:]:
if possible['left'] == current_valid[-1]['right']:
if current_valid + [possible] not in valid_sets:
if current_valid[-1]['left'] != current_valid[-1]['right'] or possible['left'] != possible['right']: #avoids Null insertion twice
valid_sets.append(current_valid + [possible])
change = True
if not niter:
raise Exception('too many iterations')
return valid_sets |
def get_maya_location(self, ):
""" Return the installation path to maya
:returns: path to maya
:rtype: str
:raises: errors.SoftwareNotFoundError
"""
import _winreg
# query winreg entry
# the last flag is needed, if we want to test with 32 bit python!
# Because Maya is an 64 bit key!
for ver in MAYA_VERSIONS:
try:
key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,
MAYA_REG_KEY.format(mayaversion=ver), 0,
_winreg.KEY_READ | _winreg.KEY_WOW64_64KEY)
value = _winreg.QueryValueEx(key, "MAYA_INSTALL_LOCATION")[0]
except WindowsError:
log.debug('Maya %s installation not found in registry!' % ver)
if not value:
raise errors.SoftwareNotFoundError('Maya %s installation not found in registry!' % MAYA_VERSIONS)
return value | Return the installation path to maya
:returns: path to maya
:rtype: str
:raises: errors.SoftwareNotFoundError | Below is the the instruction that describes the task:
### Input:
Return the installation path to maya
:returns: path to maya
:rtype: str
:raises: errors.SoftwareNotFoundError
### Response:
def get_maya_location(self, ):
""" Return the installation path to maya
:returns: path to maya
:rtype: str
:raises: errors.SoftwareNotFoundError
"""
import _winreg
# query winreg entry
# the last flag is needed, if we want to test with 32 bit python!
# Because Maya is an 64 bit key!
for ver in MAYA_VERSIONS:
try:
key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,
MAYA_REG_KEY.format(mayaversion=ver), 0,
_winreg.KEY_READ | _winreg.KEY_WOW64_64KEY)
value = _winreg.QueryValueEx(key, "MAYA_INSTALL_LOCATION")[0]
except WindowsError:
log.debug('Maya %s installation not found in registry!' % ver)
if not value:
raise errors.SoftwareNotFoundError('Maya %s installation not found in registry!' % MAYA_VERSIONS)
return value |
def color_overlap(color1, *args):
'''
color_overlap(color1, color2...) yields the rgba value associated with overlaying color2 on top
of color1 followed by any additional colors (overlaid left to right). This respects alpha
values when calculating the results.
Note that colors may be lists of colors, in which case a matrix of RGBA values is yielded.
'''
args = list(args)
args.insert(0, color1)
rgba = np.asarray([0.5,0.5,0.5,0])
for c in args:
c = to_rgba(c)
a = c[...,3]
a0 = rgba[...,3]
if np.isclose(a0, 0).all(): rgba = np.ones(rgba.shape) * c
elif np.isclose(a, 0).all(): continue
else: rgba = times(a, c) + times(1-a, rgba)
return rgba | color_overlap(color1, color2...) yields the rgba value associated with overlaying color2 on top
of color1 followed by any additional colors (overlaid left to right). This respects alpha
values when calculating the results.
Note that colors may be lists of colors, in which case a matrix of RGBA values is yielded. | Below is the the instruction that describes the task:
### Input:
color_overlap(color1, color2...) yields the rgba value associated with overlaying color2 on top
of color1 followed by any additional colors (overlaid left to right). This respects alpha
values when calculating the results.
Note that colors may be lists of colors, in which case a matrix of RGBA values is yielded.
### Response:
def color_overlap(color1, *args):
'''
color_overlap(color1, color2...) yields the rgba value associated with overlaying color2 on top
of color1 followed by any additional colors (overlaid left to right). This respects alpha
values when calculating the results.
Note that colors may be lists of colors, in which case a matrix of RGBA values is yielded.
'''
args = list(args)
args.insert(0, color1)
rgba = np.asarray([0.5,0.5,0.5,0])
for c in args:
c = to_rgba(c)
a = c[...,3]
a0 = rgba[...,3]
if np.isclose(a0, 0).all(): rgba = np.ones(rgba.shape) * c
elif np.isclose(a, 0).all(): continue
else: rgba = times(a, c) + times(1-a, rgba)
return rgba |
def safe_dir(sub_dir=None):
"""Absolute path from safe package directory.
:param sub_dir: Sub directory relative to safe package directory.
:type sub_dir: str
:return: The Absolute path.
:rtype: str
"""
safe_relative_path = os.path.join(
os.path.dirname(__file__), '../')
return os.path.abspath(
os.path.join(safe_relative_path, sub_dir)) | Absolute path from safe package directory.
:param sub_dir: Sub directory relative to safe package directory.
:type sub_dir: str
:return: The Absolute path.
:rtype: str | Below is the the instruction that describes the task:
### Input:
Absolute path from safe package directory.
:param sub_dir: Sub directory relative to safe package directory.
:type sub_dir: str
:return: The Absolute path.
:rtype: str
### Response:
def safe_dir(sub_dir=None):
"""Absolute path from safe package directory.
:param sub_dir: Sub directory relative to safe package directory.
:type sub_dir: str
:return: The Absolute path.
:rtype: str
"""
safe_relative_path = os.path.join(
os.path.dirname(__file__), '../')
return os.path.abspath(
os.path.join(safe_relative_path, sub_dir)) |
def _strategy(codes, context):
"""
Convert SRE regex parse tree to strategy that generates strings matching that
regex represented by that parse tree.
`codes` is either a list of SRE regex elements representations or a particular
element representation. Each element is a tuple of element code (as string) and
parameters. E.g. regex 'ab[0-9]+' compiles to following elements:
[
('literal', 97),
('literal', 98),
('max_repeat', (1, 4294967295, [
('in', [
('range', (48, 57))
])
]))
]
The function recursively traverses regex element tree and converts each element
to strategy that generates strings that match that element.
Context stores
1. List of groups (for backreferences)
2. Active regex flags (e.g. IGNORECASE, DOTALL, UNICODE, they affect behavior
of various inner strategies)
"""
if not isinstance(codes, tuple):
# List of codes
strategies = []
i = 0
while i < len(codes):
if codes[i][0] == sre.LITERAL and not (context.flags & re.IGNORECASE):
# Merge subsequent "literals" into one `just()` strategy
# that generates corresponding text if no IGNORECASE
j = i + 1
while j < len(codes) and codes[j][0] == sre.LITERAL:
j += 1
strategies.append(hs.just(
u''.join([six.unichr(charcode) for (_, charcode) in codes[i:j]])
))
i = j
else:
strategies.append(_strategy(codes[i], context))
i += 1
return hs.tuples(*strategies).map(u''.join)
else:
# Single code
code, value = codes
if code == sre.LITERAL:
# Regex 'a' (single char)
c = six.unichr(value)
if context.flags & re.IGNORECASE:
return hs.sampled_from([c.lower(), c.upper()])
else:
return hs.just(c)
elif code == sre.NOT_LITERAL:
# Regex '[^a]' (negation of a single char)
c = six.unichr(value)
blacklist = set([c.lower(), c.upper()]) \
if context.flags & re.IGNORECASE else [c]
return hs.characters(blacklist_characters=blacklist)
elif code == sre.IN:
# Regex '[abc0-9]' (set of characters)
charsets = value
builder = CharactersBuilder(negate=charsets[0][0] == sre.NEGATE,
flags=context.flags)
for charset_code, charset_value in charsets:
if charset_code == sre.NEGATE:
# Regex '[^...]' (negation)
pass
elif charset_code == sre.LITERAL:
# Regex '[a]' (single char)
builder.add_chars(six.unichr(charset_value))
elif charset_code == sre.RANGE:
# Regex '[a-z]' (char range)
low, high = charset_value
for x in six.moves.range(low, high+1):
builder.add_chars(six.unichr(x))
elif charset_code == sre.CATEGORY:
# Regex '[\w]' (char category)
builder.add_category(charset_value)
else:
raise he.InvalidArgument(
'Unknown charset code: %s' % charset_code
)
return builder.strategy
elif code == sre.ANY:
# Regex '.' (any char)
if context.flags & re.DOTALL:
return hs.characters()
else:
return hs.characters(blacklist_characters="\n")
elif code == sre.AT:
# Regexes like '^...', '...$', '\bfoo', '\Bfoo'
if value == sre.AT_END:
return hs.one_of(hs.just(u''), hs.just(u'\n'))
return hs.just('')
elif code == sre.SUBPATTERN:
# Various groups: '(...)', '(:...)' or '(?P<name>...)'
old_flags = context.flags
if HAS_SUBPATTERN_FLAGS:
context.flags = (context.flags | value[1]) & ~value[2]
strat = _strategy(value[-1], context)
context.flags = old_flags
if value[0]:
context.groups[value[0]] = strat
strat = hs.shared(strat, key=value[0])
return strat
elif code == sre.GROUPREF:
# Regex '\\1' or '(?P=name)' (group reference)
return hs.shared(context.groups[value], key=value)
elif code == sre.ASSERT:
# Regex '(?=...)' or '(?<=...)' (positive lookahead/lookbehind)
return _strategy(value[1], context)
elif code == sre.ASSERT_NOT:
# Regex '(?!...)' or '(?<!...)' (negative lookahead/lookbehind)
return hs.just('')
elif code == sre.BRANCH:
# Regex 'a|b|c' (branch)
return hs.one_of([_strategy(branch, context) for branch in value[1]])
elif code in [sre.MIN_REPEAT, sre.MAX_REPEAT]:
# Regexes 'a?', 'a*', 'a+' and their non-greedy variants (repeaters)
at_least, at_most, regex = value
if at_most == 4294967295:
at_most = None
return hs.lists(_strategy(regex, context),
min_size=at_least,
max_size=at_most).map(''.join)
elif code == sre.GROUPREF_EXISTS:
# Regex '(?(id/name)yes-pattern|no-pattern)' (if group exists selection)
return hs.one_of(
_strategy(value[1], context),
_strategy(value[2], context) if value[2] else hs.just(u''),
)
else:
raise he.InvalidArgument('Unknown code point: %s' % repr(code)) | Convert SRE regex parse tree to strategy that generates strings matching that
regex represented by that parse tree.
`codes` is either a list of SRE regex elements representations or a particular
element representation. Each element is a tuple of element code (as string) and
parameters. E.g. regex 'ab[0-9]+' compiles to following elements:
[
('literal', 97),
('literal', 98),
('max_repeat', (1, 4294967295, [
('in', [
('range', (48, 57))
])
]))
]
The function recursively traverses regex element tree and converts each element
to strategy that generates strings that match that element.
Context stores
1. List of groups (for backreferences)
2. Active regex flags (e.g. IGNORECASE, DOTALL, UNICODE, they affect behavior
of various inner strategies) | Below is the the instruction that describes the task:
### Input:
Convert SRE regex parse tree to strategy that generates strings matching that
regex represented by that parse tree.
`codes` is either a list of SRE regex elements representations or a particular
element representation. Each element is a tuple of element code (as string) and
parameters. E.g. regex 'ab[0-9]+' compiles to following elements:
[
('literal', 97),
('literal', 98),
('max_repeat', (1, 4294967295, [
('in', [
('range', (48, 57))
])
]))
]
The function recursively traverses regex element tree and converts each element
to strategy that generates strings that match that element.
Context stores
1. List of groups (for backreferences)
2. Active regex flags (e.g. IGNORECASE, DOTALL, UNICODE, they affect behavior
of various inner strategies)
### Response:
def _strategy(codes, context):
"""
Convert SRE regex parse tree to strategy that generates strings matching that
regex represented by that parse tree.
`codes` is either a list of SRE regex elements representations or a particular
element representation. Each element is a tuple of element code (as string) and
parameters. E.g. regex 'ab[0-9]+' compiles to following elements:
[
('literal', 97),
('literal', 98),
('max_repeat', (1, 4294967295, [
('in', [
('range', (48, 57))
])
]))
]
The function recursively traverses regex element tree and converts each element
to strategy that generates strings that match that element.
Context stores
1. List of groups (for backreferences)
2. Active regex flags (e.g. IGNORECASE, DOTALL, UNICODE, they affect behavior
of various inner strategies)
"""
if not isinstance(codes, tuple):
# List of codes
strategies = []
i = 0
while i < len(codes):
if codes[i][0] == sre.LITERAL and not (context.flags & re.IGNORECASE):
# Merge subsequent "literals" into one `just()` strategy
# that generates corresponding text if no IGNORECASE
j = i + 1
while j < len(codes) and codes[j][0] == sre.LITERAL:
j += 1
strategies.append(hs.just(
u''.join([six.unichr(charcode) for (_, charcode) in codes[i:j]])
))
i = j
else:
strategies.append(_strategy(codes[i], context))
i += 1
return hs.tuples(*strategies).map(u''.join)
else:
# Single code
code, value = codes
if code == sre.LITERAL:
# Regex 'a' (single char)
c = six.unichr(value)
if context.flags & re.IGNORECASE:
return hs.sampled_from([c.lower(), c.upper()])
else:
return hs.just(c)
elif code == sre.NOT_LITERAL:
# Regex '[^a]' (negation of a single char)
c = six.unichr(value)
blacklist = set([c.lower(), c.upper()]) \
if context.flags & re.IGNORECASE else [c]
return hs.characters(blacklist_characters=blacklist)
elif code == sre.IN:
# Regex '[abc0-9]' (set of characters)
charsets = value
builder = CharactersBuilder(negate=charsets[0][0] == sre.NEGATE,
flags=context.flags)
for charset_code, charset_value in charsets:
if charset_code == sre.NEGATE:
# Regex '[^...]' (negation)
pass
elif charset_code == sre.LITERAL:
# Regex '[a]' (single char)
builder.add_chars(six.unichr(charset_value))
elif charset_code == sre.RANGE:
# Regex '[a-z]' (char range)
low, high = charset_value
for x in six.moves.range(low, high+1):
builder.add_chars(six.unichr(x))
elif charset_code == sre.CATEGORY:
# Regex '[\w]' (char category)
builder.add_category(charset_value)
else:
raise he.InvalidArgument(
'Unknown charset code: %s' % charset_code
)
return builder.strategy
elif code == sre.ANY:
# Regex '.' (any char)
if context.flags & re.DOTALL:
return hs.characters()
else:
return hs.characters(blacklist_characters="\n")
elif code == sre.AT:
# Regexes like '^...', '...$', '\bfoo', '\Bfoo'
if value == sre.AT_END:
return hs.one_of(hs.just(u''), hs.just(u'\n'))
return hs.just('')
elif code == sre.SUBPATTERN:
# Various groups: '(...)', '(:...)' or '(?P<name>...)'
old_flags = context.flags
if HAS_SUBPATTERN_FLAGS:
context.flags = (context.flags | value[1]) & ~value[2]
strat = _strategy(value[-1], context)
context.flags = old_flags
if value[0]:
context.groups[value[0]] = strat
strat = hs.shared(strat, key=value[0])
return strat
elif code == sre.GROUPREF:
# Regex '\\1' or '(?P=name)' (group reference)
return hs.shared(context.groups[value], key=value)
elif code == sre.ASSERT:
# Regex '(?=...)' or '(?<=...)' (positive lookahead/lookbehind)
return _strategy(value[1], context)
elif code == sre.ASSERT_NOT:
# Regex '(?!...)' or '(?<!...)' (negative lookahead/lookbehind)
return hs.just('')
elif code == sre.BRANCH:
# Regex 'a|b|c' (branch)
return hs.one_of([_strategy(branch, context) for branch in value[1]])
elif code in [sre.MIN_REPEAT, sre.MAX_REPEAT]:
# Regexes 'a?', 'a*', 'a+' and their non-greedy variants (repeaters)
at_least, at_most, regex = value
if at_most == 4294967295:
at_most = None
return hs.lists(_strategy(regex, context),
min_size=at_least,
max_size=at_most).map(''.join)
elif code == sre.GROUPREF_EXISTS:
# Regex '(?(id/name)yes-pattern|no-pattern)' (if group exists selection)
return hs.one_of(
_strategy(value[1], context),
_strategy(value[2], context) if value[2] else hs.just(u''),
)
else:
raise he.InvalidArgument('Unknown code point: %s' % repr(code)) |
def select(self, field_paths):
"""Create a "select" query with this collection as parent.
See
:meth:`~.firestore_v1beta1.query.Query.select` for
more information on this method.
Args:
field_paths (Iterable[str, ...]): An iterable of field paths
(``.``-delimited list of field names) to use as a projection
of document fields in the query results.
Returns:
~.firestore_v1beta1.query.Query: A "projected" query.
"""
query = query_mod.Query(self)
return query.select(field_paths) | Create a "select" query with this collection as parent.
See
:meth:`~.firestore_v1beta1.query.Query.select` for
more information on this method.
Args:
field_paths (Iterable[str, ...]): An iterable of field paths
(``.``-delimited list of field names) to use as a projection
of document fields in the query results.
Returns:
~.firestore_v1beta1.query.Query: A "projected" query. | Below is the the instruction that describes the task:
### Input:
Create a "select" query with this collection as parent.
See
:meth:`~.firestore_v1beta1.query.Query.select` for
more information on this method.
Args:
field_paths (Iterable[str, ...]): An iterable of field paths
(``.``-delimited list of field names) to use as a projection
of document fields in the query results.
Returns:
~.firestore_v1beta1.query.Query: A "projected" query.
### Response:
def select(self, field_paths):
"""Create a "select" query with this collection as parent.
See
:meth:`~.firestore_v1beta1.query.Query.select` for
more information on this method.
Args:
field_paths (Iterable[str, ...]): An iterable of field paths
(``.``-delimited list of field names) to use as a projection
of document fields in the query results.
Returns:
~.firestore_v1beta1.query.Query: A "projected" query.
"""
query = query_mod.Query(self)
return query.select(field_paths) |
def apply_getters(self, task):
"""
This function is called when we specify the task dependencies with the syntax:
deps={node: "@property"}
In this case the task has to the get `property` from `node` before starting the calculation.
At present, the following properties are supported:
- @structure
"""
if not self.getters: return
for getter in self.getters:
if getter == "@structure":
task.history.info("Getting structure from %s" % self.node)
new_structure = self.node.get_final_structure()
task._change_structure(new_structure)
else:
raise ValueError("Wrong getter %s" % getter) | This function is called when we specify the task dependencies with the syntax:
deps={node: "@property"}
In this case the task has to the get `property` from `node` before starting the calculation.
At present, the following properties are supported:
- @structure | Below is the the instruction that describes the task:
### Input:
This function is called when we specify the task dependencies with the syntax:
deps={node: "@property"}
In this case the task has to the get `property` from `node` before starting the calculation.
At present, the following properties are supported:
- @structure
### Response:
def apply_getters(self, task):
"""
This function is called when we specify the task dependencies with the syntax:
deps={node: "@property"}
In this case the task has to the get `property` from `node` before starting the calculation.
At present, the following properties are supported:
- @structure
"""
if not self.getters: return
for getter in self.getters:
if getter == "@structure":
task.history.info("Getting structure from %s" % self.node)
new_structure = self.node.get_final_structure()
task._change_structure(new_structure)
else:
raise ValueError("Wrong getter %s" % getter) |
def guess_lexer(_text, **options):
"""Guess a lexer by strong distinctions in the text (eg, shebang)."""
# try to get a vim modeline first
ft = get_filetype_from_buffer(_text)
if ft is not None:
try:
return get_lexer_by_name(ft, **options)
except ClassNotFound:
pass
best_lexer = [0.0, None]
for lexer in _iter_lexerclasses():
rv = lexer.analyse_text(_text)
if rv == 1.0:
return lexer(**options)
if rv > best_lexer[0]:
best_lexer[:] = (rv, lexer)
if not best_lexer[0] or best_lexer[1] is None:
raise ClassNotFound('no lexer matching the text found')
return best_lexer[1](**options) | Guess a lexer by strong distinctions in the text (eg, shebang). | Below is the the instruction that describes the task:
### Input:
Guess a lexer by strong distinctions in the text (eg, shebang).
### Response:
def guess_lexer(_text, **options):
"""Guess a lexer by strong distinctions in the text (eg, shebang)."""
# try to get a vim modeline first
ft = get_filetype_from_buffer(_text)
if ft is not None:
try:
return get_lexer_by_name(ft, **options)
except ClassNotFound:
pass
best_lexer = [0.0, None]
for lexer in _iter_lexerclasses():
rv = lexer.analyse_text(_text)
if rv == 1.0:
return lexer(**options)
if rv > best_lexer[0]:
best_lexer[:] = (rv, lexer)
if not best_lexer[0] or best_lexer[1] is None:
raise ClassNotFound('no lexer matching the text found')
return best_lexer[1](**options) |
def reset(self):
"""
Put all LEDs back to their default color
"""
if not self.leds:
return
self.animate_stop()
for group in self.led_groups:
self.set_color(group, LED_DEFAULT_COLOR) | Put all LEDs back to their default color | Below is the the instruction that describes the task:
### Input:
Put all LEDs back to their default color
### Response:
def reset(self):
"""
Put all LEDs back to their default color
"""
if not self.leds:
return
self.animate_stop()
for group in self.led_groups:
self.set_color(group, LED_DEFAULT_COLOR) |
def update_snapshots(self):
"""Update list of EBS Snapshots for the account / region
Returns:
`None`
"""
self.log.debug('Updating EBSSnapshots for {}/{}'.format(self.account.account_name, self.region))
ec2 = self.session.resource('ec2', region_name=self.region)
try:
existing_snapshots = EBSSnapshot.get_all(self.account, self.region)
snapshots = {x.id: x for x in ec2.snapshots.filter(OwnerIds=[self.account.account_number])}
for data in list(snapshots.values()):
if data.id in existing_snapshots:
snapshot = existing_snapshots[data.id]
if snapshot.update(data):
self.log.debug('Change detected for EBSSnapshot {}/{}/{}'.format(
self.account.account_name,
self.region,
snapshot.resource.resource_id
))
else:
properties = {
'create_time': data.start_time,
'encrypted': data.encrypted,
'kms_key_id': data.kms_key_id,
'state': data.state,
'state_message': data.state_message,
'volume_id': data.volume_id,
'volume_size': data.volume_size,
}
tags = {t['Key']: t['Value'] for t in data.tags or {}}
snapshot = EBSSnapshot.create(
data.id,
account_id=self.account.account_id,
location=self.region,
properties=properties,
tags=tags
)
self.log.debug('Added new EBSSnapshot {}/{}/{}'.format(
self.account.account_name,
self.region,
snapshot.resource.resource_id
))
db.session.commit()
vk = set(list(snapshots.keys()))
evk = set(list(existing_snapshots.keys()))
try:
for snapshotID in evk - vk:
db.session.delete(existing_snapshots[snapshotID].resource)
self.log.debug('Deleted EBSSnapshot {}/{}/{}'.format(
self.account.account_name,
self.region,
snapshotID
))
db.session.commit()
except:
self.log.exception('Failed removing deleted snapshots')
db.session.rollback()
finally:
del ec2 | Update list of EBS Snapshots for the account / region
Returns:
`None` | Below is the the instruction that describes the task:
### Input:
Update list of EBS Snapshots for the account / region
Returns:
`None`
### Response:
def update_snapshots(self):
"""Update list of EBS Snapshots for the account / region
Returns:
`None`
"""
self.log.debug('Updating EBSSnapshots for {}/{}'.format(self.account.account_name, self.region))
ec2 = self.session.resource('ec2', region_name=self.region)
try:
existing_snapshots = EBSSnapshot.get_all(self.account, self.region)
snapshots = {x.id: x for x in ec2.snapshots.filter(OwnerIds=[self.account.account_number])}
for data in list(snapshots.values()):
if data.id in existing_snapshots:
snapshot = existing_snapshots[data.id]
if snapshot.update(data):
self.log.debug('Change detected for EBSSnapshot {}/{}/{}'.format(
self.account.account_name,
self.region,
snapshot.resource.resource_id
))
else:
properties = {
'create_time': data.start_time,
'encrypted': data.encrypted,
'kms_key_id': data.kms_key_id,
'state': data.state,
'state_message': data.state_message,
'volume_id': data.volume_id,
'volume_size': data.volume_size,
}
tags = {t['Key']: t['Value'] for t in data.tags or {}}
snapshot = EBSSnapshot.create(
data.id,
account_id=self.account.account_id,
location=self.region,
properties=properties,
tags=tags
)
self.log.debug('Added new EBSSnapshot {}/{}/{}'.format(
self.account.account_name,
self.region,
snapshot.resource.resource_id
))
db.session.commit()
vk = set(list(snapshots.keys()))
evk = set(list(existing_snapshots.keys()))
try:
for snapshotID in evk - vk:
db.session.delete(existing_snapshots[snapshotID].resource)
self.log.debug('Deleted EBSSnapshot {}/{}/{}'.format(
self.account.account_name,
self.region,
snapshotID
))
db.session.commit()
except:
self.log.exception('Failed removing deleted snapshots')
db.session.rollback()
finally:
del ec2 |
def build(self, pre=None, shortest=False):
"""Build the ``Ref`` instance by fetching the rule from
the GramFuzzer instance and building it
:param list pre: The prerequisites list
:param bool shortest: Whether or not the shortest reference-chain (most minimal) version of the field should be generated.
"""
global REF_LEVEL
REF_LEVEL += 1
try:
if pre is None:
pre = []
#print("{:04d} - {} - {}:{}".format(REF_LEVEL, shortest, self.cat, self.refname))
definition = self.fuzzer.get_ref(self.cat, self.refname)
res = utils.val(
definition,
pre,
shortest=(shortest or REF_LEVEL >= self.max_recursion)
)
return res
# this needs to happen no matter what
finally:
REF_LEVEL -= 1 | Build the ``Ref`` instance by fetching the rule from
the GramFuzzer instance and building it
:param list pre: The prerequisites list
:param bool shortest: Whether or not the shortest reference-chain (most minimal) version of the field should be generated. | Below is the the instruction that describes the task:
### Input:
Build the ``Ref`` instance by fetching the rule from
the GramFuzzer instance and building it
:param list pre: The prerequisites list
:param bool shortest: Whether or not the shortest reference-chain (most minimal) version of the field should be generated.
### Response:
def build(self, pre=None, shortest=False):
"""Build the ``Ref`` instance by fetching the rule from
the GramFuzzer instance and building it
:param list pre: The prerequisites list
:param bool shortest: Whether or not the shortest reference-chain (most minimal) version of the field should be generated.
"""
global REF_LEVEL
REF_LEVEL += 1
try:
if pre is None:
pre = []
#print("{:04d} - {} - {}:{}".format(REF_LEVEL, shortest, self.cat, self.refname))
definition = self.fuzzer.get_ref(self.cat, self.refname)
res = utils.val(
definition,
pre,
shortest=(shortest or REF_LEVEL >= self.max_recursion)
)
return res
# this needs to happen no matter what
finally:
REF_LEVEL -= 1 |
async def generate_psk(self, security_key):
"""Generate and set a psk from the security key."""
if not self._psk:
PatchedDTLSSecurityStore.IDENTITY = 'Client_identity'.encode(
'utf-8')
PatchedDTLSSecurityStore.KEY = security_key.encode('utf-8')
command = Gateway().generate_psk(self._psk_id)
self._psk = await self.request(command)
PatchedDTLSSecurityStore.IDENTITY = self._psk_id.encode('utf-8')
PatchedDTLSSecurityStore.KEY = self._psk.encode('utf-8')
# aiocoap has now cached our psk, so it must be reset.
# We also no longer need the protocol, so this will clean that up.
await self._reset_protocol()
return self._psk | Generate and set a psk from the security key. | Below is the the instruction that describes the task:
### Input:
Generate and set a psk from the security key.
### Response:
async def generate_psk(self, security_key):
"""Generate and set a psk from the security key."""
if not self._psk:
PatchedDTLSSecurityStore.IDENTITY = 'Client_identity'.encode(
'utf-8')
PatchedDTLSSecurityStore.KEY = security_key.encode('utf-8')
command = Gateway().generate_psk(self._psk_id)
self._psk = await self.request(command)
PatchedDTLSSecurityStore.IDENTITY = self._psk_id.encode('utf-8')
PatchedDTLSSecurityStore.KEY = self._psk.encode('utf-8')
# aiocoap has now cached our psk, so it must be reset.
# We also no longer need the protocol, so this will clean that up.
await self._reset_protocol()
return self._psk |
def closed(self):
"""True if connection is closed."""
closed = self._closing or self._closed
if not closed and self._reader and self._reader.at_eof():
self._closing = closed = True
self._loop.call_soon(self._do_close, None)
return closed | True if connection is closed. | Below is the the instruction that describes the task:
### Input:
True if connection is closed.
### Response:
def closed(self):
"""True if connection is closed."""
closed = self._closing or self._closed
if not closed and self._reader and self._reader.at_eof():
self._closing = closed = True
self._loop.call_soon(self._do_close, None)
return closed |
def dist_manhattan(src, tar, qval=2, alphabet=None):
"""Return the normalized Manhattan distance between two strings.
This is a wrapper for :py:meth:`Manhattan.dist`.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
qval : int
The length of each q-gram; 0 for non-q-gram version
alphabet : collection or int
The values or size of the alphabet
Returns
-------
float
The normalized Manhattan distance
Examples
--------
>>> dist_manhattan('cat', 'hat')
0.5
>>> round(dist_manhattan('Niall', 'Neil'), 12)
0.636363636364
>>> round(dist_manhattan('Colin', 'Cuilen'), 12)
0.692307692308
>>> dist_manhattan('ATCG', 'TAGC')
1.0
"""
return Manhattan().dist(src, tar, qval, alphabet) | Return the normalized Manhattan distance between two strings.
This is a wrapper for :py:meth:`Manhattan.dist`.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
qval : int
The length of each q-gram; 0 for non-q-gram version
alphabet : collection or int
The values or size of the alphabet
Returns
-------
float
The normalized Manhattan distance
Examples
--------
>>> dist_manhattan('cat', 'hat')
0.5
>>> round(dist_manhattan('Niall', 'Neil'), 12)
0.636363636364
>>> round(dist_manhattan('Colin', 'Cuilen'), 12)
0.692307692308
>>> dist_manhattan('ATCG', 'TAGC')
1.0 | Below is the the instruction that describes the task:
### Input:
Return the normalized Manhattan distance between two strings.
This is a wrapper for :py:meth:`Manhattan.dist`.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
qval : int
The length of each q-gram; 0 for non-q-gram version
alphabet : collection or int
The values or size of the alphabet
Returns
-------
float
The normalized Manhattan distance
Examples
--------
>>> dist_manhattan('cat', 'hat')
0.5
>>> round(dist_manhattan('Niall', 'Neil'), 12)
0.636363636364
>>> round(dist_manhattan('Colin', 'Cuilen'), 12)
0.692307692308
>>> dist_manhattan('ATCG', 'TAGC')
1.0
### Response:
def dist_manhattan(src, tar, qval=2, alphabet=None):
"""Return the normalized Manhattan distance between two strings.
This is a wrapper for :py:meth:`Manhattan.dist`.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
qval : int
The length of each q-gram; 0 for non-q-gram version
alphabet : collection or int
The values or size of the alphabet
Returns
-------
float
The normalized Manhattan distance
Examples
--------
>>> dist_manhattan('cat', 'hat')
0.5
>>> round(dist_manhattan('Niall', 'Neil'), 12)
0.636363636364
>>> round(dist_manhattan('Colin', 'Cuilen'), 12)
0.692307692308
>>> dist_manhattan('ATCG', 'TAGC')
1.0
"""
return Manhattan().dist(src, tar, qval, alphabet) |
def add_head(self, head):
""" Add head Node """
if not isinstance(head, DependencyNode):
raise TypeError('"head" must be a DependencyNode')
self._heads.append(head) | Add head Node | Below is the the instruction that describes the task:
### Input:
Add head Node
### Response:
def add_head(self, head):
""" Add head Node """
if not isinstance(head, DependencyNode):
raise TypeError('"head" must be a DependencyNode')
self._heads.append(head) |
def _handle_post(self, request):
# type: (Post) -> CallbackResponses
"""Called with the lock taken"""
method_name = request.path[1]
method = self._block[method_name]
assert isinstance(method, MethodModel), \
"Cannot Post to %s which is a %s" % (method.path, type(method))
self.check_field_writeable(method)
post_function = self.get_post_function(method_name)
args = method.validate(request.parameters)
with self.lock_released:
result = post_function(**args)
# Don't need to serialize as the result should be immutable
ret = [request.return_response(result)]
return ret | Called with the lock taken | Below is the the instruction that describes the task:
### Input:
Called with the lock taken
### Response:
def _handle_post(self, request):
# type: (Post) -> CallbackResponses
"""Called with the lock taken"""
method_name = request.path[1]
method = self._block[method_name]
assert isinstance(method, MethodModel), \
"Cannot Post to %s which is a %s" % (method.path, type(method))
self.check_field_writeable(method)
post_function = self.get_post_function(method_name)
args = method.validate(request.parameters)
with self.lock_released:
result = post_function(**args)
# Don't need to serialize as the result should be immutable
ret = [request.return_response(result)]
return ret |
def register(self, name, content, description=None):
"""
Register a new document.
:param content: Content of this document. Jinja and rst are supported.
:type content: str
:param name: Unique name of the document for documentation purposes.
:param description: Short description of this document
"""
return self.__app.documents.register(name, content, self._plugin, description) | Register a new document.
:param content: Content of this document. Jinja and rst are supported.
:type content: str
:param name: Unique name of the document for documentation purposes.
:param description: Short description of this document | Below is the the instruction that describes the task:
### Input:
Register a new document.
:param content: Content of this document. Jinja and rst are supported.
:type content: str
:param name: Unique name of the document for documentation purposes.
:param description: Short description of this document
### Response:
def register(self, name, content, description=None):
"""
Register a new document.
:param content: Content of this document. Jinja and rst are supported.
:type content: str
:param name: Unique name of the document for documentation purposes.
:param description: Short description of this document
"""
return self.__app.documents.register(name, content, self._plugin, description) |
def is_member_of(self, group_dn):
"""
Returns true if our user is a member of the given group.
"""
is_member = None
# Normalize the DN
group_dn = group_dn.lower()
# If we have self._group_dns, we'll use it. Otherwise, we'll try to
# avoid the cost of loading it.
if self._group_dns is None:
is_member = self._group_type.is_member(self._ldap_user, group_dn)
if is_member is None:
is_member = group_dn in self.get_group_dns()
logger.debug(
"{} is{}a member of {}".format(
self._ldap_user.dn, is_member and " " or " not ", group_dn
)
)
return is_member | Returns true if our user is a member of the given group. | Below is the the instruction that describes the task:
### Input:
Returns true if our user is a member of the given group.
### Response:
def is_member_of(self, group_dn):
"""
Returns true if our user is a member of the given group.
"""
is_member = None
# Normalize the DN
group_dn = group_dn.lower()
# If we have self._group_dns, we'll use it. Otherwise, we'll try to
# avoid the cost of loading it.
if self._group_dns is None:
is_member = self._group_type.is_member(self._ldap_user, group_dn)
if is_member is None:
is_member = group_dn in self.get_group_dns()
logger.debug(
"{} is{}a member of {}".format(
self._ldap_user.dn, is_member and " " or " not ", group_dn
)
)
return is_member |
def eval_cor_pattern(self, pattern, hosts, services, hostgroups, servicegroups, running=False):
"""Parse and build recursively a tree of DependencyNode from pattern
:param pattern: pattern to parse
:type pattern: str
:param hosts: hosts list, used to find a specific host
:type hosts: alignak.objects.host.Host
:param services: services list, used to find a specific service
:type services: alignak.objects.service.Service
:param running: rules are evaluated at run time and parsing. True means runtime
:type running: bool
:return: root node of parsed tree
:rtype: alignak.dependencynode.DependencyNode
"""
pattern = pattern.strip()
complex_node = False
# Look if it's a complex pattern (with rule) or
# if it's a leaf of it, like a host/service
for char in '()&|':
if char in pattern:
complex_node = True
# If it's a simple node, evaluate it directly
if complex_node is False:
return self.eval_simple_cor_pattern(pattern, hosts, services,
hostgroups, servicegroups, running)
return self.eval_complex_cor_pattern(pattern, hosts, services,
hostgroups, servicegroups, running) | Parse and build recursively a tree of DependencyNode from pattern
:param pattern: pattern to parse
:type pattern: str
:param hosts: hosts list, used to find a specific host
:type hosts: alignak.objects.host.Host
:param services: services list, used to find a specific service
:type services: alignak.objects.service.Service
:param running: rules are evaluated at run time and parsing. True means runtime
:type running: bool
:return: root node of parsed tree
:rtype: alignak.dependencynode.DependencyNode | Below is the the instruction that describes the task:
### Input:
Parse and build recursively a tree of DependencyNode from pattern
:param pattern: pattern to parse
:type pattern: str
:param hosts: hosts list, used to find a specific host
:type hosts: alignak.objects.host.Host
:param services: services list, used to find a specific service
:type services: alignak.objects.service.Service
:param running: rules are evaluated at run time and parsing. True means runtime
:type running: bool
:return: root node of parsed tree
:rtype: alignak.dependencynode.DependencyNode
### Response:
def eval_cor_pattern(self, pattern, hosts, services, hostgroups, servicegroups, running=False):
"""Parse and build recursively a tree of DependencyNode from pattern
:param pattern: pattern to parse
:type pattern: str
:param hosts: hosts list, used to find a specific host
:type hosts: alignak.objects.host.Host
:param services: services list, used to find a specific service
:type services: alignak.objects.service.Service
:param running: rules are evaluated at run time and parsing. True means runtime
:type running: bool
:return: root node of parsed tree
:rtype: alignak.dependencynode.DependencyNode
"""
pattern = pattern.strip()
complex_node = False
# Look if it's a complex pattern (with rule) or
# if it's a leaf of it, like a host/service
for char in '()&|':
if char in pattern:
complex_node = True
# If it's a simple node, evaluate it directly
if complex_node is False:
return self.eval_simple_cor_pattern(pattern, hosts, services,
hostgroups, servicegroups, running)
return self.eval_complex_cor_pattern(pattern, hosts, services,
hostgroups, servicegroups, running) |
def mergeall(filename, snrmin, snrmax, bdfdir):
""" Merge cands/noise files over all scans
Tries to find scans from filename, but will fall back to finding relevant files if it does not exist.
"""
filename = os.path.abspath(filename)
bignumber = 500
if os.path.exists(filename):
scans = ps.read_scans(filename, bdfdir=bdfdir)
scanlist = sorted(scans.keys())
else:
logger.warn('Could not find file {0}. Estimating scans from available files.'.format(filename))
filelist = glob.glob(os.path.join(os.path.dirname(filename), '*{0}_sc*pkl'.format(os.path.basename(filename))))
try:
scanlist = sorted(set([int(fn.rstrip('.pkl').split('_sc')[1].split('seg')[0]) for fn in filelist]))
except IndexError:
logger.warn('Could not parse filenames for scans. Looking over big range.')
scanlist = range(bignumber)
logger.info('Merging over scans {0}'.format(scanlist))
for scan in scanlist:
pc.merge_segments(filename, scan)
pc.merge_scans(os.path.dirname(filename), os.path.basename(filename), scanlist, snrmin=snrmin, snrmax=snrmax) | Merge cands/noise files over all scans
Tries to find scans from filename, but will fall back to finding relevant files if it does not exist. | Below is the the instruction that describes the task:
### Input:
Merge cands/noise files over all scans
Tries to find scans from filename, but will fall back to finding relevant files if it does not exist.
### Response:
def mergeall(filename, snrmin, snrmax, bdfdir):
""" Merge cands/noise files over all scans
Tries to find scans from filename, but will fall back to finding relevant files if it does not exist.
"""
filename = os.path.abspath(filename)
bignumber = 500
if os.path.exists(filename):
scans = ps.read_scans(filename, bdfdir=bdfdir)
scanlist = sorted(scans.keys())
else:
logger.warn('Could not find file {0}. Estimating scans from available files.'.format(filename))
filelist = glob.glob(os.path.join(os.path.dirname(filename), '*{0}_sc*pkl'.format(os.path.basename(filename))))
try:
scanlist = sorted(set([int(fn.rstrip('.pkl').split('_sc')[1].split('seg')[0]) for fn in filelist]))
except IndexError:
logger.warn('Could not parse filenames for scans. Looking over big range.')
scanlist = range(bignumber)
logger.info('Merging over scans {0}'.format(scanlist))
for scan in scanlist:
pc.merge_segments(filename, scan)
pc.merge_scans(os.path.dirname(filename), os.path.basename(filename), scanlist, snrmin=snrmin, snrmax=snrmax) |
def _make_defaults_exposure_table():
"""Build headers for a table related to exposure classes.
:return: A table with headers.
:rtype: m.Table
"""
table = m.Table(style_class='table table-condensed table-striped')
row = m.Row()
row.add(m.Cell(tr('Name'), header=True))
row.add(m.Cell(tr('Default values'), header=True))
table.add(row)
return table | Build headers for a table related to exposure classes.
:return: A table with headers.
:rtype: m.Table | Below is the the instruction that describes the task:
### Input:
Build headers for a table related to exposure classes.
:return: A table with headers.
:rtype: m.Table
### Response:
def _make_defaults_exposure_table():
"""Build headers for a table related to exposure classes.
:return: A table with headers.
:rtype: m.Table
"""
table = m.Table(style_class='table table-condensed table-striped')
row = m.Row()
row.add(m.Cell(tr('Name'), header=True))
row.add(m.Cell(tr('Default values'), header=True))
table.add(row)
return table |
def option_completer(cls, k,v):
"Tab completion hook for the %%opts cell magic."
line = v.text_until_cursor
completions = cls.setup_completer()
compositor_defs = {el.group:el.output_type.__name__
for el in Compositor.definitions if el.group}
return cls.line_completer(line, completions, compositor_defs) | Tab completion hook for the %%opts cell magic. | Below is the the instruction that describes the task:
### Input:
Tab completion hook for the %%opts cell magic.
### Response:
def option_completer(cls, k,v):
"Tab completion hook for the %%opts cell magic."
line = v.text_until_cursor
completions = cls.setup_completer()
compositor_defs = {el.group:el.output_type.__name__
for el in Compositor.definitions if el.group}
return cls.line_completer(line, completions, compositor_defs) |
def get_families_by_genus_type(self, family_genus_type=None):
"""Gets a ``FamilyList`` corresponding to the given family genus ``Type`` which
does not include families of genus types derived from the specified ``Type``.
In plenary mode, the returned list contains all known families
or an error results. Otherwise, the returned list may contain
only those families that are accessible through this session.
arg: family_genus_type (osid.type.Type): a family genus type
return: (osid.relationship.FamilyList) - the returned ``Family
list``
raise: NullArgument - ``family_genus_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
if family_genus_type is None:
raise NullArgument()
url_path = '/handcar/services/relationship/families'
families_of_type = []
all_families = self._get_request(url_path)
for family in all_families:
# DO WE NEED TO CHECK ALL THREE ATRIBUTES OF THE Id HERE?
if family['genusTypeId'] == str(family_genus_type):
families_of_type.append(family)
return objects.FamilyList(families_of_type) | Gets a ``FamilyList`` corresponding to the given family genus ``Type`` which
does not include families of genus types derived from the specified ``Type``.
In plenary mode, the returned list contains all known families
or an error results. Otherwise, the returned list may contain
only those families that are accessible through this session.
arg: family_genus_type (osid.type.Type): a family genus type
return: (osid.relationship.FamilyList) - the returned ``Family
list``
raise: NullArgument - ``family_genus_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | Below is the the instruction that describes the task:
### Input:
Gets a ``FamilyList`` corresponding to the given family genus ``Type`` which
does not include families of genus types derived from the specified ``Type``.
In plenary mode, the returned list contains all known families
or an error results. Otherwise, the returned list may contain
only those families that are accessible through this session.
arg: family_genus_type (osid.type.Type): a family genus type
return: (osid.relationship.FamilyList) - the returned ``Family
list``
raise: NullArgument - ``family_genus_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
### Response:
def get_families_by_genus_type(self, family_genus_type=None):
"""Gets a ``FamilyList`` corresponding to the given family genus ``Type`` which
does not include families of genus types derived from the specified ``Type``.
In plenary mode, the returned list contains all known families
or an error results. Otherwise, the returned list may contain
only those families that are accessible through this session.
arg: family_genus_type (osid.type.Type): a family genus type
return: (osid.relationship.FamilyList) - the returned ``Family
list``
raise: NullArgument - ``family_genus_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
if family_genus_type is None:
raise NullArgument()
url_path = '/handcar/services/relationship/families'
families_of_type = []
all_families = self._get_request(url_path)
for family in all_families:
# DO WE NEED TO CHECK ALL THREE ATRIBUTES OF THE Id HERE?
if family['genusTypeId'] == str(family_genus_type):
families_of_type.append(family)
return objects.FamilyList(families_of_type) |
def get_file(fs, filename):
'''Serve files for storages with direct file access'''
storage = by_name(fs)
if storage is None:
abort(404)
return storage.serve(filename) | Serve files for storages with direct file access | Below is the the instruction that describes the task:
### Input:
Serve files for storages with direct file access
### Response:
def get_file(fs, filename):
'''Serve files for storages with direct file access'''
storage = by_name(fs)
if storage is None:
abort(404)
return storage.serve(filename) |
def VarintEncode(value):
"""Convert an integer to a varint and write it using the write function."""
result = b""
if value < 0:
raise ValueError("Varint can not encode a negative number.")
bits = value & 0x7f
value >>= 7
while value:
result += HIGH_CHR_MAP[bits]
bits = value & 0x7f
value >>= 7
result += CHR_MAP[bits]
return result | Convert an integer to a varint and write it using the write function. | Below is the the instruction that describes the task:
### Input:
Convert an integer to a varint and write it using the write function.
### Response:
def VarintEncode(value):
"""Convert an integer to a varint and write it using the write function."""
result = b""
if value < 0:
raise ValueError("Varint can not encode a negative number.")
bits = value & 0x7f
value >>= 7
while value:
result += HIGH_CHR_MAP[bits]
bits = value & 0x7f
value >>= 7
result += CHR_MAP[bits]
return result |
def backup_source_file(fname, options=None):
""" backup_source_file(fname : str)
>>> backup_source_file('~/Desktop/lisp/test.lisp')
Create a backup copy of the source file.
"""
opts = parse_options(options)
backup_dir = opts.backup_dir
assert os.path.exists(fname), \
("\n--%s-- Warning: File `%s' does not exist. . ." % (current_time(), fname))
assert os.path.exists(os.path.abspath(backup_dir)), \
("\n--%s-- Warning: Directory `%s' does not exist. . ." % (current_time(), fname))
backup_name = backup_dir + os.sep + os.path.split(fname)[1] + opts.backup_suffix
try:
shutil.copyfile(fname, backup_name)
except IOError:
message = "\n--%s-- Warning: Couldn't backup the file `%s' in `%s', check if you have enough permissions. "
tpl = (current_time(), fname, backup_dir)
sys.stderr.write(message % tpl) | backup_source_file(fname : str)
>>> backup_source_file('~/Desktop/lisp/test.lisp')
Create a backup copy of the source file. | Below is the the instruction that describes the task:
### Input:
backup_source_file(fname : str)
>>> backup_source_file('~/Desktop/lisp/test.lisp')
Create a backup copy of the source file.
### Response:
def backup_source_file(fname, options=None):
""" backup_source_file(fname : str)
>>> backup_source_file('~/Desktop/lisp/test.lisp')
Create a backup copy of the source file.
"""
opts = parse_options(options)
backup_dir = opts.backup_dir
assert os.path.exists(fname), \
("\n--%s-- Warning: File `%s' does not exist. . ." % (current_time(), fname))
assert os.path.exists(os.path.abspath(backup_dir)), \
("\n--%s-- Warning: Directory `%s' does not exist. . ." % (current_time(), fname))
backup_name = backup_dir + os.sep + os.path.split(fname)[1] + opts.backup_suffix
try:
shutil.copyfile(fname, backup_name)
except IOError:
message = "\n--%s-- Warning: Couldn't backup the file `%s' in `%s', check if you have enough permissions. "
tpl = (current_time(), fname, backup_dir)
sys.stderr.write(message % tpl) |
def xsd_error_log_string(xsd_error_log):
"""Return a human-readable string representation of the error log
returned by lxml's XMLSchema validator.
"""
ret = []
for error in xsd_error_log:
ret.append(
"ERROR ON LINE {}: {}".format(error.line, error.message.encode("utf-8"))
)
return "\n".join(ret) | Return a human-readable string representation of the error log
returned by lxml's XMLSchema validator. | Below is the the instruction that describes the task:
### Input:
Return a human-readable string representation of the error log
returned by lxml's XMLSchema validator.
### Response:
def xsd_error_log_string(xsd_error_log):
"""Return a human-readable string representation of the error log
returned by lxml's XMLSchema validator.
"""
ret = []
for error in xsd_error_log:
ret.append(
"ERROR ON LINE {}: {}".format(error.line, error.message.encode("utf-8"))
)
return "\n".join(ret) |
def _get_policy_set_uri(self, guid=None):
"""
Returns the full path that uniquely identifies
the subject endpoint.
"""
uri = self.uri + '/v1/policy-set'
if guid:
uri += '/' + urllib.quote_plus(guid)
return uri | Returns the full path that uniquely identifies
the subject endpoint. | Below is the the instruction that describes the task:
### Input:
Returns the full path that uniquely identifies
the subject endpoint.
### Response:
def _get_policy_set_uri(self, guid=None):
"""
Returns the full path that uniquely identifies
the subject endpoint.
"""
uri = self.uri + '/v1/policy-set'
if guid:
uri += '/' + urllib.quote_plus(guid)
return uri |
def check_docstring_first(src, filename='<unknown>'):
# type: (bytes, str) -> int
"""Returns nonzero if the source has what looks like a docstring that is
not at the beginning of the source.
A string will be considered a docstring if it is a STRING token with a
col offset of 0.
"""
found_docstring_line = None
found_code_line = None
tok_gen = tokenize_tokenize(io.BytesIO(src).readline)
for tok_type, _, (sline, scol), _, _ in tok_gen:
# Looks like a docstring!
if tok_type == tokenize.STRING and scol == 0:
if found_docstring_line is not None:
print(
'{}:{} Multiple module docstrings '
'(first docstring on line {}).'.format(
filename, sline, found_docstring_line,
),
)
return 1
elif found_code_line is not None:
print(
'{}:{} Module docstring appears after code '
'(code seen on line {}).'.format(
filename, sline, found_code_line,
),
)
return 1
else:
found_docstring_line = sline
elif tok_type not in NON_CODE_TOKENS and found_code_line is None:
found_code_line = sline
return 0 | Returns nonzero if the source has what looks like a docstring that is
not at the beginning of the source.
A string will be considered a docstring if it is a STRING token with a
col offset of 0. | Below is the the instruction that describes the task:
### Input:
Returns nonzero if the source has what looks like a docstring that is
not at the beginning of the source.
A string will be considered a docstring if it is a STRING token with a
col offset of 0.
### Response:
def check_docstring_first(src, filename='<unknown>'):
# type: (bytes, str) -> int
"""Returns nonzero if the source has what looks like a docstring that is
not at the beginning of the source.
A string will be considered a docstring if it is a STRING token with a
col offset of 0.
"""
found_docstring_line = None
found_code_line = None
tok_gen = tokenize_tokenize(io.BytesIO(src).readline)
for tok_type, _, (sline, scol), _, _ in tok_gen:
# Looks like a docstring!
if tok_type == tokenize.STRING and scol == 0:
if found_docstring_line is not None:
print(
'{}:{} Multiple module docstrings '
'(first docstring on line {}).'.format(
filename, sline, found_docstring_line,
),
)
return 1
elif found_code_line is not None:
print(
'{}:{} Module docstring appears after code '
'(code seen on line {}).'.format(
filename, sline, found_code_line,
),
)
return 1
else:
found_docstring_line = sline
elif tok_type not in NON_CODE_TOKENS and found_code_line is None:
found_code_line = sline
return 0 |
def populate(self, source=DEFAULT_SEGMENT_SERVER, segments=None,
pad=True, **kwargs):
"""Query the segment database for this flag's active segments.
This method assumes all of the metadata for each flag have been
filled. Minimally, the following attributes must be filled
.. autosummary::
~DataQualityFlag.name
~DataQualityFlag.known
Segments will be fetched from the database, with any
:attr:`~DataQualityFlag.padding` added on-the-fly.
This `DataQualityFlag` will be modified in-place.
Parameters
----------
source : `str`
source of segments for this flag. This must be
either a URL for a segment database or a path to a file on disk.
segments : `SegmentList`, optional
a list of segments during which to query, if not given,
existing known segments for this flag will be used.
pad : `bool`, optional, default: `True`
apply the `~DataQualityFlag.padding` associated with this
flag, default: `True`.
**kwargs
any other keyword arguments to be passed to
:meth:`DataQualityFlag.query` or :meth:`DataQualityFlag.read`.
Returns
-------
self : `DataQualityFlag`
a reference to this flag
"""
tmp = DataQualityDict()
tmp[self.name] = self
tmp.populate(source=source, segments=segments, pad=pad, **kwargs)
return tmp[self.name] | Query the segment database for this flag's active segments.
This method assumes all of the metadata for each flag have been
filled. Minimally, the following attributes must be filled
.. autosummary::
~DataQualityFlag.name
~DataQualityFlag.known
Segments will be fetched from the database, with any
:attr:`~DataQualityFlag.padding` added on-the-fly.
This `DataQualityFlag` will be modified in-place.
Parameters
----------
source : `str`
source of segments for this flag. This must be
either a URL for a segment database or a path to a file on disk.
segments : `SegmentList`, optional
a list of segments during which to query, if not given,
existing known segments for this flag will be used.
pad : `bool`, optional, default: `True`
apply the `~DataQualityFlag.padding` associated with this
flag, default: `True`.
**kwargs
any other keyword arguments to be passed to
:meth:`DataQualityFlag.query` or :meth:`DataQualityFlag.read`.
Returns
-------
self : `DataQualityFlag`
a reference to this flag | Below is the the instruction that describes the task:
### Input:
Query the segment database for this flag's active segments.
This method assumes all of the metadata for each flag have been
filled. Minimally, the following attributes must be filled
.. autosummary::
~DataQualityFlag.name
~DataQualityFlag.known
Segments will be fetched from the database, with any
:attr:`~DataQualityFlag.padding` added on-the-fly.
This `DataQualityFlag` will be modified in-place.
Parameters
----------
source : `str`
source of segments for this flag. This must be
either a URL for a segment database or a path to a file on disk.
segments : `SegmentList`, optional
a list of segments during which to query, if not given,
existing known segments for this flag will be used.
pad : `bool`, optional, default: `True`
apply the `~DataQualityFlag.padding` associated with this
flag, default: `True`.
**kwargs
any other keyword arguments to be passed to
:meth:`DataQualityFlag.query` or :meth:`DataQualityFlag.read`.
Returns
-------
self : `DataQualityFlag`
a reference to this flag
### Response:
def populate(self, source=DEFAULT_SEGMENT_SERVER, segments=None,
pad=True, **kwargs):
"""Query the segment database for this flag's active segments.
This method assumes all of the metadata for each flag have been
filled. Minimally, the following attributes must be filled
.. autosummary::
~DataQualityFlag.name
~DataQualityFlag.known
Segments will be fetched from the database, with any
:attr:`~DataQualityFlag.padding` added on-the-fly.
This `DataQualityFlag` will be modified in-place.
Parameters
----------
source : `str`
source of segments for this flag. This must be
either a URL for a segment database or a path to a file on disk.
segments : `SegmentList`, optional
a list of segments during which to query, if not given,
existing known segments for this flag will be used.
pad : `bool`, optional, default: `True`
apply the `~DataQualityFlag.padding` associated with this
flag, default: `True`.
**kwargs
any other keyword arguments to be passed to
:meth:`DataQualityFlag.query` or :meth:`DataQualityFlag.read`.
Returns
-------
self : `DataQualityFlag`
a reference to this flag
"""
tmp = DataQualityDict()
tmp[self.name] = self
tmp.populate(source=source, segments=segments, pad=pad, **kwargs)
return tmp[self.name] |
def unzip(zip_file_path, output_dir, permission=None):
"""
Unzip the given file into the given directory while preserving file permissions in the process.
Parameters
----------
zip_file_path : str
Path to the zip file
output_dir : str
Path to the directory where the it should be unzipped to
permission : octal int
Permission to set
"""
with zipfile.ZipFile(zip_file_path, 'r') as zip_ref:
# For each item in the zip file, extract the file and set permissions if available
for file_info in zip_ref.infolist():
name = file_info.filename
extracted_path = os.path.join(output_dir, name)
zip_ref.extract(name, output_dir)
_set_permissions(file_info, extracted_path)
_override_permissions(extracted_path, permission)
_override_permissions(output_dir, permission) | Unzip the given file into the given directory while preserving file permissions in the process.
Parameters
----------
zip_file_path : str
Path to the zip file
output_dir : str
Path to the directory where the it should be unzipped to
permission : octal int
Permission to set | Below is the the instruction that describes the task:
### Input:
Unzip the given file into the given directory while preserving file permissions in the process.
Parameters
----------
zip_file_path : str
Path to the zip file
output_dir : str
Path to the directory where the it should be unzipped to
permission : octal int
Permission to set
### Response:
def unzip(zip_file_path, output_dir, permission=None):
"""
Unzip the given file into the given directory while preserving file permissions in the process.
Parameters
----------
zip_file_path : str
Path to the zip file
output_dir : str
Path to the directory where the it should be unzipped to
permission : octal int
Permission to set
"""
with zipfile.ZipFile(zip_file_path, 'r') as zip_ref:
# For each item in the zip file, extract the file and set permissions if available
for file_info in zip_ref.infolist():
name = file_info.filename
extracted_path = os.path.join(output_dir, name)
zip_ref.extract(name, output_dir)
_set_permissions(file_info, extracted_path)
_override_permissions(extracted_path, permission)
_override_permissions(output_dir, permission) |
def toggle_comment_visibility(uid, comid, collapse, recid):
"""
Toggle the visibility of the given comment (collapse) for the
given user. Return the new visibility
:param uid: the user id for which the change applies
:param comid: the comment id to close/open
:param collapse: if the comment is to be closed (1) or opened (0)
:param recid: the record id to which the comment belongs
:return: if the comment is visible or not after the update
"""
# We rely on the client to tell if comment should be collapsed or
# developed, to ensure consistency between our internal state and
# client state. Even if not strictly necessary, we store the
# record ID for quicker retrieval of the collapsed comments of a
# given discussion page. To prevent unnecessary population of the
# table, only one distinct tuple (record ID, comment ID, user ID)
# can be inserted (due to table definition). For the same purpose
# we also check that comment to collapse exists, and corresponds
# to an existing record: we cannot rely on the recid found as part
# of the URL, as no former check is done. This rule is not applied
# when deleting an entry, as in the worst case no line would be
# removed. For optimized retrieval of row to delete, the id_bibrec
# column is used, though not strictly necessary.
if collapse:
query = """SELECT id_bibrec from "cmtRECORDCOMMENT" WHERE id=%s"""
params = (comid,)
res = run_sql(query, params)
if res:
query = """INSERT INTO "cmtCOLLAPSED" (id_bibrec, "id_cmtRECORDCOMMENT", id_user)
VALUES (%s, %s, %s)"""
params = (res[0][0], comid, uid)
run_sql(query, params)
return True
else:
query = """DELETE FROM "cmtCOLLAPSED" WHERE
"id_cmtRECORDCOMMENT"=%s and
id_user=%s and
id_bibrec=%s"""
params = (comid, uid, recid)
run_sql(query, params)
return False | Toggle the visibility of the given comment (collapse) for the
given user. Return the new visibility
:param uid: the user id for which the change applies
:param comid: the comment id to close/open
:param collapse: if the comment is to be closed (1) or opened (0)
:param recid: the record id to which the comment belongs
:return: if the comment is visible or not after the update | Below is the the instruction that describes the task:
### Input:
Toggle the visibility of the given comment (collapse) for the
given user. Return the new visibility
:param uid: the user id for which the change applies
:param comid: the comment id to close/open
:param collapse: if the comment is to be closed (1) or opened (0)
:param recid: the record id to which the comment belongs
:return: if the comment is visible or not after the update
### Response:
def toggle_comment_visibility(uid, comid, collapse, recid):
"""
Toggle the visibility of the given comment (collapse) for the
given user. Return the new visibility
:param uid: the user id for which the change applies
:param comid: the comment id to close/open
:param collapse: if the comment is to be closed (1) or opened (0)
:param recid: the record id to which the comment belongs
:return: if the comment is visible or not after the update
"""
# We rely on the client to tell if comment should be collapsed or
# developed, to ensure consistency between our internal state and
# client state. Even if not strictly necessary, we store the
# record ID for quicker retrieval of the collapsed comments of a
# given discussion page. To prevent unnecessary population of the
# table, only one distinct tuple (record ID, comment ID, user ID)
# can be inserted (due to table definition). For the same purpose
# we also check that comment to collapse exists, and corresponds
# to an existing record: we cannot rely on the recid found as part
# of the URL, as no former check is done. This rule is not applied
# when deleting an entry, as in the worst case no line would be
# removed. For optimized retrieval of row to delete, the id_bibrec
# column is used, though not strictly necessary.
if collapse:
query = """SELECT id_bibrec from "cmtRECORDCOMMENT" WHERE id=%s"""
params = (comid,)
res = run_sql(query, params)
if res:
query = """INSERT INTO "cmtCOLLAPSED" (id_bibrec, "id_cmtRECORDCOMMENT", id_user)
VALUES (%s, %s, %s)"""
params = (res[0][0], comid, uid)
run_sql(query, params)
return True
else:
query = """DELETE FROM "cmtCOLLAPSED" WHERE
"id_cmtRECORDCOMMENT"=%s and
id_user=%s and
id_bibrec=%s"""
params = (comid, uid, recid)
run_sql(query, params)
return False |
def task_property_present_predicate(service, task, prop):
""" True if the json_element passed is present for the task specified.
"""
try:
response = get_service_task(service, task)
except Exception as e:
pass
return (response is not None) and (prop in response) | True if the json_element passed is present for the task specified. | Below is the the instruction that describes the task:
### Input:
True if the json_element passed is present for the task specified.
### Response:
def task_property_present_predicate(service, task, prop):
""" True if the json_element passed is present for the task specified.
"""
try:
response = get_service_task(service, task)
except Exception as e:
pass
return (response is not None) and (prop in response) |
def set_start(self,time,pad = None):
"""
Set the start time of the datafind query.
@param time: GPS start time of query.
"""
if pad:
self.add_var_opt('gps-start-time', int(time)-int(pad))
else:
self.add_var_opt('gps-start-time', int(time))
self.__start = time
self.__set_output() | Set the start time of the datafind query.
@param time: GPS start time of query. | Below is the the instruction that describes the task:
### Input:
Set the start time of the datafind query.
@param time: GPS start time of query.
### Response:
def set_start(self,time,pad = None):
"""
Set the start time of the datafind query.
@param time: GPS start time of query.
"""
if pad:
self.add_var_opt('gps-start-time', int(time)-int(pad))
else:
self.add_var_opt('gps-start-time', int(time))
self.__start = time
self.__set_output() |
def block_pipeline_command(func):
"""
Prints error because some pipelined commands should be blocked when running in cluster-mode
"""
def inner(*args, **kwargs):
raise RedisClusterException(
"ERROR: Calling pipelined function {0} is blocked when running redis in cluster mode...".format(
func.__name__))
return inner | Prints error because some pipelined commands should be blocked when running in cluster-mode | Below is the the instruction that describes the task:
### Input:
Prints error because some pipelined commands should be blocked when running in cluster-mode
### Response:
def block_pipeline_command(func):
"""
Prints error because some pipelined commands should be blocked when running in cluster-mode
"""
def inner(*args, **kwargs):
raise RedisClusterException(
"ERROR: Calling pipelined function {0} is blocked when running redis in cluster mode...".format(
func.__name__))
return inner |
def write_data(self, buf):
"""Send data to the device.
If the write fails for any reason, an :obj:`IOError` exception
is raised.
:param buf: the data to send.
:type buf: list(int)
:return: success status.
:rtype: bool
"""
result = self.devh.controlMsg(
usb.ENDPOINT_OUT + usb.TYPE_CLASS + usb.RECIP_INTERFACE,
usb.REQ_SET_CONFIGURATION, buf, value=0x200, timeout=50)
if result != len(buf):
raise IOError('pywws.device_libusb.USBDevice.write_data failed')
return True | Send data to the device.
If the write fails for any reason, an :obj:`IOError` exception
is raised.
:param buf: the data to send.
:type buf: list(int)
:return: success status.
:rtype: bool | Below is the the instruction that describes the task:
### Input:
Send data to the device.
If the write fails for any reason, an :obj:`IOError` exception
is raised.
:param buf: the data to send.
:type buf: list(int)
:return: success status.
:rtype: bool
### Response:
def write_data(self, buf):
"""Send data to the device.
If the write fails for any reason, an :obj:`IOError` exception
is raised.
:param buf: the data to send.
:type buf: list(int)
:return: success status.
:rtype: bool
"""
result = self.devh.controlMsg(
usb.ENDPOINT_OUT + usb.TYPE_CLASS + usb.RECIP_INTERFACE,
usb.REQ_SET_CONFIGURATION, buf, value=0x200, timeout=50)
if result != len(buf):
raise IOError('pywws.device_libusb.USBDevice.write_data failed')
return True |
def init_completer(self):
"""Initialize the completion machinery.
This creates completion machinery that can be used by client code,
either interactively in-process (typically triggered by the readline
library), programatically (such as in test suites) or out-of-prcess
(typically over the network by remote frontends).
"""
from IPython.core.completer import IPCompleter
from IPython.core.completerlib import (module_completer,
magic_run_completer, cd_completer, reset_completer)
self.Completer = IPCompleter(shell=self,
namespace=self.user_ns,
global_namespace=self.user_global_ns,
alias_table=self.alias_manager.alias_table,
use_readline=self.has_readline,
config=self.config,
)
self.configurables.append(self.Completer)
# Add custom completers to the basic ones built into IPCompleter
sdisp = self.strdispatchers.get('complete_command', StrDispatch())
self.strdispatchers['complete_command'] = sdisp
self.Completer.custom_completers = sdisp
self.set_hook('complete_command', module_completer, str_key = 'import')
self.set_hook('complete_command', module_completer, str_key = 'from')
self.set_hook('complete_command', magic_run_completer, str_key = '%run')
self.set_hook('complete_command', cd_completer, str_key = '%cd')
self.set_hook('complete_command', reset_completer, str_key = '%reset')
# Only configure readline if we truly are using readline. IPython can
# do tab-completion over the network, in GUIs, etc, where readline
# itself may be absent
if self.has_readline:
self.set_readline_completer() | Initialize the completion machinery.
This creates completion machinery that can be used by client code,
either interactively in-process (typically triggered by the readline
library), programatically (such as in test suites) or out-of-prcess
(typically over the network by remote frontends). | Below is the the instruction that describes the task:
### Input:
Initialize the completion machinery.
This creates completion machinery that can be used by client code,
either interactively in-process (typically triggered by the readline
library), programatically (such as in test suites) or out-of-prcess
(typically over the network by remote frontends).
### Response:
def init_completer(self):
"""Initialize the completion machinery.
This creates completion machinery that can be used by client code,
either interactively in-process (typically triggered by the readline
library), programatically (such as in test suites) or out-of-prcess
(typically over the network by remote frontends).
"""
from IPython.core.completer import IPCompleter
from IPython.core.completerlib import (module_completer,
magic_run_completer, cd_completer, reset_completer)
self.Completer = IPCompleter(shell=self,
namespace=self.user_ns,
global_namespace=self.user_global_ns,
alias_table=self.alias_manager.alias_table,
use_readline=self.has_readline,
config=self.config,
)
self.configurables.append(self.Completer)
# Add custom completers to the basic ones built into IPCompleter
sdisp = self.strdispatchers.get('complete_command', StrDispatch())
self.strdispatchers['complete_command'] = sdisp
self.Completer.custom_completers = sdisp
self.set_hook('complete_command', module_completer, str_key = 'import')
self.set_hook('complete_command', module_completer, str_key = 'from')
self.set_hook('complete_command', magic_run_completer, str_key = '%run')
self.set_hook('complete_command', cd_completer, str_key = '%cd')
self.set_hook('complete_command', reset_completer, str_key = '%reset')
# Only configure readline if we truly are using readline. IPython can
# do tab-completion over the network, in GUIs, etc, where readline
# itself may be absent
if self.has_readline:
self.set_readline_completer() |
def rseq_qc(job, job_vars):
"""
QC module: contains QC metrics and information about the BAM post alignment
job_vars: tuple Tuple of dictionaries: input_args and ids
"""
input_args, ids = job_vars
work_dir = job.fileStore.getLocalTempDir()
uuid = input_args['uuid']
sudo = input_args['sudo']
# I/O
return_input_paths(job, work_dir, ids, 'sorted.bam', 'sorted.bam.bai')
# Command
docker_call(tool='jvivian/qc', tool_parameters=['/opt/cgl-docker-lib/RseqQC_v2.sh', '/data/sorted.bam', uuid],
work_dir=work_dir, sudo=sudo)
# Write to FileStore
output_files = [f for f in glob.glob(os.path.join(work_dir, '*')) if 'sorted.bam' not in f]
tarball_files(work_dir, tar_name='qc.tar.gz', uuid=None, files=output_files)
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'qc.tar.gz')) | QC module: contains QC metrics and information about the BAM post alignment
job_vars: tuple Tuple of dictionaries: input_args and ids | Below is the the instruction that describes the task:
### Input:
QC module: contains QC metrics and information about the BAM post alignment
job_vars: tuple Tuple of dictionaries: input_args and ids
### Response:
def rseq_qc(job, job_vars):
"""
QC module: contains QC metrics and information about the BAM post alignment
job_vars: tuple Tuple of dictionaries: input_args and ids
"""
input_args, ids = job_vars
work_dir = job.fileStore.getLocalTempDir()
uuid = input_args['uuid']
sudo = input_args['sudo']
# I/O
return_input_paths(job, work_dir, ids, 'sorted.bam', 'sorted.bam.bai')
# Command
docker_call(tool='jvivian/qc', tool_parameters=['/opt/cgl-docker-lib/RseqQC_v2.sh', '/data/sorted.bam', uuid],
work_dir=work_dir, sudo=sudo)
# Write to FileStore
output_files = [f for f in glob.glob(os.path.join(work_dir, '*')) if 'sorted.bam' not in f]
tarball_files(work_dir, tar_name='qc.tar.gz', uuid=None, files=output_files)
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'qc.tar.gz')) |
def getprefixes(self):
"""Add prefixes for each namespace referenced by parameter types."""
namespaces = []
for l in (self.params, self.types):
for t,r in l:
ns = r.namespace()
if ns[1] is None: continue
if ns[1] in namespaces: continue
if Namespace.xs(ns) or Namespace.xsd(ns):
continue
namespaces.append(ns[1])
if t == r: continue
ns = t.namespace()
if ns[1] is None: continue
if ns[1] in namespaces: continue
namespaces.append(ns[1])
i = 0
namespaces.sort()
for u in namespaces:
p = self.nextprefix()
ns = (p, u)
self.prefixes.append(ns) | Add prefixes for each namespace referenced by parameter types. | Below is the the instruction that describes the task:
### Input:
Add prefixes for each namespace referenced by parameter types.
### Response:
def getprefixes(self):
    """Add prefixes for each namespace referenced by parameter types.

    Walks both the parameter list and the (return) type list, collects every
    distinct namespace URI that is not a built-in XML-Schema namespace, and
    registers a freshly generated prefix for each one in ``self.prefixes``.
    """
    namespaces = []
    for l in (self.params, self.types):
        for t, r in l:
            # namespace of the resolved type
            ns = r.namespace()
            if ns[1] is None: continue
            if ns[1] in namespaces: continue
            if Namespace.xs(ns) or Namespace.xsd(ns):
                # XML-Schema namespaces already have well-known prefixes
                continue
            namespaces.append(ns[1])
            if t == r: continue
            # the declared type may live in a different namespace than
            # the resolved one
            ns = t.namespace()
            if ns[1] is None: continue
            if ns[1] in namespaces: continue
            namespaces.append(ns[1])
    # (removed an unused local counter `i = 0` that was never read)
    namespaces.sort()
    for u in namespaces:
        # bind the next generated prefix to this namespace URI
        p = self.nextprefix()
        ns = (p, u)
        self.prefixes.append(ns)
def _validate_indexers(self, key):
""" Make sanity checks """
for dim, k in zip(self.dims, key):
if isinstance(k, BASIC_INDEXING_TYPES):
pass
else:
if not isinstance(k, Variable):
k = np.asarray(k)
if k.ndim > 1:
raise IndexError(
"Unlabeled multi-dimensional array cannot be "
"used for indexing: {}".format(k))
if k.dtype.kind == 'b':
if self.shape[self.get_axis_num(dim)] != len(k):
raise IndexError(
"Boolean array size {0:d} is used to index array "
"with shape {1:s}.".format(len(k),
str(self.shape)))
if k.ndim > 1:
raise IndexError("{}-dimensional boolean indexing is "
"not supported. ".format(k.ndim))
if getattr(k, 'dims', (dim, )) != (dim, ):
raise IndexError(
"Boolean indexer should be unlabeled or on the "
"same dimension to the indexed array. Indexer is "
"on {0:s} but the target dimension is "
"{1:s}.".format(str(k.dims), dim)) | Make sanity checks | Below is the the instruction that describes the task:
### Input:
Make sanity checks
### Response:
def _validate_indexers(self, key):
    """ Make sanity checks """
    # `key` is expected to hold one indexer per dimension, aligned with
    # self.dims; extra entries in either are ignored by zip.
    for dim, k in zip(self.dims, key):
        if isinstance(k, BASIC_INDEXING_TYPES):
            # plain ints and slices are always valid -- nothing to check
            pass
        else:
            if not isinstance(k, Variable):
                # coerce array-likes so ndim/dtype can be inspected
                k = np.asarray(k)
                if k.ndim > 1:
                    raise IndexError(
                        "Unlabeled multi-dimensional array cannot be "
                        "used for indexing: {}".format(k))
            if k.dtype.kind == 'b':
                # a boolean mask must match the length of the axis it indexes
                if self.shape[self.get_axis_num(dim)] != len(k):
                    raise IndexError(
                        "Boolean array size {0:d} is used to index array "
                        "with shape {1:s}.".format(len(k),
                                                   str(self.shape)))
                if k.ndim > 1:
                    # only reachable for Variable masks; bare arrays with
                    # ndim > 1 were already rejected above
                    raise IndexError("{}-dimensional boolean indexing is "
                                     "not supported. ".format(k.ndim))
                if getattr(k, 'dims', (dim, )) != (dim, ):
                    # a labeled mask must live on the dimension it indexes
                    raise IndexError(
                        "Boolean indexer should be unlabeled or on the "
                        "same dimension to the indexed array. Indexer is "
                        "on {0:s} but the target dimension is "
                        "{1:s}.".format(str(k.dims), dim)) |
def make_list_elms_pretty(self):
""" make any list element read like a list
"""
for elm in self.parser.getElementsByTag(self.top_node, tag='li'):
elm.text = r'• {}'.format(elm.text) | make any list element read like a list | Below is the the instruction that describes the task:
### Input:
make any list element read like a list
### Response:
def make_list_elms_pretty(self):
    """Prefix each <li> element's text with a bullet so it reads like a list."""
    bullet_template = r'• {}'
    for list_item in self.parser.getElementsByTag(self.top_node, tag='li'):
        list_item.text = bullet_template.format(list_item.text)
def add_account_to_group(self, account, group):
""" Add account to group. """
lgroup: OpenldapGroup = self._get_group(group.name)
person: OpenldapAccount = self._get_account(account.username)
changes = changeset(lgroup, {})
changes = lgroup.add_member(changes, person)
save(changes, database=self._database) | Add account to group. | Below is the the instruction that describes the task:
### Input:
Add account to group.
### Response:
def add_account_to_group(self, account, group):
    """ Add account to group. """
    # Resolve both LDAP objects, then record the membership change and
    # persist it in a single save.
    person: OpenldapAccount = self._get_account(account.username)
    lgroup: OpenldapGroup = self._get_group(group.name)
    pending = changeset(lgroup, {})
    pending = lgroup.add_member(pending, person)
    save(pending, database=self._database)
def converged_ionic(self):
"""
Checks that ionic step convergence has been reached, i.e. that vasp
exited before reaching the max ionic steps for a relaxation run
"""
nsw = self.parameters.get("NSW", 0)
return nsw <= 1 or len(self.ionic_steps) < nsw | Checks that ionic step convergence has been reached, i.e. that vasp
exited before reaching the max ionic steps for a relaxation run | Below is the the instruction that describes the task:
### Input:
Checks that ionic step convergence has been reached, i.e. that vasp
exited before reaching the max ionic steps for a relaxation run
### Response:
def converged_ionic(self):
    """
    Checks that ionic step convergence has been reached, i.e. that vasp
    exited before reaching the max ionic steps for a relaxation run
    """
    max_ionic_steps = self.parameters.get("NSW", 0)
    if max_ionic_steps <= 1:
        # static (or single-step) run: nothing to converge
        return True
    # converged iff VASP stopped before exhausting the allowed steps
    return len(self.ionic_steps) < max_ionic_steps
def filter_models(request, models, exclude):
"""
Returns (model, perm,) for all models that match models/exclude patterns
and are visible by current user.
"""
items = get_avail_models(request)
included = []
def full_name(model):
return '%s.%s' % (model.__module__, model.__name__)
# I beleive that that implemented
# O(len(patterns)*len(matched_patterns)*len(all_models))
# algorythm is fine for model lists because they are small and admin
# performance is not a bottleneck. If it is not the case then the code
# should be optimized.
if len(models) == 0:
included = items
else:
for pattern in models:
wildcard_models = []
for item in items:
model, perms = item
model_str = full_name(model)
if model_str == pattern:
# exact match
included.append(item)
elif fnmatch(model_str, pattern) and \
item not in wildcard_models:
# wildcard match, put item in separate list so it can be
# sorted alphabetically later
wildcard_models.append(item)
if wildcard_models:
# sort wildcard matches alphabetically before adding them
wildcard_models.sort(
key=lambda x: x[0]._meta.verbose_name_plural
)
included += wildcard_models
result = included[:]
for pattern in exclude:
for item in included:
model, perms = item
if fnmatch(full_name(model), pattern):
try:
result.remove(item)
except ValueError: # if the item was already removed skip
pass
return result | Returns (model, perm,) for all models that match models/exclude patterns
and are visible by current user. | Below is the the instruction that describes the task:
### Input:
Returns (model, perm,) for all models that match models/exclude patterns
and are visible by current user.
### Response:
def filter_models(request, models, exclude):
    """
    Returns (model, perm,) for all models that match models/exclude patterns
    and are visible by current user.

    request: current request, used to determine which models are visible.
    models: iterable of dotted-path patterns (fnmatch wildcards allowed);
        an empty iterable means "include everything".
    exclude: iterable of dotted-path patterns removed from the result.
    """
    items = get_avail_models(request)
    included = []
    def full_name(model):
        # dotted path used for pattern matching, e.g. 'app.models.Book'
        return '%s.%s' % (model.__module__, model.__name__)
    # I believe that the implemented
    # O(len(patterns)*len(matched_patterns)*len(all_models))
    # algorithm is fine for model lists because they are small and admin
    # performance is not a bottleneck. If it is not the case then the code
    # should be optimized.
    if len(models) == 0:
        included = items
    else:
        for pattern in models:
            wildcard_models = []
            for item in items:
                model, perms = item
                model_str = full_name(model)
                if model_str == pattern:
                    # exact match
                    included.append(item)
                elif fnmatch(model_str, pattern) and \
                        item not in wildcard_models:
                    # wildcard match, put item in separate list so it can be
                    # sorted alphabetically later
                    wildcard_models.append(item)
            if wildcard_models:
                # sort wildcard matches alphabetically before adding them
                wildcard_models.sort(
                    key=lambda x: x[0]._meta.verbose_name_plural
                )
                included += wildcard_models
    result = included[:]
    for pattern in exclude:
        for item in included:
            model, perms = item
            if fnmatch(full_name(model), pattern):
                try:
                    result.remove(item)
                except ValueError:  # if the item was already removed skip
                    pass
    return result |
def pack(self):
"""
Pack the frame into a string according to the following scheme:
+-+-+-+-+-------+-+-------------+-------------------------------+
|F|R|R|R| opcode|M| Payload len | Extended payload length |
|I|S|S|S| (4) |A| (7) | (16/64) |
|N|V|V|V| |S| | (if payload len==126/127) |
| |1|2|3| |K| | |
+-+-+-+-+-------+-+-------------+ - - - - - - - - - - - - - - - +
| Extended payload length continued, if payload len == 127 |
+ - - - - - - - - - - - - - - - +-------------------------------+
| |Masking-key, if MASK set to 1 |
+-------------------------------+-------------------------------+
| Masking-key (continued) | Payload Data |
+-------------------------------- - - - - - - - - - - - - - - - +
: Payload Data continued ... :
+ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +
| Payload Data continued ... |
+---------------------------------------------------------------+
"""
header = struct.pack('!B', (self.final << 7) | (self.rsv1 << 6)
| (self.rsv2 << 5) | (self.rsv3 << 4)
| (self.opcode & 0xf))
mask = bool(self.masking_key) << 7
payload_len = len(self.payload)
if payload_len <= 125:
header += struct.pack('!B', mask | payload_len)
elif payload_len < (1 << 16):
header += struct.pack('!BH', mask | 126, payload_len)
elif payload_len < (1 << 63):
header += struct.pack('!BQ', mask | 127, payload_len)
else:
# FIXME: RFC 6455 defines an action for this...
raise Exception('the payload length is too damn high!')
if mask:
return header + self.masking_key + self.mask_payload()
return header + self.payload | Pack the frame into a string according to the following scheme:
+-+-+-+-+-------+-+-------------+-------------------------------+
|F|R|R|R| opcode|M| Payload len | Extended payload length |
|I|S|S|S| (4) |A| (7) | (16/64) |
|N|V|V|V| |S| | (if payload len==126/127) |
| |1|2|3| |K| | |
+-+-+-+-+-------+-+-------------+ - - - - - - - - - - - - - - - +
| Extended payload length continued, if payload len == 127 |
+ - - - - - - - - - - - - - - - +-------------------------------+
| |Masking-key, if MASK set to 1 |
+-------------------------------+-------------------------------+
| Masking-key (continued) | Payload Data |
+-------------------------------- - - - - - - - - - - - - - - - +
: Payload Data continued ... :
+ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +
| Payload Data continued ... |
+---------------------------------------------------------------+ | Below is the the instruction that describes the task:
### Input:
Pack the frame into a string according to the following scheme:
+-+-+-+-+-------+-+-------------+-------------------------------+
|F|R|R|R| opcode|M| Payload len | Extended payload length |
|I|S|S|S| (4) |A| (7) | (16/64) |
|N|V|V|V| |S| | (if payload len==126/127) |
| |1|2|3| |K| | |
+-+-+-+-+-------+-+-------------+ - - - - - - - - - - - - - - - +
| Extended payload length continued, if payload len == 127 |
+ - - - - - - - - - - - - - - - +-------------------------------+
| |Masking-key, if MASK set to 1 |
+-------------------------------+-------------------------------+
| Masking-key (continued) | Payload Data |
+-------------------------------- - - - - - - - - - - - - - - - +
: Payload Data continued ... :
+ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +
| Payload Data continued ... |
+---------------------------------------------------------------+
### Response:
def pack(self):
    """
    Serialize the frame to its on-the-wire byte string (RFC 6455 framing).

    Layout: one byte of FIN/RSV flags plus the 4-bit opcode, one byte
    holding the MASK flag and a 7-bit length, an optional 16- or 64-bit
    extended length (used when the payload is 126 bytes or longer), the
    4-byte masking key when masking is enabled, and finally the (possibly
    masked) payload.
    """
    flag_bits = (self.final << 7) | (self.rsv1 << 6) | (self.rsv2 << 5) | (self.rsv3 << 4)
    header = struct.pack('!B', flag_bits | (self.opcode & 0xf))
    mask = bool(self.masking_key) << 7
    payload_len = len(self.payload)
    if payload_len <= 125:
        # length fits directly in the 7-bit field
        header += struct.pack('!B', mask | payload_len)
    elif payload_len < (1 << 16):
        # 7-bit field holds 126; the real length follows as an unsigned short
        header += struct.pack('!BH', mask | 126, payload_len)
    elif payload_len < (1 << 63):
        # 7-bit field holds 127; the real length follows as a 64-bit integer
        header += struct.pack('!BQ', mask | 127, payload_len)
    else:
        # FIXME: RFC 6455 defines an action for this...
        raise Exception('the payload length is too damn high!')
    if not mask:
        return header + self.payload
    return header + self.masking_key + self.mask_payload()
async def dump_varint(writer, val):
"""
Binary dump of the variable size integer
:param writer:
:param val:
:return:
"""
if val <= 63:
return await dump_varint_t(writer, PortableRawSizeMark.BYTE, val)
elif val <= 16383:
return await dump_varint_t(writer, PortableRawSizeMark.WORD, val)
elif val <= 1073741823:
return await dump_varint_t(writer, PortableRawSizeMark.DWORD, val)
else:
if val > 4611686018427387903:
raise ValueError('Int too big')
return await dump_varint_t(writer, PortableRawSizeMark.INT64, val) | Binary dump of the variable size integer
:param writer:
:param val:
:return: | Below is the the instruction that describes the task:
### Input:
Binary dump of the variable size integer
:param writer:
:param val:
:return:
### Response:
async def dump_varint(writer, val):
    """
    Binary dump of the variable size integer
    :param writer:
    :param val:
    :return:
    """
    # Reject values that do not fit in the 62 usable bits of the INT64
    # encoding before anything is written.
    if val > 4611686018427387903:
        raise ValueError('Int too big')
    # Pick the narrowest width whose 6/14/30/62-bit payload can hold val.
    if val <= 63:
        width = PortableRawSizeMark.BYTE
    elif val <= 16383:
        width = PortableRawSizeMark.WORD
    elif val <= 1073741823:
        width = PortableRawSizeMark.DWORD
    else:
        width = PortableRawSizeMark.INT64
    return await dump_varint_t(writer, width, val)
def _check_ip_available(ip_addr):
'''
Proxmox VMs refuse to start when the IP is already being used.
This function can be used to prevent VMs being created with duplicate
IP's or to generate a warning.
'''
for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)):
vm_config = vm_details['config']
if ip_addr in vm_config['ip_address'] or vm_config['ip_address'] == ip_addr:
log.debug('IP "%s" is already defined', ip_addr)
return False
log.debug('IP \'%s\' is available to be defined', ip_addr)
return True | Proxmox VMs refuse to start when the IP is already being used.
This function can be used to prevent VMs being created with duplicate
IP's or to generate a warning. | Below is the the instruction that describes the task:
### Input:
Proxmox VMs refuse to start when the IP is already being used.
This function can be used to prevent VMs being created with duplicate
IP's or to generate a warning.
### Response:
def _check_ip_available(ip_addr):
    '''
    Proxmox VMs refuse to start when the IP is already being used.
    This function can be used to prevent VMs being created with duplicate
    IP's or to generate a warning.

    Returns True when no existing VM configuration references ip_addr,
    False otherwise.
    '''
    for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=True)):
        vm_config = vm_details['config']
        # Substring membership already covers exact equality, so the former
        # `... or vm_config['ip_address'] == ip_addr` clause was redundant.
        # NOTE(review): 'ip_address' may hold a delimited list, which is
        # presumably why a substring test is used -- confirm before tightening.
        if ip_addr in vm_config['ip_address']:
            log.debug('IP "%s" is already defined', ip_addr)
            return False
    log.debug('IP \'%s\' is available to be defined', ip_addr)
    return True
def load_python_global(module, name):
"""
Evaluate an OpenMath symbol describing a global Python object
EXAMPLES::
>>> from openmath.convert_pickle import to_python
>>> from openmath.convert_pickle import load_python_global
>>> load_python_global('math', 'sin')
<built-in function sin>
>>> from openmath import openmath as om
>>> o = om.OMSymbol(cdbase="http://python.org/", cd='math', name='sin')
>>> to_python(o)
<built-in function sin>
"""
# The builtin module has been renamed in python3
if module == '__builtin__' and six.PY3:
module = 'builtins'
module = importlib.import_module(module)
return getattr(module, name) | Evaluate an OpenMath symbol describing a global Python object
EXAMPLES::
>>> from openmath.convert_pickle import to_python
>>> from openmath.convert_pickle import load_python_global
>>> load_python_global('math', 'sin')
<built-in function sin>
>>> from openmath import openmath as om
>>> o = om.OMSymbol(cdbase="http://python.org/", cd='math', name='sin')
>>> to_python(o)
<built-in function sin> | Below is the the instruction that describes the task:
### Input:
Evaluate an OpenMath symbol describing a global Python object
EXAMPLES::
>>> from openmath.convert_pickle import to_python
>>> from openmath.convert_pickle import load_python_global
>>> load_python_global('math', 'sin')
<built-in function sin>
>>> from openmath import openmath as om
>>> o = om.OMSymbol(cdbase="http://python.org/", cd='math', name='sin')
>>> to_python(o)
<built-in function sin>
### Response:
def load_python_global(module, name):
    """
    Evaluate an OpenMath symbol describing a global Python object

    :param module: dotted module path the object lives in
    :param name: attribute name of the object within that module
    :return: the named global object

    EXAMPLES::
        >>> from openmath.convert_pickle import to_python
        >>> from openmath.convert_pickle import load_python_global
        >>> load_python_global('math', 'sin')
        <built-in function sin>
        >>> from openmath import openmath as om
        >>> o = om.OMSymbol(cdbase="http://python.org/", cd='math', name='sin')
        >>> to_python(o)
        <built-in function sin>
    """
    # The builtin module has been renamed in python3; ask the interpreter
    # directly instead of depending on the third-party `six` package.
    if module == '__builtin__' and sys.version_info[0] >= 3:
        module = 'builtins'
    mod = importlib.import_module(module)
    return getattr(mod, name)
def load_config_yaml(self, flags, config_dict):
""" Load config dict and yaml dict and then override both with flags dict. """
if config_dict is None:
print('Config File not specified. Using only input flags.')
return flags
try:
config_yaml_dict = self.cfg_from_file(flags['YAML_FILE'], config_dict)
except KeyError:
print('Yaml File not specified. Using only input flags and config file.')
return config_dict
print('Using input flags, config file, and yaml file.')
config_yaml_flags_dict = self._merge_a_into_b_simple(flags, config_yaml_dict)
return config_yaml_flags_dict | Load config dict and yaml dict and then override both with flags dict. | Below is the the instruction that describes the task:
### Input:
Load config dict and yaml dict and then override both with flags dict.
### Response:
def load_config_yaml(self, flags, config_dict):
    """Merge the flags dict, config dict and yaml dict.

    Flags override both other sources; a missing config file yields the
    flags alone, and a missing yaml file yields the config dict alone.
    """
    if config_dict is None:
        print('Config File not specified. Using only input flags.')
        return flags
    try:
        yaml_merged = self.cfg_from_file(flags['YAML_FILE'], config_dict)
    except KeyError:
        # no 'YAML_FILE' entry in flags
        print('Yaml File not specified. Using only input flags and config file.')
        return config_dict
    print('Using input flags, config file, and yaml file.')
    return self._merge_a_into_b_simple(flags, yaml_merged)
def render_message(self):
"""
渲染消息
:return: 渲染后的消息
"""
message = None
if self.title:
message = '标题:{0}'.format(self.title)
if self.message_time:
message = '{0}\n时间:{1}'.format(message, self.time)
if message:
message = '{0}\n内容:{1}'.format(message, self.content)
else:
message = self.content
return message | 渲染消息
:return: 渲染后的消息 | Below is the the instruction that describes the task:
### Input:
渲染消息
:return: 渲染后的消息
### Response:
def render_message(self):
    """
    渲染消息
    :return: 渲染后的消息
    """
    # Assemble the rendered text piece by piece: optional title, optional
    # timestamp, then the content. The title/timestamp checks are
    # deliberately independent, matching the original flat structure.
    rendered = None
    if self.title:
        rendered = '标题:{0}'.format(self.title)
    if self.message_time:
        # NOTE(review): the guard checks `message_time` but the rendered
        # value is `self.time` -- confirm both attributes are intended.
        rendered = '{0}\n时间:{1}'.format(rendered, self.time)
    return '{0}\n内容:{1}'.format(rendered, self.content) if rendered else self.content
def factory(cls, object_source):
"""Return a proper object
"""
if object_source.type is ObjectRaw.Types.object:
return ObjectObject(object_source)
elif object_source.type not in ObjectRaw.Types or object_source.type is ObjectRaw.Types.type:
return ObjectType(object_source)
elif object_source.type is ObjectRaw.Types.array:
return ObjectArray(object_source)
elif object_source.type is ObjectRaw.Types.dynamic:
return ObjectDynamic(object_source)
elif object_source.type is ObjectRaw.Types.const:
return ObjectConst(object_source)
elif object_source.type is ObjectRaw.Types.enum:
return ObjectEnum(object_source)
else:
return Object(object_source) | Return a proper object | Below is the the instruction that describes the task:
### Input:
Return a proper object
### Response:
def factory(cls, object_source):
    """Return a proper object

    Dispatches on ``object_source.type`` and wraps the raw source in the
    matching Object* subclass.
    """
    if object_source.type is ObjectRaw.Types.object:
        return ObjectObject(object_source)
    elif object_source.type not in ObjectRaw.Types or object_source.type is ObjectRaw.Types.type:
        # a type not known to ObjectRaw.Types is treated as a plain type
        return ObjectType(object_source)
    elif object_source.type is ObjectRaw.Types.array:
        return ObjectArray(object_source)
    elif object_source.type is ObjectRaw.Types.dynamic:
        return ObjectDynamic(object_source)
    elif object_source.type is ObjectRaw.Types.const:
        return ObjectConst(object_source)
    elif object_source.type is ObjectRaw.Types.enum:
        return ObjectEnum(object_source)
    else:
        # remaining known types get the generic wrapper
        return Object(object_source) |
def _get_enabled_disabled(enabled_prop="true"):
'''
DRY: Get all service FMRIs and their enabled property
'''
ret = set()
cmd = '/usr/bin/svcprop -c -p general/enabled "*"'
lines = __salt__['cmd.run_stdout'](cmd, python_shell=False).splitlines()
for line in lines:
comps = line.split()
if not comps:
continue
if comps[2] == enabled_prop:
ret.add(comps[0].split("/:properties")[0])
return sorted(ret) | DRY: Get all service FMRIs and their enabled property | Below is the the instruction that describes the task:
### Input:
DRY: Get all service FMRIs and their enabled property
### Response:
def _get_enabled_disabled(enabled_prop="true"):
    '''
    DRY: Get all service FMRIs and their enabled property
    '''
    cmd = '/usr/bin/svcprop -c -p general/enabled "*"'
    output = __salt__['cmd.run_stdout'](cmd, python_shell=False)
    fmris = set()
    for line in output.splitlines():
        fields = line.split()
        # each line looks like: <fmri>/:properties/... general/enabled <value>
        if fields and fields[2] == enabled_prop:
            fmris.add(fields[0].split("/:properties")[0])
    return sorted(fmris)
def replace_cancel_operation_by_id(cls, cancel_operation_id, cancel_operation, **kwargs):
"""Replace CancelOperation
Replace all attributes of CancelOperation
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_cancel_operation_by_id(cancel_operation_id, cancel_operation, async=True)
>>> result = thread.get()
:param async bool
:param str cancel_operation_id: ID of cancelOperation to replace (required)
:param CancelOperation cancel_operation: Attributes of cancelOperation to replace (required)
:return: CancelOperation
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._replace_cancel_operation_by_id_with_http_info(cancel_operation_id, cancel_operation, **kwargs)
else:
(data) = cls._replace_cancel_operation_by_id_with_http_info(cancel_operation_id, cancel_operation, **kwargs)
return data | Replace CancelOperation
Replace all attributes of CancelOperation
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_cancel_operation_by_id(cancel_operation_id, cancel_operation, async=True)
>>> result = thread.get()
:param async bool
:param str cancel_operation_id: ID of cancelOperation to replace (required)
:param CancelOperation cancel_operation: Attributes of cancelOperation to replace (required)
:return: CancelOperation
If the method is called asynchronously,
returns the request thread. | Below is the the instruction that describes the task:
### Input:
Replace CancelOperation
Replace all attributes of CancelOperation
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_cancel_operation_by_id(cancel_operation_id, cancel_operation, async=True)
>>> result = thread.get()
:param async bool
:param str cancel_operation_id: ID of cancelOperation to replace (required)
:param CancelOperation cancel_operation: Attributes of cancelOperation to replace (required)
:return: CancelOperation
If the method is called asynchronously,
returns the request thread.
### Response:
def replace_cancel_operation_by_id(cls, cancel_operation_id, cancel_operation, **kwargs):
    """Replace CancelOperation

    Replace all attributes of CancelOperation.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.replace_cancel_operation_by_id(cancel_operation_id, cancel_operation, async=True)
    >>> result = thread.get()
    :param async bool
    :param str cancel_operation_id: ID of cancelOperation to replace (required)
    :param CancelOperation cancel_operation: Attributes of cancelOperation to replace (required)
    :return: CancelOperation
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # The helper is invoked identically in both modes: asynchronously it
    # returns the request thread, synchronously the response data -- the
    # original's two branches produced the same value either way.
    return cls._replace_cancel_operation_by_id_with_http_info(
        cancel_operation_id, cancel_operation, **kwargs)
def breadth(dirs):
"""
Crawl through directories like os.walk, but use a 'breadth first' approach
(os.walk uses 'depth first')
"""
while dirs:
next_dirs = []
print("Dirs: '{}'".format(dirs))
for d in dirs:
next_dirs = []
try:
for name in os.listdir(d):
p = os.path.join(d, name)
if os.path.isdir(p):
print(p)
next_dirs.append(p)
except PermissionError as nallowed:
print(nallowed)
dirs = next_dirs
if dirs:
yield dirs | Crawl through directories like os.walk, but use a 'breadth first' approach
(os.walk uses 'depth first') | Below is the the instruction that describes the task:
### Input:
Crawl through directories like os.walk, but use a 'breadth first' approach
(os.walk uses 'depth first')
### Response:
def breadth(dirs):
    """
    Crawl through directories like os.walk, but use a 'breadth first' approach
    (os.walk uses 'depth first').

    dirs: list of directory paths forming the starting level.
    Yields the list of subdirectories found at each successive depth level.
    Unreadable directories are reported and skipped.
    """
    while dirs:
        # Bug fix: next_dirs was also reset inside the `for d in dirs` loop,
        # which discarded the children of every directory except the last
        # one on each level. Accumulate across the whole level instead.
        next_dirs = []
        print("Dirs: '{}'".format(dirs))
        for d in dirs:
            try:
                for name in os.listdir(d):
                    p = os.path.join(d, name)
                    if os.path.isdir(p):
                        print(p)
                        next_dirs.append(p)
            except PermissionError as nallowed:
                print(nallowed)
        dirs = next_dirs
        if dirs:
            yield dirs
def normalize_pdf(mu, pofmu):
"""
Takes a function pofmu defined at rate sample values mu and
normalizes it to be a suitable pdf. Both mu and pofmu must be
arrays or lists of the same length.
"""
if min(pofmu) < 0:
raise ValueError("Probabilities cannot be negative, don't ask me to "
"normalize a function with negative values!")
if min(mu) < 0:
raise ValueError("Rates cannot be negative, don't ask me to "
"normalize a function over a negative domain!")
dp = integral_element(mu, pofmu)
return mu, pofmu/sum(dp) | Takes a function pofmu defined at rate sample values mu and
normalizes it to be a suitable pdf. Both mu and pofmu must be
arrays or lists of the same length. | Below is the the instruction that describes the task:
### Input:
Takes a function pofmu defined at rate sample values mu and
normalizes it to be a suitable pdf. Both mu and pofmu must be
arrays or lists of the same length.
### Response:
def normalize_pdf(mu, pofmu):
    """
    Normalize the sampled function ``pofmu`` (defined at the rate samples
    ``mu``) so that it integrates to one, making it a suitable pdf.
    Both inputs must be arrays or lists of the same length.
    """
    # Validate probabilities first, then the domain -- same order as before,
    # so the same error wins when both inputs are invalid.
    if min(pofmu) < 0:
        raise ValueError("Probabilities cannot be negative, don't ask me to "
                         "normalize a function with negative values!")
    if min(mu) < 0:
        raise ValueError("Rates cannot be negative, don't ask me to "
                         "normalize a function over a negative domain!")
    # per-sample contributions to the integral; their sum is the norm
    elements = integral_element(mu, pofmu)
    total = sum(elements)
    return mu, pofmu / total
def action_create(self, courseid, taskid, path):
""" Delete a file or a directory """
# the path is given by the user. Let's normalize it
path = path.strip()
if not path.startswith("/"):
path = "/" + path
want_directory = path.endswith("/")
wanted_path = self.verify_path(courseid, taskid, path, True)
if wanted_path is None:
return self.show_tab_file(courseid, taskid, _("Invalid new path"))
task_fs = self.task_factory.get_task_fs(courseid, taskid)
if want_directory:
task_fs.from_subfolder(wanted_path).ensure_exists()
else:
task_fs.put(wanted_path, b"")
return self.show_tab_file(courseid, taskid) | Delete a file or a directory | Below is the the instruction that describes the task:
### Input:
Delete a file or a directory
### Response:
def action_create(self, courseid, taskid, path):
    """ Create a file (or, with a trailing slash, a directory) at *path*
    inside the task's filesystem. (The previous docstring said "Delete",
    which did not match the code.) """
    # the path is given by the user. Let's normalize it
    path = path.strip()
    if not path.startswith("/"):
        path = "/" + path
    # a trailing slash means the user wants a directory rather than a file
    want_directory = path.endswith("/")
    wanted_path = self.verify_path(courseid, taskid, path, True)
    if wanted_path is None:
        return self.show_tab_file(courseid, taskid, _("Invalid new path"))
    task_fs = self.task_factory.get_task_fs(courseid, taskid)
    if want_directory:
        task_fs.from_subfolder(wanted_path).ensure_exists()
    else:
        # create an empty file
        task_fs.put(wanted_path, b"")
    return self.show_tab_file(courseid, taskid) |
def versions(self) -> List(BlenderVersion):
"""
The versions associated with Blender
"""
return [BlenderVersion(tag) for tag in self.git_repo.tags] + [BlenderVersion(BLENDER_VERSION_MASTER)] | The versions associated with Blender | Below is the the instruction that describes the task:
### Input:
The versions associated with Blender
### Response:
def versions(self) -> 'List[BlenderVersion]':
    """
    The versions associated with Blender.

    Returns one BlenderVersion per git tag of the repository, plus a
    sentinel entry for the master branch.
    """
    # Bug fix: the annotation was ``List(BlenderVersion)``, which *calls*
    # typing.List and raises TypeError the moment the function is defined.
    # Subscripting (here as a string annotation) is the correct form.
    return [BlenderVersion(tag) for tag in self.git_repo.tags] + [BlenderVersion(BLENDER_VERSION_MASTER)]
def assert_is_valid_key(key):
"""
Raise KeyError if a given config key violates any requirements.
The requirements are the following and can be individually deactivated
in ``sacred.SETTINGS.CONFIG_KEYS``:
* ENFORCE_MONGO_COMPATIBLE (default: True):
make sure the keys don't contain a '.' or start with a '$'
* ENFORCE_JSONPICKLE_COMPATIBLE (default: True):
make sure the keys do not contain any reserved jsonpickle tags
This is very important. Only deactivate if you know what you are doing.
* ENFORCE_STRING (default: False):
make sure all keys are string.
* ENFORCE_VALID_PYTHON_IDENTIFIER (default: False):
make sure all keys are valid python identifiers.
Parameters
----------
key:
The key that should be checked
Raises
------
KeyError:
if the key violates any requirements
"""
if SETTINGS.CONFIG.ENFORCE_KEYS_MONGO_COMPATIBLE and (
isinstance(key, basestring) and ('.' in key or key[0] == '$')):
raise KeyError('Invalid key "{}". Config-keys cannot '
'contain "." or start with "$"'.format(key))
if SETTINGS.CONFIG.ENFORCE_KEYS_JSONPICKLE_COMPATIBLE and \
isinstance(key, basestring) and (
key in jsonpickle.tags.RESERVED or key.startswith('json://')):
raise KeyError('Invalid key "{}". Config-keys cannot be one of the'
'reserved jsonpickle tags: {}'
.format(key, jsonpickle.tags.RESERVED))
if SETTINGS.CONFIG.ENFORCE_STRING_KEYS and (
not isinstance(key, basestring)):
raise KeyError('Invalid key "{}". Config-keys have to be strings, '
'but was {}'.format(key, type(key)))
if SETTINGS.CONFIG.ENFORCE_VALID_PYTHON_IDENTIFIER_KEYS and (
isinstance(key, basestring) and not PYTHON_IDENTIFIER.match(key)):
raise KeyError('Key "{}" is not a valid python identifier'
.format(key))
if SETTINGS.CONFIG.ENFORCE_KEYS_NO_EQUALS and (
isinstance(key, basestring) and '=' in key):
raise KeyError('Invalid key "{}". Config keys may not contain an'
'equals sign ("=").'.format('=')) | Raise KeyError if a given config key violates any requirements.
The requirements are the following and can be individually deactivated
in ``sacred.SETTINGS.CONFIG_KEYS``:
* ENFORCE_MONGO_COMPATIBLE (default: True):
make sure the keys don't contain a '.' or start with a '$'
* ENFORCE_JSONPICKLE_COMPATIBLE (default: True):
make sure the keys do not contain any reserved jsonpickle tags
This is very important. Only deactivate if you know what you are doing.
* ENFORCE_STRING (default: False):
make sure all keys are string.
* ENFORCE_VALID_PYTHON_IDENTIFIER (default: False):
make sure all keys are valid python identifiers.
Parameters
----------
key:
The key that should be checked
Raises
------
KeyError:
if the key violates any requirements | Below is the the instruction that describes the task:
### Input:
Raise KeyError if a given config key violates any requirements.
The requirements are the following and can be individually deactivated
in ``sacred.SETTINGS.CONFIG_KEYS``:
* ENFORCE_MONGO_COMPATIBLE (default: True):
make sure the keys don't contain a '.' or start with a '$'
* ENFORCE_JSONPICKLE_COMPATIBLE (default: True):
make sure the keys do not contain any reserved jsonpickle tags
This is very important. Only deactivate if you know what you are doing.
* ENFORCE_STRING (default: False):
make sure all keys are string.
* ENFORCE_VALID_PYTHON_IDENTIFIER (default: False):
make sure all keys are valid python identifiers.
Parameters
----------
key:
The key that should be checked
Raises
------
KeyError:
if the key violates any requirements
### Response:
def assert_is_valid_key(key):
    """
    Raise KeyError if a given config key violates any requirements.
    The requirements are the following and can be individually deactivated
    in ``sacred.SETTINGS.CONFIG_KEYS``:
    * ENFORCE_MONGO_COMPATIBLE (default: True):
        make sure the keys don't contain a '.' or start with a '$'
    * ENFORCE_JSONPICKLE_COMPATIBLE (default: True):
        make sure the keys do not contain any reserved jsonpickle tags
        This is very important. Only deactivate if you know what you are doing.
    * ENFORCE_STRING (default: False):
        make sure all keys are string.
    * ENFORCE_VALID_PYTHON_IDENTIFIER (default: False):
        make sure all keys are valid python identifiers.
    Parameters
    ----------
    key:
        The key that should be checked
    Raises
    ------
    KeyError:
        if the key violates any requirements
    """
    # startswith('$') instead of key[0] == '$' so an empty-string key does not
    # raise an unrelated IndexError here.
    if SETTINGS.CONFIG.ENFORCE_KEYS_MONGO_COMPATIBLE and (
            isinstance(key, basestring) and ('.' in key or key.startswith('$'))):
        raise KeyError('Invalid key "{}". Config-keys cannot '
                       'contain "." or start with "$"'.format(key))
    if SETTINGS.CONFIG.ENFORCE_KEYS_JSONPICKLE_COMPATIBLE and \
            isinstance(key, basestring) and (
                key in jsonpickle.tags.RESERVED or key.startswith('json://')):
        # Fixed missing space in the implicitly concatenated message
        # ("the" + "reserved" previously rendered as "thereserved").
        raise KeyError('Invalid key "{}". Config-keys cannot be one of the '
                       'reserved jsonpickle tags: {}'
                       .format(key, jsonpickle.tags.RESERVED))
    if SETTINGS.CONFIG.ENFORCE_STRING_KEYS and (
            not isinstance(key, basestring)):
        raise KeyError('Invalid key "{}". Config-keys have to be strings, '
                       'but was {}'.format(key, type(key)))
    if SETTINGS.CONFIG.ENFORCE_VALID_PYTHON_IDENTIFIER_KEYS and (
            isinstance(key, basestring) and not PYTHON_IDENTIFIER.match(key)):
        raise KeyError('Key "{}" is not a valid python identifier'
                       .format(key))
    # Bug fix: format with the offending key, not the literal '=' (the old
    # message always printed '=' instead of the key). Also fixed the missing
    # space between "an" and "equals".
    if SETTINGS.CONFIG.ENFORCE_KEYS_NO_EQUALS and (
            isinstance(key, basestring) and '=' in key):
        raise KeyError('Invalid key "{}". Config keys may not contain an '
                       'equals sign ("=").'.format(key))
def get_utc_iso_date(date_str):
"""Convert date str into a iso-formatted UTC date str, i.e.:
yyyymmddhhmmss
:type date_str: str
:param date_str: date string to be parsed.
:rtype: str
:returns: iso-formatted UTC date str.
"""
try:
utc_tuple = dateutil.parser.parse(date_str).utctimetuple()
except ValueError:
try:
date_str = ' '.join(date_str.split(' ')[:-1])
utc_tuple = dateutil.parser.parse(date_str).utctimetuple()
except ValueError:
date_str = ''.join(date_str.split('(')[:-1]).strip(')')
utc_tuple = dateutil.parser.parse(date_str).utctimetuple()
date_object = datetime.datetime.fromtimestamp(time.mktime(utc_tuple))
utc_date_str = ''.join([x for x in date_object.isoformat() if x not in '-T:'])
return utc_date_str | Convert date str into a iso-formatted UTC date str, i.e.:
yyyymmddhhmmss
:type date_str: str
:param date_str: date string to be parsed.
:rtype: str
:returns: iso-formatted UTC date str. | Below is the instruction that describes the task:
### Input:
Convert date str into a iso-formatted UTC date str, i.e.:
yyyymmddhhmmss
:type date_str: str
:param date_str: date string to be parsed.
:rtype: str
:returns: iso-formatted UTC date str.
### Response:
def get_utc_iso_date(date_str):
    """Convert a date string into a compact ISO-style UTC date string,
    i.e. ``yyyymmddhhmmss``.

    :type date_str: str
    :param date_str: date string to be parsed.
    :rtype: str
    :returns: iso-formatted UTC date str.
    """
    parse = dateutil.parser.parse
    try:
        utc_tuple = parse(date_str).utctimetuple()
    except ValueError:
        # Drop the last whitespace-separated token (e.g. a stray timezone
        # name the parser chokes on) and retry.
        date_str = ' '.join(date_str.split(' ')[:-1])
        try:
            utc_tuple = parse(date_str).utctimetuple()
        except ValueError:
            # Last resort: drop a trailing parenthesised chunk. A ValueError
            # raised here propagates to the caller, as before.
            date_str = ''.join(date_str.split('(')[:-1]).strip(')')
            utc_tuple = parse(date_str).utctimetuple()
    stamp = time.mktime(utc_tuple)
    iso = datetime.datetime.fromtimestamp(stamp).isoformat()
    # Keep only the digits: remove the '-', 'T' and ':' separators.
    return ''.join(ch for ch in iso if ch not in '-T:')
def add_params_to_url(url, params):
"""Adds params to url
:param url: Url
:param params: Params to add
:return: original url with new params
"""
url_parts = list(urlparse.urlparse(url)) # get url parts
query = dict(urlparse.parse_qsl(url_parts[4])) # get url query
query.update(params) # add new params
url_parts[4] = urlencode(query)
return urlparse.urlunparse(url_parts) | Adds params to url
:param url: Url
:param params: Params to add
:return: original url with new params | Below is the instruction that describes the task:
### Input:
Adds params to url
:param url: Url
:param params: Params to add
:return: original url with new params
### Response:
def add_params_to_url(url, params):
    """Return *url* with *params* merged into its query string.

    :param url: original URL
    :param params: mapping of query parameters to add; entries with the
        same name override the existing ones
    :return: original url with new params
    """
    scheme, netloc, path, path_params, query, fragment = urlparse.urlparse(url)
    merged = dict(urlparse.parse_qsl(query))  # existing query parameters
    merged.update(params)                     # new ones win on conflict
    return urlparse.urlunparse(
        (scheme, netloc, path, path_params, urlencode(merged), fragment))
def get_filename4code(module, content, ext=None):
"""Generate filename based on content
The function ensures that the (temporary) directory exists, so that the
file can be written.
Example:
filename = get_filename4code("myfilter", code)
"""
imagedir = module + "-images"
fn = hashlib.sha1(content.encode(sys.getfilesystemencoding())).hexdigest()
try:
os.mkdir(imagedir)
sys.stderr.write('Created directory ' + imagedir + '\n')
except OSError:
pass
if ext:
fn += "." + ext
return os.path.join(imagedir, fn) | Generate filename based on content
The function ensures that the (temporary) directory exists, so that the
file can be written.
Example:
filename = get_filename4code("myfilter", code) | Below is the instruction that describes the task:
### Input:
Generate filename based on content
The function ensures that the (temporary) directory exists, so that the
file can be written.
Example:
filename = get_filename4code("myfilter", code)
### Response:
def get_filename4code(module, content, ext=None):
    """Generate a content-addressed filename inside ``<module>-images``.

    The (temporary) directory is created if it does not yet exist, so the
    returned path can be written to immediately.

    Example:
        filename = get_filename4code("myfilter", code)
    """
    imagedir = module + "-images"
    digest = hashlib.sha1(content.encode(sys.getfilesystemencoding())).hexdigest()
    try:
        os.mkdir(imagedir)
        sys.stderr.write('Created directory ' + imagedir + '\n')
    except OSError:
        # Directory already exists (or cannot be created) -- proceed anyway.
        pass
    basename = digest + "." + ext if ext else digest
    return os.path.join(imagedir, basename)
def change_status(self, bucket, key, status, cond):
"""修改文件的状态
修改文件的存储类型为可用或禁用:
Args:
bucket: 待操作资源所在空间
key: 待操作资源文件名
storage_type: 待操作资源存储类型,0为启用,1为禁用
"""
resource = entry(bucket, key)
if cond and isinstance(cond, dict):
condstr = ""
for k, v in cond.items():
condstr += "{0}={1}&".format(k, v)
condstr = urlsafe_base64_encode(condstr[:-1])
return self.__rs_do('chstatus', resource, 'status/{0}'.format(status), 'cond', condstr)
return self.__rs_do('chstatus', resource, 'status/{0}'.format(status)) | 修改文件的状态
修改文件的存储类型为可用或禁用:
Args:
bucket: 待操作资源所在空间
key: 待操作资源文件名
storage_type: 待操作资源存储类型,0为启用,1为禁用 | Below is the the instruction that describes the task:
### Input:
修改文件的状态
修改文件的存储类型为可用或禁用:
Args:
bucket: 待操作资源所在空间
key: 待操作资源文件名
storage_type: 待操作资源存储类型,0为启用,1为禁用
### Response:
def change_status(self, bucket, key, status, cond):
    """Change the status of a file (enable or disable it).

    Args:
        bucket: name of the bucket holding the resource
        key:    name (key) of the resource to operate on
        status: target status; 0 enables the file, 1 disables it
        cond:   optional dict of extra conditions; when given it is
                serialized as ``k=v`` pairs joined by ``&``, urlsafe
                base64-encoded, and sent as the ``cond`` segment
    """
    resource = entry(bucket, key)
    if cond and isinstance(cond, dict):
        condstr = ""
        for k, v in cond.items():
            condstr += "{0}={1}&".format(k, v)
        # condstr[:-1] strips the trailing '&' before encoding
        condstr = urlsafe_base64_encode(condstr[:-1])
        return self.__rs_do('chstatus', resource, 'status/{0}'.format(status), 'cond', condstr)
    return self.__rs_do('chstatus', resource, 'status/{0}'.format(status))
def rename_file(self, old_save_name, new_save_name, new_path):
"""This only updates the name and path we use to track the file's size
and upload progress. Doesn't rename it on the back end or make us
upload from anywhere else.
"""
if old_save_name in self._files:
del self._files[old_save_name]
self.update_file(new_save_name, new_path) | This only updates the name and path we use to track the file's size
and upload progress. Doesn't rename it on the back end or make us
upload from anywhere else. | Below is the the instruction that describes the task:
### Input:
This only updates the name and path we use to track the file's size
and upload progress. Doesn't rename it on the back end or make us
upload from anywhere else.
### Response:
def rename_file(self, old_save_name, new_save_name, new_path):
    """Update the name and path used locally to track the file's size and
    upload progress. This does not rename the file on the back end, nor
    change where the upload is read from.
    """
    # Discard the old tracking entry, if any, then register the new one.
    self._files.pop(old_save_name, None)
    self.update_file(new_save_name, new_path)
def get_fraglength_dict(fastafiles):
"""Returns dictionary of sequence fragment lengths, keyed by query name.
- fastafiles - list of FASTA input whole sequence files
Loops over input files and, for each, produces a dictionary with fragment
lengths, keyed by sequence ID. These are returned as a dictionary with
the keys being query IDs derived from filenames.
"""
fraglength_dict = {}
for filename in fastafiles:
qname = os.path.split(filename)[-1].split("-fragments")[0]
fraglength_dict[qname] = get_fragment_lengths(filename)
return fraglength_dict | Returns dictionary of sequence fragment lengths, keyed by query name.
- fastafiles - list of FASTA input whole sequence files
Loops over input files and, for each, produces a dictionary with fragment
lengths, keyed by sequence ID. These are returned as a dictionary with
the keys being query IDs derived from filenames. | Below is the the instruction that describes the task:
### Input:
Returns dictionary of sequence fragment lengths, keyed by query name.
- fastafiles - list of FASTA input whole sequence files
Loops over input files and, for each, produces a dictionary with fragment
lengths, keyed by sequence ID. These are returned as a dictionary with
the keys being query IDs derived from filenames.
### Response:
def get_fraglength_dict(fastafiles):
    """Return a dictionary of sequence fragment lengths, keyed by query name.

    - fastafiles - list of FASTA input whole sequence files

    The query name for each file is derived from its basename (the part
    before ``-fragments``); the value is the per-sequence fragment length
    mapping produced by ``get_fragment_lengths``.
    """
    def _query_name(path):
        # e.g. ".../NC_004547-fragments.fasta" -> "NC_004547"
        return os.path.split(path)[-1].split("-fragments")[0]

    return {_query_name(fname): get_fragment_lengths(fname)
            for fname in fastafiles}
async def resume(self):
"""Sends the RESUME packet."""
payload = {
'op': self.RESUME,
'd': {
'seq': self.sequence,
'session_id': self.session_id,
'token': self.token
}
}
await self.send_as_json(payload)
log.info('Shard ID %s has sent the RESUME payload.', self.shard_id) | Sends the RESUME packet. | Below is the the instruction that describes the task:
### Input:
Sends the RESUME packet.
### Response:
async def resume(self):
    """Sends the RESUME packet."""
    await self.send_as_json({
        'op': self.RESUME,
        'd': {
            'seq': self.sequence,
            'session_id': self.session_id,
            'token': self.token,
        },
    })
    log.info('Shard ID %s has sent the RESUME payload.', self.shard_id)
def get_cached_translation(instance, language_code=None, related_name=None, use_fallback=False):
"""
Fetch an cached translation.
.. versionadded 1.2 Added the ``related_name`` parameter.
"""
if language_code is None:
language_code = instance.get_current_language()
translated_model = instance._parler_meta.get_model_by_related_name(related_name)
values = _get_cached_values(instance, translated_model, language_code, use_fallback)
if not values:
return None
try:
translation = translated_model(**values)
except TypeError:
# Some model field was removed, cache entry is no longer working.
return None
translation._state.adding = False
return translation | Fetch an cached translation.
.. versionadded 1.2 Added the ``related_name`` parameter. | Below is the the instruction that describes the task:
### Input:
Fetch an cached translation.
.. versionadded 1.2 Added the ``related_name`` parameter.
### Response:
def get_cached_translation(instance, language_code=None, related_name=None, use_fallback=False):
    """
    Fetch a cached translation, or ``None`` when nothing usable is cached.

    .. versionadded 1.2 Added the ``related_name`` parameter.
    """
    lang = language_code if language_code is not None else instance.get_current_language()
    translated_model = instance._parler_meta.get_model_by_related_name(related_name)

    values = _get_cached_values(instance, translated_model, lang, use_fallback)
    if not values:
        return None

    try:
        translation = translated_model(**values)
    except TypeError:
        # A model field was removed; the cached entry no longer matches.
        return None

    # Mark the object as already persisted, not pending insertion.
    translation._state.adding = False
    return translation
def ext_pillar(minion_id, # pylint: disable=W0613
pillar, # pylint: disable=W0613
conf):
'''
Execute hiera and return the data
'''
cmd = 'hiera -c {0}'.format(conf)
for key, val in six.iteritems(__grains__):
if isinstance(val, six.string_types):
cmd += ' {0}=\'{1}\''.format(key, val)
try:
data = salt.utils.yaml.safe_load(__salt__['cmd.run'](cmd))
except Exception:
log.critical('Hiera YAML data failed to parse from conf %s', conf)
return {}
return data | Execute hiera and return the data | Below is the the instruction that describes the task:
### Input:
Execute hiera and return the data
### Response:
def ext_pillar(minion_id,  # pylint: disable=W0613
               pillar,  # pylint: disable=W0613
               conf):
    '''
    Execute hiera and return the data
    '''
    # Build the hiera invocation, passing every string-valued grain as a
    # key='value' scope argument.
    parts = ['hiera -c {0}'.format(conf)]
    for grain, value in six.iteritems(__grains__):
        if isinstance(value, six.string_types):
            parts.append('{0}=\'{1}\''.format(grain, value))
    cmd = ' '.join(parts)
    try:
        data = salt.utils.yaml.safe_load(__salt__['cmd.run'](cmd))
    except Exception:
        log.critical('Hiera YAML data failed to parse from conf %s', conf)
        return {}
    return data
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.