code stringlengths 75 104k | docstring stringlengths 1 46.9k |
|---|---|
def show_shortcuts(self):
"""Print all shortcuts."""
gui_name = self.gui.name
actions_name = self.name
name = ('{} - {}'.format(gui_name, actions_name)
if actions_name else gui_name)
_show_shortcuts(self.shortcuts, name) | Print all shortcuts. |
def executor(func_or_executor: Union[Executor, str, Callable[..., T_Retval]]) -> \
Union[WrappedCallable, Callable[..., WrappedCallable]]:
"""
Decorate a function to run in an executor.
If no executor (or ``None``) is given, the current event loop's default executor is used.
Otherwise, the argument must be a PEP 3148 compliant thread pool executor or the name of an
:class:`~concurrent.futures.Executor` instance.
If a decorated callable is called in a worker thread, the executor argument is ignored and the
wrapped function is called directly.
Callables wrapped with this decorator must be used with ``await`` when called in the event loop
thread.
Example use with the default executor (``None``)::
@executor
def this_runs_in_threadpool(ctx):
return do_something_cpu_intensive()
async def request_handler(ctx):
result = await this_runs_in_threadpool(ctx)
With a named :class:`~concurrent.futures.Executor` resource::
@executor('special_ops')
def this_runs_in_threadpool(ctx):
return do_something_cpu_intensive()
async def request_handler(ctx):
result = await this_runs_in_threadpool(ctx)
:param func_or_executor: either a callable (when used as a decorator), an executor instance or
the name of an :class:`~concurrent.futures.Executor` resource
"""
def outer(func: Callable[..., T_Retval],
executor: Union[Executor, str] = None) -> Callable[..., Awaitable[T_Retval]]:
def wrapper(*args, **kwargs):
try:
loop = get_event_loop()
except RuntimeError:
# Event loop not available -- we're in a worker thread
return func(*args, **kwargs)
# Resolve the executor resource name to an Executor instance
if isinstance(executor, str):
try:
ctx = next(obj for obj in args[:2] if isinstance(obj, Context))
except StopIteration:
raise RuntimeError('the callable needs to be called with a Context as the '
'first or second positional argument')
_executor = ctx.require_resource(Executor, executor)
else:
_executor = executor
callback = partial(func, *args, **kwargs)
return loop.run_in_executor(_executor, callback)
assert check_argument_types()
assert not inspect.iscoroutinefunction(func), \
'Cannot wrap coroutine functions to be run in an executor'
return wraps(func)(wrapper)
if isinstance(func_or_executor, (str, Executor)):
return partial(outer, executor=func_or_executor)
else:
return outer(func_or_executor) | Decorate a function to run in an executor.
If no executor (or ``None``) is given, the current event loop's default executor is used.
Otherwise, the argument must be a PEP 3148 compliant thread pool executor or the name of an
:class:`~concurrent.futures.Executor` instance.
If a decorated callable is called in a worker thread, the executor argument is ignored and the
wrapped function is called directly.
Callables wrapped with this decorator must be used with ``await`` when called in the event loop
thread.
Example use with the default executor (``None``)::
@executor
def this_runs_in_threadpool(ctx):
return do_something_cpu_intensive()
async def request_handler(ctx):
result = await this_runs_in_threadpool(ctx)
With a named :class:`~concurrent.futures.Executor` resource::
@executor('special_ops')
def this_runs_in_threadpool(ctx):
return do_something_cpu_intensive()
async def request_handler(ctx):
result = await this_runs_in_threadpool(ctx)
:param func_or_executor: either a callable (when used as a decorator), an executor instance or
the name of an :class:`~concurrent.futures.Executor` resource |
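The closing `isinstance` check is what lets `executor` be used both bare (`@executor`) and with an argument (`@executor('special_ops')`). A minimal, self-contained sketch of that dual-use decorator pattern, independent of the surrounding framework (all names here are illustrative):

```python
from functools import partial, wraps

def tag(func_or_label):
    """Works both bare (@tag) and with an argument (@tag('label'))."""
    def outer(func, label='default'):
        @wraps(func)
        def wrapper(*args, **kwargs):
            print('running', func.__name__, 'tagged', label)
            return func(*args, **kwargs)
        return wrapper
    # Same dispatch as above: a non-callable argument means the decorator
    # was invoked with options, so return a partially applied decorator.
    if isinstance(func_or_label, str):
        return partial(outer, label=func_or_label)
    return outer(func_or_label)

@tag
def plain():
    return 1

@tag('special')
def labelled():
    return 2

print(plain(), labelled())
```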
async def execute(
self,
db_name,
sql,
params=None,
truncate=False,
custom_time_limit=None,
page_size=None,
):
"""Executes sql against db_name in a thread"""
page_size = page_size or self.page_size
def sql_operation_in_thread(conn):
time_limit_ms = self.sql_time_limit_ms
if custom_time_limit and custom_time_limit < time_limit_ms:
time_limit_ms = custom_time_limit
with sqlite_timelimit(conn, time_limit_ms):
try:
cursor = conn.cursor()
cursor.execute(sql, params or {})
max_returned_rows = self.max_returned_rows
if max_returned_rows == page_size:
max_returned_rows += 1
if max_returned_rows and truncate:
rows = cursor.fetchmany(max_returned_rows + 1)
truncated = len(rows) > max_returned_rows
rows = rows[:max_returned_rows]
else:
rows = cursor.fetchall()
truncated = False
except sqlite3.OperationalError as e:
if e.args == ('interrupted',):
raise InterruptedError(e)
print(
"ERROR: conn={}, sql = {}, params = {}: {}".format(
conn, repr(sql), params, e
)
)
raise
if truncate:
return Results(rows, truncated, cursor.description)
else:
return Results(rows, False, cursor.description)
with trace("sql", (db_name, sql.strip(), params)):
results = await self.execute_against_connection_in_thread(
db_name, sql_operation_in_thread
)
return results | Executes sql against db_name in a thread |
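The truncation logic fetches one row beyond the cap so that the presence of an extra row, rather than a full `COUNT(*)`, signals that results were cut off. A standalone sketch of the same idea using only the stdlib `sqlite3` module (function and table names here are illustrative):

```python
import sqlite3

def fetch_truncated(conn, sql, max_rows):
    # Fetch one row more than the limit; the extra row's presence tells us
    # the result set was truncated without counting everything.
    cursor = conn.execute(sql)
    rows = cursor.fetchmany(max_rows + 1)
    truncated = len(rows) > max_rows
    return rows[:max_rows], truncated

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE t (n INTEGER)")
conn.executemany("INSERT INTO t VALUES (?)", [(i,) for i in range(10)])
rows, truncated = fetch_truncated(conn, "SELECT n FROM t", 5)
print(len(rows), truncated)  # 5 True
```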
def update_atom_numbers(self):
"""Update links "first_atom_number" -> "second_atom_number"
:return: None.
:rtype: :py:obj:`None`.
"""
self._ctab_data['first_atom_number'] = self.first_atom.atom_number
self._ctab_data['second_atom_number'] = self.second_atom.atom_number | Update links "first_atom_number" -> "second_atom_number"
:return: None.
:rtype: :py:obj:`None`. |
def load_actions(spec, group=None, expr_parser=None):
"""Each item can be an action name as a string or a dict. When using a dict,
one key/item pair must be the action name and its options and the rest action
decorator names and their options.
Example:
load_actions(["login_required", {"flash": {"message": "hello world", "label": "warning"}}])
"""
if expr_parser is None:
expr_parser = ExpressionParser()
actions = ActionList()
for name in spec:
options = {}
as_ = None
decorators = []
if isinstance(name, dict):
actionspec = dict(name)
as_ = actionspec.pop("as", None)
for dec, dec_cls in action_decorators:
if dec in actionspec:
decorators.append((dec_cls, expr_parser.compile(actionspec.pop(dec))))
name, options = actionspec.popitem()
if options:
options = expr_parser.compile(options)
if isinstance(name, Action):
action = name
elif isinstance(name, ActionFunction):
action = name.action
else:
action = action_resolver.resolve_or_delayed(name, options, group, as_)
for dec_cls, arg in decorators:
action = dec_cls(action, arg)
actions.append(action)
return actions | Each item can be an action name as a string or a dict. When using a dict,
one key/item pair must be the action name and its options and the rest action
decorator names and their options.
Example:
load_actions(["login_required", {"flash": {"message": "hello world", "label": "warning"}}]) |
def _store_compressed_sequence_to_node(self, node):
"""
make a compressed representation of a pair of sequences only counting
the number of times a particular pair of states (e.g. (A,T)) is observed
in the aligned sequences of parent and child.
Parameters
-----------
node : PhyloTree.Clade
Tree node. **Note** because the method operates
on the sequences on both sides of a branch, sequence reconstruction
must be performed prior to calling this method.
"""
seq_pairs, multiplicity = self.gtr.compress_sequence_pair(node.up.cseq,
node.cseq,
pattern_multiplicity = self.multiplicity,
ignore_gaps = self.ignore_gaps)
node.compressed_sequence = {'pair':seq_pairs, 'multiplicity':multiplicity} | make a compressed representation of a pair of sequences only counting
the number of times a particular pair of states (e.g. (A,T)) is observed
in the aligned sequences of parent and child.
Parameters
-----------
node : PhyloTree.Clade
Tree node. **Note** because the method operates
on the sequences on both sides of a branch, sequence reconstruction
must be performed prior to calling this method. |
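What `gtr.compress_sequence_pair` presumably boils down to (ignoring gaps and pattern multiplicity): counting how often each (parent state, child state) column occurs, so that downstream likelihood terms loop over unique pairs only. A toy sketch under those assumptions:

```python
from collections import Counter

def compress_pair(parent_seq, child_seq):
    # Count occurrences of each (parent_state, child_state) column; the
    # likelihood then needs one term per unique state pair, weighted by count.
    counts = Counter(zip(parent_seq, child_seq))
    pairs = list(counts)
    multiplicity = [counts[p] for p in pairs]
    return pairs, multiplicity

pairs, mult = compress_pair('AAACGT', 'AAACGA')
print(pairs)  # [('A', 'A'), ('C', 'C'), ('G', 'G'), ('T', 'A')]
print(mult)   # [3, 1, 1, 1]
```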
def _mirror_penalized(self, f_values, idx):
"""obsolete and subject to removal (TODO),
return modified f-values such that for each mirror one becomes worst.
This function is useless when selective mirroring is applied with no
more than (lambda-mu)/2 solutions.
Mirrors are leading and trailing values in ``f_values``.
"""
assert len(f_values) >= 2 * len(idx)
m = np.max(np.abs(f_values))
for i in range(len(idx)):
if f_values[idx[i]] > f_values[-1 - i]:
f_values[idx[i]] += m
else:
f_values[-1 - i] += m
return f_values | obsolete and subject to removal (TODO),
return modified f-values such that for each mirror one becomes worst.
This function is useless when selective mirroring is applied with no
more than (lambda-mu)/2 solutions.
Mirrors are leading and trailing values in ``f_values``. |
def set_mode(path, mode):
'''
Set the mode of a file
This just calls get_mode, which returns None because we don't use mode on
Windows
Args:
path: The path to the file or directory
mode: The mode (not used)
Returns:
None
CLI Example:
.. code-block:: bash
salt '*' file.set_mode /etc/passwd 0644
'''
func_name = '{0}.set_mode'.format(__virtualname__)
if __opts__.get('fun', '') == func_name:
log.info('The function %s should not be used on Windows systems; '
'see function docs for details. The value returned is '
'always None. Use set_perms instead.', func_name)
return get_mode(path) | Set the mode of a file
This just calls get_mode, which returns None because we don't use mode on
Windows
Args:
path: The path to the file or directory
mode: The mode (not used)
Returns:
None
CLI Example:
.. code-block:: bash
salt '*' file.set_mode /etc/passwd 0644 |
def grep(query, directory):
"""This function will search through the directory structure of the
application and for each directory it finds it launches an Async task to
run itself. For each .py file it finds, it actually greps the file and then
returns the found output."""
dir_contents = os.listdir(directory)
results = []
for item in dir_contents:
path = os.path.join(directory, item)
if os.path.isdir(path):
build_and_start(query, path)
else:
if item.endswith('.py'):
results.extend(grep_file(query, path))
return results | This function will search through the directory structure of the
application and for each directory it finds it launches an Async task to
run itself. For each .py file it finds, it actually greps the file and then
returns the found output. |
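`grep_file` and `build_and_start` are defined elsewhere; a plausible sketch of the per-file half, with the helper's signature and output format assumed rather than taken from the source:

```python
def grep_file(query, path):
    # Hypothetical helper: scan one file line by line and report matches
    # in a grep-like "path:lineno:line" format.
    matches = []
    with open(path, "r", errors="ignore") as handle:
        for lineno, line in enumerate(handle, start=1):
            if query in line:
                matches.append("{}:{}:{}".format(path, lineno, line.rstrip()))
    return matches

# Example (hypothetical path): matches = grep_file('TODO', 'app/models.py')
```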
def pause(self):
"""Pauses the stream."""
res = librtmp.RTMP_Pause(self.client.rtmp, 1)
if res < 1:
raise RTMPError("Failed to pause") | Pauses the stream. |
def as_signed(self):
""" Returns the field *value* of the `Decimal` field as a signed integer."""
return self._cast(self._value, self._min(True), self._max(True), True) | Returns the field *value* of the `Decimal` field as a signed integer. |
def median(timeseries, segmentlength, noverlap=None, window=None, plan=None):
"""Calculate a PSD of this `TimeSeries` using a median average method
The median average is similar to Welch's method, using a median average
rather than mean.
Parameters
----------
timeseries : `~gwpy.timeseries.TimeSeries`
input `TimeSeries` data.
segmentlength : `int`
number of samples in single average.
noverlap : `int`
number of samples to overlap between segments, defaults to 50%.
window : `tuple`, `str`, optional
window parameters to apply to timeseries prior to FFT
plan : `REAL8FFTPlan`, optional
LAL FFT plan to use when generating average spectrum
Returns
-------
spectrum : `~gwpy.frequencyseries.FrequencySeries`
average power `FrequencySeries`
See also
--------
lal.REAL8AverageSpectrumMedian
"""
return _lal_spectrum(timeseries, segmentlength, noverlap=noverlap,
method='median', window=window, plan=plan) | Calculate a PSD of this `TimeSeries` using a median average method
The median average is similar to Welch's method, using a median average
rather than mean.
Parameters
----------
timeseries : `~gwpy.timeseries.TimeSeries`
input `TimeSeries` data.
segmentlength : `int`
number of samples in single average.
noverlap : `int`
number of samples to overlap between segments, defaults to 50%.
window : `tuple`, `str`, optional
window parameters to apply to timeseries prior to FFT
plan : `REAL8FFTPlan`, optional
LAL FFT plan to use when generating average spectrum
Returns
-------
spectrum : `~gwpy.frequencyseries.FrequencySeries`
average power `FrequencySeries`
See also
--------
lal.REAL8AverageSpectrumMedian |
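Conceptually, a median-averaged PSD splits the series into segments, takes a periodogram of each, and combines them with a median, so a few loud segments cannot bias the estimate the way a mean would. A bare numpy illustration of that idea, without the median bias correction the LAL routine includes:

```python
import numpy as np

def median_psd(x, seglen, fs=1.0):
    # Non-overlapping windowed segments -> per-segment periodograms ->
    # median across segments, which is robust to transient outliers.
    nseg = len(x) // seglen
    segs = x[:nseg * seglen].reshape(nseg, seglen)
    window = np.hanning(seglen)
    spectra = np.abs(np.fft.rfft(segs * window, axis=1)) ** 2
    scale = 1.0 / (fs * (window ** 2).sum())
    return np.median(spectra, axis=0) * scale

rng = np.random.default_rng(0)
psd = median_psd(rng.standard_normal(4096), seglen=256)
print(psd.shape)  # (129,)
```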
def get_record_types(self):
"""Gets the record types available in this object.
A record ``Type`` explicitly indicates the specification of an
interface to the record. A record may or may not inherit other
record interfaces through interface inheritance in which case
support of a record type may not be explicit in the returned
list. Interoperability with the typed interface to this object
should be performed through ``hasRecordType()``.
return: (osid.type.TypeList) - the record types available
*compliance: mandatory -- This method must be implemented.*
"""
from ..type.objects import TypeList
type_list = []
for type_idstr in self._supported_record_type_ids:
type_list.append(Type(**self._record_type_data_sets[Id(type_idstr).get_identifier()]))
return TypeList(type_list) | Gets the record types available in this object.
A record ``Type`` explicitly indicates the specification of an
interface to the record. A record may or may not inherit other
record interfaces through interface inheritance in which case
support of a record type may not be explicit in the returned
list. Interoperability with the typed interface to this object
should be performed through ``hasRecordType()``.
return: (osid.type.TypeList) - the record types available
*compliance: mandatory -- This method must be implemented.* |
def element(self, inp=None, data_ptr=None, order=None):
"""Create a new element.
Parameters
----------
inp : `array-like`, optional
Input used to initialize the new element.
If ``inp`` is `None`, an empty element is created with no
guarantee of its state (memory allocation only).
The new element will use ``order`` as storage order if
provided, otherwise `default_order`.
Otherwise, a copy is avoided whenever possible. This requires
correct `shape` and `dtype`, and if ``order`` is provided,
also contiguousness in that ordering. If any of these
conditions is not met, a copy is made.
data_ptr : int, optional
Pointer to the start memory address of a contiguous Numpy array
or an equivalent raw container with the same total number of
bytes. For this option, ``order`` must be either ``'C'`` or
``'F'``.
The option is also mutually exclusive with ``inp``.
order : {None, 'C', 'F'}, optional
Storage order of the returned element. For ``'C'`` and ``'F'``,
contiguous memory in the respective ordering is enforced.
The default ``None`` enforces no contiguousness.
Returns
-------
element : `NumpyTensor`
The new element, created from ``inp`` or from scratch.
Examples
--------
Without arguments, an uninitialized element is created. With an
array-like input, the element can be initialized:
>>> space = odl.rn(3)
>>> empty = space.element()
>>> empty.shape
(3,)
>>> empty.space
rn(3)
>>> x = space.element([1, 2, 3])
>>> x
rn(3).element([ 1., 2., 3.])
If the input already is a `numpy.ndarray` of correct `dtype`, it
will merely be wrapped, i.e., both array and space element access
the same memory, such that mutations will affect both:
>>> arr = np.array([1, 2, 3], dtype=float)
>>> elem = odl.rn(3).element(arr)
>>> elem[0] = 0
>>> elem
rn(3).element([ 0., 2., 3.])
>>> arr
array([ 0., 2., 3.])
Elements can also be constructed from a data pointer, resulting
again in shared memory:
>>> int_space = odl.tensor_space((2, 3), dtype=int)
>>> arr = np.array([[1, 2, 3],
... [4, 5, 6]], dtype=int, order='F')
>>> ptr = arr.ctypes.data
>>> y = int_space.element(data_ptr=ptr, order='F')
>>> y
tensor_space((2, 3), dtype=int).element(
[[1, 2, 3],
[4, 5, 6]]
)
>>> y[0, 1] = -1
>>> arr
array([[ 1, -1, 3],
[ 4, 5, 6]])
"""
if order is not None and str(order).upper() not in ('C', 'F'):
raise ValueError("`order` {!r} not understood".format(order))
if inp is None and data_ptr is None:
if order is None:
arr = np.empty(self.shape, dtype=self.dtype,
order=self.default_order)
else:
arr = np.empty(self.shape, dtype=self.dtype, order=order)
return self.element_type(self, arr)
elif inp is None and data_ptr is not None:
if order is None:
raise ValueError('`order` cannot be None for element '
'creation from pointer')
ctype_array_def = ctypes.c_byte * self.nbytes
as_ctype_array = ctype_array_def.from_address(data_ptr)
as_numpy_array = np.ctypeslib.as_array(as_ctype_array)
arr = as_numpy_array.view(dtype=self.dtype)
arr = arr.reshape(self.shape, order=order)
return self.element_type(self, arr)
elif inp is not None and data_ptr is None:
if inp in self and order is None:
# Short-circuit for space elements and no enforced ordering
return inp
# Try to not copy but require dtype and order if given
# (`order=None` is ok as np.array argument)
arr = np.array(inp, copy=False, dtype=self.dtype, ndmin=self.ndim,
order=order)
# Make sure the result is writeable, if not make copy.
# This happens for e.g. results of `np.broadcast_to()`.
if not arr.flags.writeable:
arr = arr.copy()
if arr.shape != self.shape:
raise ValueError('shape of `inp` not equal to space shape: '
'{} != {}'.format(arr.shape, self.shape))
return self.element_type(self, arr)
else:
raise TypeError('cannot provide both `inp` and `data_ptr`') | Create a new element.
Parameters
----------
inp : `array-like`, optional
Input used to initialize the new element.
If ``inp`` is `None`, an empty element is created with no
guarantee of its state (memory allocation only).
The new element will use ``order`` as storage order if
provided, otherwise `default_order`.
Otherwise, a copy is avoided whenever possible. This requires
correct `shape` and `dtype`, and if ``order`` is provided,
also contiguousness in that ordering. If any of these
conditions is not met, a copy is made.
data_ptr : int, optional
Pointer to the start memory address of a contiguous Numpy array
or an equivalent raw container with the same total number of
bytes. For this option, ``order`` must be either ``'C'`` or
``'F'``.
The option is also mutually exclusive with ``inp``.
order : {None, 'C', 'F'}, optional
Storage order of the returned element. For ``'C'`` and ``'F'``,
contiguous memory in the respective ordering is enforced.
The default ``None`` enforces no contiguousness.
Returns
-------
element : `NumpyTensor`
The new element, created from ``inp`` or from scratch.
Examples
--------
Without arguments, an uninitialized element is created. With an
array-like input, the element can be initialized:
>>> space = odl.rn(3)
>>> empty = space.element()
>>> empty.shape
(3,)
>>> empty.space
rn(3)
>>> x = space.element([1, 2, 3])
>>> x
rn(3).element([ 1., 2., 3.])
If the input already is a `numpy.ndarray` of correct `dtype`, it
will merely be wrapped, i.e., both array and space element access
the same memory, such that mutations will affect both:
>>> arr = np.array([1, 2, 3], dtype=float)
>>> elem = odl.rn(3).element(arr)
>>> elem[0] = 0
>>> elem
rn(3).element([ 0., 2., 3.])
>>> arr
array([ 0., 2., 3.])
Elements can also be constructed from a data pointer, resulting
again in shared memory:
>>> int_space = odl.tensor_space((2, 3), dtype=int)
>>> arr = np.array([[1, 2, 3],
... [4, 5, 6]], dtype=int, order='F')
>>> ptr = arr.ctypes.data
>>> y = int_space.element(data_ptr=ptr, order='F')
>>> y
tensor_space((2, 3), dtype=int).element(
[[1, 2, 3],
[4, 5, 6]]
)
>>> y[0, 1] = -1
>>> arr
array([[ 1, -1, 3],
[ 4, 5, 6]]) |
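The `data_ptr` branch rebuilds an array view from nothing but a raw address, a byte count, a dtype and a shape via `ctypes`. The same round trip in isolation (the source array must stay alive while the view is in use):

```python
import ctypes
import numpy as np

# Rebuild a view over existing memory from its bare address: ctypes gives a
# byte buffer at that address, numpy reinterprets and reshapes it.
src = np.arange(6, dtype=np.int64).reshape(2, 3)
buf = (ctypes.c_byte * src.nbytes).from_address(src.ctypes.data)
view = np.ctypeslib.as_array(buf).view(np.int64).reshape(src.shape)
view[0, 0] = 99
print(src[0, 0])  # 99 -- both names share one buffer
```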
def delete(self, main_type, sub_type, unique_id, owner=None):
"""
Deletes the Indicator/Group/Victim or Security Label
Args:
main_type:
sub_type:
unique_id:
owner:
"""
params = {'owner': owner} if owner else {}
if not sub_type:
url = '/v2/{}/{}'.format(main_type, unique_id)
else:
url = '/v2/{}/{}/{}'.format(main_type, sub_type, unique_id)
return self.tcex.session.delete(url, params=params) | Deletes the Indicator/Group/Victim or Security Label
Args:
main_type:
sub_type:
unique_id:
owner: |
async def load_user(self, request):
"""Load user from request."""
if USER_KEY not in request:
session = await self.load(request)
if 'id' not in session:
return None
request[USER_KEY] = request.user = await self._user_loader(session['id'])
return request[USER_KEY] | Load user from request. |
def __update_clusters(self):
"""!
@brief Calculate distance (in line with specified metric) to each point from the each cluster. Nearest points
are captured by according clusters and as a result clusters are updated.
@return (list) Updated clusters as list of clusters. Each cluster contains indexes of objects from data.
"""
clusters = [[] for _ in range(len(self.__centers))]
dataset_differences = self.__calculate_dataset_difference(len(clusters))
optimum_indexes = numpy.argmin(dataset_differences, axis=0)
for index_point in range(len(optimum_indexes)):
index_cluster = optimum_indexes[index_point]
clusters[index_cluster].append(index_point)
clusters = [cluster for cluster in clusters if len(cluster) > 0]
return clusters | !
@brief Calculate distance (in line with specified metric) to each point from the each cluster. Nearest points
are captured by according clusters and as a result clusters are updated.
@return (list) Updated clusters as list of clusters. Each cluster contains indexes of objects from data. |
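The assignment step is a single vectorized argmin over a (centers x points) distance matrix. A self-contained Euclidean sketch of what `__calculate_dataset_difference` plus the argmin amounts to (the real code delegates the distance to a configurable metric):

```python
import numpy as np

# (centers x points) distance matrix, then argmin down the center axis
# assigns each point to its nearest center in one vectorized step.
points = np.array([[0.0, 0.0], [0.1, 0.2], [5.0, 5.1], [4.9, 5.0]])
centers = np.array([[0.0, 0.0], [5.0, 5.0]])
diffs = np.linalg.norm(centers[:, None, :] - points[None, :, :], axis=2)
labels = np.argmin(diffs, axis=0)
clusters = [np.flatnonzero(labels == k).tolist() for k in range(len(centers))]
print(clusters)  # [[0, 1], [2, 3]]
```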
def _format_markdown_requisite(state, stateid, makelink=True):
'''
format requisite as a link users can click
'''
fmt_id = '{0}: {1}'.format(state, stateid)
if makelink:
return ' * [{0}](#{1})\n'.format(fmt_id, _format_markdown_link(fmt_id))
else:
return ' * `{0}`\n'.format(fmt_id) | format requisite as a link users can click |
def initialize(self):
"""
Initialize the self._poolerClass
"""
# Retrieve the necessary extra arguments that were handled automatically
autoArgs = {name: getattr(self, name) for name in self._poolerArgNames}
autoArgs["inputDimensions"] = [self._inputWidth]
autoArgs["columnDimensions"] = [self._columnCount]
autoArgs["potentialRadius"] = self._inputWidth
autoArgs["historyLength"] = self._historyLength
autoArgs["minHistory"] = self._minHistory
# Allocate the pooler
self._pooler = self._poolerClass(**autoArgs) | Initialize the self._poolerClass |
def files(self):
"""Get files iterator.
:returns: Files iterator.
"""
if self.model is None:
raise MissingModelError()
records_buckets = RecordsBuckets.query.filter_by(
record_id=self.id).first()
if not records_buckets:
bucket = self._create_bucket()
if not bucket:
return None
RecordsBuckets.create(record=self.model, bucket=bucket)
else:
bucket = records_buckets.bucket
return self.files_iter_cls(self, bucket=bucket, file_cls=self.file_cls) | Get files iterator.
:returns: Files iterator. |
def parse_psimitab(content, fmt='tab27'):
"""https://code.google.com/archive/p/psimi/wikis/PsimiTab27Format.wiki
"""
columns = [
'Unique identifier for interactor A',
'Unique identifier for interactor B',
'Alternative identifier for interactor A',
'Alternative identifier for interactor B',
'Aliases for A',
'Aliases for B',
'Interaction detection methods',
'First author',
'Identifier of the publication',
'NCBI Taxonomy identifier for interactor A',
'NCBI Taxonomy identifier for interactor B',
'Interaction types',
'Source databases',
'Interaction identifier(s)',
'Confidence score']
columns += [
'Complex expansion',
'Biological role A', 'Biological role B',
'Experimental role A', 'Experimental role B',
'Interactor type A', 'Interactor type B',
'Xref for interactor A', 'Xref for interactor B',
'Xref for the interaction',
'Annotations for interactor A', 'Annotations for interactor B',
'Annotations for the interaction',
'NCBI Taxonomy identifier for the host organism',
'Parameters of the interaction',
'Creation date', 'Update date',
'Checksum for the interactor A', 'Checksum for the interactor B',
'Checksum for the interaction',
'negative',
'Feature(s) for interactor A', 'Feature(s) for interactor B',
'Stoichiometry for interactor A', 'Stoichiometry for interactor B',
'Participant identification method for interactor A',
'Participant identification method for interactor B'
]
if fmt == 'tab25':
columns = columns[: 15]
rexp = re.compile(r"(?P<fields>((\"([^\"]|((?<=\\)\"))*\")|([^\t\"])|((?<=\\)\"))+)(\t|$)")
retval = []
for line in content.split('\n'):
line = line.strip()
if line == '' or line[0] == '#':
continue
start = 0
tmp = []
for mobj in rexp.finditer(line):
if mobj.start() != start:
print(repr(line))
assert mobj.start() == start
start = mobj.end()
tmp.append(mobj.group('fields'))
assert len(tmp) == len(columns)
retval.append(dict(zip(columns, tmp)))
return retval | https://code.google.com/archive/p/psimi/wikis/PsimiTab27Format.wiki |
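A quick demonstration of what the field regex tolerates: plain runs, fully quoted fields that may contain tabs, and quotes embedded mid-field. The sample line is invented for illustration:

```python
import re

# One match per tab-terminated field; quoted sections may contain tabs,
# and escaped quotes are accepted inside and outside quotes.
rexp = re.compile(r"(?P<fields>((\"([^\"]|((?<=\\)\"))*\")|([^\t\"])|((?<=\\)\"))+)(\t|$)")
line = 'uniprotkb:P12345\t"a quoted\tfield"\tpsi-mi:"MI:0000"(name)'
print([m.group("fields") for m in rexp.finditer(line)])
# ['uniprotkb:P12345', '"a quoted\tfield"', 'psi-mi:"MI:0000"(name)']
```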
def available_ports(low=1024, high=65535, exclude_ranges=None):
"""
Returns a set of possible ports (excluding system,
ephemeral and well-known ports).
Pass ``high`` and/or ``low`` to limit the port range.
"""
if exclude_ranges is None:
exclude_ranges = []
available = utils.ranges_to_set(UNASSIGNED_RANGES)
exclude = utils.ranges_to_set(
ephemeral.port_ranges() + exclude_ranges +
[
SYSTEM_PORT_RANGE,
(SYSTEM_PORT_RANGE[1], low),
(high, 65536)
]
)
return available.difference(exclude) | Returns a set of possible ports (excluding system,
ephemeral and well-known ports).
Pass ``high`` and/or ``low`` to limit the port range. |
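`utils.ranges_to_set` is defined elsewhere; a plausible one-liner under the assumption that ranges are half-open `(low, high)` pairs, which matches the `(high, 65536)` exclusion above:

```python
def ranges_to_set(ranges):
    # Hypothetical equivalent of utils.ranges_to_set: expand half-open
    # (low, high) pairs into one flat set of port numbers.
    return set().union(*(range(low, high) for low, high in ranges))

available = ranges_to_set([(1024, 1030), (2000, 2003)])
exclude = ranges_to_set([(1025, 1027)])
print(sorted(available - exclude))  # [1024, 1027, 1028, 1029, 2000, 2001, 2002]
```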
def create(model_config, batch_size, vectors=None):
""" Create an IMDB dataset """
path = model_config.data_dir('imdb')
text_field = data.Field(lower=True, tokenize='spacy', batch_first=True)
label_field = data.LabelField(is_target=True)
train_source, test_source = IMDBCached.splits(
root=path,
text_field=text_field,
label_field=label_field
)
text_field.build_vocab(train_source, max_size=25_000, vectors=vectors)
label_field.build_vocab(train_source)
train_iterator, test_iterator = data.BucketIterator.splits(
(train_source, test_source),
batch_size=batch_size,
device=model_config.torch_device(),
shuffle=True
)
return TextData(
train_source, test_source, train_iterator, test_iterator, text_field, label_field
) | Create an IMDB dataset |
def load_labware(self, labware: Labware) -> Labware:
""" Specify the presence of a piece of labware on the module.
:param labware: The labware object. This object should be already
initialized and its parent should be set to this
module's geometry. To initialize and load a labware
onto the module in one step, see
:py:meth:`load_labware_by_name`.
:returns: The properly-linked labware object
"""
mod_labware = self._geometry.add_labware(labware)
self._ctx.deck.recalculate_high_z()
return mod_labware | Specify the presence of a piece of labware on the module.
:param labware: The labware object. This object should be already
initialized and its parent should be set to this
module's geometry. To initialize and load a labware
onto the module in one step, see
:py:meth:`load_labware_by_name`.
:returns: The properly-linked labware object |
def pad(self, lmax):
"""
Return an SHMagCoeffs class where the coefficients are zero padded or
truncated to a different lmax.
Usage
-----
clm = x.pad(lmax)
Returns
-------
clm : SHMagCoeffs class instance
Parameters
----------
lmax : int
Maximum spherical harmonic degree to output.
"""
clm = self.copy()
if lmax <= self.lmax:
clm.coeffs = clm.coeffs[:, :lmax+1, :lmax+1]
clm.mask = clm.mask[:, :lmax+1, :lmax+1]
if self.errors is not None:
clm.errors = clm.errors[:, :lmax+1, :lmax+1]
else:
clm.coeffs = _np.pad(clm.coeffs, ((0, 0), (0, lmax - self.lmax),
(0, lmax - self.lmax)), 'constant')
if self.errors is not None:
clm.errors = _np.pad(
clm.errors, ((0, 0), (0, lmax - self.lmax),
(0, lmax - self.lmax)), 'constant')
mask = _np.zeros((2, lmax + 1, lmax + 1), dtype=bool)
for l in _np.arange(lmax + 1):
mask[:, l, :l + 1] = True
mask[1, :, 0] = False
clm.mask = mask
clm.lmax = lmax
return clm | Return an SHMagCoeffs class where the coefficients are zero padded or
truncated to a different lmax.
Usage
-----
clm = x.pad(lmax)
Returns
-------
clm : SHMagCoeffs class instance
Parameters
----------
lmax : int
Maximum spherical harmonic degree to output. | Below is the the instruction that describes the task:
### Input:
Return an SHMagCoeffs class where the coefficients are zero padded or
truncated to a different lmax.
Usage
-----
clm = x.pad(lmax)
Returns
-------
clm : SHMagCoeffs class instance
Parameters
----------
lmax : int
Maximum spherical harmonic degree to output.
### Response:
def pad(self, lmax):
"""
Return an SHMagCoeffs class where the coefficients are zero padded or
truncated to a different lmax.
Usage
-----
clm = x.pad(lmax)
Returns
-------
clm : SHMagCoeffs class instance
Parameters
----------
lmax : int
Maximum spherical harmonic degree to output.
"""
clm = self.copy()
if lmax <= self.lmax:
clm.coeffs = clm.coeffs[:, :lmax+1, :lmax+1]
clm.mask = clm.mask[:, :lmax+1, :lmax+1]
if self.errors is not None:
clm.errors = clm.errors[:, :lmax+1, :lmax+1]
else:
clm.coeffs = _np.pad(clm.coeffs, ((0, 0), (0, lmax - self.lmax),
(0, lmax - self.lmax)), 'constant')
if self.errors is not None:
clm.errors = _np.pad(
clm.errors, ((0, 0), (0, lmax - self.lmax),
(0, lmax - self.lmax)), 'constant')
mask = _np.zeros((2, lmax + 1, lmax + 1), dtype=_np.bool_)
for l in _np.arange(lmax + 1):
mask[:, l, :l + 1] = True
mask[1, :, 0] = False
clm.mask = mask
clm.lmax = lmax
return clm |
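The pad/truncate logic above reduces to slicing or a single numpy.pad call; a minimal self-contained sketch of the zero-padding branch:
import numpy as np

coeffs = np.ones((2, 3, 3))   # stands in for a (2, lmax+1, lmax+1) array with lmax=2
new_lmax = 4
padded = np.pad(coeffs, ((0, 0), (0, new_lmax - 2), (0, new_lmax - 2)), 'constant')
print(padded.shape)           # (2, 5, 5); entries above the old lmax are zero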
def edit_user_push_restrictions(self, *users):
"""
:calls: `POST /repos/:owner/:repo/branches/:branch/protection/restrictions <https://developer.github.com/v3/repos/branches>`_
:users: list of strings
"""
assert all(isinstance(element, (str, unicode)) for element in users), users
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.protection_url + "/restrictions/users",
input=users
) | :calls: `POST /repos/:owner/:repo/branches/:branch/protection/restrictions <https://developer.github.com/v3/repos/branches>`_
:users: list of strings | Below is the instruction that describes the task:
### Input:
:calls: `POST /repos/:owner/:repo/branches/:branch/protection/restrictions <https://developer.github.com/v3/repos/branches>`_
:users: list of strings
### Response:
def edit_user_push_restrictions(self, *users):
"""
:calls: `POST /repos/:owner/:repo/branches/:branch/protection/restrictions <https://developer.github.com/v3/repos/branches>`_
:users: list of strings
"""
assert all(isinstance(element, (str, unicode)) for element in users), users
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.protection_url + "/restrictions/users",
input=users
) |
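A hedged PyGithub usage sketch for the row above; the token and repository names are placeholders, and the call replaces the branch's user push whitelist with the given logins.
from github import Github

g = Github("<token>")                              # placeholder credentials
branch = g.get_repo("octocat/Hello-World").get_branch("master")
branch.edit_user_push_restrictions("octocat")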
def create_namespaced_pod(self, namespace, body, **kwargs):
"""
create a Pod
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_pod(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Pod body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1Pod
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_pod_with_http_info(namespace, body, **kwargs)
else:
(data) = self.create_namespaced_pod_with_http_info(namespace, body, **kwargs)
return data | create a Pod
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_pod(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Pod body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1Pod
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
create a Pod
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_pod(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Pod body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1Pod
If the method is called asynchronously,
returns the request thread.
### Response:
def create_namespaced_pod(self, namespace, body, **kwargs):
"""
create a Pod
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_pod(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Pod body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1Pod
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_pod_with_http_info(namespace, body, **kwargs)
else:
(data) = self.create_namespaced_pod_with_http_info(namespace, body, **kwargs)
return data |
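A minimal sketch of calling create_namespaced_pod through the official Python client, assuming a reachable cluster and a local kubeconfig:
from kubernetes import client, config

config.load_kube_config()                          # assumes ~/.kube/config exists
v1 = client.CoreV1Api()
pod = client.V1Pod(
    metadata=client.V1ObjectMeta(name="demo"),
    spec=client.V1PodSpec(containers=[client.V1Container(name="c", image="nginx")]),
)
v1.create_namespaced_pod(namespace="default", body=pod)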
def cmd(send, _, args):
"""Shows the currently guarded nicks.
Syntax: {command}
"""
guarded = args['handler'].guarded
if not guarded:
send("Nobody is guarded.")
else:
send(", ".join(guarded)) | Shows the currently guarded nicks.
Syntax: {command} | Below is the instruction that describes the task:
### Input:
Shows the currently guarded nicks.
Syntax: {command}
### Response:
def cmd(send, _, args):
"""Shows the currently guarded nicks.
Syntax: {command}
"""
guarded = args['handler'].guarded
if not guarded:
send("Nobody is guarded.")
else:
send(", ".join(guarded)) |
def decorate_client(api_client, func, name):
"""A helper for decorating :class:`bravado.client.SwaggerClient`.
:class:`bravado.client.SwaggerClient` can be extended by creating a class
which wraps all calls to it. This helper is used in a :func:`__getattr__`
to check if the attr exists on the api_client. If the attr does not exist
raise :class:`AttributeError`, if it exists and is not callable return it,
and if it is callable return a partial function calling `func` with `name`.
Example usage:
.. code-block:: python
class SomeClientDecorator(object):
def __init__(self, api_client, ...):
self.api_client = api_client
# First arg should be sufficiently unique to not conflict with any of
# the kwargs
def wrap_call(self, client_call_name, *args, **kwargs):
...
def __getattr__(self, name):
return decorate_client(self.api_client, self.wrap_call, name)
:param api_client: the client which is being decorated
:type api_client: :class:`bravado.client.SwaggerClient`
:param func: a callable which accepts `name`, `*args`, `**kwargs`
:type func: callable
:param name: the attribute being accessed
:type name: string
:returns: the attribute from the `api_client` or a partial of `func`
:raises: :class:`AttributeError`
"""
client_attr = getattr(api_client, name)
if not callable(client_attr):
return client_attr
return OperationDecorator(client_attr, functools.partial(func, name)) | A helper for decorating :class:`bravado.client.SwaggerClient`.
:class:`bravado.client.SwaggerClient` can be extended by creating a class
which wraps all calls to it. This helper is used in a :func:`__getattr__`
to check if the attr exists on the api_client. If the attr does not exist
raise :class:`AttributeError`, if it exists and is not callable return it,
and if it is callable return a partial function calling `func` with `name`.
Example usage:
.. code-block:: python
class SomeClientDecorator(object):
def __init__(self, api_client, ...):
self.api_client = api_client
# First arg should be sufficiently unique to not conflict with any of
# the kwargs
def wrap_call(self, client_call_name, *args, **kwargs):
...
def __getattr__(self, name):
return decorate_client(self.api_client, self.wrap_call, name)
:param api_client: the client which is being decorated
:type api_client: :class:`bravado.client.SwaggerClient`
:param func: a callable which accepts `name`, `*args`, `**kwargs`
:type func: callable
:param name: the attribute being accessed
:type name: string
:returns: the attribute from the `api_client` or a partial of `func`
:raises: :class:`AttributeError` | Below is the instruction that describes the task:
### Input:
A helper for decorating :class:`bravado.client.SwaggerClient`.
:class:`bravado.client.SwaggerClient` can be extended by creating a class
which wraps all calls to it. This helper is used in a :func:`__getattr__`
to check if the attr exists on the api_client. If the attr does not exist
raise :class:`AttributeError`, if it exists and is not callable return it,
and if it is callable return a partial function calling `func` with `name`.
Example usage:
.. code-block:: python
class SomeClientDecorator(object):
def __init__(self, api_client, ...):
self.api_client = api_client
# First arg should be sufficiently unique to not conflict with any of
# the kwargs
def wrap_call(self, client_call_name, *args, **kwargs):
...
def __getattr__(self, name):
return decorate_client(self.api_client, self.wrap_call, name)
:param api_client: the client which is being decorated
:type api_client: :class:`bravado.client.SwaggerClient`
:param func: a callable which accepts `name`, `*args`, `**kwargs`
:type func: callable
:param name: the attribute being accessed
:type name: string
:returns: the attribute from the `api_client` or a partial of `func`
:raises: :class:`AttributeError`
### Response:
def decorate_client(api_client, func, name):
"""A helper for decorating :class:`bravado.client.SwaggerClient`.
:class:`bravado.client.SwaggerClient` can be extended by creating a class
which wraps all calls to it. This helper is used in a :func:`__getattr__`
to check if the attr exists on the api_client. If the attr does not exist
raise :class:`AttributeError`, if it exists and is not callable return it,
and if it is callable return a partial function calling `func` with `name`.
Example usage:
.. code-block:: python
class SomeClientDecorator(object):
def __init__(self, api_client, ...):
self.api_client = api_client
# First arg should be sufficiently unique to not conflict with any of
# the kwargs
def wrap_call(self, client_call_name, *args, **kwargs):
...
def __getattr__(self, name):
return decorate_client(self.api_client, self.wrap_call, name)
:param api_client: the client which is being decorated
:type api_client: :class:`bravado.client.SwaggerClient`
:param func: a callable which accepts `name`, `*args`, `**kwargs`
:type func: callable
:param name: the attribute being accessed
:type name: string
:returns: the attribute from the `api_client` or a partial of `func`
:raises: :class:`AttributeError`
"""
client_attr = getattr(api_client, name)
if not callable(client_attr):
return client_attr
return OperationDecorator(client_attr, functools.partial(func, name)) |
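A minimal sketch of the wrapper pattern the docstring describes, assuming decorate_client from the row above is in scope; LoggingClient and its print statement are invented for illustration.
class LoggingClient(object):
    def __init__(self, api_client):
        self.api_client = api_client

    def wrap_call(self, client_call_name, *args, **kwargs):
        print("calling %s" % client_call_name)     # cross-cutting concern goes here
        return getattr(self.api_client, client_call_name)(*args, **kwargs)

    def __getattr__(self, name):
        return decorate_client(self.api_client, self.wrap_call, name)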
def int3(params, ctxt, scope, stream, coord, interp):
"""Define the ``Int3()`` function in the interpreter. Calling
``Int3()`` will drop the user into an interactive debugger.
"""
if interp._no_debug:
return
if interp._int3:
interp.debugger = PfpDbg(interp)
interp.debugger.cmdloop() | Define the ``Int3()`` function in the interpreter. Calling
``Int3()`` will drop the user into an interactive debugger. | Below is the instruction that describes the task:
### Input:
Define the ``Int3()`` function in the interpreter. Calling
``Int3()`` will drop the user into an interactive debugger.
### Response:
def int3(params, ctxt, scope, stream, coord, interp):
"""Define the ``Int3()`` function in the interpreter. Calling
``Int3()`` will drop the user into an interactive debugger.
"""
if interp._no_debug:
return
if interp._int3:
interp.debugger = PfpDbg(interp)
interp.debugger.cmdloop() |
def _mmComputeSequenceRepresentationData(self):
"""
Calculates values for the overlap distance matrix, stability within a
sequence, and distinctness between sequences. These values are cached so
that they do not need to be recomputed for calls to each of several accessor
methods that use these values.
"""
if not self._sequenceRepresentationDataStale:
return
unionSDRTrace = self.mmGetTraceUnionSDR()
sequenceLabelsTrace = self.mmGetTraceSequenceLabels()
resetsTrace = self.mmGetTraceResets()
n = len(unionSDRTrace.data)
overlapMatrix = numpy.empty((n, n), dtype=uintType)
stabilityConfusionUnionSDR = []
distinctnessConfusionUnionSDR = []
for i in xrange(n):
for j in xrange(i+1):
overlapUnionSDR = len(unionSDRTrace.data[i] & unionSDRTrace.data[j])
overlapMatrix[i][j] = overlapUnionSDR
overlapMatrix[j][i] = overlapUnionSDR
if (i != j and
sequenceLabelsTrace.data[i] is not None and
not resetsTrace.data[i] and
sequenceLabelsTrace.data[j] is not None and
not resetsTrace.data[j]):
if sequenceLabelsTrace.data[i] == sequenceLabelsTrace.data[j]:
stabilityConfusionUnionSDR.append(overlapUnionSDR)
else:
distinctnessConfusionUnionSDR.append(overlapUnionSDR)
self._mmData["overlap"] = overlapMatrix
self._mmData["stabilityConfusion"] = stabilityConfusionUnionSDR
self._mmData["distinctnessConfusion"] = distinctnessConfusionUnionSDR
self._sequenceRepresentationDataStale = False | Calculates values for the overlap distance matrix, stability within a
sequence, and distinctness between sequences. These values are cached so
that they do not need to be recomputed for calls to each of several accessor
methods that use these values. | Below is the instruction that describes the task:
### Input:
Calculates values for the overlap distance matrix, stability within a
sequence, and distinctness between sequences. These values are cached so
that they do not need to be recomputed for calls to each of several accessor
methods that use these values.
### Response:
def _mmComputeSequenceRepresentationData(self):
"""
Calculates values for the overlap distance matrix, stability within a
sequence, and distinctness between sequences. These values are cached so
that they do not need to be recomputed for calls to each of several accessor
methods that use these values.
"""
if not self._sequenceRepresentationDataStale:
return
unionSDRTrace = self.mmGetTraceUnionSDR()
sequenceLabelsTrace = self.mmGetTraceSequenceLabels()
resetsTrace = self.mmGetTraceResets()
n = len(unionSDRTrace.data)
overlapMatrix = numpy.empty((n, n), dtype=uintType)
stabilityConfusionUnionSDR = []
distinctnessConfusionUnionSDR = []
for i in xrange(n):
for j in xrange(i+1):
overlapUnionSDR = len(unionSDRTrace.data[i] & unionSDRTrace.data[j])
overlapMatrix[i][j] = overlapUnionSDR
overlapMatrix[j][i] = overlapUnionSDR
if (i != j and
sequenceLabelsTrace.data[i] is not None and
not resetsTrace.data[i] and
sequenceLabelsTrace.data[j] is not None and
not resetsTrace.data[j]):
if sequenceLabelsTrace.data[i] == sequenceLabelsTrace.data[j]:
stabilityConfusionUnionSDR.append(overlapUnionSDR)
else:
distinctnessConfusionUnionSDR.append(overlapUnionSDR)
self._mmData["overlap"] = overlapMatrix
self._mmData["stabilityConfusion"] = stabilityConfusionUnionSDR
self._mmData["distinctnessConfusion"] = distinctnessConfusionUnionSDR
self._sequenceRepresentationDataStale = False |
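The overlap matrix above is a symmetric pairwise set-intersection count; a self-contained numpy sketch of the same loop on toy SDRs:
import numpy as np

sdrs = [{1, 4, 9}, {1, 4, 7}, {2, 5, 9}]           # toy union SDRs as sets of active bits
n = len(sdrs)
overlap = np.zeros((n, n), dtype=np.uint32)
for i in range(n):
    for j in range(i + 1):
        overlap[i][j] = overlap[j][i] = len(sdrs[i] & sdrs[j])
print(overlap)                                      # diagonal holds each SDR's size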
def finish (self, key1set, key2set, mask=True):
"""Returns (weights, means, variances), where:
weights
ndarray of number of samples per key; shape (n1, n2), where n1 is the
size of key1set and n2 is the size of key2set.
means
computed mean value for each key
variances
computed variance for each key
"""
n1_us = len (self._key1map)
n2_us = len (self._key2map)
wt_us = self._m0[:n1_us,:n2_us]
badwt = (wt_us == 0) | ~np.isfinite (wt_us)
wt_us[badwt] = 1
mean_us = self._m1[:n1_us,:n2_us] / wt_us
var_us = self._m2[:n1_us,:n2_us] / wt_us - mean_us**2
wt_us[badwt] = 0
mean_us[badwt] = np.nan
var_us[badwt] = np.nan
n1_them = len (key1set)
n2_them = len (key2set)
wt = np.zeros ((n1_them, n2_them), dtype=self._m0.dtype)
mean = np.empty ((n1_them, n2_them), dtype=self._m1.dtype)
mean.fill (np.nan)
var = np.empty_like (mean)
var.fill (np.nan)
# You can't fancy-index on two axes simultaneously, so we do a manual
# loop on the first axis.
us_idx2 = []
them_idx2 = []
for them_i2, key2 in enumerate (key2set):
us_i2 = self._key2map[key2]
if us_i2 < n2_us:
them_idx2.append (them_i2)
us_idx2.append (us_i2)
# otherwise, we must not have seen that key
for them_i1, key1 in enumerate (key1set):
us_i1 = self._key1map[key1]
if us_i1 >= n1_us:
continue # don't have this key
wt[them_i1,them_idx2] = wt_us[us_i1,us_idx2]
mean[them_i1,them_idx2] = mean_us[us_i1,us_idx2]
var[them_i1,them_idx2] = var_us[us_i1,us_idx2]
if mask:
m = ~np.isfinite (mean)
mean = np.ma.MaskedArray (mean, m)
var = np.ma.MaskedArray (var, m)
self._m0 = self._m1 = self._m2 = None
self._key1map.clear ()
self._key2map.clear ()
return wt, mean, var | Returns (weights, means, variances), where:
weights
ndarray of number of samples per key; shape (n1, n2), where n1 is the
size of key1set and n2 is the size of key2set.
means
computed mean value for each key
variances
computed variance for each key | Below is the instruction that describes the task:
### Input:
Returns (weights, means, variances), where:
weights
ndarray of number of samples per key; shape (n1, n2), where n1 is the
size of key1set and n2 is the size of key2set.
means
computed mean value for each key
variances
computed variance for each key
### Response:
def finish (self, key1set, key2set, mask=True):
"""Returns (weights, means, variances), where:
weights
ndarray of number of samples per key; shape (n1, n2), where n1 is the
size of key1set and n2 is the size of key2set.
means
computed mean value for each key
variances
computed variance for each key
"""
n1_us = len (self._key1map)
n2_us = len (self._key2map)
wt_us = self._m0[:n1_us,:n2_us]
badwt = (wt_us == 0) | ~np.isfinite (wt_us)
wt_us[badwt] = 1
mean_us = self._m1[:n1_us,:n2_us] / wt_us
var_us = self._m2[:n1_us,:n2_us] / wt_us - mean_us**2
wt_us[badwt] = 0
mean_us[badwt] = np.nan
var_us[badwt] = np.nan
n1_them = len (key1set)
n2_them = len (key2set)
wt = np.zeros ((n1_them, n2_them), dtype=self._m0.dtype)
mean = np.empty ((n1_them, n2_them), dtype=self._m1.dtype)
mean.fill (np.nan)
var = np.empty_like (mean)
var.fill (np.nan)
# You can't fancy-index on two axes simultaneously, so we do a manual
# loop on the first axis.
us_idx2 = []
them_idx2 = []
for them_i2, key2 in enumerate (key2set):
us_i2 = self._key2map[key2]
if us_i2 < n2_us:
them_idx2.append (them_i2)
us_idx2.append (us_i2)
# otherwise, we must not have seen that key
for them_i1, key1 in enumerate (key1set):
us_i1 = self._key1map[key1]
if us_i1 >= n1_us:
continue # don't have this key
wt[them_i1,them_idx2] = wt_us[us_i1,us_idx2]
mean[them_i1,them_idx2] = mean_us[us_i1,us_idx2]
var[them_i1,them_idx2] = var_us[us_i1,us_idx2]
if mask:
m = ~np.isfinite (mean)
mean = np.ma.MaskedArray (mean, m)
var = np.ma.MaskedArray (var, m)
self._m0 = self._m1 = self._m2 = None
self._key1map.clear ()
self._key2map.clear ()
return wt, mean, var |
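The accumulator above keeps raw moments per key pair (m0 counts, m1 sums, m2 sums of squares); a one-cell sketch of how mean and variance fall out of them:
import numpy as np

samples = np.array([1.0, 2.0, 4.0])
m0, m1, m2 = len(samples), samples.sum(), (samples ** 2).sum()
mean = m1 / m0
var = m2 / m0 - mean ** 2        # population variance from raw moments
print(mean, var)                 # 2.333..., 1.555...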
def gossip_connect(self, format, *args):
"""
Set-up gossip discovery of other nodes. A node may connect to multiple
other nodes, for redundancy paths. For details of the gossip network
design, see the CZMQ zgossip class.
"""
return lib.zyre_gossip_connect(self._as_parameter_, format, *args) | Set-up gossip discovery of other nodes. A node may connect to multiple
other nodes, for redundancy paths. For details of the gossip network
design, see the CZMQ zgossip class. | Below is the instruction that describes the task:
### Input:
Set-up gossip discovery of other nodes. A node may connect to multiple
other nodes, for redundancy paths. For details of the gossip network
design, see the CZMQ zgossip class.
### Response:
def gossip_connect(self, format, *args):
"""
Set-up gossip discovery of other nodes. A node may connect to multiple
other nodes, for redundancy paths. For details of the gossip network
design, see the CZMQ zgossip class.
"""
return lib.zyre_gossip_connect(self._as_parameter_, format, *args) |
def _accept(self):
"""
Work loop runs forever (or until running is False)
:return:
"""
logger.warning("Reactor " + self._name + " is starting")
while self.running:
try:
self._completeTask()
except Exception:
logger.exception("Unexpected exception during request processing")
logger.warning("Reactor " + self._name + " is terminating") | Work loop runs forever (or until running is False)
:return: | Below is the instruction that describes the task:
### Input:
Work loop runs forever (or until running is False)
:return:
### Response:
def _accept(self):
"""
Work loop runs forever (or until running is False)
:return:
"""
logger.warning("Reactor " + self._name + " is starting")
while self.running:
try:
self._completeTask()
except Exception:
logger.exception("Unexpected exception during request processing")
logger.warning("Reactor " + self._name + " is terminating") |
def from_file(filename):
"""Creates a new Spec object from a given file.
:param filename: The path to the spec file.
:return: A new Spec object.
"""
spec = Spec()
with open(filename, "r", encoding="utf-8") as f:
parse_context = {"current_subpackage": None}
for line in f:
spec, parse_context = _parse(spec, parse_context, line)
return spec | Creates a new Spec object from a given file.
:param filename: The path to the spec file.
:return: A new Spec object. | Below is the instruction that describes the task:
### Input:
Creates a new Spec object from a given file.
:param filename: The path to the spec file.
:return: A new Spec object.
### Response:
def from_file(filename):
"""Creates a new Spec object from a given file.
:param filename: The path to the spec file.
:return: A new Spec object.
"""
spec = Spec()
with open(filename, "r", encoding="utf-8") as f:
parse_context = {"current_subpackage": None}
for line in f:
spec, parse_context = _parse(spec, parse_context, line)
return spec |
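A hedged usage sketch of the parser above; the spec contents are a placeholder and from_file is assumed importable from the defining module:
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".spec", delete=False) as f:
    f.write("Name: demo\nVersion: 1.0\n")
    path = f.name

spec = from_file(path)           # parses the file line by line into a Spec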
async def encode_jwt(self, identity: SessionIdentity) -> str:
""" 将identity编码为JWT """
assert identity
payload = {
"sub": identity.identity,
"user_id": identity.user_id,
"exp": int(time.time() + self._max_age) # seconds from 1970-1-1 UTC
}
if identity.client_id:
payload['aud'] = identity.client_id
token = jwt.encode({'alg': 'HS256'}, payload, self._secure_key)
return token.decode('ascii') | Encode the identity as a JWT. | Below is the instruction that describes the task:
### Input:
Encode the identity as a JWT.
### Response:
async def encode_jwt(self, identity: SessionIdentity) -> str:
""" 将identity编码为JWT """
assert identity
payload = {
"sub": identity.identity,
"user_id": identity.user_id,
"exp": int(time.time() + self._max_age) # seconds from 1970-1-1 UTC
}
if identity.client_id:
payload['aud'] = identity.client_id
token = jwt.encode({'alg': 'HS256'}, payload, self._secure_key)
return token.decode('ascii') |
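The row above uses a jwt library with an encode(header, payload, key) signature; with PyJWT the equivalent HS256 round trip, on placeholder claims, looks like this:
import time
import jwt                       # PyJWT

payload = {"sub": "alice", "user_id": 42, "exp": int(time.time()) + 3600}
token = jwt.encode(payload, "secret-key", algorithm="HS256")
claims = jwt.decode(token, "secret-key", algorithms=["HS256"])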
def _get_redis_keys_opts():
'''
Build the key opts based on the user options.
'''
return {
'bank_prefix': __opts__.get('cache.redis.bank_prefix', _BANK_PREFIX),
'bank_keys_prefix': __opts__.get('cache.redis.bank_keys_prefix', _BANK_KEYS_PREFIX),
'key_prefix': __opts__.get('cache.redis.key_prefix', _KEY_PREFIX),
'separator': __opts__.get('cache.redis.separator', _SEPARATOR)
} | Build the key opts based on the user options. | Below is the instruction that describes the task:
### Input:
Build the key opts based on the user options.
### Response:
def _get_redis_keys_opts():
'''
Build the key opts based on the user options.
'''
return {
'bank_prefix': __opts__.get('cache.redis.bank_prefix', _BANK_PREFIX),
'bank_keys_prefix': __opts__.get('cache.redis.bank_keys_prefix', _BANK_KEYS_PREFIX),
'key_prefix': __opts__.get('cache.redis.key_prefix', _KEY_PREFIX),
'separator': __opts__.get('cache.redis.separator', _SEPARATOR)
} |
async def down(self):
"""Press key down."""
await self._send_commands(
self._move('Down', 0, 20, 250),
self._move('Move', 1, 20, 255),
self._move('Move', 2, 20, 260),
self._move('Move', 3, 20, 265),
self._move('Move', 4, 20, 270),
self._move('Move', 5, 20, 275),
self._move('Up', 6, 20, 275)) | Press key down. | Below is the instruction that describes the task:
### Input:
Press key down.
### Response:
async def down(self):
"""Press key down."""
await self._send_commands(
self._move('Down', 0, 20, 250),
self._move('Move', 1, 20, 255),
self._move('Move', 2, 20, 260),
self._move('Move', 3, 20, 265),
self._move('Move', 4, 20, 270),
self._move('Move', 5, 20, 275),
self._move('Up', 6, 20, 275)) |
def _GetChunkForReading(self, chunk):
"""Retrieve the relevant blob from the AFF4 data store or cache."""
offset = chunk * self._HASH_SIZE
self.index.seek(offset)
chunk_name = self.index.read(self._HASH_SIZE)
try:
return self.chunk_cache.Get(chunk_name)
except KeyError:
pass
# We don't have this chunk already cached. The most common read
# access pattern is contiguous reading so since we have to go to
# the data store already, we read ahead to reduce round trips.
self.index.seek(offset)
readahead = []
for _ in range(self._READAHEAD):
name = self.index.read(self._HASH_SIZE)
if name and name not in self.chunk_cache:
readahead.append(rdf_objects.BlobID.FromBytes(name))
self._ReadChunks(readahead)
try:
return self.chunk_cache.Get(chunk_name)
except KeyError:
raise aff4.ChunkNotFoundError("Cannot open chunk %s" % chunk) | Retrieve the relevant blob from the AFF4 data store or cache. | Below is the the instruction that describes the task:
### Input:
Retrieve the relevant blob from the AFF4 data store or cache.
### Response:
def _GetChunkForReading(self, chunk):
"""Retrieve the relevant blob from the AFF4 data store or cache."""
offset = chunk * self._HASH_SIZE
self.index.seek(offset)
chunk_name = self.index.read(self._HASH_SIZE)
try:
return self.chunk_cache.Get(chunk_name)
except KeyError:
pass
# We don't have this chunk already cached. The most common read
# access pattern is contiguous reading so since we have to go to
# the data store already, we read ahead to reduce round trips.
self.index.seek(offset)
readahead = []
for _ in range(self._READAHEAD):
name = self.index.read(self._HASH_SIZE)
if name and name not in self.chunk_cache:
readahead.append(rdf_objects.BlobID.FromBytes(name))
self._ReadChunks(readahead)
try:
return self.chunk_cache.Get(chunk_name)
except KeyError:
raise aff4.ChunkNotFoundError("Cannot open chunk %s" % chunk) |
def mean(data, units=False, time=False):
"""
Function to compute mean of data
Parameters
----------
data : numpy.ndarray
1st axis unit, 2nd axis time
units : bool
Average over units
time : bool
Average over time
Returns
-------
if units=False and time=False:
error
if units=True:
1 dim numpy.ndarray; time series
if time=True:
1 dim numpy.ndarray; series of unit means across time
if units=True and time=True:
float; unit and time mean
Examples
--------
>>> mean(np.array([[1, 2, 3], [4, 5, 6]]), units=True)
array([ 2.5, 3.5, 4.5])
>>> mean(np.array([[1, 2, 3], [4, 5, 6]]), time=True)
array([ 2., 5.])
>>> mean(np.array([[1, 2, 3], [4, 5, 6]]), units=True,time=True)
3.5
"""
assert(units is not False or time is not False)
if units is True and time is False:
return np.mean(data, axis=0)
elif units is False and time is True:
return np.mean(data, axis=1)
elif units is True and time is True:
return np.mean(data) | Function to compute mean of data
Parameters
----------
data : numpy.ndarray
1st axis unit, 2nd axis time
units : bool
Average over units
time : bool
Average over time
Returns
-------
if units=False and time=False:
error
if units=True:
1 dim numpy.ndarray; time series
if time=True:
1 dim numpy.ndarray; series of unit means across time
if units=True and time=True:
float; unit and time mean
Examples
--------
>>> mean(np.array([[1, 2, 3], [4, 5, 6]]), units=True)
array([ 2.5, 3.5, 4.5])
>>> mean(np.array([[1, 2, 3], [4, 5, 6]]), time=True)
array([ 2., 5.])
>>> mean(np.array([[1, 2, 3], [4, 5, 6]]), units=True,time=True)
3.5 | Below is the instruction that describes the task:
### Input:
Function to compute mean of data
Parameters
----------
data : numpy.ndarray
1st axis unit, 2nd axis time
units : bool
Average over units
time : bool
Average over time
Returns
-------
if units=False and time=False:
error
if units=True:
1 dim numpy.ndarray; time series
if time=True:
1 dim numpy.ndarray; series of unit means across time
if units=True and time=True:
float; unit and time mean
Examples
--------
>>> mean(np.array([[1, 2, 3], [4, 5, 6]]), units=True)
array([ 2.5, 3.5, 4.5])
>>> mean(np.array([[1, 2, 3], [4, 5, 6]]), time=True)
array([ 2., 5.])
>>> mean(np.array([[1, 2, 3], [4, 5, 6]]), units=True,time=True)
3.5
### Response:
def mean(data, units=False, time=False):
"""
Function to compute mean of data
Parameters
----------
data : numpy.ndarray
1st axis unit, 2nd axis time
units : bool
Average over units
time : bool
Average over time
Returns
-------
if units=False and time=False:
error
if units=True:
1 dim numpy.ndarray; time series
if time=True:
1 dim numpy.ndarray; series of unit means across time
if units=True and time=True:
float; unit and time mean
Examples
--------
>>> mean(np.array([[1, 2, 3], [4, 5, 6]]), units=True)
array([ 2.5, 3.5, 4.5])
>>> mean(np.array([[1, 2, 3], [4, 5, 6]]), time=True)
array([ 2., 5.])
>>> mean(np.array([[1, 2, 3], [4, 5, 6]]), units=True,time=True)
3.5
"""
assert(units is not False or time is not False)
if units is True and time is False:
return np.mean(data, axis=0)
elif units is False and time is True:
return np.mean(data, axis=1)
elif units is True and time is True:
return np.mean(data) |
def lifetimes_samples(self):
r""" Samples of the timescales """
res = np.empty((self.nsamples, self.nstates), dtype=config.dtype)
for i in range(self.nsamples):
res[i, :] = self._sampled_hmms[i].lifetimes
return res | r""" Samples of the timescales | Below is the the instruction that describes the task:
### Input:
r""" Samples of the timescales
### Response:
def lifetimes_samples(self):
r""" Samples of the timescales """
res = np.empty((self.nsamples, self.nstates), dtype=config.dtype)
for i in range(self.nsamples):
res[i, :] = self._sampled_hmms[i].lifetimes
return res |
def gettextvalue(self, window_name, object_name, startPosition=0, endPosition=0):
"""
Get text value
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@param startPosition: Starting position of text to fetch
@type: startPosition: int
@param endPosition: Ending position of text to fetch
@type: endPosition: int
@return: text on success.
@rtype: string
"""
object_handle = self._get_object_handle(window_name, object_name)
if not object_handle.AXEnabled:
raise LdtpServerException(u"Object %s state disabled" % object_name)
return object_handle.AXValue | Get text value
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@param startPosition: Starting position of text to fetch
@type: startPosition: int
@param endPosition: Ending position of text to fetch
@type: endPosition: int
@return: text on success.
@rtype: string | Below is the instruction that describes the task:
### Input:
Get text value
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@param startPosition: Starting position of text to fetch
@type: startPosition: int
@param endPosition: Ending position of text to fetch
@type: endPosition: int
@return: text on success.
@rtype: string
### Response:
def gettextvalue(self, window_name, object_name, startPosition=0, endPosition=0):
"""
Get text value
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
@param startPosition: Starting position of text to fetch
@type: startPosition: int
@param endPosition: Ending position of text to fetch
@type: endPosition: int
@return: text on success.
@rtype: string
"""
object_handle = self._get_object_handle(window_name, object_name)
if not object_handle.AXEnabled:
raise LdtpServerException(u"Object %s state disabled" % object_name)
return object_handle.AXValue |
def vcf2cytosure(store, institute_id, case_name, individual_id):
"""vcf2cytosure CGH file for inidividual."""
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
for individual in case_obj['individuals']:
if individual['individual_id'] == individual_id:
individual_obj = individual
return (individual_obj['display_name'], individual_obj['vcf2cytosure']) | vcf2cytosure CGH file for individual. | Below is the instruction that describes the task:
### Input:
vcf2cytosure CGH file for individual.
### Response:
def vcf2cytosure(store, institute_id, case_name, individual_id):
"""vcf2cytosure CGH file for inidividual."""
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
for individual in case_obj['individuals']:
if individual['individual_id'] == individual_id:
individual_obj = individual
return (individual_obj['display_name'], individual_obj['vcf2cytosure']) |
def stats():
'''
Runs the quotastats command, and returns the parsed output
CLI Example:
.. code-block:: bash
salt '*' quota.stats
'''
ret = {}
out = __salt__['cmd.run']('quotastats').splitlines()
for line in out:
if not line:
continue
comps = line.split(': ')
ret[comps[0]] = comps[1]
return ret | Runs the quotastats command, and returns the parsed output
CLI Example:
.. code-block:: bash
salt '*' quota.stats | Below is the instruction that describes the task:
### Input:
Runs the quotastats command, and returns the parsed output
CLI Example:
.. code-block:: bash
salt '*' quota.stats
### Response:
def stats():
'''
Runs the quotastats command, and returns the parsed output
CLI Example:
.. code-block:: bash
salt '*' quota.stats
'''
ret = {}
out = __salt__['cmd.run']('quotastats').splitlines()
for line in out:
if not line:
continue
comps = line.split(': ')
ret[comps[0]] = comps[1]
return ret |
def pitRemove(self,
elevation_grid,
pit_filled_elevation_grid,
input_depression_mask_grid=None,
consider4way=False,
):
"""
Remove low spots from DEM.
"""
log("PROCESS: PitRemove")
self.pit_filled_elevation_grid = pit_filled_elevation_grid
# Construct the taudem command line.
cmd = [os.path.join(self.taudem_exe_path, 'pitremove'),
'-z', elevation_grid,
'-fel', self.pit_filled_elevation_grid,
]
if input_depression_mask_grid:
cmd += ['-depmask', input_depression_mask_grid]
if consider4way:
cmd += ['-4way']
self._run_mpi_cmd(cmd)
# create projection file
self._add_prj_file(elevation_grid,
self.pit_filled_elevation_grid) | Remove low spots from DEM. | Below is the instruction that describes the task:
### Input:
Remove low spots from DEM.
### Response:
def pitRemove(self,
elevation_grid,
pit_filled_elevation_grid,
input_depression_mask_grid=None,
consider4way=False,
):
"""
Remove low spots from DEM.
"""
log("PROCESS: PitRemove")
self.pit_filled_elevation_grid = pit_filled_elevation_grid
# Construct the taudem command line.
cmd = [os.path.join(self.taudem_exe_path, 'pitremove'),
'-z', elevation_grid,
'-fel', self.pit_filled_elevation_grid,
]
if input_depression_mask_grid:
cmd += ['-depmask', input_depression_mask_grid]
if consider4way:
cmd += ['-4way']
self._run_mpi_cmd(cmd)
# create projection file
self._add_prj_file(elevation_grid,
self.pit_filled_elevation_grid) |
def critical_angle(pressure, u, v, heights, stormu, stormv):
r"""Calculate the critical angle.
The critical angle is the angle between the 10m storm-relative inflow vector
and the 10m-500m shear vector. A critical angle near 90 degrees indicates
that a storm in this environment on the indicated storm motion vector
is likely ingesting purely streamwise vorticity into its updraft, and [Esterheld2008]_
showed that significantly tornadic supercells tend to occur in environments
with critical angles near 90 degrees.
Parameters
----------
pressure : `pint.Quantity`
Pressures from sounding.
u : `pint.Quantity`
U-component of sounding winds.
v : `pint.Quantity`
V-component of sounding winds.
heights : `pint.Quantity`
Heights from sounding.
stormu : `pint.Quantity`
U-component of storm motion.
stormv : `pint.Quantity`
V-component of storm motion.
Returns
-------
`pint.Quantity`
critical angle in degrees
"""
# Convert everything to m/s
u = u.to('m/s')
v = v.to('m/s')
stormu = stormu.to('m/s')
stormv = stormv.to('m/s')
sort_inds = np.argsort(pressure)[::-1]
pressure = pressure[sort_inds]
heights = heights[sort_inds]
u = u[sort_inds]
v = v[sort_inds]
# Calculate sfc-500m shear vector
shr5 = bulk_shear(pressure, u, v, heights=heights, depth=500 * units('meter'))
# Make everything relative to the sfc wind orientation
umn = stormu - u[0]
vmn = stormv - v[0]
vshr = np.asarray([shr5[0].magnitude, shr5[1].magnitude])
vsm = np.asarray([umn.magnitude, vmn.magnitude])
angle_c = np.dot(vshr, vsm) / (np.linalg.norm(vshr) * np.linalg.norm(vsm))
critical_angle = np.arccos(angle_c) * units('radian')
return critical_angle.to('degrees') | r"""Calculate the critical angle.
The critical angle is the angle between the 10m storm-relative inflow vector
and the 10m-500m shear vector. A critical angle near 90 degrees indicates
that a storm in this environment on the indicated storm motion vector
is likely ingesting purely streamwise vorticity into its updraft, and [Esterheld2008]_
showed that significantly tornadic supercells tend to occur in environments
with critical angles near 90 degrees.
Parameters
----------
pressure : `pint.Quantity`
Pressures from sounding.
u : `pint.Quantity`
U-component of sounding winds.
v : `pint.Quantity`
V-component of sounding winds.
heights : `pint.Quantity`
Heights from sounding.
stormu : `pint.Quantity`
U-component of storm motion.
stormv : `pint.Quantity`
V-component of storm motion.
Returns
-------
`pint.Quantity`
critical angle in degrees | Below is the instruction that describes the task:
### Input:
r"""Calculate the critical angle.
The critical angle is the angle between the 10m storm-relative inflow vector
and the 10m-500m shear vector. A critical angle near 90 degrees indicates
that a storm in this environment on the indicated storm motion vector
is likely ingesting purely streamwise vorticity into its updraft, and [Esterheld2008]_
showed that significantly tornadic supercells tend to occur in environments
with critical angles near 90 degrees.
Parameters
----------
pressure : `pint.Quantity`
Pressures from sounding.
u : `pint.Quantity`
U-component of sounding winds.
v : `pint.Quantity`
V-component of sounding winds.
heights : `pint.Quantity`
Heights from sounding.
stormu : `pint.Quantity`
U-component of storm motion.
stormv : `pint.Quantity`
V-component of storm motion.
Returns
-------
`pint.Quantity`
critical angle in degrees
### Response:
def critical_angle(pressure, u, v, heights, stormu, stormv):
r"""Calculate the critical angle.
The critical angle is the angle between the 10m storm-relative inflow vector
and the 10m-500m shear vector. A critical angle near 90 degrees indicates
that a storm in this environment on the indicated storm motion vector
is likely ingesting purely streamwise vorticity into its updraft, and [Esterheld2008]_
showed that significantly tornadic supercells tend to occur in environments
with critical angles near 90 degrees.
Parameters
----------
pressure : `pint.Quantity`
Pressures from sounding.
u : `pint.Quantity`
U-component of sounding winds.
v : `pint.Quantity`
V-component of sounding winds.
heights : `pint.Quantity`
Heights from sounding.
stormu : `pint.Quantity`
U-component of storm motion.
stormv : `pint.Quantity`
V-component of storm motion.
Returns
-------
`pint.Quantity`
critical angle in degrees
"""
# Convert everything to m/s
u = u.to('m/s')
v = v.to('m/s')
stormu = stormu.to('m/s')
stormv = stormv.to('m/s')
sort_inds = np.argsort(pressure)[::-1]
pressure = pressure[sort_inds]
heights = heights[sort_inds]
u = u[sort_inds]
v = v[sort_inds]
# Calculate sfc-500m shear vector
shr5 = bulk_shear(pressure, u, v, heights=heights, depth=500 * units('meter'))
# Make everything relative to the sfc wind orientation
umn = stormu - u[0]
vmn = stormv - v[0]
vshr = np.asarray([shr5[0].magnitude, shr5[1].magnitude])
vsm = np.asarray([umn.magnitude, vmn.magnitude])
angle_c = np.dot(vshr, vsm) / (np.linalg.norm(vshr) * np.linalg.norm(vsm))
critical_angle = np.arccos(angle_c) * units('radian')
return critical_angle.to('degrees') |
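The core of the computation above is the arccos of a normalized dot product; a worked two-vector example with hypothetical shear and inflow values:
import numpy as np

shear = np.array([10.0, 0.0])    # hypothetical 0-500 m shear vector (m/s)
inflow = np.array([0.0, -8.0])   # hypothetical storm-relative inflow (m/s)
cos_angle = np.dot(shear, inflow) / (np.linalg.norm(shear) * np.linalg.norm(inflow))
print(np.degrees(np.arccos(cos_angle)))   # 90.0, i.e. purely streamwise vorticity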
def write(self, f, grp, name, data, type_string, options):
""" Writes an object's metadata to file.
Writes the Python object 'data' to 'name' in h5py.Group 'grp'.
.. versionchanged:: 0.2
Arguments changed.
Parameters
----------
f : h5py.File
The HDF5 file handle that is open.
grp : h5py.Group or h5py.File
The parent HDF5 Group (or File if at '/') that contains the
object with the specified name.
name : str
Name of the object.
data
The object to write to file.
type_string : str or None
The type string for `data`. If it is ``None``, one will have
to be gotten by ``get_type_string``.
options : hdf5storage.core.Options
hdf5storage options object.
Raises
------
NotImplementedError
If writing 'data' to file is currently not supported.
hdf5storage.exceptions.TypeNotMatlabCompatibleError
If writing a type not compatible with MATLAB and
`options.action_for_matlab_incompatible` is set to
``'error'``.
Notes
-----
Must be overridden in a subclass because a
``NotImplementedError`` is thrown immediately.
See Also
--------
hdf5storage.utilities.write_data
"""
raise NotImplementedError("Can't write data type: "
+ str(type(data))) | Writes an object's metadata to file.
Writes the Python object 'data' to 'name' in h5py.Group 'grp'.
.. versionchanged:: 0.2
Arguments changed.
Parameters
----------
f : h5py.File
The HDF5 file handle that is open.
grp : h5py.Group or h5py.File
The parent HDF5 Group (or File if at '/') that contains the
object with the specified name.
name : str
Name of the object.
data
The object to write to file.
type_string : str or None
The type string for `data`. If it is ``None``, one will have
to be gotten by ``get_type_string``.
options : hdf5storage.core.Options
hdf5storage options object.
Raises
------
NotImplementedError
If writing 'data' to file is currently not supported.
hdf5storage.exceptions.TypeNotMatlabCompatibleError
If writing a type not compatible with MATLAB and
`options.action_for_matlab_incompatible` is set to
``'error'``.
Notes
-----
Must be overridden in a subclass because a
``NotImplementedError`` is thrown immediately.
See Also
--------
hdf5storage.utilities.write_data | Below is the instruction that describes the task:
### Input:
Writes an object's metadata to file.
Writes the Python object 'data' to 'name' in h5py.Group 'grp'.
.. versionchanged:: 0.2
Arguments changed.
Parameters
----------
f : h5py.File
The HDF5 file handle that is open.
grp : h5py.Group or h5py.File
The parent HDF5 Group (or File if at '/') that contains the
object with the specified name.
name : str
Name of the object.
data
The object to write to file.
type_string : str or None
The type string for `data`. If it is ``None``, one will have
to be gotten by ``get_type_string``.
options : hdf5storage.core.Options
hdf5storage options object.
Raises
------
NotImplementedError
If writing 'data' to file is currently not supported.
hdf5storage.exceptions.TypeNotMatlabCompatibleError
If writing a type not compatible with MATLAB and
`options.action_for_matlab_incompatible` is set to
``'error'``.
Notes
-----
Must be overridden in a subclass because a
``NotImplementedError`` is thrown immediately.
See Also
--------
hdf5storage.utilities.write_data
### Response:
def write(self, f, grp, name, data, type_string, options):
""" Writes an object's metadata to file.
Writes the Python object 'data' to 'name' in h5py.Group 'grp'.
.. versionchanged:: 0.2
Arguments changed.
Parameters
----------
f : h5py.File
The HDF5 file handle that is open.
grp : h5py.Group or h5py.File
The parent HDF5 Group (or File if at '/') that contains the
object with the specified name.
name : str
Name of the object.
data
The object to write to file.
type_string : str or None
The type string for `data`. If it is ``None``, one will have
to be gotten by ``get_type_string``.
options : hdf5storage.core.Options
hdf5storage options object.
Raises
------
NotImplementedError
If writing 'data' to file is currently not supported.
hdf5storage.exceptions.TypeNotMatlabCompatibleError
If writing a type not compatible with MATLAB and
`options.action_for_matlab_incompatible` is set to
``'error'``.
Notes
-----
Must be overridden in a subclass because a
``NotImplementedError`` is thrown immediately.
See Also
--------
hdf5storage.utilities.write_data
"""
raise NotImplementedError("Can't write data type: "
+ str(type(data))) |
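A hedged sketch of the subclassing the docstring above requires; only the write() signature comes from the source, while the class name and the h5py storage strategy are invented for illustration:
class ArrayMarshaller(object):
    def write(self, f, grp, name, data, type_string, options):
        if name in grp:
            del grp[name]                 # overwrite any existing object
        grp.create_dataset(name, data=data)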
def from_argparse(cls, opts, nonSpin=False):
"""
Initialize an instance of the massRangeParameters class from an
argparse.OptionParser instance. This assumes that
insert_mass_range_option_group
and
verify_mass_range_options
have already been called before initializing the class.
"""
if nonSpin:
return cls(opts.min_mass1, opts.max_mass1, opts.min_mass2,
opts.max_mass2, maxTotMass=opts.max_total_mass,
minTotMass=opts.min_total_mass, maxEta=opts.max_eta,
minEta=opts.min_eta, max_chirp_mass=opts.max_chirp_mass,
min_chirp_mass=opts.min_chirp_mass,
remnant_mass_threshold=opts.remnant_mass_threshold,
ns_eos=opts.ns_eos, use_eos_max_ns_mass=opts.use_eos_max_ns_mass,
delta_bh_spin=opts.delta_bh_spin, delta_ns_mass=opts.delta_ns_mass)
else:
return cls(opts.min_mass1, opts.max_mass1, opts.min_mass2,
opts.max_mass2, maxTotMass=opts.max_total_mass,
minTotMass=opts.min_total_mass, maxEta=opts.max_eta,
minEta=opts.min_eta, maxNSSpinMag=opts.max_ns_spin_mag,
maxBHSpinMag=opts.max_bh_spin_mag,
nsbhFlag=opts.nsbh_flag,
max_chirp_mass=opts.max_chirp_mass,
min_chirp_mass=opts.min_chirp_mass,
ns_bh_boundary_mass=opts.ns_bh_boundary_mass,
remnant_mass_threshold=opts.remnant_mass_threshold,
ns_eos=opts.ns_eos, use_eos_max_ns_mass=opts.use_eos_max_ns_mass,
delta_bh_spin=opts.delta_bh_spin, delta_ns_mass=opts.delta_ns_mass) | Initialize an instance of the massRangeParameters class from an
argparse.OptionParser instance. This assumes that
insert_mass_range_option_group
and
verify_mass_range_options
have already been called before initializing the class. | Below is the instruction that describes the task:
### Input:
Initialize an instance of the massRangeParameters class from an
argparse.OptionParser instance. This assumes that
insert_mass_range_option_group
and
verify_mass_range_options
have already been called before initializing the class.
### Response:
def from_argparse(cls, opts, nonSpin=False):
"""
Initialize an instance of the massRangeParameters class from an
argparse.OptionParser instance. This assumes that
insert_mass_range_option_group
and
verify_mass_range_options
have already been called before initializing the class.
"""
if nonSpin:
return cls(opts.min_mass1, opts.max_mass1, opts.min_mass2,
opts.max_mass2, maxTotMass=opts.max_total_mass,
minTotMass=opts.min_total_mass, maxEta=opts.max_eta,
minEta=opts.min_eta, max_chirp_mass=opts.max_chirp_mass,
min_chirp_mass=opts.min_chirp_mass,
remnant_mass_threshold=opts.remnant_mass_threshold,
ns_eos=opts.ns_eos, use_eos_max_ns_mass=opts.use_eos_max_ns_mass,
delta_bh_spin=opts.delta_bh_spin, delta_ns_mass=opts.delta_ns_mass)
else:
return cls(opts.min_mass1, opts.max_mass1, opts.min_mass2,
opts.max_mass2, maxTotMass=opts.max_total_mass,
minTotMass=opts.min_total_mass, maxEta=opts.max_eta,
minEta=opts.min_eta, maxNSSpinMag=opts.max_ns_spin_mag,
maxBHSpinMag=opts.max_bh_spin_mag,
nsbhFlag=opts.nsbh_flag,
max_chirp_mass=opts.max_chirp_mass,
min_chirp_mass=opts.min_chirp_mass,
ns_bh_boundary_mass=opts.ns_bh_boundary_mass,
remnant_mass_threshold=opts.remnant_mass_threshold,
ns_eos=opts.ns_eos, use_eos_max_ns_mass=opts.use_eos_max_ns_mass,
delta_bh_spin=opts.delta_bh_spin, delta_ns_mass=opts.delta_ns_mass) |
def user_update(auth=None, **kwargs):
'''
Update a user
CLI Example:
.. code-block:: bash
salt '*' keystoneng.user_update name=user1 enabled=False description='new description'
salt '*' keystoneng.user_update name=user1 new_name=newuser
'''
cloud = get_openstack_cloud(auth)
kwargs = _clean_kwargs(**kwargs)
if 'new_name' in kwargs:
kwargs['name'] = kwargs.pop('new_name')
return cloud.update_user(**kwargs) | Update a user
CLI Example:
.. code-block:: bash
salt '*' keystoneng.user_update name=user1 enabled=False description='new description'
salt '*' keystoneng.user_update name=user1 new_name=newuser | Below is the instruction that describes the task:
### Input:
Update a user
CLI Example:
.. code-block:: bash
salt '*' keystoneng.user_update name=user1 enabled=False description='new description'
salt '*' keystoneng.user_update name=user1 new_name=newuser
### Response:
def user_update(auth=None, **kwargs):
'''
Update a user
CLI Example:
.. code-block:: bash
salt '*' keystoneng.user_update name=user1 enabled=False description='new description'
salt '*' keystoneng.user_update name=user1 new_name=newuser
'''
cloud = get_openstack_cloud(auth)
kwargs = _clean_kwargs(**kwargs)
if 'new_name' in kwargs:
kwargs['name'] = kwargs.pop('new_name')
return cloud.update_user(**kwargs) |
def laplacian_reordering(G):
'''Reorder vertices using the eigenvector of the graph Laplacian corresponding
to the first positive eigenvalue.'''
L = G.laplacian()
vals, vecs = np.linalg.eigh(L)
min_positive_idx = np.argmax(vals == vals[vals>0].min())
vec = vecs[:, min_positive_idx]
return permute_graph(G, np.argsort(vec)) | Reorder vertices using the eigenvector of the graph Laplacian corresponding
to the first positive eigenvalue. | Below is the instruction that describes the task:
### Input:
Reorder vertices using the eigenvector of the graph Laplacian corresponding
to the first positive eigenvalue.
### Response:
def laplacian_reordering(G):
'''Reorder vertices using the eigenvector of the graph Laplacian corresponding
to the first positive eigenvalue.'''
L = G.laplacian()
vals, vecs = np.linalg.eigh(L)
min_positive_idx = np.argmax(vals == vals[vals>0].min())
vec = vecs[:, min_positive_idx]
return permute_graph(G, np.argsort(vec)) |
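A self-contained numpy sketch of the same eigenvector trick on a 3-node path graph; building the Laplacian by hand stands in for the library's G.laplacian():
import numpy as np

A = np.array([[0., 1., 0.],
              [1., 0., 1.],
              [0., 1., 0.]])             # adjacency of a 3-node path graph
L = np.diag(A.sum(axis=1)) - A           # unnormalized graph Laplacian
vals, vecs = np.linalg.eigh(L)
vec = vecs[:, np.argmax(vals == vals[vals > 0].min())]
print(np.argsort(vec))                   # endpoints first and last (up to reversal)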
def post(self, request, *args, **kwargs):
"""
Adds new product to the current shopping cart
"""
POST = json.loads(request.body.decode('utf-8'))
if 'product_pk' in POST and 'quantity' in POST:
cart = ShoppingCartProxy(request)
cart.add(
product_pk=int(POST['product_pk']),
quantity=int(POST['quantity'])
)
return JsonResponse(cart.products)
return HttpResponseBadRequest() | Adds new product to the current shopping cart | Below is the instruction that describes the task:
### Input:
Adds new product to the current shopping cart
### Response:
def post(self, request, *args, **kwargs):
"""
Adds new product to the current shopping cart
"""
POST = json.loads(request.body.decode('utf-8'))
if 'product_pk' in POST and 'quantity' in POST:
cart = ShoppingCartProxy(request)
cart.add(
product_pk=int(POST['product_pk']),
quantity=int(POST['quantity'])
)
return JsonResponse(cart.products)
return HttpResponseBadRequest() |
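A hedged client-side sketch for the view above; the endpoint URL is a placeholder and the JSON body matches the keys the view reads:
import json
import requests

resp = requests.post(
    "http://localhost:8000/cart/",               # placeholder endpoint
    data=json.dumps({"product_pk": 3, "quantity": 2}),
)
print(resp.json())                               # the updated cart products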
def _download_if_necessary(self, url, download_if_missing, overwrite):
"""
Return local cached path to a remote file, download it if necessary.
"""
cached_path = self.cached_path(url)
missing = not exists(cached_path)
if (missing or overwrite) and download_if_missing:
logger.info("Fetching %s from URL %s", cached_path, url)
datacache.download._download_and_decompress_if_necessary(
full_path=cached_path,
download_url=url,
timeout=3600)
elif missing:
raise MissingRemoteFile(url)
return cached_path | Return local cached path to a remote file, download it if necessary. | Below is the instruction that describes the task:
### Input:
Return local cached path to a remote file, download it if necessary.
### Response:
def _download_if_necessary(self, url, download_if_missing, overwrite):
"""
Return local cached path to a remote file, download it if necessary.
"""
cached_path = self.cached_path(url)
missing = not exists(cached_path)
if (missing or overwrite) and download_if_missing:
logger.info("Fetching %s from URL %s", cached_path, url)
datacache.download._download_and_decompress_if_necessary(
full_path=cached_path,
download_url=url,
timeout=3600)
elif missing:
raise MissingRemoteFile(url)
return cached_path |
def set(self, path, data):
"""
Sets the content of a ZooKeeper node
:param path: Z-Path
:param data: New content
"""
return self._zk.set(self.__path(path), data) | Sets the content of a ZooKeeper node
:param path: Z-Path
:param data: New content | Below is the instruction that describes the task:
### Input:
Sets the content of a ZooKeeper node
:param path: Z-Path
:param data: New content
### Response:
def set(self, path, data):
"""
Sets the content of a ZooKeeper node
:param path: Z-Path
:param data: New content
"""
return self._zk.set(self.__path(path), data) |
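A minimal kazoo sketch of the same operation against a raw client, assuming a local ZooKeeper ensemble; node data must be bytes:
from kazoo.client import KazooClient

zk = KazooClient(hosts="127.0.0.1:2181")         # placeholder ensemble address
zk.start()
zk.ensure_path("/app/config")
zk.set("/app/config", b"enabled=true")
zk.stop()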
def addSingleTraitTerm(self,K=None,is_noise=False,normalize=True,Ks=None):
"""
add random effects term for single trait models (no trait-trait covariance matrix)
Args:
K: NxN sample covariance matrix
is_noise: bool labeling the noise term (noise term has K=eye)
normalize: if True, K and Ks are scaled such that K.diagonal().mean()==1
Ks: NxN test cross covariance for predictions
"""
assert self.P == 1, 'Incompatible number of traits'
assert K is not None or is_noise, 'Specify covariance structure'
if is_noise:
assert self.noisPos is None, 'noise term already exists'
K = SP.eye(self.Nt)
self.noisPos = self.n_terms
else:
assert K.shape[0]==self.Nt, 'Incompatible shape'
assert K.shape[1]==self.Nt, 'Incompatible shape'
if Ks is not None:
assert Ks.shape[0]==self.N, 'Incompatible shape'
if normalize:
Norm = 1/K.diagonal().mean()
K *= Norm
if Ks is not None: Ks *= Norm
self.vd.addTerm(limix.CSingleTraitTerm(K))
if Ks is not None: self.setKstar(self.n_terms,Ks)
self.n_terms+=1
self.gp = None
self.init = False
self.fast = False
self.optimum = None
self.cache['Sigma'] = None
self.cache['Hessian'] = None
self.cache['Lparams'] = None
self.cache['paramsST']= None | add random effects term for single trait models (no trait-trait covariance matrix)
Args:
K: NxN sample covariance matrix
is_noise: bool labeling the noise term (noise term has K=eye)
normalize: if True, K and Ks are scaled such that K.diagonal().mean()==1
Ks: NxN test cross covariance for predictions | Below is the instruction that describes the task:
### Input:
add random effects term for single trait models (no trait-trait covariance matrix)
Args:
K: NxN sample covariance matrix
is_noise: bool labeling the noise term (noise term has K=eye)
normalize: if True, K and Ks are scaled such that K.diagonal().mean()==1
Ks: NxN test cross covariance for predictions
### Response:
def addSingleTraitTerm(self,K=None,is_noise=False,normalize=True,Ks=None):
"""
add random effects term for single trait models (no trait-trait covariance matrix)
Args:
K: NxN sample covariance matrix
is_noise: bool labeling the noise term (noise term has K=eye)
        normalize: if True, K and Ks are scaled such that K.diagonal().mean()==1
Ks: NxN test cross covariance for predictions
"""
assert self.P == 1, 'Incompatible number of traits'
    assert K is not None or is_noise, 'Specify covariance structure'
if is_noise:
        assert self.noisPos is None, 'noise term already exists'
K = SP.eye(self.Nt)
self.noisPos = self.n_terms
else:
assert K.shape[0]==self.Nt, 'Incompatible shape'
assert K.shape[1]==self.Nt, 'Incompatible shape'
    if Ks is not None:
assert Ks.shape[0]==self.N, 'Incompatible shape'
if normalize:
Norm = 1/K.diagonal().mean()
K *= Norm
        if Ks is not None: Ks *= Norm
self.vd.addTerm(limix.CSingleTraitTerm(K))
    if Ks is not None: self.setKstar(self.n_terms,Ks)
self.n_terms+=1
self.gp = None
self.init = False
self.fast = False
self.optimum = None
self.cache['Sigma'] = None
self.cache['Hessian'] = None
self.cache['Lparams'] = None
self.cache['paramsST']= None |
def list_auxiliary_states(self):
"""Lists all the auxiliary states in the symbol.
Example
-------
>>> a = mx.sym.var('a')
>>> b = mx.sym.var('b')
>>> c = a + b
>>> c.list_auxiliary_states()
[]
Example of auxiliary states in `BatchNorm`.
>>> data = mx.symbol.Variable('data')
>>> weight = mx.sym.Variable(name='fc1_weight')
>>> fc1 = mx.symbol.FullyConnected(data = data, weight=weight, name='fc1', num_hidden=128)
>>> fc2 = mx.symbol.BatchNorm(fc1, name='batchnorm0')
>>> fc2.list_auxiliary_states()
['batchnorm0_moving_mean', 'batchnorm0_moving_var']
Returns
-------
aux_states : list of str
List of the auxiliary states in input symbol.
Notes
-----
Auxiliary states are special states of symbols that do not correspond to an argument,
and are not updated by gradient descent. Common examples of auxiliary states
include the `moving_mean` and `moving_variance` in `BatchNorm`.
Most operators do not have auxiliary states.
"""
size = ctypes.c_uint()
sarr = ctypes.POINTER(ctypes.c_char_p)()
check_call(_LIB.MXSymbolListAuxiliaryStates(
self.handle, ctypes.byref(size), ctypes.byref(sarr)))
return [py_str(sarr[i]) for i in range(size.value)] | Lists all the auxiliary states in the symbol.
Example
-------
>>> a = mx.sym.var('a')
>>> b = mx.sym.var('b')
>>> c = a + b
>>> c.list_auxiliary_states()
[]
Example of auxiliary states in `BatchNorm`.
>>> data = mx.symbol.Variable('data')
>>> weight = mx.sym.Variable(name='fc1_weight')
>>> fc1 = mx.symbol.FullyConnected(data = data, weight=weight, name='fc1', num_hidden=128)
>>> fc2 = mx.symbol.BatchNorm(fc1, name='batchnorm0')
>>> fc2.list_auxiliary_states()
['batchnorm0_moving_mean', 'batchnorm0_moving_var']
Returns
-------
aux_states : list of str
List of the auxiliary states in input symbol.
Notes
-----
Auxiliary states are special states of symbols that do not correspond to an argument,
and are not updated by gradient descent. Common examples of auxiliary states
include the `moving_mean` and `moving_variance` in `BatchNorm`.
Most operators do not have auxiliary states. | Below is the instruction that describes the task:
### Input:
Lists all the auxiliary states in the symbol.
Example
-------
>>> a = mx.sym.var('a')
>>> b = mx.sym.var('b')
>>> c = a + b
>>> c.list_auxiliary_states()
[]
Example of auxiliary states in `BatchNorm`.
>>> data = mx.symbol.Variable('data')
>>> weight = mx.sym.Variable(name='fc1_weight')
>>> fc1 = mx.symbol.FullyConnected(data = data, weight=weight, name='fc1', num_hidden=128)
>>> fc2 = mx.symbol.BatchNorm(fc1, name='batchnorm0')
>>> fc2.list_auxiliary_states()
['batchnorm0_moving_mean', 'batchnorm0_moving_var']
Returns
-------
aux_states : list of str
List of the auxiliary states in input symbol.
Notes
-----
Auxiliary states are special states of symbols that do not correspond to an argument,
and are not updated by gradient descent. Common examples of auxiliary states
include the `moving_mean` and `moving_variance` in `BatchNorm`.
Most operators do not have auxiliary states.
### Response:
def list_auxiliary_states(self):
"""Lists all the auxiliary states in the symbol.
Example
-------
>>> a = mx.sym.var('a')
>>> b = mx.sym.var('b')
>>> c = a + b
>>> c.list_auxiliary_states()
[]
Example of auxiliary states in `BatchNorm`.
>>> data = mx.symbol.Variable('data')
>>> weight = mx.sym.Variable(name='fc1_weight')
>>> fc1 = mx.symbol.FullyConnected(data = data, weight=weight, name='fc1', num_hidden=128)
>>> fc2 = mx.symbol.BatchNorm(fc1, name='batchnorm0')
>>> fc2.list_auxiliary_states()
['batchnorm0_moving_mean', 'batchnorm0_moving_var']
Returns
-------
aux_states : list of str
List of the auxiliary states in input symbol.
Notes
-----
Auxiliary states are special states of symbols that do not correspond to an argument,
and are not updated by gradient descent. Common examples of auxiliary states
include the `moving_mean` and `moving_variance` in `BatchNorm`.
Most operators do not have auxiliary states.
"""
size = ctypes.c_uint()
sarr = ctypes.POINTER(ctypes.c_char_p)()
check_call(_LIB.MXSymbolListAuxiliaryStates(
self.handle, ctypes.byref(size), ctypes.byref(sarr)))
return [py_str(sarr[i]) for i in range(size.value)] |
def _add_numeric_methods_disabled(cls):
"""
Add in numeric methods to disable other than add/sub.
"""
cls.__pow__ = make_invalid_op('__pow__')
cls.__rpow__ = make_invalid_op('__rpow__')
cls.__mul__ = make_invalid_op('__mul__')
cls.__rmul__ = make_invalid_op('__rmul__')
cls.__floordiv__ = make_invalid_op('__floordiv__')
cls.__rfloordiv__ = make_invalid_op('__rfloordiv__')
cls.__truediv__ = make_invalid_op('__truediv__')
cls.__rtruediv__ = make_invalid_op('__rtruediv__')
cls.__mod__ = make_invalid_op('__mod__')
cls.__divmod__ = make_invalid_op('__divmod__')
cls.__neg__ = make_invalid_op('__neg__')
cls.__pos__ = make_invalid_op('__pos__')
cls.__abs__ = make_invalid_op('__abs__')
    cls.__inv__ = make_invalid_op('__inv__') | Add in numeric methods to disable other than add/sub. | Below is the instruction that describes the task:
### Input:
Add in numeric methods to disable other than add/sub.
### Response:
def _add_numeric_methods_disabled(cls):
"""
Add in numeric methods to disable other than add/sub.
"""
cls.__pow__ = make_invalid_op('__pow__')
cls.__rpow__ = make_invalid_op('__rpow__')
cls.__mul__ = make_invalid_op('__mul__')
cls.__rmul__ = make_invalid_op('__rmul__')
cls.__floordiv__ = make_invalid_op('__floordiv__')
cls.__rfloordiv__ = make_invalid_op('__rfloordiv__')
cls.__truediv__ = make_invalid_op('__truediv__')
cls.__rtruediv__ = make_invalid_op('__rtruediv__')
cls.__mod__ = make_invalid_op('__mod__')
cls.__divmod__ = make_invalid_op('__divmod__')
cls.__neg__ = make_invalid_op('__neg__')
cls.__pos__ = make_invalid_op('__pos__')
cls.__abs__ = make_invalid_op('__abs__')
cls.__inv__ = make_invalid_op('__inv__') |
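make_invalid_op is a factory not shown in this row; a minimal sketch consistent with how it is used above (the message text is an assumption, not the real pandas helper):
def make_invalid_op(name):
    # Hypothetical stand-in: return a method that always rejects the operation.
    def invalid_op(self, other=None):
        raise TypeError("cannot perform {op} with this index type: {typ}"
                        .format(op=name, typ=type(self).__name__))
    invalid_op.__name__ = name
    return invalid_op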
def add_schedule(self, name, activation_date, day_period='one_time',
final_action='ALERT_FAILURE', activated=True,
minute_period='one_time', day_mask=None,
repeat_until_date=None, comment=None):
"""
Add a schedule to an existing task.
:param str name: name for this schedule
:param int activation_date: when to start this task. Activation date
should be a UTC time represented in milliseconds.
:param str day_period: when this task should be run. Valid options:
'one_time', 'daily', 'weekly', 'monthly', 'yearly'. If 'daily' is
selected, you can also provide a value for 'minute_period'.
(default: 'one_time')
:param str minute_period: only required if day_period is set to 'daily'.
Valid options: 'each_quarter' (15 min), 'each_half' (30 minutes), or
'hourly', 'one_time' (default: 'one_time')
:param int day_mask: If the task day_period=weekly, then specify the day
or days for repeating. Day masks are: sun=1, mon=2, tue=4, wed=8,
thu=16, fri=32, sat=64. To repeat for instance every Monday, Wednesday
and Friday, the value must be 2 + 8 + 32 = 42
:param str final_action: what type of action to perform after the
scheduled task runs. Options are: 'ALERT_FAILURE', 'ALERT', or
'NO_ACTION' (default: ALERT_FAILURE)
:param bool activated: whether to activate the schedule (default: True)
:param str repeat_until_date: if this is anything but a one time task run,
you can specify the date when this task should end. The format is the
same as the `activation_date` param.
:param str comment: optional comment
:raises ActionCommandFailed: failed adding schedule
:return: None
"""
json = {
'name': name,
'activation_date': activation_date,
'day_period': day_period,
'day_mask': day_mask,
'activated': activated,
'final_action': final_action,
'minute_period': minute_period,
'repeat_until_date': repeat_until_date if repeat_until_date else None,
'comment': comment}
if 'daily' in day_period:
minute_period = minute_period if minute_period != 'one_time' else 'hourly'
json['minute_period'] = minute_period
return self.make_request(
ActionCommandFailed,
method='create',
resource='task_schedule',
json=json) | Add a schedule to an existing task.
:param str name: name for this schedule
:param int activation_date: when to start this task. Activation date
should be a UTC time represented in milliseconds.
:param str day_period: when this task should be run. Valid options:
'one_time', 'daily', 'weekly', 'monthly', 'yearly'. If 'daily' is
selected, you can also provide a value for 'minute_period'.
(default: 'one_time')
:param str minute_period: only required if day_period is set to 'daily'.
Valid options: 'each_quarter' (15 min), 'each_half' (30 minutes), or
'hourly', 'one_time' (default: 'one_time')
:param int day_mask: If the task day_period=weekly, then specify the day
or days for repeating. Day masks are: sun=1, mon=2, tue=4, wed=8,
thu=16, fri=32, sat=64. To repeat for instance every Monday, Wednesday
and Friday, the value must be 2 + 8 + 32 = 42
:param str final_action: what type of action to perform after the
scheduled task runs. Options are: 'ALERT_FAILURE', 'ALERT', or
'NO_ACTION' (default: ALERT_FAILURE)
:param bool activated: whether to activate the schedule (default: True)
:param str repeat_until_date: if this is anything but a one time task run,
you can specify the date when this task should end. The format is the
same as the `activation_date` param.
:param str comment: optional comment
:raises ActionCommandFailed: failed adding schedule
:return: None | Below is the instruction that describes the task:
### Input:
Add a schedule to an existing task.
:param str name: name for this schedule
:param int activation_date: when to start this task. Activation date
should be a UTC time represented in milliseconds.
:param str day_period: when this task should be run. Valid options:
'one_time', 'daily', 'weekly', 'monthly', 'yearly'. If 'daily' is
selected, you can also provide a value for 'minute_period'.
(default: 'one_time')
:param str minute_period: only required if day_period is set to 'daily'.
Valid options: 'each_quarter' (15 min), 'each_half' (30 minutes), or
'hourly', 'one_time' (default: 'one_time')
:param int day_mask: If the task day_period=weekly, then specify the day
or days for repeating. Day masks are: sun=1, mon=2, tue=4, wed=8,
thu=16, fri=32, sat=64. To repeat for instance every Monday, Wednesday
and Friday, the value must be 2 + 8 + 32 = 42
:param str final_action: what type of action to perform after the
scheduled task runs. Options are: 'ALERT_FAILURE', 'ALERT', or
'NO_ACTION' (default: ALERT_FAILURE)
:param bool activated: whether to activate the schedule (default: True)
:param str repeat_until_date: if this is anything but a one time task run,
you can specify the date when this task should end. The format is the
same as the `activation_date` param.
:param str comment: optional comment
:raises ActionCommandFailed: failed adding schedule
:return: None
### Response:
def add_schedule(self, name, activation_date, day_period='one_time',
final_action='ALERT_FAILURE', activated=True,
minute_period='one_time', day_mask=None,
repeat_until_date=None, comment=None):
"""
Add a schedule to an existing task.
:param str name: name for this schedule
:param int activation_date: when to start this task. Activation date
should be a UTC time represented in milliseconds.
:param str day_period: when this task should be run. Valid options:
'one_time', 'daily', 'weekly', 'monthly', 'yearly'. If 'daily' is
selected, you can also provide a value for 'minute_period'.
(default: 'one_time')
:param str minute_period: only required if day_period is set to 'daily'.
Valid options: 'each_quarter' (15 min), 'each_half' (30 minutes), or
'hourly', 'one_time' (default: 'one_time')
:param int day_mask: If the task day_period=weekly, then specify the day
or days for repeating. Day masks are: sun=1, mon=2, tue=4, wed=8,
thu=16, fri=32, sat=64. To repeat for instance every Monday, Wednesday
and Friday, the value must be 2 + 8 + 32 = 42
:param str final_action: what type of action to perform after the
scheduled task runs. Options are: 'ALERT_FAILURE', 'ALERT', or
'NO_ACTION' (default: ALERT_FAILURE)
:param bool activated: whether to activate the schedule (default: True)
:param str repeat_until_date: if this is anything but a one time task run,
you can specify the date when this task should end. The format is the
same as the `activation_date` param.
:param str comment: optional comment
:raises ActionCommandFailed: failed adding schedule
:return: None
"""
json = {
'name': name,
'activation_date': activation_date,
'day_period': day_period,
'day_mask': day_mask,
'activated': activated,
'final_action': final_action,
'minute_period': minute_period,
'repeat_until_date': repeat_until_date if repeat_until_date else None,
'comment': comment}
if 'daily' in day_period:
minute_period = minute_period if minute_period != 'one_time' else 'hourly'
json['minute_period'] = minute_period
return self.make_request(
ActionCommandFailed,
method='create',
resource='task_schedule',
json=json) |
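The day_mask values documented above combine by bitwise OR; a small self-contained sketch of the arithmetic (the DAYS table is illustrative, not part of the API):
DAYS = {'sun': 1, 'mon': 2, 'tue': 4, 'wed': 8, 'thu': 16, 'fri': 32, 'sat': 64}
def day_mask(*days):
    # Combine day flags; e.g. mon + wed + fri = 2 + 8 + 32 = 42.
    mask = 0
    for day in days:
        mask |= DAYS[day]
    return mask
assert day_mask('mon', 'wed', 'fri') == 42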
def unhandled(self, key):
"""Handle other keyboard actions not handled by the ListBox widget.
"""
self.key = key
self.size = self.tui.get_cols_rows()
if self.search is True:
if self.enter is False and self.no_matches is False:
if len(key) == 1 and key.isprintable():
self.search_string += key
self._search()
elif self.enter is True and not self.search_string:
self.search = False
self.enter = False
return
if not self.urls and key not in "Qq":
return # No other actions are useful with no URLs
if self.help_menu is False:
try:
self.keys[key]()
except KeyError:
            pass | Handle other keyboard actions not handled by the ListBox widget. | Below is the instruction that describes the task:
### Input:
Handle other keyboard actions not handled by the ListBox widget.
### Response:
def unhandled(self, key):
"""Handle other keyboard actions not handled by the ListBox widget.
"""
self.key = key
self.size = self.tui.get_cols_rows()
if self.search is True:
if self.enter is False and self.no_matches is False:
if len(key) == 1 and key.isprintable():
self.search_string += key
self._search()
elif self.enter is True and not self.search_string:
self.search = False
self.enter = False
return
if not self.urls and key not in "Qq":
return # No other actions are useful with no URLs
if self.help_menu is False:
try:
self.keys[key]()
except KeyError:
pass |
def wrap(self, methodName, types, skip=2):
"""
Create a message handler that invokes a wrapper method
with the in-order message fields as parameters, skipping over
the first ``skip`` fields, and parsed according to the ``types`` list.
"""
def handler(fields):
try:
args = [
field if typ is str else
int(field or 0) if typ is int else
float(field or 0) if typ is float else
bool(int(field or 0))
for (typ, field) in zip(types, fields[skip:])]
method(*args)
except Exception:
self.logger.exception(f'Error for {methodName}:')
method = getattr(self.wrapper, methodName, None)
return handler if method else lambda *args: None | Create a message handler that invokes a wrapper method
with the in-order message fields as parameters, skipping over
the first ``skip`` fields, and parsed according to the ``types`` list. | Below is the instruction that describes the task:
### Input:
Create a message handler that invokes a wrapper method
with the in-order message fields as parameters, skipping over
the first ``skip`` fields, and parsed according to the ``types`` list.
### Response:
def wrap(self, methodName, types, skip=2):
"""
Create a message handler that invokes a wrapper method
with the in-order message fields as parameters, skipping over
the first ``skip`` fields, and parsed according to the ``types`` list.
"""
def handler(fields):
try:
args = [
field if typ is str else
int(field or 0) if typ is int else
float(field or 0) if typ is float else
bool(int(field or 0))
for (typ, field) in zip(types, fields[skip:])]
method(*args)
except Exception:
self.logger.exception(f'Error for {methodName}:')
method = getattr(self.wrapper, methodName, None)
return handler if method else lambda *args: None |
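A standalone illustration of the field-parsing rule used above, with hypothetical message fields (the real layout comes from the wire protocol, so the values here are assumptions):
types = [int, float, str]
fields = ['MSGID', 'VERSION', '42', '3.14', 'hello']  # first two are skipped
skip = 2
args = [
    field if typ is str else
    int(field or 0) if typ is int else
    float(field or 0) if typ is float else
    bool(int(field or 0))
    for (typ, field) in zip(types, fields[skip:])]
assert args == [42, 3.14, 'hello']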
def read_partial_map(filenames, column, fullsky=True, **kwargs):
"""
    Read one or more partial HEALPix files and return pixels and values/map. Can
handle 3D healpix maps (pix, value, zdim). Returned array has
shape (dimz,npix).
Parameters:
-----------
filenames : list of input filenames
column : column of interest
fullsky : partial or fullsky map
kwargs : passed to fitsio.read
Returns:
--------
(nside,pix,map) : pixel array and healpix map (partial or fullsky)
"""
# Make sure that PIXEL is in columns
#kwargs['columns'] = ['PIXEL',column]
kwargs['columns'] = ['PIXEL'] + np.atleast_1d(column).tolist()
filenames = np.atleast_1d(filenames)
header = fitsio.read_header(filenames[0],ext=kwargs.get('ext',1))
data = ugali.utils.fileio.load_files(filenames,**kwargs)
pix = data['PIXEL']
value = data[column]
nside = header['NSIDE']
npix = hp.nside2npix(nside)
ndupes = len(pix) - len(np.unique(pix))
if ndupes > 0:
msg = '%i duplicate pixels during load.'%(ndupes)
raise Exception(msg)
if fullsky and not np.isscalar(column):
raise Exception("Cannot make fullsky map from list of columns.")
if fullsky:
shape = list(value.shape)
shape[0] = npix
hpxmap = hp.UNSEEN * np.ones(shape,dtype=value.dtype)
hpxmap[pix] = value
return (nside,pix,hpxmap.T)
else:
        return (nside,pix,value.T) | Read one or more partial HEALPix files and return pixels and values/map. Can
handle 3D healpix maps (pix, value, zdim). Returned array has
shape (dimz,npix).
Parameters:
-----------
filenames : list of input filenames
column : column of interest
fullsky : partial or fullsky map
kwargs : passed to fitsio.read
Returns:
--------
(nside,pix,map) : pixel array and healpix map (partial or fullsky) | Below is the instruction that describes the task:
### Input:
Read one or more partial HEALPix files and return pixels and values/map. Can
handle 3D healpix maps (pix, value, zdim). Returned array has
shape (dimz,npix).
Parameters:
-----------
filenames : list of input filenames
column : column of interest
fullsky : partial or fullsky map
kwargs : passed to fitsio.read
Returns:
--------
(nside,pix,map) : pixel array and healpix map (partial or fullsky)
### Response:
def read_partial_map(filenames, column, fullsky=True, **kwargs):
"""
    Read one or more partial HEALPix files and return pixels and values/map. Can
handle 3D healpix maps (pix, value, zdim). Returned array has
shape (dimz,npix).
Parameters:
-----------
filenames : list of input filenames
column : column of interest
fullsky : partial or fullsky map
kwargs : passed to fitsio.read
Returns:
--------
(nside,pix,map) : pixel array and healpix map (partial or fullsky)
"""
# Make sure that PIXEL is in columns
#kwargs['columns'] = ['PIXEL',column]
kwargs['columns'] = ['PIXEL'] + np.atleast_1d(column).tolist()
filenames = np.atleast_1d(filenames)
header = fitsio.read_header(filenames[0],ext=kwargs.get('ext',1))
data = ugali.utils.fileio.load_files(filenames,**kwargs)
pix = data['PIXEL']
value = data[column]
nside = header['NSIDE']
npix = hp.nside2npix(nside)
ndupes = len(pix) - len(np.unique(pix))
if ndupes > 0:
msg = '%i duplicate pixels during load.'%(ndupes)
raise Exception(msg)
if fullsky and not np.isscalar(column):
raise Exception("Cannot make fullsky map from list of columns.")
if fullsky:
shape = list(value.shape)
shape[0] = npix
hpxmap = hp.UNSEEN * np.ones(shape,dtype=value.dtype)
hpxmap[pix] = value
return (nside,pix,hpxmap.T)
else:
return (nside,pix,value.T) |
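The fullsky branch above reduces to scattering sparse pixel values into a map initialised to the UNSEEN sentinel; a minimal sketch assuming only healpy and numpy:
import numpy as np
import healpy as hp
nside = 16
pix = np.array([0, 7, 42])
value = np.array([1.0, 2.0, 3.0])
hpxmap = hp.UNSEEN * np.ones(hp.nside2npix(nside), dtype=value.dtype)
hpxmap[pix] = value  # pixels not listed stay at the UNSEEN sentinel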
def write(config):
"""Commits any pending modifications, ie save a configuration file if
    it has been marked "dirty" as a result of a normal
assignment. The modifications are written to the first
writable source in this config object.
.. note::
This is a static method, ie not a method on any object
instance. This is because all attribute access on a
LayeredConfig object is meant to retrieve configuration
settings.
:param config: The configuration object to save
:type config: layeredconfig.LayeredConfig
"""
root = config
while root._parent:
root = root._parent
for source in root._sources:
if source.writable and source.dirty:
source.save() | Commits any pending modifications, ie save a configuration file if
it has been marked "dirty" as a result of a normal
assignment. The modifications are written to the first
writable source in this config object.
.. note::
This is a static method, ie not a method on any object
instance. This is because all attribute access on a
LayeredConfig object is meant to retrieve configuration
settings.
:param config: The configuration object to save
:type config: layeredconfig.LayeredConfig | Below is the instruction that describes the task:
### Input:
Commits any pending modifications, ie save a configuration file if
it has been marked "dirty" as a result of a normal
assignment. The modifications are written to the first
writable source in this config object.
.. note::
This is a static method, ie not a method on any object
instance. This is because all attribute access on a
LayeredConfig object is meant to retrieve configuration
settings.
:param config: The configuration object to save
:type config: layeredconfig.LayeredConfig
### Response:
def write(config):
"""Commits any pending modifications, ie save a configuration file if
    it has been marked "dirty" as a result of a normal
assignment. The modifications are written to the first
writable source in this config object.
.. note::
This is a static method, ie not a method on any object
instance. This is because all attribute access on a
LayeredConfig object is meant to retrieve configuration
settings.
:param config: The configuration object to save
:type config: layeredconfig.LayeredConfig
"""
root = config
while root._parent:
root = root._parent
for source in root._sources:
if source.writable and source.dirty:
source.save() |
def Parse(self, stat, knowledge_base):
"""Convert the timezone to Olson format."""
_ = knowledge_base
value = stat.registry_data.GetValue()
result = ZONE_LIST.get(value.strip())
    if not result:
        yield rdfvalue.RDFString("Unknown (%s)" % value.strip())
    else:
        yield rdfvalue.RDFString(result) | Convert the timezone to Olson format. | Below is the instruction that describes the task:
### Input:
Convert the timezone to Olson format.
### Response:
def Parse(self, stat, knowledge_base):
"""Convert the timezone to Olson format."""
_ = knowledge_base
value = stat.registry_data.GetValue()
result = ZONE_LIST.get(value.strip())
    if not result:
        yield rdfvalue.RDFString("Unknown (%s)" % value.strip())
    else:
        yield rdfvalue.RDFString(result) |
def get(self, primary=False, limit=None, offset=None):
"""
:param primary: bool if True will only return accounts the user is primary on
:param offset: int of the offset to use
:param limit: int of max number of puzzles to return
:return: list of Account dict the current user has access to
"""
return self.connection.get('account/array',
primary=primary,
limit=limit,
offset=offset) | :param primary: bool if True will only return accounts the user is primary on
:param offset: int of the offset to use
:param limit: int of max number of puzzles to return
:return: list of Account dict the current user has access to | Below is the instruction that describes the task:
### Input:
:param primary: bool if True will only return accounts the user is primary on
:param offset: int of the offset to use
:param limit: int of max number of puzzles to return
:return: list of Account dict the current user has access to
### Response:
def get(self, primary=False, limit=None, offset=None):
"""
:param primary: bool if True will only return accounts the user is primary on
:param offset: int of the offset to use
:param limit: int of max number of puzzles to return
:return: list of Account dict the current user has access to
"""
return self.connection.get('account/array',
primary=primary,
limit=limit,
offset=offset) |
def recon(self):
"""
Prep
"""
logging.info('Running MOB-recon on Assemblies')
with progressbar(self.metadata) as bar:
commands = list()
for sample in bar:
# Create and populate the mob_recon genobject
setattr(sample, self.analysistype, GenObject())
sample[self.analysistype].outputdir = os.path.join(sample.general.outputdirectory, self.analysistype)
sample[self.analysistype].contig_report = os.path.join(sample[self.analysistype].outputdir,
'contig_report.txt')
sample[self.analysistype].report_dict = dict()
sample[self.analysistype].logout = os.path.join(sample[self.analysistype].outputdir, 'out')
sample[self.analysistype].logerr = os.path.join(sample[self.analysistype].outputdir, 'err')
make_path(sample[self.analysistype].outputdir)
if sample.general.bestassemblyfile != 'NA':
sample.commands.mobrecon = 'mob_recon -i {fasta} -o {outdir} --run_typer -n {threads} -d {db}'\
.format(fasta=sample.general.bestassemblyfile,
outdir=sample[self.analysistype].outputdir,
threads=self.threads,
db=os.path.join(self.databasepath, 'mob_suite'))
# Ensure that the report doesn't already exist
if not os.path.isfile(sample[self.analysistype].contig_report):
# Run the analyses
commands.append(sample.commands.mobrecon)
p = multiprocessing.Pool(processes=self.threads)
out_err = p.map(MobRecon.run_cmd, commands)
p.close()
p.join()
# At this point, out_err has a list of tuples with out as index 0 in each tuple and
# err at index 1 in each tuple. These will be in the same order as the samples, so retrieve them by index.
index = 0
for sample in bar:
# Write the outputs to the log file
out = out_err[index][0]
err = out_err[index][1]
write_to_logfile(out=sample.commands.mobrecon,
err=sample.commands.mobrecon,
logfile=self.logfile,
samplelog=sample.general.logout,
sampleerr=sample.general.logerr,
analysislog=sample[self.analysistype].logout,
analysiserr=sample[self.analysistype].logerr)
write_to_logfile(out=out,
err=err,
logfile=self.logfile,
samplelog=sample.general.logout,
sampleerr=sample.general.logerr,
analysislog=sample[self.analysistype].logout,
analysiserr=sample[self.analysistype].logerr)
            index += 1 | Prep | Below is the instruction that describes the task:
### Input:
Prep
### Response:
def recon(self):
"""
Prep
"""
logging.info('Running MOB-recon on Assemblies')
with progressbar(self.metadata) as bar:
commands = list()
for sample in bar:
# Create and populate the mob_recon genobject
setattr(sample, self.analysistype, GenObject())
sample[self.analysistype].outputdir = os.path.join(sample.general.outputdirectory, self.analysistype)
sample[self.analysistype].contig_report = os.path.join(sample[self.analysistype].outputdir,
'contig_report.txt')
sample[self.analysistype].report_dict = dict()
sample[self.analysistype].logout = os.path.join(sample[self.analysistype].outputdir, 'out')
sample[self.analysistype].logerr = os.path.join(sample[self.analysistype].outputdir, 'err')
make_path(sample[self.analysistype].outputdir)
if sample.general.bestassemblyfile != 'NA':
sample.commands.mobrecon = 'mob_recon -i {fasta} -o {outdir} --run_typer -n {threads} -d {db}'\
.format(fasta=sample.general.bestassemblyfile,
outdir=sample[self.analysistype].outputdir,
threads=self.threads,
db=os.path.join(self.databasepath, 'mob_suite'))
# Ensure that the report doesn't already exist
if not os.path.isfile(sample[self.analysistype].contig_report):
# Run the analyses
commands.append(sample.commands.mobrecon)
p = multiprocessing.Pool(processes=self.threads)
out_err = p.map(MobRecon.run_cmd, commands)
p.close()
p.join()
# At this point, out_err has a list of tuples with out as index 0 in each tuple and
# err at index 1 in each tuple. These will be in the same order as the samples, so retrieve them by index.
index = 0
for sample in bar:
# Write the outputs to the log file
out = out_err[index][0]
err = out_err[index][1]
write_to_logfile(out=sample.commands.mobrecon,
err=sample.commands.mobrecon,
logfile=self.logfile,
samplelog=sample.general.logout,
sampleerr=sample.general.logerr,
analysislog=sample[self.analysistype].logout,
analysiserr=sample[self.analysistype].logerr)
write_to_logfile(out=out,
err=err,
logfile=self.logfile,
samplelog=sample.general.logout,
sampleerr=sample.general.logerr,
analysislog=sample[self.analysistype].logout,
analysiserr=sample[self.analysistype].logerr)
index += 1 |
def confd_state_internal_cdb_client_lock(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
confd_state = ET.SubElement(config, "confd-state", xmlns="http://tail-f.com/yang/confd-monitoring")
internal = ET.SubElement(confd_state, "internal")
cdb = ET.SubElement(internal, "cdb")
client = ET.SubElement(cdb, "client")
lock = ET.SubElement(client, "lock")
lock.text = kwargs.pop('lock')
callback = kwargs.pop('callback', self._callback)
    return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def confd_state_internal_cdb_client_lock(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
confd_state = ET.SubElement(config, "confd-state", xmlns="http://tail-f.com/yang/confd-monitoring")
internal = ET.SubElement(confd_state, "internal")
cdb = ET.SubElement(internal, "cdb")
client = ET.SubElement(cdb, "client")
lock = ET.SubElement(client, "lock")
lock.text = kwargs.pop('lock')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def _set_client(self):
"""Set client property if not set."""
if self._client is None:
if mongo_proxy:
self._client = mongo_proxy.MongoProxy(
pymongo.MongoClient(self.connection_string),
logger=LOG)
else:
LOG.warning("MongoDBProxy not imported. AutoReconnect "
"is not enabled.")
                self._client = pymongo.MongoClient(self.connection_string)
LOG.debug("Created new connection to MongoDB: %s",
                  self.safe_connection_string) | Set client property if not set. | Below is the instruction that describes the task:
### Input:
Set client property if not set.
### Response:
def _set_client(self):
"""Set client property if not set."""
if self._client is None:
if mongo_proxy:
self._client = mongo_proxy.MongoProxy(
pymongo.MongoClient(self.connection_string),
logger=LOG)
else:
LOG.warning("MongoDBProxy not imported. AutoReconnect "
"is not enabled.")
                self._client = pymongo.MongoClient(self.connection_string)
LOG.debug("Created new connection to MongoDB: %s",
self.safe_connection_string) |
def set_autocamera(self,mode='density'):
"""
- set_autocamera(mode='density'): By default, Scene defines its
        own Camera. However, there is no general way to do so. Scene
uses a density criterion for getting the point of view. If this is
not a good option for your problem, you can choose among:
        |'minmax'|'density'|'median'|'mean'|. If none of the previous methods
work well, you may define the camera params by yourself.
"""
self.Camera.set_autocamera(self._Particles,mode=mode)
self._camera_params = self.Camera.get_params()
self._x, self._y, self._hsml, self._kview = self.__compute_scene()
self._m = self._Particles._mass[self._kview] | - set_autocamera(mode='density'): By default, Scene defines its
own Camera. However, there is no general way to do so. Scene
uses a density criterion for getting the point of view. If this is
not a good option for your problem, you can choose among:
|'minmax'|'density'|'median'|'mean'|. If none of the previous methods
work well, you may define the camera params by yourself. | Below is the instruction that describes the task:
### Input:
- set_autocamera(mode='density'): By default, Scene defines its
own Camera. However, there is no general way to do so. Scene
uses a density criterion for getting the point of view. If this is
not a good option for your problem, you can choose among:
|'minmax'|'density'|'median'|'mean'|. If none of the previous methods
work well, you may define the camera params by yourself.
### Response:
def set_autocamera(self,mode='density'):
"""
- set_autocamera(mode='density'): By default, Scene defines its
        own Camera. However, there is no general way to do so. Scene
uses a density criterion for getting the point of view. If this is
not a good option for your problem, you can choose among:
        |'minmax'|'density'|'median'|'mean'|. If none of the previous methods
work well, you may define the camera params by yourself.
"""
self.Camera.set_autocamera(self._Particles,mode=mode)
self._camera_params = self.Camera.get_params()
self._x, self._y, self._hsml, self._kview = self.__compute_scene()
self._m = self._Particles._mass[self._kview] |
def loguniform(low, high, random_state):
'''
    low: a float that represents a lower bound
    high: a float that represents an upper bound
random_state: an object of numpy.random.RandomState
'''
assert low > 0, 'Lower bound must be positive'
    return np.exp(uniform(np.log(low), np.log(high), random_state)) | low: a float that represents a lower bound
high: a float that represents an upper bound
random_state: an object of numpy.random.RandomState | Below is the instruction that describes the task:
### Input:
low: a float that represents a lower bound
high: a float that represents an upper bound
random_state: an object of numpy.random.RandomState
### Response:
def loguniform(low, high, random_state):
'''
    low: a float that represents a lower bound
    high: a float that represents an upper bound
random_state: an object of numpy.random.RandomState
'''
assert low > 0, 'Lower bound must be positive'
return np.exp(uniform(np.log(low), np.log(high), random_state)) |
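A usage sketch with the uniform() helper inlined as numpy's equivalent (assuming the helper shares numpy's (low, high) semantics):
import numpy as np
rng = np.random.RandomState(0)
# Draw a value between 1e-5 and 1e-1, uniform in log space.
sample = np.exp(rng.uniform(np.log(1e-5), np.log(1e-1)))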
def view_name_from(path):
"Resolve a path to the full python module name of the related view function"
try:
return CACHED_VIEWS[path]
except KeyError:
view = resolve(path)
module = path
name = ''
if hasattr(view.func, '__module__'):
            module = view.func.__module__
if hasattr(view.func, '__name__'):
            name = view.func.__name__
view = "%s.%s" % (module, name)
CACHED_VIEWS[path] = view
        return view | Resolve a path to the full python module name of the related view function | Below is the instruction that describes the task:
### Input:
Resolve a path to the full python module name of the related view function
### Response:
def view_name_from(path):
"Resolve a path to the full python module name of the related view function"
try:
return CACHED_VIEWS[path]
except KeyError:
view = resolve(path)
module = path
name = ''
if hasattr(view.func, '__module__'):
            module = view.func.__module__
if hasattr(view.func, '__name__'):
            name = view.func.__name__
view = "%s.%s" % (module, name)
CACHED_VIEWS[path] = view
return view |
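The hand-rolled CACHED_VIEWS dict amounts to memoization; the same idea with functools.lru_cache, as a sketch (assumes Django's resolve() and mirrors the fallbacks above):
from functools import lru_cache
from django.urls import resolve
@lru_cache(maxsize=None)
def view_name_from_cached(path):
    view = resolve(path)
    module = getattr(view.func, '__module__', path)
    name = getattr(view.func, '__name__', '')
    return "%s.%s" % (module, name)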
def route(self, dst, dev=None):
"""
Provide best route to IPv6 destination address, based on Scapy6
internal routing table content.
        When a set of addresses is passed (e.g. 2001:db8:cafe:*::1-5), an
        address of the set is used. Be aware of that behavior when using
        wildcards in upper parts of addresses!
        If the 'dst' parameter is an FQDN, name resolution is performed and
        the result is used.
        If the optional 'dev' parameter is provided, filtering is performed
        to limit the search to routes associated with that interface.
"""
# Transform "2001:db8:cafe:*::1-5:0/120" to one IPv6 address of the set
dst = dst.split("/")[0]
savedst = dst # In case following inet_pton() fails
dst = dst.replace("*","0")
l = dst.find("-")
while l >= 0:
m = (dst[l:]+":").find(":")
dst = dst[:l]+dst[l+m:]
l = dst.find("-")
try:
inet_pton(socket.AF_INET6, dst)
except socket.error:
dst = socket.getaddrinfo(savedst, None, socket.AF_INET6)[0][-1][0]
# TODO : Check if name resolution went well
# Deal with dev-specific request for cache search
k = dst
if dev is not None:
k = dst + "%%" + dev
if k in self.cache:
return self.cache[k]
pathes = []
# TODO : review all kinds of addresses (scope and *cast) to see
# if we are able to cope with everything possible. I'm convinced
# it's not the case.
# -- arnaud
for p, plen, gw, iface, cset in self.routes:
if dev is not None and iface != dev:
continue
if in6_isincluded(dst, p, plen):
pathes.append((plen, (iface, cset, gw)))
elif (in6_ismlladdr(dst) and in6_islladdr(p) and in6_islladdr(cset[0])):
pathes.append((plen, (iface, cset, gw)))
if not pathes:
warning("No route found for IPv6 destination %s (no default route?). This affects only IPv6" % dst)
return (LOOPBACK_NAME, "::", "::") # XXX Linux specific
# Sort with longest prefix first
pathes.sort(reverse=True)
best_plen = pathes[0][0]
pathes = filter(lambda x: x[0] == best_plen, pathes)
res = []
for p in pathes: # Here we select best source address for every route
tmp = p[1]
srcaddr = get_source_addr_from_candidate_set(dst, p[1][1])
if srcaddr is not None:
res.append((p[0], (tmp[0], srcaddr, tmp[2])))
if res == []:
warning("Found a route for IPv6 destination '%s', but no possible source address. This affects only IPv6" % dst)
return (LOOPBACK_NAME, b"::", b"::") # XXX Linux specific
# Symptom : 2 routes with same weight (our weight is plen)
# Solution :
# - dst is unicast global. Check if it is 6to4 and we have a source
# 6to4 address in those available
# - dst is link local (unicast or multicast) and multiple output
# interfaces are available. Take main one (conf.iface6)
# - if none of the previous or ambiguity persists, be lazy and keep
# first one
# XXX TODO : in a _near_ future, include metric in the game
if len(res) > 1:
tmp = []
if in6_isgladdr(dst) and in6_isaddr6to4(dst):
# TODO : see if taking the longest match between dst and
# every source addresses would provide better results
#tmp = filter(lambda x: in6_isaddr6to4(x[1][1]), res)
tmp = [ x for x in res if in6_isaddr6to4(x[1][1]) ]
elif in6_ismaddr(dst) or in6_islladdr(dst):
# TODO : I'm sure we are not covering all addresses. Check that
#tmp = filter(lambda x: x[1][0] == conf.iface6, res)
tmp = [ x for x in res if x[1][0] == conf.iface6 ]
if tmp:
res = tmp
# Fill the cache (including dev-specific request)
k = dst
if dev is not None:
k = dst + "%%" + dev
self.cache[k] = res[0][1]
return res[0][1] | Provide best route to IPv6 destination address, based on Scapy6
internal routing table content.
When a set of addresses is passed (e.g. 2001:db8:cafe:*::1-5), an
address of the set is used. Be aware of that behavior when using
wildcards in upper parts of addresses!
If the 'dst' parameter is an FQDN, name resolution is performed and
the result is used.
If the optional 'dev' parameter is provided, filtering is performed
to limit the search to routes associated with that interface. | Below is the instruction that describes the task:
### Input:
Provide best route to IPv6 destination address, based on Scapy6
internal routing table content.
When a set of addresses is passed (e.g. 2001:db8:cafe:*::1-5), an
address of the set is used. Be aware of that behavior when using
wildcards in upper parts of addresses!
If the 'dst' parameter is an FQDN, name resolution is performed and
the result is used.
If the optional 'dev' parameter is provided, filtering is performed
to limit the search to routes associated with that interface.
### Response:
def route(self, dst, dev=None):
"""
Provide best route to IPv6 destination address, based on Scapy6
internal routing table content.
        When a set of addresses is passed (e.g. 2001:db8:cafe:*::1-5), an
        address of the set is used. Be aware of that behavior when using
        wildcards in upper parts of addresses!
        If the 'dst' parameter is an FQDN, name resolution is performed and
        the result is used.
        If the optional 'dev' parameter is provided, filtering is performed
        to limit the search to routes associated with that interface.
"""
# Transform "2001:db8:cafe:*::1-5:0/120" to one IPv6 address of the set
dst = dst.split("/")[0]
savedst = dst # In case following inet_pton() fails
dst = dst.replace("*","0")
l = dst.find("-")
while l >= 0:
m = (dst[l:]+":").find(":")
dst = dst[:l]+dst[l+m:]
l = dst.find("-")
try:
inet_pton(socket.AF_INET6, dst)
except socket.error:
dst = socket.getaddrinfo(savedst, None, socket.AF_INET6)[0][-1][0]
# TODO : Check if name resolution went well
# Deal with dev-specific request for cache search
k = dst
if dev is not None:
k = dst + "%%" + dev
if k in self.cache:
return self.cache[k]
pathes = []
# TODO : review all kinds of addresses (scope and *cast) to see
# if we are able to cope with everything possible. I'm convinced
# it's not the case.
# -- arnaud
for p, plen, gw, iface, cset in self.routes:
if dev is not None and iface != dev:
continue
if in6_isincluded(dst, p, plen):
pathes.append((plen, (iface, cset, gw)))
elif (in6_ismlladdr(dst) and in6_islladdr(p) and in6_islladdr(cset[0])):
pathes.append((plen, (iface, cset, gw)))
if not pathes:
warning("No route found for IPv6 destination %s (no default route?). This affects only IPv6" % dst)
return (LOOPBACK_NAME, "::", "::") # XXX Linux specific
# Sort with longest prefix first
pathes.sort(reverse=True)
best_plen = pathes[0][0]
pathes = filter(lambda x: x[0] == best_plen, pathes)
res = []
for p in pathes: # Here we select best source address for every route
tmp = p[1]
srcaddr = get_source_addr_from_candidate_set(dst, p[1][1])
if srcaddr is not None:
res.append((p[0], (tmp[0], srcaddr, tmp[2])))
if res == []:
warning("Found a route for IPv6 destination '%s', but no possible source address. This affects only IPv6" % dst)
return (LOOPBACK_NAME, b"::", b"::") # XXX Linux specific
# Symptom : 2 routes with same weight (our weight is plen)
# Solution :
# - dst is unicast global. Check if it is 6to4 and we have a source
# 6to4 address in those available
# - dst is link local (unicast or multicast) and multiple output
# interfaces are available. Take main one (conf.iface6)
# - if none of the previous or ambiguity persists, be lazy and keep
# first one
# XXX TODO : in a _near_ future, include metric in the game
if len(res) > 1:
tmp = []
if in6_isgladdr(dst) and in6_isaddr6to4(dst):
# TODO : see if taking the longest match between dst and
# every source addresses would provide better results
#tmp = filter(lambda x: in6_isaddr6to4(x[1][1]), res)
tmp = [ x for x in res if in6_isaddr6to4(x[1][1]) ]
elif in6_ismaddr(dst) or in6_islladdr(dst):
# TODO : I'm sure we are not covering all addresses. Check that
#tmp = filter(lambda x: x[1][0] == conf.iface6, res)
tmp = [ x for x in res if x[1][0] == conf.iface6 ]
if tmp:
res = tmp
# Fill the cache (including dev-specific request)
k = dst
if dev is not None:
k = dst + "%%" + dev
self.cache[k] = res[0][1]
return res[0][1] |
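The selection step above reduced to its core: rank candidate routes by prefix length and keep only the ties at the top (toy data, not real routes):
pathes = [(0, ('eth0', 'default')), (64, ('eth1', 'lan')), (64, ('eth2', 'lan'))]
pathes.sort(reverse=True)               # longest prefix first
best_plen = pathes[0][0]
best = [p for p in pathes if p[0] == best_plen]
assert [p[1][0] for p in best] == ['eth2', 'eth1']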
def get_crawler_stats(self):
'''
Gather crawler stats
@return: A dict of stats
'''
self.logger.debug("Gathering crawler stats")
the_dict = {}
the_dict['spiders'] = self.get_spider_stats()['spiders']
the_dict['machines'] = self.get_machine_stats()['machines']
the_dict['queue'] = self.get_queue_stats()['queues']
return the_dict | Gather crawler stats
@return: A dict of stats | Below is the instruction that describes the task:
### Input:
Gather crawler stats
@return: A dict of stats
### Response:
def get_crawler_stats(self):
'''
Gather crawler stats
@return: A dict of stats
'''
self.logger.debug("Gathering crawler stats")
the_dict = {}
the_dict['spiders'] = self.get_spider_stats()['spiders']
the_dict['machines'] = self.get_machine_stats()['machines']
the_dict['queue'] = self.get_queue_stats()['queues']
return the_dict |
def __set_mutation_type(self, hgvs_string):
"""Interpret the mutation type (missense, etc.) and set appropriate flags.
Args:
hgvs_string (str): hgvs syntax with "p." removed
"""
self.__set_lost_stop_status(hgvs_string)
self.__set_lost_start_status(hgvs_string)
self.__set_missense_status(hgvs_string) # missense mutations
self.__set_indel_status() # indel mutations
self.__set_frame_shift_status() # check for fs
self.__set_premature_stop_codon_status(hgvs_string) | Interpret the mutation type (missense, etc.) and set appropriate flags.
Args:
hgvs_string (str): hgvs syntax with "p." removed | Below is the instruction that describes the task:
### Input:
Interpret the mutation type (missense, etc.) and set appropriate flags.
Args:
hgvs_string (str): hgvs syntax with "p." removed
### Response:
def __set_mutation_type(self, hgvs_string):
"""Interpret the mutation type (missense, etc.) and set appropriate flags.
Args:
hgvs_string (str): hgvs syntax with "p." removed
"""
self.__set_lost_stop_status(hgvs_string)
self.__set_lost_start_status(hgvs_string)
self.__set_missense_status(hgvs_string) # missense mutations
self.__set_indel_status() # indel mutations
self.__set_frame_shift_status() # check for fs
self.__set_premature_stop_codon_status(hgvs_string) |
def _pulse_data(self, value):
"""Pulse the `enable` flag to process value."""
if self._i2c_expander == 'PCF8574':
self.bus.write_byte(self._address, ((value & ~PCF8574_E) | self._backlight))
c.usleep(1)
self.bus.write_byte(self._address, value | PCF8574_E | self._backlight)
c.usleep(1)
self.bus.write_byte(self._address, ((value & ~PCF8574_E) | self._backlight))
c.usleep(100)
elif self._i2c_expander in ['MCP23008', 'MCP23017']:
self._mcp_data &= ~MCP230XX_DATAMASK
self._mcp_data |= value << MCP230XX_DATASHIFT
self._mcp_data &= ~MCP230XX_E
self.bus.write_byte_data(self._address, self._mcp_gpio, self._mcp_data)
c.usleep(1)
self._mcp_data |= MCP230XX_E
self.bus.write_byte_data(self._address, self._mcp_gpio, self._mcp_data)
c.usleep(1)
self._mcp_data &= ~MCP230XX_E
self.bus.write_byte_data(self._address, self._mcp_gpio, self._mcp_data)
            c.usleep(100) | Pulse the `enable` flag to process value. | Below is the instruction that describes the task:
### Input:
Pulse the `enable` flag to process value.
### Response:
def _pulse_data(self, value):
"""Pulse the `enable` flag to process value."""
if self._i2c_expander == 'PCF8574':
self.bus.write_byte(self._address, ((value & ~PCF8574_E) | self._backlight))
c.usleep(1)
self.bus.write_byte(self._address, value | PCF8574_E | self._backlight)
c.usleep(1)
self.bus.write_byte(self._address, ((value & ~PCF8574_E) | self._backlight))
c.usleep(100)
elif self._i2c_expander in ['MCP23008', 'MCP23017']:
self._mcp_data &= ~MCP230XX_DATAMASK
self._mcp_data |= value << MCP230XX_DATASHIFT
self._mcp_data &= ~MCP230XX_E
self.bus.write_byte_data(self._address, self._mcp_gpio, self._mcp_data)
c.usleep(1)
self._mcp_data |= MCP230XX_E
self.bus.write_byte_data(self._address, self._mcp_gpio, self._mcp_data)
c.usleep(1)
self._mcp_data &= ~MCP230XX_E
self.bus.write_byte_data(self._address, self._mcp_gpio, self._mcp_data)
c.usleep(100) |
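The enable pulse above is three writes toggling one bit; with an assumed enable mask (the real PCF8574_E constant lives in the library, so this value is illustrative):
PCF8574_E = 0b00000100   # assumed mask value, for illustration only
value = 0b10010000
sequence = [value & ~PCF8574_E, value | PCF8574_E, value & ~PCF8574_E]  # low-high-low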
def grok_template_file(src):
"""Determine the real deal template file"""
if not src.startswith('builtin:'):
return abspath(src)
builtin = src.split(':')[1]
builtin = "templates/%s.j2" % builtin
    return resource_filename(__name__, builtin) | Determine the real deal template file | Below is the instruction that describes the task:
### Input:
Determine the real deal template file
### Response:
def grok_template_file(src):
"""Determine the real deal template file"""
if not src.startswith('builtin:'):
return abspath(src)
builtin = src.split(':')[1]
builtin = "templates/%s.j2" % builtin
return resource_filename(__name__, builtin) |
def get_list_fields(self):
"""Get all list fields"""
from trionyx.renderer import renderer
model_fields = {f.name: f for f in self.model.get_fields(True, True)}
def create_list_fields(config_fields, list_fields=None):
list_fields = list_fields if list_fields else {}
for field in config_fields:
config = field
if isinstance(field, str):
config = {'field': field}
if 'field' not in config:
raise Exception("Field config is missing field: {}".format(config))
if 'label' not in config:
if config['field'] in model_fields:
config['label'] = model_fields[config['field']].verbose_name
else:
config['label'] = config['field']
if 'renderer' not in config:
config['renderer'] = renderer.render_field
list_fields[config['field']] = config
return list_fields
list_fields = create_list_fields(model_fields.keys())
if self.list_fields:
list_fields = create_list_fields(self.list_fields, list_fields)
        return list_fields | Get all list fields | Below is the instruction that describes the task:
### Input:
Get all list fields
### Response:
def get_list_fields(self):
"""Get all list fields"""
from trionyx.renderer import renderer
model_fields = {f.name: f for f in self.model.get_fields(True, True)}
def create_list_fields(config_fields, list_fields=None):
list_fields = list_fields if list_fields else {}
for field in config_fields:
config = field
if isinstance(field, str):
config = {'field': field}
if 'field' not in config:
raise Exception("Field config is missing field: {}".format(config))
if 'label' not in config:
if config['field'] in model_fields:
config['label'] = model_fields[config['field']].verbose_name
else:
config['label'] = config['field']
if 'renderer' not in config:
config['renderer'] = renderer.render_field
list_fields[config['field']] = config
return list_fields
list_fields = create_list_fields(model_fields.keys())
if self.list_fields:
list_fields = create_list_fields(self.list_fields, list_fields)
return list_fields |
def cycle(self, *values):
"""Cycle through values as the loop progresses.
"""
if not values:
raise ValueError("You must provide values to cycle through")
        return values[self.index % len(values)] | Cycle through values as the loop progresses. | Below is the instruction that describes the task:
### Input:
Cycle through values as the loop progresses.
### Response:
def cycle(self, *values):
"""Cycle through values as the loop progresses.
"""
if not values:
raise ValueError("You must provide values to cycle through")
return values[self.index % len(values)] |
def update_organisation(self, query_params=None):
'''
        Update this organisation's information. Returns a new organisation
object.
'''
organisation_json = self.fetch_json(
uri_path=self.base_uri,
http_method='PUT',
query_params=query_params or {}
)
        return self.create_organisation(organisation_json) | Update this organisation's information. Returns a new organisation
object. | Below is the instruction that describes the task:
### Input:
Update this organisation's information. Returns a new organisation
object.
### Response:
def update_organisation(self, query_params=None):
'''
        Update this organisation's information. Returns a new organisation
object.
'''
organisation_json = self.fetch_json(
uri_path=self.base_uri,
http_method='PUT',
query_params=query_params or {}
)
return self.create_organisation(organisation_json) |
def justify_token(tok, col_width):
"""
Justify a string to fill one or more columns.
"""
get_len = tools.display_len if PY3 else len
tok_len = get_len(tok)
diff_len = tok_len - len(tok) if PY3 else 0
cols = (int(math.ceil(float(tok_len) / col_width))
if col_width < tok_len + 4 else 1)
if cols > 1:
return tok.ljust((col_width * cols) + (4 * cols) - diff_len)
else:
        return tok.ljust(col_width + 4 - diff_len) | Justify a string to fill one or more columns. | Below is the instruction that describes the task:
### Input:
Justify a string to fill one or more columns.
### Response:
def justify_token(tok, col_width):
"""
Justify a string to fill one or more columns.
"""
get_len = tools.display_len if PY3 else len
tok_len = get_len(tok)
diff_len = tok_len - len(tok) if PY3 else 0
cols = (int(math.ceil(float(tok_len) / col_width))
if col_width < tok_len + 4 else 1)
if cols > 1:
return tok.ljust((col_width * cols) + (4 * cols) - diff_len)
else:
return tok.ljust(col_width + 4 - diff_len) |
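A worked example of the column math above for a pure-ASCII token (display length equals len): a 10-character token in 6-character columns spans ceil(10/6) = 2 columns and is padded to 6*2 + 4*2 = 20 characters:
import math
tok, col_width = 'abcdefghij', 6
cols = int(math.ceil(float(len(tok)) / col_width)) if col_width < len(tok) + 4 else 1
assert cols == 2
assert len(tok.ljust(col_width * cols + 4 * cols)) == 20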
def blast_to_cigar(query_seq, match_seq, subject_seq, cigar_age='new'):
"""converts BLAST alignment into a old or new CIGAR line
Args:
query_seq (str): Aligned portion of query sequence
match_seq (str): Alignment sequence
subject_seq (str): Aligned portion of subject/reference sequence
cigar_age (str): ['old', 'new'] CIGAR format to use, new is highly
detailed while old is fairly minimalistic
Returns:
str: CIGAR string
Raises:
        ValueError: If query_seq, match_seq, and subject_seq are not the same length
Examples:
>>> query = 'AAGGG--CCTTGTA'
>>> subject = 'AAGCCTTCCAGGTA'
>>> alignment_old = '||||| |||||||'
>>> alignment_new = 'AAG++ CC++GTA'
>>> blast_to_cigar(query, alignment_new, subject)
3=2X2D2=2X3=
>>> blast_to_cigar(query, alignment_old, subject, cigar_age='old')
5M2D7M
"""
if not len(query_seq) == len(match_seq) \
or not len(query_seq) == len(subject_seq) \
or not len(subject_seq) == len(match_seq):
raise ValueError('query_seq, match_seq, and subject_seq not same '
'lengths.')
# Translate XML alignment to CIGAR characters
cigar_line_raw = []
for query, match, subject in zip(query_seq, match_seq, subject_seq):
if query == '-': # Deletion
cigar_line_raw.append('D')
continue
elif subject == '-': # Insertion
cigar_line_raw.append('I')
continue
elif match == '+' or match == '|' or match.isalpha(): # Match
if match != '+' and cigar_age == 'new': # Positive match
cigar_line_raw.append('=')
continue
elif match == '+' and cigar_age == 'new': # Mismatch
cigar_line_raw.append('X')
continue
else:
cigar_line_raw.append('M')
continue
elif cigar_age == 'new':
cigar_line_raw.append('X')
continue
else:
cigar_line_raw.append('M')
# Replace repeat characters with numbers
cigar_line = []
last_position = ''
repeats = 1
cigar_len = len(cigar_line_raw)
for letter in enumerate(cigar_line_raw):
if letter[1] == last_position:
repeats += 1
else:
if repeats != 1:
cigar_line.append(str(repeats))
repeats = 1
cigar_line.append(last_position)
if letter[0] == cigar_len - 1:
if repeats != 1:
cigar_line.append(str(repeats))
repeats = 1
cigar_line.append(letter[1])
last_position = letter[1]
    return ''.join(cigar_line) | converts BLAST alignment into an old or new CIGAR line
Args:
query_seq (str): Aligned portion of query sequence
match_seq (str): Alignment sequence
subject_seq (str): Aligned portion of subject/reference sequence
cigar_age (str): ['old', 'new'] CIGAR format to use, new is highly
detailed while old is fairly minimalistic
Returns:
str: CIGAR string
Raises:
ValueError: If query_seq, match_seq, and subject_seq are not the same length
Examples:
>>> query = 'AAGGG--CCTTGTA'
>>> subject = 'AAGCCTTCCAGGTA'
>>> alignment_old = '||||| |||||||'
>>> alignment_new = 'AAG++ CC++GTA'
>>> blast_to_cigar(query, alignment_new, subject)
3=2X2D2=2X3=
>>> blast_to_cigar(query, alignment_old, subject, cigar_age='old')
5M2D7M | Below is the instruction that describes the task:
### Input:
converts BLAST alignment into an old or new CIGAR line
Args:
query_seq (str): Aligned portion of query sequence
match_seq (str): Alignment sequence
subject_seq (str): Aligned portion of subject/reference sequence
cigar_age (str): ['old', 'new'] CIGAR format to use, new is highly
detailed while old is fairly minimalistic
Returns:
str: CIGAR string
Raises:
ValueError: If query_seq, match_seq, and subject_seq are not the same length
Examples:
>>> query = 'AAGGG--CCTTGTA'
>>> subject = 'AAGCCTTCCAGGTA'
>>> alignment_old = '||||| |||||||'
>>> alignment_new = 'AAG++ CC++GTA'
>>> blast_to_cigar(query, alignment_new, subject)
3=2X2D2=2X3=
>>> blast_to_cigar(query, alignment_old, subject, cigar_age='old')
5M2D7M
### Response:
def blast_to_cigar(query_seq, match_seq, subject_seq, cigar_age='new'):
"""converts BLAST alignment into a old or new CIGAR line
Args:
query_seq (str): Aligned portion of query sequence
match_seq (str): Alignment sequence
subject_seq (str): Aligned portion of subject/reference sequence
cigar_age (str): ['old', 'new'] CIGAR format to use, new is highly
detailed while old is fairly minimalistic
Returns:
str: CIGAR string
Raises:
        ValueError: If query_seq, match_seq, and subject_seq are not the same length
Examples:
>>> query = 'AAGGG--CCTTGTA'
>>> subject = 'AAGCCTTCCAGGTA'
>>> alignment_old = '||||| |||||||'
>>> alignment_new = 'AAG++ CC++GTA'
>>> blast_to_cigar(query, alignment_new, subject)
3=2X2D2=2X3=
>>> blast_to_cigar(query, alignment_old, subject, cigar_age='old')
5M2D7M
"""
if not len(query_seq) == len(match_seq) \
or not len(query_seq) == len(subject_seq) \
or not len(subject_seq) == len(match_seq):
raise ValueError('query_seq, match_seq, and subject_seq not same '
'lengths.')
# Translate XML alignment to CIGAR characters
cigar_line_raw = []
for query, match, subject in zip(query_seq, match_seq, subject_seq):
if query == '-': # Deletion
cigar_line_raw.append('D')
continue
elif subject == '-': # Insertion
cigar_line_raw.append('I')
continue
elif match == '+' or match == '|' or match.isalpha(): # Match
            if match != '+' and cigar_age == 'new':  # Exact match ('|' or letter)
                cigar_line_raw.append('=')
                continue
            elif match == '+' and cigar_age == 'new':  # Positive substitution (mismatch)
cigar_line_raw.append('X')
continue
else:
cigar_line_raw.append('M')
continue
elif cigar_age == 'new':
cigar_line_raw.append('X')
continue
else:
cigar_line_raw.append('M')
# Replace repeat characters with numbers
cigar_line = []
last_position = ''
repeats = 1
cigar_len = len(cigar_line_raw)
    for i, char in enumerate(cigar_line_raw):
        if char == last_position:
            repeats += 1
        else:
            if repeats != 1:
                cigar_line.append(str(repeats))
                repeats = 1
            cigar_line.append(last_position)
        if i == cigar_len - 1:  # Flush the final run at the end of the sequence
            if repeats != 1:
                cigar_line.append(str(repeats))
                repeats = 1
            cigar_line.append(char)
        last_position = char
return ''.join(cigar_line) |
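A quick sanity check for blast_to_cigar as defined above; the inputs reproduce the docstring example, and the two-space gap in each match line mirrors the two-base deletion in the query:

query = 'AAGGG--CCTTGTA'
subject = 'AAGCCTTCCAGGTA'
# New-style CIGAR distinguishes exact matches ('='), mismatches ('X') and deletions ('D')
assert blast_to_cigar(query, 'AAG++  CC++GTA', subject) == '3=2X2D2=2X3='
# Old-style CIGAR collapses '=' and 'X' into plain 'M'
assert blast_to_cigar(query, '|||||  |||||||', subject, cigar_age='old') == '5M2D7M'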
def Detect(self, baseline, host_data):
"""Run host_data through detectors and return them if a detector triggers.
Args:
baseline: The base set of rdf values used to evaluate whether an issue
exists.
host_data: The rdf values passed back by the filters.
Returns:
A CheckResult message containing anomalies if any detectors identified an
issue, None otherwise.
"""
result = CheckResult()
for detector in self.detectors:
finding = detector(baseline, host_data)
if finding:
result.ExtendAnomalies([finding])
if result:
return result | Run host_data through detectors and return them if a detector triggers.
Args:
baseline: The base set of rdf values used to evaluate whether an issue
exists.
host_data: The rdf values passed back by the filters.
Returns:
A CheckResult message containing anomalies if any detectors identified an
issue, None otherwise. | Below is the instruction that describes the task:
### Input:
Run host_data through detectors and return them if a detector triggers.
Args:
baseline: The base set of rdf values used to evaluate whether an issue
exists.
host_data: The rdf values passed back by the filters.
Returns:
A CheckResult message containing anomalies if any detectors identified an
issue, None otherwise.
### Response:
def Detect(self, baseline, host_data):
"""Run host_data through detectors and return them if a detector triggers.
Args:
baseline: The base set of rdf values used to evaluate whether an issue
exists.
host_data: The rdf values passed back by the filters.
Returns:
A CheckResult message containing anomalies if any detectors identified an
issue, None otherwise.
"""
result = CheckResult()
for detector in self.detectors:
finding = detector(baseline, host_data)
if finding:
result.ExtendAnomalies([finding])
if result:
return result |
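A toy illustration of the detector loop above; CheckResult here is a hypothetical stand-in for GRR's class (it assumes Detect and the stand-in share one module namespace), and the lambda plays the role of a detector:

from types import SimpleNamespace

class CheckResult(list):
    # Hypothetical stand-in: truthy once it holds anomalies
    def ExtendAnomalies(self, findings):
        self.extend(findings)

checker = SimpleNamespace(detectors=[lambda base, data: 'anomaly' if data > base else None])
print(Detect(checker, baseline=10, host_data=42))  # ['anomaly']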
def ban_delete(self, ban_id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/chat/bans#delete-ban"
api_path = "/api/v2/bans/{ban_id}"
api_path = api_path.format(ban_id=ban_id)
    return self.call(api_path, method="DELETE", **kwargs) | https://developer.zendesk.com/rest_api/docs/chat/bans#delete-ban | Below is the instruction that describes the task:
### Input:
https://developer.zendesk.com/rest_api/docs/chat/bans#delete-ban
### Response:
def ban_delete(self, ban_id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/chat/bans#delete-ban"
api_path = "/api/v2/bans/{ban_id}"
api_path = api_path.format(ban_id=ban_id)
return self.call(api_path, method="DELETE", **kwargs) |
def execute(self, action):
"""
Executes action, observes next state and reward.
Args:
        action: Action to execute.
Returns:
Tuple of (next state, bool indicating terminal, reward)
"""
next_state, rew, done, _ = self.env.step(action)
return next_state, rew, done | Executes action, observes next state and reward.
Args:
action: Action to execute.
Returns:
Tuple of (next state, bool indicating terminal, reward) | Below is the instruction that describes the task:
### Input:
Executes action, observes next state and reward.
Args:
action: Action to execute.
Returns:
Tuple of (next state, bool indicating terminal, reward)
### Response:
def execute(self, action):
"""
Executes action, observes next state and reward.
Args:
actions: Actions to execute.
Returns:
Tuple of (next state, bool indicating terminal, reward)
"""
next_state, rew, done, _ = self.env.step(action)
return next_state, rew, done |
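A hedged sketch of the method in use; the SimpleNamespace objects below are toy stand-ins for a Gym-style environment and its wrapper, not classes from the source:

from types import SimpleNamespace

# step() returns (state, reward, done, info); the method drops the trailing info dict
env = SimpleNamespace(step=lambda action: ([0.0], 1.0, False, {}))
wrapper = SimpleNamespace(env=env)
assert execute(wrapper, 0) == ([0.0], 1.0, False)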
def setColumnCount( self, count ):
"""
Sets the number of columns used for this tree widget, updating the \
column resizing modes to stretch the first column.
:param count | <int>
"""
super(XColorTreeWidget, self).setColumnCount(count)
header = self.header()
header.setResizeMode(0, header.Stretch)
for i in range(1, count):
header.setResizeMode(i, header.Fixed) | Sets the number of columns used for this tree widget, updating the \
column resizing modes to stretch the first column.
:param count | <int> | Below is the instruction that describes the task:
### Input:
Sets the number of columns used for this tree widget, updating the \
column resizing modes to stretch the first column.
:param count | <int>
### Response:
def setColumnCount( self, count ):
"""
Sets the number of columns used for this tree widget, updating the \
column resizing modes to stretch the first column.
:param count | <int>
"""
super(XColorTreeWidget, self).setColumnCount(count)
header = self.header()
header.setResizeMode(0, header.Stretch)
for i in range(1, count):
header.setResizeMode(i, header.Fixed) |
def _parse_region(self, rec, line_iter):
"""Parse a section of an ISA-Tab, assigning information to a supplied record.
"""
had_info = False
keyvals, section = self._parse_keyvals(line_iter)
if keyvals:
rec.metadata = keyvals[0]
while section and section[0] != "STUDY":
had_info = True
keyvals, next_section = self._parse_keyvals(line_iter)
attr_name = self._sections[section[0]]
if attr_name in self._nolist:
try:
keyvals = keyvals[0]
except IndexError:
keyvals = {}
setattr(rec, attr_name, keyvals)
section = next_section
    return rec, had_info | Parse a section of an ISA-Tab, assigning information to a supplied record. | Below is the instruction that describes the task:
### Input:
Parse a section of an ISA-Tab, assigning information to a supplied record.
### Response:
def _parse_region(self, rec, line_iter):
"""Parse a section of an ISA-Tab, assigning information to a supplied record.
"""
had_info = False
keyvals, section = self._parse_keyvals(line_iter)
if keyvals:
rec.metadata = keyvals[0]
while section and section[0] != "STUDY":
had_info = True
keyvals, next_section = self._parse_keyvals(line_iter)
attr_name = self._sections[section[0]]
if attr_name in self._nolist:
try:
keyvals = keyvals[0]
except IndexError:
keyvals = {}
setattr(rec, attr_name, keyvals)
section = next_section
return rec, had_info |
def ParallelCalculate(cls,syslst,properties=['energy'],system_changes=all_changes):
'''
Run a series of calculations in parallel using (implicitely) some
remote machine/cluster. The function returns the list of systems ready
for the extraction of calculated properties.
'''
print('Launching:',end=' ')
sys.stdout.flush()
for n,s in enumerate(syslst):
        try:
s.calc.block=False
s.calc.calculate(atoms=s,properties=properties,system_changes=system_changes)
except CalcNotReadyError:
s.calc.block=True
print(n+1, end=' ')
sys.stdout.flush()
print()
print(' Done:', end=' ')
sys.stdout.flush()
for n,s in enumerate(syslst):
s.calc.read_results()
print( n+1, end=' ')
sys.stdout.flush()
print()
    return syslst | Run a series of calculations in parallel using (implicitly) some
remote machine/cluster. The function returns the list of systems ready
for the extraction of calculated properties. | Below is the instruction that describes the task:
### Input:
Run a series of calculations in parallel using (implicitly) some
remote machine/cluster. The function returns the list of systems ready
for the extraction of calculated properties.
### Response:
def ParallelCalculate(cls,syslst,properties=['energy'],system_changes=all_changes):
'''
    Run a series of calculations in parallel using (implicitly) some
remote machine/cluster. The function returns the list of systems ready
for the extraction of calculated properties.
'''
print('Launching:',end=' ')
sys.stdout.flush()
for n,s in enumerate(syslst):
        try:
s.calc.block=False
s.calc.calculate(atoms=s,properties=properties,system_changes=system_changes)
except CalcNotReadyError:
s.calc.block=True
print(n+1, end=' ')
sys.stdout.flush()
print()
print(' Done:', end=' ')
sys.stdout.flush()
for n,s in enumerate(syslst):
s.calc.read_results()
print( n+1, end=' ')
sys.stdout.flush()
print()
return syslst |
def get(self, id):
'''List all followers for a given object'''
args = parser.parse_args()
model = self.model.objects.only('id').get_or_404(id=id)
qs = Follow.objects(following=model, until=None)
    return qs.paginate(args['page'], args['page_size']) | List all followers for a given object | Below is the instruction that describes the task:
### Input:
List all followers for a given object
### Response:
def get(self, id):
'''List all followers for a given object'''
args = parser.parse_args()
model = self.model.objects.only('id').get_or_404(id=id)
qs = Follow.objects(following=model, until=None)
return qs.paginate(args['page'], args['page_size']) |
def _add_tasks(config, tasks_file, tasks_type, priority, redundancy):
"""Add tasks to a project."""
try:
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
data = _load_data(tasks_file, tasks_type)
if len(data) == 0:
return ("Unknown format for the tasks file. Use json, csv, po or "
"properties.")
# If true, warn user
# if sleep: # pragma: no cover
# click.secho(msg, fg='yellow')
# Show progress bar
with click.progressbar(data, label="Adding Tasks") as pgbar:
for d in pgbar:
task_info = create_task_info(d)
response = config.pbclient.create_task(project_id=project.id,
info=task_info,
n_answers=redundancy,
priority_0=priority)
# Check if for the data we have to auto-throttle task creation
sleep, msg = enable_auto_throttling(config, data)
check_api_error(response)
# If auto-throttling enabled, sleep for sleep seconds
if sleep: # pragma: no cover
time.sleep(sleep)
return ("%s tasks added to project: %s" % (len(data),
config.project['short_name']))
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
        raise | Add tasks to a project. | Below is the instruction that describes the task:
### Input:
Add tasks to a project.
### Response:
def _add_tasks(config, tasks_file, tasks_type, priority, redundancy):
"""Add tasks to a project."""
try:
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
data = _load_data(tasks_file, tasks_type)
if len(data) == 0:
return ("Unknown format for the tasks file. Use json, csv, po or "
"properties.")
# If true, warn user
# if sleep: # pragma: no cover
# click.secho(msg, fg='yellow')
# Show progress bar
with click.progressbar(data, label="Adding Tasks") as pgbar:
for d in pgbar:
task_info = create_task_info(d)
response = config.pbclient.create_task(project_id=project.id,
info=task_info,
n_answers=redundancy,
priority_0=priority)
# Check if for the data we have to auto-throttle task creation
sleep, msg = enable_auto_throttling(config, data)
check_api_error(response)
# If auto-throttling enabled, sleep for sleep seconds
if sleep: # pragma: no cover
time.sleep(sleep)
return ("%s tasks added to project: %s" % (len(data),
config.project['short_name']))
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise |
def reshape(self, *shape):
"""
Reshape the Series object
Cannot change the last dimension.
Parameters
----------
shape: one or more ints
New shape
"""
if prod(self.shape) != prod(shape):
raise ValueError("Reshaping must leave the number of elements unchanged")
if self.shape[-1] != shape[-1]:
raise ValueError("Reshaping cannot change the size of the constituent series (last dimension)")
if self.labels is not None:
newlabels = self.labels.reshape(*shape[:-1])
else:
newlabels = None
return self._constructor(self.values.reshape(shape), labels=newlabels).__finalize__(self, noprop=('labels',)) | Reshape the Series object
Cannot change the last dimension.
Parameters
----------
shape: one or more ints
New shape | Below is the instruction that describes the task:
### Input:
Reshape the Series object
Cannot change the last dimension.
Parameters
----------
shape: one or more ints
New shape
### Response:
def reshape(self, *shape):
"""
Reshape the Series object
Cannot change the last dimension.
Parameters
----------
shape: one or more ints
New shape
"""
if prod(self.shape) != prod(shape):
raise ValueError("Reshaping must leave the number of elements unchanged")
if self.shape[-1] != shape[-1]:
raise ValueError("Reshaping cannot change the size of the constituent series (last dimension)")
if self.labels is not None:
newlabels = self.labels.reshape(*shape[:-1])
else:
newlabels = None
return self._constructor(self.values.reshape(shape), labels=newlabels).__finalize__(self, noprop=('labels',)) |
def timedelta_to_duration(obj):
"""Converts timedelta to duration
    >>> timedelta_to_duration(datetime.timedelta(0, 600))
    '10m'
"""
minutes, hours, days = 0, 0, 0
seconds = int(obj.total_seconds())
if seconds > 59:
minutes = seconds // 60
seconds = seconds % 60
if minutes > 59:
hours = minutes // 60
minutes = minutes % 60
if hours > 23:
days = hours // 24
hours = hours % 24
response = []
if days:
response.append('%sd' % days)
if hours:
response.append('%sh' % hours)
if minutes:
response.append('%sm' % minutes)
if seconds or not response:
response.append('%ss' % seconds)
return "".join(response) | Converts timedelta to duration
>>> timedelta_to_duration(datetime.timedelta(0, 600))
'10m' | Below is the instruction that describes the task:
### Input:
Converts timedelta to duration
>>> timedelta_to_duration(datetime.timedelta(0, 600))
'10m'
### Response:
def timedelta_to_duration(obj):
"""Converts timedelta to duration
    >>> timedelta_to_duration(datetime.timedelta(0, 600))
    '10m'
"""
minutes, hours, days = 0, 0, 0
seconds = int(obj.total_seconds())
if seconds > 59:
minutes = seconds // 60
seconds = seconds % 60
if minutes > 59:
hours = minutes // 60
minutes = minutes % 60
if hours > 23:
days = hours // 24
hours = hours % 24
response = []
if days:
response.append('%sd' % days)
if hours:
response.append('%sh' % hours)
if minutes:
response.append('%sm' % minutes)
if seconds or not response:
response.append('%ss' % seconds)
return "".join(response) |
def make_messages(context: Context, javascript=False, fuzzy=False):
"""
Collects text into translation source files
"""
kwargs = {
'all': True,
'keep_pot': True,
'no_wrap': True,
}
if fuzzy:
kwargs['allow_fuzzy'] = True
if javascript:
kwargs.update(domain='djangojs', ignore_patterns=['*.bundle.js'])
with in_dir(context.app.django_app_name):
        return context.management_command('makemessages', **kwargs) | Collects text into translation source files | Below is the instruction that describes the task:
### Input:
Collects text into translation source files
### Response:
def make_messages(context: Context, javascript=False, fuzzy=False):
"""
Collects text into translation source files
"""
kwargs = {
'all': True,
'keep_pot': True,
'no_wrap': True,
}
if fuzzy:
kwargs['allow_fuzzy'] = True
if javascript:
kwargs.update(domain='djangojs', ignore_patterns=['*.bundle.js'])
with in_dir(context.app.django_app_name):
return context.management_command('makemessages', **kwargs) |
def index_modules(idx=None, path=None):
"""
Indexes objs from all modules
"""
suppress_output()
modules = defaultdict(list)
pkglist = pkgutil.walk_packages(onerror=lambda x: True)
print(pkglist)
if path:
pkglist = pkgutil.walk_packages(path, onerror=lambda x: True)
for modl, name, ispkg in pkglist:
try:
path = os.path.join(modl.path, name.split('.')[-1])
except AttributeError:
# Triggered on zipimport.zipimporter
continue
if os.path.isdir(path):
path = os.path.join(path, '__init__')
path += '.py'
objs = []
if os.path.exists(path):
try:
objs = read_objs_from_path(path)
except:
continue
elif not re.search(MODULE_BLACKLIST, name):
try:
mod = __import__(name)
objs = [k for k in dir(mod) if not k.startswith('__')]
except:
continue
else:
continue
for obj in objs:
if name not in modules[obj]:
modules[obj].append(name)
suppress_output(True)
    return merge_dicts(idx, dict(modules)) | Indexes objs from all modules | Below is the instruction that describes the task:
### Input:
Indexes objs from all modules
### Response:
def index_modules(idx=None, path=None):
"""
Indexes objs from all modules
"""
suppress_output()
modules = defaultdict(list)
pkglist = pkgutil.walk_packages(onerror=lambda x: True)
print(pkglist)
if path:
pkglist = pkgutil.walk_packages(path, onerror=lambda x: True)
for modl, name, ispkg in pkglist:
try:
path = os.path.join(modl.path, name.split('.')[-1])
except AttributeError:
# Triggered on zipimport.zipimporter
continue
if os.path.isdir(path):
path = os.path.join(path, '__init__')
path += '.py'
objs = []
if os.path.exists(path):
try:
objs = read_objs_from_path(path)
except:
continue
elif not re.search(MODULE_BLACKLIST, name):
try:
mod = __import__(name)
objs = [k for k in dir(mod) if not k.startswith('__')]
except:
continue
else:
continue
for obj in objs:
if name not in modules[obj]:
modules[obj].append(name)
suppress_output(True)
return merge_dicts(idx, dict(modules)) |
def encode_varint_1(num):
""" Encode an integer to a varint presentation. See
https://developers.google.com/protocol-buffers/docs/encoding?csw=1#varints
on how those can be produced.
Arguments:
num (int): Value to encode
Returns:
        bytearray: Encoded representation of integer with length from 1 to 10
bytes
"""
# Shift sign to the end of number
num = (num << 1) ^ (num >> 63)
# Max 10 bytes. We assert those are allocated
buf = bytearray(10)
for i in range(10):
# 7 lowest bits from the number and set 8th if we still have pending
# bits left to encode
buf[i] = num & 0x7f | (0x80 if num > 0x7f else 0)
num = num >> 7
if num == 0:
break
else:
        # Max size of an encoded varint is 10 bytes for unsigned values
raise ValueError("Out of double range")
    return buf[:i + 1] | Encode an integer to a varint representation. See
https://developers.google.com/protocol-buffers/docs/encoding?csw=1#varints
on how those can be produced.
Arguments:
num (int): Value to encode
Returns:
bytearray: Encoded representation of integer with length from 1 to 10
bytes | Below is the instruction that describes the task:
### Input:
Encode an integer to a varint representation. See
https://developers.google.com/protocol-buffers/docs/encoding?csw=1#varints
on how those can be produced.
Arguments:
num (int): Value to encode
Returns:
bytearray: Encoded representation of integer with length from 1 to 10
bytes
### Response:
def encode_varint_1(num):
""" Encode an integer to a varint presentation. See
https://developers.google.com/protocol-buffers/docs/encoding?csw=1#varints
on how those can be produced.
Arguments:
num (int): Value to encode
Returns:
        bytearray: Encoded representation of integer with length from 1 to 10
bytes
"""
# Shift sign to the end of number
num = (num << 1) ^ (num >> 63)
# Max 10 bytes. We assert those are allocated
buf = bytearray(10)
for i in range(10):
# 7 lowest bits from the number and set 8th if we still have pending
# bits left to encode
buf[i] = num & 0x7f | (0x80 if num > 0x7f else 0)
num = num >> 7
if num == 0:
break
else:
        # Max size of an encoded varint is 10 bytes for unsigned values
raise ValueError("Out of double range")
return buf[:i + 1] |
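To show the inverse operation, here is a hedged decoder sketch; decode_varint_1 is not part of the original module, only an illustration that undoes the varint framing and the zigzag sign shift:

def decode_varint_1(buf):
    """Illustrative inverse of encode_varint_1 (not original library code)."""
    num = 0
    for i, byte in enumerate(buf):
        # Accumulate 7 payload bits per byte; the high bit is the continuation flag
        num |= (byte & 0x7f) << (7 * i)
        if not byte & 0x80:
            break
    # Reverse the zigzag transform applied by the encoder
    return (num >> 1) ^ -(num & 1)

for n in (0, -1, 1, 300, -12345678):
    assert decode_varint_1(encode_varint_1(n)) == n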
def ipv6(value):
"""
Return whether or not given value is a valid IP version 6 address
(including IPv4-mapped IPv6 addresses).
This validator is based on `WTForms IPAddress validator`_.
.. _WTForms IPAddress validator:
https://github.com/wtforms/wtforms/blob/master/wtforms/validators.py
Examples::
>>> ipv6('abcd:ef::42:1')
True
>>> ipv6('::ffff:192.0.2.128')
True
>>> ipv6('::192.0.2.128')
True
>>> ipv6('abc.0.0.1')
ValidationFailure(func=ipv6, args={'value': 'abc.0.0.1'})
.. versionadded:: 0.2
:param value: IP address string to validate
"""
ipv6_groups = value.split(':')
if len(ipv6_groups) == 1:
return False
ipv4_groups = ipv6_groups[-1].split('.')
if len(ipv4_groups) > 1:
if not ipv4(ipv6_groups[-1]):
return False
ipv6_groups = ipv6_groups[:-1]
else:
ipv4_groups = []
max_groups = 6 if ipv4_groups else 8
if len(ipv6_groups) > max_groups:
return False
count_blank = 0
for part in ipv6_groups:
if not part:
count_blank += 1
continue
try:
num = int(part, 16)
except ValueError:
return False
else:
            if not 0 <= num <= 65535:  # a 16-bit group tops out at 0xFFFF
return False
if count_blank < 2:
return True
elif count_blank == 2 and not ipv6_groups[0] and not ipv6_groups[1]:
return True
return False | Return whether or not given value is a valid IP version 6 address
(including IPv4-mapped IPv6 addresses).
This validator is based on `WTForms IPAddress validator`_.
.. _WTForms IPAddress validator:
https://github.com/wtforms/wtforms/blob/master/wtforms/validators.py
Examples::
>>> ipv6('abcd:ef::42:1')
True
>>> ipv6('::ffff:192.0.2.128')
True
>>> ipv6('::192.0.2.128')
True
>>> ipv6('abc.0.0.1')
ValidationFailure(func=ipv6, args={'value': 'abc.0.0.1'})
.. versionadded:: 0.2
:param value: IP address string to validate | Below is the instruction that describes the task:
### Input:
Return whether or not given value is a valid IP version 6 address
(including IPv4-mapped IPv6 addresses).
This validator is based on `WTForms IPAddress validator`_.
.. _WTForms IPAddress validator:
https://github.com/wtforms/wtforms/blob/master/wtforms/validators.py
Examples::
>>> ipv6('abcd:ef::42:1')
True
>>> ipv6('::ffff:192.0.2.128')
True
>>> ipv6('::192.0.2.128')
True
>>> ipv6('abc.0.0.1')
ValidationFailure(func=ipv6, args={'value': 'abc.0.0.1'})
.. versionadded:: 0.2
:param value: IP address string to validate
### Response:
def ipv6(value):
"""
Return whether or not given value is a valid IP version 6 address
(including IPv4-mapped IPv6 addresses).
This validator is based on `WTForms IPAddress validator`_.
.. _WTForms IPAddress validator:
https://github.com/wtforms/wtforms/blob/master/wtforms/validators.py
Examples::
>>> ipv6('abcd:ef::42:1')
True
>>> ipv6('::ffff:192.0.2.128')
True
>>> ipv6('::192.0.2.128')
True
>>> ipv6('abc.0.0.1')
ValidationFailure(func=ipv6, args={'value': 'abc.0.0.1'})
.. versionadded:: 0.2
:param value: IP address string to validate
"""
ipv6_groups = value.split(':')
if len(ipv6_groups) == 1:
return False
ipv4_groups = ipv6_groups[-1].split('.')
if len(ipv4_groups) > 1:
if not ipv4(ipv6_groups[-1]):
return False
ipv6_groups = ipv6_groups[:-1]
else:
ipv4_groups = []
max_groups = 6 if ipv4_groups else 8
if len(ipv6_groups) > max_groups:
return False
count_blank = 0
for part in ipv6_groups:
if not part:
count_blank += 1
continue
try:
num = int(part, 16)
except ValueError:
return False
else:
            if not 0 <= num <= 65535:  # a 16-bit group tops out at 0xFFFF
return False
if count_blank < 2:
return True
elif count_blank == 2 and not ipv6_groups[0] and not ipv6_groups[1]:
return True
return False |
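A hedged usage sketch for ipv6; it assumes the companion ipv4() validator from the same module is in scope, and that the undecorated function returns a plain bool (the ValidationFailure shown in the docstring comes from a decorator that is not part of this snippet):

for addr in ('abcd:ef::42:1',       # compressed groups
             '::ffff:192.0.2.128',  # IPv4-mapped
             '12345::',             # group exceeds 16 bits -> invalid
             'abc.0.0.1'):          # no ':' separator -> invalid
    print(addr, ipv6(addr))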
def find(self, pattern):
""" Searches for an image pattern in the given region
Throws ``FindFailed`` exception if the image could not be found.
Sikuli supports OCR search with a text parameter. This does not (yet).
"""
findFailedRetry = True
while findFailedRetry:
match = self.exists(pattern)
if match is not None:
break
path = pattern.path if isinstance(pattern, Pattern) else pattern
findFailedRetry = self._raiseFindFailed("Could not find pattern '{}'".format(path))
if findFailedRetry:
time.sleep(self._repeatWaitTime)
return match | Searches for an image pattern in the given region
Throws ``FindFailed`` exception if the image could not be found.
Sikuli supports OCR search with a text parameter. This does not (yet). | Below is the instruction that describes the task:
### Input:
Searches for an image pattern in the given region
Throws ``FindFailed`` exception if the image could not be found.
Sikuli supports OCR search with a text parameter. This does not (yet).
### Response:
def find(self, pattern):
""" Searches for an image pattern in the given region
Throws ``FindFailed`` exception if the image could not be found.
Sikuli supports OCR search with a text parameter. This does not (yet).
"""
findFailedRetry = True
while findFailedRetry:
match = self.exists(pattern)
if match is not None:
break
path = pattern.path if isinstance(pattern, Pattern) else pattern
findFailedRetry = self._raiseFindFailed("Could not find pattern '{}'".format(path))
if findFailedRetry:
time.sleep(self._repeatWaitTime)
return match |
def uuid(self, **params):
"""
Generate a UUID with a human-readable representation.
Returns `(human_repr, full_digest)`. Accepts the same keyword arguments
as :meth:`humanize` (they'll be passed straight through).
"""
digest = str(uuidlib.uuid4()).replace('-', '')
return self.humanize(digest, **params), digest | Generate a UUID with a human-readable representation.
Returns `(human_repr, full_digest)`. Accepts the same keyword arguments
as :meth:`humanize` (they'll be passed straight through). | Below is the instruction that describes the task:
### Input:
Generate a UUID with a human-readable representation.
Returns `(human_repr, full_digest)`. Accepts the same keyword arguments
as :meth:`humanize` (they'll be passed straight through).
### Response:
def uuid(self, **params):
"""
Generate a UUID with a human-readable representation.
Returns `(human_repr, full_digest)`. Accepts the same keyword arguments
as :meth:`humanize` (they'll be passed straight through).
"""
digest = str(uuidlib.uuid4()).replace('-', '')
return self.humanize(digest, **params), digest |
def _extract_assignment_text(self, element_id):
"""
Extract assignment text (instructions).
@param element_id: Element id to extract assignment instructions from.
@type element_id: str
@return: List of assignment text (instructions).
@rtype: [str]
"""
dom = get_page(self._session, OPENCOURSE_PROGRAMMING_ASSIGNMENTS_URL,
json=True,
course_id=self._course_id,
element_id=element_id)
return [element['submissionLearnerSchema']['definition']
['assignmentInstructions']['definition']['value']
for element in dom['elements']] | Extract assignment text (instructions).
@param element_id: Element id to extract assignment instructions from.
@type element_id: str
@return: List of assignment text (instructions).
@rtype: [str] | Below is the instruction that describes the task:
### Input:
Extract assignment text (instructions).
@param element_id: Element id to extract assignment instructions from.
@type element_id: str
@return: List of assignment text (instructions).
@rtype: [str]
### Response:
def _extract_assignment_text(self, element_id):
"""
Extract assignment text (instructions).
@param element_id: Element id to extract assignment instructions from.
@type element_id: str
@return: List of assignment text (instructions).
@rtype: [str]
"""
dom = get_page(self._session, OPENCOURSE_PROGRAMMING_ASSIGNMENTS_URL,
json=True,
course_id=self._course_id,
element_id=element_id)
return [element['submissionLearnerSchema']['definition']
['assignmentInstructions']['definition']['value']
for element in dom['elements']] |
def followers(self, blogname, **kwargs):
"""
Gets the followers of the given blog
:param limit: an int, the number of followers you want returned
:param offset: an int, the follower to start at, for pagination.
        # Start at the 20th follower and get 20 more.
        client.followers(blogname, offset=20, limit=20)
:returns: A dict created from the JSON response
"""
url = "/v2/blog/{}/followers".format(blogname)
return self.send_api_request("get", url, kwargs, ['limit', 'offset']) | Gets the followers of the given blog
:param limit: an int, the number of followers you want returned
:param offset: an int, the follower to start at, for pagination.
# Start at the 20th follower and get 20 more.
client.followers(blogname, offset=20, limit=20)
:returns: A dict created from the JSON response | Below is the instruction that describes the task:
### Input:
Gets the followers of the given blog
:param limit: an int, the number of followers you want returned
:param offset: an int, the follower to start at, for pagination.
# Start at the 20th follower and get 20 more.
client.followers(blogname, offset=20, limit=20)
:returns: A dict created from the JSON response
### Response:
def followers(self, blogname, **kwargs):
"""
Gets the followers of the given blog
:param limit: an int, the number of followers you want returned
:param offset: an int, the follower to start at, for pagination.
        # Start at the 20th follower and get 20 more.
        client.followers(blogname, offset=20, limit=20)
:returns: A dict created from the JSON response
"""
url = "/v2/blog/{}/followers".format(blogname)
return self.send_api_request("get", url, kwargs, ['limit', 'offset']) |
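A hedged usage sketch, assuming an authenticated pytumblr-style client exposing the method above; the 'users' key follows the Tumblr API's documented follower payload:

resp = client.followers('staff', limit=20, offset=0)
for user in resp.get('users', []):
    print(user['name'])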
def index(self, strictindex):
"""
Return a chunk in a sequence referenced by index.
"""
return self._select(self._pointer.index(self.ruamelindex(strictindex))) | Return a chunk in a sequence referenced by index. | Below is the the instruction that describes the task:
### Input:
Return a chunk in a sequence referenced by index.
### Response:
def index(self, strictindex):
"""
Return a chunk in a sequence referenced by index.
"""
return self._select(self._pointer.index(self.ruamelindex(strictindex))) |
def send_broks_to_modules(self):
"""Put broks into module queues
    Only broks whose sent_to_externals flag is not True are sent
Only modules that ask for broks will get some
:return: None
"""
t00 = time.time()
nb_sent = 0
broks = []
for broker_link in list(self.my_daemon.brokers.values()):
for brok in broker_link.broks:
if not getattr(brok, 'sent_to_externals', False):
brok.to_send = True
broks.append(brok)
if not broks:
return
logger.debug("sending %d broks to modules...", len(broks))
for mod in self.my_daemon.modules_manager.get_external_instances():
logger.debug("Look for sending to module %s", mod.get_name())
module_queue = mod.to_q
if module_queue:
to_send = [b for b in broks if mod.want_brok(b)]
module_queue.put(to_send)
nb_sent += len(to_send)
# No more need to send them
for broker_link in list(self.my_daemon.brokers.values()):
for brok in broker_link.broks:
if not getattr(brok, 'sent_to_externals', False):
brok.to_send = False
brok.sent_to_externals = True
logger.debug("Time to send %d broks (after %d secs)", nb_sent, time.time() - t00) | Put broks into module queues
Only broks whose sent_to_externals flag is not True are sent
Only modules that ask for broks will get some
:return: None | Below is the instruction that describes the task:
### Input:
Put broks into module queues
Only broks whose sent_to_externals flag is not True are sent
Only modules that ask for broks will get some
:return: None
### Response:
def send_broks_to_modules(self):
"""Put broks into module queues
    Only broks whose sent_to_externals flag is not True are sent
Only modules that ask for broks will get some
:return: None
"""
t00 = time.time()
nb_sent = 0
broks = []
for broker_link in list(self.my_daemon.brokers.values()):
for brok in broker_link.broks:
if not getattr(brok, 'sent_to_externals', False):
brok.to_send = True
broks.append(brok)
if not broks:
return
logger.debug("sending %d broks to modules...", len(broks))
for mod in self.my_daemon.modules_manager.get_external_instances():
logger.debug("Look for sending to module %s", mod.get_name())
module_queue = mod.to_q
if module_queue:
to_send = [b for b in broks if mod.want_brok(b)]
module_queue.put(to_send)
nb_sent += len(to_send)
# No more need to send them
for broker_link in list(self.my_daemon.brokers.values()):
for brok in broker_link.broks:
if not getattr(brok, 'sent_to_externals', False):
brok.to_send = False
brok.sent_to_externals = True
logger.debug("Time to send %d broks (after %d secs)", nb_sent, time.time() - t00) |
def get_bounding_box(self):
"""Get the bounding box of this file."""
from pyproj import Geod
geod = Geod(ellps='WGS84')
dataset_group = DATASET_KEYS[self.datasets[0]]
idx = 0
lons_ring = None
lats_ring = None
while True:
path = 'Data_Products/{dataset_group}/{dataset_group}_Gran_{idx}/attr/'
prefix = path.format(dataset_group=dataset_group, idx=idx)
try:
lats = self.file_content[prefix + 'G-Ring_Latitude']
lons = self.file_content[prefix + 'G-Ring_Longitude']
if lons_ring is None:
lons_ring = lons
lats_ring = lats
else:
prev_lon = lons_ring[0]
prev_lat = lats_ring[0]
dists = list(geod.inv(lon, lat, prev_lon, prev_lat)[2] for lon, lat in zip(lons, lats))
first_idx = np.argmin(dists)
if first_idx == 2 and len(lons) == 8:
lons_ring = np.hstack((lons[:3], lons_ring[:-2], lons[4:]))
lats_ring = np.hstack((lats[:3], lats_ring[:-2], lats[4:]))
else:
raise NotImplementedError("Don't know how to handle G-Rings of length %d" % len(lons))
except KeyError:
break
idx += 1
    return lons_ring, lats_ring | Get the bounding box of this file. | Below is the instruction that describes the task:
### Input:
Get the bounding box of this file.
### Response:
def get_bounding_box(self):
"""Get the bounding box of this file."""
from pyproj import Geod
geod = Geod(ellps='WGS84')
dataset_group = DATASET_KEYS[self.datasets[0]]
idx = 0
lons_ring = None
lats_ring = None
while True:
path = 'Data_Products/{dataset_group}/{dataset_group}_Gran_{idx}/attr/'
prefix = path.format(dataset_group=dataset_group, idx=idx)
try:
lats = self.file_content[prefix + 'G-Ring_Latitude']
lons = self.file_content[prefix + 'G-Ring_Longitude']
if lons_ring is None:
lons_ring = lons
lats_ring = lats
else:
prev_lon = lons_ring[0]
prev_lat = lats_ring[0]
dists = list(geod.inv(lon, lat, prev_lon, prev_lat)[2] for lon, lat in zip(lons, lats))
first_idx = np.argmin(dists)
if first_idx == 2 and len(lons) == 8:
lons_ring = np.hstack((lons[:3], lons_ring[:-2], lons[4:]))
lats_ring = np.hstack((lats[:3], lats_ring[:-2], lats[4:]))
else:
raise NotImplementedError("Don't know how to handle G-Rings of length %d" % len(lons))
except KeyError:
break
idx += 1
return lons_ring, lats_ring |
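The geodesic distance call that stitches the G-Rings together above can be exercised on its own; a minimal sketch of pyproj's Geod.inv, which returns forward azimuth, back azimuth, and distance in metres:

from pyproj import Geod

geod = Geod(ellps='WGS84')
# Two points one degree of longitude apart on the equator
az12, az21, dist = geod.inv(0.0, 0.0, 1.0, 0.0)
print(round(dist))  # roughly 111319 metres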