code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def _parse_features(cls, feat_response):
# type: (Text) -> Dict[Text, Text]
"""Parse a dict of features from FTP feat response.
"""
features = {}
if feat_response.split("-")[0] == "211":
for line in feat_response.splitlines():
if line.startswith(" "):
key, _, value = line[1:].partition(" ")
features[key] = value
return features | Parse a dict of features from FTP feat response. | Below is the the instruction that describes the task:
### Input:
Parse a dict of features from FTP feat response.
### Response:
def _parse_features(cls, feat_response):
# type: (Text) -> Dict[Text, Text]
"""Parse a dict of features from FTP feat response.
"""
features = {}
if feat_response.split("-")[0] == "211":
for line in feat_response.splitlines():
if line.startswith(" "):
key, _, value = line[1:].partition(" ")
features[key] = value
return features |
def _kill_process(self, pid, cgroups=None, sig=signal.SIGKILL):
"""
Try to send signal to given process, either directly of with sudo.
Because we cannot send signals to the sudo process itself,
this method checks whether the target is the sudo process
and redirects the signal to sudo's child in this case.
"""
if self._user is not None:
if not cgroups:
cgroups = find_cgroups_of_process(pid)
# In case we started a tool with sudo, we cannot kill the started
# process itself, because sudo always runs as root.
# So if we are asked to kill the started process itself (the first
# process in the cgroup), we instead kill the child of sudo
# (the second process in the cgroup).
pids = cgroups.get_all_tasks(FREEZER)
try:
if pid == next(pids):
pid = next(pids)
except StopIteration:
# pids seems to not have enough values
pass
finally:
pids.close()
self._kill_process0(pid, sig) | Try to send signal to given process, either directly of with sudo.
Because we cannot send signals to the sudo process itself,
this method checks whether the target is the sudo process
and redirects the signal to sudo's child in this case. | Below is the the instruction that describes the task:
### Input:
Try to send signal to given process, either directly of with sudo.
Because we cannot send signals to the sudo process itself,
this method checks whether the target is the sudo process
and redirects the signal to sudo's child in this case.
### Response:
def _kill_process(self, pid, cgroups=None, sig=signal.SIGKILL):
"""
Try to send signal to given process, either directly of with sudo.
Because we cannot send signals to the sudo process itself,
this method checks whether the target is the sudo process
and redirects the signal to sudo's child in this case.
"""
if self._user is not None:
if not cgroups:
cgroups = find_cgroups_of_process(pid)
# In case we started a tool with sudo, we cannot kill the started
# process itself, because sudo always runs as root.
# So if we are asked to kill the started process itself (the first
# process in the cgroup), we instead kill the child of sudo
# (the second process in the cgroup).
pids = cgroups.get_all_tasks(FREEZER)
try:
if pid == next(pids):
pid = next(pids)
except StopIteration:
# pids seems to not have enough values
pass
finally:
pids.close()
self._kill_process0(pid, sig) |
def main():
""" Main
"""
args, otherthings = parse_known_args()
if args.version:
print_version()
return 0
verbosity = 0
if args.verbose:
verbosity = 1
far = Far(verbosity=verbosity)
if args.dry_run:
far.dry_run(old=args.old)
else:
far.find_and_replace(old=args.old, new=args.new) | Main | Below is the the instruction that describes the task:
### Input:
Main
### Response:
def main():
""" Main
"""
args, otherthings = parse_known_args()
if args.version:
print_version()
return 0
verbosity = 0
if args.verbose:
verbosity = 1
far = Far(verbosity=verbosity)
if args.dry_run:
far.dry_run(old=args.old)
else:
far.find_and_replace(old=args.old, new=args.new) |
def skip_all(self):
"""Skip all readings in this walker."""
storage, streaming = self.engine.count()
if self.selector.output:
self.offset = streaming
else:
self.offset = storage
self._count = 0 | Skip all readings in this walker. | Below is the the instruction that describes the task:
### Input:
Skip all readings in this walker.
### Response:
def skip_all(self):
"""Skip all readings in this walker."""
storage, streaming = self.engine.count()
if self.selector.output:
self.offset = streaming
else:
self.offset = storage
self._count = 0 |
def get_most_relevant_words_for_topic(vocab, rel_mat, topic, n=None):
"""
Get words from `vocab` for `topic` ordered by most to least relevance (Sievert and Shirley 2014) using the relevance
matrix `rel_mat` obtained from `get_topic_word_relevance()`.
Optionally only return the `n` most relevant words.
"""
_check_relevant_words_for_topic_args(vocab, rel_mat, topic)
return _words_by_score(vocab, rel_mat[topic], least_to_most=False, n=n) | Get words from `vocab` for `topic` ordered by most to least relevance (Sievert and Shirley 2014) using the relevance
matrix `rel_mat` obtained from `get_topic_word_relevance()`.
Optionally only return the `n` most relevant words. | Below is the the instruction that describes the task:
### Input:
Get words from `vocab` for `topic` ordered by most to least relevance (Sievert and Shirley 2014) using the relevance
matrix `rel_mat` obtained from `get_topic_word_relevance()`.
Optionally only return the `n` most relevant words.
### Response:
def get_most_relevant_words_for_topic(vocab, rel_mat, topic, n=None):
"""
Get words from `vocab` for `topic` ordered by most to least relevance (Sievert and Shirley 2014) using the relevance
matrix `rel_mat` obtained from `get_topic_word_relevance()`.
Optionally only return the `n` most relevant words.
"""
_check_relevant_words_for_topic_args(vocab, rel_mat, topic)
return _words_by_score(vocab, rel_mat[topic], least_to_most=False, n=n) |
def _exposure_to_weights(self, y, exposure=None, weights=None):
"""simple tool to create a common API
Parameters
----------
y : array-like, shape (n_samples,)
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes.
exposure : array-like shape (n_samples,) or None, default: None
containing exposures
if None, defaults to array of ones
weights : array-like shape (n_samples,) or None, default: None
containing sample weights
if None, defaults to array of ones
Returns
-------
y : y normalized by exposure
weights : array-like shape (n_samples,)
"""
y = y.ravel()
if exposure is not None:
exposure = np.array(exposure).astype('f').ravel()
exposure = check_array(exposure, name='sample exposure',
ndim=1, verbose=self.verbose)
else:
exposure = np.ones_like(y.ravel()).astype('float64')
# check data
exposure = exposure.ravel()
check_lengths(y, exposure)
# normalize response
y = y / exposure
if weights is not None:
weights = np.array(weights).astype('f').ravel()
weights = check_array(weights, name='sample weights',
ndim=1, verbose=self.verbose)
else:
weights = np.ones_like(y).astype('float64')
check_lengths(weights, exposure)
# set exposure as the weight
# we do this because we have divided our response
# so if we make an error of 1 now, we need it to count more heavily.
weights = weights * exposure
return y, weights | simple tool to create a common API
Parameters
----------
y : array-like, shape (n_samples,)
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes.
exposure : array-like shape (n_samples,) or None, default: None
containing exposures
if None, defaults to array of ones
weights : array-like shape (n_samples,) or None, default: None
containing sample weights
if None, defaults to array of ones
Returns
-------
y : y normalized by exposure
weights : array-like shape (n_samples,) | Below is the the instruction that describes the task:
### Input:
simple tool to create a common API
Parameters
----------
y : array-like, shape (n_samples,)
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes.
exposure : array-like shape (n_samples,) or None, default: None
containing exposures
if None, defaults to array of ones
weights : array-like shape (n_samples,) or None, default: None
containing sample weights
if None, defaults to array of ones
Returns
-------
y : y normalized by exposure
weights : array-like shape (n_samples,)
### Response:
def _exposure_to_weights(self, y, exposure=None, weights=None):
"""simple tool to create a common API
Parameters
----------
y : array-like, shape (n_samples,)
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes.
exposure : array-like shape (n_samples,) or None, default: None
containing exposures
if None, defaults to array of ones
weights : array-like shape (n_samples,) or None, default: None
containing sample weights
if None, defaults to array of ones
Returns
-------
y : y normalized by exposure
weights : array-like shape (n_samples,)
"""
y = y.ravel()
if exposure is not None:
exposure = np.array(exposure).astype('f').ravel()
exposure = check_array(exposure, name='sample exposure',
ndim=1, verbose=self.verbose)
else:
exposure = np.ones_like(y.ravel()).astype('float64')
# check data
exposure = exposure.ravel()
check_lengths(y, exposure)
# normalize response
y = y / exposure
if weights is not None:
weights = np.array(weights).astype('f').ravel()
weights = check_array(weights, name='sample weights',
ndim=1, verbose=self.verbose)
else:
weights = np.ones_like(y).astype('float64')
check_lengths(weights, exposure)
# set exposure as the weight
# we do this because we have divided our response
# so if we make an error of 1 now, we need it to count more heavily.
weights = weights * exposure
return y, weights |
def comparelist_view(self, request, object_id, extra_context=None):
"""Allow selecting versions to compare."""
opts = self.model._meta
object_id = unquote(object_id)
current = get_object_or_404(self.model, pk=object_id)
# As done by reversion's history_view
action_list = [
{
"revision": version.revision,
"url": reverse("%s:%s_%s_compare" % (self.admin_site.name, opts.app_label, opts.model_name), args=(quote(version.object_id), version.id)),
} for version in self._reversion_order_version_queryset(Version.objects.get_for_object_reference(
self.model,
object_id).select_related("revision__user"))]
context = {"action_list": action_list,
"opts": opts,
"object_id": quote(object_id),
"original": current,
}
extra_context = extra_context or {}
context.update(extra_context)
return render(request, self.compare_list_template or self._get_template_list("compare_list.html"),
context) | Allow selecting versions to compare. | Below is the the instruction that describes the task:
### Input:
Allow selecting versions to compare.
### Response:
def comparelist_view(self, request, object_id, extra_context=None):
"""Allow selecting versions to compare."""
opts = self.model._meta
object_id = unquote(object_id)
current = get_object_or_404(self.model, pk=object_id)
# As done by reversion's history_view
action_list = [
{
"revision": version.revision,
"url": reverse("%s:%s_%s_compare" % (self.admin_site.name, opts.app_label, opts.model_name), args=(quote(version.object_id), version.id)),
} for version in self._reversion_order_version_queryset(Version.objects.get_for_object_reference(
self.model,
object_id).select_related("revision__user"))]
context = {"action_list": action_list,
"opts": opts,
"object_id": quote(object_id),
"original": current,
}
extra_context = extra_context or {}
context.update(extra_context)
return render(request, self.compare_list_template or self._get_template_list("compare_list.html"),
context) |
def print_genl_hdr(ofd, start):
"""https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L821.
Positional arguments:
ofd -- function to call with arguments similar to `logging.debug`.
start -- bytearray() or bytearray_ptr() instance.
"""
ghdr = genlmsghdr(start)
ofd(' [GENERIC NETLINK HEADER] %d octets', GENL_HDRLEN)
ofd(' .cmd = %d', ghdr.cmd)
ofd(' .version = %d', ghdr.version)
ofd(' .unused = %#d', ghdr.reserved) | https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L821.
Positional arguments:
ofd -- function to call with arguments similar to `logging.debug`.
start -- bytearray() or bytearray_ptr() instance. | Below is the the instruction that describes the task:
### Input:
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L821.
Positional arguments:
ofd -- function to call with arguments similar to `logging.debug`.
start -- bytearray() or bytearray_ptr() instance.
### Response:
def print_genl_hdr(ofd, start):
"""https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L821.
Positional arguments:
ofd -- function to call with arguments similar to `logging.debug`.
start -- bytearray() or bytearray_ptr() instance.
"""
ghdr = genlmsghdr(start)
ofd(' [GENERIC NETLINK HEADER] %d octets', GENL_HDRLEN)
ofd(' .cmd = %d', ghdr.cmd)
ofd(' .version = %d', ghdr.version)
ofd(' .unused = %#d', ghdr.reserved) |
def do_http_repo(self, repo):
'''
[Re]define REPO as http package repository.
http_repo REPO
'''
self.abort_on_missing_effective_repo_name(repo, 'http_repo')
repo_name = self.get_effective_repo_name(repo)
try:
self.network.set(repo_name, REPO.TYPE, REPOTYPE.HTTP)
except UnknownRepoError:
self.network.define_http_repo(repo_name)
self.network.active_repo = repo_name | [Re]define REPO as http package repository.
http_repo REPO | Below is the the instruction that describes the task:
### Input:
[Re]define REPO as http package repository.
http_repo REPO
### Response:
def do_http_repo(self, repo):
'''
[Re]define REPO as http package repository.
http_repo REPO
'''
self.abort_on_missing_effective_repo_name(repo, 'http_repo')
repo_name = self.get_effective_repo_name(repo)
try:
self.network.set(repo_name, REPO.TYPE, REPOTYPE.HTTP)
except UnknownRepoError:
self.network.define_http_repo(repo_name)
self.network.active_repo = repo_name |
def point(self, t):
"""returns the coordinates of the Bezier curve evaluated at t."""
distance = self.end - self.start
return self.start + distance*t | returns the coordinates of the Bezier curve evaluated at t. | Below is the the instruction that describes the task:
### Input:
returns the coordinates of the Bezier curve evaluated at t.
### Response:
def point(self, t):
"""returns the coordinates of the Bezier curve evaluated at t."""
distance = self.end - self.start
return self.start + distance*t |
def from_string(cls, action_str):
"""
Creates a new Action instance from the given string.
The given string **must** match one of those patterns:
* module.function
* module.function()
* module.function(arg1=value1, arg2=value2)
Any other form will trigger an Exception.
The function parses the given string and tries to load the function
from the given module.
Raises :class:`exceptions.SyntaxError` if the compiled source code is
invalid.
Raises :class:`exceptions.ValueError` if the given source code contains
null bytes.
Raises :class:`exceptions.UnsupportedActionError` if the given source
code can not be parsed (doesn't match one of the supported patterns).
Raises :class:`exceptions.UnsupportedActionArgumentError` if one the
given argument has an unsupported type (we only support
:class:`ast.Num` and :class:`ast.Str`).
Returns a new :class:`Action` instance.
"""
args = {}
try:
mod_obj = ast.parse(action_str)
except (SyntaxError, ValueError) as e:
raise e
else:
call_obj = mod_obj.body[0].value
if isinstance(call_obj, ast.Attribute):
# Seems like we have a simple function name
# (for example `module.function`)
module = call_obj.value.id
func = call_obj.attr
elif isinstance(call_obj, ast.Call):
# Seems like we have a function call, maybe with
# a few parameters.
# Note that we only support `module.function()` format.
# You can't use `function()`.
try:
module = call_obj.func.value.id
func = call_obj.func.attr
except AttributeError:
raise UnsupportedActionError(action_str)
else:
# If we have arguments, they MUST be named:
for kwarg in call_obj.keywords:
# We only support Strings and Numerics:
if isinstance(kwarg.value, ast.Num):
args.update({kwarg.arg: kwarg.value.n})
elif isinstance(kwarg.value, ast.Str):
args.update({kwarg.arg: kwarg.value.s})
else:
raise UnsupportedActionArgumentError(action_str,
kwarg)
else:
raise UnsupportedActionError(action_str)
return cls(module, func, args) | Creates a new Action instance from the given string.
The given string **must** match one of those patterns:
* module.function
* module.function()
* module.function(arg1=value1, arg2=value2)
Any other form will trigger an Exception.
The function parses the given string and tries to load the function
from the given module.
Raises :class:`exceptions.SyntaxError` if the compiled source code is
invalid.
Raises :class:`exceptions.ValueError` if the given source code contains
null bytes.
Raises :class:`exceptions.UnsupportedActionError` if the given source
code can not be parsed (doesn't match one of the supported patterns).
Raises :class:`exceptions.UnsupportedActionArgumentError` if one the
given argument has an unsupported type (we only support
:class:`ast.Num` and :class:`ast.Str`).
Returns a new :class:`Action` instance. | Below is the the instruction that describes the task:
### Input:
Creates a new Action instance from the given string.
The given string **must** match one of those patterns:
* module.function
* module.function()
* module.function(arg1=value1, arg2=value2)
Any other form will trigger an Exception.
The function parses the given string and tries to load the function
from the given module.
Raises :class:`exceptions.SyntaxError` if the compiled source code is
invalid.
Raises :class:`exceptions.ValueError` if the given source code contains
null bytes.
Raises :class:`exceptions.UnsupportedActionError` if the given source
code can not be parsed (doesn't match one of the supported patterns).
Raises :class:`exceptions.UnsupportedActionArgumentError` if one the
given argument has an unsupported type (we only support
:class:`ast.Num` and :class:`ast.Str`).
Returns a new :class:`Action` instance.
### Response:
def from_string(cls, action_str):
"""
Creates a new Action instance from the given string.
The given string **must** match one of those patterns:
* module.function
* module.function()
* module.function(arg1=value1, arg2=value2)
Any other form will trigger an Exception.
The function parses the given string and tries to load the function
from the given module.
Raises :class:`exceptions.SyntaxError` if the compiled source code is
invalid.
Raises :class:`exceptions.ValueError` if the given source code contains
null bytes.
Raises :class:`exceptions.UnsupportedActionError` if the given source
code can not be parsed (doesn't match one of the supported patterns).
Raises :class:`exceptions.UnsupportedActionArgumentError` if one the
given argument has an unsupported type (we only support
:class:`ast.Num` and :class:`ast.Str`).
Returns a new :class:`Action` instance.
"""
args = {}
try:
mod_obj = ast.parse(action_str)
except (SyntaxError, ValueError) as e:
raise e
else:
call_obj = mod_obj.body[0].value
if isinstance(call_obj, ast.Attribute):
# Seems like we have a simple function name
# (for example `module.function`)
module = call_obj.value.id
func = call_obj.attr
elif isinstance(call_obj, ast.Call):
# Seems like we have a function call, maybe with
# a few parameters.
# Note that we only support `module.function()` format.
# You can't use `function()`.
try:
module = call_obj.func.value.id
func = call_obj.func.attr
except AttributeError:
raise UnsupportedActionError(action_str)
else:
# If we have arguments, they MUST be named:
for kwarg in call_obj.keywords:
# We only support Strings and Numerics:
if isinstance(kwarg.value, ast.Num):
args.update({kwarg.arg: kwarg.value.n})
elif isinstance(kwarg.value, ast.Str):
args.update({kwarg.arg: kwarg.value.s})
else:
raise UnsupportedActionArgumentError(action_str,
kwarg)
else:
raise UnsupportedActionError(action_str)
return cls(module, func, args) |
def get_service_marketplace(self, available=True, unavailable=False,
deprecated=False):
"""
Returns a list of service names. Can return all services, just
those supported by PredixPy, or just those not yet supported by
PredixPy.
:param available: Return the services that are
available in PredixPy. (Defaults to True)
:param unavailable: Return the services that are not yet
supported by PredixPy. (Defaults to False)
:param deprecated: Return the services that are
supported by PredixPy but no longer available. (True)
"""
supported = set(self.supported.keys())
all_services = set(self.space.get_services())
results = set()
if available:
results.update(supported)
if unavailable:
results.update(all_services.difference(supported))
if deprecated:
results.update(supported.difference(all_services))
return list(results) | Returns a list of service names. Can return all services, just
those supported by PredixPy, or just those not yet supported by
PredixPy.
:param available: Return the services that are
available in PredixPy. (Defaults to True)
:param unavailable: Return the services that are not yet
supported by PredixPy. (Defaults to False)
:param deprecated: Return the services that are
supported by PredixPy but no longer available. (True) | Below is the the instruction that describes the task:
### Input:
Returns a list of service names. Can return all services, just
those supported by PredixPy, or just those not yet supported by
PredixPy.
:param available: Return the services that are
available in PredixPy. (Defaults to True)
:param unavailable: Return the services that are not yet
supported by PredixPy. (Defaults to False)
:param deprecated: Return the services that are
supported by PredixPy but no longer available. (True)
### Response:
def get_service_marketplace(self, available=True, unavailable=False,
deprecated=False):
"""
Returns a list of service names. Can return all services, just
those supported by PredixPy, or just those not yet supported by
PredixPy.
:param available: Return the services that are
available in PredixPy. (Defaults to True)
:param unavailable: Return the services that are not yet
supported by PredixPy. (Defaults to False)
:param deprecated: Return the services that are
supported by PredixPy but no longer available. (True)
"""
supported = set(self.supported.keys())
all_services = set(self.space.get_services())
results = set()
if available:
results.update(supported)
if unavailable:
results.update(all_services.difference(supported))
if deprecated:
results.update(supported.difference(all_services))
return list(results) |
def do_reload(module: types.ModuleType, newer_than: int) -> bool:
"""
Executes the reload of the specified module if the source file that it was
loaded from was updated more recently than the specified time
:param module:
A module object to be reloaded
:param newer_than:
The time in seconds since epoch that should be used to determine if
the module needs to be reloaded. If the module source was modified
more recently than this time, the module will be refreshed.
:return:
Whether or not the module was reloaded
"""
path = getattr(module, '__file__')
directory = getattr(module, '__path__', [None])[0]
if path is None and directory:
path = os.path.join(directory, '__init__.py')
last_modified = os.path.getmtime(path)
if last_modified < newer_than:
return False
try:
importlib.reload(module)
return True
except ImportError:
return False | Executes the reload of the specified module if the source file that it was
loaded from was updated more recently than the specified time
:param module:
A module object to be reloaded
:param newer_than:
The time in seconds since epoch that should be used to determine if
the module needs to be reloaded. If the module source was modified
more recently than this time, the module will be refreshed.
:return:
Whether or not the module was reloaded | Below is the the instruction that describes the task:
### Input:
Executes the reload of the specified module if the source file that it was
loaded from was updated more recently than the specified time
:param module:
A module object to be reloaded
:param newer_than:
The time in seconds since epoch that should be used to determine if
the module needs to be reloaded. If the module source was modified
more recently than this time, the module will be refreshed.
:return:
Whether or not the module was reloaded
### Response:
def do_reload(module: types.ModuleType, newer_than: int) -> bool:
"""
Executes the reload of the specified module if the source file that it was
loaded from was updated more recently than the specified time
:param module:
A module object to be reloaded
:param newer_than:
The time in seconds since epoch that should be used to determine if
the module needs to be reloaded. If the module source was modified
more recently than this time, the module will be refreshed.
:return:
Whether or not the module was reloaded
"""
path = getattr(module, '__file__')
directory = getattr(module, '__path__', [None])[0]
if path is None and directory:
path = os.path.join(directory, '__init__.py')
last_modified = os.path.getmtime(path)
if last_modified < newer_than:
return False
try:
importlib.reload(module)
return True
except ImportError:
return False |
def get_model_home():
'''
Returns a root folder path for downloading models.
'''
d = os.path.join(get_data_home(), 'nnp_models')
if not os.path.isdir(d):
os.makedirs(d)
return d | Returns a root folder path for downloading models. | Below is the the instruction that describes the task:
### Input:
Returns a root folder path for downloading models.
### Response:
def get_model_home():
'''
Returns a root folder path for downloading models.
'''
d = os.path.join(get_data_home(), 'nnp_models')
if not os.path.isdir(d):
os.makedirs(d)
return d |
def ntpl2array(ntpl):
"""
Convert a :func:`collections.namedtuple` object to a :class:`numpy.ndarray`
object that can be saved using :func:`numpy.savez`.
Parameters
----------
ntpl : collections.namedtuple object
Named tuple object to be converted to ndarray
Returns
-------
arr : ndarray
Array representation of input named tuple
"""
return np.asarray((np.hstack([col for col in ntpl]), ntpl._fields,
ntpl.__class__.__name__)) | Convert a :func:`collections.namedtuple` object to a :class:`numpy.ndarray`
object that can be saved using :func:`numpy.savez`.
Parameters
----------
ntpl : collections.namedtuple object
Named tuple object to be converted to ndarray
Returns
-------
arr : ndarray
Array representation of input named tuple | Below is the the instruction that describes the task:
### Input:
Convert a :func:`collections.namedtuple` object to a :class:`numpy.ndarray`
object that can be saved using :func:`numpy.savez`.
Parameters
----------
ntpl : collections.namedtuple object
Named tuple object to be converted to ndarray
Returns
-------
arr : ndarray
Array representation of input named tuple
### Response:
def ntpl2array(ntpl):
"""
Convert a :func:`collections.namedtuple` object to a :class:`numpy.ndarray`
object that can be saved using :func:`numpy.savez`.
Parameters
----------
ntpl : collections.namedtuple object
Named tuple object to be converted to ndarray
Returns
-------
arr : ndarray
Array representation of input named tuple
"""
return np.asarray((np.hstack([col for col in ntpl]), ntpl._fields,
ntpl.__class__.__name__)) |
def feed(self, image):
"""
Feeds the network with an image
:param image: image (perhaps loaded with CV2)
:return: network result
"""
network_name = self.__class__.__name__.lower()
with tf.variable_scope(network_name):
return self._feed(image) | Feeds the network with an image
:param image: image (perhaps loaded with CV2)
:return: network result | Below is the the instruction that describes the task:
### Input:
Feeds the network with an image
:param image: image (perhaps loaded with CV2)
:return: network result
### Response:
def feed(self, image):
"""
Feeds the network with an image
:param image: image (perhaps loaded with CV2)
:return: network result
"""
network_name = self.__class__.__name__.lower()
with tf.variable_scope(network_name):
return self._feed(image) |
def _find_template(self, template, tree, blocks, with_filename,
source, comment):
"""
If tree is true, then will display the track of template extend or include
"""
from uliweb import application
from uliweb.core.template import _format_code
def get_rel_filename(filename, path):
f1 = os.path.splitdrive(filename)[1]
f2 = os.path.splitdrive(path)[1]
f = os.path.relpath(f1, f2).replace('\\', '/')
if f.startswith('..'):
return filename.replace('\\', '/')
else:
return f
template_file = None
if not tree:
application.template_loader.comment = comment
files = application.template_loader.find_templates(template)
if files:
template_file = files[0]
for x in files:
print x
if source:
print
print '---------------- source of %s ---------------' % template
t = application.template_loader.load(template_file)
if t and comment:
print _format_code(t.code).rstrip()
print
else:
print t.code
print
else:
print 'Not Found'
else:
application.template_loader.print_tree(template)
if template_file and blocks:
application.template_loader.print_blocks(template, with_filename) | If tree is true, then will display the track of template extend or include | Below is the the instruction that describes the task:
### Input:
If tree is true, then will display the track of template extend or include
### Response:
def _find_template(self, template, tree, blocks, with_filename,
source, comment):
"""
If tree is true, then will display the track of template extend or include
"""
from uliweb import application
from uliweb.core.template import _format_code
def get_rel_filename(filename, path):
f1 = os.path.splitdrive(filename)[1]
f2 = os.path.splitdrive(path)[1]
f = os.path.relpath(f1, f2).replace('\\', '/')
if f.startswith('..'):
return filename.replace('\\', '/')
else:
return f
template_file = None
if not tree:
application.template_loader.comment = comment
files = application.template_loader.find_templates(template)
if files:
template_file = files[0]
for x in files:
print x
if source:
print
print '---------------- source of %s ---------------' % template
t = application.template_loader.load(template_file)
if t and comment:
print _format_code(t.code).rstrip()
print
else:
print t.code
print
else:
print 'Not Found'
else:
application.template_loader.print_tree(template)
if template_file and blocks:
application.template_loader.print_blocks(template, with_filename) |
def filter(self, node, condition):
"""
This method accepts a node and the condition function; a
generator will be returned to yield the nodes that got matched
by the condition.
"""
if not isinstance(node, Node):
raise TypeError('not a node')
for child in node:
if condition(child):
yield child
for subchild in self.filter(child, condition):
yield subchild | This method accepts a node and the condition function; a
generator will be returned to yield the nodes that got matched
by the condition. | Below is the the instruction that describes the task:
### Input:
This method accepts a node and the condition function; a
generator will be returned to yield the nodes that got matched
by the condition.
### Response:
def filter(self, node, condition):
"""
This method accepts a node and the condition function; a
generator will be returned to yield the nodes that got matched
by the condition.
"""
if not isinstance(node, Node):
raise TypeError('not a node')
for child in node:
if condition(child):
yield child
for subchild in self.filter(child, condition):
yield subchild |
def decode_file(fname):
"""Given the name of a pcap file, open it, decode the contents and yield each packet."""
if _debug: decode_file._debug("decode_file %r", fname)
if not pcap:
raise RuntimeError("failed to import pcap")
# create a pcap object, reading from the file
p = pcap.pcap(fname)
# loop through the packets
for i, (timestamp, data) in enumerate(p):
try:
pkt = decode_packet(data)
if not pkt:
continue
except Exception as err:
if _debug: decode_file._debug(" - exception decoding packet %d: %r", i+1, err)
continue
# save the packet number (as viewed in Wireshark) and timestamp
pkt._number = i + 1
pkt._timestamp = timestamp
    yield pkt | Given the name of a pcap file, open it, decode the contents and yield each packet. | Below is the instruction that describes the task:
### Input:
Given the name of a pcap file, open it, decode the contents and yield each packet.
### Response:
def decode_file(fname):
"""Given the name of a pcap file, open it, decode the contents and yield each packet."""
if _debug: decode_file._debug("decode_file %r", fname)
if not pcap:
raise RuntimeError("failed to import pcap")
# create a pcap object, reading from the file
p = pcap.pcap(fname)
# loop through the packets
for i, (timestamp, data) in enumerate(p):
try:
pkt = decode_packet(data)
if not pkt:
continue
except Exception as err:
if _debug: decode_file._debug(" - exception decoding packet %d: %r", i+1, err)
continue
# save the packet number (as viewed in Wireshark) and timestamp
pkt._number = i + 1
pkt._timestamp = timestamp
yield pkt |
def __calculate_always_decrease_rw_values(
table_name, read_units, provisioned_reads,
write_units, provisioned_writes):
""" Calculate values for always-decrease-rw-together
This will only return reads and writes decreases if both reads and writes
are lower than the current provisioning
:type table_name: str
:param table_name: Name of the DynamoDB table
:type read_units: int
:param read_units: New read unit provisioning
:type provisioned_reads: int
:param provisioned_reads: Currently provisioned reads
:type write_units: int
:param write_units: New write unit provisioning
:type provisioned_writes: int
:param provisioned_writes: Currently provisioned writes
:returns: (int, int) -- (reads, writes)
"""
if read_units <= provisioned_reads and write_units <= provisioned_writes:
return (read_units, write_units)
if read_units < provisioned_reads:
logger.info(
'{0} - Reads could be decreased, but we are waiting for '
'writes to get lower than the threshold before '
'scaling down'.format(table_name))
read_units = provisioned_reads
elif write_units < provisioned_writes:
logger.info(
'{0} - Writes could be decreased, but we are waiting for '
'reads to get lower than the threshold before '
'scaling down'.format(table_name))
write_units = provisioned_writes
return (read_units, write_units) | Calculate values for always-decrease-rw-together
This will only return reads and writes decreases if both reads and writes
are lower than the current provisioning
:type table_name: str
:param table_name: Name of the DynamoDB table
:type read_units: int
:param read_units: New read unit provisioning
:type provisioned_reads: int
:param provisioned_reads: Currently provisioned reads
:type write_units: int
:param write_units: New write unit provisioning
:type provisioned_writes: int
:param provisioned_writes: Currently provisioned writes
    :returns: (int, int) -- (reads, writes) | Below is the instruction that describes the task:
### Input:
Calculate values for always-decrease-rw-together
This will only return reads and writes decreases if both reads and writes
are lower than the current provisioning
:type table_name: str
:param table_name: Name of the DynamoDB table
:type read_units: int
:param read_units: New read unit provisioning
:type provisioned_reads: int
:param provisioned_reads: Currently provisioned reads
:type write_units: int
:param write_units: New write unit provisioning
:type provisioned_writes: int
:param provisioned_writes: Currently provisioned writes
:returns: (int, int) -- (reads, writes)
### Response:
def __calculate_always_decrease_rw_values(
table_name, read_units, provisioned_reads,
write_units, provisioned_writes):
""" Calculate values for always-decrease-rw-together
This will only return reads and writes decreases if both reads and writes
are lower than the current provisioning
:type table_name: str
:param table_name: Name of the DynamoDB table
:type read_units: int
:param read_units: New read unit provisioning
:type provisioned_reads: int
:param provisioned_reads: Currently provisioned reads
:type write_units: int
:param write_units: New write unit provisioning
:type provisioned_writes: int
:param provisioned_writes: Currently provisioned writes
:returns: (int, int) -- (reads, writes)
"""
if read_units <= provisioned_reads and write_units <= provisioned_writes:
return (read_units, write_units)
if read_units < provisioned_reads:
logger.info(
'{0} - Reads could be decreased, but we are waiting for '
'writes to get lower than the threshold before '
'scaling down'.format(table_name))
read_units = provisioned_reads
elif write_units < provisioned_writes:
logger.info(
'{0} - Writes could be decreased, but we are waiting for '
'reads to get lower than the threshold before '
'scaling down'.format(table_name))
write_units = provisioned_writes
return (read_units, write_units) |
def login(request):
'''
Logs in the user via given login and password.
'''
serializer_class = registration_settings.LOGIN_SERIALIZER_CLASS
serializer = serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
user = serializer.get_authenticated_user()
if not user:
raise BadRequest('Login or password invalid.')
extra_data = perform_login(request, user)
    return get_ok_response('Login successful', extra_data=extra_data) | Logs in the user via given login and password. | Below is the instruction that describes the task:
### Input:
Logs in the user via given login and password.
### Response:
def login(request):
'''
Logs in the user via given login and password.
'''
serializer_class = registration_settings.LOGIN_SERIALIZER_CLASS
serializer = serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
user = serializer.get_authenticated_user()
if not user:
raise BadRequest('Login or password invalid.')
extra_data = perform_login(request, user)
return get_ok_response('Login successful', extra_data=extra_data) |
def static_arp_entry(self, ipaddress, macaddress, arp_type='static', netmask=32):
"""
Add an arp entry to this physical interface.
::
interface = engine.physical_interface.get(0)
interface.static_arp_entry(
ipaddress='23.23.23.23',
arp_type='static',
macaddress='02:02:02:02:04:04')
interface.save()
:param str ipaddress: ip address for entry
:param str macaddress: macaddress for ip address
:param str arp_type: type of entry, 'static' or 'proxy' (default: static)
:param str,int netmask: netmask for entry (default: 32)
:return: None
"""
self.data['arp_entry'].append({
'ipaddress': ipaddress,
'macaddress': macaddress,
'netmask': netmask,
'type': arp_type}) | Add an arp entry to this physical interface.
::
interface = engine.physical_interface.get(0)
interface.static_arp_entry(
ipaddress='23.23.23.23',
arp_type='static',
macaddress='02:02:02:02:04:04')
interface.save()
:param str ipaddress: ip address for entry
:param str macaddress: macaddress for ip address
:param str arp_type: type of entry, 'static' or 'proxy' (default: static)
:param str,int netmask: netmask for entry (default: 32)
    :return: None | Below is the instruction that describes the task:
### Input:
Add an arp entry to this physical interface.
::
interface = engine.physical_interface.get(0)
interface.static_arp_entry(
ipaddress='23.23.23.23',
arp_type='static',
macaddress='02:02:02:02:04:04')
interface.save()
:param str ipaddress: ip address for entry
:param str macaddress: macaddress for ip address
:param str arp_type: type of entry, 'static' or 'proxy' (default: static)
:param str,int netmask: netmask for entry (default: 32)
:return: None
### Response:
def static_arp_entry(self, ipaddress, macaddress, arp_type='static', netmask=32):
"""
Add an arp entry to this physical interface.
::
interface = engine.physical_interface.get(0)
interface.static_arp_entry(
ipaddress='23.23.23.23',
arp_type='static',
macaddress='02:02:02:02:04:04')
interface.save()
:param str ipaddress: ip address for entry
:param str macaddress: macaddress for ip address
:param str arp_type: type of entry, 'static' or 'proxy' (default: static)
:param str,int netmask: netmask for entry (default: 32)
:return: None
"""
self.data['arp_entry'].append({
'ipaddress': ipaddress,
'macaddress': macaddress,
'netmask': netmask,
'type': arp_type}) |
def CheckIfCanStartClientFlow(self, username, flow_name):
"""Checks whether a given user can start a given flow."""
del username # Unused.
flow_cls = flow.GRRFlow.GetPlugin(flow_name)
if not flow_cls.category:
raise access_control.UnauthorizedAccess(
            "Flow %s can't be started via the API." % flow_name) | Checks whether a given user can start a given flow. | Below is the instruction that describes the task:
### Input:
Checks whether a given user can start a given flow.
### Response:
def CheckIfCanStartClientFlow(self, username, flow_name):
"""Checks whether a given user can start a given flow."""
del username # Unused.
flow_cls = flow.GRRFlow.GetPlugin(flow_name)
if not flow_cls.category:
raise access_control.UnauthorizedAccess(
"Flow %s can't be started via the API." % flow_name) |
def attack(a, d, s):
"""
Linear ADS fading attack stream generator, useful to be multiplied with a
given stream.
Parameters
----------
a :
"Attack" time, in number of samples.
d :
"Decay" time, in number of samples.
s :
"Sustain" amplitude level (should be based on attack amplitude).
The sustain can be a Stream, if desired.
Returns
-------
Stream instance yielding an endless envelope, or a finite envelope if the
sustain input is a finite Stream. The attack amplitude is is 1.0.
"""
# Configure sustain possibilities
if isinstance(s, collections.Iterable):
it_s = iter(s)
s = next(it_s)
else:
it_s = None
# Attack and decay lines
m_a = 1. / a
m_d = (s - 1.) / d
len_a = int(a + .5)
len_d = int(d + .5)
for sample in xrange(len_a):
yield sample * m_a
for sample in xrange(len_d):
yield 1. + sample * m_d
# Sustain!
if it_s is None:
while True:
yield s
else:
for s in it_s:
yield s | Linear ADS fading attack stream generator, useful to be multiplied with a
given stream.
Parameters
----------
a :
"Attack" time, in number of samples.
d :
"Decay" time, in number of samples.
s :
"Sustain" amplitude level (should be based on attack amplitude).
The sustain can be a Stream, if desired.
Returns
-------
Stream instance yielding an endless envelope, or a finite envelope if the
    sustain input is a finite Stream. The attack amplitude is is 1.0. | Below is the instruction that describes the task:
### Input:
Linear ADS fading attack stream generator, useful to be multiplied with a
given stream.
Parameters
----------
a :
"Attack" time, in number of samples.
d :
"Decay" time, in number of samples.
s :
"Sustain" amplitude level (should be based on attack amplitude).
The sustain can be a Stream, if desired.
Returns
-------
Stream instance yielding an endless envelope, or a finite envelope if the
sustain input is a finite Stream. The attack amplitude is is 1.0.
### Response:
def attack(a, d, s):
"""
Linear ADS fading attack stream generator, useful to be multiplied with a
given stream.
Parameters
----------
a :
"Attack" time, in number of samples.
d :
"Decay" time, in number of samples.
s :
"Sustain" amplitude level (should be based on attack amplitude).
The sustain can be a Stream, if desired.
Returns
-------
Stream instance yielding an endless envelope, or a finite envelope if the
sustain input is a finite Stream. The attack amplitude is is 1.0.
"""
# Configure sustain possibilities
if isinstance(s, collections.Iterable):
it_s = iter(s)
s = next(it_s)
else:
it_s = None
# Attack and decay lines
m_a = 1. / a
m_d = (s - 1.) / d
len_a = int(a + .5)
len_d = int(d + .5)
for sample in xrange(len_a):
yield sample * m_a
for sample in xrange(len_d):
yield 1. + sample * m_d
# Sustain!
if it_s is None:
while True:
yield s
else:
for s in it_s:
yield s |
def _check_start_timestamp(self):
"""Check that starting timestamp exists for cumulative metrics."""
if self.descriptor.type in (
metric_descriptor.MetricDescriptorType.CUMULATIVE_INT64,
metric_descriptor.MetricDescriptorType.CUMULATIVE_DOUBLE,
metric_descriptor.MetricDescriptorType.CUMULATIVE_DISTRIBUTION,
):
for ts in self.time_series:
if ts.start_timestamp is None:
raise ValueError("time_series.start_timestamp must exist "
                    raise ValueError("time_series.start_timestamp must exist "
                                     "for cumulative metrics") | Check that starting timestamp exists for cumulative metrics. | Below is the instruction that describes the task:
### Input:
Check that starting timestamp exists for cumulative metrics.
### Response:
def _check_start_timestamp(self):
"""Check that starting timestamp exists for cumulative metrics."""
if self.descriptor.type in (
metric_descriptor.MetricDescriptorType.CUMULATIVE_INT64,
metric_descriptor.MetricDescriptorType.CUMULATIVE_DOUBLE,
metric_descriptor.MetricDescriptorType.CUMULATIVE_DISTRIBUTION,
):
for ts in self.time_series:
if ts.start_timestamp is None:
raise ValueError("time_series.start_timestamp must exist "
"for cumulative metrics") |
def _convert_to_from(self, to_mag, from_mag, fromVMag=None):
""" Converts from or to V mag using the conversion tables
:param to_mag: uppercase magnitude letter i.e. 'V' or 'K'
:param from_mag: uppercase magnitude letter i.e. 'V' or 'K'
:param fromVMag: MagV if from_mag is 'V'
:return: estimated magnitude for to_mag from from_mag
"""
lumtype = self.spectral_type.lumType
# rounds decimal types, TODO perhaps we should interpolate?
specClass = self.spectral_type.roundedSpecClass
if not specClass: # TODO investigate implications of this
raise ValueError('Can not convert when no spectral class is given')
if lumtype not in ('V', ''):
raise ValueError("Can only convert for main sequence stars. Got {0} type".format(lumtype))
if to_mag == 'V':
col, sign = self.column_for_V_conversion[from_mag]
try: # TODO replace with pandas table
offset = float(magDict[specClass][col])
except KeyError:
raise ValueError('No data available to convert those magnitudes for that spectral type')
if math.isnan(offset):
raise ValueError('No data available to convert those magnitudes for that spectral type')
else:
from_mag_val = self.__dict__['mag'+from_mag] # safer than eval
if isNanOrNone(from_mag_val):
# logger.debug('2 '+from_mag)
raise ValueError('You cannot convert from a magnitude you have not specified in class')
return from_mag_val + (offset*sign)
elif from_mag == 'V':
if fromVMag is None:
# trying to second guess here could mess up a K->B calulation by using the intermediate measured V. While
# this would probably be preferable it is not was was asked and therefore could give unexpected results
raise ValueError('Must give fromVMag, even if it is self.magV')
col, sign = self.column_for_V_conversion[to_mag]
try:
offset = float(magDict[specClass][col])
except KeyError:
raise ValueError('No data available to convert those magnitudes for that spectral type')
if math.isnan(offset):
raise ValueError('No data available to convert those magnitudes for that spectral type')
else:
return fromVMag + (offset*sign*-1) # -1 as we are now converting the other way
else:
raise ValueError('Can only convert from and to V magnitude. Use .convert() instead') | Converts from or to V mag using the conversion tables
:param to_mag: uppercase magnitude letter i.e. 'V' or 'K'
:param from_mag: uppercase magnitude letter i.e. 'V' or 'K'
:param fromVMag: MagV if from_mag is 'V'
    :return: estimated magnitude for to_mag from from_mag | Below is the instruction that describes the task:
### Input:
Converts from or to V mag using the conversion tables
:param to_mag: uppercase magnitude letter i.e. 'V' or 'K'
:param from_mag: uppercase magnitude letter i.e. 'V' or 'K'
:param fromVMag: MagV if from_mag is 'V'
:return: estimated magnitude for to_mag from from_mag
### Response:
def _convert_to_from(self, to_mag, from_mag, fromVMag=None):
""" Converts from or to V mag using the conversion tables
:param to_mag: uppercase magnitude letter i.e. 'V' or 'K'
:param from_mag: uppercase magnitude letter i.e. 'V' or 'K'
:param fromVMag: MagV if from_mag is 'V'
:return: estimated magnitude for to_mag from from_mag
"""
lumtype = self.spectral_type.lumType
# rounds decimal types, TODO perhaps we should interpolate?
specClass = self.spectral_type.roundedSpecClass
if not specClass: # TODO investigate implications of this
raise ValueError('Can not convert when no spectral class is given')
if lumtype not in ('V', ''):
raise ValueError("Can only convert for main sequence stars. Got {0} type".format(lumtype))
if to_mag == 'V':
col, sign = self.column_for_V_conversion[from_mag]
try: # TODO replace with pandas table
offset = float(magDict[specClass][col])
except KeyError:
raise ValueError('No data available to convert those magnitudes for that spectral type')
if math.isnan(offset):
raise ValueError('No data available to convert those magnitudes for that spectral type')
else:
from_mag_val = self.__dict__['mag'+from_mag] # safer than eval
if isNanOrNone(from_mag_val):
# logger.debug('2 '+from_mag)
raise ValueError('You cannot convert from a magnitude you have not specified in class')
return from_mag_val + (offset*sign)
elif from_mag == 'V':
if fromVMag is None:
# trying to second guess here could mess up a K->B calulation by using the intermediate measured V. While
# this would probably be preferable it is not was was asked and therefore could give unexpected results
raise ValueError('Must give fromVMag, even if it is self.magV')
col, sign = self.column_for_V_conversion[to_mag]
try:
offset = float(magDict[specClass][col])
except KeyError:
raise ValueError('No data available to convert those magnitudes for that spectral type')
if math.isnan(offset):
raise ValueError('No data available to convert those magnitudes for that spectral type')
else:
return fromVMag + (offset*sign*-1) # -1 as we are now converting the other way
else:
raise ValueError('Can only convert from and to V magnitude. Use .convert() instead') |
def _apply(self, ctx: ExtensionContext) -> AugmentedDict:
"""
Replaces any {{env::*}} directives with it's actual environment variable value or a default.
Args:
ctx: The processing context.
Returns:
Returns the altered node key and value.
"""
node_key, node_value = ctx.node
def process(pattern: Pattern[str], _str: str) -> str:
_match = pattern.match(_str)
if _match is None:
return _str
# We got a match
# Group 0: Whole match; Group 1: Our placeholder; Group 2: The environment variable
placeholder, envvar = _match.group(1), _match.group(2)
envvalue = os.environ.get(envvar, None)
if envvalue is None and self.fail_on_unset:
raise ExtensionError("Environment variable '{}' is unset.".format(envvar))
return _str.replace(placeholder, envvalue or self.default)
_pattern = re.compile(self.__pattern__)
node_key = process(_pattern, node_key)
node_value = process(_pattern, node_value)
return {node_key: node_value} | Replaces any {{env::*}} directives with it's actual environment variable value or a default.
Args:
ctx: The processing context.
Returns:
        Returns the altered node key and value. | Below is the instruction that describes the task:
### Input:
Replaces any {{env::*}} directives with it's actual environment variable value or a default.
Args:
ctx: The processing context.
Returns:
Returns the altered node key and value.
### Response:
def _apply(self, ctx: ExtensionContext) -> AugmentedDict:
"""
Replaces any {{env::*}} directives with it's actual environment variable value or a default.
Args:
ctx: The processing context.
Returns:
Returns the altered node key and value.
"""
node_key, node_value = ctx.node
def process(pattern: Pattern[str], _str: str) -> str:
_match = pattern.match(_str)
if _match is None:
return _str
# We got a match
# Group 0: Whole match; Group 1: Our placeholder; Group 2: The environment variable
placeholder, envvar = _match.group(1), _match.group(2)
envvalue = os.environ.get(envvar, None)
if envvalue is None and self.fail_on_unset:
raise ExtensionError("Environment variable '{}' is unset.".format(envvar))
return _str.replace(placeholder, envvalue or self.default)
_pattern = re.compile(self.__pattern__)
node_key = process(_pattern, node_key)
node_value = process(_pattern, node_value)
return {node_key: node_value} |
def ParseFileEntry(self, parser_mediator, file_entry):
"""Parses a file entry.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_entry (dfvfs.FileEntry): a file entry.
"""
stat_object = file_entry.GetStat()
if not stat_object:
return
file_system_type = self._GetFileSystemTypeFromFileEntry(file_entry)
event_data = FileStatEventData()
event_data.file_entry_type = stat_object.type
event_data.file_size = getattr(stat_object, 'size', None)
event_data.file_system_type = file_system_type
event_data.is_allocated = file_entry.IsAllocated()
if file_entry.access_time:
event = time_events.DateTimeValuesEvent(
file_entry.access_time, definitions.TIME_DESCRIPTION_LAST_ACCESS)
parser_mediator.ProduceEventWithEventData(event, event_data)
if file_entry.creation_time:
event = time_events.DateTimeValuesEvent(
file_entry.creation_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
if file_entry.change_time:
event = time_events.DateTimeValuesEvent(
file_entry.change_time, definitions.TIME_DESCRIPTION_CHANGE)
parser_mediator.ProduceEventWithEventData(event, event_data)
if file_entry.modification_time:
event = time_events.DateTimeValuesEvent(
file_entry.modification_time,
definitions.TIME_DESCRIPTION_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
for time_attribute, usage in self._TIMESTAMP_DESCRIPTIONS.items():
posix_time = getattr(stat_object, time_attribute, None)
if posix_time is None:
continue
nano_time_attribute = '{0:s}_nano'.format(time_attribute)
nano_time_attribute = getattr(stat_object, nano_time_attribute, None)
timestamp = posix_time * 1000000
if nano_time_attribute is not None:
# Note that the _nano values are in intervals of 100th nano seconds.
micro_time_attribute, _ = divmod(nano_time_attribute, 10)
timestamp += micro_time_attribute
# TSK will return 0 if the timestamp is not set.
if (file_entry.type_indicator == dfvfs_definitions.TYPE_INDICATOR_TSK and
not timestamp):
continue
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, usage)
parser_mediator.ProduceEventWithEventData(event, event_data) | Parses a file entry.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
      file_entry (dfvfs.FileEntry): a file entry. | Below is the instruction that describes the task:
### Input:
Parses a file entry.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_entry (dfvfs.FileEntry): a file entry.
### Response:
def ParseFileEntry(self, parser_mediator, file_entry):
"""Parses a file entry.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_entry (dfvfs.FileEntry): a file entry.
"""
stat_object = file_entry.GetStat()
if not stat_object:
return
file_system_type = self._GetFileSystemTypeFromFileEntry(file_entry)
event_data = FileStatEventData()
event_data.file_entry_type = stat_object.type
event_data.file_size = getattr(stat_object, 'size', None)
event_data.file_system_type = file_system_type
event_data.is_allocated = file_entry.IsAllocated()
if file_entry.access_time:
event = time_events.DateTimeValuesEvent(
file_entry.access_time, definitions.TIME_DESCRIPTION_LAST_ACCESS)
parser_mediator.ProduceEventWithEventData(event, event_data)
if file_entry.creation_time:
event = time_events.DateTimeValuesEvent(
file_entry.creation_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
if file_entry.change_time:
event = time_events.DateTimeValuesEvent(
file_entry.change_time, definitions.TIME_DESCRIPTION_CHANGE)
parser_mediator.ProduceEventWithEventData(event, event_data)
if file_entry.modification_time:
event = time_events.DateTimeValuesEvent(
file_entry.modification_time,
definitions.TIME_DESCRIPTION_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
for time_attribute, usage in self._TIMESTAMP_DESCRIPTIONS.items():
posix_time = getattr(stat_object, time_attribute, None)
if posix_time is None:
continue
nano_time_attribute = '{0:s}_nano'.format(time_attribute)
nano_time_attribute = getattr(stat_object, nano_time_attribute, None)
timestamp = posix_time * 1000000
if nano_time_attribute is not None:
# Note that the _nano values are in intervals of 100th nano seconds.
micro_time_attribute, _ = divmod(nano_time_attribute, 10)
timestamp += micro_time_attribute
# TSK will return 0 if the timestamp is not set.
if (file_entry.type_indicator == dfvfs_definitions.TYPE_INDICATOR_TSK and
not timestamp):
continue
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, usage)
parser_mediator.ProduceEventWithEventData(event, event_data) |
def _load_neighbors_from_external_source(self) -> None:
"""
Loads the neighbors of the node from the igraph `Graph` instance that is
wrapped by the graph that has this node.
"""
graph: SpotifyArtistGraph = self._graph
items: List[NameExternalIDPair] = graph.client.similar_artists(self.external_id)
limit: int = graph.neighbor_count if graph.neighbor_count > 0 else self._NEIGHBORS_TO_LOAD
if len(items) > limit:
del items[limit:]
for item in items:
neighbor: SpotifyArtistNode = graph.nodes.get_node_by_name(item.name,
can_validate_and_load=True,
external_id=item.external_id)
# Strangely we need this guard because the Spofity API's search method doesn't
# recognise certain artist names.
# Actually it could also be a bug in SpotifyClient.search_artists_by_name(),
# the artist name sent as a request parameter may not be encoded 100% correctly...
# Anyway, this is a working hotfix.
if neighbor is not None:
graph.add_edge(self, neighbor) | Loads the neighbors of the node from the igraph `Graph` instance that is
        wrapped by the graph that has this node. | Below is the instruction that describes the task:
### Input:
Loads the neighbors of the node from the igraph `Graph` instance that is
wrapped by the graph that has this node.
### Response:
def _load_neighbors_from_external_source(self) -> None:
"""
Loads the neighbors of the node from the igraph `Graph` instance that is
wrapped by the graph that has this node.
"""
graph: SpotifyArtistGraph = self._graph
items: List[NameExternalIDPair] = graph.client.similar_artists(self.external_id)
limit: int = graph.neighbor_count if graph.neighbor_count > 0 else self._NEIGHBORS_TO_LOAD
if len(items) > limit:
del items[limit:]
for item in items:
neighbor: SpotifyArtistNode = graph.nodes.get_node_by_name(item.name,
can_validate_and_load=True,
external_id=item.external_id)
# Strangely we need this guard because the Spofity API's search method doesn't
# recognise certain artist names.
# Actually it could also be a bug in SpotifyClient.search_artists_by_name(),
# the artist name sent as a request parameter may not be encoded 100% correctly...
# Anyway, this is a working hotfix.
if neighbor is not None:
graph.add_edge(self, neighbor) |
def get_patient_mhc_haplotype(job, patient_dict):
"""
Convenience function to get the mhc haplotype from the patient dict
:param dict patient_dict: dict of patient info
:return: The MHCI and MHCII haplotypes
:rtype: toil.fileStore.FileID
"""
haplotype_archive = job.fileStore.readGlobalFile(patient_dict['hla_haplotype_files'])
haplotype_archive = untargz(haplotype_archive, os.getcwd())
output_dict = {}
for filename in 'mhci_alleles.list', 'mhcii_alleles.list':
output_dict[filename] = job.fileStore.writeGlobalFile(os.path.join(haplotype_archive,
filename))
return output_dict | Convenience function to get the mhc haplotype from the patient dict
:param dict patient_dict: dict of patient info
:return: The MHCI and MHCII haplotypes
    :rtype: toil.fileStore.FileID | Below is the instruction that describes the task:
### Input:
Convenience function to get the mhc haplotype from the patient dict
:param dict patient_dict: dict of patient info
:return: The MHCI and MHCII haplotypes
:rtype: toil.fileStore.FileID
### Response:
def get_patient_mhc_haplotype(job, patient_dict):
"""
Convenience function to get the mhc haplotype from the patient dict
:param dict patient_dict: dict of patient info
:return: The MHCI and MHCII haplotypes
:rtype: toil.fileStore.FileID
"""
haplotype_archive = job.fileStore.readGlobalFile(patient_dict['hla_haplotype_files'])
haplotype_archive = untargz(haplotype_archive, os.getcwd())
output_dict = {}
for filename in 'mhci_alleles.list', 'mhcii_alleles.list':
output_dict[filename] = job.fileStore.writeGlobalFile(os.path.join(haplotype_archive,
filename))
return output_dict |
def generate_john_smith_chunk(path_to_original):
'''
This _looks_ like a Chunk only in that it generates StreamItem
instances when iterated upon.
'''
## Every StreamItem has a stream_time property. It usually comes
## from the document creation time. Here, we assume the JS corpus
## was created at one moment at the end of 1998:
creation_time = '1998-12-31T23:59:59.999999Z'
correct_time = 915148799
if not os.path.isabs(path_to_original):
path_to_original = os.path.join(os.getcwd(), path_to_original)
## iterate over the files in the 35 input directories
for label_id in range(35):
dir_path = os.path.join(path_to_original, str(label_id))
fnames = os.listdir(dir_path)
fnames.sort()
for fname in fnames:
stream_item = streamcorpus.make_stream_item(
creation_time,
## make up an abs_url
os.path.join(
'john-smith-corpus', str(label_id), fname))
if int(stream_item.stream_time.epoch_ticks) != correct_time:
raise PipelineBaseException('wrong stream_time construction: %r-->%r != %r'\
% (creation_time, stream_item.stream_time.epoch_ticks,
correct_time))
## These docs came from the authors of the paper cited above.
stream_item.source = 'bagga-and-baldwin'
## build a ContentItem for the body
body = streamcorpus.ContentItem()
raw_string = open(os.path.join(dir_path, fname)).read()
## We know that this is already clean and has nothing
## tricky in it, because we manually cleansed it. To
## illustrate how we stick all strings into thrift, we
## convert this to unicode (which introduces no changes)
## and then encode it as utf-8, which also introduces no
## changes. Thrift stores strings as 8-bit character
## strings.
# http://www.mail-archive.com/thrift-user@incubator.apache.org/msg00210.html
body.clean_visible = unicode(raw_string).encode('utf8')
## attach the content_item to the stream_item
stream_item.body = body
stream_item.body.language = streamcorpus.Language(code='en', name='ENGLISH')
## The authors also annotated the corpus
anno = streamcorpus.Annotator()
anno.annotator_id = 'bagga-and-baldwin'
anno.annotation_time = stream_item.stream_time
## build a Label for the doc-level label:
rating = streamcorpus.Rating()
rating.annotator = anno
rating.target = streamcorpus.Target(target_id = str(label_id)) # must be string
rating.contains_mention = True
rating.mentions = ['john', 'smith']
## put this one label in the array of labels
streamcorpus.add_annotation(stream_item, rating)
## provide this stream_item to the pipeline
yield stream_item | This _looks_ like a Chunk only in that it generates StreamItem
    instances when iterated upon. | Below is the instruction that describes the task:
### Input:
This _looks_ like a Chunk only in that it generates StreamItem
instances when iterated upon.
### Response:
def generate_john_smith_chunk(path_to_original):
'''
This _looks_ like a Chunk only in that it generates StreamItem
instances when iterated upon.
'''
## Every StreamItem has a stream_time property. It usually comes
## from the document creation time. Here, we assume the JS corpus
## was created at one moment at the end of 1998:
creation_time = '1998-12-31T23:59:59.999999Z'
correct_time = 915148799
if not os.path.isabs(path_to_original):
path_to_original = os.path.join(os.getcwd(), path_to_original)
## iterate over the files in the 35 input directories
for label_id in range(35):
dir_path = os.path.join(path_to_original, str(label_id))
fnames = os.listdir(dir_path)
fnames.sort()
for fname in fnames:
stream_item = streamcorpus.make_stream_item(
creation_time,
## make up an abs_url
os.path.join(
'john-smith-corpus', str(label_id), fname))
if int(stream_item.stream_time.epoch_ticks) != correct_time:
raise PipelineBaseException('wrong stream_time construction: %r-->%r != %r'\
% (creation_time, stream_item.stream_time.epoch_ticks,
correct_time))
## These docs came from the authors of the paper cited above.
stream_item.source = 'bagga-and-baldwin'
## build a ContentItem for the body
body = streamcorpus.ContentItem()
raw_string = open(os.path.join(dir_path, fname)).read()
## We know that this is already clean and has nothing
## tricky in it, because we manually cleansed it. To
## illustrate how we stick all strings into thrift, we
## convert this to unicode (which introduces no changes)
## and then encode it as utf-8, which also introduces no
## changes. Thrift stores strings as 8-bit character
## strings.
# http://www.mail-archive.com/thrift-user@incubator.apache.org/msg00210.html
body.clean_visible = unicode(raw_string).encode('utf8')
## attach the content_item to the stream_item
stream_item.body = body
stream_item.body.language = streamcorpus.Language(code='en', name='ENGLISH')
## The authors also annotated the corpus
anno = streamcorpus.Annotator()
anno.annotator_id = 'bagga-and-baldwin'
anno.annotation_time = stream_item.stream_time
## build a Label for the doc-level label:
rating = streamcorpus.Rating()
rating.annotator = anno
rating.target = streamcorpus.Target(target_id = str(label_id)) # must be string
rating.contains_mention = True
rating.mentions = ['john', 'smith']
## put this one label in the array of labels
streamcorpus.add_annotation(stream_item, rating)
## provide this stream_item to the pipeline
yield stream_item |
def find(self, sub, in_current_line=False, include_current_position=False,
ignore_case=False, count=1):
"""
Find `text` after the cursor, return position relative to the cursor
position. Return `None` if nothing was found.
:param count: Find the n-th occurance.
"""
assert isinstance(ignore_case, bool)
if in_current_line:
text = self.current_line_after_cursor
else:
text = self.text_after_cursor
if not include_current_position:
if len(text) == 0:
return # (Otherwise, we always get a match for the empty string.)
else:
text = text[1:]
flags = re.IGNORECASE if ignore_case else 0
iterator = re.finditer(re.escape(sub), text, flags)
try:
for i, match in enumerate(iterator):
if i + 1 == count:
if include_current_position:
return match.start(0)
else:
return match.start(0) + 1
except StopIteration:
pass | Find `text` after the cursor, return position relative to the cursor
position. Return `None` if nothing was found.
:param count: Find the n-th occurance. | Below is the instruction that describes the task:
### Input:
Find `text` after the cursor, return position relative to the cursor
position. Return `None` if nothing was found.
:param count: Find the n-th occurance.
### Response:
def find(self, sub, in_current_line=False, include_current_position=False,
ignore_case=False, count=1):
"""
Find `text` after the cursor, return position relative to the cursor
position. Return `None` if nothing was found.
:param count: Find the n-th occurance.
"""
assert isinstance(ignore_case, bool)
if in_current_line:
text = self.current_line_after_cursor
else:
text = self.text_after_cursor
if not include_current_position:
if len(text) == 0:
return # (Otherwise, we always get a match for the empty string.)
else:
text = text[1:]
flags = re.IGNORECASE if ignore_case else 0
iterator = re.finditer(re.escape(sub), text, flags)
try:
for i, match in enumerate(iterator):
if i + 1 == count:
if include_current_position:
return match.start(0)
else:
return match.start(0) + 1
except StopIteration:
pass |
def referenced_tables(self):
"""Return referenced tables from job statistics, if present.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.referencedTables
:rtype: list of dict
:returns: mappings describing the query plan, or an empty list
if the query has not yet completed.
"""
tables = []
datasets_by_project_name = {}
for table in self._job_statistics().get("referencedTables", ()):
t_project = table["projectId"]
ds_id = table["datasetId"]
t_dataset = datasets_by_project_name.get((t_project, ds_id))
if t_dataset is None:
t_dataset = DatasetReference(t_project, ds_id)
datasets_by_project_name[(t_project, ds_id)] = t_dataset
t_name = table["tableId"]
tables.append(t_dataset.table(t_name))
return tables | Return referenced tables from job statistics, if present.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.referencedTables
:rtype: list of dict
:returns: mappings describing the query plan, or an empty list
if the query has not yet completed. | Below is the instruction that describes the task:
### Input:
Return referenced tables from job statistics, if present.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.referencedTables
:rtype: list of dict
:returns: mappings describing the query plan, or an empty list
if the query has not yet completed.
### Response:
def referenced_tables(self):
"""Return referenced tables from job statistics, if present.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.referencedTables
:rtype: list of dict
:returns: mappings describing the query plan, or an empty list
if the query has not yet completed.
"""
tables = []
datasets_by_project_name = {}
for table in self._job_statistics().get("referencedTables", ()):
t_project = table["projectId"]
ds_id = table["datasetId"]
t_dataset = datasets_by_project_name.get((t_project, ds_id))
if t_dataset is None:
t_dataset = DatasetReference(t_project, ds_id)
datasets_by_project_name[(t_project, ds_id)] = t_dataset
t_name = table["tableId"]
tables.append(t_dataset.table(t_name))
return tables |
def _make_index(self, fasta, index):
""" Index a single, one-sequence fasta-file"""
out = open(index, "wb")
f = open(fasta)
# Skip first line of fasta-file
line = f.readline()
offset = f.tell()
line = f.readline()
while line:
out.write(pack(self.pack_char, offset))
offset = f.tell()
line = f.readline()
f.close()
out.close() | Index a single, one-sequence fasta-file | Below is the instruction that describes the task:
### Input:
Index a single, one-sequence fasta-file
### Response:
def _make_index(self, fasta, index):
""" Index a single, one-sequence fasta-file"""
out = open(index, "wb")
f = open(fasta)
# Skip first line of fasta-file
line = f.readline()
offset = f.tell()
line = f.readline()
while line:
out.write(pack(self.pack_char, offset))
offset = f.tell()
line = f.readline()
f.close()
out.close() |
def handle_event(self, packet):
"""Handle incoming packet from rflink gateway."""
if packet.get('command'):
task = self.send_command_ack(packet['id'], packet['command'])
self.loop.create_task(task) | Handle incoming packet from rflink gateway. | Below is the instruction that describes the task:
### Input:
Handle incoming packet from rflink gateway.
### Response:
def handle_event(self, packet):
"""Handle incoming packet from rflink gateway."""
if packet.get('command'):
task = self.send_command_ack(packet['id'], packet['command'])
self.loop.create_task(task) |
def _parse_the_ned_position_results(
self,
ra,
dec,
nedResults):
"""
*parse the ned results*
**Key Arguments:**
- ``ra`` -- the search ra
- ``dec`` -- the search dec
**Return:**
- ``results`` -- list of result dictionaries
"""
self.log.info('starting the ``_parse_the_ned_results`` method')
results = []
resultLen = 0
if nedResults:
# OPEN THE RESULT FILE FROM NED
pathToReadFile = nedResults
try:
self.log.debug("attempting to open the file %s" %
(pathToReadFile,))
readFile = codecs.open(
pathToReadFile, encoding='utf-8', mode='rb')
thisData = readFile.read()
readFile.close()
except IOError, e:
message = 'could not open the file %s' % (pathToReadFile,)
self.log.critical(message)
raise IOError(message)
readFile.close()
# CHECK FOR ERRORS
if "Results from query to NASA/IPAC Extragalactic Database" not in thisData:
print "something went wrong with the NED query"
self.log.error(
"something went wrong with the NED query" % locals())
sys.exit(0)
# SEARCH FROM MATCHES IN RESULTS FILE
matchObject = re.search(
r"No\.\|Object Name.*?\n(.*)", thisData, re.S)
if matchObject:
theseLines = string.split(matchObject.group(), '\n')
resultLen = len(theseLines)
csvReader = csv.DictReader(
theseLines, dialect='excel', delimiter='|', quotechar='"')
for row in csvReader:
thisEntry = {"searchRa": ra, "searchDec": dec,
"matchName": row["Object Name"].strip()}
results.append(thisEntry)
if self.nearestOnly:
break
self.log.info('completed the ``_parse_the_ned_results`` method')
return results, resultLen | *parse the ned results*
**Key Arguments:**
- ``ra`` -- the search ra
- ``dec`` -- the search dec
**Return:**
- ``results`` -- list of result dictionaries | Below is the instruction that describes the task:
### Input:
*parse the ned results*
**Key Arguments:**
- ``ra`` -- the search ra
- ``dec`` -- the search dec
**Return:**
- ``results`` -- list of result dictionaries
### Response:
def _parse_the_ned_position_results(
self,
ra,
dec,
nedResults):
"""
*parse the ned results*
**Key Arguments:**
- ``ra`` -- the search ra
- ``dec`` -- the search dec
**Return:**
- ``results`` -- list of result dictionaries
"""
self.log.info('starting the ``_parse_the_ned_results`` method')
results = []
resultLen = 0
if nedResults:
# OPEN THE RESULT FILE FROM NED
pathToReadFile = nedResults
try:
self.log.debug("attempting to open the file %s" %
(pathToReadFile,))
readFile = codecs.open(
pathToReadFile, encoding='utf-8', mode='rb')
thisData = readFile.read()
readFile.close()
except IOError, e:
message = 'could not open the file %s' % (pathToReadFile,)
self.log.critical(message)
raise IOError(message)
readFile.close()
# CHECK FOR ERRORS
if "Results from query to NASA/IPAC Extragalactic Database" not in thisData:
print "something went wrong with the NED query"
self.log.error(
"something went wrong with the NED query" % locals())
sys.exit(0)
# SEARCH FROM MATCHES IN RESULTS FILE
matchObject = re.search(
r"No\.\|Object Name.*?\n(.*)", thisData, re.S)
if matchObject:
theseLines = string.split(matchObject.group(), '\n')
resultLen = len(theseLines)
csvReader = csv.DictReader(
theseLines, dialect='excel', delimiter='|', quotechar='"')
for row in csvReader:
thisEntry = {"searchRa": ra, "searchDec": dec,
"matchName": row["Object Name"].strip()}
results.append(thisEntry)
if self.nearestOnly:
break
self.log.info('completed the ``_parse_the_ned_results`` method')
return results, resultLen |
def _add_ctc_loss(pred, seq_len, num_label, loss_type):
""" Adds CTC loss on top of pred symbol and returns the resulting symbol """
label = mx.sym.Variable('label')
if loss_type == 'warpctc':
print("Using WarpCTC Loss")
sm = _add_warp_ctc_loss(pred, seq_len, num_label, label)
else:
print("Using MXNet CTC Loss")
assert loss_type == 'ctc'
sm = _add_mxnet_ctc_loss(pred, seq_len, label)
return sm | Adds CTC loss on top of pred symbol and returns the resulting symbol | Below is the instruction that describes the task:
### Input:
Adds CTC loss on top of pred symbol and returns the resulting symbol
### Response:
def _add_ctc_loss(pred, seq_len, num_label, loss_type):
""" Adds CTC loss on top of pred symbol and returns the resulting symbol """
label = mx.sym.Variable('label')
if loss_type == 'warpctc':
print("Using WarpCTC Loss")
sm = _add_warp_ctc_loss(pred, seq_len, num_label, label)
else:
print("Using MXNet CTC Loss")
assert loss_type == 'ctc'
sm = _add_mxnet_ctc_loss(pred, seq_len, label)
return sm |
def start_container(self, image, container_name: str, repo_path: Path):
""" Starts a container with the image and name ``container_name`` and copies the repository into the container.
:type image: docker.models.images.Image
:rtype: docker.models.container.Container
"""
command = "bash -i"
if self.inherit_image:
command = "sh -i"
container = self.client.containers.run(image, command=command, detach=True, tty=True, name=container_name,
working_dir=str((Path("/srv/data") / self.cwd).resolve()),
auto_remove=True)
container.exec_run(["mkdir", "-p", "/srv/scripts"])
container.put_archive("/srv", self.tar_files(repo_path))
container.put_archive("/srv/scripts", self.tar_runner())
return container | Starts a container with the image and name ``container_name`` and copies the repository into the container.
:type image: docker.models.images.Image
:rtype: docker.models.container.Container | Below is the instruction that describes the task:
### Input:
Starts a container with the image and name ``container_name`` and copies the repository into the container.
:type image: docker.models.images.Image
:rtype: docker.models.container.Container
### Response:
def start_container(self, image, container_name: str, repo_path: Path):
""" Starts a container with the image and name ``container_name`` and copies the repository into the container.
:type image: docker.models.images.Image
:rtype: docker.models.container.Container
"""
command = "bash -i"
if self.inherit_image:
command = "sh -i"
container = self.client.containers.run(image, command=command, detach=True, tty=True, name=container_name,
working_dir=str((Path("/srv/data") / self.cwd).resolve()),
auto_remove=True)
container.exec_run(["mkdir", "-p", "/srv/scripts"])
container.put_archive("/srv", self.tar_files(repo_path))
container.put_archive("/srv/scripts", self.tar_runner())
return container |
def template_engine(self, entry, startnode):
"""The format template interpetation engine. See the comment at the
beginning of this module for the how we interpret format
specifications such as %c, %C, and so on.
"""
# print("-----")
# print(startnode)
# print(entry[0])
# print('======')
fmt = entry[0]
arg = 1
i = 0
m = escape.search(fmt)
while m:
i = m.end()
self.write(m.group('prefix'))
typ = m.group('type') or '{'
node = startnode
if m.group('child'):
try:
node = node[int(m.group('child'))]
except:
from trepan.api import debug; debug()
pass
if typ == '%': self.write('%')
elif typ == '+':
self.line_number += 1
self.indent_more()
elif typ == '-':
self.line_number += 1
self.indent_less()
elif typ == '|':
self.line_number += 1
self.write(self.indent)
# Used mostly on the LHS of an assignment
# BUILD_TUPLE_n is pretty printed and may take care of other uses.
elif typ == ',':
if (node.kind in ('unpack', 'unpack_w_parens') and
node[0].attr == 1):
self.write(',')
elif typ == 'c':
index = entry[arg]
if isinstance(index, tuple):
assert node[index[0]] == index[1], (
"at %s[%d], expected '%s' node; got '%s'" % (
node.kind, arg, index[1], node[index[0]].kind)
)
index = index[0]
assert isinstance(index, int), (
"at %s[%d], %s should be int or tuple" % (
node.kind, arg, type(index)))
self.preorder(node[index])
arg += 1
elif typ == 'p':
p = self.prec
(index, self.prec) = entry[arg]
self.preorder(node[index])
self.prec = p
arg += 1
elif typ == 'C':
low, high, sep = entry[arg]
remaining = len(node[low:high])
for subnode in node[low:high]:
self.preorder(subnode)
remaining -= 1
if remaining > 0:
self.write(sep)
pass
pass
arg += 1
elif typ == 'D':
low, high, sep = entry[arg]
remaining = len(node[low:high])
for subnode in node[low:high]:
remaining -= 1
if len(subnode) > 0:
self.preorder(subnode)
if remaining > 0:
self.write(sep)
pass
pass
pass
arg += 1
elif typ == 'x':
# This code is only used in fragments
assert isinstance(entry[arg], tuple)
arg += 1
elif typ == 'P':
p = self.prec
low, high, sep, self.prec = entry[arg]
remaining = len(node[low:high])
# remaining = len(node[low:high])
for subnode in node[low:high]:
self.preorder(subnode)
remaining -= 1
if remaining > 0:
self.write(sep)
self.prec = p
arg += 1
elif typ == '{':
d = node.__dict__
expr = m.group('expr')
# Line mapping stuff
if (hasattr(node, 'linestart') and node.linestart
and hasattr(node, 'current_line_number')):
self.source_linemap[self.current_line_number] = node.linestart
try:
self.write(eval(expr, d, d))
except:
raise
m = escape.search(fmt, i)
self.write(fmt[i:]) | The format template interpetation engine. See the comment at the
beginning of this module for the how we interpret format
specifications such as %c, %C, and so on. | Below is the instruction that describes the task:
### Input:
The format template interpetation engine. See the comment at the
beginning of this module for the how we interpret format
specifications such as %c, %C, and so on.
### Response:
def template_engine(self, entry, startnode):
"""The format template interpetation engine. See the comment at the
beginning of this module for the how we interpret format
specifications such as %c, %C, and so on.
"""
# print("-----")
# print(startnode)
# print(entry[0])
# print('======')
fmt = entry[0]
arg = 1
i = 0
m = escape.search(fmt)
while m:
i = m.end()
self.write(m.group('prefix'))
typ = m.group('type') or '{'
node = startnode
if m.group('child'):
try:
node = node[int(m.group('child'))]
except:
from trepan.api import debug; debug()
pass
if typ == '%': self.write('%')
elif typ == '+':
self.line_number += 1
self.indent_more()
elif typ == '-':
self.line_number += 1
self.indent_less()
elif typ == '|':
self.line_number += 1
self.write(self.indent)
# Used mostly on the LHS of an assignment
# BUILD_TUPLE_n is pretty printed and may take care of other uses.
elif typ == ',':
if (node.kind in ('unpack', 'unpack_w_parens') and
node[0].attr == 1):
self.write(',')
elif typ == 'c':
index = entry[arg]
if isinstance(index, tuple):
assert node[index[0]] == index[1], (
"at %s[%d], expected '%s' node; got '%s'" % (
node.kind, arg, index[1], node[index[0]].kind)
)
index = index[0]
assert isinstance(index, int), (
"at %s[%d], %s should be int or tuple" % (
node.kind, arg, type(index)))
self.preorder(node[index])
arg += 1
elif typ == 'p':
p = self.prec
(index, self.prec) = entry[arg]
self.preorder(node[index])
self.prec = p
arg += 1
elif typ == 'C':
low, high, sep = entry[arg]
remaining = len(node[low:high])
for subnode in node[low:high]:
self.preorder(subnode)
remaining -= 1
if remaining > 0:
self.write(sep)
pass
pass
arg += 1
elif typ == 'D':
low, high, sep = entry[arg]
remaining = len(node[low:high])
for subnode in node[low:high]:
remaining -= 1
if len(subnode) > 0:
self.preorder(subnode)
if remaining > 0:
self.write(sep)
pass
pass
pass
arg += 1
elif typ == 'x':
# This code is only used in fragments
assert isinstance(entry[arg], tuple)
arg += 1
elif typ == 'P':
p = self.prec
low, high, sep, self.prec = entry[arg]
remaining = len(node[low:high])
# remaining = len(node[low:high])
for subnode in node[low:high]:
self.preorder(subnode)
remaining -= 1
if remaining > 0:
self.write(sep)
self.prec = p
arg += 1
elif typ == '{':
d = node.__dict__
expr = m.group('expr')
# Line mapping stuff
if (hasattr(node, 'linestart') and node.linestart
and hasattr(node, 'current_line_number')):
self.source_linemap[self.current_line_number] = node.linestart
try:
self.write(eval(expr, d, d))
except:
raise
m = escape.search(fmt, i)
self.write(fmt[i:]) |
def openList(self, char, mLastSection):
"""
These next three functions open, continue, and close the list
element appropriate to the prefix character passed into them.
"""
result = self.closeParagraph(mLastSection)
mDTopen = False
if char == u'*':
result += u'<ul><li>'
elif char == u'#':
result += u'<ol><li>'
elif char == u':':
result += u'<dl><dd>'
elif char == u';':
result += u'<dl><dt>'
mDTopen = True
else:
result += u'<!-- ERR 1 -->'
return result, mDTopen | These next three functions open, continue, and close the list
element appropriate to the prefix character passed into them. | Below is the instruction that describes the task:
### Input:
These next three functions open, continue, and close the list
element appropriate to the prefix character passed into them.
### Response:
def openList(self, char, mLastSection):
"""
These next three functions open, continue, and close the list
element appropriate to the prefix character passed into them.
"""
result = self.closeParagraph(mLastSection)
mDTopen = False
if char == u'*':
result += u'<ul><li>'
elif char == u'#':
result += u'<ol><li>'
elif char == u':':
result += u'<dl><dd>'
elif char == u';':
result += u'<dl><dt>'
mDTopen = True
else:
result += u'<!-- ERR 1 -->'
return result, mDTopen |
def _remove_extension_for_non_raw_file(self, name):
"""
Implemented as image and video files' Cloudinary public id
shouldn't contain file extensions, otherwise Cloudinary url
would contain doubled extension - Cloudinary adds extension to url
to allow file conversion to arbitrary file, like png to jpg.
"""
file_resource_type = self._get_resource_type(name)
if file_resource_type is None or file_resource_type == self.RESOURCE_TYPE:
return name
else:
extension = self._get_file_extension(name)
return name[:-len(extension) - 1] | Implemented as image and video files' Cloudinary public id
shouldn't contain file extensions, otherwise Cloudinary url
would contain doubled extension - Cloudinary adds extension to url
to allow file conversion to arbitrary file, like png to jpg. | Below is the instruction that describes the task:
### Input:
Implemented as image and video files' Cloudinary public id
shouldn't contain file extensions, otherwise Cloudinary url
would contain doubled extension - Cloudinary adds extension to url
to allow file conversion to arbitrary file, like png to jpg.
### Response:
def _remove_extension_for_non_raw_file(self, name):
"""
Implemented as image and video files' Cloudinary public id
shouldn't contain file extensions, otherwise Cloudinary url
would contain doubled extension - Cloudinary adds extension to url
to allow file conversion to arbitrary file, like png to jpg.
"""
file_resource_type = self._get_resource_type(name)
if file_resource_type is None or file_resource_type == self.RESOURCE_TYPE:
return name
else:
extension = self._get_file_extension(name)
return name[:-len(extension) - 1] |
def get_ref(obj):
"""
Get string reference to object. Stores a weak reference in a dictionary
using the object's id as the key. If the object cannot be weakly
referenced (e.g. dictionaries, frame objects), store a strong references
in a classic dictionary.
Returns the object's id as a string.
"""
oid = id(obj)
try:
server.id2ref[oid] = obj
except TypeError:
server.id2obj[oid] = obj
return str(oid) | Get string reference to object. Stores a weak reference in a dictionary
using the object's id as the key. If the object cannot be weakly
referenced (e.g. dictionaries, frame objects), store a strong references
in a classic dictionary.
Returns the object's id as a string. | Below is the instruction that describes the task:
### Input:
Get string reference to object. Stores a weak reference in a dictionary
using the object's id as the key. If the object cannot be weakly
referenced (e.g. dictionaries, frame objects), store a strong references
in a classic dictionary.
Returns the object's id as a string.
### Response:
def get_ref(obj):
"""
Get string reference to object. Stores a weak reference in a dictionary
using the object's id as the key. If the object cannot be weakly
referenced (e.g. dictionaries, frame objects), store a strong references
in a classic dictionary.
Returns the object's id as a string.
"""
oid = id(obj)
try:
server.id2ref[oid] = obj
except TypeError:
server.id2obj[oid] = obj
return str(oid) |
def estimate_cpd(self, node, prior_type='BDeu', pseudo_counts=[], equivalent_sample_size=5):
"""
Method to estimate the CPD for a given variable.
Parameters
----------
node: int, string (any hashable python object)
The name of the variable for which the CPD is to be estimated.
prior_type: 'dirichlet', 'BDeu', 'K2',
string indicting which type of prior to use for the model parameters.
- If 'prior_type' is 'dirichlet', the following must be provided:
'pseudo_counts' = dirichlet hyperparameters; 2-D array of shape
(node_card, product of parents_card) with a "virtual" count for
each variable state in the CPD.
The virtual counts are added to the actual state counts found in the data.
(if a list is provided, a lexicographic ordering of states is assumed)
- If 'prior_type' is 'BDeu', then an 'equivalent_sample_size'
must be specified instead of 'pseudo_counts'. This is equivalent to
'prior_type=dirichlet' and using uniform 'pseudo_counts' of
`equivalent_sample_size/(node_cardinality*np.prod(parents_cardinalities))`.
- A prior_type of 'K2' is a shorthand for 'dirichlet' + setting every pseudo_count to 1,
regardless of the cardinality of the variable.
Returns
-------
CPD: TabularCPD
Examples
--------
>>> import pandas as pd
>>> from pgmpy.models import BayesianModel
>>> from pgmpy.estimators import BayesianEstimator
>>> data = pd.DataFrame(data={'A': [0, 0, 1], 'B': [0, 1, 0], 'C': [1, 1, 0]})
>>> model = BayesianModel([('A', 'C'), ('B', 'C')])
>>> estimator = BayesianEstimator(model, data)
>>> cpd_C = estimator.estimate_cpd('C', prior_type="dirichlet", pseudo_counts=[1, 2])
>>> print(cpd_C)
╒══════╤══════╤══════╤══════╤════════════════════╕
│ A │ A(0) │ A(0) │ A(1) │ A(1) │
├──────┼──────┼──────┼──────┼────────────────────┤
│ B │ B(0) │ B(1) │ B(0) │ B(1) │
├──────┼──────┼──────┼──────┼────────────────────┤
│ C(0) │ 0.25 │ 0.25 │ 0.5 │ 0.3333333333333333 │
├──────┼──────┼──────┼──────┼────────────────────┤
│ C(1) │ 0.75 │ 0.75 │ 0.5 │ 0.6666666666666666 │
╘══════╧══════╧══════╧══════╧════════════════════╛
"""
node_cardinality = len(self.state_names[node])
parents = sorted(self.model.get_parents(node))
parents_cardinalities = [len(self.state_names[parent]) for parent in parents]
cpd_shape = (node_cardinality, np.prod(parents_cardinalities, dtype=int))
if prior_type == 'K2':
pseudo_counts = np.ones(cpd_shape, dtype=int)
elif prior_type == 'BDeu':
alpha = float(equivalent_sample_size) / (node_cardinality * np.prod(parents_cardinalities))
pseudo_counts = np.ones(cpd_shape, dtype=float) * alpha
elif prior_type == 'dirichlet':
pseudo_counts = np.array(pseudo_counts)
if pseudo_counts.shape != cpd_shape:
raise ValueError("The shape of pseudo_counts must be: {shape}".format(
shape=str(cpd_shape)))
else:
raise ValueError("'prior_type' not specified")
state_counts = self.state_counts(node)
bayesian_counts = state_counts + pseudo_counts
cpd = TabularCPD(node, node_cardinality, np.array(bayesian_counts),
evidence=parents,
evidence_card=parents_cardinalities,
state_names=self.state_names)
cpd.normalize()
return cpd | Method to estimate the CPD for a given variable.
Parameters
----------
node: int, string (any hashable python object)
The name of the variable for which the CPD is to be estimated.
prior_type: 'dirichlet', 'BDeu', 'K2',
string indicting which type of prior to use for the model parameters.
- If 'prior_type' is 'dirichlet', the following must be provided:
'pseudo_counts' = dirichlet hyperparameters; 2-D array of shape
(node_card, product of parents_card) with a "virtual" count for
each variable state in the CPD.
The virtual counts are added to the actual state counts found in the data.
(if a list is provided, a lexicographic ordering of states is assumed)
- If 'prior_type' is 'BDeu', then an 'equivalent_sample_size'
must be specified instead of 'pseudo_counts'. This is equivalent to
'prior_type=dirichlet' and using uniform 'pseudo_counts' of
`equivalent_sample_size/(node_cardinality*np.prod(parents_cardinalities))`.
- A prior_type of 'K2' is a shorthand for 'dirichlet' + setting every pseudo_count to 1,
regardless of the cardinality of the variable.
Returns
-------
CPD: TabularCPD
Examples
--------
>>> import pandas as pd
>>> from pgmpy.models import BayesianModel
>>> from pgmpy.estimators import BayesianEstimator
>>> data = pd.DataFrame(data={'A': [0, 0, 1], 'B': [0, 1, 0], 'C': [1, 1, 0]})
>>> model = BayesianModel([('A', 'C'), ('B', 'C')])
>>> estimator = BayesianEstimator(model, data)
>>> cpd_C = estimator.estimate_cpd('C', prior_type="dirichlet", pseudo_counts=[1, 2])
>>> print(cpd_C)
╒══════╤══════╤══════╤══════╤════════════════════╕
│ A │ A(0) │ A(0) │ A(1) │ A(1) │
├──────┼──────┼──────┼──────┼────────────────────┤
│ B │ B(0) │ B(1) │ B(0) │ B(1) │
├──────┼──────┼──────┼──────┼────────────────────┤
│ C(0) │ 0.25 │ 0.25 │ 0.5 │ 0.3333333333333333 │
├──────┼──────┼──────┼──────┼────────────────────┤
│ C(1) │ 0.75 │ 0.75 │ 0.5 │ 0.6666666666666666 │
╘══════╧══════╧══════╧══════╧════════════════════╛ | Below is the instruction that describes the task:
### Input:
Method to estimate the CPD for a given variable.
Parameters
----------
node: int, string (any hashable python object)
The name of the variable for which the CPD is to be estimated.
prior_type: 'dirichlet', 'BDeu', 'K2',
string indicting which type of prior to use for the model parameters.
- If 'prior_type' is 'dirichlet', the following must be provided:
'pseudo_counts' = dirichlet hyperparameters; 2-D array of shape
(node_card, product of parents_card) with a "virtual" count for
each variable state in the CPD.
The virtual counts are added to the actual state counts found in the data.
(if a list is provided, a lexicographic ordering of states is assumed)
- If 'prior_type' is 'BDeu', then an 'equivalent_sample_size'
must be specified instead of 'pseudo_counts'. This is equivalent to
'prior_type=dirichlet' and using uniform 'pseudo_counts' of
`equivalent_sample_size/(node_cardinality*np.prod(parents_cardinalities))`.
- A prior_type of 'K2' is a shorthand for 'dirichlet' + setting every pseudo_count to 1,
regardless of the cardinality of the variable.
Returns
-------
CPD: TabularCPD
Examples
--------
>>> import pandas as pd
>>> from pgmpy.models import BayesianModel
>>> from pgmpy.estimators import BayesianEstimator
>>> data = pd.DataFrame(data={'A': [0, 0, 1], 'B': [0, 1, 0], 'C': [1, 1, 0]})
>>> model = BayesianModel([('A', 'C'), ('B', 'C')])
>>> estimator = BayesianEstimator(model, data)
>>> cpd_C = estimator.estimate_cpd('C', prior_type="dirichlet", pseudo_counts=[1, 2])
>>> print(cpd_C)
╒══════╤══════╤══════╤══════╤════════════════════╕
│ A │ A(0) │ A(0) │ A(1) │ A(1) │
├──────┼──────┼──────┼──────┼────────────────────┤
│ B │ B(0) │ B(1) │ B(0) │ B(1) │
├──────┼──────┼──────┼──────┼────────────────────┤
│ C(0) │ 0.25 │ 0.25 │ 0.5 │ 0.3333333333333333 │
├──────┼──────┼──────┼──────┼────────────────────┤
│ C(1) │ 0.75 │ 0.75 │ 0.5 │ 0.6666666666666666 │
╘══════╧══════╧══════╧══════╧════════════════════╛
### Response:
def estimate_cpd(self, node, prior_type='BDeu', pseudo_counts=[], equivalent_sample_size=5):
"""
Method to estimate the CPD for a given variable.
Parameters
----------
node: int, string (any hashable python object)
The name of the variable for which the CPD is to be estimated.
prior_type: 'dirichlet', 'BDeu', 'K2',
string indicting which type of prior to use for the model parameters.
- If 'prior_type' is 'dirichlet', the following must be provided:
'pseudo_counts' = dirichlet hyperparameters; 2-D array of shape
(node_card, product of parents_card) with a "virtual" count for
each variable state in the CPD.
The virtual counts are added to the actual state counts found in the data.
(if a list is provided, a lexicographic ordering of states is assumed)
- If 'prior_type' is 'BDeu', then an 'equivalent_sample_size'
must be specified instead of 'pseudo_counts'. This is equivalent to
'prior_type=dirichlet' and using uniform 'pseudo_counts' of
`equivalent_sample_size/(node_cardinality*np.prod(parents_cardinalities))`.
- A prior_type of 'K2' is a shorthand for 'dirichlet' + setting every pseudo_count to 1,
regardless of the cardinality of the variable.
Returns
-------
CPD: TabularCPD
Examples
--------
>>> import pandas as pd
>>> from pgmpy.models import BayesianModel
>>> from pgmpy.estimators import BayesianEstimator
>>> data = pd.DataFrame(data={'A': [0, 0, 1], 'B': [0, 1, 0], 'C': [1, 1, 0]})
>>> model = BayesianModel([('A', 'C'), ('B', 'C')])
>>> estimator = BayesianEstimator(model, data)
>>> cpd_C = estimator.estimate_cpd('C', prior_type="dirichlet", pseudo_counts=[1, 2])
>>> print(cpd_C)
╒══════╤══════╤══════╤══════╤════════════════════╕
│ A │ A(0) │ A(0) │ A(1) │ A(1) │
├──────┼──────┼──────┼──────┼────────────────────┤
│ B │ B(0) │ B(1) │ B(0) │ B(1) │
├──────┼──────┼──────┼──────┼────────────────────┤
│ C(0) │ 0.25 │ 0.25 │ 0.5 │ 0.3333333333333333 │
├──────┼──────┼──────┼──────┼────────────────────┤
│ C(1) │ 0.75 │ 0.75 │ 0.5 │ 0.6666666666666666 │
╘══════╧══════╧══════╧══════╧════════════════════╛
"""
node_cardinality = len(self.state_names[node])
parents = sorted(self.model.get_parents(node))
parents_cardinalities = [len(self.state_names[parent]) for parent in parents]
cpd_shape = (node_cardinality, np.prod(parents_cardinalities, dtype=int))
if prior_type == 'K2':
pseudo_counts = np.ones(cpd_shape, dtype=int)
elif prior_type == 'BDeu':
alpha = float(equivalent_sample_size) / (node_cardinality * np.prod(parents_cardinalities))
pseudo_counts = np.ones(cpd_shape, dtype=float) * alpha
elif prior_type == 'dirichlet':
pseudo_counts = np.array(pseudo_counts)
if pseudo_counts.shape != cpd_shape:
raise ValueError("The shape of pseudo_counts must be: {shape}".format(
shape=str(cpd_shape)))
else:
raise ValueError("'prior_type' not specified")
state_counts = self.state_counts(node)
bayesian_counts = state_counts + pseudo_counts
cpd = TabularCPD(node, node_cardinality, np.array(bayesian_counts),
evidence=parents,
evidence_card=parents_cardinalities,
state_names=self.state_names)
cpd.normalize()
return cpd |
async def remove_tracks(self, *tracks):
"""Remove one or more tracks from the current user’s ‘Your Music’ library.
Parameters
----------
tracks : Sequence[Union[Track, str]]
A sequence of track objects or spotify IDs
"""
_tracks = [(obj if isinstance(obj, str) else obj.id) for obj in tracks]
await self.user.http.delete_saved_tracks(','.join(_tracks)) | Remove one or more tracks from the current user’s ‘Your Music’ library.
Parameters
----------
tracks : Sequence[Union[Track, str]]
A sequence of track objects or spotify IDs | Below is the the instruction that describes the task:
### Input:
Remove one or more tracks from the current user’s ‘Your Music’ library.
Parameters
----------
tracks : Sequence[Union[Track, str]]
A sequence of track objects or spotify IDs
### Response:
async def remove_tracks(self, *tracks):
"""Remove one or more tracks from the current user’s ‘Your Music’ library.
Parameters
----------
tracks : Sequence[Union[Track, str]]
A sequence of track objects or spotify IDs
"""
_tracks = [(obj if isinstance(obj, str) else obj.id) for obj in tracks]
await self.user.http.delete_saved_tracks(','.join(_tracks)) |
def _value_and_gradients(fn, fn_arg_list, result=None, grads=None, name=None):
"""Helper to `maybe_call_fn_and_grads`."""
with tf.compat.v1.name_scope(name, 'value_and_gradients',
[fn_arg_list, result, grads]):
def _convert_to_tensor(x, name):
ctt = lambda x_: x_ if x_ is None else tf.convert_to_tensor(
value=x_, name=name)
return [ctt(x_) for x_ in x] if is_list_like(x) else ctt(x)
fn_arg_list = (list(fn_arg_list) if is_list_like(fn_arg_list)
else [fn_arg_list])
fn_arg_list = _convert_to_tensor(fn_arg_list, 'fn_arg')
if result is None:
result = fn(*fn_arg_list)
if grads is None and tf.executing_eagerly():
# Ensure we disable bijector cacheing in eager mode.
# TODO(b/72831017): Remove this once bijector cacheing is fixed for
# eager mode.
fn_arg_list = [0 + x for x in fn_arg_list]
result = _convert_to_tensor(result, 'fn_result')
if grads is not None:
grads = _convert_to_tensor(grads, 'fn_grad')
return result, grads
if is_list_like(result) and len(result) == len(fn_arg_list):
# Compute the block diagonal of Jacobian.
# TODO(b/79158574): Guard this calculation by an arg which explicitly
# requests block diagonal Jacobian calculation.
def fn_slice(i):
"""Needed to prevent `cell-var-from-loop` pylint warning."""
return lambda x: fn(*(fn_arg_list[:i] + [x] + fn_arg_list[i+1:]))
grads = [
tfp_math_value_and_gradients(fn_slice(i), fn_arg_list[i])[1]
for i in range(len(result))
]
else:
_, grads = tfp_math_value_and_gradients(fn, fn_arg_list)
return result, grads | Helper to `maybe_call_fn_and_grads`. | Below is the the instruction that describes the task:
### Input:
Helper to `maybe_call_fn_and_grads`.
### Response:
def _value_and_gradients(fn, fn_arg_list, result=None, grads=None, name=None):
"""Helper to `maybe_call_fn_and_grads`."""
with tf.compat.v1.name_scope(name, 'value_and_gradients',
[fn_arg_list, result, grads]):
def _convert_to_tensor(x, name):
ctt = lambda x_: x_ if x_ is None else tf.convert_to_tensor(
value=x_, name=name)
return [ctt(x_) for x_ in x] if is_list_like(x) else ctt(x)
fn_arg_list = (list(fn_arg_list) if is_list_like(fn_arg_list)
else [fn_arg_list])
fn_arg_list = _convert_to_tensor(fn_arg_list, 'fn_arg')
if result is None:
result = fn(*fn_arg_list)
if grads is None and tf.executing_eagerly():
# Ensure we disable bijector cacheing in eager mode.
# TODO(b/72831017): Remove this once bijector cacheing is fixed for
# eager mode.
fn_arg_list = [0 + x for x in fn_arg_list]
result = _convert_to_tensor(result, 'fn_result')
if grads is not None:
grads = _convert_to_tensor(grads, 'fn_grad')
return result, grads
if is_list_like(result) and len(result) == len(fn_arg_list):
# Compute the block diagonal of Jacobian.
# TODO(b/79158574): Guard this calculation by an arg which explicitly
# requests block diagonal Jacobian calculation.
def fn_slice(i):
"""Needed to prevent `cell-var-from-loop` pylint warning."""
return lambda x: fn(*(fn_arg_list[:i] + [x] + fn_arg_list[i+1:]))
grads = [
tfp_math_value_and_gradients(fn_slice(i), fn_arg_list[i])[1]
for i in range(len(result))
]
else:
_, grads = tfp_math_value_and_gradients(fn, fn_arg_list)
return result, grads |
def main(argv=None):
'''
Main entry-point for calling layouts directly as a program.
'''
# Prep argparse
ap = argparse.ArgumentParser(
description='Basic query options for Python HID-IO Layouts repository',
)
ap.add_argument('--list', action='store_true', help='List available layout aliases.')
ap.add_argument('--get', metavar='NAME', help='Retrieve the given layout, and return the JSON data')
# Parse arguments
args = ap.parse_args(argv)
# Create layouts context manager
mgr = Layouts()
# Check if generating a list
if args.list:
for name in mgr.list_layouts():
print(name)
# Retrieve JSON layout
if args.get is not None:
layout = mgr.get_layout(args.get)
print(json.dumps(layout.json())) | Main entry-point for calling layouts directly as a program. | Below is the the instruction that describes the task:
### Input:
Main entry-point for calling layouts directly as a program.
### Response:
def main(argv=None):
'''
Main entry-point for calling layouts directly as a program.
'''
# Prep argparse
ap = argparse.ArgumentParser(
description='Basic query options for Python HID-IO Layouts repository',
)
ap.add_argument('--list', action='store_true', help='List available layout aliases.')
ap.add_argument('--get', metavar='NAME', help='Retrieve the given layout, and return the JSON data')
# Parse arguments
args = ap.parse_args(argv)
# Create layouts context manager
mgr = Layouts()
# Check if generating a list
if args.list:
for name in mgr.list_layouts():
print(name)
# Retrieve JSON layout
if args.get is not None:
layout = mgr.get_layout(args.get)
print(json.dumps(layout.json())) |
def _get_validated_visibility_timeout(self, timeout=None):
"""
:raises ValueError: If specified visibility timeout exceeds MAXIMUM_VISIBILTY_TIMEOUT
:raises TypeError: If visibility timeout was not specified
"""
if timeout is not None:
visibility_timeout = int(timeout)
else:
visibility_timeout = int(self.querystring.get("VisibilityTimeout")[0])
if visibility_timeout > MAXIMUM_VISIBILTY_TIMEOUT:
raise ValueError
return visibility_timeout | :raises ValueError: If specified visibility timeout exceeds MAXIMUM_VISIBILTY_TIMEOUT
:raises TypeError: If visibility timeout was not specified | Below is the the instruction that describes the task:
### Input:
:raises ValueError: If specified visibility timeout exceeds MAXIMUM_VISIBILTY_TIMEOUT
:raises TypeError: If visibility timeout was not specified
### Response:
def _get_validated_visibility_timeout(self, timeout=None):
"""
:raises ValueError: If specified visibility timeout exceeds MAXIMUM_VISIBILTY_TIMEOUT
:raises TypeError: If visibility timeout was not specified
"""
if timeout is not None:
visibility_timeout = int(timeout)
else:
visibility_timeout = int(self.querystring.get("VisibilityTimeout")[0])
if visibility_timeout > MAXIMUM_VISIBILTY_TIMEOUT:
raise ValueError
return visibility_timeout |
def part(self, channels, message=""):
"""Send a PART command."""
self.send_items('PART', ','.join(always_iterable(channels)), message) | Send a PART command. | Below is the the instruction that describes the task:
### Input:
Send a PART command.
### Response:
def part(self, channels, message=""):
"""Send a PART command."""
self.send_items('PART', ','.join(always_iterable(channels)), message) |
def active_link(context, viewnames, css_class=None, strict=None, *args, **kwargs):
"""
Renders the given CSS class if the request path matches the path of the view.
:param context: The context where the tag was called. Used to access the request object.
:param viewnames: The name of the view or views separated by || (include namespaces if any).
:param css_class: The CSS class to render.
:param strict: If True, the tag will perform an exact match with the request path.
:return:
"""
if css_class is None:
css_class = getattr(settings, 'ACTIVE_LINK_CSS_CLASS', 'active')
if strict is None:
strict = getattr(settings, 'ACTIVE_LINK_STRICT', False)
request = context.get('request')
if request is None:
# Can't work without the request object.
return ''
active = False
views = viewnames.split('||')
for viewname in views:
path = reverse(viewname.strip(), args=args, kwargs=kwargs)
request_path = escape_uri_path(request.path)
if strict:
active = request_path == path
else:
active = request_path.find(path) == 0
if active:
break
if active:
return css_class
return '' | Renders the given CSS class if the request path matches the path of the view.
:param context: The context where the tag was called. Used to access the request object.
:param viewnames: The name of the view or views separated by || (include namespaces if any).
:param css_class: The CSS class to render.
:param strict: If True, the tag will perform an exact match with the request path.
:return: | Below is the the instruction that describes the task:
### Input:
Renders the given CSS class if the request path matches the path of the view.
:param context: The context where the tag was called. Used to access the request object.
:param viewnames: The name of the view or views separated by || (include namespaces if any).
:param css_class: The CSS class to render.
:param strict: If True, the tag will perform an exact match with the request path.
:return:
### Response:
def active_link(context, viewnames, css_class=None, strict=None, *args, **kwargs):
"""
Renders the given CSS class if the request path matches the path of the view.
:param context: The context where the tag was called. Used to access the request object.
:param viewnames: The name of the view or views separated by || (include namespaces if any).
:param css_class: The CSS class to render.
:param strict: If True, the tag will perform an exact match with the request path.
:return:
"""
if css_class is None:
css_class = getattr(settings, 'ACTIVE_LINK_CSS_CLASS', 'active')
if strict is None:
strict = getattr(settings, 'ACTIVE_LINK_STRICT', False)
request = context.get('request')
if request is None:
# Can't work without the request object.
return ''
active = False
views = viewnames.split('||')
for viewname in views:
path = reverse(viewname.strip(), args=args, kwargs=kwargs)
request_path = escape_uri_path(request.path)
if strict:
active = request_path == path
else:
active = request_path.find(path) == 0
if active:
break
if active:
return css_class
return '' |
def check_valid_varname(varname,
custom_units,
custom_structs,
constants,
pos,
error_prefix="Variable name invalid.",
exc=None):
""" Handle invalid variable names """
exc = VariableDeclarationException if exc is None else exc
valid_varname, msg = is_varname_valid(varname, custom_units, custom_structs, constants)
if not valid_varname:
raise exc(error_prefix + msg, pos)
return True | Handle invalid variable names | Below is the the instruction that describes the task:
### Input:
Handle invalid variable names
### Response:
def check_valid_varname(varname,
custom_units,
custom_structs,
constants,
pos,
error_prefix="Variable name invalid.",
exc=None):
""" Handle invalid variable names """
exc = VariableDeclarationException if exc is None else exc
valid_varname, msg = is_varname_valid(varname, custom_units, custom_structs, constants)
if not valid_varname:
raise exc(error_prefix + msg, pos)
return True |
def __import_pem(self, key_name, pem_file_path, password):
"""
Import PEM certificate with provider
:param key_name: name of the key to import
:param pem_file_path: path to the pem file
:param password: optional password for the pem file
"""
key_import = self.__get_function_or_ex_function('import_key_pair_from_file')
pem_file = os.path.expandvars(os.path.expanduser(pem_file_path))
try:
pem = paramiko.RSAKey.from_private_key_file(pem_file, password)
except SSHException:
try:
pem = paramiko.DSSKey.from_private_key_file(pem_file, password)
except SSHException as e:
raise KeypairError('could not import {f}, neither as RSA key nor as DSA key: {e}'
.format(f=pem_file_path, e=e))
if not pem:
raise KeypairError('could not import {f}'.format(f=pem_file_path))
else:
with NamedTemporaryFile('w+t') as f:
f.write('{n} {p}'.format(n=pem.get_name(), p=pem.get_base64()))
key_import(name=key_name, key_file_path=f.name) | Import PEM certificate with provider
:param key_name: name of the key to import
:param pem_file_path: path to the pem file
:param password: optional password for the pem file | Below is the the instruction that describes the task:
### Input:
Import PEM certificate with provider
:param key_name: name of the key to import
:param pem_file_path: path to the pem file
:param password: optional password for the pem file
### Response:
def __import_pem(self, key_name, pem_file_path, password):
"""
Import PEM certificate with provider
:param key_name: name of the key to import
:param pem_file_path: path to the pem file
:param password: optional password for the pem file
"""
key_import = self.__get_function_or_ex_function('import_key_pair_from_file')
pem_file = os.path.expandvars(os.path.expanduser(pem_file_path))
try:
pem = paramiko.RSAKey.from_private_key_file(pem_file, password)
except SSHException:
try:
pem = paramiko.DSSKey.from_private_key_file(pem_file, password)
except SSHException as e:
raise KeypairError('could not import {f}, neither as RSA key nor as DSA key: {e}'
.format(f=pem_file_path, e=e))
if not pem:
raise KeypairError('could not import {f}'.format(f=pem_file_path))
else:
with NamedTemporaryFile('w+t') as f:
f.write('{n} {p}'.format(n=pem.get_name(), p=pem.get_base64()))
key_import(name=key_name, key_file_path=f.name) |
def stop_all_periodic_tasks(self, remove_tasks=True):
"""Stop sending any messages that were started using bus.send_periodic
:param bool remove_tasks:
Stop tracking the stopped tasks.
"""
for task in self._periodic_tasks:
task.stop(remove_task=remove_tasks) | Stop sending any messages that were started using bus.send_periodic
:param bool remove_tasks:
Stop tracking the stopped tasks. | Below is the the instruction that describes the task:
### Input:
Stop sending any messages that were started using bus.send_periodic
:param bool remove_tasks:
Stop tracking the stopped tasks.
### Response:
def stop_all_periodic_tasks(self, remove_tasks=True):
"""Stop sending any messages that were started using bus.send_periodic
:param bool remove_tasks:
Stop tracking the stopped tasks.
"""
for task in self._periodic_tasks:
task.stop(remove_task=remove_tasks) |
def check_vamp():
"Check Vamp host availability"
try:
from timeside.plugins.analyzer.externals import vamp_plugin
except VampImportError:
warnings.warn('Vamp host is not available', ImportWarning,
stacklevel=2)
_WITH_VAMP = False
else:
_WITH_VAMP = True
del vamp_plugin
return _WITH_VAMP | Check Vamp host availability | Below is the the instruction that describes the task:
### Input:
Check Vamp host availability
### Response:
def check_vamp():
"Check Vamp host availability"
try:
from timeside.plugins.analyzer.externals import vamp_plugin
except VampImportError:
warnings.warn('Vamp host is not available', ImportWarning,
stacklevel=2)
_WITH_VAMP = False
else:
_WITH_VAMP = True
del vamp_plugin
return _WITH_VAMP |
def execution_errors(self):
"""
Return a list of commands that encountered execution errors, with the error.
Each dictionary entry gives the command dictionary and the error dictionary
:return: list of commands that gave errors, with their error information
"""
if self.split_actions:
# throttling split this action, get errors from the split
return [dict(e) for s in self.split_actions for e in s.errors]
else:
return [dict(e) for e in self.errors] | Return a list of commands that encountered execution errors, with the error.
Each dictionary entry gives the command dictionary and the error dictionary
:return: list of commands that gave errors, with their error information | Below is the the instruction that describes the task:
### Input:
Return a list of commands that encountered execution errors, with the error.
Each dictionary entry gives the command dictionary and the error dictionary
:return: list of commands that gave errors, with their error information
### Response:
def execution_errors(self):
"""
Return a list of commands that encountered execution errors, with the error.
Each dictionary entry gives the command dictionary and the error dictionary
:return: list of commands that gave errors, with their error information
"""
if self.split_actions:
# throttling split this action, get errors from the split
return [dict(e) for s in self.split_actions for e in s.errors]
else:
return [dict(e) for e in self.errors] |
def create_zones(self, add_zones):
"""Create instances of additional zones for the receiver."""
for zone, zname in add_zones.items():
# Name either set explicitly or name of Main Zone with suffix
zonename = "{} {}".format(self._name, zone) if (
zname is None) else zname
zone_inst = DenonAVRZones(self, zone, zonename)
self._zones[zone] = zone_inst | Create instances of additional zones for the receiver. | Below is the the instruction that describes the task:
### Input:
Create instances of additional zones for the receiver.
### Response:
def create_zones(self, add_zones):
"""Create instances of additional zones for the receiver."""
for zone, zname in add_zones.items():
# Name either set explicitly or name of Main Zone with suffix
zonename = "{} {}".format(self._name, zone) if (
zname is None) else zname
zone_inst = DenonAVRZones(self, zone, zonename)
self._zones[zone] = zone_inst |
def find_all(self, collection):
"""
Search a collection for all available items.
Args:
collection: The db collection. See main class documentation.
Returns:
List of all items in the collection.
"""
obj = getattr(self.db, collection)
result = obj.find()
return result | Search a collection for all available items.
Args:
collection: The db collection. See main class documentation.
Returns:
List of all items in the collection. | Below is the the instruction that describes the task:
### Input:
Search a collection for all available items.
Args:
collection: The db collection. See main class documentation.
Returns:
List of all items in the collection.
### Response:
def find_all(self, collection):
"""
Search a collection for all available items.
Args:
collection: The db collection. See main class documentation.
Returns:
List of all items in the collection.
"""
obj = getattr(self.db, collection)
result = obj.find()
return result |
def resolve(self, year: int = YEAR_ANY) -> MDate:
"""Return the date object in the solar / lunar year.
:param year:
:return:
"""
year = year or self.year
if year:
return self._resolve(year)
raise ValueError('Unable resolve the date without a specified year.') | Return the date object in the solar / lunar year.
:param year:
:return: | Below is the the instruction that describes the task:
### Input:
Return the date object in the solar / lunar year.
:param year:
:return:
### Response:
def resolve(self, year: int = YEAR_ANY) -> MDate:
"""Return the date object in the solar / lunar year.
:param year:
:return:
"""
year = year or self.year
if year:
return self._resolve(year)
raise ValueError('Unable resolve the date without a specified year.') |
def clear_limit(self, identifier=None):
"""
Remove a single limit or all defined limits
:param identifier: The identifier to clear limits for, or if no identifier is supplied, clears ALL limits
:type identifier: int
:return: True if a limit was successfully found and removed, False if no limit could be matched for removal
:rtype : bool
"""
# Remove a single limit
if identifier:
if identifier in self._limits:
del self._limits[identifier]
return True
else:
return False
# Remove all limits
if self._limits:
self._limits.clear()
return True
else:
return False | Remove a single limit or all defined limits
:param identifier: The identifier to clear limits for, or if no identifier is supplied, clears ALL limits
:type identifier: int
:return: True if a limit was successfully found and removed, False if no limit could be matched for removal
:rtype : bool | Below is the the instruction that describes the task:
### Input:
Remove a single limit or all defined limits
:param identifier: The identifier to clear limits for, or if no identifier is supplied, clears ALL limits
:type identifier: int
:return: True if a limit was successfully found and removed, False if no limit could be matched for removal
:rtype : bool
### Response:
def clear_limit(self, identifier=None):
"""
Remove a single limit or all defined limits
:param identifier: The identifier to clear limits for, or if no identifier is supplied, clears ALL limits
:type identifier: int
:return: True if a limit was successfully found and removed, False if no limit could be matched for removal
:rtype : bool
"""
# Remove a single limit
if identifier:
if identifier in self._limits:
del self._limits[identifier]
return True
else:
return False
# Remove all limits
if self._limits:
self._limits.clear()
return True
else:
return False |
def write_file(self, filename, content):
"""Write a file.
This is useful when writing a file that will fit within memory
:param filename: ``str``
:param content: ``str``
"""
with open(filename, 'wb') as f:
self.log.debug(content)
f.write(content) | Write a file.
This is useful when writing a file that will fit within memory
:param filename: ``str``
:param content: ``str`` | Below is the the instruction that describes the task:
### Input:
Write a file.
This is useful when writing a file that will fit within memory
:param filename: ``str``
:param content: ``str``
### Response:
def write_file(self, filename, content):
"""Write a file.
This is useful when writing a file that will fit within memory
:param filename: ``str``
:param content: ``str``
"""
with open(filename, 'wb') as f:
self.log.debug(content)
f.write(content) |
def write_short_at(self, n, pos, pack_into=Struct('>H').pack_into):
'''
Write an unsigned 16bit value at a specific position in the buffer.
Used for writing tables and frames.
'''
if 0 <= n <= 0xFFFF:
pack_into(self._output_buffer, pos, n)
else:
raise ValueError('Short %d out of range 0..0xFFFF', n)
return self | Write an unsigned 16bit value at a specific position in the buffer.
Used for writing tables and frames. | Below is the the instruction that describes the task:
### Input:
Write an unsigned 16bit value at a specific position in the buffer.
Used for writing tables and frames.
### Response:
def write_short_at(self, n, pos, pack_into=Struct('>H').pack_into):
'''
Write an unsigned 16bit value at a specific position in the buffer.
Used for writing tables and frames.
'''
if 0 <= n <= 0xFFFF:
pack_into(self._output_buffer, pos, n)
else:
raise ValueError('Short %d out of range 0..0xFFFF', n)
return self |
def MyDiplomacyEnum(ctx):
"""Player's Diplomacy Enumeration."""
return Enum(
ctx,
gaia=0,
self=1,
ally=2,
neutral=3,
enemy=4,
invalid_player=-1
) | Player's Diplomacy Enumeration. | Below is the the instruction that describes the task:
### Input:
Player's Diplomacy Enumeration.
### Response:
def MyDiplomacyEnum(ctx):
"""Player's Diplomacy Enumeration."""
return Enum(
ctx,
gaia=0,
self=1,
ally=2,
neutral=3,
enemy=4,
invalid_player=-1
) |
def read_numpy(fh, byteorder, dtype, count, offsetsize):
"""Read tag data from file and return as numpy array."""
dtype = 'b' if dtype[-1] == 's' else byteorder+dtype[-1]
return fh.read_array(dtype, count) | Read tag data from file and return as numpy array. | Below is the the instruction that describes the task:
### Input:
Read tag data from file and return as numpy array.
### Response:
def read_numpy(fh, byteorder, dtype, count, offsetsize):
"""Read tag data from file and return as numpy array."""
dtype = 'b' if dtype[-1] == 's' else byteorder+dtype[-1]
return fh.read_array(dtype, count) |
def check_include_exclude(path_str, include_pat=None, exclude_pat=None):
'''
Check for glob or regexp patterns for include_pat and exclude_pat in the
'path_str' string and return True/False conditions as follows.
- Default: return 'True' if no include_pat or exclude_pat patterns are
supplied
- If only include_pat or exclude_pat is supplied: return 'True' if string
passes the include_pat test or fails exclude_pat test respectively
- If both include_pat and exclude_pat are supplied: return 'True' if
include_pat matches AND exclude_pat does not match
'''
def _pat_check(path_str, check_pat):
if re.match('E@', check_pat):
return True if re.search(
check_pat[2:],
path_str
) else False
else:
return True if fnmatch.fnmatch(
path_str,
check_pat
) else False
ret = True # -- default true
# Before pattern match, check if it is regexp (E@'') or glob(default)
if include_pat:
if isinstance(include_pat, list):
for include_line in include_pat:
retchk_include = _pat_check(path_str, include_line)
if retchk_include:
break
else:
retchk_include = _pat_check(path_str, include_pat)
if exclude_pat:
if isinstance(exclude_pat, list):
for exclude_line in exclude_pat:
retchk_exclude = not _pat_check(path_str, exclude_line)
if not retchk_exclude:
break
else:
retchk_exclude = not _pat_check(path_str, exclude_pat)
# Now apply include/exclude conditions
if include_pat and not exclude_pat:
ret = retchk_include
elif exclude_pat and not include_pat:
ret = retchk_exclude
elif include_pat and exclude_pat:
ret = retchk_include and retchk_exclude
else:
ret = True
return ret | Check for glob or regexp patterns for include_pat and exclude_pat in the
'path_str' string and return True/False conditions as follows.
- Default: return 'True' if no include_pat or exclude_pat patterns are
supplied
- If only include_pat or exclude_pat is supplied: return 'True' if string
passes the include_pat test or fails exclude_pat test respectively
- If both include_pat and exclude_pat are supplied: return 'True' if
include_pat matches AND exclude_pat does not match | Below is the the instruction that describes the task:
### Input:
Check for glob or regexp patterns for include_pat and exclude_pat in the
'path_str' string and return True/False conditions as follows.
- Default: return 'True' if no include_pat or exclude_pat patterns are
supplied
- If only include_pat or exclude_pat is supplied: return 'True' if string
passes the include_pat test or fails exclude_pat test respectively
- If both include_pat and exclude_pat are supplied: return 'True' if
include_pat matches AND exclude_pat does not match
### Response:
def check_include_exclude(path_str, include_pat=None, exclude_pat=None):
'''
Check for glob or regexp patterns for include_pat and exclude_pat in the
'path_str' string and return True/False conditions as follows.
- Default: return 'True' if no include_pat or exclude_pat patterns are
supplied
- If only include_pat or exclude_pat is supplied: return 'True' if string
passes the include_pat test or fails exclude_pat test respectively
- If both include_pat and exclude_pat are supplied: return 'True' if
include_pat matches AND exclude_pat does not match
'''
def _pat_check(path_str, check_pat):
if re.match('E@', check_pat):
return True if re.search(
check_pat[2:],
path_str
) else False
else:
return True if fnmatch.fnmatch(
path_str,
check_pat
) else False
ret = True # -- default true
# Before pattern match, check if it is regexp (E@'') or glob(default)
if include_pat:
if isinstance(include_pat, list):
for include_line in include_pat:
retchk_include = _pat_check(path_str, include_line)
if retchk_include:
break
else:
retchk_include = _pat_check(path_str, include_pat)
if exclude_pat:
if isinstance(exclude_pat, list):
for exclude_line in exclude_pat:
retchk_exclude = not _pat_check(path_str, exclude_line)
if not retchk_exclude:
break
else:
retchk_exclude = not _pat_check(path_str, exclude_pat)
# Now apply include/exclude conditions
if include_pat and not exclude_pat:
ret = retchk_include
elif exclude_pat and not include_pat:
ret = retchk_exclude
elif include_pat and exclude_pat:
ret = retchk_include and retchk_exclude
else:
ret = True
return ret |
def insert(self, row):
"""
Add a row (type: dict) by inserting it into the table.
Columns must exist.
::
data = dict(title='I am a banana!')
table.insert(data)
Returns the inserted row's primary key.
"""
self._check_dropped()
res = self.engine.execute(self.table.insert(row))
if len(res.inserted_primary_key) > 0:
return res.inserted_primary_key[0] | Add a row (type: dict) by inserting it into the table.
Columns must exist.
::
data = dict(title='I am a banana!')
table.insert(data)
Returns the inserted row's primary key. | Below is the the instruction that describes the task:
### Input:
Add a row (type: dict) by inserting it into the table.
Columns must exist.
::
data = dict(title='I am a banana!')
table.insert(data)
Returns the inserted row's primary key.
### Response:
def insert(self, row):
"""
Add a row (type: dict) by inserting it into the table.
Columns must exist.
::
data = dict(title='I am a banana!')
table.insert(data)
Returns the inserted row's primary key.
"""
self._check_dropped()
res = self.engine.execute(self.table.insert(row))
if len(res.inserted_primary_key) > 0:
return res.inserted_primary_key[0] |
def binary_shader_for_rules(self, output_jar, jar, rules, jvm_options=None):
"""Yields an `Executor.Runner` that will perform shading of the binary `jar` when `run()`.
No default rules are applied; only the rules passed in as a parameter will be used.
:param unicode output_jar: The path to dump the shaded jar to; will be over-written if it
exists.
:param unicode jar: The path to the jar file to shade.
:param list rules: The rules to apply for shading.
:param list jvm_options: an optional sequence of options for the underlying jvm
:returns: An `Executor.Runner` that can be `run()` to shade the given `jar`.
:rtype: :class:`pants.java.executor.Executor.Runner`
"""
with self.temporary_rules_file(rules) as rules_file:
logger.debug('Running jarjar with rules:\n{}'.format(' '.join(rule.render() for rule in rules)))
yield self._executor.runner(classpath=self._jarjar_classpath,
main='org.pantsbuild.jarjar.Main',
jvm_options=jvm_options,
args=['process', rules_file, jar, output_jar]) | Yields an `Executor.Runner` that will perform shading of the binary `jar` when `run()`.
No default rules are applied; only the rules passed in as a parameter will be used.
:param unicode output_jar: The path to dump the shaded jar to; will be over-written if it
exists.
:param unicode jar: The path to the jar file to shade.
:param list rules: The rules to apply for shading.
:param list jvm_options: an optional sequence of options for the underlying jvm
:returns: An `Executor.Runner` that can be `run()` to shade the given `jar`.
    :rtype: :class:`pants.java.executor.Executor.Runner` | Below is the instruction that describes the task:
### Input:
Yields an `Executor.Runner` that will perform shading of the binary `jar` when `run()`.
No default rules are applied; only the rules passed in as a parameter will be used.
:param unicode output_jar: The path to dump the shaded jar to; will be over-written if it
exists.
:param unicode jar: The path to the jar file to shade.
:param list rules: The rules to apply for shading.
:param list jvm_options: an optional sequence of options for the underlying jvm
:returns: An `Executor.Runner` that can be `run()` to shade the given `jar`.
:rtype: :class:`pants.java.executor.Executor.Runner`
### Response:
def binary_shader_for_rules(self, output_jar, jar, rules, jvm_options=None):
"""Yields an `Executor.Runner` that will perform shading of the binary `jar` when `run()`.
No default rules are applied; only the rules passed in as a parameter will be used.
:param unicode output_jar: The path to dump the shaded jar to; will be over-written if it
exists.
:param unicode jar: The path to the jar file to shade.
:param list rules: The rules to apply for shading.
:param list jvm_options: an optional sequence of options for the underlying jvm
:returns: An `Executor.Runner` that can be `run()` to shade the given `jar`.
:rtype: :class:`pants.java.executor.Executor.Runner`
"""
with self.temporary_rules_file(rules) as rules_file:
logger.debug('Running jarjar with rules:\n{}'.format(' '.join(rule.render() for rule in rules)))
yield self._executor.runner(classpath=self._jarjar_classpath,
main='org.pantsbuild.jarjar.Main',
jvm_options=jvm_options,
args=['process', rules_file, jar, output_jar]) |
def _populate_type_attributes(self):
"""
Converts each struct, union, and route from a forward reference to a
full definition.
"""
for namespace in self.api.namespaces.values():
env = self._get_or_create_env(namespace.name)
# do annotations before everything else, since populating aliases
# and datatypes involves setting annotations
for annotation in namespace.annotations:
if isinstance(annotation, CustomAnnotation):
loc = annotation._ast_node.lineno, annotation._ast_node.path
if annotation.annotation_type_ns:
if annotation.annotation_type_ns not in env:
raise InvalidSpec(
'Namespace %s is not imported' %
quote(annotation.annotation_type_ns), *loc)
annotation_type_env = env[annotation.annotation_type_ns]
if not isinstance(annotation_type_env, Environment):
raise InvalidSpec(
'%s is not a namespace.' %
quote(annotation.annotation_type_ns), *loc)
else:
annotation_type_env = env
if annotation.annotation_type_name not in annotation_type_env:
raise InvalidSpec(
'Annotation type %s does not exist' %
quote(annotation.annotation_type_name), *loc)
annotation_type = annotation_type_env[annotation.annotation_type_name]
if not isinstance(annotation_type, AnnotationType):
raise InvalidSpec(
'%s is not an annotation type' % quote(annotation.annotation_type_name),
*loc
)
annotation.set_attributes(annotation_type)
for alias in namespace.aliases:
data_type = self._resolve_type(env, alias._ast_node.type_ref)
alias.set_attributes(alias._ast_node.doc, data_type)
annotations = [self._resolve_annotation_type(env, annotation)
for annotation in alias._ast_node.annotations]
alias.set_annotations(annotations)
for data_type in namespace.data_types:
if not data_type._is_forward_ref:
continue
self._resolution_in_progress.add(data_type)
if isinstance(data_type, Struct):
self._populate_struct_type_attributes(env, data_type)
elif isinstance(data_type, Union):
self._populate_union_type_attributes(env, data_type)
else:
raise AssertionError('Unhandled type: %r' %
type(data_type))
self._resolution_in_progress.remove(data_type)
assert len(self._resolution_in_progress) == 0 | Converts each struct, union, and route from a forward reference to a
    full definition. | Below is the instruction that describes the task:
### Input:
Converts each struct, union, and route from a forward reference to a
full definition.
### Response:
def _populate_type_attributes(self):
"""
Converts each struct, union, and route from a forward reference to a
full definition.
"""
for namespace in self.api.namespaces.values():
env = self._get_or_create_env(namespace.name)
# do annotations before everything else, since populating aliases
# and datatypes involves setting annotations
for annotation in namespace.annotations:
if isinstance(annotation, CustomAnnotation):
loc = annotation._ast_node.lineno, annotation._ast_node.path
if annotation.annotation_type_ns:
if annotation.annotation_type_ns not in env:
raise InvalidSpec(
'Namespace %s is not imported' %
quote(annotation.annotation_type_ns), *loc)
annotation_type_env = env[annotation.annotation_type_ns]
if not isinstance(annotation_type_env, Environment):
raise InvalidSpec(
'%s is not a namespace.' %
quote(annotation.annotation_type_ns), *loc)
else:
annotation_type_env = env
if annotation.annotation_type_name not in annotation_type_env:
raise InvalidSpec(
'Annotation type %s does not exist' %
quote(annotation.annotation_type_name), *loc)
annotation_type = annotation_type_env[annotation.annotation_type_name]
if not isinstance(annotation_type, AnnotationType):
raise InvalidSpec(
'%s is not an annotation type' % quote(annotation.annotation_type_name),
*loc
)
annotation.set_attributes(annotation_type)
for alias in namespace.aliases:
data_type = self._resolve_type(env, alias._ast_node.type_ref)
alias.set_attributes(alias._ast_node.doc, data_type)
annotations = [self._resolve_annotation_type(env, annotation)
for annotation in alias._ast_node.annotations]
alias.set_annotations(annotations)
for data_type in namespace.data_types:
if not data_type._is_forward_ref:
continue
self._resolution_in_progress.add(data_type)
if isinstance(data_type, Struct):
self._populate_struct_type_attributes(env, data_type)
elif isinstance(data_type, Union):
self._populate_union_type_attributes(env, data_type)
else:
raise AssertionError('Unhandled type: %r' %
type(data_type))
self._resolution_in_progress.remove(data_type)
assert len(self._resolution_in_progress) == 0 |
def parse_numtuple(s,intype,length=2,scale=1):
'''parse a string into a list of numbers of a type'''
if intype == int:
numrx = intrx_s;
elif intype == float:
numrx = fltrx_s;
else:
raise NotImplementedError("Not implemented for type: {}".format(
intype));
if parse_utuple(s, numrx, length=length) is None:
raise ValueError("{} is not a valid number tuple.".format(s));
    return [x*scale for x in evalt(s)]; | parse a string into a list of numbers of a type | Below is the instruction that describes the task:
### Input:
parse a string into a list of numbers of a type
### Response:
def parse_numtuple(s,intype,length=2,scale=1):
'''parse a string into a list of numbers of a type'''
if intype == int:
numrx = intrx_s;
elif intype == float:
numrx = fltrx_s;
else:
raise NotImplementedError("Not implemented for type: {}".format(
intype));
if parse_utuple(s, numrx, length=length) is None:
raise ValueError("{} is not a valid number tuple.".format(s));
return [x*scale for x in evalt(s)]; |
def divide(n, iterable):
"""
split an iterable into n groups, per https://more-itertools.readthedocs.io/en/latest/api.html#grouping
:param int n: Number of unique groups
:param iter iterable: An iterable to split up
:return: a list of new iterables derived from the original iterable
:rtype: list
"""
seq = tuple(iterable)
q, r = divmod(len(seq), n)
ret = []
for i in range(n):
start = (i * q) + (i if i < r else r)
stop = ((i + 1) * q) + (i + 1 if i + 1 < r else r)
ret.append(iter(seq[start:stop]))
return ret | split an iterable into n groups, per https://more-itertools.readthedocs.io/en/latest/api.html#grouping
:param int n: Number of unique groups
:param iter iterable: An iterable to split up
:return: a list of new iterables derived from the original iterable
    :rtype: list | Below is the instruction that describes the task:
### Input:
split an iterable into n groups, per https://more-itertools.readthedocs.io/en/latest/api.html#grouping
:param int n: Number of unique groups
:param iter iterable: An iterable to split up
:return: a list of new iterables derived from the original iterable
:rtype: list
### Response:
def divide(n, iterable):
"""
split an iterable into n groups, per https://more-itertools.readthedocs.io/en/latest/api.html#grouping
:param int n: Number of unique groups
:param iter iterable: An iterable to split up
:return: a list of new iterables derived from the original iterable
:rtype: list
"""
seq = tuple(iterable)
q, r = divmod(len(seq), n)
ret = []
for i in range(n):
start = (i * q) + (i if i < r else r)
stop = ((i + 1) * q) + (i + 1 if i + 1 < r else r)
ret.append(iter(seq[start:stop]))
return ret |
def _exc_to_net(param1, success):
""" translate http code to net code. if accertion failed, set net code to 314 """
if len(param1) <= 3:
# FIXME: we're unable to use better logic here, because we should support non-http codes
# but, we should look for core.util.HTTP or some other common logic
# here
if success:
return 0
else:
return 314
exc = param1.split(' ')[-1]
if exc in KNOWN_EXC.keys():
return KNOWN_EXC[exc]
else:
logger.warning(
"Unknown Java exception, consider adding it to dictionary: %s",
param1)
        return 41 | translate http code to net code. if accertion failed, set net code to 314 | Below is the instruction that describes the task:
### Input:
translate http code to net code. if accertion failed, set net code to 314
### Response:
def _exc_to_net(param1, success):
""" translate http code to net code. if accertion failed, set net code to 314 """
if len(param1) <= 3:
# FIXME: we're unable to use better logic here, because we should support non-http codes
# but, we should look for core.util.HTTP or some other common logic
# here
if success:
return 0
else:
return 314
exc = param1.split(' ')[-1]
if exc in KNOWN_EXC.keys():
return KNOWN_EXC[exc]
else:
logger.warning(
"Unknown Java exception, consider adding it to dictionary: %s",
param1)
return 41 |
def _debug(self):
"""This is just for debugging, so we can see what buttons would be drawn.
Not intended to be used in production."""
self.window.blit(self.surfaceUp, (self.loc[0], 10))
self.window.blit(self.surfaceOver, (self.loc[0], 60))
self.window.blit(self.surfaceDown, (self.loc[0], 110))
self.window.blit(self.surfaceDisabled, (self.loc[0], 160)) | This is just for debugging, so we can see what buttons would be drawn.
    Not intended to be used in production. | Below is the instruction that describes the task:
### Input:
This is just for debugging, so we can see what buttons would be drawn.
Not intended to be used in production.
### Response:
def _debug(self):
"""This is just for debugging, so we can see what buttons would be drawn.
Not intended to be used in production."""
self.window.blit(self.surfaceUp, (self.loc[0], 10))
self.window.blit(self.surfaceOver, (self.loc[0], 60))
self.window.blit(self.surfaceDown, (self.loc[0], 110))
self.window.blit(self.surfaceDisabled, (self.loc[0], 160)) |
def into2dBlocks(arr, n0, n1):
"""
similar to blockshaped
but splits an array into n0*n1 blocks
"""
s0, s1 = arr.shape
b = blockshaped(arr, s0// n0, s1// n1)
return b.reshape(n0, n1, *b.shape[1:]) | similar to blockshaped
    but splits an array into n0*n1 blocks | Below is the instruction that describes the task:
### Input:
similar to blockshaped
but splits an array into n0*n1 blocks
### Response:
def into2dBlocks(arr, n0, n1):
"""
similar to blockshaped
but splits an array into n0*n1 blocks
"""
s0, s1 = arr.shape
b = blockshaped(arr, s0// n0, s1// n1)
return b.reshape(n0, n1, *b.shape[1:]) |
def _to_parent_frame(self, *args, **kwargs):
"""Conversion from Topocentric Frame to parent frame
"""
lat, lon, _ = self.latlonalt
m = rot3(-lon) @ rot2(lat - np.pi / 2.) @ rot3(self.heading)
offset = np.zeros(6)
offset[:3] = self.coordinates
    return self._convert(m, m), offset | Conversion from Topocentric Frame to parent frame | Below is the instruction that describes the task:
### Input:
Conversion from Topocentric Frame to parent frame
### Response:
def _to_parent_frame(self, *args, **kwargs):
"""Conversion from Topocentric Frame to parent frame
"""
lat, lon, _ = self.latlonalt
m = rot3(-lon) @ rot2(lat - np.pi / 2.) @ rot3(self.heading)
offset = np.zeros(6)
offset[:3] = self.coordinates
return self._convert(m, m), offset |
def gross_lev(positions):
"""
Calculates the gross leverage of a strategy.
Parameters
----------
positions : pd.DataFrame
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
Returns
-------
pd.Series
Gross leverage.
"""
exposure = positions.drop('cash', axis=1).abs().sum(axis=1)
return exposure / positions.sum(axis=1) | Calculates the gross leverage of a strategy.
Parameters
----------
positions : pd.DataFrame
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
Returns
-------
pd.Series
    Gross leverage. | Below is the instruction that describes the task:
### Input:
Calculates the gross leverage of a strategy.
Parameters
----------
positions : pd.DataFrame
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
Returns
-------
pd.Series
Gross leverage.
### Response:
def gross_lev(positions):
"""
Calculates the gross leverage of a strategy.
Parameters
----------
positions : pd.DataFrame
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
Returns
-------
pd.Series
Gross leverage.
"""
exposure = positions.drop('cash', axis=1).abs().sum(axis=1)
return exposure / positions.sum(axis=1) |
def best_value(Y,sign=1):
'''
Returns a vector whose components i are the minimum (default) or maximum of Y[:i]
'''
n = Y.shape[0]
Y_best = np.ones(n)
for i in range(n):
if sign == 1:
Y_best[i]=Y[:(i+1)].min()
else:
Y_best[i]=Y[:(i+1)].max()
    return Y_best | Returns a vector whose components i are the minimum (default) or maximum of Y[:i] | Below is the instruction that describes the task:
### Input:
Returns a vector whose components i are the minimum (default) or maximum of Y[:i]
### Response:
def best_value(Y,sign=1):
'''
Returns a vector whose components i are the minimum (default) or maximum of Y[:i]
'''
n = Y.shape[0]
Y_best = np.ones(n)
for i in range(n):
if sign == 1:
Y_best[i]=Y[:(i+1)].min()
else:
Y_best[i]=Y[:(i+1)].max()
return Y_best |
def _scale_cores_to_memory(cores, mem_per_core, sysinfo, system_memory):
"""Scale multicore usage to avoid excessive memory usage based on system information.
"""
total_mem = "%.2f" % (cores * mem_per_core + system_memory)
if "cores" not in sysinfo:
return cores, total_mem, 1.0
total_mem = min(float(total_mem), float(sysinfo["memory"]) - system_memory)
cores = min(cores, int(sysinfo["cores"]))
mem_cores = int(math.floor(float(total_mem) / mem_per_core)) # cores based on available memory
if mem_cores < 1:
out_cores = 1
elif mem_cores < cores:
out_cores = mem_cores
else:
out_cores = cores
mem_pct = float(out_cores) / float(cores)
    return out_cores, total_mem, mem_pct | Scale multicore usage to avoid excessive memory usage based on system information. | Below is the instruction that describes the task:
### Input:
Scale multicore usage to avoid excessive memory usage based on system information.
### Response:
def _scale_cores_to_memory(cores, mem_per_core, sysinfo, system_memory):
"""Scale multicore usage to avoid excessive memory usage based on system information.
"""
total_mem = "%.2f" % (cores * mem_per_core + system_memory)
if "cores" not in sysinfo:
return cores, total_mem, 1.0
total_mem = min(float(total_mem), float(sysinfo["memory"]) - system_memory)
cores = min(cores, int(sysinfo["cores"]))
mem_cores = int(math.floor(float(total_mem) / mem_per_core)) # cores based on available memory
if mem_cores < 1:
out_cores = 1
elif mem_cores < cores:
out_cores = mem_cores
else:
out_cores = cores
mem_pct = float(out_cores) / float(cores)
return out_cores, total_mem, mem_pct |
def next_event(self):
"""Simulates the queue forward one event.
Use :meth:`.simulate` instead.
Returns
-------
out : :class:`.Agent` (sometimes)
If the next event is a departure then the departing agent
is returned, otherwise nothing is returned.
See Also
--------
:meth:`.simulate` : Simulates the queue forward.
"""
if self._departures[0]._time < self._arrivals[0]._time:
new_depart = heappop(self._departures)
self._current_t = new_depart._time
self._num_total -= 1
self.num_system -= 1
self.num_departures += 1
if self.collect_data and new_depart.agent_id in self.data:
self.data[new_depart.agent_id][-1][2] = self._current_t
if len(self.queue) > 0:
agent = self.queue.popleft()
if self.collect_data and agent.agent_id in self.data:
self.data[agent.agent_id][-1][1] = self._current_t
agent._time = self.service_f(self._current_t)
agent.queue_action(self, 1)
heappush(self._departures, agent)
new_depart.queue_action(self, 2)
self._update_time()
return new_depart
elif self._arrivals[0]._time < infty:
arrival = heappop(self._arrivals)
self._current_t = arrival._time
if self._active:
self._add_arrival()
self.num_system += 1
self._num_arrivals += 1
if self.collect_data:
b = 0 if self.num_system <= self.num_servers else 1
if arrival.agent_id not in self.data:
self.data[arrival.agent_id] = \
[[arrival._time, 0, 0, len(self.queue) + b, self.num_system]]
else:
self.data[arrival.agent_id]\
.append([arrival._time, 0, 0, len(self.queue) + b, self.num_system])
arrival.queue_action(self, 0)
if self.num_system <= self.num_servers:
if self.collect_data:
self.data[arrival.agent_id][-1][1] = arrival._time
arrival._time = self.service_f(arrival._time)
arrival.queue_action(self, 1)
heappush(self._departures, arrival)
else:
self.queue.append(arrival)
self._update_time() | Simulates the queue forward one event.
Use :meth:`.simulate` instead.
Returns
-------
out : :class:`.Agent` (sometimes)
If the next event is a departure then the departing agent
is returned, otherwise nothing is returned.
See Also
--------
    :meth:`.simulate` : Simulates the queue forward. | Below is the instruction that describes the task:
### Input:
Simulates the queue forward one event.
Use :meth:`.simulate` instead.
Returns
-------
out : :class:`.Agent` (sometimes)
If the next event is a departure then the departing agent
is returned, otherwise nothing is returned.
See Also
--------
:meth:`.simulate` : Simulates the queue forward.
### Response:
def next_event(self):
"""Simulates the queue forward one event.
Use :meth:`.simulate` instead.
Returns
-------
out : :class:`.Agent` (sometimes)
If the next event is a departure then the departing agent
is returned, otherwise nothing is returned.
See Also
--------
:meth:`.simulate` : Simulates the queue forward.
"""
if self._departures[0]._time < self._arrivals[0]._time:
new_depart = heappop(self._departures)
self._current_t = new_depart._time
self._num_total -= 1
self.num_system -= 1
self.num_departures += 1
if self.collect_data and new_depart.agent_id in self.data:
self.data[new_depart.agent_id][-1][2] = self._current_t
if len(self.queue) > 0:
agent = self.queue.popleft()
if self.collect_data and agent.agent_id in self.data:
self.data[agent.agent_id][-1][1] = self._current_t
agent._time = self.service_f(self._current_t)
agent.queue_action(self, 1)
heappush(self._departures, agent)
new_depart.queue_action(self, 2)
self._update_time()
return new_depart
elif self._arrivals[0]._time < infty:
arrival = heappop(self._arrivals)
self._current_t = arrival._time
if self._active:
self._add_arrival()
self.num_system += 1
self._num_arrivals += 1
if self.collect_data:
b = 0 if self.num_system <= self.num_servers else 1
if arrival.agent_id not in self.data:
self.data[arrival.agent_id] = \
[[arrival._time, 0, 0, len(self.queue) + b, self.num_system]]
else:
self.data[arrival.agent_id]\
.append([arrival._time, 0, 0, len(self.queue) + b, self.num_system])
arrival.queue_action(self, 0)
if self.num_system <= self.num_servers:
if self.collect_data:
self.data[arrival.agent_id][-1][1] = arrival._time
arrival._time = self.service_f(arrival._time)
arrival.queue_action(self, 1)
heappush(self._departures, arrival)
else:
self.queue.append(arrival)
self._update_time() |
def file_list(*packages, **kwargs):
'''
List the files that belong to a package. Not specifying any packages will
return a list of _every_ file on the system's package database (not
generally recommended).
CLI Examples:
.. code-block:: bash
salt '*' lowpkg.file_list httpd
salt '*' lowpkg.file_list httpd postfix
salt '*' lowpkg.file_list
'''
errors = []
ret = set([])
pkgs = {}
cmd = 'dpkg -l {0}'.format(' '.join(packages))
out = __salt__['cmd.run_all'](cmd, python_shell=False)
if out['retcode'] != 0:
msg = 'Error: ' + out['stderr']
log.error(msg)
return msg
out = out['stdout']
for line in out.splitlines():
if line.startswith('ii '):
comps = line.split()
pkgs[comps[1]] = {'version': comps[2],
'description': ' '.join(comps[3:])}
if 'No packages found' in line:
errors.append(line)
for pkg in pkgs:
files = []
cmd = 'dpkg -L {0}'.format(pkg)
for line in __salt__['cmd.run'](cmd, python_shell=False).splitlines():
files.append(line)
fileset = set(files)
ret = ret.union(fileset)
return {'errors': errors, 'files': list(ret)} | List the files that belong to a package. Not specifying any packages will
return a list of _every_ file on the system's package database (not
generally recommended).
CLI Examples:
.. code-block:: bash
salt '*' lowpkg.file_list httpd
salt '*' lowpkg.file_list httpd postfix
    salt '*' lowpkg.file_list | Below is the instruction that describes the task:
### Input:
List the files that belong to a package. Not specifying any packages will
return a list of _every_ file on the system's package database (not
generally recommended).
CLI Examples:
.. code-block:: bash
salt '*' lowpkg.file_list httpd
salt '*' lowpkg.file_list httpd postfix
salt '*' lowpkg.file_list
### Response:
def file_list(*packages, **kwargs):
'''
List the files that belong to a package. Not specifying any packages will
return a list of _every_ file on the system's package database (not
generally recommended).
CLI Examples:
.. code-block:: bash
salt '*' lowpkg.file_list httpd
salt '*' lowpkg.file_list httpd postfix
salt '*' lowpkg.file_list
'''
errors = []
ret = set([])
pkgs = {}
cmd = 'dpkg -l {0}'.format(' '.join(packages))
out = __salt__['cmd.run_all'](cmd, python_shell=False)
if out['retcode'] != 0:
msg = 'Error: ' + out['stderr']
log.error(msg)
return msg
out = out['stdout']
for line in out.splitlines():
if line.startswith('ii '):
comps = line.split()
pkgs[comps[1]] = {'version': comps[2],
'description': ' '.join(comps[3:])}
if 'No packages found' in line:
errors.append(line)
for pkg in pkgs:
files = []
cmd = 'dpkg -L {0}'.format(pkg)
for line in __salt__['cmd.run'](cmd, python_shell=False).splitlines():
files.append(line)
fileset = set(files)
ret = ret.union(fileset)
return {'errors': errors, 'files': list(ret)} |
def _read_http_goaway(self, size, kind, flag):
"""Read HTTP/2 GOAWAY frames.
Structure of HTTP/2 GOAWAY frame [RFC 7540]:
+-----------------------------------------------+
| Length (24) |
+---------------+---------------+---------------+
| Type (8) | Flags (8) |
+-+-------------+---------------+-------------------------------+
|R| Stream Identifier (31) |
+-+-------------+---------------+-------------------------------+
|R| Last-Stream-ID (31) |
+-+-------------------------------------------------------------+
| Error Code (32) |
+---------------------------------------------------------------+
| Additional Debug Data (*) |
+---------------------------------------------------------------+
Octets Bits Name Description
0 0 http.length Length
3 24 http.type Type (2)
4 32 http.flags Flags
5 40 - Reserved
5 41 http.sid Stream Identifier
9 72 - Reserved
9 73 http.last_sid Last Stream ID
13 104 http.error Error Code
17 136 http.data Additional Debug Data (Optional)
"""
_dlen = size - 8
if _dlen < 0:
raise ProtocolError(f'HTTP/2: [Type {kind}] invalid format', quiet=True)
if any((int(bit, base=2) for bit in flag)):
raise ProtocolError(f'HTTP/2: [Type {kind}] invalid format', quiet=True)
_rsid = self._read_binary(4)
_code = self._read_unpack(4)
_data = self._read_fileng(_dlen) or None
if int(_rsid[0], base=2):
raise ProtocolError(f'HTTP/2: [Type {kind}] invalid format', quiet=True)
data = dict(
flags=None,
last_sid=int(_rsid[1:], base=2),
error=_ERROR_CODE.get(_code, _code),
data=_data,
)
return data | Read HTTP/2 GOAWAY frames.
Structure of HTTP/2 GOAWAY frame [RFC 7540]:
+-----------------------------------------------+
| Length (24) |
+---------------+---------------+---------------+
| Type (8) | Flags (8) |
+-+-------------+---------------+-------------------------------+
|R| Stream Identifier (31) |
+-+-------------+---------------+-------------------------------+
|R| Last-Stream-ID (31) |
+-+-------------------------------------------------------------+
| Error Code (32) |
+---------------------------------------------------------------+
| Additional Debug Data (*) |
+---------------------------------------------------------------+
Octets Bits Name Description
0 0 http.length Length
3 24 http.type Type (2)
4 32 http.flags Flags
5 40 - Reserved
5 41 http.sid Stream Identifier
9 72 - Reserved
9 73 http.last_sid Last Stream ID
13 104 http.error Error Code
    17 136 http.data Additional Debug Data (Optional) | Below is the instruction that describes the task:
### Input:
Read HTTP/2 GOAWAY frames.
Structure of HTTP/2 GOAWAY frame [RFC 7540]:
+-----------------------------------------------+
| Length (24) |
+---------------+---------------+---------------+
| Type (8) | Flags (8) |
+-+-------------+---------------+-------------------------------+
|R| Stream Identifier (31) |
+-+-------------+---------------+-------------------------------+
|R| Last-Stream-ID (31) |
+-+-------------------------------------------------------------+
| Error Code (32) |
+---------------------------------------------------------------+
| Additional Debug Data (*) |
+---------------------------------------------------------------+
Octets Bits Name Description
0 0 http.length Length
3 24 http.type Type (2)
4 32 http.flags Flags
5 40 - Reserved
5 41 http.sid Stream Identifier
9 72 - Reserved
9 73 http.last_sid Last Stream ID
13 104 http.error Error Code
17 136 http.data Additional Debug Data (Optional)
### Response:
def _read_http_goaway(self, size, kind, flag):
"""Read HTTP/2 GOAWAY frames.
Structure of HTTP/2 GOAWAY frame [RFC 7540]:
+-----------------------------------------------+
| Length (24) |
+---------------+---------------+---------------+
| Type (8) | Flags (8) |
+-+-------------+---------------+-------------------------------+
|R| Stream Identifier (31) |
+-+-------------+---------------+-------------------------------+
|R| Last-Stream-ID (31) |
+-+-------------------------------------------------------------+
| Error Code (32) |
+---------------------------------------------------------------+
| Additional Debug Data (*) |
+---------------------------------------------------------------+
Octets Bits Name Description
0 0 http.length Length
3 24 http.type Type (2)
4 32 http.flags Flags
5 40 - Reserved
5 41 http.sid Stream Identifier
9 72 - Reserved
9 73 http.last_sid Last Stream ID
13 104 http.error Error Code
17 136 http.data Additional Debug Data (Optional)
"""
_dlen = size - 8
if _dlen < 0:
raise ProtocolError(f'HTTP/2: [Type {kind}] invalid format', quiet=True)
if any((int(bit, base=2) for bit in flag)):
raise ProtocolError(f'HTTP/2: [Type {kind}] invalid format', quiet=True)
_rsid = self._read_binary(4)
_code = self._read_unpack(4)
_data = self._read_fileng(_dlen) or None
if int(_rsid[0], base=2):
raise ProtocolError(f'HTTP/2: [Type {kind}] invalid format', quiet=True)
data = dict(
flags=None,
last_sid=int(_rsid[1:], base=2),
error=_ERROR_CODE.get(_code, _code),
data=_data,
)
return data |
def get_latex_environment(s, pos, environmentname=None, **parse_flags):
"""
Reads a latex expression enclosed in a \\begin{environment}...\\end{environment}. The first
token in the stream must be the \\begin{environment}.
Returns a tuple (node, pos, len) with node being a :py:class:`LatexEnvironmentNode`.
.. deprecated:: 1.0
Please use :py:meth:`LatexWalker.get_latex_environment()` instead.
"""
return LatexWalker(s, **parse_flags).get_latex_environment(pos=pos, environmentname=environmentname) | Reads a latex expression enclosed in a \\begin{environment}...\\end{environment}. The first
token in the stream must be the \\begin{environment}.
Returns a tuple (node, pos, len) with node being a :py:class:`LatexEnvironmentNode`.
.. deprecated:: 1.0
    Please use :py:meth:`LatexWalker.get_latex_environment()` instead. | Below is the instruction that describes the task:
### Input:
Reads a latex expression enclosed in a \\begin{environment}...\\end{environment}. The first
token in the stream must be the \\begin{environment}.
Returns a tuple (node, pos, len) with node being a :py:class:`LatexEnvironmentNode`.
.. deprecated:: 1.0
Please use :py:meth:`LatexWalker.get_latex_environment()` instead.
### Response:
def get_latex_environment(s, pos, environmentname=None, **parse_flags):
"""
Reads a latex expression enclosed in a \\begin{environment}...\\end{environment}. The first
token in the stream must be the \\begin{environment}.
Returns a tuple (node, pos, len) with node being a :py:class:`LatexEnvironmentNode`.
.. deprecated:: 1.0
Please use :py:meth:`LatexWalker.get_latex_environment()` instead.
"""
return LatexWalker(s, **parse_flags).get_latex_environment(pos=pos, environmentname=environmentname) |
def _parameter_values(parameter_values_from_pillars, parameter_value_overrides):
'''
Return a dictionary of parameter values that configure the pipeline
parameter_values_from_pillars
The pillar key to use for lookup
parameter_value_overrides
Parameter values to use. Will override values read from pillars.
'''
from_pillars = copy.deepcopy(__salt__['pillar.get'](parameter_values_from_pillars))
from_pillars.update(parameter_value_overrides)
parameter_values = _standardize(from_pillars)
return _properties_from_dict(parameter_values, key_name='id') | Return a dictionary of parameter values that configure the pipeline
parameter_values_from_pillars
The pillar key to use for lookup
parameter_value_overrides
    Parameter values to use. Will override values read from pillars. | Below is the instruction that describes the task:
### Input:
Return a dictionary of parameter values that configure the pipeline
parameter_values_from_pillars
The pillar key to use for lookup
parameter_value_overrides
Parameter values to use. Will override values read from pillars.
### Response:
def _parameter_values(parameter_values_from_pillars, parameter_value_overrides):
'''
Return a dictionary of parameter values that configure the pipeline
parameter_values_from_pillars
The pillar key to use for lookup
parameter_value_overrides
Parameter values to use. Will override values read from pillars.
'''
from_pillars = copy.deepcopy(__salt__['pillar.get'](parameter_values_from_pillars))
from_pillars.update(parameter_value_overrides)
parameter_values = _standardize(from_pillars)
return _properties_from_dict(parameter_values, key_name='id') |
def get_default(self):
"""Return the default VLAN from the set."""
length = len(self)
if length == 0:
return None
elif length == 1:
return self[0]
else:
return sorted(self, key=attrgetter('id'))[0] | Return the default VLAN from the set. | Below is the the instruction that describes the task:
### Input:
Return the default VLAN from the set.
### Response:
def get_default(self):
"""Return the default VLAN from the set."""
length = len(self)
if length == 0:
return None
elif length == 1:
return self[0]
else:
return sorted(self, key=attrgetter('id'))[0] |
def get_assets_by_search(self, asset_query, asset_search):
"""Gets the search results matching the given search query using the given search.
arg: asset_query (osid.repository.AssetQuery): the asset
query
arg: asset_search (osid.repository.AssetSearch): the asset
search
return: (osid.repository.AssetSearchResults) - the asset search
results
raise: NullArgument - ``asset_query`` or ``asset_search`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``asset_query`` or ``asset_search`` is not
of this service
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceSearchSession.get_resources_by_search_template
# Copied from osid.resource.ResourceQuerySession.get_resources_by_query_template
and_list = list()
or_list = list()
for term in asset_query._query_terms:
and_list.append({term: asset_query._query_terms[term]})
for term in asset_query._keyword_terms:
or_list.append({term: asset_query._keyword_terms[term]})
if asset_search._id_list is not None:
identifiers = [ObjectId(i.identifier) for i in asset_search._id_list]
and_list.append({'_id': {'$in': identifiers}})
if or_list:
and_list.append({'$or': or_list})
view_filter = self._view_filter()
if view_filter:
and_list.append(view_filter)
if and_list:
query_terms = {'$and': and_list}
collection = JSONClientValidated('repository',
collection='Asset',
runtime=self._runtime)
if asset_search.start is not None and asset_search.end is not None:
result = collection.find(query_terms)[asset_search.start:asset_search.end]
else:
result = collection.find(query_terms)
return searches.AssetSearchResults(result, dict(asset_query._query_terms), runtime=self._runtime) | Gets the search results matching the given search query using the given search.
arg: asset_query (osid.repository.AssetQuery): the asset
query
arg: asset_search (osid.repository.AssetSearch): the asset
search
return: (osid.repository.AssetSearchResults) - the asset search
results
raise: NullArgument - ``asset_query`` or ``asset_search`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``asset_query`` or ``asset_search`` is not
of this service
*compliance: mandatory -- This method must be implemented.* | Below is the the instruction that describes the task:
### Input:
Gets the search results matching the given search query using the given search.
arg: asset_query (osid.repository.AssetQuery): the asset
query
arg: asset_search (osid.repository.AssetSearch): the asset
search
return: (osid.repository.AssetSearchResults) - the asset search
results
raise: NullArgument - ``asset_query`` or ``asset_search`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``asset_query`` or ``asset_search`` is not
of this service
*compliance: mandatory -- This method must be implemented.*
### Response:
def get_assets_by_search(self, asset_query, asset_search):
"""Gets the search results matching the given search query using the given search.
arg: asset_query (osid.repository.AssetQuery): the asset
query
arg: asset_search (osid.repository.AssetSearch): the asset
search
return: (osid.repository.AssetSearchResults) - the asset search
results
raise: NullArgument - ``asset_query`` or ``asset_search`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``asset_query`` or ``asset_search`` is not
of this service
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceSearchSession.get_resources_by_search_template
# Copied from osid.resource.ResourceQuerySession.get_resources_by_query_template
and_list = list()
or_list = list()
for term in asset_query._query_terms:
and_list.append({term: asset_query._query_terms[term]})
for term in asset_query._keyword_terms:
or_list.append({term: asset_query._keyword_terms[term]})
if asset_search._id_list is not None:
identifiers = [ObjectId(i.identifier) for i in asset_search._id_list]
and_list.append({'_id': {'$in': identifiers}})
if or_list:
and_list.append({'$or': or_list})
view_filter = self._view_filter()
if view_filter:
and_list.append(view_filter)
if and_list:
query_terms = {'$and': and_list}
collection = JSONClientValidated('repository',
collection='Asset',
runtime=self._runtime)
if asset_search.start is not None and asset_search.end is not None:
result = collection.find(query_terms)[asset_search.start:asset_search.end]
else:
result = collection.find(query_terms)
return searches.AssetSearchResults(result, dict(asset_query._query_terms), runtime=self._runtime) |
def callgraph(func):
''' Makes a call graph
Note: be sure to install GraphViz prior to printing the dot graph!
'''
import pycallgraph
@functools.wraps(func)
def wrapper(*args, **kwargs):
pycallgraph.start_trace()
func(*args, **kwargs)
pycallgraph.save_dot('callgraph.log')
pycallgraph.make_dot_graph('callgraph.png')
#pycallgraph.make_dot_graph('callgraph.jpg', format='jpg', tool='neato')
return wrapper | Makes a call graph
Note: be sure to install GraphViz prior to printing the dot graph! | Below is the the instruction that describes the task:
### Input:
Makes a call graph
Note: be sure to install GraphViz prior to printing the dot graph!
### Response:
def callgraph(func):
''' Makes a call graph
Note: be sure to install GraphViz prior to printing the dot graph!
'''
import pycallgraph
@functools.wraps(func)
def wrapper(*args, **kwargs):
pycallgraph.start_trace()
func(*args, **kwargs)
pycallgraph.save_dot('callgraph.log')
pycallgraph.make_dot_graph('callgraph.png')
#pycallgraph.make_dot_graph('callgraph.jpg', format='jpg', tool='neato')
return wrapper |
def _create_path(self, *args):
"""Create the URL path with the Fred endpoint and given arguments."""
args = filter(None, args)
path = self.endpoint + '/'.join(args)
return path | Create the URL path with the Fred endpoint and given arguments. | Below is the the instruction that describes the task:
### Input:
Create the URL path with the Fred endpoint and given arguments.
### Response:
def _create_path(self, *args):
"""Create the URL path with the Fred endpoint and given arguments."""
args = filter(None, args)
path = self.endpoint + '/'.join(args)
return path |
def step(self):
"""Run the environment for one time step. If the
actions and exogenous changes are independent, this method will
do. If there are interactions between them, you'll need to
override this method."""
if not self.is_done():
actions = [agent.program(self.percept(agent))
for agent in self.agents]
for (agent, action) in zip(self.agents, actions):
self.execute_action(agent, action)
self.exogenous_change() | Run the environment for one time step. If the
actions and exogenous changes are independent, this method will
do. If there are interactions between them, you'll need to
override this method. | Below is the the instruction that describes the task:
### Input:
Run the environment for one time step. If the
actions and exogenous changes are independent, this method will
do. If there are interactions between them, you'll need to
override this method.
### Response:
def step(self):
"""Run the environment for one time step. If the
actions and exogenous changes are independent, this method will
do. If there are interactions between them, you'll need to
override this method."""
if not self.is_done():
actions = [agent.program(self.percept(agent))
for agent in self.agents]
for (agent, action) in zip(self.agents, actions):
self.execute_action(agent, action)
self.exogenous_change() |
def download(url, proxies=None):
"""
Download a PDF or DJVU document from a url, eventually using proxies.
:params url: The URL to the PDF/DJVU document to fetch.
:params proxies: An optional list of proxies to use. Proxies will be \
used sequentially. Proxies should be a list of proxy strings. \
Do not forget to include ``""`` (empty string) in the list if \
you want to try direct fetching without any proxy.
:returns: A tuple of the raw content of the downloaded data and its \
associated content-type. Returns ``(None, None)`` if it was \
unable to download the document.
>>> download("http://arxiv.org/pdf/1312.4006.pdf") # doctest: +SKIP
"""
# Handle default argument
if proxies is None:
proxies = [""]
# Loop over all available connections
for proxy in proxies:
# Handle no proxy case
if proxy == "":
socket.socket = DEFAULT_SOCKET
# Handle SOCKS proxy
elif proxy.startswith('socks'):
if proxy[5] == '4':
proxy_type = socks.SOCKS4
else:
proxy_type = socks.SOCKS5
proxy = proxy[proxy.find('://') + 3:]
try:
proxy, port = proxy.split(':')
except ValueError:
port = None
socks.set_default_proxy(proxy_type, proxy, port)
socket.socket = socks.socksocket
# Handle generic HTTP proxy
else:
try:
proxy, port = proxy.split(':')
except ValueError:
port = None
socks.set_default_proxy(socks.HTTP, proxy, port)
socket.socket = socks.socksocket
downloaded = _download_helper(url)
if downloaded is not None:
return downloaded
# In case of running out of proxies, return (None, None)
return (None, None) | Download a PDF or DJVU document from a url, eventually using proxies.
:params url: The URL to the PDF/DJVU document to fetch.
:params proxies: An optional list of proxies to use. Proxies will be \
used sequentially. Proxies should be a list of proxy strings. \
Do not forget to include ``""`` (empty string) in the list if \
you want to try direct fetching without any proxy.
:returns: A tuple of the raw content of the downloaded data and its \
associated content-type. Returns ``(None, None)`` if it was \
unable to download the document.
>>> download("http://arxiv.org/pdf/1312.4006.pdf") # doctest: +SKIP | Below is the the instruction that describes the task:
### Input:
Download a PDF or DJVU document from a url, eventually using proxies.
:params url: The URL to the PDF/DJVU document to fetch.
:params proxies: An optional list of proxies to use. Proxies will be \
used sequentially. Proxies should be a list of proxy strings. \
Do not forget to include ``""`` (empty string) in the list if \
you want to try direct fetching without any proxy.
:returns: A tuple of the raw content of the downloaded data and its \
associated content-type. Returns ``(None, None)`` if it was \
unable to download the document.
>>> download("http://arxiv.org/pdf/1312.4006.pdf") # doctest: +SKIP
### Response:
def download(url, proxies=None):
"""
Download a PDF or DJVU document from a url, eventually using proxies.
:params url: The URL to the PDF/DJVU document to fetch.
:params proxies: An optional list of proxies to use. Proxies will be \
used sequentially. Proxies should be a list of proxy strings. \
Do not forget to include ``""`` (empty string) in the list if \
you want to try direct fetching without any proxy.
:returns: A tuple of the raw content of the downloaded data and its \
associated content-type. Returns ``(None, None)`` if it was \
unable to download the document.
>>> download("http://arxiv.org/pdf/1312.4006.pdf") # doctest: +SKIP
"""
# Handle default argument
if proxies is None:
proxies = [""]
# Loop over all available connections
for proxy in proxies:
# Handle no proxy case
if proxy == "":
socket.socket = DEFAULT_SOCKET
# Handle SOCKS proxy
elif proxy.startswith('socks'):
if proxy[5] == '4':
proxy_type = socks.SOCKS4
else:
proxy_type = socks.SOCKS5
proxy = proxy[proxy.find('://') + 3:]
try:
proxy, port = proxy.split(':')
except ValueError:
port = None
socks.set_default_proxy(proxy_type, proxy, port)
socket.socket = socks.socksocket
# Handle generic HTTP proxy
else:
try:
proxy, port = proxy.split(':')
except ValueError:
port = None
socks.set_default_proxy(socks.HTTP, proxy, port)
socket.socket = socks.socksocket
downloaded = _download_helper(url)
if downloaded is not None:
return downloaded
# In case of running out of proxies, return (None, None)
return (None, None) |
def serve_protected_file(request, path):
"""
Serve protected files to authenticated users with read permissions.
"""
path = path.rstrip('/')
try:
file_obj = File.objects.get(file=path)
except File.DoesNotExist:
raise Http404('File not found %s' % path)
if not file_obj.has_read_permission(request):
if settings.DEBUG:
raise PermissionDenied
else:
raise Http404('File not found %s' % path)
return server.serve(request, file_obj=file_obj.file, save_as=False) | Serve protected files to authenticated users with read permissions. | Below is the the instruction that describes the task:
### Input:
Serve protected files to authenticated users with read permissions.
### Response:
def serve_protected_file(request, path):
"""
Serve protected files to authenticated users with read permissions.
"""
path = path.rstrip('/')
try:
file_obj = File.objects.get(file=path)
except File.DoesNotExist:
raise Http404('File not found %s' % path)
if not file_obj.has_read_permission(request):
if settings.DEBUG:
raise PermissionDenied
else:
raise Http404('File not found %s' % path)
return server.serve(request, file_obj=file_obj.file, save_as=False) |
def close(self):
"""Closes the record and index files."""
if not self.is_open:
return
super(IndexCreator, self).close()
self.fidx.close() | Closes the record and index files. | Below is the the instruction that describes the task:
### Input:
Closes the record and index files.
### Response:
def close(self):
"""Closes the record and index files."""
if not self.is_open:
return
super(IndexCreator, self).close()
self.fidx.close() |
def start_meta(self, attrs):
"""Beautiful Soup can detect a charset included in a META tag,
try to convert the document to that charset, and re-parse the
document from the beginning."""
httpEquiv = None
contentType = None
contentTypeIndex = None
tagNeedsEncodingSubstitution = False
for i in range(0, len(attrs)):
key, value = attrs[i]
key = key.lower()
if key == 'http-equiv':
httpEquiv = value
elif key == 'content':
contentType = value
contentTypeIndex = i
if httpEquiv and contentType: # It's an interesting meta tag.
match = self.CHARSET_RE.search(contentType)
if match:
if (self.declaredHTMLEncoding is not None or
self.originalEncoding == self.fromEncoding):
# An HTML encoding was sniffed while converting
# the document to Unicode, or an HTML encoding was
# sniffed during a previous pass through the
# document, or an encoding was specified
# explicitly and it worked. Rewrite the meta tag.
def rewrite(match):
return match.group(1) + "%SOUP-ENCODING%"
newAttr = self.CHARSET_RE.sub(rewrite, contentType)
attrs[contentTypeIndex] = (attrs[contentTypeIndex][0],
newAttr)
tagNeedsEncodingSubstitution = True
else:
# This is our first pass through the document.
# Go through it again with the encoding information.
newCharset = match.group(3)
if newCharset and newCharset != self.originalEncoding:
self.declaredHTMLEncoding = newCharset
self._feed(self.declaredHTMLEncoding)
raise StopParsing
pass
tag = self.unknown_starttag("meta", attrs)
if tag and tagNeedsEncodingSubstitution:
tag.containsSubstitutions = True | Beautiful Soup can detect a charset included in a META tag,
try to convert the document to that charset, and re-parse the
document from the beginning. | Below is the the instruction that describes the task:
### Input:
Beautiful Soup can detect a charset included in a META tag,
try to convert the document to that charset, and re-parse the
document from the beginning.
### Response:
def start_meta(self, attrs):
"""Beautiful Soup can detect a charset included in a META tag,
try to convert the document to that charset, and re-parse the
document from the beginning."""
httpEquiv = None
contentType = None
contentTypeIndex = None
tagNeedsEncodingSubstitution = False
for i in range(0, len(attrs)):
key, value = attrs[i]
key = key.lower()
if key == 'http-equiv':
httpEquiv = value
elif key == 'content':
contentType = value
contentTypeIndex = i
if httpEquiv and contentType: # It's an interesting meta tag.
match = self.CHARSET_RE.search(contentType)
if match:
if (self.declaredHTMLEncoding is not None or
self.originalEncoding == self.fromEncoding):
# An HTML encoding was sniffed while converting
# the document to Unicode, or an HTML encoding was
# sniffed during a previous pass through the
# document, or an encoding was specified
# explicitly and it worked. Rewrite the meta tag.
def rewrite(match):
return match.group(1) + "%SOUP-ENCODING%"
newAttr = self.CHARSET_RE.sub(rewrite, contentType)
attrs[contentTypeIndex] = (attrs[contentTypeIndex][0],
newAttr)
tagNeedsEncodingSubstitution = True
else:
# This is our first pass through the document.
# Go through it again with the encoding information.
newCharset = match.group(3)
if newCharset and newCharset != self.originalEncoding:
self.declaredHTMLEncoding = newCharset
self._feed(self.declaredHTMLEncoding)
raise StopParsing
pass
tag = self.unknown_starttag("meta", attrs)
if tag and tagNeedsEncodingSubstitution:
tag.containsSubstitutions = True |
def wait(self, sensor_name, condition_or_value, timeout=5.0, quorum=None,
max_grace_period=1.0):
"""Wait for sensor present on all group clients to satisfy a condition.
Parameters
----------
sensor_name : string
The name of the sensor to check
condition_or_value : obj or callable, or seq of objs or callables
If obj, sensor.value is compared with obj. If callable,
condition_or_value(reading) is called, and must return True if its
condition is satisfied. Since the reading is passed in, the value,
status, timestamp or received_timestamp attributes can all be used
in the check.
timeout : float or None
The total timeout in seconds (None means wait forever)
quorum : None or int or float
The number of clients that are required to satisfy the condition,
as either an explicit integer or a float between 0 and 1 indicating
a fraction of the total number of clients, rounded up. If None,
this means that all clients are required (the default). Be warned
that a value of 1.0 (float) indicates all clients while a value
of 1 (int) indicates a single client...
max_grace_period : float or None
After a quorum or initial timeout is reached, wait up to this long
in an attempt to get the rest of the clients to satisfy condition
as well (achieving effectively a full quorum if all clients behave)
Returns
-------
This command returns a tornado Future that resolves with True when a
quorum of clients satisfy the sensor condition, or False if a quorum
is not reached after a given timeout period (including a grace period).
Raises
------
:class:`KATCPSensorError`
If any of the sensors do not have a strategy set, or if the named
sensor is not present
"""
if quorum is None:
quorum = len(self.clients)
elif quorum > 1:
if not isinstance(quorum, int):
raise TypeError('Quorum parameter %r must be an integer '
'if outside range [0, 1]' % (quorum,))
elif isinstance(quorum, float):
quorum = int(math.ceil(quorum * len(self.clients)))
if timeout and max_grace_period:
# Avoid having a grace period longer than or equal to timeout
grace_period = min(max_grace_period, timeout / 2.)
initial_timeout = timeout - grace_period
else:
grace_period = max_grace_period
initial_timeout = timeout
# Build dict of futures instead of list as this will be easier to debug
futures = {}
for client in self.clients:
f = client.wait(sensor_name, condition_or_value, initial_timeout)
futures[client.name] = f
# No timeout required here as all futures will resolve after timeout
initial_results = yield until_some(done_at_least=quorum, **futures)
results = dict(initial_results)
# Identify stragglers and let them all respond within grace period
stragglers = {}
for client in self.clients:
if not results.get(client.name, False):
f = client.wait(sensor_name, condition_or_value, grace_period)
stragglers[client.name] = f
rest_of_results = yield until_some(**stragglers)
results.update(dict(rest_of_results))
class TestableDict(dict):
"""Dictionary of results that can be tested for overall success."""
def __bool__(self):
return sum(self.values()) >= quorum
# Was not handled automatrically by futurize, see
# https://github.com/PythonCharmers/python-future/issues/282
if sys.version_info[0] == 2:
__nonzero__ = __bool__
raise tornado.gen.Return(TestableDict(results)) | Wait for sensor present on all group clients to satisfy a condition.
Parameters
----------
sensor_name : string
The name of the sensor to check
condition_or_value : obj or callable, or seq of objs or callables
If obj, sensor.value is compared with obj. If callable,
condition_or_value(reading) is called, and must return True if its
condition is satisfied. Since the reading is passed in, the value,
status, timestamp or received_timestamp attributes can all be used
in the check.
timeout : float or None
The total timeout in seconds (None means wait forever)
quorum : None or int or float
The number of clients that are required to satisfy the condition,
as either an explicit integer or a float between 0 and 1 indicating
a fraction of the total number of clients, rounded up. If None,
this means that all clients are required (the default). Be warned
that a value of 1.0 (float) indicates all clients while a value
of 1 (int) indicates a single client...
max_grace_period : float or None
After a quorum or initial timeout is reached, wait up to this long
in an attempt to get the rest of the clients to satisfy condition
as well (achieving effectively a full quorum if all clients behave)
Returns
-------
This command returns a tornado Future that resolves with True when a
quorum of clients satisfy the sensor condition, or False if a quorum
is not reached after a given timeout period (including a grace period).
Raises
------
:class:`KATCPSensorError`
If any of the sensors do not have a strategy set, or if the named
sensor is not present | Below is the the instruction that describes the task:
### Input:
Wait for sensor present on all group clients to satisfy a condition.
Parameters
----------
sensor_name : string
The name of the sensor to check
condition_or_value : obj or callable, or seq of objs or callables
If obj, sensor.value is compared with obj. If callable,
condition_or_value(reading) is called, and must return True if its
condition is satisfied. Since the reading is passed in, the value,
status, timestamp or received_timestamp attributes can all be used
in the check.
timeout : float or None
The total timeout in seconds (None means wait forever)
quorum : None or int or float
The number of clients that are required to satisfy the condition,
as either an explicit integer or a float between 0 and 1 indicating
a fraction of the total number of clients, rounded up. If None,
this means that all clients are required (the default). Be warned
that a value of 1.0 (float) indicates all clients while a value
of 1 (int) indicates a single client...
max_grace_period : float or None
After a quorum or initial timeout is reached, wait up to this long
in an attempt to get the rest of the clients to satisfy condition
as well (achieving effectively a full quorum if all clients behave)
Returns
-------
This command returns a tornado Future that resolves with True when a
quorum of clients satisfy the sensor condition, or False if a quorum
is not reached after a given timeout period (including a grace period).
Raises
------
:class:`KATCPSensorError`
If any of the sensors do not have a strategy set, or if the named
sensor is not present
### Response:
def wait(self, sensor_name, condition_or_value, timeout=5.0, quorum=None,
max_grace_period=1.0):
"""Wait for sensor present on all group clients to satisfy a condition.
Parameters
----------
sensor_name : string
The name of the sensor to check
condition_or_value : obj or callable, or seq of objs or callables
If obj, sensor.value is compared with obj. If callable,
condition_or_value(reading) is called, and must return True if its
condition is satisfied. Since the reading is passed in, the value,
status, timestamp or received_timestamp attributes can all be used
in the check.
timeout : float or None
The total timeout in seconds (None means wait forever)
quorum : None or int or float
The number of clients that are required to satisfy the condition,
as either an explicit integer or a float between 0 and 1 indicating
a fraction of the total number of clients, rounded up. If None,
this means that all clients are required (the default). Be warned
that a value of 1.0 (float) indicates all clients while a value
of 1 (int) indicates a single client...
max_grace_period : float or None
After a quorum or initial timeout is reached, wait up to this long
in an attempt to get the rest of the clients to satisfy condition
as well (achieving effectively a full quorum if all clients behave)
Returns
-------
This command returns a tornado Future that resolves with True when a
quorum of clients satisfy the sensor condition, or False if a quorum
is not reached after a given timeout period (including a grace period).
Raises
------
:class:`KATCPSensorError`
If any of the sensors do not have a strategy set, or if the named
sensor is not present
"""
if quorum is None:
quorum = len(self.clients)
elif quorum > 1:
if not isinstance(quorum, int):
raise TypeError('Quorum parameter %r must be an integer '
'if outside range [0, 1]' % (quorum,))
elif isinstance(quorum, float):
quorum = int(math.ceil(quorum * len(self.clients)))
if timeout and max_grace_period:
# Avoid having a grace period longer than or equal to timeout
grace_period = min(max_grace_period, timeout / 2.)
initial_timeout = timeout - grace_period
else:
grace_period = max_grace_period
initial_timeout = timeout
# Build dict of futures instead of list as this will be easier to debug
futures = {}
for client in self.clients:
f = client.wait(sensor_name, condition_or_value, initial_timeout)
futures[client.name] = f
# No timeout required here as all futures will resolve after timeout
initial_results = yield until_some(done_at_least=quorum, **futures)
results = dict(initial_results)
# Identify stragglers and let them all respond within grace period
stragglers = {}
for client in self.clients:
if not results.get(client.name, False):
f = client.wait(sensor_name, condition_or_value, grace_period)
stragglers[client.name] = f
rest_of_results = yield until_some(**stragglers)
results.update(dict(rest_of_results))
class TestableDict(dict):
"""Dictionary of results that can be tested for overall success."""
def __bool__(self):
return sum(self.values()) >= quorum
# Was not handled automatrically by futurize, see
# https://github.com/PythonCharmers/python-future/issues/282
if sys.version_info[0] == 2:
__nonzero__ = __bool__
raise tornado.gen.Return(TestableDict(results)) |
async def read_message(self) -> Optional[Data]:
"""
Read a single message from the connection.
Re-assemble data frames if the message is fragmented.
Return ``None`` when the closing handshake is started.
"""
frame = await self.read_data_frame(max_size=self.max_size)
# A close frame was received.
if frame is None:
return None
if frame.opcode == OP_TEXT:
text = True
elif frame.opcode == OP_BINARY:
text = False
else: # frame.opcode == OP_CONT
raise WebSocketProtocolError("Unexpected opcode")
# Shortcut for the common case - no fragmentation
if frame.fin:
return frame.data.decode("utf-8") if text else frame.data
# 5.4. Fragmentation
chunks: List[Data] = []
max_size = self.max_size
if text:
decoder_factory = codecs.getincrementaldecoder("utf-8")
# https://github.com/python/typeshed/pull/2752
decoder = decoder_factory(errors="strict") # type: ignore
if max_size is None:
def append(frame: Frame) -> None:
nonlocal chunks
chunks.append(decoder.decode(frame.data, frame.fin))
else:
def append(frame: Frame) -> None:
nonlocal chunks, max_size
chunks.append(decoder.decode(frame.data, frame.fin))
max_size -= len(frame.data)
else:
if max_size is None:
def append(frame: Frame) -> None:
nonlocal chunks
chunks.append(frame.data)
else:
def append(frame: Frame) -> None:
nonlocal chunks, max_size
chunks.append(frame.data)
max_size -= len(frame.data)
append(frame)
while not frame.fin:
frame = await self.read_data_frame(max_size=max_size)
if frame is None:
raise WebSocketProtocolError("Incomplete fragmented message")
if frame.opcode != OP_CONT:
raise WebSocketProtocolError("Unexpected opcode")
append(frame)
# mypy cannot figure out that chunks have the proper type.
return ("" if text else b"").join(chunks) | Read a single message from the connection.
Re-assemble data frames if the message is fragmented.
Return ``None`` when the closing handshake is started. | Below is the the instruction that describes the task:
### Input:
Read a single message from the connection.
Re-assemble data frames if the message is fragmented.
Return ``None`` when the closing handshake is started.
### Response:
async def read_message(self) -> Optional[Data]:
"""
Read a single message from the connection.
Re-assemble data frames if the message is fragmented.
Return ``None`` when the closing handshake is started.
"""
frame = await self.read_data_frame(max_size=self.max_size)
# A close frame was received.
if frame is None:
return None
if frame.opcode == OP_TEXT:
text = True
elif frame.opcode == OP_BINARY:
text = False
else: # frame.opcode == OP_CONT
raise WebSocketProtocolError("Unexpected opcode")
# Shortcut for the common case - no fragmentation
if frame.fin:
return frame.data.decode("utf-8") if text else frame.data
# 5.4. Fragmentation
chunks: List[Data] = []
max_size = self.max_size
if text:
decoder_factory = codecs.getincrementaldecoder("utf-8")
# https://github.com/python/typeshed/pull/2752
decoder = decoder_factory(errors="strict") # type: ignore
if max_size is None:
def append(frame: Frame) -> None:
nonlocal chunks
chunks.append(decoder.decode(frame.data, frame.fin))
else:
def append(frame: Frame) -> None:
nonlocal chunks, max_size
chunks.append(decoder.decode(frame.data, frame.fin))
max_size -= len(frame.data)
else:
if max_size is None:
def append(frame: Frame) -> None:
nonlocal chunks
chunks.append(frame.data)
else:
def append(frame: Frame) -> None:
nonlocal chunks, max_size
chunks.append(frame.data)
max_size -= len(frame.data)
append(frame)
while not frame.fin:
frame = await self.read_data_frame(max_size=max_size)
if frame is None:
raise WebSocketProtocolError("Incomplete fragmented message")
if frame.opcode != OP_CONT:
raise WebSocketProtocolError("Unexpected opcode")
append(frame)
# mypy cannot figure out that chunks have the proper type.
return ("" if text else b"").join(chunks) |
def admin_required(function=None):
"""
Decorator for views that checks that the user is an administrator,
redirecting to the log-in page if necessary.
"""
def check_perms(user):
# if user not logged in, show login form
if not user.is_authenticated:
return False
# if this site doesn't allow admin access, fail
if settings.ADMIN_IGNORED:
raise PermissionDenied
# check if the user has admin rights
if not user.is_admin:
raise PermissionDenied
return True
actual_decorator = user_passes_test(check_perms, login_url=_login_url)
if function:
return actual_decorator(function)
return actual_decorator | Decorator for views that checks that the user is an administrator,
redirecting to the log-in page if necessary. | Below is the the instruction that describes the task:
### Input:
Decorator for views that checks that the user is an administrator,
redirecting to the log-in page if necessary.
### Response:
def admin_required(function=None):
"""
Decorator for views that checks that the user is an administrator,
redirecting to the log-in page if necessary.
"""
def check_perms(user):
# if user not logged in, show login form
if not user.is_authenticated:
return False
# if this site doesn't allow admin access, fail
if settings.ADMIN_IGNORED:
raise PermissionDenied
# check if the user has admin rights
if not user.is_admin:
raise PermissionDenied
return True
actual_decorator = user_passes_test(check_perms, login_url=_login_url)
if function:
return actual_decorator(function)
return actual_decorator |
def read_examples(input_file):
"""Read a list of `InputExample`s from an input file."""
examples = []
unique_id = 0
with open(input_file, "r", encoding='utf-8') as reader:
while True:
line = reader.readline()
if not line:
break
line = line.strip()
text_a = None
text_b = None
m = re.match(r"^(.*) \|\|\| (.*)$", line)
if m is None:
text_a = line
else:
text_a = m.group(1)
text_b = m.group(2)
examples.append(
InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b))
unique_id += 1
return examples | Read a list of `InputExample`s from an input file. | Below is the the instruction that describes the task:
### Input:
Read a list of `InputExample`s from an input file.
### Response:
def read_examples(input_file):
"""Read a list of `InputExample`s from an input file."""
examples = []
unique_id = 0
with open(input_file, "r", encoding='utf-8') as reader:
while True:
line = reader.readline()
if not line:
break
line = line.strip()
text_a = None
text_b = None
m = re.match(r"^(.*) \|\|\| (.*)$", line)
if m is None:
text_a = line
else:
text_a = m.group(1)
text_b = m.group(2)
examples.append(
InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b))
unique_id += 1
return examples |
def _get_path_from_parent(self, parent):
"""
Return a list of PathInfos containing the path from the parent
model to the current model, or an empty list if parent is not a
parent of the current model.
"""
if hasattr(self, 'get_path_from_parent'):
return self.get_path_from_parent(parent)
if self.model is parent:
return []
model = self.concrete_model
# Get a reversed base chain including both the current and parent
# models.
chain = model._meta.get_base_chain(parent) or []
chain.reverse()
chain.append(model)
# Construct a list of the PathInfos between models in chain.
path = []
for i, ancestor in enumerate(chain[:-1]):
child = chain[i + 1]
link = child._meta.get_ancestor_link(ancestor)
path.extend(link.get_reverse_path_info())
return path | Return a list of PathInfos containing the path from the parent
model to the current model, or an empty list if parent is not a
parent of the current model. | Below is the the instruction that describes the task:
### Input:
Return a list of PathInfos containing the path from the parent
model to the current model, or an empty list if parent is not a
parent of the current model.
### Response:
def _get_path_from_parent(self, parent):
"""
Return a list of PathInfos containing the path from the parent
model to the current model, or an empty list if parent is not a
parent of the current model.
"""
if hasattr(self, 'get_path_from_parent'):
return self.get_path_from_parent(parent)
if self.model is parent:
return []
model = self.concrete_model
# Get a reversed base chain including both the current and parent
# models.
chain = model._meta.get_base_chain(parent) or []
chain.reverse()
chain.append(model)
# Construct a list of the PathInfos between models in chain.
path = []
for i, ancestor in enumerate(chain[:-1]):
child = chain[i + 1]
link = child._meta.get_ancestor_link(ancestor)
path.extend(link.get_reverse_path_info())
return path |
def _decodeRelativeValidityPeriod(tpVp):
""" Calculates the relative SMS validity period (based on the table in section 9.2.3.12 of GSM 03.40)
:rtype: datetime.timedelta
"""
if tpVp <= 143:
return timedelta(minutes=((tpVp + 1) * 5))
elif 144 <= tpVp <= 167:
return timedelta(hours=12, minutes=((tpVp - 143) * 30))
elif 168 <= tpVp <= 196:
return timedelta(days=(tpVp - 166))
elif 197 <= tpVp <= 255:
return timedelta(weeks=(tpVp - 192))
else:
raise ValueError('tpVp must be in range [0, 255]') | Calculates the relative SMS validity period (based on the table in section 9.2.3.12 of GSM 03.40)
:rtype: datetime.timedelta | Below is the the instruction that describes the task:
### Input:
Calculates the relative SMS validity period (based on the table in section 9.2.3.12 of GSM 03.40)
:rtype: datetime.timedelta
### Response:
def _decodeRelativeValidityPeriod(tpVp):
""" Calculates the relative SMS validity period (based on the table in section 9.2.3.12 of GSM 03.40)
:rtype: datetime.timedelta
"""
if tpVp <= 143:
return timedelta(minutes=((tpVp + 1) * 5))
elif 144 <= tpVp <= 167:
return timedelta(hours=12, minutes=((tpVp - 143) * 30))
elif 168 <= tpVp <= 196:
return timedelta(days=(tpVp - 166))
elif 197 <= tpVp <= 255:
return timedelta(weeks=(tpVp - 192))
else:
raise ValueError('tpVp must be in range [0, 255]') |
def on_batch_begin(self, batch_info: BatchInfo):
""" Set proper learning rate """
cycle_length = self.cycle_lengths[batch_info.local_epoch_number - 1]
cycle_start = self.cycle_starts[batch_info.local_epoch_number - 1]
numerator = (batch_info.local_epoch_number - cycle_start - 1) * batch_info.batches_per_epoch + batch_info.batch_number
denominator = cycle_length * batch_info.batches_per_epoch
interpolation_number = numerator / denominator
if cycle_start == 0 and numerator < self.init_iter:
lr = self.init_lr
else:
if isinstance(self.max_lr, list):
lr = [interp.interpolate_single(max_lr, min_lr, interpolation_number, how=self.interpolate) for max_lr, min_lr in zip(self.max_lr, self.min_lr)]
else:
lr = interp.interpolate_single(self.max_lr, self.min_lr, interpolation_number, how=self.interpolate)
self.set_lr(lr) | Set proper learning rate | Below is the the instruction that describes the task:
### Input:
Set proper learning rate
### Response:
def on_batch_begin(self, batch_info: BatchInfo):
""" Set proper learning rate """
cycle_length = self.cycle_lengths[batch_info.local_epoch_number - 1]
cycle_start = self.cycle_starts[batch_info.local_epoch_number - 1]
numerator = (batch_info.local_epoch_number - cycle_start - 1) * batch_info.batches_per_epoch + batch_info.batch_number
denominator = cycle_length * batch_info.batches_per_epoch
interpolation_number = numerator / denominator
if cycle_start == 0 and numerator < self.init_iter:
lr = self.init_lr
else:
if isinstance(self.max_lr, list):
lr = [interp.interpolate_single(max_lr, min_lr, interpolation_number, how=self.interpolate) for max_lr, min_lr in zip(self.max_lr, self.min_lr)]
else:
lr = interp.interpolate_single(self.max_lr, self.min_lr, interpolation_number, how=self.interpolate)
self.set_lr(lr) |
def create_key_index_object(key_index_name: str, iterables: Dict[str, Any]) -> Any:
""" Create a ``KeyIndex`` class based on the passed attributes.
This is wrapped into a helper function to allow for the ``__itter__`` to be specified for the object.
Further, this allows it to be called outside the package when it is needed in analysis tasks..
Args:
key_index_name: Name of the iterable key index.
iterables: Iterables which will be specified by this ``KeyIndex``. The keys should be the names of
the values, while the values should be the iterables themselves.
Returns:
A ``KeyIndex`` class which can be used to specify an object. The keys and values will be iterable.
Raises:
TypeError: If one of the iterables which is passed is an iterator that can be exhausted. The iterables
must all be passed within containers which can recreate the iterator each time it is called to
iterate.
"""
# Validation
# We are going to use the iterators when determining the fields, so we need to notify if an iterator was
# passed, as this will cause a problem later. Instead of passing an iterator, a iterable should be passed,
# which can recreate the iter.
# See: https://effectivepython.com/2015/01/03/be-defensive-when-iterating-over-arguments/
for name, iterable in iterables.items():
if iter(iterable) == iter(iterable):
raise TypeError(
f"Iterable {name} is in iterator which can be exhausted. Please pass the iterable"
f" in a container that can recreate the iterable. See the comments here for more info."
)
# We need the types of the fields to create the dataclass. However, we are provided with iterables
# in the values of the iterables dict. Thus, we need to look at one value of each iterable, and use
# that to determine the type of that particular iterable. This is safe to do because the iterables
# must always have at least one entry (or else they wouldn't be one of the iterables).
# NOTE: The order here matters when we create the ``KeyIndex`` later, so we cannot just take all
# objects from the iterables and blindly use set because set won't preserve the order.
fields = [(name, type(next(iter(iterable)))) for name, iterable in iterables.items()]
KeyIndex = dataclasses.make_dataclass(
key_index_name,
fields,
frozen = True
)
# Allow for iteration over the key index values
KeyIndex.__iter__ = _key_index_iter
return KeyIndex | Create a ``KeyIndex`` class based on the passed attributes.
This is wrapped into a helper function to allow for the ``__itter__`` to be specified for the object.
Further, this allows it to be called outside the package when it is needed in analysis tasks..
Args:
key_index_name: Name of the iterable key index.
iterables: Iterables which will be specified by this ``KeyIndex``. The keys should be the names of
the values, while the values should be the iterables themselves.
Returns:
A ``KeyIndex`` class which can be used to specify an object. The keys and values will be iterable.
Raises:
TypeError: If one of the iterables which is passed is an iterator that can be exhausted. The iterables
must all be passed within containers which can recreate the iterator each time it is called to
iterate. | Below is the the instruction that describes the task:
### Input:
Create a ``KeyIndex`` class based on the passed attributes.
This is wrapped into a helper function to allow for the ``__itter__`` to be specified for the object.
Further, this allows it to be called outside the package when it is needed in analysis tasks..
Args:
key_index_name: Name of the iterable key index.
iterables: Iterables which will be specified by this ``KeyIndex``. The keys should be the names of
the values, while the values should be the iterables themselves.
Returns:
A ``KeyIndex`` class which can be used to specify an object. The keys and values will be iterable.
Raises:
TypeError: If one of the iterables which is passed is an iterator that can be exhausted. The iterables
must all be passed within containers which can recreate the iterator each time it is called to
iterate.
### Response:
def create_key_index_object(key_index_name: str, iterables: Dict[str, Any]) -> Any:
""" Create a ``KeyIndex`` class based on the passed attributes.
This is wrapped into a helper function to allow for the ``__itter__`` to be specified for the object.
Further, this allows it to be called outside the package when it is needed in analysis tasks..
Args:
key_index_name: Name of the iterable key index.
iterables: Iterables which will be specified by this ``KeyIndex``. The keys should be the names of
the values, while the values should be the iterables themselves.
Returns:
A ``KeyIndex`` class which can be used to specify an object. The keys and values will be iterable.
Raises:
TypeError: If one of the iterables which is passed is an iterator that can be exhausted. The iterables
must all be passed within containers which can recreate the iterator each time it is called to
iterate.
"""
# Validation
# We are going to use the iterators when determining the fields, so we need to notify if an iterator was
# passed, as this will cause a problem later. Instead of passing an iterator, a iterable should be passed,
# which can recreate the iter.
# See: https://effectivepython.com/2015/01/03/be-defensive-when-iterating-over-arguments/
for name, iterable in iterables.items():
if iter(iterable) == iter(iterable):
raise TypeError(
f"Iterable {name} is in iterator which can be exhausted. Please pass the iterable"
f" in a container that can recreate the iterable. See the comments here for more info."
)
# We need the types of the fields to create the dataclass. However, we are provided with iterables
# in the values of the iterables dict. Thus, we need to look at one value of each iterable, and use
# that to determine the type of that particular iterable. This is safe to do because the iterables
# must always have at least one entry (or else they wouldn't be one of the iterables).
# NOTE: The order here matters when we create the ``KeyIndex`` later, so we cannot just take all
# objects from the iterables and blindly use set because set won't preserve the order.
fields = [(name, type(next(iter(iterable)))) for name, iterable in iterables.items()]
KeyIndex = dataclasses.make_dataclass(
key_index_name,
fields,
frozen = True
)
# Allow for iteration over the key index values
KeyIndex.__iter__ = _key_index_iter
return KeyIndex |
def managedcloud(vm_):
'''
Determine if we should wait for the managed cloud automation before
running. Either 'False' (default) or 'True'.
'''
return config.get_cloud_config_value(
'managedcloud', vm_, __opts__, default=False,
search_global=False
) | Determine if we should wait for the managed cloud automation before
running. Either 'False' (default) or 'True'. | Below is the the instruction that describes the task:
### Input:
Determine if we should wait for the managed cloud automation before
running. Either 'False' (default) or 'True'.
### Response:
def managedcloud(vm_):
'''
Determine if we should wait for the managed cloud automation before
running. Either 'False' (default) or 'True'.
'''
return config.get_cloud_config_value(
'managedcloud', vm_, __opts__, default=False,
search_global=False
) |
def alpha_(self,x):
""" Create a mappable function alpha to apply to each xmin in a list of xmins.
This is essentially the slow version of fplfit/cplfit, though I bet it could
be speeded up with a clever use of parellel_map. Not intended to be used by users."""
def alpha(xmin,x=x):
"""
given a sorted data set and a minimum, returns power law MLE fit
data is passed as a keyword parameter so that it can be vectorized
"""
x = [i for i in x if i>=xmin]
n = sum(x)
divsum = sum([math.log(i/xmin) for i in x])
if divsum == 0:
return float('inf')
# the "1+" here is unimportant because alpha_ is only used for minimization
a = 1 + float(n) / divsum
return a
return alpha | Create a mappable function alpha to apply to each xmin in a list of xmins.
This is essentially the slow version of fplfit/cplfit, though I bet it could
be speeded up with a clever use of parellel_map. Not intended to be used by users. | Below is the the instruction that describes the task:
### Input:
Create a mappable function alpha to apply to each xmin in a list of xmins.
This is essentially the slow version of fplfit/cplfit, though I bet it could
be speeded up with a clever use of parellel_map. Not intended to be used by users.
### Response:
def alpha_(self,x):
""" Create a mappable function alpha to apply to each xmin in a list of xmins.
This is essentially the slow version of fplfit/cplfit, though I bet it could
be speeded up with a clever use of parellel_map. Not intended to be used by users."""
def alpha(xmin,x=x):
"""
given a sorted data set and a minimum, returns power law MLE fit
data is passed as a keyword parameter so that it can be vectorized
"""
x = [i for i in x if i>=xmin]
n = sum(x)
divsum = sum([math.log(i/xmin) for i in x])
if divsum == 0:
return float('inf')
# the "1+" here is unimportant because alpha_ is only used for minimization
a = 1 + float(n) / divsum
return a
return alpha |
def from_dict(self, document):
"""Create experiment object from JSON document retrieved from database.
Parameters
----------
document : JSON
Json document in database
Returns
-------
ExperimentHandle
Handle for experiment object
"""
identifier = str(document['_id'])
active = document['active']
timestamp = datetime.datetime.strptime(document['timestamp'], '%Y-%m-%dT%H:%M:%S.%f')
properties = document['properties']
subject_id = document['subject']
image_group_id = document['images']
fmri_data_id = document['fmri'] if 'fmri' in document else None
return ExperimentHandle(
identifier,
properties,
subject_id,
image_group_id,
fmri_data_id=fmri_data_id,
timestamp=timestamp,
is_active=active
) | Create experiment object from JSON document retrieved from database.
Parameters
----------
document : JSON
Json document in database
Returns
-------
ExperimentHandle
Handle for experiment object | Below is the the instruction that describes the task:
### Input:
Create experiment object from JSON document retrieved from database.
Parameters
----------
document : JSON
Json document in database
Returns
-------
ExperimentHandle
Handle for experiment object
### Response:
def from_dict(self, document):
"""Create experiment object from JSON document retrieved from database.
Parameters
----------
document : JSON
Json document in database
Returns
-------
ExperimentHandle
Handle for experiment object
"""
identifier = str(document['_id'])
active = document['active']
timestamp = datetime.datetime.strptime(document['timestamp'], '%Y-%m-%dT%H:%M:%S.%f')
properties = document['properties']
subject_id = document['subject']
image_group_id = document['images']
fmri_data_id = document['fmri'] if 'fmri' in document else None
return ExperimentHandle(
identifier,
properties,
subject_id,
image_group_id,
fmri_data_id=fmri_data_id,
timestamp=timestamp,
is_active=active
) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.