code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def decode_response(client_message, to_object=None):
""" Decode response from client message"""
parameters = dict(response=None)
response_size = client_message.read_int()
response = []
for _ in range(0, response_size):
response_item = client_message.read_data()
response.append(response_item)
parameters['response'] = ImmutableLazyDataList(response, to_object)
return parameters | Decode response from client message | Below is the the instruction that describes the task:
### Input:
Decode response from client message
### Response:
def decode_response(client_message, to_object=None):
    """Decode a response from *client_message*.

    Reads an int item count followed by that many data items, and wraps
    them in an ``ImmutableLazyDataList`` under the ``'response'`` key.
    """
    count = client_message.read_int()
    items = [client_message.read_data() for _ in range(count)]
    return dict(response=ImmutableLazyDataList(items, to_object))
def _init_from_npy2d(self, mat, missing):
"""
Initialize data from a 2-D numpy matrix.
"""
if len(mat.shape) != 2:
raise ValueError('Input numpy.ndarray must be 2 dimensional')
data = np.array(mat.reshape(mat.size), dtype=np.float32)
self.handle = ctypes.c_void_p()
_check_call(_LIB.XGDMatrixCreateFromMat(data.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
mat.shape[0], mat.shape[1],
ctypes.c_float(missing),
ctypes.byref(self.handle))) | Initialize data from a 2-D numpy matrix. | Below is the the instruction that describes the task:
### Input:
Initialize data from a 2-D numpy matrix.
### Response:
def _init_from_npy2d(self, mat, missing):
    """
    Initialize data from a 2-D numpy matrix.

    Flattens *mat* (row-major) into a contiguous float32 buffer and passes
    it to the native ``XGDMatrixCreateFromMat``, storing the resulting
    native handle on ``self.handle``.
    """
    if len(mat.shape) != 2:
        raise ValueError('Input numpy.ndarray must be 2 dimensional')
    # Copy into float32 so the C side sees the layout/dtype it expects.
    flat = np.array(mat.reshape(mat.size), dtype=np.float32)
    self.handle = ctypes.c_void_p()
    nrow, ncol = mat.shape
    _check_call(
        _LIB.XGDMatrixCreateFromMat(
            flat.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
            nrow, ncol,
            ctypes.c_float(missing),
            ctypes.byref(self.handle)))
def path_ndir_split(path_, n, force_unix=True, winroot='C:', trailing=True):
r"""
Shows only a little bit of the path. Up to the n bottom-level directories
TODO: rename to path_tail? ndir_split?
Returns:
(str) the trailing n paths of path.
CommandLine:
python3 -m utool.util_path --test-path_ndir_split
python3 -m utool --tf path_ndir_split
python -m utool --tf path_ndir_split
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> paths = [r'/usr/bin/local/foo/bar',
... r'C:/',
... #r'lonerel',
... #r'reldir/other',
... r'/ham',
... r'./eggs',
... r'/spam/eggs',
... r'C:\Program Files (x86)/foobar/bin']
>>> N = 2
>>> iter_ = ut.iprod(paths, range(1, N + 1))
>>> force_unix = True
>>> tuplist = [(n, path_ndir_split(path_, n)) for path_, n in iter_]
>>> chunklist = list(ut.ichunks(tuplist, N))
>>> list_ = [['n=%r: %s' % (x, ut.reprfunc(y)) for x, y in chunk]
>>> for chunk in chunklist]
>>> line_list = [', '.join(strs) for strs in list_]
>>> result = '\n'.join(line_list)
>>> print(result)
n=1: '.../bar', n=2: '.../foo/bar'
n=1: 'C:/', n=2: 'C:/'
n=1: '.../ham', n=2: '/ham'
n=1: '.../eggs', n=2: './eggs'
n=1: '.../eggs', n=2: '.../spam/eggs'
n=1: '.../bin', n=2: '.../foobar/bin'
"""
if not isinstance(path_, six.string_types):
# Probably given a file pointer
return path_
if n is None:
cplat_path = ensure_crossplat_path(path_)
elif n == 0:
cplat_path = ''
else:
sep = '/' if force_unix else os.sep
ndirs_list = []
head = path_
reached_end = False
for nx in range(n):
head, tail = split(head)
if tail == '':
if head == '':
reached_end = True
else:
root = head if len(ndirs_list) == 0 else head.strip('\\/')
ndirs_list.append(root)
reached_end = True
break
else:
ndirs_list.append(tail)
if trailing and not reached_end:
head, tail = split(head)
if len(tail) == 0:
if len(head) == 0: # or head == '/':
reached_end = True
ndirs = sep.join(ndirs_list[::-1])
cplat_path = ensure_crossplat_path(ndirs)
#if trailing and not reached_end:
if trailing and not reached_end:
cplat_path = '.../' + cplat_path
return cplat_path | r"""
Shows only a little bit of the path. Up to the n bottom-level directories
TODO: rename to path_tail? ndir_split?
Returns:
(str) the trailing n paths of path.
CommandLine:
python3 -m utool.util_path --test-path_ndir_split
python3 -m utool --tf path_ndir_split
python -m utool --tf path_ndir_split
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> paths = [r'/usr/bin/local/foo/bar',
... r'C:/',
... #r'lonerel',
... #r'reldir/other',
... r'/ham',
... r'./eggs',
... r'/spam/eggs',
... r'C:\Program Files (x86)/foobar/bin']
>>> N = 2
>>> iter_ = ut.iprod(paths, range(1, N + 1))
>>> force_unix = True
>>> tuplist = [(n, path_ndir_split(path_, n)) for path_, n in iter_]
>>> chunklist = list(ut.ichunks(tuplist, N))
>>> list_ = [['n=%r: %s' % (x, ut.reprfunc(y)) for x, y in chunk]
>>> for chunk in chunklist]
>>> line_list = [', '.join(strs) for strs in list_]
>>> result = '\n'.join(line_list)
>>> print(result)
n=1: '.../bar', n=2: '.../foo/bar'
n=1: 'C:/', n=2: 'C:/'
n=1: '.../ham', n=2: '/ham'
n=1: '.../eggs', n=2: './eggs'
n=1: '.../eggs', n=2: '.../spam/eggs'
n=1: '.../bin', n=2: '.../foobar/bin' | Below is the the instruction that describes the task:
### Input:
r"""
Shows only a little bit of the path. Up to the n bottom-level directories
TODO: rename to path_tail? ndir_split?
Returns:
(str) the trailing n paths of path.
CommandLine:
python3 -m utool.util_path --test-path_ndir_split
python3 -m utool --tf path_ndir_split
python -m utool --tf path_ndir_split
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> paths = [r'/usr/bin/local/foo/bar',
... r'C:/',
... #r'lonerel',
... #r'reldir/other',
... r'/ham',
... r'./eggs',
... r'/spam/eggs',
... r'C:\Program Files (x86)/foobar/bin']
>>> N = 2
>>> iter_ = ut.iprod(paths, range(1, N + 1))
>>> force_unix = True
>>> tuplist = [(n, path_ndir_split(path_, n)) for path_, n in iter_]
>>> chunklist = list(ut.ichunks(tuplist, N))
>>> list_ = [['n=%r: %s' % (x, ut.reprfunc(y)) for x, y in chunk]
>>> for chunk in chunklist]
>>> line_list = [', '.join(strs) for strs in list_]
>>> result = '\n'.join(line_list)
>>> print(result)
n=1: '.../bar', n=2: '.../foo/bar'
n=1: 'C:/', n=2: 'C:/'
n=1: '.../ham', n=2: '/ham'
n=1: '.../eggs', n=2: './eggs'
n=1: '.../eggs', n=2: '.../spam/eggs'
n=1: '.../bin', n=2: '.../foobar/bin'
### Response:
def path_ndir_split(path_, n, force_unix=True, winroot='C:', trailing=True):
    r"""
    Shows only a little bit of the path. Up to the n bottom-level directories
    TODO: rename to path_tail? ndir_split?

    Args:
        path_ (str): path to truncate (non-strings are returned unchanged,
            on the assumption they are file pointers)
        n (int or None): number of trailing path components to keep;
            None keeps the whole path, 0 yields ''
        force_unix (bool): join components with '/' instead of os.sep
        winroot (str): unused here — presumably kept for interface
            compatibility with callers; TODO confirm
        trailing (bool): prefix '.../' when the path was actually truncated

    Returns:
        (str) the trailing n paths of path.
    CommandLine:
        python3 -m utool.util_path --test-path_ndir_split
        python3 -m utool --tf path_ndir_split
        python -m utool --tf path_ndir_split
    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_path import *  # NOQA
        >>> import utool as ut
        >>> paths = [r'/usr/bin/local/foo/bar',
        ...          r'C:/',
        ...          #r'lonerel',
        ...          #r'reldir/other',
        ...          r'/ham',
        ...          r'./eggs',
        ...          r'/spam/eggs',
        ...          r'C:\Program Files (x86)/foobar/bin']
        >>> N = 2
        >>> iter_ = ut.iprod(paths, range(1, N + 1))
        >>> force_unix = True
        >>> tuplist = [(n, path_ndir_split(path_, n)) for path_, n in iter_]
        >>> chunklist = list(ut.ichunks(tuplist, N))
        >>> list_ = [['n=%r: %s' % (x, ut.reprfunc(y)) for x, y in chunk]
        >>> for chunk in chunklist]
        >>> line_list = [', '.join(strs) for strs in list_]
        >>> result = '\n'.join(line_list)
        >>> print(result)
        n=1: '.../bar', n=2: '.../foo/bar'
        n=1: 'C:/', n=2: 'C:/'
        n=1: '.../ham', n=2: '/ham'
        n=1: '.../eggs', n=2: './eggs'
        n=1: '.../eggs', n=2: '.../spam/eggs'
        n=1: '.../bin', n=2: '.../foobar/bin'
    """
    if not isinstance(path_, six.string_types):
        # Probably given a file pointer
        return path_
    if n is None:
        # Keep the full path, only normalized to cross-platform form.
        cplat_path = ensure_crossplat_path(path_)
    elif n == 0:
        cplat_path = ''
    else:
        sep = '/' if force_unix else os.sep
        # Peel off up to n trailing components with repeated split();
        # reached_end records whether we consumed the entire path (in
        # which case no '...' prefix is needed).
        ndirs_list = []
        head = path_
        reached_end = False
        for nx in range(n):
            head, tail = split(head)
            if tail == '':
                # split() returned an empty tail: we hit the root (or an
                # empty string) before collecting n components.
                if head == '':
                    reached_end = True
                else:
                    # Keep the root itself; strip its separators unless it
                    # is the first (only) collected component.
                    root = head if len(ndirs_list) == 0 else head.strip('\\/')
                    ndirs_list.append(root)
                    reached_end = True
                break
            else:
                ndirs_list.append(tail)
        if trailing and not reached_end:
            # One more probe: if what remains splits to nothing, the whole
            # path was consumed after all, so suppress the '...' prefix.
            head, tail = split(head)
            if len(tail) == 0:
                if len(head) == 0: # or head == '/':
                    reached_end = True
        # Components were collected bottom-up, so reverse before joining.
        ndirs = sep.join(ndirs_list[::-1])
        cplat_path = ensure_crossplat_path(ndirs)
        #if trailing and not reached_end:
        if trailing and not reached_end:
            cplat_path = '.../' + cplat_path
    return cplat_path |
def field_singleton_sub_fields_schema(
sub_fields: Sequence[Field],
*,
by_alias: bool,
model_name_map: Dict[Type['main.BaseModel'], str],
schema_overrides: bool = False,
ref_prefix: Optional[str] = None,
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""
This function is indirectly used by ``field_schema()``, you probably should be using that function.
Take a list of Pydantic ``Field`` from the declaration of a type with parameters, and generate their
schema. I.e., fields used as "type parameters", like ``str`` and ``int`` in ``Tuple[str, int]``.
"""
ref_prefix = ref_prefix or default_prefix
definitions = {}
sub_fields = [sf for sf in sub_fields if sf.include_in_schema()]
if len(sub_fields) == 1:
return field_type_schema(
sub_fields[0],
by_alias=by_alias,
model_name_map=model_name_map,
schema_overrides=schema_overrides,
ref_prefix=ref_prefix,
)
else:
sub_field_schemas = []
for sf in sub_fields:
sub_schema, sub_definitions = field_type_schema(
sf,
by_alias=by_alias,
model_name_map=model_name_map,
schema_overrides=schema_overrides,
ref_prefix=ref_prefix,
)
definitions.update(sub_definitions)
sub_field_schemas.append(sub_schema)
return {'anyOf': sub_field_schemas}, definitions | This function is indirectly used by ``field_schema()``, you probably should be using that function.
Take a list of Pydantic ``Field`` from the declaration of a type with parameters, and generate their
schema. I.e., fields used as "type parameters", like ``str`` and ``int`` in ``Tuple[str, int]``. | Below is the the instruction that describes the task:
### Input:
This function is indirectly used by ``field_schema()``, you probably should be using that function.
Take a list of Pydantic ``Field`` from the declaration of a type with parameters, and generate their
schema. I.e., fields used as "type parameters", like ``str`` and ``int`` in ``Tuple[str, int]``.
### Response:
def field_singleton_sub_fields_schema(
    sub_fields: Sequence[Field],
    *,
    by_alias: bool,
    model_name_map: Dict[Type['main.BaseModel'], str],
    schema_overrides: bool = False,
    ref_prefix: Optional[str] = None,
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
    """
    This function is indirectly used by ``field_schema()``, you probably should be using that function.
    Take a list of Pydantic ``Field`` from the declaration of a type with parameters, and generate their
    schema. I.e., fields used as "type parameters", like ``str`` and ``int`` in ``Tuple[str, int]``.
    """
    ref_prefix = ref_prefix or default_prefix
    # Fields excluded from the schema are dropped up front.
    visible_fields = [f for f in sub_fields if f.include_in_schema()]
    if len(visible_fields) == 1:
        # A single sub-field needs no anyOf wrapper — return its schema directly.
        return field_type_schema(
            visible_fields[0],
            by_alias=by_alias,
            model_name_map=model_name_map,
            schema_overrides=schema_overrides,
            ref_prefix=ref_prefix,
        )
    # Multiple sub-fields: collect each schema and merge their definitions.
    definitions: Dict[str, Any] = {}
    field_schemas = []
    for field in visible_fields:
        f_schema, f_definitions = field_type_schema(
            field,
            by_alias=by_alias,
            model_name_map=model_name_map,
            schema_overrides=schema_overrides,
            ref_prefix=ref_prefix,
        )
        definitions.update(f_definitions)
        field_schemas.append(f_schema)
    return {'anyOf': field_schemas}, definitions
def clear_session(self, response):
"""Clear the session.
This method is invoked when the session is found to be invalid.
Subclasses can override this method to implement a custom session
reset.
"""
session.clear()
# if flask-login is installed, we try to clear the
# "remember me" cookie, just in case it is set
if 'flask_login' in sys.modules:
remember_cookie = current_app.config.get('REMEMBER_COOKIE',
'remember_token')
response.set_cookie(remember_cookie, '', expires=0, max_age=0) | Clear the session.
This method is invoked when the session is found to be invalid.
Subclasses can override this method to implement a custom session
reset. | Below is the the instruction that describes the task:
### Input:
Clear the session.
This method is invoked when the session is found to be invalid.
Subclasses can override this method to implement a custom session
reset.
### Response:
def clear_session(self, response):
    """Clear the session.

    This method is invoked when the session is found to be invalid.
    Subclasses can override this method to implement a custom session
    reset.
    """
    session.clear()
    # When flask-login is loaded, also expire its "remember me" cookie,
    # just in case it is set.
    if 'flask_login' in sys.modules:
        cookie_name = current_app.config.get('REMEMBER_COOKIE',
                                             'remember_token')
        response.set_cookie(cookie_name, '', expires=0, max_age=0)
def percentiles(a, pcts, axis=None):
"""Like scoreatpercentile but can take and return array of percentiles.
Parameters
----------
a : array
data
pcts : sequence of percentile values
percentile or percentiles to find score at
axis : int or None
if not None, computes scores over this axis
Returns
-------
scores: array
array of scores at requested percentiles
first dimension is length of object passed to ``pcts``
"""
scores = []
try:
n = len(pcts)
except TypeError:
pcts = [pcts]
n = 0
for i, p in enumerate(pcts):
if axis is None:
score = stats.scoreatpercentile(a.ravel(), p)
else:
score = N.apply_along_axis(stats.scoreatpercentile, axis, a, p)
scores.append(score)
scores = N.asarray(scores)
if not n:
scores = scores.squeeze()
return scores | Like scoreatpercentile but can take and return array of percentiles.
Parameters
----------
a : array
data
pcts : sequence of percentile values
percentile or percentiles to find score at
axis : int or None
if not None, computes scores over this axis
Returns
-------
scores: array
array of scores at requested percentiles
first dimension is length of object passed to ``pcts`` | Below is the the instruction that describes the task:
### Input:
Like scoreatpercentile but can take and return array of percentiles.
Parameters
----------
a : array
data
pcts : sequence of percentile values
percentile or percentiles to find score at
axis : int or None
if not None, computes scores over this axis
Returns
-------
scores: array
array of scores at requested percentiles
first dimension is length of object passed to ``pcts``
### Response:
def percentiles(a, pcts, axis=None):
    """Like scoreatpercentile but can take and return array of percentiles.

    Parameters
    ----------
    a : array
        data
    pcts : sequence of percentile values
        percentile or percentiles to find score at
    axis : int or None
        if not None, computes scores over this axis

    Returns
    -------
    scores: array
        array of scores at requested percentiles
        first dimension is length of object passed to ``pcts``
    """
    # Normalise a scalar percentile to a one-element list; n_requested
    # records whether a sequence was passed (0 means scalar, so the
    # result is squeezed back down on the way out).
    try:
        n_requested = len(pcts)
    except TypeError:
        pcts = [pcts]
        n_requested = 0
    if axis is None:
        scores = [stats.scoreatpercentile(a.ravel(), p) for p in pcts]
    else:
        scores = [N.apply_along_axis(stats.scoreatpercentile, axis, a, p)
                  for p in pcts]
    scores = N.asarray(scores)
    return scores.squeeze() if not n_requested else scores
def change_dir(directory):
"""
Wraps a function to run in a given directory.
"""
def cd_decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
org_path = os.getcwd()
os.chdir(directory)
func(*args, **kwargs)
os.chdir(org_path)
return wrapper
return cd_decorator | Wraps a function to run in a given directory. | Below is the the instruction that describes the task:
### Input:
Wraps a function to run in a given directory.
### Response:
def change_dir(directory):
    """
    Wraps a function to run in a given directory.

    The previous working directory is restored even when the wrapped
    function raises, and the wrapped function's return value is propagated
    to the caller (the original implementation discarded the return value
    and left the process inside *directory* on error).

    :param directory: path to change into before calling the function
    :return: a decorator
    """
    def cd_decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            org_path = os.getcwd()
            os.chdir(directory)
            try:
                return func(*args, **kwargs)
            finally:
                # Always restore the original cwd, even on exception.
                os.chdir(org_path)
        return wrapper
    return cd_decorator
def get_data(model, instance_id, kind=''):
"""Get instance data by id.
:param model: a string, model name in rio.models
:param id: an integer, instance id.
:param kind: a string specified which kind of dict tranformer should be called.
:return: data.
"""
instance = get_instance(model, instance_id)
if not instance:
return
return ins2dict(instance, kind) | Get instance data by id.
:param model: a string, model name in rio.models
:param id: an integer, instance id.
:param kind: a string specified which kind of dict tranformer should be called.
:return: data. | Below is the the instruction that describes the task:
### Input:
Get instance data by id.
:param model: a string, model name in rio.models
:param id: an integer, instance id.
:param kind: a string specified which kind of dict tranformer should be called.
:return: data.
### Response:
def get_data(model, instance_id, kind=''):
    """Fetch a model instance by id and convert it to a dict.

    :param model: a string, model name in rio.models
    :param instance_id: an integer, instance id.
    :param kind: a string specifying which kind of dict transformer
        should be called.
    :return: the transformed data, or None when no instance was found.
    """
    instance = get_instance(model, instance_id)
    return ins2dict(instance, kind) if instance else None
def _ExpandArtifactFilesSource(self, source, requested):
"""Recursively expands an artifact files source."""
expanded_source = rdf_artifacts.ExpandedSource(base_source=source)
sub_sources = []
artifact_list = []
if "artifact_list" in source.attributes:
artifact_list = source.attributes["artifact_list"]
for artifact_name in artifact_list:
if artifact_name in self.processed_artifacts:
continue
artifact_obj = artifact_registry.REGISTRY.GetArtifact(artifact_name)
for expanded_artifact in self.Expand(artifact_obj, requested):
sub_sources.extend(expanded_artifact.sources)
expanded_source.artifact_sources = sub_sources
expanded_source.path_type = self._path_type
return [expanded_source] | Recursively expands an artifact files source. | Below is the the instruction that describes the task:
### Input:
Recursively expands an artifact files source.
### Response:
def _ExpandArtifactFilesSource(self, source, requested):
    """Recursively expands an artifact files source."""
    expanded = rdf_artifacts.ExpandedSource(base_source=source)
    artifact_names = []
    if "artifact_list" in source.attributes:
        artifact_names = source.attributes["artifact_list"]
    collected_sources = []
    for name in artifact_names:
        # Artifacts already processed are skipped.
        if name in self.processed_artifacts:
            continue
        artifact_obj = artifact_registry.REGISTRY.GetArtifact(name)
        # Recurse into the referenced artifact and flatten its sources.
        for sub_artifact in self.Expand(artifact_obj, requested):
            collected_sources.extend(sub_artifact.sources)
    expanded.artifact_sources = collected_sources
    expanded.path_type = self._path_type
    return [expanded]
def _parse(self, stream):
"""Parse a JSON BUILD file.
Args:
builddata: dictionary of buildfile data
reponame: name of the repo that it came from
path: directory path within the repo
"""
builddata = json.load(stream)
log.debug('This is a JSON build file.')
if 'targets' not in builddata:
log.warn('Warning: No targets defined here.')
return
for tdata in builddata['targets']:
# TODO: validate name
target = address.new(target=tdata.pop('name'),
repo=self.target.repo,
path=self.target.path)
# Duplicate target definition? Uh oh.
if target in self.node and 'target_obj' in self.node[target]:
raise error.ButcherError(
'Target is defined more than once: %s', target)
rule_obj = targets.new(name=target,
ruletype=tdata.pop('type'),
**tdata)
log.debug('New target: %s', target)
self.add_node(target, {'target_obj': rule_obj})
# dep could be ":blabla" or "//foo:blabla" or "//foo/bar:blabla"
for dep in rule_obj.composed_deps() or []:
d_target = address.new(dep)
if not d_target.repo: # ":blabla"
d_target.repo = self.target.repo
if d_target.repo == self.target.repo and not d_target.path:
d_target.path = self.target.path
if d_target not in self.nodes():
self.add_node(d_target)
log.debug('New dep: %s -> %s', target, d_target)
self.add_edge(target, d_target) | Parse a JSON BUILD file.
Args:
builddata: dictionary of buildfile data
reponame: name of the repo that it came from
path: directory path within the repo | Below is the the instruction that describes the task:
### Input:
Parse a JSON BUILD file.
Args:
builddata: dictionary of buildfile data
reponame: name of the repo that it came from
path: directory path within the repo
### Response:
def _parse(self, stream):
    """Parse a JSON BUILD file and populate the graph.

    Loads the build data from *stream*, creates a node (with a rule
    object) for every declared target, and adds edges to each target's
    dependencies, creating placeholder nodes for deps not seen yet.

    Args:
        stream: file-like object containing JSON build data for the
            location described by ``self.target`` (repo + path).

    Raises:
        error.ButcherError: if a target is defined more than once.
    """
    builddata = json.load(stream)
    log.debug('This is a JSON build file.')
    if 'targets' not in builddata:
        log.warn('Warning: No targets defined here.')
        return
    for tdata in builddata['targets']:
        # TODO: validate name
        # The target address inherits repo/path from the BUILD file's own
        # location; 'name' is consumed from tdata so the remaining keys
        # can be passed through to the rule constructor below.
        target = address.new(target=tdata.pop('name'),
                             repo=self.target.repo,
                             path=self.target.path)
        # Duplicate target definition? Uh oh.
        # A node may already exist as a bare dependency placeholder; only
        # one with 'target_obj' attached counts as a real definition.
        if target in self.node and 'target_obj' in self.node[target]:
            raise error.ButcherError(
                'Target is defined more than once: %s', target)
        rule_obj = targets.new(name=target,
                               ruletype=tdata.pop('type'),
                               **tdata)
        log.debug('New target: %s', target)
        self.add_node(target, {'target_obj': rule_obj})
        # dep could be ":blabla" or "//foo:blabla" or "//foo/bar:blabla"
        for dep in rule_obj.composed_deps() or []:
            d_target = address.new(dep)
            if not d_target.repo:  # ":blabla"
                d_target.repo = self.target.repo
            # Repo-local deps with no explicit path default to this
            # BUILD file's path.
            if d_target.repo == self.target.repo and not d_target.path:
                d_target.path = self.target.path
            if d_target not in self.nodes():
                self.add_node(d_target)
            log.debug('New dep: %s -> %s', target, d_target)
            self.add_edge(target, d_target) |
def add_callback_for_action(self, action, callback):
"""Adds a callback function to an action
The method checks whether both action and callback are valid. If so, the callback is added to the list of
functions called when the action is triggered.
:param str action: An action like 'add', 'copy', 'info'
:param callback: A callback function, which is called when action is triggered. It retrieves the event as
parameter
:return: True is the parameters are valid and the callback is registered, False else
:rtype: bool
"""
if hasattr(callback, '__call__'): # Is the callback really a function?
if action not in self.__action_to_callbacks:
self.__action_to_callbacks[action] = []
self.__action_to_callbacks[action].append(callback)
controller = None
try:
controller = callback.__self__
except AttributeError:
try:
# Needed when callback was wrapped using functools.partial
controller = callback.func.__self__
except AttributeError:
pass
if controller:
if controller not in self.__controller_action_callbacks:
self.__controller_action_callbacks[controller] = {}
if action not in self.__controller_action_callbacks[controller]:
self.__controller_action_callbacks[controller][action] = []
self.__controller_action_callbacks[controller][action].append(callback)
return True | Adds a callback function to an action
The method checks whether both action and callback are valid. If so, the callback is added to the list of
functions called when the action is triggered.
:param str action: An action like 'add', 'copy', 'info'
:param callback: A callback function, which is called when action is triggered. It retrieves the event as
parameter
:return: True is the parameters are valid and the callback is registered, False else
:rtype: bool | Below is the the instruction that describes the task:
### Input:
Adds a callback function to an action
The method checks whether both action and callback are valid. If so, the callback is added to the list of
functions called when the action is triggered.
:param str action: An action like 'add', 'copy', 'info'
:param callback: A callback function, which is called when action is triggered. It retrieves the event as
parameter
:return: True is the parameters are valid and the callback is registered, False else
:rtype: bool
### Response:
def add_callback_for_action(self, action, callback):
    """Adds a callback function to an action

    The method checks whether both action and callback are valid. If so, the callback is added to the list of
    functions called when the action is triggered.

    :param str action: An action like 'add', 'copy', 'info'
    :param callback: A callback function, which is called when action is triggered. It retrieves the event as
      parameter
    :return: True is the parameters are valid and the callback is registered, False else
    :rtype: bool
    """
    if not hasattr(callback, '__call__'):  # Is the callback really a function?
        # Bug fix: the original implementation implicitly returned None
        # here, although the docstring promises False for invalid input.
        return False
    # Register the callback for the action.
    self.__action_to_callbacks.setdefault(action, []).append(callback)
    # Try to determine the controller the callback is bound to, so the
    # callback can later be removed when that controller goes away.
    controller = None
    try:
        controller = callback.__self__
    except AttributeError:
        try:
            # Needed when callback was wrapped using functools.partial
            controller = callback.func.__self__
        except AttributeError:
            pass
    if controller:
        controller_map = self.__controller_action_callbacks.setdefault(controller, {})
        controller_map.setdefault(action, []).append(callback)
    return True
def get_placement_solver(service_instance):
'''
Returns a placement solver
service_instance
Service instance to the host or vCenter
'''
stub = salt.utils.vmware.get_new_service_instance_stub(
service_instance, ns='pbm/2.0', path='/pbm/sdk')
pbm_si = pbm.ServiceInstance('ServiceInstance', stub)
try:
profile_manager = pbm_si.RetrieveContent().placementSolver
except vim.fault.NoPermission as exc:
log.exception(exc)
raise VMwareApiError('Not enough permissions. Required privilege: '
'{0}'.format(exc.privilegeId))
except vim.fault.VimFault as exc:
log.exception(exc)
raise VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise VMwareRuntimeError(exc.msg)
return profile_manager | Returns a placement solver
service_instance
Service instance to the host or vCenter | Below is the the instruction that describes the task:
### Input:
Returns a placement solver
service_instance
Service instance to the host or vCenter
### Response:
def get_placement_solver(service_instance):
    '''
    Returns a placement solver

    service_instance
        Service instance to the host or vCenter
    '''
    # Build a pbm-namespaced stub on top of the existing service instance
    # and retrieve the placement solver from its content, translating
    # VMware faults into salt-level exceptions.
    pbm_stub = salt.utils.vmware.get_new_service_instance_stub(
        service_instance, ns='pbm/2.0', path='/pbm/sdk')
    pbm_service = pbm.ServiceInstance('ServiceInstance', pbm_stub)
    try:
        solver = pbm_service.RetrieveContent().placementSolver
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise VMwareApiError('Not enough permissions. Required privilege: '
                             '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise VMwareRuntimeError(exc.msg)
    return solver
def locked_repository(
self, with_dev_reqs=False
): # type: (bool) -> poetry.repositories.Repository
"""
Searches and returns a repository of locked packages.
"""
if not self.is_locked():
return poetry.repositories.Repository()
lock_data = self.lock_data
packages = poetry.repositories.Repository()
if with_dev_reqs:
locked_packages = lock_data["package"]
else:
locked_packages = [
p for p in lock_data["package"] if p["category"] == "main"
]
if not locked_packages:
return packages
for info in locked_packages:
package = poetry.packages.Package(
info["name"], info["version"], info["version"]
)
package.description = info.get("description", "")
package.category = info["category"]
package.optional = info["optional"]
package.hashes = lock_data["metadata"]["hashes"][info["name"]]
package.python_versions = info["python-versions"]
if "marker" in info:
package.marker = parse_marker(info["marker"])
else:
# Compatibility for old locks
if "requirements" in info:
dep = poetry.packages.Dependency("foo", "0.0.0")
for name, value in info["requirements"].items():
if name == "python":
dep.python_versions = value
elif name == "platform":
dep.platform = value
split_dep = dep.to_pep_508(False).split(";")
if len(split_dep) > 1:
package.marker = parse_marker(split_dep[1].strip())
for dep_name, constraint in info.get("dependencies", {}).items():
if isinstance(constraint, list):
for c in constraint:
package.add_dependency(dep_name, c)
continue
package.add_dependency(dep_name, constraint)
if "source" in info:
package.source_type = info["source"]["type"]
package.source_url = info["source"]["url"]
package.source_reference = info["source"]["reference"]
packages.add_package(package)
return packages | Searches and returns a repository of locked packages. | Below is the the instruction that describes the task:
### Input:
Searches and returns a repository of locked packages.
### Response:
def locked_repository(
    self, with_dev_reqs=False
):  # type: (bool) -> poetry.repositories.Repository
    """
    Searches and returns a repository of locked packages.

    Reads ``self.lock_data`` and rebuilds a ``Repository`` of
    ``Package`` objects from it. When *with_dev_reqs* is False, only
    packages whose category is "main" are included. Returns an empty
    repository when the project is not locked.
    """
    if not self.is_locked():
        return poetry.repositories.Repository()
    lock_data = self.lock_data
    packages = poetry.repositories.Repository()
    if with_dev_reqs:
        locked_packages = lock_data["package"]
    else:
        # Dev requirements excluded: keep only "main"-category packages.
        locked_packages = [
            p for p in lock_data["package"] if p["category"] == "main"
        ]
    if not locked_packages:
        return packages
    for info in locked_packages:
        package = poetry.packages.Package(
            info["name"], info["version"], info["version"]
        )
        package.description = info.get("description", "")
        package.category = info["category"]
        package.optional = info["optional"]
        package.hashes = lock_data["metadata"]["hashes"][info["name"]]
        package.python_versions = info["python-versions"]
        if "marker" in info:
            package.marker = parse_marker(info["marker"])
        else:
            # Compatibility for old locks
            # Old lock files stored per-dependency "requirements" instead
            # of a marker string; reconstruct a marker from them via a
            # throwaway Dependency rendered to PEP 508 form.
            if "requirements" in info:
                dep = poetry.packages.Dependency("foo", "0.0.0")
                for name, value in info["requirements"].items():
                    if name == "python":
                        dep.python_versions = value
                    elif name == "platform":
                        dep.platform = value
                split_dep = dep.to_pep_508(False).split(";")
                if len(split_dep) > 1:
                    package.marker = parse_marker(split_dep[1].strip())
        for dep_name, constraint in info.get("dependencies", {}).items():
            # A list constraint means several alternative constraints for
            # the same dependency name; register each one.
            if isinstance(constraint, list):
                for c in constraint:
                    package.add_dependency(dep_name, c)
                continue
            package.add_dependency(dep_name, constraint)
        if "source" in info:
            package.source_type = info["source"]["type"]
            package.source_url = info["source"]["url"]
            package.source_reference = info["source"]["reference"]
        packages.add_package(package)
    return packages |
def debug_dump(message, file_prefix="dump"):
"""
Utility while developing to dump message data to play with in the
interpreter
"""
global index
index += 1
with open("%s_%s.dump" % (file_prefix, index), 'w') as f:
f.write(message.SerializeToString())
f.close() | Utility while developing to dump message data to play with in the
interpreter | Below is the the instruction that describes the task:
### Input:
Utility while developing to dump message data to play with in the
interpreter
### Response:
def debug_dump(message, file_prefix="dump"):
    """
    Utility while developing to dump message data to play with in the
    interpreter

    Writes ``message.SerializeToString()`` to ``<file_prefix>_<index>.dump``,
    where ``index`` is a module-level counter incremented on every call.

    :param message: an object exposing ``SerializeToString()`` (e.g. a
        protobuf message) returning bytes
    :param file_prefix: prefix for the dump file name
    """
    global index
    # Fix: default the counter to 0 when it was never initialised instead
    # of raising NameError on the first call.
    index = globals().get('index', 0) + 1
    # Fix: SerializeToString() returns bytes, so the file must be opened
    # in binary mode ('w' breaks on Python 3). The with-statement closes
    # the file even on error, making the original explicit close()
    # redundant.
    with open("%s_%s.dump" % (file_prefix, index), 'wb') as f:
        f.write(message.SerializeToString())
def untlxml2py(untl_filename):
    """Parse a UNTL XML file object into a pyuntl element tree.

    Streams the XML with iterparse, pushing a new pyuntl element on a
    stack at each start tag and popping/attaching it at each end tag;
    the root element (the one left without a parent) is returned.

    You can also pass this a string as file input like so:
        import StringIO
        untlxml2py(StringIO.StringIO(untl_string))

    Raises:
        PyuntlException: for a tag that has no entry in PYUNTL_DISPATCH.
    """
    # Create a stack to hold parents.
    parent_stack = []
    # Use iterparse to open the file and loop through elements.
    for event, element in iterparse(untl_filename, events=('start', 'end')):
        # Strip an XML namespace prefix from the tag, if present.
        if NAMESPACE_REGEX.search(element.tag, 0):
            element_tag = NAMESPACE_REGEX.search(element.tag, 0).group(1)
        else:
            element_tag = element.tag
        # Process the element if it exists in UNTL.
        if element_tag in PYUNTL_DISPATCH:
            # If it is the element's opening tag,
            # add it to the parent stack.
            if event == 'start':
                parent_stack.append(PYUNTL_DISPATCH[element_tag]())
            # If it is the element's closing tag,
            # remove element from stack. Add qualifier and content.
            elif event == 'end':
                child = parent_stack.pop()
                if element.text is not None:
                    # Whitespace-only text is ignored; note the original
                    # (unstripped) text is what gets stored — presumably
                    # intentional, to preserve internal whitespace.
                    content = element.text.strip()
                    if content != '':
                        child.set_content(element.text)
                if element.get('qualifier', False):
                    child.set_qualifier(element.get('qualifier'))
                # Add the element to its parent.
                if len(parent_stack) > 0:
                    parent_stack[-1].add_child(child)
                # If it doesn't have a parent, it is the root element,
                # so return it.
                else:
                    return child
        else:
            raise PyuntlException(
                'Element "%s" not in UNTL dispatch.' % (element_tag)
            ) |
You can also pass this a string as file input like so:
import StringIO
untlxml2py(StringIO.StringIO(untl_string)) | Below is the the instruction that describes the task:
### Input:
Parse a UNTL XML file object into a pyuntl element tree.
You can also pass this a string as file input like so:
import StringIO
untlxml2py(StringIO.StringIO(untl_string))
### Response:
def untlxml2py(untl_filename):
"""Parse a UNTL XML file object into a pyuntl element tree.
You can also pass this a string as file input like so:
import StringIO
untlxml2py(StringIO.StringIO(untl_string))
"""
# Create a stack to hold parents.
parent_stack = []
# Use iterparse to open the file and loop through elements.
for event, element in iterparse(untl_filename, events=('start', 'end')):
if NAMESPACE_REGEX.search(element.tag, 0):
element_tag = NAMESPACE_REGEX.search(element.tag, 0).group(1)
else:
element_tag = element.tag
# Process the element if it exists in UNTL.
if element_tag in PYUNTL_DISPATCH:
# If it is the element's opening tag,
# add it to the parent stack.
if event == 'start':
parent_stack.append(PYUNTL_DISPATCH[element_tag]())
# If it is the element's closing tag,
# remove element from stack. Add qualifier and content.
elif event == 'end':
child = parent_stack.pop()
if element.text is not None:
content = element.text.strip()
if content != '':
child.set_content(element.text)
if element.get('qualifier', False):
child.set_qualifier(element.get('qualifier'))
# Add the element to its parent.
if len(parent_stack) > 0:
parent_stack[-1].add_child(child)
# If it doesn't have a parent, it is the root element,
# so return it.
else:
return child
else:
raise PyuntlException(
'Element "%s" not in UNTL dispatch.' % (element_tag)
) |
def index(self):
"""Returns this layer's index in the canvas.layers[].
Searches the position of this layer in the canvas'
layers list, return None when not found.
"""
for i in range(len(self.canvas.layers)):
if self.canvas.layers[i] == self: break
if self.canvas.layers[i] == self:
return i
else:
return None | Returns this layer's index in the canvas.layers[].
Searches the position of this layer in the canvas'
layers list, return None when not found. | Below is the the instruction that describes the task:
### Input:
Returns this layer's index in the canvas.layers[].
Searches the position of this layer in the canvas'
layers list, return None when not found.
### Response:
def index(self):
"""Returns this layer's index in the canvas.layers[].
Searches the position of this layer in the canvas'
layers list, return None when not found.
"""
for i in range(len(self.canvas.layers)):
if self.canvas.layers[i] == self: break
if self.canvas.layers[i] == self:
return i
else:
return None |
def get_lldp_neighbor_detail_output_lldp_neighbor_detail_remote_system_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_lldp_neighbor_detail = ET.Element("get_lldp_neighbor_detail")
config = get_lldp_neighbor_detail
output = ET.SubElement(get_lldp_neighbor_detail, "output")
lldp_neighbor_detail = ET.SubElement(output, "lldp-neighbor-detail")
local_interface_name_key = ET.SubElement(lldp_neighbor_detail, "local-interface-name")
local_interface_name_key.text = kwargs.pop('local_interface_name')
remote_interface_name_key = ET.SubElement(lldp_neighbor_detail, "remote-interface-name")
remote_interface_name_key.text = kwargs.pop('remote_interface_name')
remote_system_name = ET.SubElement(lldp_neighbor_detail, "remote-system-name")
remote_system_name.text = kwargs.pop('remote_system_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def get_lldp_neighbor_detail_output_lldp_neighbor_detail_remote_system_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_lldp_neighbor_detail = ET.Element("get_lldp_neighbor_detail")
config = get_lldp_neighbor_detail
output = ET.SubElement(get_lldp_neighbor_detail, "output")
lldp_neighbor_detail = ET.SubElement(output, "lldp-neighbor-detail")
local_interface_name_key = ET.SubElement(lldp_neighbor_detail, "local-interface-name")
local_interface_name_key.text = kwargs.pop('local_interface_name')
remote_interface_name_key = ET.SubElement(lldp_neighbor_detail, "remote-interface-name")
remote_interface_name_key.text = kwargs.pop('remote_interface_name')
remote_system_name = ET.SubElement(lldp_neighbor_detail, "remote-system-name")
remote_system_name.text = kwargs.pop('remote_system_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def _run_dpkt(self, dpkt):
"""Call dpkt.pcap.Reader to extract PCAP files."""
# if not self._flag_a:
# self._flag_a = True
# warnings.warn(f"'Extractor(engine=dpkt)' object is not iterable; "
# "so 'auto=False' will be ignored", AttributeWarning, stacklevel=stacklevel())
if self._exlyr != 'None' or self._exptl != 'null':
warnings.warn("'Extractor(engine=dpkt)' does not support protocol and layer threshold; "
f"'layer={self._exlyr}' and 'protocol={self._exptl}' ignored",
AttributeWarning, stacklevel=stacklevel())
# extract global header
self.record_header()
self._ifile.seek(0, os.SEEK_SET)
# extract & analyse file
self._expkg = dpkt
self._extmp = iter(dpkt.pcap.Reader(self._ifile))
# start iteration
self.record_frames() | Call dpkt.pcap.Reader to extract PCAP files. | Below is the the instruction that describes the task:
### Input:
Call dpkt.pcap.Reader to extract PCAP files.
### Response:
def _run_dpkt(self, dpkt):
"""Call dpkt.pcap.Reader to extract PCAP files."""
# if not self._flag_a:
# self._flag_a = True
# warnings.warn(f"'Extractor(engine=dpkt)' object is not iterable; "
# "so 'auto=False' will be ignored", AttributeWarning, stacklevel=stacklevel())
if self._exlyr != 'None' or self._exptl != 'null':
warnings.warn("'Extractor(engine=dpkt)' does not support protocol and layer threshold; "
f"'layer={self._exlyr}' and 'protocol={self._exptl}' ignored",
AttributeWarning, stacklevel=stacklevel())
# extract global header
self.record_header()
self._ifile.seek(0, os.SEEK_SET)
# extract & analyse file
self._expkg = dpkt
self._extmp = iter(dpkt.pcap.Reader(self._ifile))
# start iteration
self.record_frames() |
def set_hosts(hosts, use_ssl=False, ssl_cert_path=None):
"""
Sets the Elasticsearch hosts to use
Args:
hosts (str): A single hostname or URL, or list of hostnames or URLs
use_ssl (bool): Use a HTTPS connection to the server
ssl_cert_path (str): Path to the certificate chain
"""
if type(hosts) != list:
hosts = [hosts]
conn_params = {
"hosts": hosts,
"timeout": 20
}
if use_ssl:
conn_params['use_ssl'] = True
if ssl_cert_path:
conn_params['verify_certs'] = True
conn_params['ca_certs'] = ssl_cert_path
else:
conn_params['verify_certs'] = False
connections.create_connection(**conn_params) | Sets the Elasticsearch hosts to use
Args:
hosts (str): A single hostname or URL, or list of hostnames or URLs
use_ssl (bool): Use a HTTPS connection to the server
ssl_cert_path (str): Path to the certificate chain | Below is the the instruction that describes the task:
### Input:
Sets the Elasticsearch hosts to use
Args:
hosts (str): A single hostname or URL, or list of hostnames or URLs
use_ssl (bool): Use a HTTPS connection to the server
ssl_cert_path (str): Path to the certificate chain
### Response:
def set_hosts(hosts, use_ssl=False, ssl_cert_path=None):
"""
Sets the Elasticsearch hosts to use
Args:
hosts (str): A single hostname or URL, or list of hostnames or URLs
use_ssl (bool): Use a HTTPS connection to the server
ssl_cert_path (str): Path to the certificate chain
"""
if type(hosts) != list:
hosts = [hosts]
conn_params = {
"hosts": hosts,
"timeout": 20
}
if use_ssl:
conn_params['use_ssl'] = True
if ssl_cert_path:
conn_params['verify_certs'] = True
conn_params['ca_certs'] = ssl_cert_path
else:
conn_params['verify_certs'] = False
connections.create_connection(**conn_params) |
def add_object(self, name, content, headers=None, replace=True):
"""
Adds a new content object to the Distribution. The content
for the object will be copied to a new Key in the S3 Bucket
and the permissions will be set appropriately for the type
of Distribution.
:type name: str or unicode
:param name: The name or key of the new object.
:type content: file-like object
:param content: A file-like object that contains the content
for the new object.
:type headers: dict
:param headers: A dictionary containing additional headers
you would like associated with the new
object in S3.
:rtype: :class:`boto.cloudfront.object.Object`
:return: The newly created object.
"""
if self.config.origin.origin_access_identity:
policy = 'private'
else:
policy = 'public-read'
bucket = self._get_bucket()
object = bucket.new_key(name)
object.set_contents_from_file(content, headers=headers, policy=policy)
if self.config.origin.origin_access_identity:
self.set_permissions(object, replace)
return object | Adds a new content object to the Distribution. The content
for the object will be copied to a new Key in the S3 Bucket
and the permissions will be set appropriately for the type
of Distribution.
:type name: str or unicode
:param name: The name or key of the new object.
:type content: file-like object
:param content: A file-like object that contains the content
for the new object.
:type headers: dict
:param headers: A dictionary containing additional headers
you would like associated with the new
object in S3.
:rtype: :class:`boto.cloudfront.object.Object`
:return: The newly created object. | Below is the the instruction that describes the task:
### Input:
Adds a new content object to the Distribution. The content
for the object will be copied to a new Key in the S3 Bucket
and the permissions will be set appropriately for the type
of Distribution.
:type name: str or unicode
:param name: The name or key of the new object.
:type content: file-like object
:param content: A file-like object that contains the content
for the new object.
:type headers: dict
:param headers: A dictionary containing additional headers
you would like associated with the new
object in S3.
:rtype: :class:`boto.cloudfront.object.Object`
:return: The newly created object.
### Response:
def add_object(self, name, content, headers=None, replace=True):
"""
Adds a new content object to the Distribution. The content
for the object will be copied to a new Key in the S3 Bucket
and the permissions will be set appropriately for the type
of Distribution.
:type name: str or unicode
:param name: The name or key of the new object.
:type content: file-like object
:param content: A file-like object that contains the content
for the new object.
:type headers: dict
:param headers: A dictionary containing additional headers
you would like associated with the new
object in S3.
:rtype: :class:`boto.cloudfront.object.Object`
:return: The newly created object.
"""
if self.config.origin.origin_access_identity:
policy = 'private'
else:
policy = 'public-read'
bucket = self._get_bucket()
object = bucket.new_key(name)
object.set_contents_from_file(content, headers=headers, policy=policy)
if self.config.origin.origin_access_identity:
self.set_permissions(object, replace)
return object |
def nesting_level(self) -> int:
"""Return the nesting level of self.
The minimum nesting_level is 0. Being part of any Template or
ParserFunction increases the level by one.
"""
ss, se = self._span
level = 0
type_to_spans = self._type_to_spans
for type_ in ('Template', 'ParserFunction'):
spans = type_to_spans[type_]
for s, e in spans[:bisect(spans, [ss + 1])]:
if se <= e:
level += 1
return level | Return the nesting level of self.
The minimum nesting_level is 0. Being part of any Template or
ParserFunction increases the level by one. | Below is the the instruction that describes the task:
### Input:
Return the nesting level of self.
The minimum nesting_level is 0. Being part of any Template or
ParserFunction increases the level by one.
### Response:
def nesting_level(self) -> int:
"""Return the nesting level of self.
The minimum nesting_level is 0. Being part of any Template or
ParserFunction increases the level by one.
"""
ss, se = self._span
level = 0
type_to_spans = self._type_to_spans
for type_ in ('Template', 'ParserFunction'):
spans = type_to_spans[type_]
for s, e in spans[:bisect(spans, [ss + 1])]:
if se <= e:
level += 1
return level |
def batch(self, requests):
"""
Make a batch request.
:param requests: A list of dictionaries with keys 'method', 'relative_url' and optionally 'body'.
Yields a list of responses and/or exceptions.
"""
for request in requests:
if 'body' in request:
request['body'] = urlencode(request['body'])
def _grouper(complete_list, n=1):
"""
Batches a list into constant size chunks.
:param complete_list: A input list (not a generator).
:param n: The size of the chunk.
Adapted from <http://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks-in-python>
"""
for i in range(0, len(complete_list), n):
yield complete_list[i:i + n]
responses = []
# Maximum batch size for Facebook is 50 so split up requests
# https://developers.facebook.com/docs/graph-api/making-multiple-requests/#limits
for group in _grouper(requests, 50):
responses += self.post(
batch=json.dumps(group)
)
for response, request in zip(responses, requests):
# Facilitate for empty Graph API responses.
#
# https://github.com/jgorset/facepy/pull/30
if not response:
yield None
continue
try:
yield self._parse(response['body'])
except FacepyError as exception:
exception.request = request
yield exception | Make a batch request.
:param requests: A list of dictionaries with keys 'method', 'relative_url' and optionally 'body'.
Yields a list of responses and/or exceptions. | Below is the the instruction that describes the task:
### Input:
Make a batch request.
:param requests: A list of dictionaries with keys 'method', 'relative_url' and optionally 'body'.
Yields a list of responses and/or exceptions.
### Response:
def batch(self, requests):
"""
Make a batch request.
:param requests: A list of dictionaries with keys 'method', 'relative_url' and optionally 'body'.
Yields a list of responses and/or exceptions.
"""
for request in requests:
if 'body' in request:
request['body'] = urlencode(request['body'])
def _grouper(complete_list, n=1):
"""
Batches a list into constant size chunks.
:param complete_list: A input list (not a generator).
:param n: The size of the chunk.
Adapted from <http://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks-in-python>
"""
for i in range(0, len(complete_list), n):
yield complete_list[i:i + n]
responses = []
# Maximum batch size for Facebook is 50 so split up requests
# https://developers.facebook.com/docs/graph-api/making-multiple-requests/#limits
for group in _grouper(requests, 50):
responses += self.post(
batch=json.dumps(group)
)
for response, request in zip(responses, requests):
# Facilitate for empty Graph API responses.
#
# https://github.com/jgorset/facepy/pull/30
if not response:
yield None
continue
try:
yield self._parse(response['body'])
except FacepyError as exception:
exception.request = request
yield exception |
def iterchildren(self):
"""Returns a :obj:`Gtk.TreeModelRowIter` for the row's children"""
child_iter = self.model.iter_children(self.iter)
return TreeModelRowIter(self.model, child_iter) | Returns a :obj:`Gtk.TreeModelRowIter` for the row's children | Below is the the instruction that describes the task:
### Input:
Returns a :obj:`Gtk.TreeModelRowIter` for the row's children
### Response:
def iterchildren(self):
"""Returns a :obj:`Gtk.TreeModelRowIter` for the row's children"""
child_iter = self.model.iter_children(self.iter)
return TreeModelRowIter(self.model, child_iter) |
def spawn(cls, argv, cwd=None, env=None, dimensions=(24, 80)):
"""Start the given command in a child process in a pseudo terminal.
This does all the setting up the pty, and returns an instance of
PtyProcess.
Dimensions of the psuedoterminal used for the subprocess can be
specified as a tuple (rows, cols), or the default (24, 80) will be
used.
"""
if isinstance(argv, str):
argv = shlex.split(argv, posix=False)
if not isinstance(argv, (list, tuple)):
raise TypeError("Expected a list or tuple for argv, got %r" % argv)
# Shallow copy of argv so we can modify it
argv = argv[:]
command = argv[0]
env = env or os.environ
path = env.get('PATH', os.defpath)
command_with_path = which(command, path=path)
if command_with_path is None:
raise FileNotFoundError(
'The command was not found or was not ' +
'executable: %s.' % command
)
command = command_with_path
argv[0] = command
cmdline = ' ' + subprocess.list2cmdline(argv[1:])
cwd = cwd or os.getcwd()
proc = PTY(dimensions[1], dimensions[0])
# Create the environemnt string.
envStrs = []
for (key, value) in env.items():
envStrs.append('%s=%s' % (key, value))
env = '\0'.join(envStrs) + '\0'
if PY2:
command = _unicode(command)
cwd = _unicode(cwd)
cmdline = _unicode(cmdline)
env = _unicode(env)
if len(argv) == 1:
proc.spawn(command, cwd=cwd, env=env)
else:
proc.spawn(command, cwd=cwd, env=env, cmdline=cmdline)
inst = cls(proc)
inst._winsize = dimensions
# Set some informational attributes
inst.argv = argv
if env is not None:
inst.env = env
if cwd is not None:
inst.launch_dir = cwd
return inst | Start the given command in a child process in a pseudo terminal.
This does all the setting up the pty, and returns an instance of
PtyProcess.
Dimensions of the psuedoterminal used for the subprocess can be
specified as a tuple (rows, cols), or the default (24, 80) will be
used. | Below is the the instruction that describes the task:
### Input:
Start the given command in a child process in a pseudo terminal.
This does all the setting up the pty, and returns an instance of
PtyProcess.
Dimensions of the psuedoterminal used for the subprocess can be
specified as a tuple (rows, cols), or the default (24, 80) will be
used.
### Response:
def spawn(cls, argv, cwd=None, env=None, dimensions=(24, 80)):
"""Start the given command in a child process in a pseudo terminal.
This does all the setting up the pty, and returns an instance of
PtyProcess.
Dimensions of the psuedoterminal used for the subprocess can be
specified as a tuple (rows, cols), or the default (24, 80) will be
used.
"""
if isinstance(argv, str):
argv = shlex.split(argv, posix=False)
if not isinstance(argv, (list, tuple)):
raise TypeError("Expected a list or tuple for argv, got %r" % argv)
# Shallow copy of argv so we can modify it
argv = argv[:]
command = argv[0]
env = env or os.environ
path = env.get('PATH', os.defpath)
command_with_path = which(command, path=path)
if command_with_path is None:
raise FileNotFoundError(
'The command was not found or was not ' +
'executable: %s.' % command
)
command = command_with_path
argv[0] = command
cmdline = ' ' + subprocess.list2cmdline(argv[1:])
cwd = cwd or os.getcwd()
proc = PTY(dimensions[1], dimensions[0])
# Create the environemnt string.
envStrs = []
for (key, value) in env.items():
envStrs.append('%s=%s' % (key, value))
env = '\0'.join(envStrs) + '\0'
if PY2:
command = _unicode(command)
cwd = _unicode(cwd)
cmdline = _unicode(cmdline)
env = _unicode(env)
if len(argv) == 1:
proc.spawn(command, cwd=cwd, env=env)
else:
proc.spawn(command, cwd=cwd, env=env, cmdline=cmdline)
inst = cls(proc)
inst._winsize = dimensions
# Set some informational attributes
inst.argv = argv
if env is not None:
inst.env = env
if cwd is not None:
inst.launch_dir = cwd
return inst |
def set(self, name: str, value: Union[str, List[str]]) -> None:
"""
设置 header
"""
self._headers[name] = value | 设置 header | Below is the the instruction that describes the task:
### Input:
设置 header
### Response:
def set(self, name: str, value: Union[str, List[str]]) -> None:
"""
设置 header
"""
self._headers[name] = value |
def spinner(self, spinner=None):
"""Setter for spinner property.
Parameters
----------
spinner : dict, str
Defines the spinner value with frame and interval
"""
self._spinner = self._get_spinner(spinner)
self._frame_index = 0
self._text_index = 0 | Setter for spinner property.
Parameters
----------
spinner : dict, str
Defines the spinner value with frame and interval | Below is the the instruction that describes the task:
### Input:
Setter for spinner property.
Parameters
----------
spinner : dict, str
Defines the spinner value with frame and interval
### Response:
def spinner(self, spinner=None):
"""Setter for spinner property.
Parameters
----------
spinner : dict, str
Defines the spinner value with frame and interval
"""
self._spinner = self._get_spinner(spinner)
self._frame_index = 0
self._text_index = 0 |
def post(self, request, *args, **kwargs):
""" Handles POST requests. """
self.init_attachment_cache()
# Stores a boolean indicating if we are considering a preview
self.preview = 'preview' in self.request.POST
# Initializes the forms
post_form_class = self.get_post_form_class()
post_form = self.get_post_form(post_form_class)
attachment_formset_class = self.get_attachment_formset_class()
attachment_formset = self.get_attachment_formset(attachment_formset_class)
poll_option_formset_class = self.get_poll_option_formset_class()
poll_option_formset = self.get_poll_option_formset(poll_option_formset_class)
post_form_valid = post_form.is_valid()
attachment_formset_valid = (
attachment_formset.is_valid() if attachment_formset else None
)
poll_option_formset_valid = (
poll_option_formset.is_valid()
if poll_option_formset and len(post_form.cleaned_data['poll_question']) else None
)
self.attachment_preview = self.preview if attachment_formset_valid else None
self.poll_preview = self.preview if poll_option_formset_valid else None
poll_options_validated = poll_option_formset_valid is not None
if (
post_form_valid and
attachment_formset_valid is not False and
poll_option_formset_valid is not False
):
return self.form_valid(
post_form, attachment_formset, poll_option_formset,
poll_options_validated=poll_options_validated,
)
else:
return self.form_invalid(
post_form, attachment_formset, poll_option_formset,
poll_options_validated=poll_options_validated,
) | Handles POST requests. | Below is the the instruction that describes the task:
### Input:
Handles POST requests.
### Response:
def post(self, request, *args, **kwargs):
""" Handles POST requests. """
self.init_attachment_cache()
# Stores a boolean indicating if we are considering a preview
self.preview = 'preview' in self.request.POST
# Initializes the forms
post_form_class = self.get_post_form_class()
post_form = self.get_post_form(post_form_class)
attachment_formset_class = self.get_attachment_formset_class()
attachment_formset = self.get_attachment_formset(attachment_formset_class)
poll_option_formset_class = self.get_poll_option_formset_class()
poll_option_formset = self.get_poll_option_formset(poll_option_formset_class)
post_form_valid = post_form.is_valid()
attachment_formset_valid = (
attachment_formset.is_valid() if attachment_formset else None
)
poll_option_formset_valid = (
poll_option_formset.is_valid()
if poll_option_formset and len(post_form.cleaned_data['poll_question']) else None
)
self.attachment_preview = self.preview if attachment_formset_valid else None
self.poll_preview = self.preview if poll_option_formset_valid else None
poll_options_validated = poll_option_formset_valid is not None
if (
post_form_valid and
attachment_formset_valid is not False and
poll_option_formset_valid is not False
):
return self.form_valid(
post_form, attachment_formset, poll_option_formset,
poll_options_validated=poll_options_validated,
)
else:
return self.form_invalid(
post_form, attachment_formset, poll_option_formset,
poll_options_validated=poll_options_validated,
) |
def render_to_response(self, context, **response_kwargs):
""" Returns the rendered template in JSON format """
if self.request.is_ajax():
data = {
"content": render_to_string(
self.get_template_names(), context, request=self.request)
}
return JsonResponse(data)
if settings.DEBUG:
return super(PartialAjaxMixin, self).render_to_response(
context, **response_kwargs)
raise Http404() | Returns the rendered template in JSON format | Below is the the instruction that describes the task:
### Input:
Returns the rendered template in JSON format
### Response:
def render_to_response(self, context, **response_kwargs):
""" Returns the rendered template in JSON format """
if self.request.is_ajax():
data = {
"content": render_to_string(
self.get_template_names(), context, request=self.request)
}
return JsonResponse(data)
if settings.DEBUG:
return super(PartialAjaxMixin, self).render_to_response(
context, **response_kwargs)
raise Http404() |
def get_id(self):
"""
get unique identifier of this container
:return: str
"""
if self._id is None:
self._id = graceful_get(self.inspect(refresh=True), "ID")
return self._id | get unique identifier of this container
:return: str | Below is the the instruction that describes the task:
### Input:
get unique identifier of this container
:return: str
### Response:
def get_id(self):
"""
get unique identifier of this container
:return: str
"""
if self._id is None:
self._id = graceful_get(self.inspect(refresh=True), "ID")
return self._id |
def list_unit_states(self, machine_id=None, unit_name=None):
"""Return the current UnitState for the fleet cluster
Args:
machine_id (str): filter all UnitState objects to those
originating from a specific machine
unit_name (str): filter all UnitState objects to those related
to a specific unit
Yields:
UnitState: The next UnitState in the cluster
Raises:
fleet.v1.errors.APIError: Fleet returned a response code >= 400
"""
for page in self._request('UnitState.List', machineID=machine_id, unitName=unit_name):
for state in page.get('states', []):
yield UnitState(data=state) | Return the current UnitState for the fleet cluster
Args:
machine_id (str): filter all UnitState objects to those
originating from a specific machine
unit_name (str): filter all UnitState objects to those related
to a specific unit
Yields:
UnitState: The next UnitState in the cluster
Raises:
fleet.v1.errors.APIError: Fleet returned a response code >= 400 | Below is the the instruction that describes the task:
### Input:
Return the current UnitState for the fleet cluster
Args:
machine_id (str): filter all UnitState objects to those
originating from a specific machine
unit_name (str): filter all UnitState objects to those related
to a specific unit
Yields:
UnitState: The next UnitState in the cluster
Raises:
fleet.v1.errors.APIError: Fleet returned a response code >= 400
### Response:
def list_unit_states(self, machine_id=None, unit_name=None):
"""Return the current UnitState for the fleet cluster
Args:
machine_id (str): filter all UnitState objects to those
originating from a specific machine
unit_name (str): filter all UnitState objects to those related
to a specific unit
Yields:
UnitState: The next UnitState in the cluster
Raises:
fleet.v1.errors.APIError: Fleet returned a response code >= 400
"""
for page in self._request('UnitState.List', machineID=machine_id, unitName=unit_name):
for state in page.get('states', []):
yield UnitState(data=state) |
def _get_selected_ids(self):
"""List of currently selected ids"""
selection = self.get_selection()
if selection.get_mode() != gtk.SELECTION_MULTIPLE:
raise AttributeError('selected_ids only valid for select_multiple')
model, selected_paths = selection.get_selected_rows()
if selected_paths:
return zip(*selected_paths)[0]
else:
return () | List of currently selected ids | Below is the the instruction that describes the task:
### Input:
List of currently selected ids
### Response:
def _get_selected_ids(self):
"""List of currently selected ids"""
selection = self.get_selection()
if selection.get_mode() != gtk.SELECTION_MULTIPLE:
raise AttributeError('selected_ids only valid for select_multiple')
model, selected_paths = selection.get_selected_rows()
if selected_paths:
return zip(*selected_paths)[0]
else:
return () |
def copy_binder_files(app, exception):
"""Copy all Binder requirements and notebooks files."""
if exception is not None:
return
if app.builder.name not in ['html', 'readthedocs']:
return
gallery_conf = app.config.sphinx_gallery_conf
binder_conf = check_binder_conf(gallery_conf.get('binder'))
if not len(binder_conf) > 0:
return
logger.info('copying binder requirements...', color='white')
_copy_binder_reqs(app, binder_conf)
_copy_binder_notebooks(app) | Copy all Binder requirements and notebooks files. | Below is the the instruction that describes the task:
### Input:
Copy all Binder requirements and notebooks files.
### Response:
def copy_binder_files(app, exception):
"""Copy all Binder requirements and notebooks files."""
if exception is not None:
return
if app.builder.name not in ['html', 'readthedocs']:
return
gallery_conf = app.config.sphinx_gallery_conf
binder_conf = check_binder_conf(gallery_conf.get('binder'))
if not len(binder_conf) > 0:
return
logger.info('copying binder requirements...', color='white')
_copy_binder_reqs(app, binder_conf)
_copy_binder_notebooks(app) |
def get_pull_requests(self):
"https://developer.github.com/v3/pulls/#list-pull-requests"
g = self.github
query = {'state': 'all'}
if self.args.github_token:
query['access_token'] = g['token']
def f(pull):
if self.args.ignore_closed:
return (pull['state'] == 'opened' or
(pull['state'] == 'closed' and pull['merged_at']))
else:
return True
pulls = filter(f,
self.get(g['url'] + "/repos/" + g['repo'] + "/pulls",
query, self.args.cache))
return dict([(str(pull['number']), pull) for pull in pulls]) | https://developer.github.com/v3/pulls/#list-pull-requests | Below is the the instruction that describes the task:
### Input:
https://developer.github.com/v3/pulls/#list-pull-requests
### Response:
def get_pull_requests(self):
"https://developer.github.com/v3/pulls/#list-pull-requests"
g = self.github
query = {'state': 'all'}
if self.args.github_token:
query['access_token'] = g['token']
def f(pull):
if self.args.ignore_closed:
return (pull['state'] == 'opened' or
(pull['state'] == 'closed' and pull['merged_at']))
else:
return True
pulls = filter(f,
self.get(g['url'] + "/repos/" + g['repo'] + "/pulls",
query, self.args.cache))
return dict([(str(pull['number']), pull) for pull in pulls]) |
def renderProcessStdOut(self, stdout):
""" render stdout of shelled-out process
stdout always contains information Java process wants to
propagate back to cli, so we do special rendering here
:param stdout: all lines from shelled-out process
:return:
"""
# since we render stdout line based on Java process return code,
# ``status'' has to be already set
assert self.status is not None
# remove pending newline
if self.status == Status.Ok:
self._do_log(Log.info, stdout)
elif self.status == Status.HeronError:
# remove last newline since logging will append newline
self._do_log(Log.error, stdout)
# No need to prefix [INFO] here. We want to display dry-run response in a clean way
elif self.status == Status.DryRun:
self._do_print(sys.stdout, stdout)
elif self.status == Status.InvocationError:
self._do_print(sys.stdout, stdout)
else:
raise RuntimeError(
"Unknown status type of value %d. Expected value: %s" % \
(self.status.value, list(Status))) | render stdout of shelled-out process
stdout always contains information Java process wants to
propagate back to cli, so we do special rendering here
:param stdout: all lines from shelled-out process
:return: | Below is the the instruction that describes the task:
### Input:
render stdout of shelled-out process
stdout always contains information Java process wants to
propagate back to cli, so we do special rendering here
:param stdout: all lines from shelled-out process
:return:
### Response:
def renderProcessStdOut(self, stdout):
""" render stdout of shelled-out process
stdout always contains information Java process wants to
propagate back to cli, so we do special rendering here
:param stdout: all lines from shelled-out process
:return:
"""
# since we render stdout line based on Java process return code,
# ``status'' has to be already set
assert self.status is not None
# remove pending newline
if self.status == Status.Ok:
self._do_log(Log.info, stdout)
elif self.status == Status.HeronError:
# remove last newline since logging will append newline
self._do_log(Log.error, stdout)
# No need to prefix [INFO] here. We want to display dry-run response in a clean way
elif self.status == Status.DryRun:
self._do_print(sys.stdout, stdout)
elif self.status == Status.InvocationError:
self._do_print(sys.stdout, stdout)
else:
raise RuntimeError(
"Unknown status type of value %d. Expected value: %s" % \
(self.status.value, list(Status))) |
def on_directory_button_tool_clicked(self):
"""Autoconnect slot activated when directory button is clicked."""
# noinspection PyCallByClass,PyTypeChecker
# set up parameter from dialog
input_path = self.layer.currentLayer().source()
input_directory, self.output_filename = os.path.split(input_path)
file_extension = os.path.splitext(self.output_filename)[1]
self.output_filename = os.path.splitext(self.output_filename)[0]
# show Qt file directory dialog
output_path, __ = QtWidgets.QFileDialog.getSaveFileName(
self,
self.tr('Output file'),
'%s_multi_buffer%s' % (
os.path.join(input_directory, self.output_filename),
file_extension),
'GeoJSON (*.geojson);;Shapefile (*.shp)')
# set selected path to the dialog
self.output_form.setText(output_path) | Autoconnect slot activated when directory button is clicked. | Below is the the instruction that describes the task:
### Input:
Autoconnect slot activated when directory button is clicked.
### Response:
def on_directory_button_tool_clicked(self):
"""Autoconnect slot activated when directory button is clicked."""
# noinspection PyCallByClass,PyTypeChecker
# set up parameter from dialog
input_path = self.layer.currentLayer().source()
input_directory, self.output_filename = os.path.split(input_path)
file_extension = os.path.splitext(self.output_filename)[1]
self.output_filename = os.path.splitext(self.output_filename)[0]
# show Qt file directory dialog
output_path, __ = QtWidgets.QFileDialog.getSaveFileName(
self,
self.tr('Output file'),
'%s_multi_buffer%s' % (
os.path.join(input_directory, self.output_filename),
file_extension),
'GeoJSON (*.geojson);;Shapefile (*.shp)')
# set selected path to the dialog
self.output_form.setText(output_path) |
def __get_config(self, data_sources=None):
"""
Build a dictionary with the Report configuration with the data sources and metrics to be included
in each section of the report
:param data_sources: list of data sources to be included in the report
:return: a dict with the data sources and metrics to be included in the report
"""
if not data_sources:
# For testing
data_sources = ["gerrit", "git", "github_issues", "mls"]
# In new_config a dict with all the metrics for all data sources is created
new_config = {}
for ds in data_sources:
ds_config = self.ds2class[ds].get_section_metrics()
for section in ds_config:
if section not in new_config:
# Just create the section with the data for the ds
new_config[section] = ds_config[section]
else:
for metric_section in ds_config[section]:
if ds_config[section][metric_section] is not None:
if (metric_section not in new_config[section] or
new_config[section][metric_section] is None):
new_config[section][metric_section] = ds_config[section][metric_section]
else:
new_config[section][metric_section] += ds_config[section][metric_section]
# Fields that are not linked to a data source
new_config['overview']['activity_file_csv'] = "data_source_evolution.csv"
new_config['overview']['efficiency_file_csv'] = "efficiency.csv"
new_config['project_process']['time_to_close_title'] = "Days to close (median and average)"
new_config['project_process']['time_to_close_review_title'] = "Days to close review (median and average)"
for i in range(0, len(data_sources)):
ds = data_sources[i]
ds_config = self.ds2class[ds].get_section_metrics()
activity_metrics = ds_config['project_activity']['metrics']
new_config['project_activity']['ds' + str(i + 1) + "_metrics"] = activity_metrics
return new_config | Build a dictionary with the Report configuration with the data sources and metrics to be included
in each section of the report
:param data_sources: list of data sources to be included in the report
:return: a dict with the data sources and metrics to be included in the report | Below is the the instruction that describes the task:
### Input:
Build a dictionary with the Report configuration with the data sources and metrics to be included
in each section of the report
:param data_sources: list of data sources to be included in the report
:return: a dict with the data sources and metrics to be included in the report
### Response:
def __get_config(self, data_sources=None):
"""
Build a dictionary with the Report configuration with the data sources and metrics to be included
in each section of the report
:param data_sources: list of data sources to be included in the report
:return: a dict with the data sources and metrics to be included in the report
"""
if not data_sources:
# For testing
data_sources = ["gerrit", "git", "github_issues", "mls"]
# In new_config a dict with all the metrics for all data sources is created
new_config = {}
for ds in data_sources:
ds_config = self.ds2class[ds].get_section_metrics()
for section in ds_config:
if section not in new_config:
# Just create the section with the data for the ds
new_config[section] = ds_config[section]
else:
for metric_section in ds_config[section]:
if ds_config[section][metric_section] is not None:
if (metric_section not in new_config[section] or
new_config[section][metric_section] is None):
new_config[section][metric_section] = ds_config[section][metric_section]
else:
new_config[section][metric_section] += ds_config[section][metric_section]
# Fields that are not linked to a data source
new_config['overview']['activity_file_csv'] = "data_source_evolution.csv"
new_config['overview']['efficiency_file_csv'] = "efficiency.csv"
new_config['project_process']['time_to_close_title'] = "Days to close (median and average)"
new_config['project_process']['time_to_close_review_title'] = "Days to close review (median and average)"
for i in range(0, len(data_sources)):
ds = data_sources[i]
ds_config = self.ds2class[ds].get_section_metrics()
activity_metrics = ds_config['project_activity']['metrics']
new_config['project_activity']['ds' + str(i + 1) + "_metrics"] = activity_metrics
return new_config |
def editline_with_regex(self, regex_tgtline, to_replace):
"""find the first matched line, then replace
Args:
regex_tgtline (str): regular expression used to match the target line
to_replace (str): line you wanna use to replace
"""
for idx, line in enumerate(self._swp_lines):
mobj = re.match(regex_tgtline, line)
if mobj:
self._swp_lines[idx] = to_replace
return | find the first matched line, then replace
Args:
regex_tgtline (str): regular expression used to match the target line
to_replace (str): line you wanna use to replace | Below is the the instruction that describes the task:
### Input:
find the first matched line, then replace
Args:
regex_tgtline (str): regular expression used to match the target line
to_replace (str): line you wanna use to replace
### Response:
def editline_with_regex(self, regex_tgtline, to_replace):
"""find the first matched line, then replace
Args:
regex_tgtline (str): regular expression used to match the target line
to_replace (str): line you wanna use to replace
"""
for idx, line in enumerate(self._swp_lines):
mobj = re.match(regex_tgtline, line)
if mobj:
self._swp_lines[idx] = to_replace
return |
def _exec_request(self, service, method=None, path_args=None, data=None,
params=None):
"""Execute request."""
if path_args is None:
path_args = []
req = {
'method': method or 'get',
'url': '/'.join(str(a).strip('/') for a in [
cfg.CONF.tvdb.service_url, service] + path_args),
'data': json.dumps(data) if data else None,
'headers': self.headers,
'params': params,
'verify': cfg.CONF.tvdb.verify_ssl_certs,
}
LOG.debug('executing request (%s %s)', req['method'], req['url'])
resp = self.session.request(**req)
resp.raise_for_status()
return resp.json() if resp.text else resp.text | Execute request. | Below is the the instruction that describes the task:
### Input:
Execute request.
### Response:
def _exec_request(self, service, method=None, path_args=None, data=None,
params=None):
"""Execute request."""
if path_args is None:
path_args = []
req = {
'method': method or 'get',
'url': '/'.join(str(a).strip('/') for a in [
cfg.CONF.tvdb.service_url, service] + path_args),
'data': json.dumps(data) if data else None,
'headers': self.headers,
'params': params,
'verify': cfg.CONF.tvdb.verify_ssl_certs,
}
LOG.debug('executing request (%s %s)', req['method'], req['url'])
resp = self.session.request(**req)
resp.raise_for_status()
return resp.json() if resp.text else resp.text |
def get_configured_capabilities(self):
"""Returns configured capabilities."""
capabilities = OrderedDict()
mbgp_caps = []
if self.cap_mbgp_ipv4:
mbgp_caps.append(
BGPOptParamCapabilityMultiprotocol(
RF_IPv4_UC.afi, RF_IPv4_UC.safi))
if self.cap_mbgp_ipv6:
mbgp_caps.append(
BGPOptParamCapabilityMultiprotocol(
RF_IPv6_UC.afi, RF_IPv6_UC.safi))
if self.cap_mbgp_vpnv4:
mbgp_caps.append(
BGPOptParamCapabilityMultiprotocol(
RF_IPv4_VPN.afi, RF_IPv4_VPN.safi))
if self.cap_mbgp_vpnv6:
mbgp_caps.append(
BGPOptParamCapabilityMultiprotocol(
RF_IPv6_VPN.afi, RF_IPv6_VPN.safi))
if self.cap_rtc:
mbgp_caps.append(
BGPOptParamCapabilityMultiprotocol(
RF_RTC_UC.afi, RF_RTC_UC.safi))
if self.cap_mbgp_evpn:
mbgp_caps.append(
BGPOptParamCapabilityMultiprotocol(
RF_L2_EVPN.afi, RF_L2_EVPN.safi))
if self.cap_mbgp_ipv4fs:
mbgp_caps.append(
BGPOptParamCapabilityMultiprotocol(
RF_IPv4_FLOWSPEC.afi, RF_IPv4_FLOWSPEC.safi))
if self.cap_mbgp_ipv6fs:
mbgp_caps.append(
BGPOptParamCapabilityMultiprotocol(
RF_IPv6_FLOWSPEC.afi, RF_IPv6_FLOWSPEC.safi))
if self.cap_mbgp_vpnv4fs:
mbgp_caps.append(
BGPOptParamCapabilityMultiprotocol(
RF_VPNv4_FLOWSPEC.afi, RF_VPNv4_FLOWSPEC.safi))
if self.cap_mbgp_vpnv6fs:
mbgp_caps.append(
BGPOptParamCapabilityMultiprotocol(
RF_VPNv6_FLOWSPEC.afi, RF_VPNv6_FLOWSPEC.safi))
if self.cap_mbgp_l2vpnfs:
mbgp_caps.append(
BGPOptParamCapabilityMultiprotocol(
RF_L2VPN_FLOWSPEC.afi, RF_L2VPN_FLOWSPEC.safi))
if mbgp_caps:
capabilities[BGP_CAP_MULTIPROTOCOL] = mbgp_caps
if self.cap_refresh:
capabilities[BGP_CAP_ROUTE_REFRESH] = [
BGPOptParamCapabilityRouteRefresh()]
if self.cap_enhanced_refresh:
capabilities[BGP_CAP_ENHANCED_ROUTE_REFRESH] = [
BGPOptParamCapabilityEnhancedRouteRefresh()]
if self.cap_four_octet_as_number:
capabilities[BGP_CAP_FOUR_OCTET_AS_NUMBER] = [
BGPOptParamCapabilityFourOctetAsNumber(self.local_as)]
return capabilities | Returns configured capabilities. | Below is the the instruction that describes the task:
### Input:
Returns configured capabilities.
### Response:
def get_configured_capabilities(self):
"""Returns configured capabilities."""
capabilities = OrderedDict()
mbgp_caps = []
if self.cap_mbgp_ipv4:
mbgp_caps.append(
BGPOptParamCapabilityMultiprotocol(
RF_IPv4_UC.afi, RF_IPv4_UC.safi))
if self.cap_mbgp_ipv6:
mbgp_caps.append(
BGPOptParamCapabilityMultiprotocol(
RF_IPv6_UC.afi, RF_IPv6_UC.safi))
if self.cap_mbgp_vpnv4:
mbgp_caps.append(
BGPOptParamCapabilityMultiprotocol(
RF_IPv4_VPN.afi, RF_IPv4_VPN.safi))
if self.cap_mbgp_vpnv6:
mbgp_caps.append(
BGPOptParamCapabilityMultiprotocol(
RF_IPv6_VPN.afi, RF_IPv6_VPN.safi))
if self.cap_rtc:
mbgp_caps.append(
BGPOptParamCapabilityMultiprotocol(
RF_RTC_UC.afi, RF_RTC_UC.safi))
if self.cap_mbgp_evpn:
mbgp_caps.append(
BGPOptParamCapabilityMultiprotocol(
RF_L2_EVPN.afi, RF_L2_EVPN.safi))
if self.cap_mbgp_ipv4fs:
mbgp_caps.append(
BGPOptParamCapabilityMultiprotocol(
RF_IPv4_FLOWSPEC.afi, RF_IPv4_FLOWSPEC.safi))
if self.cap_mbgp_ipv6fs:
mbgp_caps.append(
BGPOptParamCapabilityMultiprotocol(
RF_IPv6_FLOWSPEC.afi, RF_IPv6_FLOWSPEC.safi))
if self.cap_mbgp_vpnv4fs:
mbgp_caps.append(
BGPOptParamCapabilityMultiprotocol(
RF_VPNv4_FLOWSPEC.afi, RF_VPNv4_FLOWSPEC.safi))
if self.cap_mbgp_vpnv6fs:
mbgp_caps.append(
BGPOptParamCapabilityMultiprotocol(
RF_VPNv6_FLOWSPEC.afi, RF_VPNv6_FLOWSPEC.safi))
if self.cap_mbgp_l2vpnfs:
mbgp_caps.append(
BGPOptParamCapabilityMultiprotocol(
RF_L2VPN_FLOWSPEC.afi, RF_L2VPN_FLOWSPEC.safi))
if mbgp_caps:
capabilities[BGP_CAP_MULTIPROTOCOL] = mbgp_caps
if self.cap_refresh:
capabilities[BGP_CAP_ROUTE_REFRESH] = [
BGPOptParamCapabilityRouteRefresh()]
if self.cap_enhanced_refresh:
capabilities[BGP_CAP_ENHANCED_ROUTE_REFRESH] = [
BGPOptParamCapabilityEnhancedRouteRefresh()]
if self.cap_four_octet_as_number:
capabilities[BGP_CAP_FOUR_OCTET_AS_NUMBER] = [
BGPOptParamCapabilityFourOctetAsNumber(self.local_as)]
return capabilities |
def copy(self):
"""Creates a copy of this request context with the same request object.
This can be used to move a request context to a different greenlet.
Because the actual request object is the same this cannot be used to
move a request context to a different thread unless access to the
request object is locked.
.. versionadded:: 0.10
"""
return self.__class__(self.app,
environ=self.request.environ,
request=self.request
) | Creates a copy of this request context with the same request object.
This can be used to move a request context to a different greenlet.
Because the actual request object is the same this cannot be used to
move a request context to a different thread unless access to the
request object is locked.
.. versionadded:: 0.10 | Below is the the instruction that describes the task:
### Input:
Creates a copy of this request context with the same request object.
This can be used to move a request context to a different greenlet.
Because the actual request object is the same this cannot be used to
move a request context to a different thread unless access to the
request object is locked.
.. versionadded:: 0.10
### Response:
def copy(self):
"""Creates a copy of this request context with the same request object.
This can be used to move a request context to a different greenlet.
Because the actual request object is the same this cannot be used to
move a request context to a different thread unless access to the
request object is locked.
.. versionadded:: 0.10
"""
return self.__class__(self.app,
environ=self.request.environ,
request=self.request
) |
def layers(self):
'''Construct Keras input layers for all feature transformers
in the pump.
Returns
-------
layers : {field: keras.layers.Input}
A dictionary of keras input layers, keyed by the corresponding
fields.
'''
layermap = dict()
for operator in self.ops:
if hasattr(operator, 'layers'):
layermap.update(operator.layers())
return layermap | Construct Keras input layers for all feature transformers
in the pump.
Returns
-------
layers : {field: keras.layers.Input}
A dictionary of keras input layers, keyed by the corresponding
fields. | Below is the the instruction that describes the task:
### Input:
Construct Keras input layers for all feature transformers
in the pump.
Returns
-------
layers : {field: keras.layers.Input}
A dictionary of keras input layers, keyed by the corresponding
fields.
### Response:
def layers(self):
'''Construct Keras input layers for all feature transformers
in the pump.
Returns
-------
layers : {field: keras.layers.Input}
A dictionary of keras input layers, keyed by the corresponding
fields.
'''
layermap = dict()
for operator in self.ops:
if hasattr(operator, 'layers'):
layermap.update(operator.layers())
return layermap |
def benchmark_assets(self):
"""
基准组合的账户资产队列
"""
return (
self.benchmark_data.close /
float(self.benchmark_data.close.iloc[0])
* float(self.assets[0])
) | 基准组合的账户资产队列 | Below is the the instruction that describes the task:
### Input:
基准组合的账户资产队列
### Response:
def benchmark_assets(self):
"""
基准组合的账户资产队列
"""
return (
self.benchmark_data.close /
float(self.benchmark_data.close.iloc[0])
* float(self.assets[0])
) |
def __getNetworkStateDirectory(self, extraDataDir):
"""
extraDataDir:
Model's extra data directory path
Returns: Absolute directory path for saving CLA Network
"""
if self.__restoringFromV1:
if self.getInferenceType() == InferenceType.TemporalNextStep:
leafName = 'temporal'+ "-network.nta"
else:
leafName = 'nonTemporal'+ "-network.nta"
else:
leafName = InferenceType.getLabel(self.getInferenceType()) + "-network.nta"
path = os.path.join(extraDataDir, leafName)
path = os.path.abspath(path)
return path | extraDataDir:
Model's extra data directory path
Returns: Absolute directory path for saving CLA Network | Below is the the instruction that describes the task:
### Input:
extraDataDir:
Model's extra data directory path
Returns: Absolute directory path for saving CLA Network
### Response:
def __getNetworkStateDirectory(self, extraDataDir):
"""
extraDataDir:
Model's extra data directory path
Returns: Absolute directory path for saving CLA Network
"""
if self.__restoringFromV1:
if self.getInferenceType() == InferenceType.TemporalNextStep:
leafName = 'temporal'+ "-network.nta"
else:
leafName = 'nonTemporal'+ "-network.nta"
else:
leafName = InferenceType.getLabel(self.getInferenceType()) + "-network.nta"
path = os.path.join(extraDataDir, leafName)
path = os.path.abspath(path)
return path |
def series(collection, method, prints = 15, *args, **kwargs):
'''
Processes a collection in series
Parameters
----------
collection : list
list of Record objects
method : method to call on each Record
prints : int
number of timer prints to the screen
Returns
-------
collection : list
list of Record objects after going through method called
If more than one collection is given, the function is called with an argument list
consisting of the corresponding item of each collection, substituting None for
missing values when not all collection have the same length.
If the function is None, return the original collection (or a list of tuples if multiple collections).
Example
-------
adding 2 to every number in a range
>>> import turntable
>>> collection = range(100)
>>> method = lambda x: x + 2
>>> collection = turntable.spin.series(collection, method)
'''
if 'verbose' in kwargs.keys():
verbose = kwargs['verbose']
else:
verbose = True
results = []
timer = turntable.utils.Timer(nLoops=len(collection), numPrints=prints, verbose=verbose)
for subject in collection:
results.append(method(subject, *args, **kwargs))
timer.loop()
timer.fin()
return results | Processes a collection in series
Parameters
----------
collection : list
list of Record objects
method : method to call on each Record
prints : int
number of timer prints to the screen
Returns
-------
collection : list
list of Record objects after going through method called
If more than one collection is given, the function is called with an argument list
consisting of the corresponding item of each collection, substituting None for
missing values when not all collection have the same length.
If the function is None, return the original collection (or a list of tuples if multiple collections).
Example
-------
adding 2 to every number in a range
>>> import turntable
>>> collection = range(100)
>>> method = lambda x: x + 2
>>> collection = turntable.spin.series(collection, method) | Below is the the instruction that describes the task:
### Input:
Processes a collection in series
Parameters
----------
collection : list
list of Record objects
method : method to call on each Record
prints : int
number of timer prints to the screen
Returns
-------
collection : list
list of Record objects after going through method called
If more than one collection is given, the function is called with an argument list
consisting of the corresponding item of each collection, substituting None for
missing values when not all collection have the same length.
If the function is None, return the original collection (or a list of tuples if multiple collections).
Example
-------
adding 2 to every number in a range
>>> import turntable
>>> collection = range(100)
>>> method = lambda x: x + 2
>>> collection = turntable.spin.series(collection, method)
### Response:
def series(collection, method, prints = 15, *args, **kwargs):
'''
Processes a collection in series
Parameters
----------
collection : list
list of Record objects
method : method to call on each Record
prints : int
number of timer prints to the screen
Returns
-------
collection : list
list of Record objects after going through method called
If more than one collection is given, the function is called with an argument list
consisting of the corresponding item of each collection, substituting None for
missing values when not all collection have the same length.
If the function is None, return the original collection (or a list of tuples if multiple collections).
Example
-------
adding 2 to every number in a range
>>> import turntable
>>> collection = range(100)
>>> method = lambda x: x + 2
>>> collection = turntable.spin.series(collection, method)
'''
if 'verbose' in kwargs.keys():
verbose = kwargs['verbose']
else:
verbose = True
results = []
timer = turntable.utils.Timer(nLoops=len(collection), numPrints=prints, verbose=verbose)
for subject in collection:
results.append(method(subject, *args, **kwargs))
timer.loop()
timer.fin()
return results |
def load_from_file(self, path):
"""
Load cookies from the file.
Content of file should be a JSON-serialized list of dicts.
"""
with open(path) as inf:
data = inf.read()
if data:
items = json.loads(data)
else:
items = {}
for item in items:
extra = dict((x, y) for x, y in item.items()
if x not in ['name', 'value', 'domain'])
self.set(item['name'], item['value'], item['domain'], **extra) | Load cookies from the file.
Content of file should be a JSON-serialized list of dicts. | Below is the the instruction that describes the task:
### Input:
Load cookies from the file.
Content of file should be a JSON-serialized list of dicts.
### Response:
def load_from_file(self, path):
"""
Load cookies from the file.
Content of file should be a JSON-serialized list of dicts.
"""
with open(path) as inf:
data = inf.read()
if data:
items = json.loads(data)
else:
items = {}
for item in items:
extra = dict((x, y) for x, y in item.items()
if x not in ['name', 'value', 'domain'])
self.set(item['name'], item['value'], item['domain'], **extra) |
def get_is_value(tag):
"""
Getters for data that also work with implicit transfersyntax
:param tag: the tag to read
"""
# data is int formatted as string so convert te string first and cast to int
if tag.VR == 'OB' or tag.VR == 'UN':
value = int(tag.value.decode("ascii").replace(" ", ""))
return value
return int(tag.value) | Getters for data that also work with implicit transfersyntax
:param tag: the tag to read | Below is the the instruction that describes the task:
### Input:
Getters for data that also work with implicit transfersyntax
:param tag: the tag to read
### Response:
def get_is_value(tag):
"""
Getters for data that also work with implicit transfersyntax
:param tag: the tag to read
"""
# data is int formatted as string so convert te string first and cast to int
if tag.VR == 'OB' or tag.VR == 'UN':
value = int(tag.value.decode("ascii").replace(" ", ""))
return value
return int(tag.value) |
async def send_all_reactions(self):
"""
Sends all reactions for this paginator, if any are missing.
This method is generally for internal use only.
"""
for emoji in filter(None, self.emojis):
await self.message.add_reaction(emoji)
self.sent_page_reactions = True | Sends all reactions for this paginator, if any are missing.
This method is generally for internal use only. | Below is the the instruction that describes the task:
### Input:
Sends all reactions for this paginator, if any are missing.
This method is generally for internal use only.
### Response:
async def send_all_reactions(self):
"""
Sends all reactions for this paginator, if any are missing.
This method is generally for internal use only.
"""
for emoji in filter(None, self.emojis):
await self.message.add_reaction(emoji)
self.sent_page_reactions = True |
def servers(self):
"""gets the federated or registered servers for Portal"""
url = "%s/servers" % self.root
return Servers(url=url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port) | gets the federated or registered servers for Portal | Below is the the instruction that describes the task:
### Input:
gets the federated or registered servers for Portal
### Response:
def servers(self):
"""gets the federated or registered servers for Portal"""
url = "%s/servers" % self.root
return Servers(url=url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port) |
def BooleanTake(input_vertex: vertex_constructor_param_types, index: Collection[int], label: Optional[str]=None) -> Vertex:
"""
A vertex that extracts a scalar at a given index
:param input_vertex: the input vertex to extract from
:param index: the index to extract at
"""
return Boolean(context.jvm_view().BooleanTakeVertex, label, cast_to_vertex(input_vertex), cast_to_long_array(index)) | A vertex that extracts a scalar at a given index
:param input_vertex: the input vertex to extract from
:param index: the index to extract at | Below is the the instruction that describes the task:
### Input:
A vertex that extracts a scalar at a given index
:param input_vertex: the input vertex to extract from
:param index: the index to extract at
### Response:
def BooleanTake(input_vertex: vertex_constructor_param_types, index: Collection[int], label: Optional[str]=None) -> Vertex:
"""
A vertex that extracts a scalar at a given index
:param input_vertex: the input vertex to extract from
:param index: the index to extract at
"""
return Boolean(context.jvm_view().BooleanTakeVertex, label, cast_to_vertex(input_vertex), cast_to_long_array(index)) |
def remove_members_in_score_range(self, min_score, max_score):
'''
Remove members from the leaderboard in a given score range.
@param min_score [float] Minimum score.
@param max_score [float] Maximum score.
'''
self.remove_members_in_score_range_in(
self.leaderboard_name,
min_score,
max_score) | Remove members from the leaderboard in a given score range.
@param min_score [float] Minimum score.
@param max_score [float] Maximum score. | Below is the the instruction that describes the task:
### Input:
Remove members from the leaderboard in a given score range.
@param min_score [float] Minimum score.
@param max_score [float] Maximum score.
### Response:
def remove_members_in_score_range(self, min_score, max_score):
'''
Remove members from the leaderboard in a given score range.
@param min_score [float] Minimum score.
@param max_score [float] Maximum score.
'''
self.remove_members_in_score_range_in(
self.leaderboard_name,
min_score,
max_score) |
def get_submissions_by_course_and_assignment(
self, course_id, assignment_id, params={}):
"""
https://canvas.instructure.com/doc/api/submissions.html#method.submissions_api.index
"""
url = COURSES_API.format(course_id)
url += "/assignments/{}/submissions".format(assignment_id)
submissions = []
for data in self._get_paged_resource(url, params=params):
submissions.append(Submission(data=data))
return submissions | https://canvas.instructure.com/doc/api/submissions.html#method.submissions_api.index | Below is the the instruction that describes the task:
### Input:
https://canvas.instructure.com/doc/api/submissions.html#method.submissions_api.index
### Response:
def get_submissions_by_course_and_assignment(
self, course_id, assignment_id, params={}):
"""
https://canvas.instructure.com/doc/api/submissions.html#method.submissions_api.index
"""
url = COURSES_API.format(course_id)
url += "/assignments/{}/submissions".format(assignment_id)
submissions = []
for data in self._get_paged_resource(url, params=params):
submissions.append(Submission(data=data))
return submissions |
def _compute_residuals(self, coefs_array, basis_kwargs, boundary_points, nodes, problem):
"""
Return collocation residuals.
Parameters
----------
coefs_array : numpy.ndarray
basis_kwargs : dict
problem : TwoPointBVPLike
Returns
-------
resids : numpy.ndarray
"""
coefs_list = self._array_to_list(coefs_array, problem.number_odes)
derivs, funcs = self._construct_approximation(basis_kwargs, coefs_list)
resids = self._assess_approximation(boundary_points, derivs, funcs,
nodes, problem)
return resids | Return collocation residuals.
Parameters
----------
coefs_array : numpy.ndarray
basis_kwargs : dict
problem : TwoPointBVPLike
Returns
-------
resids : numpy.ndarray | Below is the the instruction that describes the task:
### Input:
Return collocation residuals.
Parameters
----------
coefs_array : numpy.ndarray
basis_kwargs : dict
problem : TwoPointBVPLike
Returns
-------
resids : numpy.ndarray
### Response:
def _compute_residuals(self, coefs_array, basis_kwargs, boundary_points, nodes, problem):
"""
Return collocation residuals.
Parameters
----------
coefs_array : numpy.ndarray
basis_kwargs : dict
problem : TwoPointBVPLike
Returns
-------
resids : numpy.ndarray
"""
coefs_list = self._array_to_list(coefs_array, problem.number_odes)
derivs, funcs = self._construct_approximation(basis_kwargs, coefs_list)
resids = self._assess_approximation(boundary_points, derivs, funcs,
nodes, problem)
return resids |
def ascii_unarmor(text):
"""
Takes an ASCII-armored PGP block and returns the decoded byte value.
:param text: An ASCII-armored PGP block, to un-armor.
:raises: :py:exc:`ValueError` if ``text`` did not contain an ASCII-armored PGP block.
:raises: :py:exc:`TypeError` if ``text`` is not a ``str``, ``bytes``, or ``bytearray``
:returns: A ``dict`` containing information from ``text``, including the de-armored data.
It can contain the following keys: ``magic``, ``headers``, ``hashes``, ``cleartext``, ``body``, ``crc``.
"""
m = {'magic': None, 'headers': None, 'body': bytearray(), 'crc': None}
if not Armorable.is_ascii(text):
m['body'] = bytearray(text)
return m
if isinstance(text, (bytes, bytearray)): # pragma: no cover
text = text.decode('latin-1')
m = Armorable.__armor_regex.search(text)
if m is None: # pragma: no cover
raise ValueError("Expected: ASCII-armored PGP data")
m = m.groupdict()
if m['hashes'] is not None:
m['hashes'] = m['hashes'].split(',')
if m['headers'] is not None:
m['headers'] = collections.OrderedDict(re.findall('^(?P<key>.+): (?P<value>.+)$\n?', m['headers'], flags=re.MULTILINE))
if m['body'] is not None:
try:
m['body'] = bytearray(base64.b64decode(m['body'].encode()))
except (binascii.Error, TypeError) as ex:
six.raise_from(PGPError, ex)
six.raise_from(PGPError(str(ex)), ex)
if m['crc'] is not None:
m['crc'] = Header.bytes_to_int(base64.b64decode(m['crc'].encode()))
if Armorable.crc24(m['body']) != m['crc']:
warnings.warn('Incorrect crc24', stacklevel=3)
return m | Takes an ASCII-armored PGP block and returns the decoded byte value.
:param text: An ASCII-armored PGP block, to un-armor.
:raises: :py:exc:`ValueError` if ``text`` did not contain an ASCII-armored PGP block.
:raises: :py:exc:`TypeError` if ``text`` is not a ``str``, ``bytes``, or ``bytearray``
:returns: A ``dict`` containing information from ``text``, including the de-armored data.
It can contain the following keys: ``magic``, ``headers``, ``hashes``, ``cleartext``, ``body``, ``crc``. | Below is the the instruction that describes the task:
### Input:
Takes an ASCII-armored PGP block and returns the decoded byte value.
:param text: An ASCII-armored PGP block, to un-armor.
:raises: :py:exc:`ValueError` if ``text`` did not contain an ASCII-armored PGP block.
:raises: :py:exc:`TypeError` if ``text`` is not a ``str``, ``bytes``, or ``bytearray``
:returns: A ``dict`` containing information from ``text``, including the de-armored data.
It can contain the following keys: ``magic``, ``headers``, ``hashes``, ``cleartext``, ``body``, ``crc``.
### Response:
def ascii_unarmor(text):
"""
Takes an ASCII-armored PGP block and returns the decoded byte value.
:param text: An ASCII-armored PGP block, to un-armor.
:raises: :py:exc:`ValueError` if ``text`` did not contain an ASCII-armored PGP block.
:raises: :py:exc:`TypeError` if ``text`` is not a ``str``, ``bytes``, or ``bytearray``
:returns: A ``dict`` containing information from ``text``, including the de-armored data.
It can contain the following keys: ``magic``, ``headers``, ``hashes``, ``cleartext``, ``body``, ``crc``.
"""
m = {'magic': None, 'headers': None, 'body': bytearray(), 'crc': None}
if not Armorable.is_ascii(text):
m['body'] = bytearray(text)
return m
if isinstance(text, (bytes, bytearray)): # pragma: no cover
text = text.decode('latin-1')
m = Armorable.__armor_regex.search(text)
if m is None: # pragma: no cover
raise ValueError("Expected: ASCII-armored PGP data")
m = m.groupdict()
if m['hashes'] is not None:
m['hashes'] = m['hashes'].split(',')
if m['headers'] is not None:
m['headers'] = collections.OrderedDict(re.findall('^(?P<key>.+): (?P<value>.+)$\n?', m['headers'], flags=re.MULTILINE))
if m['body'] is not None:
try:
m['body'] = bytearray(base64.b64decode(m['body'].encode()))
except (binascii.Error, TypeError) as ex:
six.raise_from(PGPError, ex)
six.raise_from(PGPError(str(ex)), ex)
if m['crc'] is not None:
m['crc'] = Header.bytes_to_int(base64.b64decode(m['crc'].encode()))
if Armorable.crc24(m['body']) != m['crc']:
warnings.warn('Incorrect crc24', stacklevel=3)
return m |
def encode(self, reference_boxes, proposals):
"""
Encode a set of proposals with respect to some
reference boxes
Arguments:
reference_boxes (Tensor): reference boxes
proposals (Tensor): boxes to be encoded
"""
TO_REMOVE = 1 # TODO remove
ex_widths = proposals[:, 2] - proposals[:, 0] + TO_REMOVE
ex_heights = proposals[:, 3] - proposals[:, 1] + TO_REMOVE
ex_ctr_x = proposals[:, 0] + 0.5 * ex_widths
ex_ctr_y = proposals[:, 1] + 0.5 * ex_heights
gt_widths = reference_boxes[:, 2] - reference_boxes[:, 0] + TO_REMOVE
gt_heights = reference_boxes[:, 3] - reference_boxes[:, 1] + TO_REMOVE
gt_ctr_x = reference_boxes[:, 0] + 0.5 * gt_widths
gt_ctr_y = reference_boxes[:, 1] + 0.5 * gt_heights
wx, wy, ww, wh = self.weights
targets_dx = wx * (gt_ctr_x - ex_ctr_x) / ex_widths
targets_dy = wy * (gt_ctr_y - ex_ctr_y) / ex_heights
targets_dw = ww * torch.log(gt_widths / ex_widths)
targets_dh = wh * torch.log(gt_heights / ex_heights)
targets = torch.stack((targets_dx, targets_dy, targets_dw, targets_dh), dim=1)
return targets | Encode a set of proposals with respect to some
reference boxes
Arguments:
reference_boxes (Tensor): reference boxes
proposals (Tensor): boxes to be encoded | Below is the the instruction that describes the task:
### Input:
Encode a set of proposals with respect to some
reference boxes
Arguments:
reference_boxes (Tensor): reference boxes
proposals (Tensor): boxes to be encoded
### Response:
def encode(self, reference_boxes, proposals):
"""
Encode a set of proposals with respect to some
reference boxes
Arguments:
reference_boxes (Tensor): reference boxes
proposals (Tensor): boxes to be encoded
"""
TO_REMOVE = 1 # TODO remove
ex_widths = proposals[:, 2] - proposals[:, 0] + TO_REMOVE
ex_heights = proposals[:, 3] - proposals[:, 1] + TO_REMOVE
ex_ctr_x = proposals[:, 0] + 0.5 * ex_widths
ex_ctr_y = proposals[:, 1] + 0.5 * ex_heights
gt_widths = reference_boxes[:, 2] - reference_boxes[:, 0] + TO_REMOVE
gt_heights = reference_boxes[:, 3] - reference_boxes[:, 1] + TO_REMOVE
gt_ctr_x = reference_boxes[:, 0] + 0.5 * gt_widths
gt_ctr_y = reference_boxes[:, 1] + 0.5 * gt_heights
wx, wy, ww, wh = self.weights
targets_dx = wx * (gt_ctr_x - ex_ctr_x) / ex_widths
targets_dy = wy * (gt_ctr_y - ex_ctr_y) / ex_heights
targets_dw = ww * torch.log(gt_widths / ex_widths)
targets_dh = wh * torch.log(gt_heights / ex_heights)
targets = torch.stack((targets_dx, targets_dy, targets_dw, targets_dh), dim=1)
return targets |
def enable(self):
"""
Enable the crash reporter. CrashReporter is defaulted to be enabled on creation.
"""
if not CrashReporter.active:
CrashReporter.active = True
# Store this function so we can set it back if the CrashReporter is deactivated
self._excepthook = sys.excepthook
sys.excepthook = self.exception_handler
self.logger.info('CrashReporter: Enabled')
if self.report_dir:
if os.path.exists(self.report_dir):
if self.get_offline_reports():
# First attempt to send the reports, if that fails then start the watcher
self.submit_offline_reports()
remaining_reports = len(self.get_offline_reports())
if remaining_reports and self.watcher_enabled:
self.start_watcher()
else:
os.makedirs(self.report_dir) | Enable the crash reporter. CrashReporter is defaulted to be enabled on creation. | Below is the the instruction that describes the task:
### Input:
Enable the crash reporter. CrashReporter is defaulted to be enabled on creation.
### Response:
def enable(self):
"""
Enable the crash reporter. CrashReporter is defaulted to be enabled on creation.
"""
if not CrashReporter.active:
CrashReporter.active = True
# Store this function so we can set it back if the CrashReporter is deactivated
self._excepthook = sys.excepthook
sys.excepthook = self.exception_handler
self.logger.info('CrashReporter: Enabled')
if self.report_dir:
if os.path.exists(self.report_dir):
if self.get_offline_reports():
# First attempt to send the reports, if that fails then start the watcher
self.submit_offline_reports()
remaining_reports = len(self.get_offline_reports())
if remaining_reports and self.watcher_enabled:
self.start_watcher()
else:
os.makedirs(self.report_dir) |
def _pfp__unpack_data(self, raw_data):
"""Means that the field has already been parsed normally,
and that it now needs to be unpacked.
:raw_data: A string of the data that the field consumed while parsing
"""
if self._pfp__pack_type is None:
return
if self._pfp__no_unpack:
return
unpack_func = self._pfp__packer
unpack_args = []
if self._pfp__packer is not None:
unpack_func = self._pfp__packer
unpack_args = [false(), raw_data]
elif self._pfp__unpack is not None:
unpack_func = self._pfp__unpack
unpack_args = [raw_data]
# does not need to be converted to a char array
if not isinstance(unpack_func, functions.NativeFunction):
io_stream = bitwrap.BitwrappedStream(six.BytesIO(raw_data))
unpack_args[-1] = Array(len(raw_data), Char, io_stream)
res = unpack_func.call(unpack_args, *self._pfp__pack_func_call_info, no_cast=True)
if isinstance(res, Array):
res = res._pfp__build()
io_stream = six.BytesIO(res)
tmp_stream = bitwrap.BitwrappedStream(io_stream)
tmp_stream.padded = self._pfp__interp.get_bitfield_padded()
self._ = self._pfp__parsed_packed = self._pfp__pack_type(tmp_stream)
self._._pfp__watch(self) | Means that the field has already been parsed normally,
and that it now needs to be unpacked.
:raw_data: A string of the data that the field consumed while parsing | Below is the the instruction that describes the task:
### Input:
Means that the field has already been parsed normally,
and that it now needs to be unpacked.
:raw_data: A string of the data that the field consumed while parsing
### Response:
def _pfp__unpack_data(self, raw_data):
"""Means that the field has already been parsed normally,
and that it now needs to be unpacked.
:raw_data: A string of the data that the field consumed while parsing
"""
if self._pfp__pack_type is None:
return
if self._pfp__no_unpack:
return
unpack_func = self._pfp__packer
unpack_args = []
if self._pfp__packer is not None:
unpack_func = self._pfp__packer
unpack_args = [false(), raw_data]
elif self._pfp__unpack is not None:
unpack_func = self._pfp__unpack
unpack_args = [raw_data]
# does not need to be converted to a char array
if not isinstance(unpack_func, functions.NativeFunction):
io_stream = bitwrap.BitwrappedStream(six.BytesIO(raw_data))
unpack_args[-1] = Array(len(raw_data), Char, io_stream)
res = unpack_func.call(unpack_args, *self._pfp__pack_func_call_info, no_cast=True)
if isinstance(res, Array):
res = res._pfp__build()
io_stream = six.BytesIO(res)
tmp_stream = bitwrap.BitwrappedStream(io_stream)
tmp_stream.padded = self._pfp__interp.get_bitfield_padded()
self._ = self._pfp__parsed_packed = self._pfp__pack_type(tmp_stream)
self._._pfp__watch(self) |
def parse(source, **kwargs):
# type: (Union[Source, str], **Any) -> Document
"""Given a GraphQL source, parses it into a Document."""
options = {"no_location": False, "no_source": False}
options.update(kwargs)
if isinstance(source, string_types):
source_obj = Source(source) # type: Source
else:
source_obj = source # type: ignore
parser = Parser(source_obj, options)
return parse_document(parser) | Given a GraphQL source, parses it into a Document. | Below is the the instruction that describes the task:
### Input:
Given a GraphQL source, parses it into a Document.
### Response:
def parse(source, **kwargs):
# type: (Union[Source, str], **Any) -> Document
"""Given a GraphQL source, parses it into a Document."""
options = {"no_location": False, "no_source": False}
options.update(kwargs)
if isinstance(source, string_types):
source_obj = Source(source) # type: Source
else:
source_obj = source # type: ignore
parser = Parser(source_obj, options)
return parse_document(parser) |
def data(self, index, role=Qt.DisplayRole):
"""
Reimplements the :meth:`QAbstractItemModel.data` method.
:param index: Index.
:type index: QModelIndex
:param role: Role.
:type role: int
:return: Data.
:rtype: QVariant
"""
if not index.isValid():
return QVariant()
node = self.get_node(index)
if index.column() == 0:
if hasattr(node, "roles"):
if role == Qt.DecorationRole:
return QIcon(node.roles.get(role, ""))
else:
return node.roles.get(role, QVariant())
else:
attribute = self.get_attribute(node, index.column())
if attribute:
if hasattr(attribute, "roles"):
if role == Qt.DecorationRole:
return QIcon(attribute.roles.get(role, ""))
else:
return attribute.roles.get(role, QVariant())
return QVariant() | Reimplements the :meth:`QAbstractItemModel.data` method.
:param index: Index.
:type index: QModelIndex
:param role: Role.
:type role: int
:return: Data.
:rtype: QVariant | Below is the the instruction that describes the task:
### Input:
Reimplements the :meth:`QAbstractItemModel.data` method.
:param index: Index.
:type index: QModelIndex
:param role: Role.
:type role: int
:return: Data.
:rtype: QVariant
### Response:
def data(self, index, role=Qt.DisplayRole):
"""
Reimplements the :meth:`QAbstractItemModel.data` method.
:param index: Index.
:type index: QModelIndex
:param role: Role.
:type role: int
:return: Data.
:rtype: QVariant
"""
if not index.isValid():
return QVariant()
node = self.get_node(index)
if index.column() == 0:
if hasattr(node, "roles"):
if role == Qt.DecorationRole:
return QIcon(node.roles.get(role, ""))
else:
return node.roles.get(role, QVariant())
else:
attribute = self.get_attribute(node, index.column())
if attribute:
if hasattr(attribute, "roles"):
if role == Qt.DecorationRole:
return QIcon(attribute.roles.get(role, ""))
else:
return attribute.roles.get(role, QVariant())
return QVariant() |
def _parse_common(tag):
"""Returns a tuple of (name, modifiers, dtype, kind)
for the specified tag. Any missing attributes will have values of None.
"""
if "modifiers" in tag.attrib:
modifiers = re.split(",\s*", tag.attrib["modifiers"].strip())
if "" in modifiers:
modifiers.remove("")
else:
modifiers = None
if "name" in tag.attrib:
name = tag.attrib["name"]
if "type" in tag.attrib:
dtype = tag.attrib["type"]
else:
dtype = None
if "kind" in tag.attrib:
kind = tag.attrib["kind"]
else:
kind = None
return (name, modifiers, dtype, kind) | Returns a tuple of (name, modifiers, dtype, kind)
for the specified tag. Any missing attributes will have values of None. | Below is the the instruction that describes the task:
### Input:
Returns a tuple of (name, modifiers, dtype, kind)
for the specified tag. Any missing attributes will have values of None.
### Response:
def _parse_common(tag):
"""Returns a tuple of (name, modifiers, dtype, kind)
for the specified tag. Any missing attributes will have values of None.
"""
if "modifiers" in tag.attrib:
modifiers = re.split(",\s*", tag.attrib["modifiers"].strip())
if "" in modifiers:
modifiers.remove("")
else:
modifiers = None
if "name" in tag.attrib:
name = tag.attrib["name"]
if "type" in tag.attrib:
dtype = tag.attrib["type"]
else:
dtype = None
if "kind" in tag.attrib:
kind = tag.attrib["kind"]
else:
kind = None
return (name, modifiers, dtype, kind) |
def read_csv(self, file: str, table: str, libref: str ="", nosub: bool =False, opts: dict = None) -> '<SASdata object>':
"""
This method will import a csv file into a SAS Data Set and return the SASdata object referring to it.
file - eithe the OS filesystem path of the file, or HTTP://... for a url accessible file
table - the name of the SAS Data Set to create
libref - the libref for the SAS Data Set being created. Defaults to WORK, or USER if assigned
opts - a dictionary containing any of the following Proc Import options(datarow, delimiter, getnames, guessingrows)
"""
opts = opts if opts is not None else {}
code = "filename x "
if file.lower().startswith("http"):
code += "url "
code += "\""+file+"\";\n"
code += "proc import datafile=x out="
if len(libref):
code += libref+"."
code += table+" dbms=csv replace; "+self._sb._impopts(opts)+" run;"
if nosub:
print(code)
else:
ll = self.submit(code, "text") | This method will import a csv file into a SAS Data Set and return the SASdata object referring to it.
file - eithe the OS filesystem path of the file, or HTTP://... for a url accessible file
table - the name of the SAS Data Set to create
libref - the libref for the SAS Data Set being created. Defaults to WORK, or USER if assigned
opts - a dictionary containing any of the following Proc Import options(datarow, delimiter, getnames, guessingrows) | Below is the the instruction that describes the task:
### Input:
This method will import a csv file into a SAS Data Set and return the SASdata object referring to it.
file - eithe the OS filesystem path of the file, or HTTP://... for a url accessible file
table - the name of the SAS Data Set to create
libref - the libref for the SAS Data Set being created. Defaults to WORK, or USER if assigned
opts - a dictionary containing any of the following Proc Import options(datarow, delimiter, getnames, guessingrows)
### Response:
def read_csv(self, file: str, table: str, libref: str ="", nosub: bool =False, opts: dict = None) -> '<SASdata object>':
"""
This method will import a csv file into a SAS Data Set and return the SASdata object referring to it.
file - eithe the OS filesystem path of the file, or HTTP://... for a url accessible file
table - the name of the SAS Data Set to create
libref - the libref for the SAS Data Set being created. Defaults to WORK, or USER if assigned
opts - a dictionary containing any of the following Proc Import options(datarow, delimiter, getnames, guessingrows)
"""
opts = opts if opts is not None else {}
code = "filename x "
if file.lower().startswith("http"):
code += "url "
code += "\""+file+"\";\n"
code += "proc import datafile=x out="
if len(libref):
code += libref+"."
code += table+" dbms=csv replace; "+self._sb._impopts(opts)+" run;"
if nosub:
print(code)
else:
ll = self.submit(code, "text") |
def as_json(obj: JsonObj, indent: Optional[str]=' ', **kwargs) -> str:
""" Convert obj to json string representation.
:param obj: pseudo 'self'
:param indent: indent argument to dumps
:param kwargs: other arguments for dumps
:return: JSON formatted string
"""
return obj._as_json_dumps(indent, **kwargs) | Convert obj to json string representation.
:param obj: pseudo 'self'
:param indent: indent argument to dumps
:param kwargs: other arguments for dumps
:return: JSON formatted string | Below is the the instruction that describes the task:
### Input:
Convert obj to json string representation.
:param obj: pseudo 'self'
:param indent: indent argument to dumps
:param kwargs: other arguments for dumps
:return: JSON formatted string
### Response:
def as_json(obj: JsonObj, indent: Optional[str]=' ', **kwargs) -> str:
""" Convert obj to json string representation.
:param obj: pseudo 'self'
:param indent: indent argument to dumps
:param kwargs: other arguments for dumps
:return: JSON formatted string
"""
return obj._as_json_dumps(indent, **kwargs) |
def linear_interpolate(x1, y1, x2):
"""
Given a function at a set of points (x1, y1), interpolate to
evaluate it at points x2.
"""
li = LinInt(x1, y1)
return li(x2) | Given a function at a set of points (x1, y1), interpolate to
evaluate it at points x2. | Below is the the instruction that describes the task:
### Input:
Given a function at a set of points (x1, y1), interpolate to
evaluate it at points x2.
### Response:
def linear_interpolate(x1, y1, x2):
"""
Given a function at a set of points (x1, y1), interpolate to
evaluate it at points x2.
"""
li = LinInt(x1, y1)
return li(x2) |
def apply(self, method, *args, **kwargs):
"""Create a new image by applying a function to this image's data.
Parameters
----------
method : :obj:`function`
A function to call on the data. This takes in a ndarray
as its first argument and optionally takes other arguments.
It should return a modified data ndarray.
args : arguments
Additional args for method.
kwargs : keyword arguments
Additional keyword arguments for method.
Returns
-------
:obj:`Image`
A new Image of the same type with new data generated by calling
method on the current image's data.
"""
data = method(self.data, *args, **kwargs)
return type(self)(data.astype(self.type), self.frame) | Create a new image by applying a function to this image's data.
Parameters
----------
method : :obj:`function`
A function to call on the data. This takes in a ndarray
as its first argument and optionally takes other arguments.
It should return a modified data ndarray.
args : arguments
Additional args for method.
kwargs : keyword arguments
Additional keyword arguments for method.
Returns
-------
:obj:`Image`
A new Image of the same type with new data generated by calling
method on the current image's data. | Below is the the instruction that describes the task:
### Input:
Create a new image by applying a function to this image's data.
Parameters
----------
method : :obj:`function`
A function to call on the data. This takes in a ndarray
as its first argument and optionally takes other arguments.
It should return a modified data ndarray.
args : arguments
Additional args for method.
kwargs : keyword arguments
Additional keyword arguments for method.
Returns
-------
:obj:`Image`
A new Image of the same type with new data generated by calling
method on the current image's data.
### Response:
def apply(self, method, *args, **kwargs):
"""Create a new image by applying a function to this image's data.
Parameters
----------
method : :obj:`function`
A function to call on the data. This takes in a ndarray
as its first argument and optionally takes other arguments.
It should return a modified data ndarray.
args : arguments
Additional args for method.
kwargs : keyword arguments
Additional keyword arguments for method.
Returns
-------
:obj:`Image`
A new Image of the same type with new data generated by calling
method on the current image's data.
"""
data = method(self.data, *args, **kwargs)
return type(self)(data.astype(self.type), self.frame) |
def gui_getFile():
"""
Launch an ABF file selection file dialog.
This is smart, and remembers (through reboots) where you last were.
"""
import tkinter as tk
from tkinter import filedialog
root = tk.Tk() # this is natively supported by python
root.withdraw() # hide main window
root.wm_attributes('-topmost', 1) # always on top
fname = filedialog.askopenfilename(title = "select ABF file",
filetypes=[('ABF Files', '.abf')],
initialdir=os.path.dirname(abfFname_Load()))
if fname.endswith(".abf"):
abfFname_Save(fname)
return fname
else:
print("didn't select an ABF!")
return None | Launch an ABF file selection file dialog.
This is smart, and remembers (through reboots) where you last were. | Below is the the instruction that describes the task:
### Input:
Launch an ABF file selection file dialog.
This is smart, and remembers (through reboots) where you last were.
### Response:
def gui_getFile():
"""
Launch an ABF file selection file dialog.
This is smart, and remembers (through reboots) where you last were.
"""
import tkinter as tk
from tkinter import filedialog
root = tk.Tk() # this is natively supported by python
root.withdraw() # hide main window
root.wm_attributes('-topmost', 1) # always on top
fname = filedialog.askopenfilename(title = "select ABF file",
filetypes=[('ABF Files', '.abf')],
initialdir=os.path.dirname(abfFname_Load()))
if fname.endswith(".abf"):
abfFname_Save(fname)
return fname
else:
print("didn't select an ABF!")
return None |
def print_data(zone_id, connection):
"""fetch data from database (use zone_id if not empty/None) and print to
console"""
result = connection.execute(
# explicitly pass zone id before related data
select([cast(zone_id.encode('utf-8'), BYTEA), test_table]))
result = result.fetchall()
ZONE_ID_INDEX = 0
print("use zone_id: ", zone_id)
print("{:<3} - {} - {} - {}".format("id", 'zone', "data", "raw_data"))
for row in result:
print(
"{:<3} - {} - {} - {}\n".format(
row['id'], row[ZONE_ID_INDEX].decode('utf-8'),
row['data'].decode('utf-8', errors='ignore'), row['raw_data'])) | fetch data from database (use zone_id if not empty/None) and print to
console | Below is the the instruction that describes the task:
### Input:
fetch data from database (use zone_id if not empty/None) and print to
console
### Response:
def print_data(zone_id, connection):
"""fetch data from database (use zone_id if not empty/None) and print to
console"""
result = connection.execute(
# explicitly pass zone id before related data
select([cast(zone_id.encode('utf-8'), BYTEA), test_table]))
result = result.fetchall()
ZONE_ID_INDEX = 0
print("use zone_id: ", zone_id)
print("{:<3} - {} - {} - {}".format("id", 'zone', "data", "raw_data"))
for row in result:
print(
"{:<3} - {} - {} - {}\n".format(
row['id'], row[ZONE_ID_INDEX].decode('utf-8'),
row['data'].decode('utf-8', errors='ignore'), row['raw_data'])) |
def hrd(self,ifig=None,label=None,colour=None,s2ms=False,
dashes=None,**kwargs):
"""
Plot an HR diagram
Parameters
----------
ifig : integer or string
Figure label, if None the current figure is used
The default value is None.
lims : list [x_lower, x_upper, y_lower, y_upper]
label : string
Label for the model
The default value is None
colour : string
The colour of the line
The default value is None
s2ms : boolean, optional
"Skip to Main Sequence"?
The default is False.
dashes : list, optional
Custom dashing style. If None, ignore.
The default is None.
"""
# fsize=18
#
# params = {'axes.labelsize': fsize,
# # 'font.family': 'serif',
# 'font.family': 'Times New Roman',
# 'figure.facecolor': 'white',
# 'text.fontsize': fsize,
# 'legend.fontsize': fsize,
# 'xtick.labelsize': fsize*0.8,
# 'ytick.labelsize': fsize*0.8,
# 'text.usetex': False}
#
# try:
# pl.rcParams.update(params)
# except:
# pass
if ifig is not None:
pl.figure(ifig)
if s2ms:
h1=self.get('center_h1')
idx=np.where(h1[0]-h1>=3.e-3)[0][0]
skip=idx
else:
skip=0
x = self.get('log_Teff')[skip:]
y = self.get('log_L')[skip:]
if label is not None:
if colour is not None:
line,=pl.plot(x,y,label=label,color=colour,**kwargs)
else:
line,=pl.plot(x,y,label=label,**kwargs)
else:
if colour is not None:
line,=pl.plot(x,y,color=colour,**kwargs)
else:
line,=pl.plot(x,y,**kwargs)
if dashes is not None:
line.set_dashes(dashes)
if label is not None:
pl.legend(loc='best').draw_frame(False)
# pyl.plot(self.data[:,self.cols['log_Teff']-1],\
# self.data[:,self.cols['log_L']-1],\
# label = "M="+str(self.header_attr['initial_mass'])+", Z="\
# +str(self.header_attr['initial_z']))
pyl.xlabel('$\log T_{\\rm eff}$')
pyl.ylabel('$\log L$')
x1,x2=pl.xlim()
if x2 > x1:
ax=pl.gca()
ax.invert_xaxis() | Plot an HR diagram
Parameters
----------
ifig : integer or string
Figure label, if None the current figure is used
The default value is None.
lims : list [x_lower, x_upper, y_lower, y_upper]
label : string
Label for the model
The default value is None
colour : string
The colour of the line
The default value is None
s2ms : boolean, optional
"Skip to Main Sequence"?
The default is False.
dashes : list, optional
Custom dashing style. If None, ignore.
The default is None. | Below is the the instruction that describes the task:
### Input:
Plot an HR diagram
Parameters
----------
ifig : integer or string
Figure label, if None the current figure is used
The default value is None.
lims : list [x_lower, x_upper, y_lower, y_upper]
label : string
Label for the model
The default value is None
colour : string
The colour of the line
The default value is None
s2ms : boolean, optional
"Skip to Main Sequence"?
The default is False.
dashes : list, optional
Custom dashing style. If None, ignore.
The default is None.
### Response:
def hrd(self,ifig=None,label=None,colour=None,s2ms=False,
dashes=None,**kwargs):
"""
Plot an HR diagram
Parameters
----------
ifig : integer or string
Figure label, if None the current figure is used
The default value is None.
lims : list [x_lower, x_upper, y_lower, y_upper]
label : string
Label for the model
The default value is None
colour : string
The colour of the line
The default value is None
s2ms : boolean, optional
"Skip to Main Sequence"?
The default is False.
dashes : list, optional
Custom dashing style. If None, ignore.
The default is None.
"""
# fsize=18
#
# params = {'axes.labelsize': fsize,
# # 'font.family': 'serif',
# 'font.family': 'Times New Roman',
# 'figure.facecolor': 'white',
# 'text.fontsize': fsize,
# 'legend.fontsize': fsize,
# 'xtick.labelsize': fsize*0.8,
# 'ytick.labelsize': fsize*0.8,
# 'text.usetex': False}
#
# try:
# pl.rcParams.update(params)
# except:
# pass
if ifig is not None:
pl.figure(ifig)
if s2ms:
h1=self.get('center_h1')
idx=np.where(h1[0]-h1>=3.e-3)[0][0]
skip=idx
else:
skip=0
x = self.get('log_Teff')[skip:]
y = self.get('log_L')[skip:]
if label is not None:
if colour is not None:
line,=pl.plot(x,y,label=label,color=colour,**kwargs)
else:
line,=pl.plot(x,y,label=label,**kwargs)
else:
if colour is not None:
line,=pl.plot(x,y,color=colour,**kwargs)
else:
line,=pl.plot(x,y,**kwargs)
if dashes is not None:
line.set_dashes(dashes)
if label is not None:
pl.legend(loc='best').draw_frame(False)
# pyl.plot(self.data[:,self.cols['log_Teff']-1],\
# self.data[:,self.cols['log_L']-1],\
# label = "M="+str(self.header_attr['initial_mass'])+", Z="\
# +str(self.header_attr['initial_z']))
pyl.xlabel('$\log T_{\\rm eff}$')
pyl.ylabel('$\log L$')
x1,x2=pl.xlim()
if x2 > x1:
ax=pl.gca()
ax.invert_xaxis() |
def com_google_fonts_check_fontdata_namecheck(ttFont, familyname):
""" Familyname must be unique according to namecheck.fontdata.com """
FB_ISSUE_TRACKER = "https://github.com/googlefonts/fontbakery/issues"
import requests
url = f"http://namecheck.fontdata.com/?q={familyname}"
try:
response = requests.get(url, timeout=10)
data = response.content.decode("utf-8")
if "fonts by that exact name" in data:
yield INFO, ("The family name '{}' seem to be already in use.\n"
"Please visit {} for more info.").format(familyname, url)
else:
yield PASS, "Font familyname seems to be unique."
except:
yield ERROR, ("Failed to access: '{}'.\n"
"Please report this issue at:\n{}").format(url,
FB_ISSUE_TRACKER) | Familyname must be unique according to namecheck.fontdata.com | Below is the the instruction that describes the task:
### Input:
Familyname must be unique according to namecheck.fontdata.com
### Response:
def com_google_fonts_check_fontdata_namecheck(ttFont, familyname):
""" Familyname must be unique according to namecheck.fontdata.com """
FB_ISSUE_TRACKER = "https://github.com/googlefonts/fontbakery/issues"
import requests
url = f"http://namecheck.fontdata.com/?q={familyname}"
try:
response = requests.get(url, timeout=10)
data = response.content.decode("utf-8")
if "fonts by that exact name" in data:
yield INFO, ("The family name '{}' seem to be already in use.\n"
"Please visit {} for more info.").format(familyname, url)
else:
yield PASS, "Font familyname seems to be unique."
except:
yield ERROR, ("Failed to access: '{}'.\n"
"Please report this issue at:\n{}").format(url,
FB_ISSUE_TRACKER) |
def get_logfile_name(tags):
"""Formulates a log file name that incorporates the provided tags.
The log file will be located in ``scgpm_seqresults_dnanexus.LOG_DIR``.
Args:
tags: `list` of tags to append to the log file name. Each tag will be '_' delimited. Each tag
will be added in the same order as provided.
"""
if not os.path.exists(sd.LOG_DIR):
os.mkdir(sd.LOG_DIR)
filename = "log"
for tag in tags:
filename += "_{}".format(tag)
filename += ".txt"
filename = os.path.join(sd.LOG_DIR,filename)
return filename | Formulates a log file name that incorporates the provided tags.
The log file will be located in ``scgpm_seqresults_dnanexus.LOG_DIR``.
Args:
tags: `list` of tags to append to the log file name. Each tag will be '_' delimited. Each tag
will be added in the same order as provided. | Below is the the instruction that describes the task:
### Input:
Formulates a log file name that incorporates the provided tags.
The log file will be located in ``scgpm_seqresults_dnanexus.LOG_DIR``.
Args:
tags: `list` of tags to append to the log file name. Each tag will be '_' delimited. Each tag
will be added in the same order as provided.
### Response:
def get_logfile_name(tags):
    """Formulates a log file name that incorporates the provided tags.
    The log file will be located in ``scgpm_seqresults_dnanexus.LOG_DIR``.
    Args:
        tags: `list` of tags to append to the log file name. Each tag will be '_' delimited. Each tag
            will be added in the same order as provided.
    """
    # Ensure the destination directory for log files exists before use.
    if not os.path.exists(sd.LOG_DIR):
        os.mkdir(sd.LOG_DIR)
    # Start from the fixed "log" stem and append every tag, underscore-delimited,
    # preserving the caller-supplied order.
    basename = "log"
    for tag in tags:
        basename = "{}_{}".format(basename, tag)
    return os.path.join(sd.LOG_DIR, basename + ".txt")
def gmap(google_api_key, map_options, **kwargs):
''' Create a new :class:`~bokeh.plotting.gmap.GMap` for plotting.
Args:
google_api_key (str):
Google requires an API key be supplied for maps to function. See:
https://developers.google.com/maps/documentation/javascript/get-api-key
map_options: (GMapOptions)
Configuration specific to a Google Map
In addition to the standard :class:`~bokeh.plotting.gmap.GMap` keyword
arguments (e.g. ``plot_width`` or ``sizing_mode``), the following
additional options can be passed as well:
.. bokeh-options:: GMapFigureOptions
:module: bokeh.plotting.gmap
Returns:
GMap
'''
return GMap(api_key=google_api_key, map_options=map_options, **kwargs) | Create a new :class:`~bokeh.plotting.gmap.GMap` for plotting.
Args:
google_api_key (str):
Google requires an API key be supplied for maps to function. See:
https://developers.google.com/maps/documentation/javascript/get-api-key
map_options: (GMapOptions)
Configuration specific to a Google Map
In addition to the standard :class:`~bokeh.plotting.gmap.GMap` keyword
arguments (e.g. ``plot_width`` or ``sizing_mode``), the following
additional options can be passed as well:
.. bokeh-options:: GMapFigureOptions
:module: bokeh.plotting.gmap
Returns:
GMap | Below is the instruction that describes the task:
### Input:
Create a new :class:`~bokeh.plotting.gmap.GMap` for plotting.
Args:
google_api_key (str):
Google requires an API key be supplied for maps to function. See:
https://developers.google.com/maps/documentation/javascript/get-api-key
map_options: (GMapOptions)
Configuration specific to a Google Map
In addition to the standard :class:`~bokeh.plotting.gmap.GMap` keyword
arguments (e.g. ``plot_width`` or ``sizing_mode``), the following
additional options can be passed as well:
.. bokeh-options:: GMapFigureOptions
:module: bokeh.plotting.gmap
Returns:
GMap
### Response:
def gmap(google_api_key, map_options, **kwargs):
    ''' Create a new :class:`~bokeh.plotting.gmap.GMap` for plotting.
    Args:
        google_api_key (str):
            Google requires an API key be supplied for maps to function. See:
            https://developers.google.com/maps/documentation/javascript/get-api-key
        map_options: (GMapOptions)
            Configuration specific to a Google Map
    In addition to the standard :class:`~bokeh.plotting.gmap.GMap` keyword
    arguments (e.g. ``plot_width`` or ``sizing_mode``), the following
    additional options can be passed as well:
    .. bokeh-options:: GMapFigureOptions
        :module: bokeh.plotting.gmap
    Returns:
        GMap
    '''
    # Thin convenience wrapper: construct the GMap figure and hand it back.
    figure = GMap(api_key=google_api_key, map_options=map_options, **kwargs)
    return figure
def execute_command(self, command, tab=None):
# TODO DBUS_ONLY
"""Execute the `command' in the `tab'. If tab is None, the
command will be executed in the currently selected
tab. Command should end with '\n', otherwise it will be
appended to the string.
"""
# TODO CONTEXTMENU this has to be rewriten and only serves the
# dbus interface, maybe this should be moved to dbusinterface.py
if not self.get_notebook().has_page():
self.add_tab()
if command[-1] != '\n':
command += '\n'
terminal = self.get_notebook().get_current_terminal()
terminal.feed_child(command) | Execute the `command' in the `tab'. If tab is None, the
command will be executed in the currently selected
tab. Command should end with '\n', otherwise it will be
appended to the string. | Below is the the instruction that describes the task:
### Input:
Execute the `command' in the `tab'. If tab is None, the
command will be executed in the currently selected
tab. Command should end with '\n', otherwise it will be
appended to the string.
### Response:
def execute_command(self, command, tab=None):
    # TODO DBUS_ONLY
    """Execute the `command' in the `tab'. If tab is None, the
    command will be executed in the currently selected
    tab. Command should end with '\n', otherwise it will be
    appended to the string.
    """
    # TODO CONTEXTMENU this has to be rewriten and only serves the
    # dbus interface, maybe this should be moved to dbusinterface.py
    if not self.get_notebook().has_page():
        self.add_tab()
    # Use endswith() instead of indexing command[-1]: the original raised
    # IndexError when an empty command string was passed over dbus.
    if not command.endswith('\n'):
        command += '\n'
    terminal = self.get_notebook().get_current_terminal()
    terminal.feed_child(command)
terminal.feed_child(command) |
def set_control_output(self, name: str, value: float, *, options: dict=None) -> None:
"""Set the value of a control asynchronously.
:param name: The name of the control (string).
:param value: The control value (float).
:param options: A dict of custom options to pass to the instrument for setting the value.
Options are:
value_type: local, delta, output. output is default.
confirm, confirm_tolerance_factor, confirm_timeout: confirm value gets set.
inform: True to keep dependent control outputs constant by adjusting their internal values. False is
default.
Default value of confirm is False.
Default confirm_tolerance_factor is 1.0. A value of 1.0 is the nominal tolerance for that control. Passing a
higher tolerance factor (for example 1.5) will increase the permitted error margin and passing lower tolerance
factor (for example 0.5) will decrease the permitted error margin and consequently make a timeout more likely.
The tolerance factor value 0.0 is a special value which removes all checking and only waits for any change at
all and then returns.
Default confirm_timeout is 16.0 (seconds).
Raises exception if control with name doesn't exist.
Raises TimeoutException if confirm is True and timeout occurs.
.. versionadded:: 1.0
Scriptable: Yes
"""
self.__instrument.set_control_output(name, value, options) | Set the value of a control asynchronously.
:param name: The name of the control (string).
:param value: The control value (float).
:param options: A dict of custom options to pass to the instrument for setting the value.
Options are:
value_type: local, delta, output. output is default.
confirm, confirm_tolerance_factor, confirm_timeout: confirm value gets set.
inform: True to keep dependent control outputs constant by adjusting their internal values. False is
default.
Default value of confirm is False.
Default confirm_tolerance_factor is 1.0. A value of 1.0 is the nominal tolerance for that control. Passing a
higher tolerance factor (for example 1.5) will increase the permitted error margin and passing lower tolerance
factor (for example 0.5) will decrease the permitted error margin and consequently make a timeout more likely.
The tolerance factor value 0.0 is a special value which removes all checking and only waits for any change at
all and then returns.
Default confirm_timeout is 16.0 (seconds).
Raises exception if control with name doesn't exist.
Raises TimeoutException if confirm is True and timeout occurs.
.. versionadded:: 1.0
Scriptable: Yes | Below is the the instruction that describes the task:
### Input:
Set the value of a control asynchronously.
:param name: The name of the control (string).
:param value: The control value (float).
:param options: A dict of custom options to pass to the instrument for setting the value.
Options are:
value_type: local, delta, output. output is default.
confirm, confirm_tolerance_factor, confirm_timeout: confirm value gets set.
inform: True to keep dependent control outputs constant by adjusting their internal values. False is
default.
Default value of confirm is False.
Default confirm_tolerance_factor is 1.0. A value of 1.0 is the nominal tolerance for that control. Passing a
higher tolerance factor (for example 1.5) will increase the permitted error margin and passing lower tolerance
factor (for example 0.5) will decrease the permitted error margin and consequently make a timeout more likely.
The tolerance factor value 0.0 is a special value which removes all checking and only waits for any change at
all and then returns.
Default confirm_timeout is 16.0 (seconds).
Raises exception if control with name doesn't exist.
Raises TimeoutException if confirm is True and timeout occurs.
.. versionadded:: 1.0
Scriptable: Yes
### Response:
def set_control_output(self, name: str, value: float, *, options: dict=None) -> None:
    """Set the value of a control asynchronously.
    :param name: The name of the control (string).
    :param value: The control value (float).
    :param options: A dict of custom options to pass to the instrument for setting the value.
    Options are:
        value_type: local, delta, output. output is default.
        confirm, confirm_tolerance_factor, confirm_timeout: confirm value gets set.
        inform: True to keep dependent control outputs constant by adjusting their internal values. False is
            default.
    Default value of confirm is False.
    Default confirm_tolerance_factor is 1.0. A value of 1.0 is the nominal tolerance for that control. Passing a
    higher tolerance factor (for example 1.5) will increase the permitted error margin and passing lower tolerance
    factor (for example 0.5) will decrease the permitted error margin and consequently make a timeout more likely.
    The tolerance factor value 0.0 is a special value which removes all checking and only waits for any change at
    all and then returns.
    Default confirm_timeout is 16.0 (seconds).
    Raises exception if control with name doesn't exist.
    Raises TimeoutException if confirm is True and timeout occurs.
    .. versionadded:: 1.0
    Scriptable: Yes
    """
    # Pure delegation: the wrapped instrument object performs the asynchronous
    # set, including any confirm/timeout behavior requested via `options`.
    self.__instrument.set_control_output(name, value, options) |
def content(self, output=None, str_output=None, **kwargs):
"""Searches for `output` regex within HTML page. kwargs are passed to BeautifulSoup's find function to filter for tags."""
if self.response.mimetype != "text/html":
raise Failure(_("expected request to return HTML, but it returned {}").format(
self.response.mimetype))
# TODO: Remove once beautiful soup updates to accomodate python 3.7
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
content = BeautifulSoup(self.response.data, "html.parser")
return self._search_page(
output,
str_output,
content,
lambda regex, content: any(regex.search(str(tag)) for tag in content.find_all(**kwargs))) | Searches for `output` regex within HTML page. kwargs are passed to BeautifulSoup's find function to filter for tags. | Below is the the instruction that describes the task:
### Input:
Searches for `output` regex within HTML page. kwargs are passed to BeautifulSoup's find function to filter for tags.
### Response:
def content(self, output=None, str_output=None, **kwargs):
    """Searches for `output` regex within HTML page. kwargs are passed to BeautifulSoup's find function to filter for tags."""
    if self.response.mimetype != "text/html":
        raise Failure(_("expected request to return HTML, but it returned {}").format(
            self.response.mimetype))
    # TODO: Remove once beautiful soup updates to accomodate python 3.7
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=DeprecationWarning)
        parsed = BeautifulSoup(self.response.data, "html.parser")

    def tag_matcher(regex, page):
        # True when any tag selected by **kwargs matches the regex.
        return any(regex.search(str(tag)) for tag in page.find_all(**kwargs))

    return self._search_page(output, str_output, parsed, tag_matcher)
def search_files(self, search):
"""
Search for :class:`meteorpi_model.FileRecord` entities
:param search:
an instance of :class:`meteorpi_model.FileRecordSearch` used to constrain the observations returned from
the DB
:return:
a structure of {count:int total rows of an unrestricted search, observations:list of
:class:`meteorpi_model.FileRecord`}
"""
b = search_files_sql_builder(search)
sql = b.get_select_sql(columns='f.uid, o.publicId AS observationId, f.mimeType, '
'f.fileName, s2.name AS semanticType, f.fileTime, '
'f.fileSize, f.fileMD5, l.publicId AS obstory_id, l.name AS obstory_name, '
'f.repositoryFname',
skip=search.skip,
limit=search.limit,
order='f.fileTime DESC')
files = list(self.generators.file_generator(sql=sql, sql_args=b.sql_args))
rows_returned = len(files)
total_rows = rows_returned + search.skip
if (rows_returned == search.limit > 0) or (rows_returned == 0 and search.skip > 0):
self.con.execute(b.get_count_sql(), b.sql_args)
total_rows = self.con.fetchone()['COUNT(*)']
return {"count": total_rows,
"files": files} | Search for :class:`meteorpi_model.FileRecord` entities
:param search:
an instance of :class:`meteorpi_model.FileRecordSearch` used to constrain the observations returned from
the DB
:return:
a structure of {count:int total rows of an unrestricted search, observations:list of
:class:`meteorpi_model.FileRecord`} | Below is the the instruction that describes the task:
### Input:
Search for :class:`meteorpi_model.FileRecord` entities
:param search:
an instance of :class:`meteorpi_model.FileRecordSearch` used to constrain the observations returned from
the DB
:return:
a structure of {count:int total rows of an unrestricted search, observations:list of
:class:`meteorpi_model.FileRecord`}
### Response:
def search_files(self, search):
    """
    Search for :class:`meteorpi_model.FileRecord` entities
    :param search:
        an instance of :class:`meteorpi_model.FileRecordSearch` used to constrain the observations returned from
        the DB
    :return:
        a structure of {count:int total rows of an unrestricted search, observations:list of
        :class:`meteorpi_model.FileRecord`}
    """
    # Translate the FileRecordSearch into WHERE clauses plus bound arguments.
    b = search_files_sql_builder(search)
    sql = b.get_select_sql(columns='f.uid, o.publicId AS observationId, f.mimeType, '
                                   'f.fileName, s2.name AS semanticType, f.fileTime, '
                                   'f.fileSize, f.fileMD5, l.publicId AS obstory_id, l.name AS obstory_name, '
                                   'f.repositoryFname',
                           skip=search.skip,
                           limit=search.limit,
                           order='f.fileTime DESC')
    files = list(self.generators.file_generator(sql=sql, sql_args=b.sql_args))
    rows_returned = len(files)
    # Cheap total estimate: rows in this page plus the skipped offset.
    total_rows = rows_returned + search.skip
    # If the page came back exactly full (there may be more rows), or empty
    # while skipping past the start, the estimate is unreliable — fall back
    # to the more expensive COUNT(*) query for the real unrestricted total.
    if (rows_returned == search.limit > 0) or (rows_returned == 0 and search.skip > 0):
        self.con.execute(b.get_count_sql(), b.sql_args)
        total_rows = self.con.fetchone()['COUNT(*)']
    return {"count": total_rows,
            "files": files} |
def cache(self, value):
"""Enable or disable caching of pages/frames. Clear cache if False."""
value = bool(value)
if self._cache and not value:
self._clear()
self._cache = value | Enable or disable caching of pages/frames. Clear cache if False. | Below is the the instruction that describes the task:
### Input:
Enable or disable caching of pages/frames. Clear cache if False.
### Response:
def cache(self, value):
    """Enable or disable caching of pages/frames. Clear cache if False."""
    enabled = bool(value)
    # Transitioning from enabled -> disabled flushes everything cached so far.
    if self._cache and not enabled:
        self._clear()
    self._cache = enabled
def watch(self, tube):
"""Add the given tube to the watchlist.
:param tube: Name of the tube to add to the watchlist
Note: Initially, all connections are watching a tube named "default". If
you manually call :func:`watch()`, we will un-watch the "default" tube.
To keep it in your list, first call :func:`watch()` with the other tubes, then
call :func:`watch()` with "default".
"""
with self._sock_ctx() as socket:
self.desired_watchlist.add(tube)
if tube not in self._watchlist:
self._send_message('watch {0}'.format(tube), socket)
self._receive_id(socket)
self._watchlist.add(tube)
if self.initial_watch:
if tube != 'default':
self.ignore('default')
self.initial_watch = False | Add the given tube to the watchlist.
:param tube: Name of the tube to add to the watchlist
Note: Initially, all connections are watching a tube named "default". If
you manually call :func:`watch()`, we will un-watch the "default" tube.
To keep it in your list, first call :func:`watch()` with the other tubes, then
call :func:`watch()` with "default". | Below is the the instruction that describes the task:
### Input:
Add the given tube to the watchlist.
:param tube: Name of the tube to add to the watchlist
Note: Initially, all connections are watching a tube named "default". If
you manually call :func:`watch()`, we will un-watch the "default" tube.
To keep it in your list, first call :func:`watch()` with the other tubes, then
call :func:`watch()` with "default".
### Response:
def watch(self, tube):
    """Add the given tube to the watchlist.
    :param tube: Name of the tube to add to the watchlist
    Note: Initially, all connections are watching a tube named "default". If
    you manually call :func:`watch()`, we will un-watch the "default" tube.
    To keep it in your list, first call :func:`watch()` with the other tubes, then
    call :func:`watch()` with "default".
    """
    with self._sock_ctx() as socket:
        # Record the intent even if the server round-trip is skipped below,
        # so the watchlist can be re-established (e.g. after a reconnect).
        self.desired_watchlist.add(tube)
        if tube not in self._watchlist:
            # Protocol round-trip: send "watch <tube>" and wait for the
            # server's reply before marking the tube as watched.
            self._send_message('watch {0}'.format(tube), socket)
            self._receive_id(socket)
            self._watchlist.add(tube)
        if self.initial_watch:
            # First explicit watch() call: drop the implicit "default" tube,
            # unless "default" is exactly the tube just requested.
            if tube != 'default':
                self.ignore('default')
            self.initial_watch = False
def points(self):
'''Return unordered array with all the points in this neuron'''
if self._points is None:
_points = self.soma.points.tolist()
for n in self.neurites:
_points.extend(n.points.tolist())
self._points = np.array(_points)
return self._points | Return unordered array with all the points in this neuron | Below is the the instruction that describes the task:
### Input:
Return unordered array with all the points in this neuron
### Response:
def points(self):
    '''Return unordered array with all the points in this neuron'''
    if self._points is None:
        # Lazily build and cache: soma points first, then every neurite's.
        collected = []
        for section in [self.soma] + list(self.neurites):
            collected.extend(section.points.tolist())
        self._points = np.array(collected)
    return self._points
def reduceloci(clus_obj, path):
"""reduce number of loci a cluster has
:param clus_obj: cluster object object
:param path: output path
"""
filtered = {}
n_cluster = 0
large = 0
current = clus_obj.clusid
logger.info("Number of loci: %s" % len(clus_obj.loci.keys()))
bar = ProgressBar(maxval=len(current))
bar.start()
bar.update(0)
for itern, idmc in enumerate(current):
bar.update(itern)
logger.debug("_reduceloci: cluster %s" % idmc)
c = copy.deepcopy(list(current[idmc]))
n_loci = len(c)
if n_loci < 1000:
filtered, n_cluster = _iter_loci(c, clus_obj.clus, (clus_obj.loci, clus_obj.seq), filtered, n_cluster)
else:
large += 1
n_cluster += 1
_write_cluster(c, clus_obj.clus, clus_obj.loci, n_cluster, path)
filtered[n_cluster] = _add_complete_cluster(n_cluster, c, clus_obj.clus)
clus_obj.clus = filtered
seqs = 0
for idc in filtered:
seqs += len(filtered[idc].idmembers)
logger.info("seqs in clusters %s" % (seqs))
logger.info("Clusters too long to be analized: %s" % large)
logger.info("Number of clusters removed because low number of reads: %s" % REMOVED)
logger.info("Number of clusters with conflicts: %s" % CONFLICT)
return clus_obj | reduce number of loci a cluster has
:param clus_obj: cluster object object
:param path: output path | Below is the the instruction that describes the task:
### Input:
reduce number of loci a cluster has
:param clus_obj: cluster object object
:param path: output path
### Response:
def reduceloci(clus_obj, path):
    """reduce number of loci a cluster has
    :param clus_obj: cluster object object
    :param path: output path
    """
    filtered = {}    # new cluster id -> rebuilt cluster object
    n_cluster = 0    # running id assigned to rebuilt clusters
    large = 0        # clusters with too many loci (>= 1000) to analyze
    current = clus_obj.clusid
    logger.info("Number of loci: %s" % len(clus_obj.loci.keys()))
    bar = ProgressBar(maxval=len(current))
    bar.start()
    bar.update(0)
    for itern, idmc in enumerate(current):
        bar.update(itern)
        logger.debug("_reduceloci: cluster %s" % idmc)
        # Work on a deep copy so downstream helpers can mutate it freely.
        c = copy.deepcopy(list(current[idmc]))
        n_loci = len(c)
        if n_loci < 1000:
            # Resolve this cluster's loci; returns the updated map of
            # filtered clusters and the next free cluster id.
            filtered, n_cluster = _iter_loci(c, clus_obj.clus, (clus_obj.loci, clus_obj.seq), filtered, n_cluster)
        else:
            # Too many loci to analyze: write it to disk and keep it whole.
            large += 1
            n_cluster += 1
            _write_cluster(c, clus_obj.clus, clus_obj.loci, n_cluster, path)
            filtered[n_cluster] = _add_complete_cluster(n_cluster, c, clus_obj.clus)
    clus_obj.clus = filtered
    # Summary statistics over the rebuilt clusters.
    seqs = 0
    for idc in filtered:
        seqs += len(filtered[idc].idmembers)
    logger.info("seqs in clusters %s" % (seqs))
    logger.info("Clusters too long to be analized: %s" % large)
    # NOTE(review): REMOVED / CONFLICT are module-level counters — presumably
    # updated inside _iter_loci; their maintenance is not visible here.
    logger.info("Number of clusters removed because low number of reads: %s" % REMOVED)
    logger.info("Number of clusters with conflicts: %s" % CONFLICT)
    return clus_obj |
def p_partselect_pointer(self, p):
'partselect : pointer LBRACKET expression COLON expression RBRACKET'
p[0] = Partselect(p[1], p[3], p[5], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) | partselect : pointer LBRACKET expression COLON expression RBRACKET | Below is the the instruction that describes the task:
### Input:
partselect : pointer LBRACKET expression COLON expression RBRACKET
### Response:
def p_partselect_pointer(self, p):
    'partselect : pointer LBRACKET expression COLON expression RBRACKET'
    # NOTE: the docstring above is a PLY grammar production, not documentation;
    # PLY parses it at runtime, so do not reword it.
    # Build a Partselect AST node from the base pointer (p[1]) and the
    # MSB/LSB range expressions (p[3], p[5]).
    p[0] = Partselect(p[1], p[3], p[5], lineno=p.lineno(1))
    p.set_lineno(0, p.lineno(1)) |
def check_auth_deps(self, payload):
'''
Checks if both master and minion either sign (master) and
verify (minion). If one side does not, it should fail.
:param dict payload: The incoming payload. This is a dictionary which may have the following keys:
'aes': The shared AES key
'enc': The format of the message. ('clear', 'pub', 'aes')
'publish_port': The TCP port which published the message
'token': The encrypted token used to verify the message.
'pub_key': The RSA public key of the sender.
'''
# master and minion sign and verify
if 'pub_sig' in payload and self.opts['verify_master_pubkey_sign']:
return True
# master and minion do NOT sign and do NOT verify
elif 'pub_sig' not in payload and not self.opts['verify_master_pubkey_sign']:
return True
# master signs, but minion does NOT verify
elif 'pub_sig' in payload and not self.opts['verify_master_pubkey_sign']:
log.error('The masters sent its public-key signature, but signature '
'verification is not enabled on the minion. Either enable '
'signature verification on the minion or disable signing '
'the public key on the master!')
return False
# master does NOT sign but minion wants to verify
elif 'pub_sig' not in payload and self.opts['verify_master_pubkey_sign']:
log.error('The master did not send its public-key signature, but '
'signature verification is enabled on the minion. Either '
'disable signature verification on the minion or enable '
'signing the public on the master!')
return False | Checks if both master and minion either sign (master) and
verify (minion). If one side does not, it should fail.
:param dict payload: The incoming payload. This is a dictionary which may have the following keys:
'aes': The shared AES key
'enc': The format of the message. ('clear', 'pub', 'aes')
'publish_port': The TCP port which published the message
'token': The encrypted token used to verify the message.
'pub_key': The RSA public key of the sender. | Below is the the instruction that describes the task:
### Input:
Checks if both master and minion either sign (master) and
verify (minion). If one side does not, it should fail.
:param dict payload: The incoming payload. This is a dictionary which may have the following keys:
'aes': The shared AES key
'enc': The format of the message. ('clear', 'pub', 'aes')
'publish_port': The TCP port which published the message
'token': The encrypted token used to verify the message.
'pub_key': The RSA public key of the sender.
### Response:
def check_auth_deps(self, payload):
    '''
    Checks if both master and minion either sign (master) and
    verify (minion). If one side does not, it should fail.
    :param dict payload: The incoming payload. This is a dictionary which may have the following keys:
        'aes': The shared AES key
        'enc': The format of the message. ('clear', 'pub', 'aes')
        'publish_port': The TCP port which published the message
        'token': The encrypted token used to verify the message.
        'pub_key': The RSA public key of the sender.
    '''
    master_signs = 'pub_sig' in payload
    minion_verifies = bool(self.opts['verify_master_pubkey_sign'])
    # Both sides agree: either both sign/verify, or neither does.
    if master_signs == minion_verifies:
        return True
    if master_signs:
        # Master signs, but the minion is not configured to verify.
        log.error('The masters sent its public-key signature, but signature '
                  'verification is not enabled on the minion. Either enable '
                  'signature verification on the minion or disable signing '
                  'the public key on the master!')
    else:
        # Minion wants to verify, but the master did not sign.
        log.error('The master did not send its public-key signature, but '
                  'signature verification is enabled on the minion. Either '
                  'disable signature verification on the minion or enable '
                  'signing the public on the master!')
    return False
def _check_request(self, msg):
"""Checks that the request json is well-formed.
:param msg: The request's json data
:type msg: dict[str, object]
"""
if "jsonrpc" not in msg:
raise InvalidRequestError("'\"jsonrpc\": \"2.0\"' must be included.")
if msg["jsonrpc"] != "2.0":
raise InvalidRequestError("'jsonrpc' must be exactly the string '2.0', but it was '{}'."
.format(msg["jsonrpc"]))
if "method" not in msg:
raise InvalidRequestError("No method specified.")
if "id" in msg:
if msg["id"] is None:
raise InvalidRequestError("typedjsonrpc does not allow id to be None.")
if isinstance(msg["id"], float):
raise InvalidRequestError("typedjsonrpc does not support float ids.")
if not isinstance(msg["id"], (six.string_types, six.integer_types)):
raise InvalidRequestError("id must be a string or integer; '{}' is of type {}."
.format(msg["id"], type(msg["id"])))
if msg["method"] not in self._name_to_method_info:
raise MethodNotFoundError("Could not find method '{}'.".format(msg["method"])) | Checks that the request json is well-formed.
:param msg: The request's json data
:type msg: dict[str, object] | Below is the the instruction that describes the task:
### Input:
Checks that the request json is well-formed.
:param msg: The request's json data
:type msg: dict[str, object]
### Response:
def _check_request(self, msg):
"""Checks that the request json is well-formed.
:param msg: The request's json data
:type msg: dict[str, object]
"""
if "jsonrpc" not in msg:
raise InvalidRequestError("'\"jsonrpc\": \"2.0\"' must be included.")
if msg["jsonrpc"] != "2.0":
raise InvalidRequestError("'jsonrpc' must be exactly the string '2.0', but it was '{}'."
.format(msg["jsonrpc"]))
if "method" not in msg:
raise InvalidRequestError("No method specified.")
if "id" in msg:
if msg["id"] is None:
raise InvalidRequestError("typedjsonrpc does not allow id to be None.")
if isinstance(msg["id"], float):
raise InvalidRequestError("typedjsonrpc does not support float ids.")
if not isinstance(msg["id"], (six.string_types, six.integer_types)):
raise InvalidRequestError("id must be a string or integer; '{}' is of type {}."
.format(msg["id"], type(msg["id"])))
if msg["method"] not in self._name_to_method_info:
raise MethodNotFoundError("Could not find method '{}'.".format(msg["method"])) |
def _collapse_address_list_recursive(addresses):
"""Loops through the addresses, collapsing concurrent netblocks.
Example:
ip1 = IPv4Network('1.1.0.0/24')
ip2 = IPv4Network('1.1.1.0/24')
ip3 = IPv4Network('1.1.2.0/24')
ip4 = IPv4Network('1.1.3.0/24')
ip5 = IPv4Network('1.1.4.0/24')
ip6 = IPv4Network('1.1.0.1/22')
_collapse_address_list_recursive([ip1, ip2, ip3, ip4, ip5, ip6]) ->
[IPv4Network('1.1.0.0/22'), IPv4Network('1.1.4.0/24')]
This shouldn't be called directly; it is called via
collapse_address_list([]).
Args:
addresses: A list of IPv4Network's or IPv6Network's
Returns:
A list of IPv4Network's or IPv6Network's depending on what we were
passed.
"""
ret_array = []
optimized = False
for cur_addr in addresses:
if not ret_array:
ret_array.append(cur_addr)
continue
if cur_addr in ret_array[-1]:
optimized = True
elif cur_addr == ret_array[-1].supernet().subnet()[1]:
ret_array.append(ret_array.pop().supernet())
optimized = True
else:
ret_array.append(cur_addr)
if optimized:
return _collapse_address_list_recursive(ret_array)
return ret_array | Loops through the addresses, collapsing concurrent netblocks.
Example:
ip1 = IPv4Network('1.1.0.0/24')
ip2 = IPv4Network('1.1.1.0/24')
ip3 = IPv4Network('1.1.2.0/24')
ip4 = IPv4Network('1.1.3.0/24')
ip5 = IPv4Network('1.1.4.0/24')
ip6 = IPv4Network('1.1.0.1/22')
_collapse_address_list_recursive([ip1, ip2, ip3, ip4, ip5, ip6]) ->
[IPv4Network('1.1.0.0/22'), IPv4Network('1.1.4.0/24')]
This shouldn't be called directly; it is called via
collapse_address_list([]).
Args:
addresses: A list of IPv4Network's or IPv6Network's
Returns:
A list of IPv4Network's or IPv6Network's depending on what we were
passed. | Below is the the instruction that describes the task:
### Input:
Loops through the addresses, collapsing concurrent netblocks.
Example:
ip1 = IPv4Network('1.1.0.0/24')
ip2 = IPv4Network('1.1.1.0/24')
ip3 = IPv4Network('1.1.2.0/24')
ip4 = IPv4Network('1.1.3.0/24')
ip5 = IPv4Network('1.1.4.0/24')
ip6 = IPv4Network('1.1.0.1/22')
_collapse_address_list_recursive([ip1, ip2, ip3, ip4, ip5, ip6]) ->
[IPv4Network('1.1.0.0/22'), IPv4Network('1.1.4.0/24')]
This shouldn't be called directly; it is called via
collapse_address_list([]).
Args:
addresses: A list of IPv4Network's or IPv6Network's
Returns:
A list of IPv4Network's or IPv6Network's depending on what we were
passed.
### Response:
def _collapse_address_list_recursive(addresses):
    """Loops through the addresses, collapsing concurrent netblocks.
    Example:
        ip1 = IPv4Network('1.1.0.0/24')
        ip2 = IPv4Network('1.1.1.0/24')
        ip3 = IPv4Network('1.1.2.0/24')
        ip4 = IPv4Network('1.1.3.0/24')
        ip5 = IPv4Network('1.1.4.0/24')
        ip6 = IPv4Network('1.1.0.1/22')
        _collapse_address_list_recursive([ip1, ip2, ip3, ip4, ip5, ip6]) ->
            [IPv4Network('1.1.0.0/22'), IPv4Network('1.1.4.0/24')]
    This shouldn't be called directly; it is called via
    collapse_address_list([]).
    Args:
        addresses: A list of IPv4Network's or IPv6Network's
    Returns:
        A list of IPv4Network's or IPv6Network's depending on what we were
        passed.
    """
    ret_array = []
    optimized = False  # whether any merge happened in this pass
    for cur_addr in addresses:
        if not ret_array:
            ret_array.append(cur_addr)
            continue
        if cur_addr in ret_array[-1]:
            # Fully contained in the previous network: drop it.
            optimized = True
        elif cur_addr == ret_array[-1].supernet().subnet()[1]:
            # cur_addr is the "upper half" sibling of the previous network,
            # so the two merge into their common supernet.
            # NOTE(review): `.supernet().subnet()[1]` looks like the old
            # `ipaddr` library API (stdlib `ipaddress` spells these
            # differently) — confirm which network class is in use.
            ret_array.append(ret_array.pop().supernet())
            optimized = True
        else:
            ret_array.append(cur_addr)
    # A merge may enable further merges (e.g. two /24s became a /23 that can
    # now pair with a neighboring /23), so recurse until a fixed point.
    if optimized:
        return _collapse_address_list_recursive(ret_array)
    return ret_array |
def _context_source_file_url(path_or_url):
"""
Returns a URL for a remote or local context CSV file
"""
if path_or_url.startswith('http'):
# Remote CSV. Just return the URL
return path_or_url
if path_or_url.startswith('/'):
# Absolute path
return "file://" + path_or_url
return "file://" + os.path.join(os.path.realpath(os.getcwd()), path_or_url) | Returns a URL for a remote or local context CSV file | Below is the the instruction that describes the task:
### Input:
Returns a URL for a remote or local context CSV file
### Response:
def _context_source_file_url(path_or_url):
"""
Returns a URL for a remote or local context CSV file
"""
if path_or_url.startswith('http'):
# Remote CSV. Just return the URL
return path_or_url
if path_or_url.startswith('/'):
# Absolute path
return "file://" + path_or_url
return "file://" + os.path.join(os.path.realpath(os.getcwd()), path_or_url) |
def from_latlon(latitude, longitude, force_zone_number=None, force_zone_letter=None):
"""This function convert Latitude and Longitude to UTM coordinate
Parameters
----------
latitude: float
Latitude between 80 deg S and 84 deg N, e.g. (-80.0 to 84.0)
longitude: float
Longitude between 180 deg W and 180 deg E, e.g. (-180.0 to 180.0).
force_zone number: int
Zone Number is represented with global map numbers of an UTM Zone
Numbers Map. You may force conversion including one UTM Zone Number.
More information see utmzones [1]_
.. _[1]: http://www.jaworski.ca/utmzones.htm
"""
if not in_bounds(latitude, -80.0, 84.0):
raise OutOfRangeError('latitude out of range (must be between 80 deg S and 84 deg N)')
if not in_bounds(longitude, -180.0, 180.0):
raise OutOfRangeError('longitude out of range (must be between 180 deg W and 180 deg E)')
if force_zone_number is not None:
check_valid_zone(force_zone_number, force_zone_letter)
lat_rad = mathlib.radians(latitude)
lat_sin = mathlib.sin(lat_rad)
lat_cos = mathlib.cos(lat_rad)
lat_tan = lat_sin / lat_cos
lat_tan2 = lat_tan * lat_tan
lat_tan4 = lat_tan2 * lat_tan2
if force_zone_number is None:
zone_number = latlon_to_zone_number(latitude, longitude)
else:
zone_number = force_zone_number
if force_zone_letter is None:
zone_letter = latitude_to_zone_letter(latitude)
else:
zone_letter = force_zone_letter
lon_rad = mathlib.radians(longitude)
central_lon = zone_number_to_central_longitude(zone_number)
central_lon_rad = mathlib.radians(central_lon)
n = R / mathlib.sqrt(1 - E * lat_sin**2)
c = E_P2 * lat_cos**2
a = lat_cos * (lon_rad - central_lon_rad)
a2 = a * a
a3 = a2 * a
a4 = a3 * a
a5 = a4 * a
a6 = a5 * a
m = R * (M1 * lat_rad -
M2 * mathlib.sin(2 * lat_rad) +
M3 * mathlib.sin(4 * lat_rad) -
M4 * mathlib.sin(6 * lat_rad))
easting = K0 * n * (a +
a3 / 6 * (1 - lat_tan2 + c) +
a5 / 120 * (5 - 18 * lat_tan2 + lat_tan4 + 72 * c - 58 * E_P2)) + 500000
northing = K0 * (m + n * lat_tan * (a2 / 2 +
a4 / 24 * (5 - lat_tan2 + 9 * c + 4 * c**2) +
a6 / 720 * (61 - 58 * lat_tan2 + lat_tan4 + 600 * c - 330 * E_P2)))
if mixed_signs(latitude):
raise ValueError("latitudes must all have the same sign")
elif negative(latitude):
northing += 10000000
return easting, northing, zone_number, zone_letter | This function convert Latitude and Longitude to UTM coordinate
Parameters
----------
latitude: float
Latitude between 80 deg S and 84 deg N, e.g. (-80.0 to 84.0)
longitude: float
Longitude between 180 deg W and 180 deg E, e.g. (-180.0 to 180.0).
force_zone number: int
Zone Number is represented with global map numbers of an UTM Zone
Numbers Map. You may force conversion including one UTM Zone Number.
More information see utmzones [1]_
.. _[1]: http://www.jaworski.ca/utmzones.htm | Below is the the instruction that describes the task:
### Input:
This function convert Latitude and Longitude to UTM coordinate
Parameters
----------
latitude: float
Latitude between 80 deg S and 84 deg N, e.g. (-80.0 to 84.0)
longitude: float
Longitude between 180 deg W and 180 deg E, e.g. (-180.0 to 180.0).
force_zone number: int
Zone Number is represented with global map numbers of an UTM Zone
Numbers Map. You may force conversion including one UTM Zone Number.
More information see utmzones [1]_
.. _[1]: http://www.jaworski.ca/utmzones.htm
### Response:
def from_latlon(latitude, longitude, force_zone_number=None, force_zone_letter=None):
    """This function convert Latitude and Longitude to UTM coordinate
    Parameters
    ----------
    latitude: float
        Latitude between 80 deg S and 84 deg N, e.g. (-80.0 to 84.0)
    longitude: float
        Longitude between 180 deg W and 180 deg E, e.g. (-180.0 to 180.0).
    force_zone number: int
        Zone Number is represented with global map numbers of an UTM Zone
        Numbers Map. You may force conversion including one UTM Zone Number.
        More information see utmzones [1]_
    .. _[1]: http://www.jaworski.ca/utmzones.htm
    Returns
    -------
    tuple
        (easting, northing, zone_number, zone_letter)
    """
    # UTM is only defined between 80 deg S and 84 deg N; reject anything else.
    if not in_bounds(latitude, -80.0, 84.0):
        raise OutOfRangeError('latitude out of range (must be between 80 deg S and 84 deg N)')
    if not in_bounds(longitude, -180.0, 180.0):
        raise OutOfRangeError('longitude out of range (must be between 180 deg W and 180 deg E)')
    if force_zone_number is not None:
        check_valid_zone(force_zone_number, force_zone_letter)
    # Trigonometric quantities reused by the series expansions below.
    lat_rad = mathlib.radians(latitude)
    lat_sin = mathlib.sin(lat_rad)
    lat_cos = mathlib.cos(lat_rad)
    lat_tan = lat_sin / lat_cos
    lat_tan2 = lat_tan * lat_tan
    lat_tan4 = lat_tan2 * lat_tan2
    # Derive zone number/letter from the coordinates unless the caller
    # forced one.
    if force_zone_number is None:
        zone_number = latlon_to_zone_number(latitude, longitude)
    else:
        zone_number = force_zone_number
    if force_zone_letter is None:
        zone_letter = latitude_to_zone_letter(latitude)
    else:
        zone_letter = force_zone_letter
    lon_rad = mathlib.radians(longitude)
    central_lon = zone_number_to_central_longitude(zone_number)
    central_lon_rad = mathlib.radians(central_lon)
    # NOTE(review): R, E, E_P2, K0 and M1..M4 appear to be module-level
    # ellipsoid/projection constants defined elsewhere in this module —
    # confirm before editing the formulas below.
    n = R / mathlib.sqrt(1 - E * lat_sin**2)
    c = E_P2 * lat_cos**2
    # a is the cos(lat)-scaled longitude offset from the zone's central
    # meridian; its powers drive the easting/northing series below.
    a = lat_cos * (lon_rad - central_lon_rad)
    a2 = a * a
    a3 = a2 * a
    a4 = a3 * a
    a5 = a4 * a
    a6 = a5 * a
    m = R * (M1 * lat_rad -
             M2 * mathlib.sin(2 * lat_rad) +
             M3 * mathlib.sin(4 * lat_rad) -
             M4 * mathlib.sin(6 * lat_rad))
    # 500000 is the standard UTM false easting applied to every zone.
    easting = K0 * n * (a +
                        a3 / 6 * (1 - lat_tan2 + c) +
                        a5 / 120 * (5 - 18 * lat_tan2 + lat_tan4 + 72 * c - 58 * E_P2)) + 500000
    northing = K0 * (m + n * lat_tan * (a2 / 2 +
                                        a4 / 24 * (5 - lat_tan2 + 9 * c + 4 * c**2) +
                                        a6 / 720 * (61 - 58 * lat_tan2 + lat_tan4 + 600 * c - 330 * E_P2)))
    # mixed_signs/negative presumably accept array-valued latitudes too —
    # a batch spanning both hemispheres cannot share one false northing.
    if mixed_signs(latitude):
        raise ValueError("latitudes must all have the same sign")
    elif negative(latitude):
        # Southern hemisphere: apply the 10,000,000 m false northing.
        northing += 10000000
    return easting, northing, zone_number, zone_letter
def _cleanup_and_die(data):
""" cleanup func for step 1 """
tmpfiles = glob.glob(os.path.join(data.dirs.fastqs, "tmp_*_R*.fastq"))
tmpfiles += glob.glob(os.path.join(data.dirs.fastqs, "tmp_*.p"))
for tmpf in tmpfiles:
        os.remove(tmpf) | cleanup func for step 1 | Below is the instruction that describes the task:
### Input:
cleanup func for step 1
### Response:
def _cleanup_and_die(data):
    """Remove step-1 temporary files left in the fastqs directory."""
    for pattern in ("tmp_*_R*.fastq", "tmp_*.p"):
        for stale in glob.glob(os.path.join(data.dirs.fastqs, pattern)):
            os.remove(stale)
def _assert_ndims_statically(x,
expect_ndims=None,
expect_ndims_at_least=None,
expect_static=False):
"""Assert that Tensor x has expected number of dimensions."""
ndims = x.shape.ndims
if ndims is None:
if expect_static:
raise ValueError('Expected static ndims. Found: {}'.format(x))
return
if expect_ndims is not None and ndims != expect_ndims:
raise ValueError('ndims must be {}. Found: {}'.format(expect_ndims, ndims))
if expect_ndims_at_least is not None and ndims < expect_ndims_at_least:
raise ValueError('ndims must be at least {}. Found {}'.format(
expect_ndims_at_least, ndims)) | Assert that Tensor x has expected number of dimensions. | Below is the the instruction that describes the task:
### Input:
Assert that Tensor x has expected number of dimensions.
### Response:
def _assert_ndims_statically(x,
expect_ndims=None,
expect_ndims_at_least=None,
expect_static=False):
"""Assert that Tensor x has expected number of dimensions."""
ndims = x.shape.ndims
if ndims is None:
if expect_static:
raise ValueError('Expected static ndims. Found: {}'.format(x))
return
if expect_ndims is not None and ndims != expect_ndims:
raise ValueError('ndims must be {}. Found: {}'.format(expect_ndims, ndims))
if expect_ndims_at_least is not None and ndims < expect_ndims_at_least:
raise ValueError('ndims must be at least {}. Found {}'.format(
expect_ndims_at_least, ndims)) |
def find_matching(cls, path, patterns):
"""Yield all matching patterns for path."""
for pattern in patterns:
if pattern.match(path):
            yield pattern | Yield all matching patterns for path. | Below is the instruction that describes the task:
### Input:
Yield all matching patterns for path.
### Response:
def find_matching(cls, path, patterns):
    """Yield each pattern in *patterns* whose ``match`` accepts *path*."""
    yield from (candidate for candidate in patterns if candidate.match(path))
def _copy_module(self, conn, tmp, module_name, module_args, inject):
''' transfer a module over SFTP, does not run it '''
if module_name.startswith("/"):
raise errors.AnsibleFileNotFound("%s is not a module" % module_name)
# Search module path(s) for named module.
in_path = utils.plugins.module_finder.find_plugin(module_name)
if in_path is None:
raise errors.AnsibleFileNotFound("module %s not found in %s" % (module_name, utils.plugins.module_finder.print_paths()))
out_path = os.path.join(tmp, module_name)
module_data = ""
is_new_style=False
with open(in_path) as f:
module_data = f.read()
if module_common.REPLACER in module_data:
is_new_style=True
module_data = module_data.replace(module_common.REPLACER, module_common.MODULE_COMMON)
encoded_args = "\"\"\"%s\"\"\"" % module_args.replace("\"","\\\"")
module_data = module_data.replace(module_common.REPLACER_ARGS, encoded_args)
encoded_lang = "\"\"\"%s\"\"\"" % C.DEFAULT_MODULE_LANG
module_data = module_data.replace(module_common.REPLACER_LANG, encoded_lang)
if is_new_style:
facility = C.DEFAULT_SYSLOG_FACILITY
if 'ansible_syslog_facility' in inject:
facility = inject['ansible_syslog_facility']
module_data = module_data.replace('syslog.LOG_USER', "syslog.%s" % facility)
lines = module_data.split("\n")
shebang = None
if lines[0].startswith("#!"):
shebang = lines[0]
args = shlex.split(str(shebang[2:]))
interpreter = args[0]
interpreter_config = 'ansible_%s_interpreter' % os.path.basename(interpreter)
if interpreter_config in inject:
lines[0] = shebang = "#!%s %s" % (inject[interpreter_config], " ".join(args[1:]))
module_data = "\n".join(lines)
self._transfer_str(conn, tmp, module_name, module_data)
return (out_path, is_new_style, shebang) | transfer a module over SFTP, does not run it | Below is the the instruction that describes the task:
### Input:
transfer a module over SFTP, does not run it
### Response:
def _copy_module(self, conn, tmp, module_name, module_args, inject):
    ''' transfer a module over SFTP, does not run it '''
    # Absolute paths are rejected: modules must be resolved through the
    # configured module search path, never taken directly from disk.
    if module_name.startswith("/"):
        raise errors.AnsibleFileNotFound("%s is not a module" % module_name)
    # Search module path(s) for named module.
    in_path = utils.plugins.module_finder.find_plugin(module_name)
    if in_path is None:
        raise errors.AnsibleFileNotFound("module %s not found in %s" % (module_name, utils.plugins.module_finder.print_paths()))
    # Remote destination: the module keeps its name inside the temp dir.
    out_path = os.path.join(tmp, module_name)
    module_data = ""
    is_new_style=False
    with open(in_path) as f:
        module_data = f.read()
        # "New style" modules embed a REPLACER marker that is expanded
        # into the shared module boilerplate before transfer.
        if module_common.REPLACER in module_data:
            is_new_style=True
        module_data = module_data.replace(module_common.REPLACER, module_common.MODULE_COMMON)
        # Inline the module arguments and language as triple-quoted
        # string literals, escaping embedded double quotes in the args.
        encoded_args = "\"\"\"%s\"\"\"" % module_args.replace("\"","\\\"")
        module_data = module_data.replace(module_common.REPLACER_ARGS, encoded_args)
        encoded_lang = "\"\"\"%s\"\"\"" % C.DEFAULT_MODULE_LANG
        module_data = module_data.replace(module_common.REPLACER_LANG, encoded_lang)
        if is_new_style:
            # Allow the inventory to override the syslog facility used
            # by the generated module.
            facility = C.DEFAULT_SYSLOG_FACILITY
            if 'ansible_syslog_facility' in inject:
                facility = inject['ansible_syslog_facility']
            module_data = module_data.replace('syslog.LOG_USER', "syslog.%s" % facility)
        lines = module_data.split("\n")
        shebang = None
        if lines[0].startswith("#!"):
            # Rewrite the shebang when the inventory configures a custom
            # interpreter (e.g. ansible_python_interpreter).
            shebang = lines[0]
            args = shlex.split(str(shebang[2:]))
            interpreter = args[0]
            interpreter_config = 'ansible_%s_interpreter' % os.path.basename(interpreter)
            if interpreter_config in inject:
                lines[0] = shebang = "#!%s %s" % (inject[interpreter_config], " ".join(args[1:]))
        module_data = "\n".join(lines)
        # Ship the rendered module text to the remote host.
        self._transfer_str(conn, tmp, module_name, module_data)
        return (out_path, is_new_style, shebang)
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: PhoneNumberContext for this PhoneNumberInstance
:rtype: twilio.rest.trunking.v1.trunk.phone_number.PhoneNumberContext
"""
if self._context is None:
self._context = PhoneNumberContext(
self._version,
trunk_sid=self._solution['trunk_sid'],
sid=self._solution['sid'],
)
return self._context | Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: PhoneNumberContext for this PhoneNumberInstance
:rtype: twilio.rest.trunking.v1.trunk.phone_number.PhoneNumberContext | Below is the the instruction that describes the task:
### Input:
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: PhoneNumberContext for this PhoneNumberInstance
:rtype: twilio.rest.trunking.v1.trunk.phone_number.PhoneNumberContext
### Response:
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: PhoneNumberContext for this PhoneNumberInstance
:rtype: twilio.rest.trunking.v1.trunk.phone_number.PhoneNumberContext
"""
if self._context is None:
self._context = PhoneNumberContext(
self._version,
trunk_sid=self._solution['trunk_sid'],
sid=self._solution['sid'],
)
return self._context |
def create_job(self, project_id, job, use_existing_job_fn=None):
"""
Launches a MLEngine job and wait for it to reach a terminal state.
:param project_id: The Google Cloud project id within which MLEngine
job will be launched.
:type project_id: str
:param job: MLEngine Job object that should be provided to the MLEngine
API, such as: ::
{
'jobId': 'my_job_id',
'trainingInput': {
'scaleTier': 'STANDARD_1',
...
}
}
:type job: dict
:param use_existing_job_fn: In case that a MLEngine job with the same
job_id already exist, this method (if provided) will decide whether
we should use this existing job, continue waiting for it to finish
and returning the job object. It should accepts a MLEngine job
object, and returns a boolean value indicating whether it is OK to
reuse the existing job. If 'use_existing_job_fn' is not provided,
we by default reuse the existing MLEngine job.
:type use_existing_job_fn: function
:return: The MLEngine job object if the job successfully reach a
terminal state (which might be FAILED or CANCELLED state).
:rtype: dict
"""
request = self._mlengine.projects().jobs().create(
parent='projects/{}'.format(project_id),
body=job)
job_id = job['jobId']
try:
request.execute()
except HttpError as e:
# 409 means there is an existing job with the same job ID.
if e.resp.status == 409:
if use_existing_job_fn is not None:
existing_job = self._get_job(project_id, job_id)
if not use_existing_job_fn(existing_job):
self.log.error(
'Job with job_id %s already exist, but it does '
'not match our expectation: %s',
job_id, existing_job
)
raise
self.log.info(
'Job with job_id %s already exist. Will waiting for it to finish',
job_id
)
else:
self.log.error('Failed to create MLEngine job: {}'.format(e))
raise
return self._wait_for_job_done(project_id, job_id) | Launches a MLEngine job and wait for it to reach a terminal state.
:param project_id: The Google Cloud project id within which MLEngine
job will be launched.
:type project_id: str
:param job: MLEngine Job object that should be provided to the MLEngine
API, such as: ::
{
'jobId': 'my_job_id',
'trainingInput': {
'scaleTier': 'STANDARD_1',
...
}
}
:type job: dict
:param use_existing_job_fn: In case that a MLEngine job with the same
job_id already exist, this method (if provided) will decide whether
we should use this existing job, continue waiting for it to finish
and returning the job object. It should accepts a MLEngine job
object, and returns a boolean value indicating whether it is OK to
reuse the existing job. If 'use_existing_job_fn' is not provided,
we by default reuse the existing MLEngine job.
:type use_existing_job_fn: function
:return: The MLEngine job object if the job successfully reach a
terminal state (which might be FAILED or CANCELLED state).
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Launches a MLEngine job and wait for it to reach a terminal state.
:param project_id: The Google Cloud project id within which MLEngine
job will be launched.
:type project_id: str
:param job: MLEngine Job object that should be provided to the MLEngine
API, such as: ::
{
'jobId': 'my_job_id',
'trainingInput': {
'scaleTier': 'STANDARD_1',
...
}
}
:type job: dict
:param use_existing_job_fn: In case that a MLEngine job with the same
job_id already exist, this method (if provided) will decide whether
we should use this existing job, continue waiting for it to finish
and returning the job object. It should accepts a MLEngine job
object, and returns a boolean value indicating whether it is OK to
reuse the existing job. If 'use_existing_job_fn' is not provided,
we by default reuse the existing MLEngine job.
:type use_existing_job_fn: function
:return: The MLEngine job object if the job successfully reach a
terminal state (which might be FAILED or CANCELLED state).
:rtype: dict
### Response:
def create_job(self, project_id, job, use_existing_job_fn=None):
    """
    Launches a MLEngine job and wait for it to reach a terminal state.
    :param project_id: The Google Cloud project id within which MLEngine
        job will be launched.
    :type project_id: str
    :param job: MLEngine Job object that should be provided to the MLEngine
        API, such as: ::
            {
              'jobId': 'my_job_id',
              'trainingInput': {
                'scaleTier': 'STANDARD_1',
                ...
              }
            }
    :type job: dict
    :param use_existing_job_fn: In case that a MLEngine job with the same
        job_id already exist, this method (if provided) will decide whether
        we should use this existing job, continue waiting for it to finish
        and returning the job object. It should accepts a MLEngine job
        object, and returns a boolean value indicating whether it is OK to
        reuse the existing job. If 'use_existing_job_fn' is not provided,
        we by default reuse the existing MLEngine job.
    :type use_existing_job_fn: function
    :return: The MLEngine job object if the job successfully reach a
        terminal state (which might be FAILED or CANCELLED state).
    :rtype: dict
    """
    # Build (but do not yet execute) the job-creation request.
    request = self._mlengine.projects().jobs().create(
        parent='projects/{}'.format(project_id),
        body=job)
    job_id = job['jobId']
    try:
        request.execute()
    except HttpError as e:
        # 409 means there is an existing job with the same job ID.
        if e.resp.status == 409:
            if use_existing_job_fn is not None:
                # Let the caller decide whether the pre-existing job is
                # acceptable; if not, re-raise the original 409 error.
                existing_job = self._get_job(project_id, job_id)
                if not use_existing_job_fn(existing_job):
                    self.log.error(
                        'Job with job_id %s already exist, but it does '
                        'not match our expectation: %s',
                        job_id, existing_job
                    )
                    raise
            # Reuse the existing job: fall through to the wait below.
            self.log.info(
                'Job with job_id %s already exist. Will waiting for it to finish',
                job_id
            )
        else:
            self.log.error('Failed to create MLEngine job: {}'.format(e))
            raise
    # Block until the job reaches a terminal state and return it.
    return self._wait_for_job_done(project_id, job_id)
def location_path(self):
"""
Return the Location-Path of the response.
:rtype : String
:return: the Location-Path option
"""
value = []
for option in self.options:
if option.number == defines.OptionRegistry.LOCATION_PATH.number:
value.append(str(option.value))
return "/".join(value) | Return the Location-Path of the response.
:rtype : String
    :return: the Location-Path option | Below is the instruction that describes the task:
### Input:
Return the Location-Path of the response.
:rtype : String
:return: the Location-Path option
### Response:
def location_path(self):
    """Return the Location-Path of the response.

    :rtype : String
    :return: the Location-Path option segments joined with "/"
    """
    segments = [
        str(option.value)
        for option in self.options
        if option.number == defines.OptionRegistry.LOCATION_PATH.number
    ]
    return "/".join(segments)
def SetProp(self, prop, value):
"""
set attribute
"""
if prop == 'id':
self.id = value
elif prop == 'status':
self.status = value
elif prop == 'bm':
self.bm = value
elif prop == 'graph':
self.graph = value
else:
        self.properties[prop] = value | set attribute | Below is the instruction that describes the task:
### Input:
set attribute
### Response:
def SetProp(self, prop, value):
    """Set a named attribute on this object.

    ``id``, ``status``, ``bm`` and ``graph`` are stored as plain
    attributes; every other name goes into the ``properties`` mapping.
    """
    if prop in ('id', 'status', 'bm', 'graph'):
        setattr(self, prop, value)
    else:
        self.properties[prop] = value
def write_http_request(self, path: str, headers: Headers) -> None:
"""
Write request line and headers to the HTTP request.
"""
self.path = path
self.request_headers = headers
logger.debug("%s > GET %s HTTP/1.1", self.side, path)
logger.debug("%s > %r", self.side, headers)
# Since the path and headers only contain ASCII characters,
# we can keep this simple.
request = f"GET {path} HTTP/1.1\r\n"
request += str(headers)
self.writer.write(request.encode()) | Write request line and headers to the HTTP request. | Below is the the instruction that describes the task:
### Input:
Write request line and headers to the HTTP request.
### Response:
def write_http_request(self, path: str, headers: Headers) -> None:
    """Write the request line and headers of an HTTP/1.1 GET request."""
    self.path = path
    self.request_headers = headers
    logger.debug("%s > GET %s HTTP/1.1", self.side, path)
    logger.debug("%s > %r", self.side, headers)
    # The path and headers only contain ASCII characters, so the
    # default encode below is safe.
    request_text = "".join((f"GET {path} HTTP/1.1\r\n", str(headers)))
    self.writer.write(request_text.encode())
def drop(self):
"""Drop this database.
See
https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.database.v1#google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase
"""
api = self._instance._client.database_admin_api
metadata = _metadata_with_prefix(self.name)
api.drop_database(self.name, metadata=metadata) | Drop this database.
See
https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.database.v1#google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase | Below is the the instruction that describes the task:
### Input:
Drop this database.
See
https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.database.v1#google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase
### Response:
def drop(self):
    """Drop this database.

    See
    https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.database.v1#google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase
    """
    admin_api = self._instance._client.database_admin_api
    request_metadata = _metadata_with_prefix(self.name)
    admin_api.drop_database(self.name, metadata=request_metadata)
def next_conkey(self, conkey):
"""Return the next <conkey><n> based on conkey as a
string. Example, if 'startcond3' and 'startcond5' exist, this
will return 'startcond6' if 'startcond5' value is not None,
else startcond5 is returned.
It is assumed conkey is a valid condition key.
.. warning::
Under construction. There is work to do. This function in
combination with the pack.add_condition. But now it's time for
bed.
"""
if conkey in self.conditions:
return conkey # Explicit conkey
conkeys = self.sorted_conkeys(prefix=conkey) # Might be empty.
if not conkeys:
# A trailing number given that does not already exist.
# accept possible gap from previous number.
return conkey
for candidate in conkeys:
if self.conditions[candidate] is None:
return candidate
i = self.cond_int(candidate) # The last one.
return re.sub(r'\d+', str(i + 1), candidate) | Return the next <conkey><n> based on conkey as a
string. Example, if 'startcond3' and 'startcond5' exist, this
will return 'startcond6' if 'startcond5' value is not None,
else startcond5 is returned.
It is assumed conkey is a valid condition key.
.. warning::
Under construction. There is work to do. This function in
combination with the pack.add_condition. But now it's time for
bed. | Below is the the instruction that describes the task:
### Input:
Return the next <conkey><n> based on conkey as a
string. Example, if 'startcond3' and 'startcond5' exist, this
will return 'startcond6' if 'startcond5' value is not None,
else startcond5 is returned.
It is assumed conkey is a valid condition key.
.. warning::
Under construction. There is work to do. This function in
combination with the pack.add_condition. But now it's time for
bed.
### Response:
def next_conkey(self, conkey):
    """Return the condition key to use for *conkey*.

    If *conkey* is already in ``self.conditions`` it is returned as-is.
    Otherwise the existing keys sharing the *conkey* prefix are scanned:
    the first whose value is None is reused, and when all are set the
    numeric suffix of the last key is incremented (e.g. 'startcond5' ->
    'startcond6'). With no matching keys, *conkey* itself is returned.

    .. warning::
        Under construction; meant to work together with
        ``pack.add_condition``.
    """
    if conkey in self.conditions:
        # An explicit, already-known key.
        return conkey
    prefixed = self.sorted_conkeys(prefix=conkey)
    if not prefixed:
        # A trailing number given that does not already exist; accept a
        # possible gap from the previous number.
        return conkey
    for name in prefixed:
        if self.conditions[name] is None:
            return name
    highest = self.cond_int(prefixed[-1])
    return re.sub(r'\d+', str(highest + 1), prefixed[-1])
def get_packing_plan(self, topologyName, callback=None):
""" get packing plan """
isWatching = False
# Temp dict used to return result
# if callback is not provided.
ret = {
"result": None
}
if callback:
isWatching = True
else:
def callback(data):
""" Custom callback to get the topologies right now. """
ret["result"] = data
self._get_packing_plan_with_watch(topologyName, callback, isWatching)
# The topologies are now populated with the data.
return ret["result"] | get packing plan | Below is the the instruction that describes the task:
### Input:
get packing plan
### Response:
def get_packing_plan(self, topologyName, callback=None):
    """Fetch the packing plan for *topologyName*.

    When *callback* is given the plan is delivered asynchronously via
    the watch and this method returns None; otherwise a synchronous
    fetch is performed and the plan is returned directly.
    """
    # Mutable holder so the synchronous callback below can hand the
    # result back out of the watch call.
    holder = {"result": None}
    watching = callback is not None
    if not watching:
        def callback(data):
            """Capture the packing plan fetched right now."""
            holder["result"] = data
    self._get_packing_plan_with_watch(topologyName, callback, watching)
    return holder["result"]
def add_resource(self, resource):
'''Perform an atomic prepend for a new resource'''
resource.validate()
self.update(__raw__={
'$push': {
'resources': {
'$each': [resource.to_mongo()],
'$position': 0
}
}
})
self.reload()
post_save.send(self.__class__, document=self,
resource_added=resource.id) | Perform an atomic prepend for a new resource | Below is the the instruction that describes the task:
### Input:
Perform an atomic prepend for a new resource
### Response:
def add_resource(self, resource):
    '''Perform an atomic prepend for a new resource'''
    # Validate first so an invalid resource never reaches the database.
    resource.validate()
    # Raw $push with $position 0 prepends in a single server-side update,
    # avoiding a read-modify-write race on the resources list.
    self.update(__raw__={
        '$push': {
            'resources': {
                '$each': [resource.to_mongo()],
                '$position': 0
            }
        }
    })
    # Refresh this document so it reflects the server-side change before
    # the signal below is sent.
    self.reload()
    # Tell listeners which resource was just added.
    post_save.send(self.__class__, document=self,
                   resource_added=resource.id)
def _convert_key(self, key, is_setter=False):
""" require they keys to be the same type as the index (so we don't
fallback)
"""
# allow arbitrary setting
if is_setter:
return list(key)
for ax, i in zip(self.obj.axes, key):
if ax.is_integer():
if not is_integer(i):
raise ValueError("At based indexing on an integer index "
"can only have integer indexers")
else:
if is_integer(i) and not ax.holds_integer():
raise ValueError("At based indexing on an non-integer "
"index can only have non-integer "
"indexers")
return key | require they keys to be the same type as the index (so we don't
fallback) | Below is the the instruction that describes the task:
### Input:
require they keys to be the same type as the index (so we don't
fallback)
### Response:
def _convert_key(self, key, is_setter=False):
""" require they keys to be the same type as the index (so we don't
fallback)
"""
# allow arbitrary setting
if is_setter:
return list(key)
for ax, i in zip(self.obj.axes, key):
if ax.is_integer():
if not is_integer(i):
raise ValueError("At based indexing on an integer index "
"can only have integer indexers")
else:
if is_integer(i) and not ax.holds_integer():
raise ValueError("At based indexing on an non-integer "
"index can only have non-integer "
"indexers")
return key |
def free_sources(self, free=True, pars=None, cuts=None,
distance=None, skydir=None, minmax_ts=None, minmax_npred=None,
exclude=None, square=False, **kwargs):
"""Free or fix sources in the ROI model satisfying the given
selection. When multiple selections are defined, the selected
sources will be those satisfying the logical AND of all
selections (e.g. distance < X && minmax_ts[0] < ts <
minmax_ts[1] && ...).
Parameters
----------
free : bool
Choose whether to free (free=True) or fix (free=False)
source parameters.
pars : list
Set a list of parameters to be freed/fixed for each
source. If none then all source parameters will be
freed/fixed. If pars='norm' then only normalization
parameters will be freed.
cuts : dict
Dictionary of [min,max] selections on source properties.
distance : float
Cut on angular distance from ``skydir``. If None then no
selection will be applied.
skydir : `~astropy.coordinates.SkyCoord`
Reference sky coordinate for ``distance`` selection. If
None then the distance selection will be applied with
respect to the ROI center.
minmax_ts : list
Free sources that have TS in the range [min,max]. If
either min or max are None then only a lower (upper) bound
will be applied. If this parameter is none no selection
will be applied.
minmax_npred : list
Free sources that have npred in the range [min,max]. If
either min or max are None then only a lower (upper) bound
will be applied. If this parameter is none no selection
will be applied.
exclude : list
Names of sources that will be excluded from the selection.
square : bool
Switch between applying a circular or square (ROI-like)
selection on the maximum projected distance from the ROI
center.
Returns
-------
srcs : list
A list of `~fermipy.roi_model.Model` objects.
"""
srcs = self.roi.get_sources(skydir=skydir, distance=distance,
cuts=cuts, minmax_ts=minmax_ts,
minmax_npred=minmax_npred, exclude=exclude,
square=square,
coordsys=self.config['binning']['coordsys'])
for s in srcs:
self.free_source(s.name, free=free, pars=pars, **kwargs)
return srcs | Free or fix sources in the ROI model satisfying the given
selection. When multiple selections are defined, the selected
sources will be those satisfying the logical AND of all
selections (e.g. distance < X && minmax_ts[0] < ts <
minmax_ts[1] && ...).
Parameters
----------
free : bool
Choose whether to free (free=True) or fix (free=False)
source parameters.
pars : list
Set a list of parameters to be freed/fixed for each
source. If none then all source parameters will be
freed/fixed. If pars='norm' then only normalization
parameters will be freed.
cuts : dict
Dictionary of [min,max] selections on source properties.
distance : float
Cut on angular distance from ``skydir``. If None then no
selection will be applied.
skydir : `~astropy.coordinates.SkyCoord`
Reference sky coordinate for ``distance`` selection. If
None then the distance selection will be applied with
respect to the ROI center.
minmax_ts : list
Free sources that have TS in the range [min,max]. If
either min or max are None then only a lower (upper) bound
will be applied. If this parameter is none no selection
will be applied.
minmax_npred : list
Free sources that have npred in the range [min,max]. If
either min or max are None then only a lower (upper) bound
will be applied. If this parameter is none no selection
will be applied.
exclude : list
Names of sources that will be excluded from the selection.
square : bool
Switch between applying a circular or square (ROI-like)
selection on the maximum projected distance from the ROI
center.
Returns
-------
srcs : list
A list of `~fermipy.roi_model.Model` objects. | Below is the the instruction that describes the task:
### Input:
Free or fix sources in the ROI model satisfying the given
selection. When multiple selections are defined, the selected
sources will be those satisfying the logical AND of all
selections (e.g. distance < X && minmax_ts[0] < ts <
minmax_ts[1] && ...).
Parameters
----------
free : bool
Choose whether to free (free=True) or fix (free=False)
source parameters.
pars : list
Set a list of parameters to be freed/fixed for each
source. If none then all source parameters will be
freed/fixed. If pars='norm' then only normalization
parameters will be freed.
cuts : dict
Dictionary of [min,max] selections on source properties.
distance : float
Cut on angular distance from ``skydir``. If None then no
selection will be applied.
skydir : `~astropy.coordinates.SkyCoord`
Reference sky coordinate for ``distance`` selection. If
None then the distance selection will be applied with
respect to the ROI center.
minmax_ts : list
Free sources that have TS in the range [min,max]. If
either min or max are None then only a lower (upper) bound
will be applied. If this parameter is none no selection
will be applied.
minmax_npred : list
Free sources that have npred in the range [min,max]. If
either min or max are None then only a lower (upper) bound
will be applied. If this parameter is none no selection
will be applied.
exclude : list
Names of sources that will be excluded from the selection.
square : bool
Switch between applying a circular or square (ROI-like)
selection on the maximum projected distance from the ROI
center.
Returns
-------
srcs : list
A list of `~fermipy.roi_model.Model` objects.
### Response:
def free_sources(self, free=True, pars=None, cuts=None,
                 distance=None, skydir=None, minmax_ts=None, minmax_npred=None,
                 exclude=None, square=False, **kwargs):
    """Free or fix the parameters of every ROI source passing the
    given selection.  When several selection criteria are supplied,
    the chosen sources are those satisfying the logical AND of all of
    them (e.g. distance < X && minmax_ts[0] < ts < minmax_ts[1] && ...).

    Parameters
    ----------
    free : bool
        If True, free the source parameters; if False, fix them.
    pars : list
        Parameters to free/fix for each selected source.  None means
        all parameters; 'norm' restricts the operation to
        normalization parameters.
    cuts : dict
        Dictionary of [min, max] selections on source properties.
    distance : float
        Maximum angular distance from ``skydir``.  None disables the
        distance cut.
    skydir : `~astropy.coordinates.SkyCoord`
        Reference position for the ``distance`` cut.  When None the
        cut is taken with respect to the ROI center.
    minmax_ts : list
        Select sources with TS in [min, max].  Either bound may be
        None for a one-sided cut; None disables the cut entirely.
    minmax_npred : list
        Select sources with npred in [min, max], with the same None
        conventions as ``minmax_ts``.
    exclude : list
        Names of sources to leave out of the selection.
    square : bool
        If True apply a square (ROI-like) cut on the maximum projected
        distance from the ROI center instead of a circular one.

    Returns
    -------
    srcs : list
        The selected `~fermipy.roi_model.Model` objects.
    """
    # The ROI model performs the actual selection; we only forward the
    # criteria plus the configured coordinate system.
    coordsys = self.config['binning']['coordsys']
    selected = self.roi.get_sources(skydir=skydir, distance=distance,
                                    cuts=cuts, minmax_ts=minmax_ts,
                                    minmax_npred=minmax_npred,
                                    exclude=exclude, square=square,
                                    coordsys=coordsys)
    # Free/fix each selected source individually.
    for src in selected:
        self.free_source(src.name, free=free, pars=pars, **kwargs)
    return selected
def _get_hgroup(name, array):
    '''Private function to check hostgroup

    Linearly scan ``array.list_hgroups()`` and return the first entry
    whose ``'name'`` field equals ``name``, or None when no hostgroup
    with that name exists.  ``array`` is presumably a FlashArray REST
    client exposing ``list_hgroups()`` -- TODO confirm against callers.
    '''
    hostgroup = None
    # First match wins; stop scanning as soon as it is found.
    for temp in array.list_hgroups():
        if temp['name'] == name:
            hostgroup = temp
            break
    return hostgroup | Private function to check hostgroup | Below is the the instruction that describes the task:
### Input:
Private function to check hostgroup
### Response:
def _get_hgroup(name, array):
'''Private function to check hostgroup'''
hostgroup = None
for temp in array.list_hgroups():
if temp['name'] == name:
hostgroup = temp
break
return hostgroup |
def _decode(self, infile, encoding):
    """
    Decode infile to unicode. Using the specified encoding.
    if it is a string, it also needs converting to a list.

    ``infile`` may be a single byte string or a list of lines; the
    result is always a list of unicode lines.  ``string_types``,
    ``PY3K`` and (on Python 2) ``unicode`` come from module scope --
    this is Python 2/3 compatibility code.
    """
    if isinstance(infile, string_types):
        # can't be unicode
        # NOTE: Could raise a ``UnicodeDecodeError``
        # splitlines(True) keeps the line endings on each line.
        return infile.decode(encoding).splitlines(True)
    # Already a list: decode each byte-string element in place.
    for i, line in enumerate(infile):
        # NOTE: The isinstance test here handles mixed lists of unicode/string
        # NOTE: But the decode will break on any non-string values
        # NOTE: Or could raise a ``UnicodeDecodeError``
        if PY3K:
            if not isinstance(line, str):
                infile[i] = line.decode(encoding)
        else:
            if not isinstance(line, unicode):
                infile[i] = line.decode(encoding)
    return infile | Decode infile to unicode. Using the specified encoding.
if it is a string, it also needs converting to a list. | Below is the instruction that describes the task:
### Input:
Decode infile to unicode. Using the specified encoding.
if it is a string, it also needs converting to a list.
### Response:
def _decode(self, infile, encoding):
    """
    Decode infile to unicode. Using the specified encoding.
    if it is a string, it also needs converting to a list.
    """
    if isinstance(infile, string_types):
        # A raw byte string: decode it and split into a list of lines,
        # keeping the line endings (can't already be unicode).
        # NOTE: Could raise a ``UnicodeDecodeError``
        return infile.decode(encoding).splitlines(True)
    # Already a list: decode every byte-string element in place.  The
    # isinstance tests handle mixed lists of unicode/string, but the
    # decode will break on any non-string values (or could raise a
    # ``UnicodeDecodeError``).
    for idx, raw_line in enumerate(infile):
        if PY3K:
            if not isinstance(raw_line, str):
                infile[idx] = raw_line.decode(encoding)
        elif not isinstance(raw_line, unicode):
            infile[idx] = raw_line.decode(encoding)
    return infile
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.