code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def _standard_params(klass, ids, metric_groups, **kwargs):
"""
Sets the standard params for a stats request
"""
end_time = kwargs.get('end_time', datetime.utcnow())
start_time = kwargs.get('start_time', end_time - timedelta(seconds=604800))
granularity = kwargs.get('granularity', GRANULARITY.HOUR)
placement = kwargs.get('placement', PLACEMENT.ALL_ON_TWITTER)
params = {
'metric_groups': ','.join(metric_groups),
'start_time': to_time(start_time, granularity),
'end_time': to_time(end_time, granularity),
'granularity': granularity.upper(),
'entity': klass.ANALYTICS_MAP[klass.__name__],
'placement': placement
}
params['entity_ids'] = ','.join(ids)
return params | Sets the standard params for a stats request | Below is the instruction that describes the task:
### Input:
Sets the standard params for a stats request
### Response:
def _standard_params(klass, ids, metric_groups, **kwargs):
"""
Sets the standard params for a stats request
"""
end_time = kwargs.get('end_time', datetime.utcnow())
start_time = kwargs.get('start_time', end_time - timedelta(seconds=604800))
granularity = kwargs.get('granularity', GRANULARITY.HOUR)
placement = kwargs.get('placement', PLACEMENT.ALL_ON_TWITTER)
params = {
'metric_groups': ','.join(metric_groups),
'start_time': to_time(start_time, granularity),
'end_time': to_time(end_time, granularity),
'granularity': granularity.upper(),
'entity': klass.ANALYTICS_MAP[klass.__name__],
'placement': placement
}
params['entity_ids'] = ','.join(ids)
return params |
def get_random_choral(log=True):
""" Gets a choral from the J. S. Bach chorals corpus (in Music21). """
choral_file = corpus.getBachChorales()[random.randint(0, 399)]
choral = corpus.parse(choral_file)
if log:
print("Chosen choral:", choral.metadata.title)
return choral | Gets a choral from the J. S. Bach chorals corpus (in Music21). | Below is the instruction that describes the task:
### Input:
Gets a choral from the J. S. Bach chorals corpus (in Music21).
### Response:
def get_random_choral(log=True):
""" Gets a choral from the J. S. Bach chorals corpus (in Music21). """
choral_file = corpus.getBachChorales()[random.randint(0, 399)]
choral = corpus.parse(choral_file)
if log:
print("Chosen choral:", choral.metadata.title)
return choral |
def _get_population(self, freq, t): # freq in THz
"""Return phonon population number
Three types of combinations of array inputs are possible.
- single freq and single t
- single freq and len(t) > 1
- len(freq) > 1 and single t
"""
condition = t > 1.0
if type(condition) == bool or type(condition) == np.bool_:
if condition:
return 1.0 / (np.exp(freq * THzToEv / (Kb * t)) - 1)
else:
return 0.0
else:
vals = np.zeros(len(t), dtype='double')
vals[condition] = 1.0 / (
np.exp(freq * THzToEv / (Kb * t[condition])) - 1)
return vals | Return phonon population number
Three types of combinations of array inputs are possible.
- single freq and single t
- single freq and len(t) > 1
- len(freq) > 1 and single t | Below is the instruction that describes the task:
### Input:
Return phonon population number
Three types of combinations of array inputs are possible.
- single freq and single t
- single freq and len(t) > 1
- len(freq) > 1 and single t
### Response:
def _get_population(self, freq, t): # freq in THz
"""Return phonon population number
Three types of combinations of array inputs are possible.
- single freq and single t
- single freq and len(t) > 1
- len(freq) > 1 and single t
"""
condition = t > 1.0
if type(condition) == bool or type(condition) == np.bool_:
if condition:
return 1.0 / (np.exp(freq * THzToEv / (Kb * t)) - 1)
else:
return 0.0
else:
vals = np.zeros(len(t), dtype='double')
vals[condition] = 1.0 / (
np.exp(freq * THzToEv / (Kb * t[condition])) - 1)
return vals |
def pop_callback(obj):
"""Pop a single callback."""
callbacks = obj._callbacks
if not callbacks:
return
if isinstance(callbacks, Node):
node = callbacks
obj._callbacks = None
else:
node = callbacks.first
callbacks.remove(node)
if not callbacks:
obj._callbacks = None
return node.data, node.extra | Pop a single callback. | Below is the instruction that describes the task:
### Input:
Pop a single callback.
### Response:
def pop_callback(obj):
"""Pop a single callback."""
callbacks = obj._callbacks
if not callbacks:
return
if isinstance(callbacks, Node):
node = callbacks
obj._callbacks = None
else:
node = callbacks.first
callbacks.remove(node)
if not callbacks:
obj._callbacks = None
return node.data, node.extra |
def trace_decorator(self):
"""Decorator to trace a function."""
def decorator(func):
def wrapper(*args, **kwargs):
self.tracer.start_span(name=func.__name__)
return_value = func(*args, **kwargs)
self.tracer.end_span()
return return_value
return wrapper
return decorator | Decorator to trace a function. | Below is the instruction that describes the task:
### Input:
Decorator to trace a function.
### Response:
def trace_decorator(self):
"""Decorator to trace a function."""
def decorator(func):
def wrapper(*args, **kwargs):
self.tracer.start_span(name=func.__name__)
return_value = func(*args, **kwargs)
self.tracer.end_span()
return return_value
return wrapper
return decorator |
def parse_job_files(self):
"""Check for job definitions in known zuul files."""
repo_jobs = []
for rel_job_file_path, job_info in self.job_files.items():
LOGGER.debug("Checking for job definitions in %s", rel_job_file_path)
jobs = self.parse_job_definitions(rel_job_file_path, job_info)
LOGGER.debug("Found %d job definitions in %s", len(jobs), rel_job_file_path)
repo_jobs.extend(jobs)
if not repo_jobs:
LOGGER.info("No job definitions found in repo '%s'", self.repo)
else:
LOGGER.info(
"Found %d job definitions in repo '%s'", len(repo_jobs), self.repo
)
# LOGGER.debug(json.dumps(repo_jobs, indent=4))
return repo_jobs | Check for job definitions in known zuul files. | Below is the instruction that describes the task:
### Input:
Check for job definitions in known zuul files.
### Response:
def parse_job_files(self):
"""Check for job definitions in known zuul files."""
repo_jobs = []
for rel_job_file_path, job_info in self.job_files.items():
LOGGER.debug("Checking for job definitions in %s", rel_job_file_path)
jobs = self.parse_job_definitions(rel_job_file_path, job_info)
LOGGER.debug("Found %d job definitions in %s", len(jobs), rel_job_file_path)
repo_jobs.extend(jobs)
if not repo_jobs:
LOGGER.info("No job definitions found in repo '%s'", self.repo)
else:
LOGGER.info(
"Found %d job definitions in repo '%s'", len(repo_jobs), self.repo
)
# LOGGER.debug(json.dumps(repo_jobs, indent=4))
return repo_jobs |
def get_teams_by_name(org, team_names):
"""Find team(s) in org by name(s).
Parameters
----------
org: github.Organization.Organization
org to search for team(s)
teams: list(str)
list of team names to search for
Returns
-------
list of github.Team.Team objects
Raises
------
github.GithubException
Upon error from github api
"""
assert isinstance(org, github.Organization.Organization), type(org)
try:
org_teams = list(org.get_teams())
except github.RateLimitExceededException:
raise
except github.GithubException as e:
msg = 'error getting teams'
raise CaughtOrganizationError(org, e, msg) from None
found_teams = []
for name in team_names:
debug("looking for team: {o}/'{t}'".format(
o=org.login,
t=name
))
t = next((t for t in org_teams if t.name == name), None)
if t:
debug(' found')
found_teams.append(t)
else:
debug(' not found')
return found_teams | Find team(s) in org by name(s).
Parameters
----------
org: github.Organization.Organization
org to search for team(s)
teams: list(str)
list of team names to search for
Returns
-------
list of github.Team.Team objects
Raises
------
github.GithubException
Upon error from github api | Below is the instruction that describes the task:
### Input:
Find team(s) in org by name(s).
Parameters
----------
org: github.Organization.Organization
org to search for team(s)
teams: list(str)
list of team names to search for
Returns
-------
list of github.Team.Team objects
Raises
------
github.GithubException
Upon error from github api
### Response:
def get_teams_by_name(org, team_names):
"""Find team(s) in org by name(s).
Parameters
----------
org: github.Organization.Organization
org to search for team(s)
teams: list(str)
list of team names to search for
Returns
-------
list of github.Team.Team objects
Raises
------
github.GithubException
Upon error from github api
"""
assert isinstance(org, github.Organization.Organization), type(org)
try:
org_teams = list(org.get_teams())
except github.RateLimitExceededException:
raise
except github.GithubException as e:
msg = 'error getting teams'
raise CaughtOrganizationError(org, e, msg) from None
found_teams = []
for name in team_names:
debug("looking for team: {o}/'{t}'".format(
o=org.login,
t=name
))
t = next((t for t in org_teams if t.name == name), None)
if t:
debug(' found')
found_teams.append(t)
else:
debug(' not found')
return found_teams |
def get_results_as_numpy_array(self, parameter_space,
result_parsing_function, runs):
"""
Return the results relative to the desired parameter space in the form
of a numpy array.
Args:
parameter_space (dict): dictionary containing
parameter/list-of-values pairs.
result_parsing_function (function): user-defined function, taking a
result dictionary as argument, that can be used to parse the
result files and return a list of values.
runs (int): number of runs to gather for each parameter
combination.
"""
return np.array(self.get_space(self.db.get_complete_results(), {},
parameter_space, runs,
result_parsing_function)) | Return the results relative to the desired parameter space in the form
of a numpy array.
Args:
parameter_space (dict): dictionary containing
parameter/list-of-values pairs.
result_parsing_function (function): user-defined function, taking a
result dictionary as argument, that can be used to parse the
result files and return a list of values.
runs (int): number of runs to gather for each parameter
combination. | Below is the instruction that describes the task:
### Input:
Return the results relative to the desired parameter space in the form
of a numpy array.
Args:
parameter_space (dict): dictionary containing
parameter/list-of-values pairs.
result_parsing_function (function): user-defined function, taking a
result dictionary as argument, that can be used to parse the
result files and return a list of values.
runs (int): number of runs to gather for each parameter
combination.
### Response:
def get_results_as_numpy_array(self, parameter_space,
result_parsing_function, runs):
"""
Return the results relative to the desired parameter space in the form
of a numpy array.
Args:
parameter_space (dict): dictionary containing
parameter/list-of-values pairs.
result_parsing_function (function): user-defined function, taking a
result dictionary as argument, that can be used to parse the
result files and return a list of values.
runs (int): number of runs to gather for each parameter
combination.
"""
return np.array(self.get_space(self.db.get_complete_results(), {},
parameter_space, runs,
result_parsing_function)) |
def image_tile_create(comptparms, clrspc):
"""Creates a new image structure.
Wraps the openjp2 library function opj_image_tile_create.
Parameters
----------
cmptparms : comptparms_t
The component parameters.
clrspc : int
Specifies the color space.
Returns
-------
image : ImageType
Reference to ImageType instance.
"""
ARGTYPES = [ctypes.c_uint32,
ctypes.POINTER(ImageComptParmType),
COLOR_SPACE_TYPE]
OPENJP2.opj_image_tile_create.argtypes = ARGTYPES
OPENJP2.opj_image_tile_create.restype = ctypes.POINTER(ImageType)
image = OPENJP2.opj_image_tile_create(len(comptparms),
comptparms,
clrspc)
return image | Creates a new image structure.
Wraps the openjp2 library function opj_image_tile_create.
Parameters
----------
cmptparms : comptparms_t
The component parameters.
clrspc : int
Specifies the color space.
Returns
-------
image : ImageType
Reference to ImageType instance. | Below is the instruction that describes the task:
### Input:
Creates a new image structure.
Wraps the openjp2 library function opj_image_tile_create.
Parameters
----------
cmptparms : comptparms_t
The component parameters.
clrspc : int
Specifies the color space.
Returns
-------
image : ImageType
Reference to ImageType instance.
### Response:
def image_tile_create(comptparms, clrspc):
"""Creates a new image structure.
Wraps the openjp2 library function opj_image_tile_create.
Parameters
----------
cmptparms : comptparms_t
The component parameters.
clrspc : int
Specifies the color space.
Returns
-------
image : ImageType
Reference to ImageType instance.
"""
ARGTYPES = [ctypes.c_uint32,
ctypes.POINTER(ImageComptParmType),
COLOR_SPACE_TYPE]
OPENJP2.opj_image_tile_create.argtypes = ARGTYPES
OPENJP2.opj_image_tile_create.restype = ctypes.POINTER(ImageType)
image = OPENJP2.opj_image_tile_create(len(comptparms),
comptparms,
clrspc)
return image |
def normalized_energy_at_conditions(self, pH, V):
"""
Energy at an electrochemical condition, compatible with
numpy arrays for pH/V input
Args:
pH (float): pH at condition
V (float): applied potential at condition
Returns:
energy normalized by number of non-O/H atoms at condition
"""
return self.energy_at_conditions(pH, V) * self.normalization_factor | Energy at an electrochemical condition, compatible with
numpy arrays for pH/V input
Args:
pH (float): pH at condition
V (float): applied potential at condition
Returns:
energy normalized by number of non-O/H atoms at condition | Below is the instruction that describes the task:
### Input:
Energy at an electrochemical condition, compatible with
numpy arrays for pH/V input
Args:
pH (float): pH at condition
V (float): applied potential at condition
Returns:
energy normalized by number of non-O/H atoms at condition
### Response:
def normalized_energy_at_conditions(self, pH, V):
"""
Energy at an electrochemical condition, compatible with
numpy arrays for pH/V input
Args:
pH (float): pH at condition
V (float): applied potential at condition
Returns:
energy normalized by number of non-O/H atoms at condition
"""
return self.energy_at_conditions(pH, V) * self.normalization_factor |
def _get_default_dependencies(self):
'''
Get default dependencies for archive
Get default dependencies from requirements file or (if no requirements
file) from previous version
'''
# Get default dependencies from requirements file
default_dependencies = {
k: v for k,
v in self.api.default_versions.items() if k != self.archive_name}
# If no requirements file or is empty:
if len(default_dependencies) == 0:
# Retrieve dependencies from last archive record
history = self.get_history()
if len(history) > 0:
default_dependencies = history[-1].get('dependencies', {})
return default_dependencies | Get default dependencies for archive
Get default dependencies from requirements file or (if no requirements
file) from previous version | Below is the instruction that describes the task:
### Input:
Get default dependencies for archive
Get default dependencies from requirements file or (if no requirements
file) from previous version
### Response:
def _get_default_dependencies(self):
'''
Get default dependencies for archive
Get default dependencies from requirements file or (if no requirements
file) from previous version
'''
# Get default dependencies from requirements file
default_dependencies = {
k: v for k,
v in self.api.default_versions.items() if k != self.archive_name}
# If no requirements file or is empty:
if len(default_dependencies) == 0:
# Retrieve dependencies from last archive record
history = self.get_history()
if len(history) > 0:
default_dependencies = history[-1].get('dependencies', {})
return default_dependencies |
def get_pixel_size_from_nside(nside):
""" Returns an estimate of the pixel size from the HEALPix nside coordinate
This just uses a lookup table to provide a nice round number for each
HEALPix order.
"""
order = int(np.log2(nside))
if order < 0 or order > 13:
raise ValueError('HEALPix order must be between 0 to 13 %i' % order)
return HPX_ORDER_TO_PIXSIZE[order] | Returns an estimate of the pixel size from the HEALPix nside coordinate
This just uses a lookup table to provide a nice round number for each
HEALPix order. | Below is the instruction that describes the task:
### Input:
Returns an estimate of the pixel size from the HEALPix nside coordinate
This just uses a lookup table to provide a nice round number for each
HEALPix order.
### Response:
def get_pixel_size_from_nside(nside):
""" Returns an estimate of the pixel size from the HEALPix nside coordinate
This just uses a lookup table to provide a nice round number for each
HEALPix order.
"""
order = int(np.log2(nside))
if order < 0 or order > 13:
raise ValueError('HEALPix order must be between 0 to 13 %i' % order)
return HPX_ORDER_TO_PIXSIZE[order] |
def doesIntersect(self, other):
'''
:param: other - Line subclass
:return: boolean
Returns True iff:
ccw(self.A,self.B,other.A) * ccw(self.A,self.B,other.B) <= 0
and
ccw(other.A,other.B,self.A) * ccw(other.A,other.B,self.B) <= 0
'''
if self.A.ccw(self.B, other.A) * self.A.ccw(self.B, other.B) > 0:
return False
if other.A.ccw(other.B, self.A) * other.A.ccw(other.B, self.B) > 0:
return False
return True | :param: other - Line subclass
:return: boolean
Returns True iff:
ccw(self.A,self.B,other.A) * ccw(self.A,self.B,other.B) <= 0
and
ccw(other.A,other.B,self.A) * ccw(other.A,other.B,self.B) <= 0 | Below is the instruction that describes the task:
### Input:
:param: other - Line subclass
:return: boolean
Returns True iff:
ccw(self.A,self.B,other.A) * ccw(self.A,self.B,other.B) <= 0
and
ccw(other.A,other.B,self.A) * ccw(other.A,other.B,self.B) <= 0
### Response:
def doesIntersect(self, other):
'''
:param: other - Line subclass
:return: boolean
Returns True iff:
ccw(self.A,self.B,other.A) * ccw(self.A,self.B,other.B) <= 0
and
ccw(other.A,other.B,self.A) * ccw(other.A,other.B,self.B) <= 0
'''
if self.A.ccw(self.B, other.A) * self.A.ccw(self.B, other.B) > 0:
return False
if other.A.ccw(other.B, self.A) * other.A.ccw(other.B, self.B) > 0:
return False
return True |
def _add_edge_dmap_fun(graph, edges_weights=None):
"""
Adds edge to the dispatcher map.
:param graph:
A directed graph.
:type graph: networkx.classes.digraph.DiGraph
:param edges_weights:
Edge weights.
:type edges_weights: dict, optional
:return:
A function that adds an edge to the `graph`.
:rtype: callable
"""
add = graph.add_edge # Namespace shortcut for speed.
if edges_weights is not None:
def add_edge(i, o, w):
if w in edges_weights:
add(i, o, weight=edges_weights[w]) # Weighted edge.
else:
add(i, o) # Normal edge.
else:
# noinspection PyUnusedLocal
def add_edge(i, o, w):
add(i, o) # Normal edge.
return add_edge | Adds edge to the dispatcher map.
:param graph:
A directed graph.
:type graph: networkx.classes.digraph.DiGraph
:param edges_weights:
Edge weights.
:type edges_weights: dict, optional
:return:
A function that adds an edge to the `graph`.
:rtype: callable | Below is the instruction that describes the task:
### Input:
Adds edge to the dispatcher map.
:param graph:
A directed graph.
:type graph: networkx.classes.digraph.DiGraph
:param edges_weights:
Edge weights.
:type edges_weights: dict, optional
:return:
A function that adds an edge to the `graph`.
:rtype: callable
### Response:
def _add_edge_dmap_fun(graph, edges_weights=None):
"""
Adds edge to the dispatcher map.
:param graph:
A directed graph.
:type graph: networkx.classes.digraph.DiGraph
:param edges_weights:
Edge weights.
:type edges_weights: dict, optional
:return:
A function that adds an edge to the `graph`.
:rtype: callable
"""
add = graph.add_edge # Namespace shortcut for speed.
if edges_weights is not None:
def add_edge(i, o, w):
if w in edges_weights:
add(i, o, weight=edges_weights[w]) # Weighted edge.
else:
add(i, o) # Normal edge.
else:
# noinspection PyUnusedLocal
def add_edge(i, o, w):
add(i, o) # Normal edge.
return add_edge |
def delete_grade(self, grade_id):
"""Deletes a ``Grade``.
arg: grade_id (osid.id.Id): the ``Id`` of the ``Grade`` to
remove
raise: NotFound - ``grade_id`` not found
raise: NullArgument - ``grade_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.repository.AssetAdminSession.delete_asset_content_template
from dlkit.abstract_osid.id.primitives import Id as ABCId
from .objects import Grade
collection = JSONClientValidated('grading',
collection='GradeSystem',
runtime=self._runtime)
if not isinstance(grade_id, ABCId):
raise errors.InvalidArgument('the argument is not a valid OSID Id')
grade_system = collection.find_one({'grades._id': ObjectId(grade_id.get_identifier())})
index = 0
found = False
for i in grade_system['grades']:
if i['_id'] == ObjectId(grade_id.get_identifier()):
grade_map = grade_system['grades'].pop(index)
index += 1
found = True
if not found:
raise errors.OperationFailed()
Grade(
osid_object_map=grade_map,
runtime=self._runtime,
proxy=self._proxy)._delete()
collection.save(grade_system) | Deletes a ``Grade``.
arg: grade_id (osid.id.Id): the ``Id`` of the ``Grade`` to
remove
raise: NotFound - ``grade_id`` not found
raise: NullArgument - ``grade_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | Below is the instruction that describes the task:
### Input:
Deletes a ``Grade``.
arg: grade_id (osid.id.Id): the ``Id`` of the ``Grade`` to
remove
raise: NotFound - ``grade_id`` not found
raise: NullArgument - ``grade_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
### Response:
def delete_grade(self, grade_id):
"""Deletes a ``Grade``.
arg: grade_id (osid.id.Id): the ``Id`` of the ``Grade`` to
remove
raise: NotFound - ``grade_id`` not found
raise: NullArgument - ``grade_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.repository.AssetAdminSession.delete_asset_content_template
from dlkit.abstract_osid.id.primitives import Id as ABCId
from .objects import Grade
collection = JSONClientValidated('grading',
collection='GradeSystem',
runtime=self._runtime)
if not isinstance(grade_id, ABCId):
raise errors.InvalidArgument('the argument is not a valid OSID Id')
grade_system = collection.find_one({'grades._id': ObjectId(grade_id.get_identifier())})
index = 0
found = False
for i in grade_system['grades']:
if i['_id'] == ObjectId(grade_id.get_identifier()):
grade_map = grade_system['grades'].pop(index)
index += 1
found = True
if not found:
raise errors.OperationFailed()
Grade(
osid_object_map=grade_map,
runtime=self._runtime,
proxy=self._proxy)._delete()
collection.save(grade_system) |
def cors_allow_any(request, response):
"""
Add headers to permit CORS requests from any origin, with or without credentials,
with any headers.
"""
origin = request.META.get('HTTP_ORIGIN')
if not origin:
return response
# From the CORS spec: The string "*" cannot be used for a resource that supports credentials.
response['Access-Control-Allow-Origin'] = origin
patch_vary_headers(response, ['Origin'])
response['Access-Control-Allow-Credentials'] = 'true'
if request.method == 'OPTIONS':
if 'HTTP_ACCESS_CONTROL_REQUEST_HEADERS' in request.META:
response['Access-Control-Allow-Headers'] \
= request.META['HTTP_ACCESS_CONTROL_REQUEST_HEADERS']
response['Access-Control-Allow-Methods'] = 'GET, POST, OPTIONS'
return response | Add headers to permit CORS requests from any origin, with or without credentials,
with any headers. | Below is the instruction that describes the task:
### Input:
Add headers to permit CORS requests from any origin, with or without credentials,
with any headers.
### Response:
def cors_allow_any(request, response):
"""
Add headers to permit CORS requests from any origin, with or without credentials,
with any headers.
"""
origin = request.META.get('HTTP_ORIGIN')
if not origin:
return response
# From the CORS spec: The string "*" cannot be used for a resource that supports credentials.
response['Access-Control-Allow-Origin'] = origin
patch_vary_headers(response, ['Origin'])
response['Access-Control-Allow-Credentials'] = 'true'
if request.method == 'OPTIONS':
if 'HTTP_ACCESS_CONTROL_REQUEST_HEADERS' in request.META:
response['Access-Control-Allow-Headers'] \
= request.META['HTTP_ACCESS_CONTROL_REQUEST_HEADERS']
response['Access-Control-Allow-Methods'] = 'GET, POST, OPTIONS'
return response |
def absent(name, orgname=None, profile='grafana'):
'''
Ensure the named grafana dashboard is absent.
name
Name of the grafana dashboard.
orgname
Name of the organization in which the dashboard should be present.
profile
Configuration profile used to connect to the Grafana instance.
Default is 'grafana'.
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
if isinstance(profile, six.string_types):
profile = __salt__['config.option'](profile)
existing_dashboard = __salt__['grafana4.get_dashboard'](
name, orgname, profile)
if existing_dashboard:
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Dashboard {0} is set to be deleted.'.format(name)
return ret
__salt__['grafana4.delete_dashboard'](name, profile=profile)
ret['comment'] = 'Dashboard {0} deleted.'.format(name)
ret['changes']['new'] = 'Dashboard {0} deleted.'.format(name)
return ret
ret['comment'] = 'Dashboard absent'
return ret | Ensure the named grafana dashboard is absent.
name
Name of the grafana dashboard.
orgname
Name of the organization in which the dashboard should be present.
profile
Configuration profile used to connect to the Grafana instance.
Default is 'grafana'. | Below is the instruction that describes the task:
### Input:
Ensure the named grafana dashboard is absent.
name
Name of the grafana dashboard.
orgname
Name of the organization in which the dashboard should be present.
profile
Configuration profile used to connect to the Grafana instance.
Default is 'grafana'.
### Response:
def absent(name, orgname=None, profile='grafana'):
'''
Ensure the named grafana dashboard is absent.
name
Name of the grafana dashboard.
orgname
Name of the organization in which the dashboard should be present.
profile
Configuration profile used to connect to the Grafana instance.
Default is 'grafana'.
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
if isinstance(profile, six.string_types):
profile = __salt__['config.option'](profile)
existing_dashboard = __salt__['grafana4.get_dashboard'](
name, orgname, profile)
if existing_dashboard:
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Dashboard {0} is set to be deleted.'.format(name)
return ret
__salt__['grafana4.delete_dashboard'](name, profile=profile)
ret['comment'] = 'Dashboard {0} deleted.'.format(name)
ret['changes']['new'] = 'Dashboard {0} deleted.'.format(name)
return ret
ret['comment'] = 'Dashboard absent'
return ret |
def delete_doc_by_query(self, collection, query, **kwargs):
"""
:param str collection: The name of the collection for the request
:param str query: Query selecting documents to be deleted.
Deletes items from Solr based on a given query. ::
>>> solr.delete_doc_by_query('SolrClient_unittest','*:*')
"""
temp = {"delete": {"query": query}}
resp, con_inf = self.transport.send_request(method='POST',
endpoint='update',
collection=collection,
data=json.dumps(temp),
**kwargs)
return resp | :param str collection: The name of the collection for the request
:param str query: Query selecting documents to be deleted.
Deletes items from Solr based on a given query. ::
>>> solr.delete_doc_by_query('SolrClient_unittest','*:*') | Below is the instruction that describes the task:
### Input:
:param str collection: The name of the collection for the request
:param str query: Query selecting documents to be deleted.
Deletes items from Solr based on a given query. ::
>>> solr.delete_doc_by_query('SolrClient_unittest','*:*')
### Response:
def delete_doc_by_query(self, collection, query, **kwargs):
"""
:param str collection: The name of the collection for the request
:param str query: Query selecting documents to be deleted.
Deletes items from Solr based on a given query. ::
>>> solr.delete_doc_by_query('SolrClient_unittest','*:*')
"""
temp = {"delete": {"query": query}}
resp, con_inf = self.transport.send_request(method='POST',
endpoint='update',
collection=collection,
data=json.dumps(temp),
**kwargs)
return resp |
def children(self, alias, bank_id):
"""
URL for getting or setting child relationships for the specified bank
:param alias:
:param bank_id:
:return:
"""
return self._root + self._safe_alias(alias) + '/child/ids/' + str(bank_id) | URL for getting or setting child relationships for the specified bank
:param alias:
:param bank_id:
:return: | Below is the instruction that describes the task:
### Input:
URL for getting or setting child relationships for the specified bank
:param alias:
:param bank_id:
:return:
### Response:
def children(self, alias, bank_id):
"""
URL for getting or setting child relationships for the specified bank
:param alias:
:param bank_id:
:return:
"""
return self._root + self._safe_alias(alias) + '/child/ids/' + str(bank_id) |
def gzip_cache(path):
"""
Another GZIP handler for Bottle functions. This may be used to cache the
files statically on the disc on given `path`.
If the browser accepts GZIP and there is file at ``path + ".gz"``, this
file is returned, correct headers are set (Content-Encoding, Last-Modified,
Content-Length, Date and so on).
If the browser doesn't accept GZIP or there is no ``.gz`` file at same
path, normal file is returned.
Args:
path (str): Path to the cached file.
Returns:
obj: Opened file.
"""
accept_enc = request.get_header("Accept-Encoding")
if accept_enc and "gzip" in accept_enc and os.path.exists(path + ".gz"):
path = path + ".gz"
response.set_header("Content-Encoding", "gzip")
stats = os.stat(path)
headers = dict()
headers['Content-Length'] = stats.st_size
headers['Last-Modified'] = time.strftime(
"%a, %d %b %Y %H:%M:%S GMT",
time.gmtime(stats.st_mtime)
)
# I need to set `headers` dict for optional HTTPResponse use, but also set
# hedears using `response.set_header()` for normal use
for key, val in headers.iteritems():
response.set_header(key, val)
modified_since = request.environ.get('HTTP_IF_MODIFIED_SINCE')
if modified_since:
modified_since = parse_date(modified_since.split(";")[0].strip())
if modified_since is not None and modified_since >= int(stats.st_mtime):
headers['Date'] = time.strftime(
"%a, %d %b %Y %H:%M:%S GMT",
time.gmtime()
)
return HTTPResponse(status=304, **headers)
return open(path) | Another GZIP handler for Bottle functions. This may be used to cache the
files statically on the disc on given `path`.
If the browser accepts GZIP and there is file at ``path + ".gz"``, this
file is returned, correct headers are set (Content-Encoding, Last-Modified,
Content-Length, Date and so on).
If the browser doesn't accept GZIP or there is no ``.gz`` file at same
path, normal file is returned.
Args:
path (str): Path to the cached file.
Returns:
obj: Opened file. | Below is the instruction that describes the task:
### Input:
Another GZIP handler for Bottle functions. This may be used to cache the
files statically on the disc on given `path`.
If the browser accepts GZIP and there is file at ``path + ".gz"``, this
file is returned, correct headers are set (Content-Encoding, Last-Modified,
Content-Length, Date and so on).
If the browser doesn't accept GZIP or there is no ``.gz`` file at same
path, normal file is returned.
Args:
path (str): Path to the cached file.
Returns:
obj: Opened file.
### Response:
def gzip_cache(path):
"""
Another GZIP handler for Bottle functions. This may be used to cache the
files statically on the disc on given `path`.
If the browser accepts GZIP and there is file at ``path + ".gz"``, this
file is returned, correct headers are set (Content-Encoding, Last-Modified,
Content-Length, Date and so on).
If the browser doesn't accept GZIP or there is no ``.gz`` file at same
path, normal file is returned.
Args:
path (str): Path to the cached file.
Returns:
obj: Opened file.
"""
accept_enc = request.get_header("Accept-Encoding")
if accept_enc and "gzip" in accept_enc and os.path.exists(path + ".gz"):
path = path + ".gz"
response.set_header("Content-Encoding", "gzip")
stats = os.stat(path)
headers = dict()
headers['Content-Length'] = stats.st_size
headers['Last-Modified'] = time.strftime(
"%a, %d %b %Y %H:%M:%S GMT",
time.gmtime(stats.st_mtime)
)
# I need to set `headers` dict for optional HTTPResponse use, but also set
# hedears using `response.set_header()` for normal use
for key, val in headers.iteritems():
response.set_header(key, val)
modified_since = request.environ.get('HTTP_IF_MODIFIED_SINCE')
if modified_since:
modified_since = parse_date(modified_since.split(";")[0].strip())
if modified_since is not None and modified_since >= int(stats.st_mtime):
headers['Date'] = time.strftime(
"%a, %d %b %Y %H:%M:%S GMT",
time.gmtime()
)
return HTTPResponse(status=304, **headers)
return open(path) |
def add(self, template, resource, name=None):
"""Add a route to a resource.
The optional `name` assigns a name to this route that can be used when
building URLs. The name must be unique within this Mapper object.
"""
# Special case for standalone handler functions
if hasattr(resource, '_rhino_meta'):
route = Route(
template, Resource(resource), name=name, ranges=self.ranges)
else:
route = Route(
template, resource, name=name, ranges=self.ranges)
obj_id = id(resource)
if obj_id not in self._lookup:
# It's ok to have multiple routes for the same object id, the
# lookup will return the first one.
self._lookup[obj_id] = route
if name is not None:
if name in self.named_routes:
raise InvalidArgumentError("A route named '%s' already exists in this %s object."
% (name, self.__class__.__name__))
self.named_routes[name] = route
self.routes.append(route) | Add a route to a resource.
The optional `name` assigns a name to this route that can be used when
building URLs. The name must be unique within this Mapper object. | Below is the the instruction that describes the task:
### Input:
Add a route to a resource.
The optional `name` assigns a name to this route that can be used when
building URLs. The name must be unique within this Mapper object.
### Response:
def add(self, template, resource, name=None):
"""Add a route to a resource.
The optional `name` assigns a name to this route that can be used when
building URLs. The name must be unique within this Mapper object.
"""
# Special case for standalone handler functions
if hasattr(resource, '_rhino_meta'):
route = Route(
template, Resource(resource), name=name, ranges=self.ranges)
else:
route = Route(
template, resource, name=name, ranges=self.ranges)
obj_id = id(resource)
if obj_id not in self._lookup:
# It's ok to have multiple routes for the same object id, the
# lookup will return the first one.
self._lookup[obj_id] = route
if name is not None:
if name in self.named_routes:
raise InvalidArgumentError("A route named '%s' already exists in this %s object."
% (name, self.__class__.__name__))
self.named_routes[name] = route
self.routes.append(route) |
def temperature(self):
""" Get the temperature in degree celcius
"""
result = self.i2c_read(2)
value = struct.unpack('>H', result)[0]
if value < 32768:
return value / 256.0
else:
return (value - 65536) / 256.0 | Get the temperature in degree celcius | Below is the the instruction that describes the task:
### Input:
Get the temperature in degree celcius
### Response:
def temperature(self):
""" Get the temperature in degree celcius
"""
result = self.i2c_read(2)
value = struct.unpack('>H', result)[0]
if value < 32768:
return value / 256.0
else:
return (value - 65536) / 256.0 |
def unmix(a, D, M, M0, h0, reg, reg0, alpha, numItermax=1000,
stopThr=1e-3, verbose=False, log=False):
"""
Compute the unmixing of an observation with a given dictionary using Wasserstein distance
The function solve the following optimization problem:
.. math::
\mathbf{h} = arg\min_\mathbf{h} (1- \\alpha) W_{M,reg}(\mathbf{a},\mathbf{Dh})+\\alpha W_{M0,reg0}(\mathbf{h}_0,\mathbf{h})
where :
- :math:`W_{M,reg}(\cdot,\cdot)` is the entropic regularized Wasserstein distance with M loss matrix (see ot.bregman.sinkhorn)
- :math:`\mathbf{a}` is an observed distribution, :math:`\mathbf{h}_0` is aprior on unmixing
- reg and :math:`\mathbf{M}` are respectively the regularization term and the cost matrix for OT data fitting
- reg0 and :math:`\mathbf{M0}` are respectively the regularization term and the cost matrix for regularization
- :math:`\\alpha`weight data fitting and regularization
The optimization problem is solved suing the algorithm described in [4]
Parameters
----------
a : np.ndarray (d)
observed distribution
D : np.ndarray (d,n)
dictionary matrix
M : np.ndarray (d,d)
loss matrix
M0 : np.ndarray (n,n)
loss matrix
h0 : np.ndarray (n,)
prior on h
reg : float
Regularization term >0 (Wasserstein data fitting)
reg0 : float
Regularization term >0 (Wasserstein reg with h0)
alpha : float
How much should we trust the prior ([0,1])
numItermax : int, optional
Max number of iterations
stopThr : float, optional
Stop threshol on error (>0)
verbose : bool, optional
Print information along iterations
log : bool, optional
record log if True
Returns
-------
a : (d,) ndarray
Wasserstein barycenter
log : dict
log dictionary return only if log==True in parameters
References
----------
.. [4] S. Nakhostin, N. Courty, R. Flamary, D. Tuia, T. Corpetti, Supervised planetary unmixing with optimal transport, Whorkshop on Hyperspectral Image and Signal Processing : Evolution in Remote Sensing (WHISPERS), 2016.
"""
# M = M/np.median(M)
K = np.exp(-M / reg)
# M0 = M0/np.median(M0)
K0 = np.exp(-M0 / reg0)
old = h0
err = 1
cpt = 0
# log = {'niter':0, 'all_err':[]}
if log:
log = {'err': []}
while (err > stopThr and cpt < numItermax):
K = projC(K, a)
K0 = projC(K0, h0)
new = np.sum(K0, axis=1)
# we recombine the current selection from dictionnary
inv_new = np.dot(D, new)
other = np.sum(K, axis=1)
# geometric interpolation
delta = np.exp(alpha * np.log(other) + (1 - alpha) * np.log(inv_new))
K = projR(K, delta)
K0 = np.dot(np.diag(np.dot(D.T, delta / inv_new)), K0)
err = np.linalg.norm(np.sum(K0, axis=1) - old)
old = new
if log:
log['err'].append(err)
if verbose:
if cpt % 200 == 0:
print('{:5s}|{:12s}'.format('It.', 'Err') + '\n' + '-' * 19)
print('{:5d}|{:8e}|'.format(cpt, err))
cpt = cpt + 1
if log:
log['niter'] = cpt
return np.sum(K0, axis=1), log
else:
return np.sum(K0, axis=1) | Compute the unmixing of an observation with a given dictionary using Wasserstein distance
The function solve the following optimization problem:
.. math::
\mathbf{h} = arg\min_\mathbf{h} (1- \\alpha) W_{M,reg}(\mathbf{a},\mathbf{Dh})+\\alpha W_{M0,reg0}(\mathbf{h}_0,\mathbf{h})
where :
- :math:`W_{M,reg}(\cdot,\cdot)` is the entropic regularized Wasserstein distance with M loss matrix (see ot.bregman.sinkhorn)
- :math:`\mathbf{a}` is an observed distribution, :math:`\mathbf{h}_0` is aprior on unmixing
- reg and :math:`\mathbf{M}` are respectively the regularization term and the cost matrix for OT data fitting
- reg0 and :math:`\mathbf{M0}` are respectively the regularization term and the cost matrix for regularization
- :math:`\\alpha`weight data fitting and regularization
The optimization problem is solved suing the algorithm described in [4]
Parameters
----------
a : np.ndarray (d)
observed distribution
D : np.ndarray (d,n)
dictionary matrix
M : np.ndarray (d,d)
loss matrix
M0 : np.ndarray (n,n)
loss matrix
h0 : np.ndarray (n,)
prior on h
reg : float
Regularization term >0 (Wasserstein data fitting)
reg0 : float
Regularization term >0 (Wasserstein reg with h0)
alpha : float
How much should we trust the prior ([0,1])
numItermax : int, optional
Max number of iterations
stopThr : float, optional
Stop threshol on error (>0)
verbose : bool, optional
Print information along iterations
log : bool, optional
record log if True
Returns
-------
a : (d,) ndarray
Wasserstein barycenter
log : dict
log dictionary return only if log==True in parameters
References
----------
.. [4] S. Nakhostin, N. Courty, R. Flamary, D. Tuia, T. Corpetti, Supervised planetary unmixing with optimal transport, Whorkshop on Hyperspectral Image and Signal Processing : Evolution in Remote Sensing (WHISPERS), 2016. | Below is the the instruction that describes the task:
### Input:
Compute the unmixing of an observation with a given dictionary using Wasserstein distance
The function solve the following optimization problem:
.. math::
\mathbf{h} = arg\min_\mathbf{h} (1- \\alpha) W_{M,reg}(\mathbf{a},\mathbf{Dh})+\\alpha W_{M0,reg0}(\mathbf{h}_0,\mathbf{h})
where :
- :math:`W_{M,reg}(\cdot,\cdot)` is the entropic regularized Wasserstein distance with M loss matrix (see ot.bregman.sinkhorn)
- :math:`\mathbf{a}` is an observed distribution, :math:`\mathbf{h}_0` is aprior on unmixing
- reg and :math:`\mathbf{M}` are respectively the regularization term and the cost matrix for OT data fitting
- reg0 and :math:`\mathbf{M0}` are respectively the regularization term and the cost matrix for regularization
- :math:`\\alpha`weight data fitting and regularization
The optimization problem is solved suing the algorithm described in [4]
Parameters
----------
a : np.ndarray (d)
observed distribution
D : np.ndarray (d,n)
dictionary matrix
M : np.ndarray (d,d)
loss matrix
M0 : np.ndarray (n,n)
loss matrix
h0 : np.ndarray (n,)
prior on h
reg : float
Regularization term >0 (Wasserstein data fitting)
reg0 : float
Regularization term >0 (Wasserstein reg with h0)
alpha : float
How much should we trust the prior ([0,1])
numItermax : int, optional
Max number of iterations
stopThr : float, optional
Stop threshol on error (>0)
verbose : bool, optional
Print information along iterations
log : bool, optional
record log if True
Returns
-------
a : (d,) ndarray
Wasserstein barycenter
log : dict
log dictionary return only if log==True in parameters
References
----------
.. [4] S. Nakhostin, N. Courty, R. Flamary, D. Tuia, T. Corpetti, Supervised planetary unmixing with optimal transport, Whorkshop on Hyperspectral Image and Signal Processing : Evolution in Remote Sensing (WHISPERS), 2016.
### Response:
def unmix(a, D, M, M0, h0, reg, reg0, alpha, numItermax=1000,
stopThr=1e-3, verbose=False, log=False):
"""
Compute the unmixing of an observation with a given dictionary using Wasserstein distance
The function solve the following optimization problem:
.. math::
\mathbf{h} = arg\min_\mathbf{h} (1- \\alpha) W_{M,reg}(\mathbf{a},\mathbf{Dh})+\\alpha W_{M0,reg0}(\mathbf{h}_0,\mathbf{h})
where :
- :math:`W_{M,reg}(\cdot,\cdot)` is the entropic regularized Wasserstein distance with M loss matrix (see ot.bregman.sinkhorn)
- :math:`\mathbf{a}` is an observed distribution, :math:`\mathbf{h}_0` is aprior on unmixing
- reg and :math:`\mathbf{M}` are respectively the regularization term and the cost matrix for OT data fitting
- reg0 and :math:`\mathbf{M0}` are respectively the regularization term and the cost matrix for regularization
- :math:`\\alpha`weight data fitting and regularization
The optimization problem is solved suing the algorithm described in [4]
Parameters
----------
a : np.ndarray (d)
observed distribution
D : np.ndarray (d,n)
dictionary matrix
M : np.ndarray (d,d)
loss matrix
M0 : np.ndarray (n,n)
loss matrix
h0 : np.ndarray (n,)
prior on h
reg : float
Regularization term >0 (Wasserstein data fitting)
reg0 : float
Regularization term >0 (Wasserstein reg with h0)
alpha : float
How much should we trust the prior ([0,1])
numItermax : int, optional
Max number of iterations
stopThr : float, optional
Stop threshol on error (>0)
verbose : bool, optional
Print information along iterations
log : bool, optional
record log if True
Returns
-------
a : (d,) ndarray
Wasserstein barycenter
log : dict
log dictionary return only if log==True in parameters
References
----------
.. [4] S. Nakhostin, N. Courty, R. Flamary, D. Tuia, T. Corpetti, Supervised planetary unmixing with optimal transport, Whorkshop on Hyperspectral Image and Signal Processing : Evolution in Remote Sensing (WHISPERS), 2016.
"""
# M = M/np.median(M)
K = np.exp(-M / reg)
# M0 = M0/np.median(M0)
K0 = np.exp(-M0 / reg0)
old = h0
err = 1
cpt = 0
# log = {'niter':0, 'all_err':[]}
if log:
log = {'err': []}
while (err > stopThr and cpt < numItermax):
K = projC(K, a)
K0 = projC(K0, h0)
new = np.sum(K0, axis=1)
# we recombine the current selection from dictionnary
inv_new = np.dot(D, new)
other = np.sum(K, axis=1)
# geometric interpolation
delta = np.exp(alpha * np.log(other) + (1 - alpha) * np.log(inv_new))
K = projR(K, delta)
K0 = np.dot(np.diag(np.dot(D.T, delta / inv_new)), K0)
err = np.linalg.norm(np.sum(K0, axis=1) - old)
old = new
if log:
log['err'].append(err)
if verbose:
if cpt % 200 == 0:
print('{:5s}|{:12s}'.format('It.', 'Err') + '\n' + '-' * 19)
print('{:5d}|{:8e}|'.format(cpt, err))
cpt = cpt + 1
if log:
log['niter'] = cpt
return np.sum(K0, axis=1), log
else:
return np.sum(K0, axis=1) |
def init (self, base_ref, base_url, parent_url, recursion_level,
aggregate, line, column, page, name, url_encoding, extern):
"""Initialize the scheme."""
super(FileUrl, self).init(base_ref, base_url, parent_url,
recursion_level, aggregate, line, column, page, name, url_encoding, extern)
self.scheme = u'file' | Initialize the scheme. | Below is the the instruction that describes the task:
### Input:
Initialize the scheme.
### Response:
def init (self, base_ref, base_url, parent_url, recursion_level,
aggregate, line, column, page, name, url_encoding, extern):
"""Initialize the scheme."""
super(FileUrl, self).init(base_ref, base_url, parent_url,
recursion_level, aggregate, line, column, page, name, url_encoding, extern)
self.scheme = u'file' |
def setup(app):
"""Sphinx extension entry point"""
app.add_config_value('jsdoc_source_root', '..', 'env')
app.add_config_value('jsdoc_output_root', 'javascript', 'env')
app.add_config_value('jsdoc_exclude', [], 'env')
app.connect('builder-inited', generate_docs) | Sphinx extension entry point | Below is the the instruction that describes the task:
### Input:
Sphinx extension entry point
### Response:
def setup(app):
"""Sphinx extension entry point"""
app.add_config_value('jsdoc_source_root', '..', 'env')
app.add_config_value('jsdoc_output_root', 'javascript', 'env')
app.add_config_value('jsdoc_exclude', [], 'env')
app.connect('builder-inited', generate_docs) |
def set(self, name, value, force=False):
"""Set a form element identified by ``name`` to a specified ``value``.
The type of element (input, textarea, select, ...) does not
need to be given; it is inferred by the following methods:
:func:`~Form.set_checkbox`,
:func:`~Form.set_radio`,
:func:`~Form.set_input`,
:func:`~Form.set_textarea`,
:func:`~Form.set_select`.
If none of these methods find a matching element, then if ``force``
is True, a new element (``<input type="text" ...>``) will be
added using :func:`~Form.new_control`.
Example: filling-in a login/password form with EULA checkbox
.. code-block:: python
form.set("login", username)
form.set("password", password)
form.set("eula-checkbox", True)
Example: uploading a file through a ``<input type="file"
name="tagname">`` field (provide the path to the local file,
and its content will be uploaded):
.. code-block:: python
form.set("tagname") = path_to_local_file
"""
for func in ("checkbox", "radio", "input", "textarea", "select"):
try:
getattr(self, "set_" + func)({name: value})
return
except InvalidFormMethod:
pass
if force:
self.new_control('text', name, value=value)
return
raise LinkNotFoundError("No valid element named " + name) | Set a form element identified by ``name`` to a specified ``value``.
The type of element (input, textarea, select, ...) does not
need to be given; it is inferred by the following methods:
:func:`~Form.set_checkbox`,
:func:`~Form.set_radio`,
:func:`~Form.set_input`,
:func:`~Form.set_textarea`,
:func:`~Form.set_select`.
If none of these methods find a matching element, then if ``force``
is True, a new element (``<input type="text" ...>``) will be
added using :func:`~Form.new_control`.
Example: filling-in a login/password form with EULA checkbox
.. code-block:: python
form.set("login", username)
form.set("password", password)
form.set("eula-checkbox", True)
Example: uploading a file through a ``<input type="file"
name="tagname">`` field (provide the path to the local file,
and its content will be uploaded):
.. code-block:: python
form.set("tagname") = path_to_local_file | Below is the the instruction that describes the task:
### Input:
Set a form element identified by ``name`` to a specified ``value``.
The type of element (input, textarea, select, ...) does not
need to be given; it is inferred by the following methods:
:func:`~Form.set_checkbox`,
:func:`~Form.set_radio`,
:func:`~Form.set_input`,
:func:`~Form.set_textarea`,
:func:`~Form.set_select`.
If none of these methods find a matching element, then if ``force``
is True, a new element (``<input type="text" ...>``) will be
added using :func:`~Form.new_control`.
Example: filling-in a login/password form with EULA checkbox
.. code-block:: python
form.set("login", username)
form.set("password", password)
form.set("eula-checkbox", True)
Example: uploading a file through a ``<input type="file"
name="tagname">`` field (provide the path to the local file,
and its content will be uploaded):
.. code-block:: python
form.set("tagname") = path_to_local_file
### Response:
def set(self, name, value, force=False):
"""Set a form element identified by ``name`` to a specified ``value``.
The type of element (input, textarea, select, ...) does not
need to be given; it is inferred by the following methods:
:func:`~Form.set_checkbox`,
:func:`~Form.set_radio`,
:func:`~Form.set_input`,
:func:`~Form.set_textarea`,
:func:`~Form.set_select`.
If none of these methods find a matching element, then if ``force``
is True, a new element (``<input type="text" ...>``) will be
added using :func:`~Form.new_control`.
Example: filling-in a login/password form with EULA checkbox
.. code-block:: python
form.set("login", username)
form.set("password", password)
form.set("eula-checkbox", True)
Example: uploading a file through a ``<input type="file"
name="tagname">`` field (provide the path to the local file,
and its content will be uploaded):
.. code-block:: python
form.set("tagname") = path_to_local_file
"""
for func in ("checkbox", "radio", "input", "textarea", "select"):
try:
getattr(self, "set_" + func)({name: value})
return
except InvalidFormMethod:
pass
if force:
self.new_control('text', name, value=value)
return
raise LinkNotFoundError("No valid element named " + name) |
def monitor_layer_outputs(self):
"""
Monitoring the outputs of each layer.
Useful for troubleshooting convergence problems.
"""
for layer, hidden in zip(self.layers, self._hidden_outputs):
self.training_monitors.append(('mean(%s)' % (layer.name), abs(hidden).mean())) | Monitoring the outputs of each layer.
Useful for troubleshooting convergence problems. | Below is the the instruction that describes the task:
### Input:
Monitoring the outputs of each layer.
Useful for troubleshooting convergence problems.
### Response:
def monitor_layer_outputs(self):
"""
Monitoring the outputs of each layer.
Useful for troubleshooting convergence problems.
"""
for layer, hidden in zip(self.layers, self._hidden_outputs):
self.training_monitors.append(('mean(%s)' % (layer.name), abs(hidden).mean())) |
def new(self, bootstrap_with=None, use_timer=False):
"""
Actual constructor of the solver.
"""
if not self.minisat:
self.minisat = pysolvers.minisat22_new()
if bootstrap_with:
for clause in bootstrap_with:
self.add_clause(clause)
self.use_timer = use_timer
self.call_time = 0.0 # time spent for the last call to oracle
self.accu_time = 0.0 | Actual constructor of the solver. | Below is the the instruction that describes the task:
### Input:
Actual constructor of the solver.
### Response:
def new(self, bootstrap_with=None, use_timer=False):
"""
Actual constructor of the solver.
"""
if not self.minisat:
self.minisat = pysolvers.minisat22_new()
if bootstrap_with:
for clause in bootstrap_with:
self.add_clause(clause)
self.use_timer = use_timer
self.call_time = 0.0 # time spent for the last call to oracle
self.accu_time = 0.0 |
def addChild(self,item):
"""
When you add a child to a Node, you are adding yourself as a parent to the child
You cannot have the same node as a child more than once.
If you add a Node, it is used. If you add a non-node, a new child Node is created. Thus: You cannot
add a child as an item which is a Node. (You can, however, construct such a node, and add it as a child)
"""
if not isinstance(item,Node):
item = Node(item)
if item in self.children:
return item
self.children.append(item)
item.parents.add(self)
return item | When you add a child to a Node, you are adding yourself as a parent to the child
You cannot have the same node as a child more than once.
If you add a Node, it is used. If you add a non-node, a new child Node is created. Thus: You cannot
add a child as an item which is a Node. (You can, however, construct such a node, and add it as a child) | Below is the the instruction that describes the task:
### Input:
When you add a child to a Node, you are adding yourself as a parent to the child
You cannot have the same node as a child more than once.
If you add a Node, it is used. If you add a non-node, a new child Node is created. Thus: You cannot
add a child as an item which is a Node. (You can, however, construct such a node, and add it as a child)
### Response:
def addChild(self,item):
"""
When you add a child to a Node, you are adding yourself as a parent to the child
You cannot have the same node as a child more than once.
If you add a Node, it is used. If you add a non-node, a new child Node is created. Thus: You cannot
add a child as an item which is a Node. (You can, however, construct such a node, and add it as a child)
"""
if not isinstance(item,Node):
item = Node(item)
if item in self.children:
return item
self.children.append(item)
item.parents.add(self)
return item |
def get_enrollment(self, id):
"""Retrieves an enrollment.
Useful to check its type and related metadata.
Args:
id (str): The id of the device account to update
See: https://auth0.com/docs/api/management/v2#!/Guardian/get_enrollments_by_id
"""
url = self._url('enrollments/{}'.format(id))
return self.client.get(url) | Retrieves an enrollment.
Useful to check its type and related metadata.
Args:
id (str): The id of the device account to update
See: https://auth0.com/docs/api/management/v2#!/Guardian/get_enrollments_by_id | Below is the the instruction that describes the task:
### Input:
Retrieves an enrollment.
Useful to check its type and related metadata.
Args:
id (str): The id of the device account to update
See: https://auth0.com/docs/api/management/v2#!/Guardian/get_enrollments_by_id
### Response:
def get_enrollment(self, id):
"""Retrieves an enrollment.
Useful to check its type and related metadata.
Args:
id (str): The id of the device account to update
See: https://auth0.com/docs/api/management/v2#!/Guardian/get_enrollments_by_id
"""
url = self._url('enrollments/{}'.format(id))
return self.client.get(url) |
def to_sql(self, connection, grammar):
"""
Get the raw SQL statements for the blueprint.
:param connection: The connection to use
:type connection: orator.connections.Connection
:param grammar: The grammar to user
:type grammar: orator.schema.grammars.SchemaGrammar
:rtype: list
"""
self._add_implied_commands()
statements = []
for command in self._commands:
method = "compile_%s" % command.name
if hasattr(grammar, method):
sql = getattr(grammar, method)(self, command, connection)
if sql is not None:
if isinstance(sql, list):
statements += sql
else:
statements.append(sql)
return statements | Get the raw SQL statements for the blueprint.
:param connection: The connection to use
:type connection: orator.connections.Connection
:param grammar: The grammar to user
:type grammar: orator.schema.grammars.SchemaGrammar
:rtype: list | Below is the the instruction that describes the task:
### Input:
Get the raw SQL statements for the blueprint.
:param connection: The connection to use
:type connection: orator.connections.Connection
:param grammar: The grammar to user
:type grammar: orator.schema.grammars.SchemaGrammar
:rtype: list
### Response:
def to_sql(self, connection, grammar):
"""
Get the raw SQL statements for the blueprint.
:param connection: The connection to use
:type connection: orator.connections.Connection
:param grammar: The grammar to user
:type grammar: orator.schema.grammars.SchemaGrammar
:rtype: list
"""
self._add_implied_commands()
statements = []
for command in self._commands:
method = "compile_%s" % command.name
if hasattr(grammar, method):
sql = getattr(grammar, method)(self, command, connection)
if sql is not None:
if isinstance(sql, list):
statements += sql
else:
statements.append(sql)
return statements |
def Extract(self, components):
"""Extracts interesting paths from a given path.
Args:
components: Source string represented as a list of components.
Returns:
A list of extracted paths (as strings).
"""
for index, component in enumerate(components):
if component.lower().endswith(self.EXECUTABLE_EXTENSIONS):
extracted_path = " ".join(components[0:index + 1])
return [extracted_path]
return [] | Extracts interesting paths from a given path.
Args:
components: Source string represented as a list of components.
Returns:
A list of extracted paths (as strings). | Below is the the instruction that describes the task:
### Input:
Extracts interesting paths from a given path.
Args:
components: Source string represented as a list of components.
Returns:
A list of extracted paths (as strings).
### Response:
def Extract(self, components):
"""Extracts interesting paths from a given path.
Args:
components: Source string represented as a list of components.
Returns:
A list of extracted paths (as strings).
"""
for index, component in enumerate(components):
if component.lower().endswith(self.EXECUTABLE_EXTENSIONS):
extracted_path = " ".join(components[0:index + 1])
return [extracted_path]
return [] |
def store_widget_properties(self, widget, widget_name):
"""Sets configuration values for widgets
If the widget is a window, then the size and position are stored. If the widget is a pane, then only the
position is stored. If the window is maximized the last insert position before being maximized is keep in the
config and the maximized flag set to True. The maximized state and the last size and position are strictly
separated by this.
:param widget: The widget, for which the position (and possibly the size) will be stored.
:param widget_name: The window or widget name of the widget, which constitutes a part of its key in the
configuration file.
"""
if isinstance(widget, Gtk.Window):
maximized = bool(widget.is_maximized())
self.set_config_value('{0}_MAXIMIZED'.format(widget_name), maximized)
if maximized:
return
size = widget.get_size()
self.set_config_value('{0}_SIZE'.format(widget_name), tuple(size))
position = widget.get_position()
self.set_config_value('{0}_POS'.format(widget_name), tuple(position))
else: # Gtk.Paned
position = widget.get_position()
self.set_config_value('{0}_POS'.format(widget_name), position) | Sets configuration values for widgets
If the widget is a window, then the size and position are stored. If the widget is a pane, then only the
position is stored. If the window is maximized the last insert position before being maximized is keep in the
config and the maximized flag set to True. The maximized state and the last size and position are strictly
separated by this.
:param widget: The widget, for which the position (and possibly the size) will be stored.
:param widget_name: The window or widget name of the widget, which constitutes a part of its key in the
configuration file. | Below is the the instruction that describes the task:
### Input:
Sets configuration values for widgets
If the widget is a window, then the size and position are stored. If the widget is a pane, then only the
position is stored. If the window is maximized the last insert position before being maximized is keep in the
config and the maximized flag set to True. The maximized state and the last size and position are strictly
separated by this.
:param widget: The widget, for which the position (and possibly the size) will be stored.
:param widget_name: The window or widget name of the widget, which constitutes a part of its key in the
configuration file.
### Response:
def store_widget_properties(self, widget, widget_name):
"""Sets configuration values for widgets
If the widget is a window, then the size and position are stored. If the widget is a pane, then only the
position is stored. If the window is maximized the last insert position before being maximized is keep in the
config and the maximized flag set to True. The maximized state and the last size and position are strictly
separated by this.
:param widget: The widget, for which the position (and possibly the size) will be stored.
:param widget_name: The window or widget name of the widget, which constitutes a part of its key in the
configuration file.
"""
if isinstance(widget, Gtk.Window):
maximized = bool(widget.is_maximized())
self.set_config_value('{0}_MAXIMIZED'.format(widget_name), maximized)
if maximized:
return
size = widget.get_size()
self.set_config_value('{0}_SIZE'.format(widget_name), tuple(size))
position = widget.get_position()
self.set_config_value('{0}_POS'.format(widget_name), tuple(position))
else: # Gtk.Paned
position = widget.get_position()
self.set_config_value('{0}_POS'.format(widget_name), position) |
def derivation(self):
"""
Deserialize and return a Derivation object for UDF- or
JSON-formatted derivation data; otherwise return the original
string.
"""
drv = self.get('derivation')
if drv is not None:
if isinstance(drv, dict):
drv = Derivation.from_dict(drv)
elif isinstance(drv, stringtypes):
drv = Derivation.from_string(drv)
return drv | Deserialize and return a Derivation object for UDF- or
JSON-formatted derivation data; otherwise return the original
string. | Below is the the instruction that describes the task:
### Input:
Deserialize and return a Derivation object for UDF- or
JSON-formatted derivation data; otherwise return the original
string.
### Response:
def derivation(self):
"""
Deserialize and return a Derivation object for UDF- or
JSON-formatted derivation data; otherwise return the original
string.
"""
drv = self.get('derivation')
if drv is not None:
if isinstance(drv, dict):
drv = Derivation.from_dict(drv)
elif isinstance(drv, stringtypes):
drv = Derivation.from_string(drv)
return drv |
def main():
"""
Use threads and Netmiko to connect to each of the devices. Execute
'show version' on each device. Record the amount of time required to do this.
"""
start_time = datetime.now()
for a_device in devices:
my_thread = threading.Thread(target=show_version, args=(a_device,))
my_thread.start()
main_thread = threading.currentThread()
for some_thread in threading.enumerate():
if some_thread != main_thread:
print(some_thread)
some_thread.join()
print("\nElapsed time: " + str(datetime.now() - start_time)) | Use threads and Netmiko to connect to each of the devices. Execute
'show version' on each device. Record the amount of time required to do this. | Below is the the instruction that describes the task:
### Input:
Use threads and Netmiko to connect to each of the devices. Execute
'show version' on each device. Record the amount of time required to do this.
### Response:
def main():
"""
Use threads and Netmiko to connect to each of the devices. Execute
'show version' on each device. Record the amount of time required to do this.
"""
start_time = datetime.now()
for a_device in devices:
my_thread = threading.Thread(target=show_version, args=(a_device,))
my_thread.start()
main_thread = threading.currentThread()
for some_thread in threading.enumerate():
if some_thread != main_thread:
print(some_thread)
some_thread.join()
print("\nElapsed time: " + str(datetime.now() - start_time)) |
def least_squares_effective_mass( cartesian_k_points, eigenvalues ):
"""
Calculate the effective mass using a least squares quadratic fit.
Args:
cartesian_k_points (np.array): Cartesian reciprocal coordinates for the k-points
eigenvalues (np.array): Energy eigenvalues at each k-point to be used in the fit.
Returns:
(float): The fitted effective mass
Notes:
If the k-points do not sit on a straight line a ValueError will be raised.
"""
if not points_are_in_a_straight_line( cartesian_k_points ):
raise ValueError( 'k-points are not collinear' )
dk = cartesian_k_points - cartesian_k_points[0]
mod_dk = np.linalg.norm( dk, axis = 1 )
delta_e = eigenvalues - eigenvalues[0]
effective_mass = 1.0 / ( np.polyfit( mod_dk, eigenvalues, 2 )[0] * ev_to_hartree * 2.0 )
return effective_mass | Calculate the effective mass using a least squares quadratic fit.
Args:
cartesian_k_points (np.array): Cartesian reciprocal coordinates for the k-points
eigenvalues (np.array): Energy eigenvalues at each k-point to be used in the fit.
Returns:
(float): The fitted effective mass
Notes:
If the k-points do not sit on a straight line a ValueError will be raised. | Below is the the instruction that describes the task:
### Input:
Calculate the effective mass using a least squares quadratic fit.
Args:
cartesian_k_points (np.array): Cartesian reciprocal coordinates for the k-points
eigenvalues (np.array): Energy eigenvalues at each k-point to be used in the fit.
Returns:
(float): The fitted effective mass
Notes:
If the k-points do not sit on a straight line a ValueError will be raised.
### Response:
def least_squares_effective_mass( cartesian_k_points, eigenvalues ):
"""
Calculate the effective mass using a least squares quadratic fit.
Args:
cartesian_k_points (np.array): Cartesian reciprocal coordinates for the k-points
eigenvalues (np.array): Energy eigenvalues at each k-point to be used in the fit.
Returns:
(float): The fitted effective mass
Notes:
If the k-points do not sit on a straight line a ValueError will be raised.
"""
if not points_are_in_a_straight_line( cartesian_k_points ):
raise ValueError( 'k-points are not collinear' )
dk = cartesian_k_points - cartesian_k_points[0]
mod_dk = np.linalg.norm( dk, axis = 1 )
delta_e = eigenvalues - eigenvalues[0]
effective_mass = 1.0 / ( np.polyfit( mod_dk, eigenvalues, 2 )[0] * ev_to_hartree * 2.0 )
return effective_mass |
def add(self, member):
""" Adds @member to the set
-> #int the number of @members that were added to the set,
excluding pre-existing members (1 or 0)
"""
return self._client.sadd(self.key_prefix, self._dumps(member)) | Adds @member to the set
-> #int the number of @members that were added to the set,
excluding pre-existing members (1 or 0) | Below is the the instruction that describes the task:
### Input:
Adds @member to the set
-> #int the number of @members that were added to the set,
excluding pre-existing members (1 or 0)
### Response:
def add(self, member):
""" Adds @member to the set
-> #int the number of @members that were added to the set,
excluding pre-existing members (1 or 0)
"""
return self._client.sadd(self.key_prefix, self._dumps(member)) |
async def get_info(self):
'''
Retrieves a brief information about the compute session.
'''
params = {}
if self.owner_access_key:
params['owner_access_key'] = self.owner_access_key
rqst = Request(self.session,
'GET', '/kernel/{}'.format(self.kernel_id),
params=params)
async with rqst.fetch() as resp:
return await resp.json() | Retrieves a brief information about the compute session. | Below is the the instruction that describes the task:
### Input:
Retrieves a brief information about the compute session.
### Response:
async def get_info(self):
'''
Retrieves a brief information about the compute session.
'''
params = {}
if self.owner_access_key:
params['owner_access_key'] = self.owner_access_key
rqst = Request(self.session,
'GET', '/kernel/{}'.format(self.kernel_id),
params=params)
async with rqst.fetch() as resp:
return await resp.json() |
def setup_session(self, server, hooks, graph_default_context):
"""
Creates and then enters the session for this model (finalizes the graph).
Args:
server (tf.train.Server): The tf.train.Server object to connect to (None for single execution).
hooks (list): A list of (saver, summary, etc..) hooks to be passed to the session.
graph_default_context: The graph as_default() context that we are currently in.
"""
if self.execution_type == "distributed":
# if self.distributed_spec['task_index'] == 0:
# TensorFlow chief session creator object
session_creator = tf.train.ChiefSessionCreator(
scaffold=self.scaffold,
master=server.target,
config=self.session_config,
checkpoint_dir=None,
checkpoint_filename_with_path=None
)
# else:
# # TensorFlow worker session creator object
# session_creator = tf.train.WorkerSessionCreator(
# scaffold=self.scaffold,
# master=server.target,
# config=self.execution_spec.get('session_config'),
# )
# TensorFlow monitored session object
self.monitored_session = tf.train.MonitoredSession(
session_creator=session_creator,
hooks=hooks,
stop_grace_period_secs=120 # Default value.
)
# Add debug session.run dumping?
if self.tf_session_dump_dir != "":
self.monitored_session = DumpingDebugWrapperSession(self.monitored_session, self.tf_session_dump_dir)
else:
# TensorFlow non-distributed monitored session object
self.monitored_session = tf.train.SingularMonitoredSession(
hooks=hooks,
scaffold=self.scaffold,
master='', # Default value.
config=self.session_config, # self.execution_spec.get('session_config'),
checkpoint_dir=None
)
if graph_default_context:
graph_default_context.__exit__(None, None, None)
self.graph.finalize()
# enter the session to be ready for acting/learning
self.monitored_session.__enter__()
self.session = self.monitored_session._tf_sess() | Creates and then enters the session for this model (finalizes the graph).
Args:
server (tf.train.Server): The tf.train.Server object to connect to (None for single execution).
hooks (list): A list of (saver, summary, etc..) hooks to be passed to the session.
graph_default_context: The graph as_default() context that we are currently in. | Below is the the instruction that describes the task:
### Input:
Creates and then enters the session for this model (finalizes the graph).
Args:
server (tf.train.Server): The tf.train.Server object to connect to (None for single execution).
hooks (list): A list of (saver, summary, etc..) hooks to be passed to the session.
graph_default_context: The graph as_default() context that we are currently in.
### Response:
def setup_session(self, server, hooks, graph_default_context):
"""
Creates and then enters the session for this model (finalizes the graph).
Args:
server (tf.train.Server): The tf.train.Server object to connect to (None for single execution).
hooks (list): A list of (saver, summary, etc..) hooks to be passed to the session.
graph_default_context: The graph as_default() context that we are currently in.
"""
if self.execution_type == "distributed":
# if self.distributed_spec['task_index'] == 0:
# TensorFlow chief session creator object
session_creator = tf.train.ChiefSessionCreator(
scaffold=self.scaffold,
master=server.target,
config=self.session_config,
checkpoint_dir=None,
checkpoint_filename_with_path=None
)
# else:
# # TensorFlow worker session creator object
# session_creator = tf.train.WorkerSessionCreator(
# scaffold=self.scaffold,
# master=server.target,
# config=self.execution_spec.get('session_config'),
# )
# TensorFlow monitored session object
self.monitored_session = tf.train.MonitoredSession(
session_creator=session_creator,
hooks=hooks,
stop_grace_period_secs=120 # Default value.
)
# Add debug session.run dumping?
if self.tf_session_dump_dir != "":
self.monitored_session = DumpingDebugWrapperSession(self.monitored_session, self.tf_session_dump_dir)
else:
# TensorFlow non-distributed monitored session object
self.monitored_session = tf.train.SingularMonitoredSession(
hooks=hooks,
scaffold=self.scaffold,
master='', # Default value.
config=self.session_config, # self.execution_spec.get('session_config'),
checkpoint_dir=None
)
if graph_default_context:
graph_default_context.__exit__(None, None, None)
self.graph.finalize()
# enter the session to be ready for acting/learning
self.monitored_session.__enter__()
self.session = self.monitored_session._tf_sess() |
def _data_execute(self, data, program, executor):
"""Execute the Data object.
The activities carried out here include target directory
preparation, executor copying, setting serialization and actual
execution of the object.
:param data: The :class:`~resolwe.flow.models.Data` object to
execute.
:param program: The process text the manager got out of
execution engine evaluation.
:param executor: The executor to use for this object.
"""
if not program:
return
logger.debug(__("Manager preparing Data with id {} for processing.", data.id))
# Prepare the executor's environment.
try:
executor_env_vars = self.get_executor().get_environment_variables()
program = self._include_environment_variables(program, executor_env_vars)
data_dir = self._prepare_data_dir(data)
executor_module, runtime_dir = self._prepare_executor(data, executor)
# Execute execution engine specific runtime preparation.
execution_engine = data.process.run.get('language', None)
volume_maps = self.get_execution_engine(execution_engine).prepare_runtime(runtime_dir, data)
self._prepare_context(data.id, data_dir, runtime_dir, RUNTIME_VOLUME_MAPS=volume_maps)
self._prepare_script(runtime_dir, program)
argv = [
'/bin/bash',
'-c',
self.settings_actual.get('FLOW_EXECUTOR', {}).get('PYTHON', '/usr/bin/env python')
+ ' -m executors ' + executor_module
]
except PermissionDenied as error:
data.status = Data.STATUS_ERROR
data.process_error.append("Permission denied for process: {}".format(error))
data.save()
return
except OSError as err:
logger.error(__(
"OSError occurred while preparing data {} (will skip): {}",
data.id, err
))
return
# Hand off to the run() method for execution.
logger.info(__("Running {}", runtime_dir))
self.run(data, runtime_dir, argv) | Execute the Data object.
The activities carried out here include target directory
preparation, executor copying, setting serialization and actual
execution of the object.
:param data: The :class:`~resolwe.flow.models.Data` object to
execute.
:param program: The process text the manager got out of
execution engine evaluation.
:param executor: The executor to use for this object. | Below is the the instruction that describes the task:
### Input:
Execute the Data object.
The activities carried out here include target directory
preparation, executor copying, setting serialization and actual
execution of the object.
:param data: The :class:`~resolwe.flow.models.Data` object to
execute.
:param program: The process text the manager got out of
execution engine evaluation.
:param executor: The executor to use for this object.
### Response:
def _data_execute(self, data, program, executor):
"""Execute the Data object.
The activities carried out here include target directory
preparation, executor copying, setting serialization and actual
execution of the object.
:param data: The :class:`~resolwe.flow.models.Data` object to
execute.
:param program: The process text the manager got out of
execution engine evaluation.
:param executor: The executor to use for this object.
"""
if not program:
return
logger.debug(__("Manager preparing Data with id {} for processing.", data.id))
# Prepare the executor's environment.
try:
executor_env_vars = self.get_executor().get_environment_variables()
program = self._include_environment_variables(program, executor_env_vars)
data_dir = self._prepare_data_dir(data)
executor_module, runtime_dir = self._prepare_executor(data, executor)
# Execute execution engine specific runtime preparation.
execution_engine = data.process.run.get('language', None)
volume_maps = self.get_execution_engine(execution_engine).prepare_runtime(runtime_dir, data)
self._prepare_context(data.id, data_dir, runtime_dir, RUNTIME_VOLUME_MAPS=volume_maps)
self._prepare_script(runtime_dir, program)
argv = [
'/bin/bash',
'-c',
self.settings_actual.get('FLOW_EXECUTOR', {}).get('PYTHON', '/usr/bin/env python')
+ ' -m executors ' + executor_module
]
except PermissionDenied as error:
data.status = Data.STATUS_ERROR
data.process_error.append("Permission denied for process: {}".format(error))
data.save()
return
except OSError as err:
logger.error(__(
"OSError occurred while preparing data {} (will skip): {}",
data.id, err
))
return
# Hand off to the run() method for execution.
logger.info(__("Running {}", runtime_dir))
self.run(data, runtime_dir, argv) |
def simplify(cls, content_type):
"""
The MIME types main- and sub-label can both start with <tt>x-</tt>,
which indicates that it is a non-registered name. Of course, after
registration this flag can disappear, adds to the confusing
proliferation of MIME types. The simplified string has the
<tt>x-</tt> removed and are translated to lowercase.
"""
matchdata = MEDIA_TYPE_RE.match(content_type)
if matchdata is None:
return None
wrap = lambda s: re.sub(UNREG_RE, '', s.lower())
(media_type, subtype) = matchdata.groups()
return '%s/%s' % (wrap(media_type), wrap(subtype)) | The MIME types main- and sub-label can both start with <tt>x-</tt>,
which indicates that it is a non-registered name. Of course, after
registration this flag can disappear, adds to the confusing
proliferation of MIME types. The simplified string has the
<tt>x-</tt> removed and are translated to lowercase. | Below is the the instruction that describes the task:
### Input:
The MIME types main- and sub-label can both start with <tt>x-</tt>,
which indicates that it is a non-registered name. Of course, after
registration this flag can disappear, adds to the confusing
proliferation of MIME types. The simplified string has the
<tt>x-</tt> removed and are translated to lowercase.
### Response:
def simplify(cls, content_type):
"""
The MIME types main- and sub-label can both start with <tt>x-</tt>,
which indicates that it is a non-registered name. Of course, after
registration this flag can disappear, adds to the confusing
proliferation of MIME types. The simplified string has the
<tt>x-</tt> removed and are translated to lowercase.
"""
matchdata = MEDIA_TYPE_RE.match(content_type)
if matchdata is None:
return None
wrap = lambda s: re.sub(UNREG_RE, '', s.lower())
(media_type, subtype) = matchdata.groups()
return '%s/%s' % (wrap(media_type), wrap(subtype)) |
def unite(df, colname, *args, **kwargs):
"""
Does the inverse of `separate`, joining columns together by a specified
separator.
Any columns that are not strings will be converted to strings.
Args:
df (pandas.DataFrame): DataFrame passed in through the pipe.
colname (str): the name of the new joined column.
*args: list of columns to be joined, which can be strings, symbolic, or
integer positions.
Kwargs:
sep (str): the string separator to join the columns with.
remove (bool): Boolean indicating whether or not to remove the
original columns.
na_action (str): can be one of `'maintain'` (the default),
'`ignore'`, or `'as_string'`. The default will make the new column
row a `NaN` value if any of the original column cells at that
row contained `NaN`. '`ignore'` will treat any `NaN` value as an
empty string during joining. `'as_string'` will convert any `NaN`
value to the string `'nan'` prior to joining.
"""
to_unite = list([a for a in flatten(args)])
sep = kwargs.get('sep', '_')
remove = kwargs.get('remove', True)
# possible na_action values
# ignore: empty string
# maintain: keep as np.nan (default)
# as_string: becomes string 'nan'
na_action = kwargs.get('na_action', 'maintain')
# print(to_unite, sep, remove, na_action)
if na_action == 'maintain':
df[colname] = df[to_unite].apply(lambda x: np.nan if any(x.isnull())
else sep.join(x.map(str)), axis=1)
elif na_action == 'ignore':
df[colname] = df[to_unite].apply(lambda x: sep.join(x[~x.isnull()].map(str)),
axis=1)
elif na_action == 'as_string':
df[colname] = df[to_unite].astype(str).apply(lambda x: sep.join(x), axis=1)
if remove:
df.drop(to_unite, axis=1, inplace=True)
return df | Does the inverse of `separate`, joining columns together by a specified
separator.
Any columns that are not strings will be converted to strings.
Args:
df (pandas.DataFrame): DataFrame passed in through the pipe.
colname (str): the name of the new joined column.
*args: list of columns to be joined, which can be strings, symbolic, or
integer positions.
Kwargs:
sep (str): the string separator to join the columns with.
remove (bool): Boolean indicating whether or not to remove the
original columns.
na_action (str): can be one of `'maintain'` (the default),
'`ignore'`, or `'as_string'`. The default will make the new column
row a `NaN` value if any of the original column cells at that
row contained `NaN`. '`ignore'` will treat any `NaN` value as an
empty string during joining. `'as_string'` will convert any `NaN`
value to the string `'nan'` prior to joining. | Below is the the instruction that describes the task:
### Input:
Does the inverse of `separate`, joining columns together by a specified
separator.
Any columns that are not strings will be converted to strings.
Args:
df (pandas.DataFrame): DataFrame passed in through the pipe.
colname (str): the name of the new joined column.
*args: list of columns to be joined, which can be strings, symbolic, or
integer positions.
Kwargs:
sep (str): the string separator to join the columns with.
remove (bool): Boolean indicating whether or not to remove the
original columns.
na_action (str): can be one of `'maintain'` (the default),
'`ignore'`, or `'as_string'`. The default will make the new column
row a `NaN` value if any of the original column cells at that
row contained `NaN`. '`ignore'` will treat any `NaN` value as an
empty string during joining. `'as_string'` will convert any `NaN`
value to the string `'nan'` prior to joining.
### Response:
def unite(df, colname, *args, **kwargs):
"""
Does the inverse of `separate`, joining columns together by a specified
separator.
Any columns that are not strings will be converted to strings.
Args:
df (pandas.DataFrame): DataFrame passed in through the pipe.
colname (str): the name of the new joined column.
*args: list of columns to be joined, which can be strings, symbolic, or
integer positions.
Kwargs:
sep (str): the string separator to join the columns with.
remove (bool): Boolean indicating whether or not to remove the
original columns.
na_action (str): can be one of `'maintain'` (the default),
'`ignore'`, or `'as_string'`. The default will make the new column
row a `NaN` value if any of the original column cells at that
row contained `NaN`. '`ignore'` will treat any `NaN` value as an
empty string during joining. `'as_string'` will convert any `NaN`
value to the string `'nan'` prior to joining.
"""
to_unite = list([a for a in flatten(args)])
sep = kwargs.get('sep', '_')
remove = kwargs.get('remove', True)
# possible na_action values
# ignore: empty string
# maintain: keep as np.nan (default)
# as_string: becomes string 'nan'
na_action = kwargs.get('na_action', 'maintain')
# print(to_unite, sep, remove, na_action)
if na_action == 'maintain':
df[colname] = df[to_unite].apply(lambda x: np.nan if any(x.isnull())
else sep.join(x.map(str)), axis=1)
elif na_action == 'ignore':
df[colname] = df[to_unite].apply(lambda x: sep.join(x[~x.isnull()].map(str)),
axis=1)
elif na_action == 'as_string':
df[colname] = df[to_unite].astype(str).apply(lambda x: sep.join(x), axis=1)
if remove:
df.drop(to_unite, axis=1, inplace=True)
return df |
def normalize_text(text: str) -> str:
"""
Performs a normalization that is very similar to that done by the normalization functions in
SQuAD and TriviaQA.
This involves splitting and rejoining the text, and could be a somewhat expensive operation.
"""
return ' '.join([token
for token in text.lower().strip(STRIPPED_CHARACTERS).split()
if token not in IGNORED_TOKENS]) | Performs a normalization that is very similar to that done by the normalization functions in
SQuAD and TriviaQA.
This involves splitting and rejoining the text, and could be a somewhat expensive operation. | Below is the the instruction that describes the task:
### Input:
Performs a normalization that is very similar to that done by the normalization functions in
SQuAD and TriviaQA.
This involves splitting and rejoining the text, and could be a somewhat expensive operation.
### Response:
def normalize_text(text: str) -> str:
"""
Performs a normalization that is very similar to that done by the normalization functions in
SQuAD and TriviaQA.
This involves splitting and rejoining the text, and could be a somewhat expensive operation.
"""
return ' '.join([token
for token in text.lower().strip(STRIPPED_CHARACTERS).split()
if token not in IGNORED_TOKENS]) |
def add_http_basic_auth(url,
user=None,
password=None,
https_only=False):
'''
Return a string with http basic auth incorporated into it
'''
if user is None and password is None:
return url
else:
urltuple = urlparse(url)
if https_only and urltuple.scheme != 'https':
raise ValueError('Basic Auth only supported for HTTPS')
if password is None:
netloc = '{0}@{1}'.format(
user,
urltuple.netloc
)
urltuple = urltuple._replace(netloc=netloc)
return urlunparse(urltuple)
else:
netloc = '{0}:{1}@{2}'.format(
user,
password,
urltuple.netloc
)
urltuple = urltuple._replace(netloc=netloc)
return urlunparse(urltuple) | Return a string with http basic auth incorporated into it | Below is the the instruction that describes the task:
### Input:
Return a string with http basic auth incorporated into it
### Response:
def add_http_basic_auth(url,
user=None,
password=None,
https_only=False):
'''
Return a string with http basic auth incorporated into it
'''
if user is None and password is None:
return url
else:
urltuple = urlparse(url)
if https_only and urltuple.scheme != 'https':
raise ValueError('Basic Auth only supported for HTTPS')
if password is None:
netloc = '{0}@{1}'.format(
user,
urltuple.netloc
)
urltuple = urltuple._replace(netloc=netloc)
return urlunparse(urltuple)
else:
netloc = '{0}:{1}@{2}'.format(
user,
password,
urltuple.netloc
)
urltuple = urltuple._replace(netloc=netloc)
return urlunparse(urltuple) |
def _extract_id_from_batch_response(r, name='id'):
"""Unholy, forward-compatible, mess for extraction of id/oid from a
soon-to-be (deprecated) batch response."""
names = name + 's'
if names in r:
# soon-to-be deprecated batch reponse
if 'errors' in r and r['errors']:
raise GeneralException(r['errors'][0]['desc'])
id = r[names][0]
else:
# new-style simplified api response
id = r[name]
return int(id) | Unholy, forward-compatible, mess for extraction of id/oid from a
soon-to-be (deprecated) batch response. | Below is the the instruction that describes the task:
### Input:
Unholy, forward-compatible, mess for extraction of id/oid from a
soon-to-be (deprecated) batch response.
### Response:
def _extract_id_from_batch_response(r, name='id'):
"""Unholy, forward-compatible, mess for extraction of id/oid from a
soon-to-be (deprecated) batch response."""
names = name + 's'
if names in r:
# soon-to-be deprecated batch reponse
if 'errors' in r and r['errors']:
raise GeneralException(r['errors'][0]['desc'])
id = r[names][0]
else:
# new-style simplified api response
id = r[name]
return int(id) |
def get(self, name, ns=None, default=None):
"""
Get the value of an attribute by name.
@param name: The name of the attribute.
@type name: basestring
@param ns: The optional attribute's namespace.
@type ns: (I{prefix}, I{name})
@param default: An optional value to be returned when either the
attribute does not exist or has no value.
@type default: basestring
@return: The attribute's value or I{default}.
@rtype: basestring
@see: __getitem__()
"""
attr = self.getAttribute(name, ns)
if attr is None or attr.value is None:
return default
return attr.getValue() | Get the value of an attribute by name.
@param name: The name of the attribute.
@type name: basestring
@param ns: The optional attribute's namespace.
@type ns: (I{prefix}, I{name})
@param default: An optional value to be returned when either the
attribute does not exist or has no value.
@type default: basestring
@return: The attribute's value or I{default}.
@rtype: basestring
@see: __getitem__() | Below is the the instruction that describes the task:
### Input:
Get the value of an attribute by name.
@param name: The name of the attribute.
@type name: basestring
@param ns: The optional attribute's namespace.
@type ns: (I{prefix}, I{name})
@param default: An optional value to be returned when either the
attribute does not exist or has no value.
@type default: basestring
@return: The attribute's value or I{default}.
@rtype: basestring
@see: __getitem__()
### Response:
def get(self, name, ns=None, default=None):
"""
Get the value of an attribute by name.
@param name: The name of the attribute.
@type name: basestring
@param ns: The optional attribute's namespace.
@type ns: (I{prefix}, I{name})
@param default: An optional value to be returned when either the
attribute does not exist or has no value.
@type default: basestring
@return: The attribute's value or I{default}.
@rtype: basestring
@see: __getitem__()
"""
attr = self.getAttribute(name, ns)
if attr is None or attr.value is None:
return default
return attr.getValue() |
def process_part(self, char):
'''Process chars while in a part'''
if char in self.whitespace or char == self.eol_char:
# End of the part.
self.parts.append( ''.join(self.part) )
self.part = []
# Switch back to processing a delimiter.
self.process_char = self.process_delimiter
if char == self.eol_char:
self.complete = True
return
if char in self.quote_chars:
# Store the quote type (' or ") and switch to quote processing.
self.inquote = char
self.process_char = self.process_quote
return
self.part.append(char) | Process chars while in a part | Below is the the instruction that describes the task:
### Input:
Process chars while in a part
### Response:
def process_part(self, char):
'''Process chars while in a part'''
if char in self.whitespace or char == self.eol_char:
# End of the part.
self.parts.append( ''.join(self.part) )
self.part = []
# Switch back to processing a delimiter.
self.process_char = self.process_delimiter
if char == self.eol_char:
self.complete = True
return
if char in self.quote_chars:
# Store the quote type (' or ") and switch to quote processing.
self.inquote = char
self.process_char = self.process_quote
return
self.part.append(char) |
def UTCFromGps(gpsWeek, SOW, leapSecs=14):
"""converts gps week and seconds to UTC
see comments of inverse function!
SOW = seconds of week
gpsWeek is the full number (not modulo 1024)
"""
secFract = SOW % 1
epochTuple = gpsEpoch + (-1, -1, 0)
t0 = time.mktime(epochTuple) - time.timezone #mktime is localtime, correct for UTC
tdiff = (gpsWeek * secsInWeek) + SOW - leapSecs
t = t0 + tdiff
(year, month, day, hh, mm, ss, dayOfWeek, julianDay, daylightsaving) = time.gmtime(t)
#use gmtime since localtime does not allow to switch off daylighsavings correction!!!
return (year, month, day, hh, mm, ss + secFract) | converts gps week and seconds to UTC
see comments of inverse function!
SOW = seconds of week
gpsWeek is the full number (not modulo 1024) | Below is the the instruction that describes the task:
### Input:
converts gps week and seconds to UTC
see comments of inverse function!
SOW = seconds of week
gpsWeek is the full number (not modulo 1024)
### Response:
def UTCFromGps(gpsWeek, SOW, leapSecs=14):
"""converts gps week and seconds to UTC
see comments of inverse function!
SOW = seconds of week
gpsWeek is the full number (not modulo 1024)
"""
secFract = SOW % 1
epochTuple = gpsEpoch + (-1, -1, 0)
t0 = time.mktime(epochTuple) - time.timezone #mktime is localtime, correct for UTC
tdiff = (gpsWeek * secsInWeek) + SOW - leapSecs
t = t0 + tdiff
(year, month, day, hh, mm, ss, dayOfWeek, julianDay, daylightsaving) = time.gmtime(t)
#use gmtime since localtime does not allow to switch off daylighsavings correction!!!
return (year, month, day, hh, mm, ss + secFract) |
def get_input_widget(self, fieldname, arnum=0, **kw):
"""Get the field widget of the AR in column <arnum>
:param fieldname: The base fieldname
:type fieldname: string
"""
# temporary AR Context
context = self.get_ar()
# request = self.request
schema = context.Schema()
# get original field in the schema from the base_fieldname
base_fieldname = fieldname.split("-")[0]
field = context.getField(base_fieldname)
# fieldname with -<arnum> suffix
new_fieldname = self.get_fieldname(field, arnum)
new_field = field.copy(name=new_fieldname)
# get the default value for this field
fieldvalues = self.fieldvalues
field_value = fieldvalues.get(new_fieldname)
# request_value = request.form.get(new_fieldname)
# value = request_value or field_value
value = field_value
def getAccessor(instance):
def accessor(**kw):
return value
return accessor
# inject the new context for the widget renderer
# see: Products.Archetypes.Renderer.render
kw["here"] = context
kw["context"] = context
kw["fieldName"] = new_fieldname
# make the field available with this name
# XXX: This is a hack to make the widget available in the template
schema._fields[new_fieldname] = new_field
new_field.getAccessor = getAccessor
# set the default value
form = dict()
form[new_fieldname] = value
self.request.form.update(form)
logger.info("get_input_widget: fieldname={} arnum={} "
"-> new_fieldname={} value={}".format(
fieldname, arnum, new_fieldname, value))
widget = context.widget(new_fieldname, **kw)
return widget | Get the field widget of the AR in column <arnum>
:param fieldname: The base fieldname
:type fieldname: string | Below is the the instruction that describes the task:
### Input:
Get the field widget of the AR in column <arnum>
:param fieldname: The base fieldname
:type fieldname: string
### Response:
def get_input_widget(self, fieldname, arnum=0, **kw):
"""Get the field widget of the AR in column <arnum>
:param fieldname: The base fieldname
:type fieldname: string
"""
# temporary AR Context
context = self.get_ar()
# request = self.request
schema = context.Schema()
# get original field in the schema from the base_fieldname
base_fieldname = fieldname.split("-")[0]
field = context.getField(base_fieldname)
# fieldname with -<arnum> suffix
new_fieldname = self.get_fieldname(field, arnum)
new_field = field.copy(name=new_fieldname)
# get the default value for this field
fieldvalues = self.fieldvalues
field_value = fieldvalues.get(new_fieldname)
# request_value = request.form.get(new_fieldname)
# value = request_value or field_value
value = field_value
def getAccessor(instance):
def accessor(**kw):
return value
return accessor
# inject the new context for the widget renderer
# see: Products.Archetypes.Renderer.render
kw["here"] = context
kw["context"] = context
kw["fieldName"] = new_fieldname
# make the field available with this name
# XXX: This is a hack to make the widget available in the template
schema._fields[new_fieldname] = new_field
new_field.getAccessor = getAccessor
# set the default value
form = dict()
form[new_fieldname] = value
self.request.form.update(form)
logger.info("get_input_widget: fieldname={} arnum={} "
"-> new_fieldname={} value={}".format(
fieldname, arnum, new_fieldname, value))
widget = context.widget(new_fieldname, **kw)
return widget |
def p_block_statements(self, p):
'block_statements : block_statements block_statement'
p[0] = p[1] + (p[2],)
p.set_lineno(0, p.lineno(1)) | block_statements : block_statements block_statement | Below is the instruction that describes the task:
### Input:
block_statements : block_statements block_statement
### Response:
def p_block_statements(self, p):
'block_statements : block_statements block_statement'
p[0] = p[1] + (p[2],)
p.set_lineno(0, p.lineno(1)) |
def send_password_changed_email(self, user):
"""Send the 'password has changed' notification email."""
# Verify config settings
if not self.user_manager.USER_ENABLE_EMAIL: return
if not self.user_manager.USER_SEND_PASSWORD_CHANGED_EMAIL: return
# Notification emails are sent to the user's primary email address
user_or_user_email_object = self.user_manager.db_manager.get_primary_user_email_object(user)
email = user_or_user_email_object.email
# Render email from templates and send it via the configured EmailAdapter
self._render_and_send_email(
email,
user,
self.user_manager.USER_PASSWORD_CHANGED_EMAIL_TEMPLATE,
) | Send the 'password has changed' notification email. | Below is the instruction that describes the task:
### Input:
Send the 'password has changed' notification email.
### Response:
def send_password_changed_email(self, user):
"""Send the 'password has changed' notification email."""
# Verify config settings
if not self.user_manager.USER_ENABLE_EMAIL: return
if not self.user_manager.USER_SEND_PASSWORD_CHANGED_EMAIL: return
# Notification emails are sent to the user's primary email address
user_or_user_email_object = self.user_manager.db_manager.get_primary_user_email_object(user)
email = user_or_user_email_object.email
# Render email from templates and send it via the configured EmailAdapter
self._render_and_send_email(
email,
user,
self.user_manager.USER_PASSWORD_CHANGED_EMAIL_TEMPLATE,
) |
def execute_greenlet_async(func, *args, **kwargs):
"""
Executes `func` in a separate greenlet in the same process. Memory and other
resources are available (e.g. TCP connections etc.) `args` and `kwargs` are
passed to `func`.
"""
global _GREENLET_EXECUTOR
if _GREENLET_EXECUTOR is None:
_GREENLET_EXECUTOR = GreenletExecutor(
num_greenlets=settings.node.greenlet_pool_size)
return _GREENLET_EXECUTOR.submit(func, *args, **kwargs) | Executes `func` in a separate greenlet in the same process. Memory and other
resources are available (e.g. TCP connections etc.) `args` and `kwargs` are
passed to `func`. | Below is the instruction that describes the task:
### Input:
Executes `func` in a separate greenlet in the same process. Memory and other
resources are available (e.g. TCP connections etc.) `args` and `kwargs` are
passed to `func`.
### Response:
def execute_greenlet_async(func, *args, **kwargs):
"""
Executes `func` in a separate greenlet in the same process. Memory and other
resources are available (e.g. TCP connections etc.) `args` and `kwargs` are
passed to `func`.
"""
global _GREENLET_EXECUTOR
if _GREENLET_EXECUTOR is None:
_GREENLET_EXECUTOR = GreenletExecutor(
num_greenlets=settings.node.greenlet_pool_size)
return _GREENLET_EXECUTOR.submit(func, *args, **kwargs) |
def ttSparseALS(cooP, shape, x0=None, ttRank=1, tol=1e-5, maxnsweeps=20, verbose=True, alpha=1e-2):
'''
TT completion via Alternating Least Squares algorithm.
Parameters:
:dict: cooP
dictionary with two records
- 'indices': numpy.array of P x d shape,
contains index subspace of P known elements;
each string is an index of one element.
- 'values': numpy array of size P,
contains P known values.
:list, numpy.array: shape
full-format shape of tensor to be completed [dimensions]
:tt.vector: x0 = None
initial approximation of completed tensor
If it is specified, parameters 'shape' and 'ttRank' will be ignored
:int, numpy.array: ttRank = 1
assumed rank of completed tensor
:float: tol = 1e-5
tolerance for functional value
:int: maxnsweeps = 20
maximal number of sweeps [sequential optimization of all d cores
in right or left direction]
:boolean: verbose = True
switcher of messages from function
:float: alpha: = 1e-2
regularizer of least squares problem for each slice of current TT core.
[rcond parameter for np.linalg.lstsq]
Returns:
:tt.vector: xNew
completed TT vector
:list: fit
list of functional values at each sweep
'''
indices = cooP['indices']
values = cooP['values']
[P, d] = indices.shape
assert P == len(values)
timeVal = time.clock()
if x0 is None:
x = tt.rand(shape, r = ttRank)
x = x.round(0.)
x = (1./x.norm())*x
else:
x = copy.deepcopy(x0)
assert d == x.d
# TODO: also check if cooP indices are aligned with shape
normP = np.linalg.norm(values)
values /= normP
fitList = []
sweepTimeList = []
initTime = time.clock() - timeVal
timeVal = time.clock()
coreList = tt.vector.to_list(x)
#coreList = orthLRFull(coreList, mu = d, splitResult = False)
# orthTime = time.clock() - timeVal
if verbose:
print("Initialization time: %.3f seconds (proc.time)" % (initTime))
# print "Orthogonalizing time: %.3f seconds (proc.time)" % (orthTime)
for sweep in xrange(maxnsweeps):
sweepStart = time.clock()
# list left + right
[kStart, kEnd, kStep] = [0, d, 1]
# select direction of sweep
'''
if sweep % 2 == 0: # left to rigth
[kStart, kEnd, kStep] = [0, d, 1]
else: # right to left
[kStart, kEnd, kStep] = [d-1, -1, -1]
'''
# fix k-th core to update
for k in xrange(kStart, kEnd, kStep):
[r1, n, r2] = coreList[k].shape
core = np.zeros([r1, n, r2])
leftU = []
rightV = []
if k > 0:
leftU = coreList[:k]
if k < d-1:
rightV = coreList[k+1:]
for i in xrange(n):
thetaI = np.where(indices[:, k] == i)[0]
if len(thetaI) > 0:
A = np.zeros([len(thetaI), r1*r2])
for j in xrange(len(thetaI)):
tmp = getRow(leftU, rightV, indices[thetaI[j], :])
A[j:j+1, :] += tmp # .flatten(order = 'F')
vecCoreSlice, _, _, _ = np.linalg.lstsq(A, values[thetaI])#, rcond = alpha)
# 0.5*np.linalg.norm(np.dot(A, vecCoreSlice) - values[thetaI])**2.
core[:, i, :] += reshape(vecCoreSlice, [r1, r2]) ####
'''
if k < (d-1):
core = reshape(core, [r1*n, r2])
Q, R = np.linalg.qr(core)
rnew = Q.shape[1]
core = reshape(Q, [r1, n, rnew])
coreList[k+1] = np.einsum('ijk,li->ljk', coreList[k+1], R)
'''
coreList[k] = core.copy()
'''
else:
if (k > 0):
core = reshape(core, [r1, n*r2])
Q, R = np.linalg.qr(core.T)
rnew = Q.shape[1]
core = reshape(Q.T, [rnew, n, r2])
coreList[k-1] = np.einsum('ijk,lk->ijl', coreList[k-1], R)
'''
xNew = tt.vector.from_list(coreList)
fit = computeFunctional(xNew, cooP)
fitList.append(fit)
if fit < tol:
break
if sweep > 0:
if abs(fit - fitList[-2]) < tol:
break
sweepTimeList.append(time.clock() - sweepStart)
if verbose:
print("sweep %d/%d\t fit value: %.5e\t time: %.3f seconds (proc.time)" % (sweep+1, maxnsweeps, fit, sweepTimeList[-1]))
if verbose:
print("Total sweep time: %.3f seconds (proc.time)\t Total time: %.3f seconds (proc.time)" % (sum(sweepTimeList), sum(sweepTimeList) + initTime))# + orthTime)
info = {'fit': fitList, 'initTime': initTime, 'sweepTime': sweepTimeList} # 'orthTime': orthTime,
xNew *= normP
values *= normP
return xNew, info | TT completion via Alternating Least Squares algorithm.
Parameters:
:dict: cooP
dictionary with two records
- 'indices': numpy.array of P x d shape,
contains index subspace of P known elements;
each string is an index of one element.
- 'values': numpy array of size P,
contains P known values.
:list, numpy.array: shape
full-format shape of tensor to be completed [dimensions]
:tt.vector: x0 = None
initial approximation of completed tensor
If it is specified, parameters 'shape' and 'ttRank' will be ignored
:int, numpy.array: ttRank = 1
assumed rank of completed tensor
:float: tol = 1e-5
tolerance for functional value
:int: maxnsweeps = 20
maximal number of sweeps [sequential optimization of all d cores
in right or left direction]
:boolean: verbose = True
switcher of messages from function
:float: alpha: = 1e-2
regularizer of least squares problem for each slice of current TT core.
[rcond parameter for np.linalg.lstsq]
Returns:
:tt.vector: xNew
completed TT vector
:list: fit
list of functional values at each sweep | Below is the instruction that describes the task:
### Input:
TT completion via Alternating Least Squares algorithm.
Parameters:
:dict: cooP
dictionary with two records
- 'indices': numpy.array of P x d shape,
contains index subspace of P known elements;
each string is an index of one element.
- 'values': numpy array of size P,
contains P known values.
:list, numpy.array: shape
full-format shape of tensor to be completed [dimensions]
:tt.vector: x0 = None
initial approximation of completed tensor
If it is specified, parameters 'shape' and 'ttRank' will be ignored
:int, numpy.array: ttRank = 1
assumed rank of completed tensor
:float: tol = 1e-5
tolerance for functional value
:int: maxnsweeps = 20
maximal number of sweeps [sequential optimization of all d cores
in right or left direction]
:boolean: verbose = True
switcher of messages from function
:float: alpha: = 1e-2
regularizer of least squares problem for each slice of current TT core.
[rcond parameter for np.linalg.lstsq]
Returns:
:tt.vector: xNew
completed TT vector
:list: fit
list of functional values at each sweep
### Response:
def ttSparseALS(cooP, shape, x0=None, ttRank=1, tol=1e-5, maxnsweeps=20, verbose=True, alpha=1e-2):
'''
TT completion via Alternating Least Squares algorithm.
Parameters:
:dict: cooP
dictionary with two records
- 'indices': numpy.array of P x d shape,
contains index subspace of P known elements;
each string is an index of one element.
- 'values': numpy array of size P,
contains P known values.
:list, numpy.array: shape
full-format shape of tensor to be completed [dimensions]
:tt.vector: x0 = None
initial approximation of completed tensor
If it is specified, parameters 'shape' and 'ttRank' will be ignored
:int, numpy.array: ttRank = 1
assumed rank of completed tensor
:float: tol = 1e-5
tolerance for functional value
:int: maxnsweeps = 20
maximal number of sweeps [sequential optimization of all d cores
in right or left direction]
:boolean: verbose = True
switcher of messages from function
:float: alpha: = 1e-2
regularizer of least squares problem for each slice of current TT core.
[rcond parameter for np.linalg.lstsq]
Returns:
:tt.vector: xNew
completed TT vector
:list: fit
list of functional values at each sweep
'''
indices = cooP['indices']
values = cooP['values']
[P, d] = indices.shape
assert P == len(values)
timeVal = time.clock()
if x0 is None:
x = tt.rand(shape, r = ttRank)
x = x.round(0.)
x = (1./x.norm())*x
else:
x = copy.deepcopy(x0)
assert d == x.d
# TODO: also check if cooP indices are aligned with shape
normP = np.linalg.norm(values)
values /= normP
fitList = []
sweepTimeList = []
initTime = time.clock() - timeVal
timeVal = time.clock()
coreList = tt.vector.to_list(x)
#coreList = orthLRFull(coreList, mu = d, splitResult = False)
# orthTime = time.clock() - timeVal
if verbose:
print("Initialization time: %.3f seconds (proc.time)" % (initTime))
# print "Orthogonalizing time: %.3f seconds (proc.time)" % (orthTime)
for sweep in xrange(maxnsweeps):
sweepStart = time.clock()
# list left + right
[kStart, kEnd, kStep] = [0, d, 1]
# select direction of sweep
'''
if sweep % 2 == 0: # left to rigth
[kStart, kEnd, kStep] = [0, d, 1]
else: # right to left
[kStart, kEnd, kStep] = [d-1, -1, -1]
'''
# fix k-th core to update
for k in xrange(kStart, kEnd, kStep):
[r1, n, r2] = coreList[k].shape
core = np.zeros([r1, n, r2])
leftU = []
rightV = []
if k > 0:
leftU = coreList[:k]
if k < d-1:
rightV = coreList[k+1:]
for i in xrange(n):
thetaI = np.where(indices[:, k] == i)[0]
if len(thetaI) > 0:
A = np.zeros([len(thetaI), r1*r2])
for j in xrange(len(thetaI)):
tmp = getRow(leftU, rightV, indices[thetaI[j], :])
A[j:j+1, :] += tmp # .flatten(order = 'F')
vecCoreSlice, _, _, _ = np.linalg.lstsq(A, values[thetaI])#, rcond = alpha)
# 0.5*np.linalg.norm(np.dot(A, vecCoreSlice) - values[thetaI])**2.
core[:, i, :] += reshape(vecCoreSlice, [r1, r2]) ####
'''
if k < (d-1):
core = reshape(core, [r1*n, r2])
Q, R = np.linalg.qr(core)
rnew = Q.shape[1]
core = reshape(Q, [r1, n, rnew])
coreList[k+1] = np.einsum('ijk,li->ljk', coreList[k+1], R)
'''
coreList[k] = core.copy()
'''
else:
if (k > 0):
core = reshape(core, [r1, n*r2])
Q, R = np.linalg.qr(core.T)
rnew = Q.shape[1]
core = reshape(Q.T, [rnew, n, r2])
coreList[k-1] = np.einsum('ijk,lk->ijl', coreList[k-1], R)
'''
xNew = tt.vector.from_list(coreList)
fit = computeFunctional(xNew, cooP)
fitList.append(fit)
if fit < tol:
break
if sweep > 0:
if abs(fit - fitList[-2]) < tol:
break
sweepTimeList.append(time.clock() - sweepStart)
if verbose:
print("sweep %d/%d\t fit value: %.5e\t time: %.3f seconds (proc.time)" % (sweep+1, maxnsweeps, fit, sweepTimeList[-1]))
if verbose:
print("Total sweep time: %.3f seconds (proc.time)\t Total time: %.3f seconds (proc.time)" % (sum(sweepTimeList), sum(sweepTimeList) + initTime))# + orthTime)
info = {'fit': fitList, 'initTime': initTime, 'sweepTime': sweepTimeList} # 'orthTime': orthTime,
xNew *= normP
values *= normP
return xNew, info |
def enter_alternate_screen(self):
"""
Go to alternate screen buffer.
"""
if not self._in_alternate_screen:
GENERIC_READ = 0x80000000
GENERIC_WRITE = 0x40000000
# Create a new console buffer and activate that one.
handle = self._winapi(windll.kernel32.CreateConsoleScreenBuffer, GENERIC_READ|GENERIC_WRITE,
DWORD(0), None, DWORD(1), None)
self._winapi(windll.kernel32.SetConsoleActiveScreenBuffer, handle)
self.hconsole = handle
self._in_alternate_screen = True | Go to alternate screen buffer. | Below is the instruction that describes the task:
### Input:
Go to alternate screen buffer.
### Response:
def enter_alternate_screen(self):
"""
Go to alternate screen buffer.
"""
if not self._in_alternate_screen:
GENERIC_READ = 0x80000000
GENERIC_WRITE = 0x40000000
# Create a new console buffer and activate that one.
handle = self._winapi(windll.kernel32.CreateConsoleScreenBuffer, GENERIC_READ|GENERIC_WRITE,
DWORD(0), None, DWORD(1), None)
self._winapi(windll.kernel32.SetConsoleActiveScreenBuffer, handle)
self.hconsole = handle
self._in_alternate_screen = True |
def normalizeHSP(hsp, queryLen, diamondTask):
"""
Examine an HSP and return information about where the query and subject
match begins and ends. Return a dict with keys that allow the query to
be displayed against the subject. The returned readStartInSubject and
readEndInSubject indices are offsets into the subject. I.e., they
indicate where in the subject the query falls.
In the returned object, all indices are suitable for Python string
slicing etc. We must be careful to convert from the 1-based offsets
found in DIAMOND output properly.
hsp['frame'] is a value from {-3, -2, -1, 1, 2, 3}. The sign indicates
negative or positive sense (i.e., the direction of reading through the
query to get the alignment). The frame value is the nucleotide match offset
modulo 3, plus one (i.e., it tells us which of the 3 possible query reading
frames was used in the match).
NOTE: the returned readStartInSubject value may be negative. We consider
the subject sequence to start at offset 0. So if the query string has
sufficient additional nucleotides before the start of the alignment
match, it may protrude to the left of the subject. Similarly, the returned
readEndInSubject can be greater than the subjectEnd.
@param hsp: an HSP in the form of a C{dict}, built from a DIAMOND record.
All passed offsets are 1-based.
@param queryLen: the length of the query sequence.
@param diamondTask: The C{str} command-line matching algorithm that was
run (either 'blastx' or 'blastp').
@return: A C{dict} with C{str} keys and C{int} offset values. Keys are
readStart
readEnd
readStartInSubject
readEndInSubject
subjectStart
subjectEnd
The returned offset values are all zero-based.
"""
queryGaps, subjectGaps = countGaps(hsp['btop'])
# Make some variables using Python's standard string indexing (start
# offset included, end offset not). No calculations in this function
# are done with the original 1-based HSP variables.
queryStart = hsp['query_start'] - 1
queryEnd = hsp['query_end']
subjectStart = hsp['sbjct_start'] - 1
subjectEnd = hsp['sbjct_end']
queryReversed = hsp['frame'] < 0
# Query offsets must be ascending, unless we're looking at blastx output
# and the query was reversed for the match.
if queryStart >= queryEnd:
if diamondTask == 'blastx' and queryReversed:
# Compute new query start and end indices, based on their
# distance from the end of the string.
#
# Above we took one off the start index, so we need to undo
# that (because the start is actually the end). We didn't take
# one off the end index, and need to do that now (because the
# end is actually the start).
queryStart = queryLen - (queryStart + 1)
queryEnd = queryLen - (queryEnd - 1)
else:
_debugPrint(hsp, queryLen, locals(), 'queryStart >= queryEnd')
if diamondTask == 'blastx':
# In DIAMOND blastx output, subject offsets are based on protein
# sequence length but queries (and the reported offsets) are
# nucleotide. Convert the query offsets to protein because we will
# plot against the subject (protein).
#
# Convert queryLen and the query nucleotide start and end offsets
# to be valid for the query after translation to AAs. When
# translating, DIAMOND may ignore some nucleotides at the start
# and/or the end of the original DNA query. At the start this is
# due to the frame in use, and at the end it is due to always using
# three nucleotides at a time to form codons.
#
# So, for example, a query of 6 nucleotides that is translated in
# frame 2 (i.e., the translation starts from the second nucleotide)
# will have length 1 as an AA sequence. The first nucleotide is
# ignored due to the frame and the last two due to there not being
# enough final nucleotides to make another codon.
#
# In the following, the subtraction accounts for the first form of
# loss and the integer division for the second.
initiallyIgnored = abs(hsp['frame']) - 1
queryLen = (queryLen - initiallyIgnored) // 3
queryStart = (queryStart - initiallyIgnored) // 3
queryEnd = (queryEnd - initiallyIgnored) // 3
# unmatchedQueryLeft is the number of query bases that will extend
# to the left of the start of the subject in our plots.
unmatchedQueryLeft = queryStart
# Set the query offsets into the subject.
queryStartInSubject = subjectStart - unmatchedQueryLeft
queryEndInSubject = queryStartInSubject + queryLen + queryGaps
_sanityCheck(subjectStart, subjectEnd, queryStart, queryEnd,
queryStartInSubject, queryEndInSubject, hsp, queryLen,
subjectGaps, queryGaps, locals())
return {
'readStart': queryStart,
'readEnd': queryEnd,
'readStartInSubject': queryStartInSubject,
'readEndInSubject': queryEndInSubject,
'subjectStart': subjectStart,
'subjectEnd': subjectEnd,
} | Examine an HSP and return information about where the query and subject
match begins and ends. Return a dict with keys that allow the query to
be displayed against the subject. The returned readStartInSubject and
readEndInSubject indices are offsets into the subject. I.e., they
indicate where in the subject the query falls.
In the returned object, all indices are suitable for Python string
slicing etc. We must be careful to convert from the 1-based offsets
found in DIAMOND output properly.
hsp['frame'] is a value from {-3, -2, -1, 1, 2, 3}. The sign indicates
negative or positive sense (i.e., the direction of reading through the
query to get the alignment). The frame value is the nucleotide match offset
modulo 3, plus one (i.e., it tells us which of the 3 possible query reading
frames was used in the match).
NOTE: the returned readStartInSubject value may be negative. We consider
the subject sequence to start at offset 0. So if the query string has
sufficient additional nucleotides before the start of the alignment
match, it may protrude to the left of the subject. Similarly, the returned
readEndInSubject can be greater than the subjectEnd.
@param hsp: an HSP in the form of a C{dict}, built from a DIAMOND record.
All passed offsets are 1-based.
@param queryLen: the length of the query sequence.
@param diamondTask: The C{str} command-line matching algorithm that was
run (either 'blastx' or 'blastp').
@return: A C{dict} with C{str} keys and C{int} offset values. Keys are
readStart
readEnd
readStartInSubject
readEndInSubject
subjectStart
subjectEnd
The returned offset values are all zero-based. | Below is the instruction that describes the task:
### Input:
Examine an HSP and return information about where the query and subject
match begins and ends. Return a dict with keys that allow the query to
be displayed against the subject. The returned readStartInSubject and
readEndInSubject indices are offsets into the subject. I.e., they
indicate where in the subject the query falls.
In the returned object, all indices are suitable for Python string
slicing etc. We must be careful to convert from the 1-based offsets
found in DIAMOND output properly.
hsp['frame'] is a value from {-3, -2, -1, 1, 2, 3}. The sign indicates
negative or positive sense (i.e., the direction of reading through the
query to get the alignment). The frame value is the nucleotide match offset
modulo 3, plus one (i.e., it tells us which of the 3 possible query reading
frames was used in the match).
NOTE: the returned readStartInSubject value may be negative. We consider
the subject sequence to start at offset 0. So if the query string has
sufficient additional nucleotides before the start of the alignment
match, it may protrude to the left of the subject. Similarly, the returned
readEndInSubject can be greater than the subjectEnd.
@param hsp: an HSP in the form of a C{dict}, built from a DIAMOND record.
All passed offsets are 1-based.
@param queryLen: the length of the query sequence.
@param diamondTask: The C{str} command-line matching algorithm that was
run (either 'blastx' or 'blastp').
@return: A C{dict} with C{str} keys and C{int} offset values. Keys are
readStart
readEnd
readStartInSubject
readEndInSubject
subjectStart
subjectEnd
The returned offset values are all zero-based.
### Response:
def normalizeHSP(hsp, queryLen, diamondTask):
"""
Examine an HSP and return information about where the query and subject
match begins and ends. Return a dict with keys that allow the query to
be displayed against the subject. The returned readStartInSubject and
readEndInSubject indices are offsets into the subject. I.e., they
indicate where in the subject the query falls.
In the returned object, all indices are suitable for Python string
slicing etc. We must be careful to convert from the 1-based offsets
found in DIAMOND output properly.
hsp['frame'] is a value from {-3, -2, -1, 1, 2, 3}. The sign indicates
negative or positive sense (i.e., the direction of reading through the
query to get the alignment). The frame value is the nucleotide match offset
modulo 3, plus one (i.e., it tells us which of the 3 possible query reading
frames was used in the match).
NOTE: the returned readStartInSubject value may be negative. We consider
the subject sequence to start at offset 0. So if the query string has
sufficient additional nucleotides before the start of the alignment
match, it may protrude to the left of the subject. Similarly, the returned
readEndInSubject can be greater than the subjectEnd.
@param hsp: an HSP in the form of a C{dict}, built from a DIAMOND record.
All passed offsets are 1-based.
@param queryLen: the length of the query sequence.
@param diamondTask: The C{str} command-line matching algorithm that was
run (either 'blastx' or 'blastp').
@return: A C{dict} with C{str} keys and C{int} offset values. Keys are
readStart
readEnd
readStartInSubject
readEndInSubject
subjectStart
subjectEnd
The returned offset values are all zero-based.
"""
queryGaps, subjectGaps = countGaps(hsp['btop'])
# Make some variables using Python's standard string indexing (start
# offset included, end offset not). No calculations in this function
# are done with the original 1-based HSP variables.
queryStart = hsp['query_start'] - 1
queryEnd = hsp['query_end']
subjectStart = hsp['sbjct_start'] - 1
subjectEnd = hsp['sbjct_end']
queryReversed = hsp['frame'] < 0
# Query offsets must be ascending, unless we're looking at blastx output
# and the query was reversed for the match.
if queryStart >= queryEnd:
if diamondTask == 'blastx' and queryReversed:
# Compute new query start and end indices, based on their
# distance from the end of the string.
#
# Above we took one off the start index, so we need to undo
# that (because the start is actually the end). We didn't take
# one off the end index, and need to do that now (because the
# end is actually the start).
queryStart = queryLen - (queryStart + 1)
queryEnd = queryLen - (queryEnd - 1)
else:
_debugPrint(hsp, queryLen, locals(), 'queryStart >= queryEnd')
if diamondTask == 'blastx':
# In DIAMOND blastx output, subject offsets are based on protein
# sequence length but queries (and the reported offsets) are
# nucleotide. Convert the query offsets to protein because we will
# plot against the subject (protein).
#
# Convert queryLen and the query nucleotide start and end offsets
# to be valid for the query after translation to AAs. When
# translating, DIAMOND may ignore some nucleotides at the start
# and/or the end of the original DNA query. At the start this is
# due to the frame in use, and at the end it is due to always using
# three nucleotides at a time to form codons.
#
# So, for example, a query of 6 nucleotides that is translated in
# frame 2 (i.e., the translation starts from the second nucleotide)
# will have length 1 as an AA sequence. The first nucleotide is
# ignored due to the frame and the last two due to there not being
# enough final nucleotides to make another codon.
#
# In the following, the subtraction accounts for the first form of
# loss and the integer division for the second.
initiallyIgnored = abs(hsp['frame']) - 1
queryLen = (queryLen - initiallyIgnored) // 3
queryStart = (queryStart - initiallyIgnored) // 3
queryEnd = (queryEnd - initiallyIgnored) // 3
# unmatchedQueryLeft is the number of query bases that will extend
# to the left of the start of the subject in our plots.
unmatchedQueryLeft = queryStart
# Set the query offsets into the subject.
queryStartInSubject = subjectStart - unmatchedQueryLeft
queryEndInSubject = queryStartInSubject + queryLen + queryGaps
_sanityCheck(subjectStart, subjectEnd, queryStart, queryEnd,
queryStartInSubject, queryEndInSubject, hsp, queryLen,
subjectGaps, queryGaps, locals())
return {
'readStart': queryStart,
'readEnd': queryEnd,
'readStartInSubject': queryStartInSubject,
'readEndInSubject': queryEndInSubject,
'subjectStart': subjectStart,
'subjectEnd': subjectEnd,
} |
def returner(ret):
'''
Write the return data to a file on the minion.
'''
opts = _get_options(ret)
try:
with salt.utils.files.flopen(opts['filename'], 'a') as logfile:
salt.utils.json.dump(ret, logfile)
logfile.write(str('\n')) # future lint: disable=blacklisted-function
except Exception:
log.error('Could not write to rawdata_json file %s', opts['filename'])
raise | Write the return data to a file on the minion. | Below is the instruction that describes the task:
### Input:
Write the return data to a file on the minion.
### Response:
def returner(ret):
'''
Write the return data to a file on the minion.
'''
opts = _get_options(ret)
try:
with salt.utils.files.flopen(opts['filename'], 'a') as logfile:
salt.utils.json.dump(ret, logfile)
logfile.write(str('\n')) # future lint: disable=blacklisted-function
except Exception:
log.error('Could not write to rawdata_json file %s', opts['filename'])
raise |
def _reset_file_descriptors(self):
"""Close open file descriptors and redirect standard streams."""
if self.close_open_files:
# Attempt to determine the max number of open files
max_fds = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
if max_fds == resource.RLIM_INFINITY:
# If the limit is infinity, use a more reasonable limit
max_fds = 2048
else:
# If we're not closing all open files, we at least need to
# reset STDIN, STDOUT, and STDERR.
max_fds = 3
for fd in range(max_fds):
try:
os.close(fd)
except OSError:
# The file descriptor probably wasn't open
pass
# Redirect STDIN, STDOUT, and STDERR to /dev/null
devnull_fd = os.open(os.devnull, os.O_RDWR)
os.dup2(devnull_fd, 0)
os.dup2(devnull_fd, 1)
os.dup2(devnull_fd, 2) | Close open file descriptors and redirect standard streams. | Below is the instruction that describes the task:
### Input:
Close open file descriptors and redirect standard streams.
### Response:
def _reset_file_descriptors(self):
"""Close open file descriptors and redirect standard streams."""
if self.close_open_files:
# Attempt to determine the max number of open files
max_fds = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
if max_fds == resource.RLIM_INFINITY:
# If the limit is infinity, use a more reasonable limit
max_fds = 2048
else:
# If we're not closing all open files, we at least need to
# reset STDIN, STDOUT, and STDERR.
max_fds = 3
for fd in range(max_fds):
try:
os.close(fd)
except OSError:
# The file descriptor probably wasn't open
pass
# Redirect STDIN, STDOUT, and STDERR to /dev/null
devnull_fd = os.open(os.devnull, os.O_RDWR)
os.dup2(devnull_fd, 0)
os.dup2(devnull_fd, 1)
os.dup2(devnull_fd, 2) |
def hex_to_rgb(self, h):
"""Converts a valid hex color string to an RGB array."""
rgb = (self.hex_to_red(h), self.hex_to_green(h), self.hex_to_blue(h))
return rgb | Converts a valid hex color string to an RGB array. | Below is the instruction that describes the task:
### Input:
Converts a valid hex color string to an RGB array.
### Response:
def hex_to_rgb(self, h):
"""Converts a valid hex color string to an RGB array."""
rgb = (self.hex_to_red(h), self.hex_to_green(h), self.hex_to_blue(h))
return rgb |
def _get_settings(self):
"""
Return any settings defined by the user, as well as any pre-defined
settings files that exist for the image modalities to be registered.
"""
# If user-defined settings exist...
if isdefined(self.inputs.settings):
# Note this in the log and return those settings.
NIWORKFLOWS_LOG.info('User-defined settings, overriding defaults')
return self.inputs.settings
# Define a prefix for output files based on the modality of the moving image.
filestart = '{}-mni_registration_{}_'.format(
self.inputs.moving.lower(), self.inputs.flavor)
# Get a list of settings files that match the flavor.
filenames = [i for i in pkgr.resource_listdir('niworkflows', 'data')
if i.startswith(filestart) and i.endswith('.json')]
# Return the settings files.
return [pkgr.resource_filename('niworkflows.data', f)
for f in sorted(filenames)] | Return any settings defined by the user, as well as any pre-defined
settings files that exist for the image modalities to be registered. | Below is the instruction that describes the task:
### Input:
Return any settings defined by the user, as well as any pre-defined
settings files that exist for the image modalities to be registered.
### Response:
def _get_settings(self):
"""
Return any settings defined by the user, as well as any pre-defined
settings files that exist for the image modalities to be registered.
"""
# If user-defined settings exist...
if isdefined(self.inputs.settings):
# Note this in the log and return those settings.
NIWORKFLOWS_LOG.info('User-defined settings, overriding defaults')
return self.inputs.settings
# Define a prefix for output files based on the modality of the moving image.
filestart = '{}-mni_registration_{}_'.format(
self.inputs.moving.lower(), self.inputs.flavor)
# Get a list of settings files that match the flavor.
filenames = [i for i in pkgr.resource_listdir('niworkflows', 'data')
if i.startswith(filestart) and i.endswith('.json')]
# Return the settings files.
return [pkgr.resource_filename('niworkflows.data', f)
for f in sorted(filenames)] |
def cid_ce(x, normalize):
"""
This function calculator is an estimate for a time series complexity [1] (A more complex time series has more peaks,
valleys etc.). It calculates the value of
.. math::
\\sqrt{ \\sum_{i=0}^{n-2lag} ( x_{i} - x_{i+1})^2 }
.. rubric:: References
| [1] Batista, Gustavo EAPA, et al (2014).
| CID: an efficient complexity-invariant distance for time series.
| Data Mining and Knowledge Discovery 28.3 (2014): 634-669.
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:param normalize: should the time series be z-transformed?
:type normalize: bool
:return: the value of this feature
:return type: float
"""
if not isinstance(x, (np.ndarray, pd.Series)):
x = np.asarray(x)
if normalize:
s = np.std(x)
if s!=0:
x = (x - np.mean(x))/s
else:
return 0.0
x = np.diff(x)
return np.sqrt(np.dot(x, x)) | This function calculator is an estimate for a time series complexity [1] (A more complex time series has more peaks,
valleys etc.). It calculates the value of
.. math::
\\sqrt{ \\sum_{i=0}^{n-2lag} ( x_{i} - x_{i+1})^2 }
.. rubric:: References
| [1] Batista, Gustavo EAPA, et al (2014).
| CID: an efficient complexity-invariant distance for time series.
| Data Mining and Knowledge Discovery 28.3 (2014): 634-669.
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:param normalize: should the time series be z-transformed?
:type normalize: bool
:return: the value of this feature
:return type: float | Below is the instruction that describes the task:
### Input:
This function calculator is an estimate for a time series complexity [1] (A more complex time series has more peaks,
valleys etc.). It calculates the value of
.. math::
\\sqrt{ \\sum_{i=0}^{n-2lag} ( x_{i} - x_{i+1})^2 }
.. rubric:: References
| [1] Batista, Gustavo EAPA, et al (2014).
| CID: an efficient complexity-invariant distance for time series.
| Data Mining and Knowledge Discovery 28.3 (2014): 634-669.
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:param normalize: should the time series be z-transformed?
:type normalize: bool
:return: the value of this feature
:return type: float
### Response:
def cid_ce(x, normalize):
"""
This function calculator is an estimate for a time series complexity [1] (A more complex time series has more peaks,
valleys etc.). It calculates the value of
.. math::
\\sqrt{ \\sum_{i=0}^{n-2lag} ( x_{i} - x_{i+1})^2 }
.. rubric:: References
| [1] Batista, Gustavo EAPA, et al (2014).
| CID: an efficient complexity-invariant distance for time series.
| Data Mining and Knowledge Discovery 28.3 (2014): 634-669.
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:param normalize: should the time series be z-transformed?
:type normalize: bool
:return: the value of this feature
:return type: float
"""
if not isinstance(x, (np.ndarray, pd.Series)):
x = np.asarray(x)
if normalize:
s = np.std(x)
if s!=0:
x = (x - np.mean(x))/s
else:
return 0.0
x = np.diff(x)
return np.sqrt(np.dot(x, x)) |
def by_id(self, region, encrypted_summoner_id):
"""
Get a summoner by summoner ID.
:param string region: The region to execute this request on
:param string encrypted_summoner_id: Summoner ID
:returns: SummonerDTO: represents a summoner
"""
url, query = SummonerApiV4Urls.by_id(
region=region, encrypted_summoner_id=encrypted_summoner_id
)
return self._raw_request(self.by_id.__name__, region, url, query) | Get a summoner by summoner ID.
:param string region: The region to execute this request on
:param string encrypted_summoner_id: Summoner ID
:returns: SummonerDTO: represents a summoner | Below is the instruction that describes the task:
### Input:
Get a summoner by summoner ID.
:param string region: The region to execute this request on
:param string encrypted_summoner_id: Summoner ID
:returns: SummonerDTO: represents a summoner
### Response:
def by_id(self, region, encrypted_summoner_id):
"""
Get a summoner by summoner ID.
:param string region: The region to execute this request on
:param string encrypted_summoner_id: Summoner ID
:returns: SummonerDTO: represents a summoner
"""
url, query = SummonerApiV4Urls.by_id(
region=region, encrypted_summoner_id=encrypted_summoner_id
)
return self._raw_request(self.by_id.__name__, region, url, query) |
def Depends(self, target, dependency):
"""Explicity specify that 'target's depend on 'dependency'."""
tlist = self.arg2nodes(target, self.fs.Entry)
dlist = self.arg2nodes(dependency, self.fs.Entry)
for t in tlist:
t.add_dependency(dlist)
return tlist | Explicity specify that 'target's depend on 'dependency'. | Below is the instruction that describes the task:
### Input:
Explicity specify that 'target's depend on 'dependency'.
### Response:
def Depends(self, target, dependency):
"""Explicity specify that 'target's depend on 'dependency'."""
tlist = self.arg2nodes(target, self.fs.Entry)
dlist = self.arg2nodes(dependency, self.fs.Entry)
for t in tlist:
t.add_dependency(dlist)
return tlist |
def timeid(self, data: ['SASdata', str] = None,
by: str = None,
id: str = None,
out: [str, 'SASdata'] = None,
procopts: str = None,
stmtpassthrough: str = None,
**kwargs: dict) -> 'SASresults':
"""
Python method to call the TIMEID procedure
Documentation link:
http://support.sas.com/documentation/cdl//en/etsug/68148/HTML/default/viewer.htm#etsug_timeid_syntax.htm
:param data: SASdata object or string. This parameter is required.
:parm by: The by variable can only be a string type.
:parm id: The id variable can only be a string type.
:parm out: The out variable can be a string or SASdata type.
:parm procopts: The procopts variable is a generic option available for advanced use. It can only be a string type.
:parm stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type.
:return: SAS Result Object
""" | Python method to call the TIMEID procedure
Documentation link:
http://support.sas.com/documentation/cdl//en/etsug/68148/HTML/default/viewer.htm#etsug_timeid_syntax.htm
:param data: SASdata object or string. This parameter is required.
:parm by: The by variable can only be a string type.
:parm id: The id variable can only be a string type.
:parm out: The out variable can be a string or SASdata type.
:parm procopts: The procopts variable is a generic option available for advanced use. It can only be a string type.
:parm stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type.
:return: SAS Result Object | Below is the instruction that describes the task:
### Input:
Python method to call the TIMEID procedure
Documentation link:
http://support.sas.com/documentation/cdl//en/etsug/68148/HTML/default/viewer.htm#etsug_timeid_syntax.htm
:param data: SASdata object or string. This parameter is required.
:parm by: The by variable can only be a string type.
:parm id: The id variable can only be a string type.
:parm out: The out variable can be a string or SASdata type.
:parm procopts: The procopts variable is a generic option available for advanced use. It can only be a string type.
:parm stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type.
:return: SAS Result Object
### Response:
def timeid(self, data: ['SASdata', str] = None,
by: str = None,
id: str = None,
out: [str, 'SASdata'] = None,
procopts: str = None,
stmtpassthrough: str = None,
**kwargs: dict) -> 'SASresults':
"""
Python method to call the TIMEID procedure
Documentation link:
http://support.sas.com/documentation/cdl//en/etsug/68148/HTML/default/viewer.htm#etsug_timeid_syntax.htm
:param data: SASdata object or string. This parameter is required.
:parm by: The by variable can only be a string type.
:parm id: The id variable can only be a string type.
:parm out: The out variable can be a string or SASdata type.
:parm procopts: The procopts variable is a generic option available for advanced use. It can only be a string type.
:parm stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type.
:return: SAS Result Object
""" |
def _trigger_events(view_obj, events_map, additional_kw=None):
""" Common logic to trigger before/after events.
:param view_obj: Instance of View that processes the request.
:param events_map: Map of events from which event class should be
picked.
:returns: Instance if triggered event.
"""
if additional_kw is None:
additional_kw = {}
event_kwargs = _get_event_kwargs(view_obj)
if event_kwargs is None:
return
event_kwargs.update(additional_kw)
event_cls = _get_event_cls(view_obj, events_map)
event = event_cls(**event_kwargs)
view_obj.request.registry.notify(event)
return event | Common logic to trigger before/after events.
:param view_obj: Instance of View that processes the request.
:param events_map: Map of events from which event class should be
picked.
:returns: Instance if triggered event. | Below is the instruction that describes the task:
### Input:
Common logic to trigger before/after events.
:param view_obj: Instance of View that processes the request.
:param events_map: Map of events from which event class should be
picked.
:returns: Instance if triggered event.
### Response:
def _trigger_events(view_obj, events_map, additional_kw=None):
""" Common logic to trigger before/after events.
:param view_obj: Instance of View that processes the request.
:param events_map: Map of events from which event class should be
picked.
:returns: Instance if triggered event.
"""
if additional_kw is None:
additional_kw = {}
event_kwargs = _get_event_kwargs(view_obj)
if event_kwargs is None:
return
event_kwargs.update(additional_kw)
event_cls = _get_event_cls(view_obj, events_map)
event = event_cls(**event_kwargs)
view_obj.request.registry.notify(event)
return event |
def WaitForJobChange(r, job_id, fields, prev_job_info, prev_log_serial):
"""
Waits for job changes.
@type job_id: int
@param job_id: Job ID for which to wait
"""
body = {
"fields": fields,
"previous_job_info": prev_job_info,
"previous_log_serial": prev_log_serial,
}
return r.request("get", "/2/jobs/%s/wait" % job_id, content=body) | Waits for job changes.
@type job_id: int
@param job_id: Job ID for which to wait | Below is the instruction that describes the task:
### Input:
Waits for job changes.
@type job_id: int
@param job_id: Job ID for which to wait
### Response:
def WaitForJobChange(r, job_id, fields, prev_job_info, prev_log_serial):
"""
Waits for job changes.
@type job_id: int
@param job_id: Job ID for which to wait
"""
body = {
"fields": fields,
"previous_job_info": prev_job_info,
"previous_log_serial": prev_log_serial,
}
return r.request("get", "/2/jobs/%s/wait" % job_id, content=body) |
def parse(self):
"""
Processes the files for each IRQ and each CPU in terms of the differences.
Also produces accumulated interrupt count differences for each set of Ethernet IRQs.
Generally Ethernet has 8 TxRx IRQs thus all are combined so that one can see the overall interrupts being generated by the NIC.
Simplified Interrupt File Format: (See examples for example log)
CPU0 CPU1
2014-10-29 00:27:42.15161 59: 29 2 IR-IO-APIC-edge timer
2014-10-29 00:27:42.15161 60: 2123 0 IR-PCI-MSI-edge eth0
CPU0 CPU1
2014-10-29 00:27:42.15161 59: 29 2 IR-IO-APIC-edge timer
2014-10-29 00:27:42.15161 60: 2123 0 IR-PCI-MSI-edge eth0
:returns: True or False whether parsing was successful or not.
"""
if not os.path.isdir(self.outdir):
os.makedirs(self.outdir)
if not os.path.isdir(self.resource_directory):
os.makedirs(self.resource_directory)
data = {}
for input_file in self.infile_list:
logger.info('Processing : %s', input_file)
timestamp_format = None
with open(input_file, 'r') as infile:
# Get the header for this file
cpus = self.find_header(infile)
if len(cpus) == 0: # Make sure we have header otherwise go to next file
logger.error("Header not found for file: %s", input_file)
continue
# Parse the actual file after header
prev_data = None # Stores the previous interval's log data
curr_data = {} # Stores the current interval's log data
eth_data = {}
for line in infile:
if self.is_header_line(line): # New section so save old and aggregate ETH
prev_data = curr_data
curr_data = {}
# New section so store the collected Ethernet data
# Example Aggregate metric: PROCINTERRUPTS.AGGREGATE.eth0
for eth in eth_data:
outcsv = self.get_csv('AGGREGATE', eth)
if outcsv not in data:
data[outcsv] = []
data[outcsv].append(ts + ',' + str(eth_data[eth]))
eth_data = {}
continue
words = line.split()
if len(words) <= 4: # Does not have any CPU data so skip
continue
# Process timestamp or determine timestamp
ts = words[0] + " " + words[1]
if not timestamp_format or timestamp_format == 'unknown':
timestamp_format = naarad.utils.detect_timestamp_format(ts)
if timestamp_format == 'unknown':
continue
ts = naarad.utils.get_standardized_timestamp(ts, timestamp_format)
if self.ts_out_of_range(ts): # See if time is in range
continue
# Process data lines
# Note that some IRQs such as ERR and MIS do not have device nor ascii name
device = words[2].strip(':') # Get IRQ Number/Name
if re.match("\d+", device):
# Devices with digits need ASCII name if exists
if (4 + len(cpus)) < len(words):
device = words[4 + len(cpus)] + "-IRQ" + device
else:
device = "IRQ" + device
else:
# For devices with IRQ # that aren't digits then has description
device = "-".join(words[(3 + len(cpus)):]) + "-IRQ" + device
# Deal with each column worth of data
for (cpu, datum) in zip(cpus, words[3:]):
if self.CPUS and cpu not in self.CPUS: # Skip if config defines which CPUs to look at
continue
outcsv = self.get_csv(cpu, device)
curr_data[outcsv] = int(datum)
if outcsv in data:
datum = int(datum) - prev_data[outcsv] # prev_data exists since outcsv exists in data
else:
data[outcsv] = []
datum = 0 # First data point is set to 0
# Store data point
data[outcsv].append(ts + ',' + str(datum))
# Deal with accumulating aggregate data for Ethernet
m = re.search("(?P<eth>eth\d)", device)
if m:
eth = m.group('eth')
if eth not in eth_data:
eth_data[eth] = 0
eth_data[eth] += datum
# Post processing, putting data in csv files
for csv in data.keys():
self.csv_files.append(csv)
with open(csv, 'w') as csvf:
csvf.write('\n'.join(sorted(data[csv])))
return True | Processes the files for each IRQ and each CPU in terms of the differences.
Also produces accumulated interrupt count differences for each set of Ethernet IRQs.
Generally Ethernet has 8 TxRx IRQs thus all are combined so that one can see the overall interrupts being generated by the NIC.
Simplified Interrupt File Format: (See examples for example log)
CPU0 CPU1
2014-10-29 00:27:42.15161 59: 29 2 IR-IO-APIC-edge timer
2014-10-29 00:27:42.15161 60: 2123 0 IR-PCI-MSI-edge eth0
CPU0 CPU1
2014-10-29 00:27:42.15161 59: 29 2 IR-IO-APIC-edge timer
2014-10-29 00:27:42.15161 60: 2123 0 IR-PCI-MSI-edge eth0
:returns: True or False whether parsing was successful or not. | Below is the instruction that describes the task:
### Input:
Processes the files for each IRQ and each CPU in terms of the differences.
Also produces accumulated interrupt count differences for each set of Ethernet IRQs.
Generally Ethernet has 8 TxRx IRQs thus all are combined so that one can see the overall interrupts being generated by the NIC.
Simplified Interrupt File Format: (See examples for example log)
CPU0 CPU1
2014-10-29 00:27:42.15161 59: 29 2 IR-IO-APIC-edge timer
2014-10-29 00:27:42.15161 60: 2123 0 IR-PCI-MSI-edge eth0
CPU0 CPU1
2014-10-29 00:27:42.15161 59: 29 2 IR-IO-APIC-edge timer
2014-10-29 00:27:42.15161 60: 2123 0 IR-PCI-MSI-edge eth0
:returns: True or False whether parsing was successful or not.
### Response:
def parse(self):
"""
Processes the files for each IRQ and each CPU in terms of the differences.
Also produces accumulated interrupt count differences for each set of Ethernet IRQs.
Generally Ethernet has 8 TxRx IRQs thus all are combined so that one can see the overall interrupts being generated by the NIC.
Simplified Interrupt File Format: (See examples for example log)
CPU0 CPU1
2014-10-29 00:27:42.15161 59: 29 2 IR-IO-APIC-edge timer
2014-10-29 00:27:42.15161 60: 2123 0 IR-PCI-MSI-edge eth0
CPU0 CPU1
2014-10-29 00:27:42.15161 59: 29 2 IR-IO-APIC-edge timer
2014-10-29 00:27:42.15161 60: 2123 0 IR-PCI-MSI-edge eth0
:returns: True or False whether parsing was successful or not.
"""
if not os.path.isdir(self.outdir):
os.makedirs(self.outdir)
if not os.path.isdir(self.resource_directory):
os.makedirs(self.resource_directory)
data = {}
for input_file in self.infile_list:
logger.info('Processing : %s', input_file)
timestamp_format = None
with open(input_file, 'r') as infile:
# Get the header for this file
cpus = self.find_header(infile)
if len(cpus) == 0: # Make sure we have header otherwise go to next file
logger.error("Header not found for file: %s", input_file)
continue
# Parse the actual file after header
prev_data = None # Stores the previous interval's log data
curr_data = {} # Stores the current interval's log data
eth_data = {}
for line in infile:
if self.is_header_line(line): # New section so save old and aggregate ETH
prev_data = curr_data
curr_data = {}
# New section so store the collected Ethernet data
# Example Aggregate metric: PROCINTERRUPTS.AGGREGATE.eth0
for eth in eth_data:
outcsv = self.get_csv('AGGREGATE', eth)
if outcsv not in data:
data[outcsv] = []
data[outcsv].append(ts + ',' + str(eth_data[eth]))
eth_data = {}
continue
words = line.split()
if len(words) <= 4: # Does not have any CPU data so skip
continue
# Process timestamp or determine timestamp
ts = words[0] + " " + words[1]
if not timestamp_format or timestamp_format == 'unknown':
timestamp_format = naarad.utils.detect_timestamp_format(ts)
if timestamp_format == 'unknown':
continue
ts = naarad.utils.get_standardized_timestamp(ts, timestamp_format)
if self.ts_out_of_range(ts): # See if time is in range
continue
# Process data lines
# Note that some IRQs such as ERR and MIS do not have device nor ascii name
device = words[2].strip(':') # Get IRQ Number/Name
if re.match("\d+", device):
# Devices with digits need ASCII name if exists
if (4 + len(cpus)) < len(words):
device = words[4 + len(cpus)] + "-IRQ" + device
else:
device = "IRQ" + device
else:
# For devices with IRQ # that aren't digits then has description
device = "-".join(words[(3 + len(cpus)):]) + "-IRQ" + device
# Deal with each column worth of data
for (cpu, datum) in zip(cpus, words[3:]):
if self.CPUS and cpu not in self.CPUS: # Skip if config defines which CPUs to look at
continue
outcsv = self.get_csv(cpu, device)
curr_data[outcsv] = int(datum)
if outcsv in data:
datum = int(datum) - prev_data[outcsv] # prev_data exists since outcsv exists in data
else:
data[outcsv] = []
datum = 0 # First data point is set to 0
# Store data point
data[outcsv].append(ts + ',' + str(datum))
# Deal with accumulating aggregate data for Ethernet
m = re.search("(?P<eth>eth\d)", device)
if m:
eth = m.group('eth')
if eth not in eth_data:
eth_data[eth] = 0
eth_data[eth] += datum
# Post processing, putting data in csv files
for csv in data.keys():
self.csv_files.append(csv)
with open(csv, 'w') as csvf:
csvf.write('\n'.join(sorted(data[csv])))
return True |
def deployment_groups(self):
"""
Gets the Deployment Groups API client.
Returns:
DeploymentGroups:
"""
if not self.__deployment_groups:
self.__deployment_groups = DeploymentGroups(self.__connection)
return self.__deployment_groups | Gets the Deployment Groups API client.
Returns:
DeploymentGroups: | Below is the instruction that describes the task:
### Input:
Gets the Deployment Groups API client.
Returns:
DeploymentGroups:
### Response:
def deployment_groups(self):
"""
Gets the Deployment Groups API client.
Returns:
DeploymentGroups:
"""
if not self.__deployment_groups:
self.__deployment_groups = DeploymentGroups(self.__connection)
return self.__deployment_groups |
def copychildren(self, newdoc=None, idsuffix=""):
"""Generator creating a deep copy of the children of this element.
Invokes :meth:`copy` on all children, parameters are the same.
"""
if idsuffix is True: idsuffix = ".copy." + "%08x" % random.getrandbits(32) #random 32-bit hash for each copy, same one will be reused for all children
for c in self:
if isinstance(c, AbstractElement):
yield c.copy(newdoc,idsuffix) | Generator creating a deep copy of the children of this element.
Invokes :meth:`copy` on all children, parameters are the same. | Below is the instruction that describes the task:
### Input:
Generator creating a deep copy of the children of this element.
Invokes :meth:`copy` on all children, parameters are the same.
### Response:
def copychildren(self, newdoc=None, idsuffix=""):
"""Generator creating a deep copy of the children of this element.
Invokes :meth:`copy` on all children, parameters are the same.
"""
if idsuffix is True: idsuffix = ".copy." + "%08x" % random.getrandbits(32) #random 32-bit hash for each copy, same one will be reused for all children
for c in self:
if isinstance(c, AbstractElement):
yield c.copy(newdoc,idsuffix) |
def script(state, host, filename, chdir=None):
'''
Upload and execute a local script on the remote host.
+ filename: local script filename to upload & execute
+ chdir: directory to cd into before executing the script
'''
temp_file = state.get_temp_filename(filename)
yield files.put(state, host, filename, temp_file)
yield chmod(temp_file, '+x')
if chdir:
yield 'cd {0} && {1}'.format(chdir, temp_file)
else:
yield temp_file | Upload and execute a local script on the remote host.
+ filename: local script filename to upload & execute
+ chdir: directory to cd into before executing the script | Below is the instruction that describes the task:
### Input:
Upload and execute a local script on the remote host.
+ filename: local script filename to upload & execute
+ chdir: directory to cd into before executing the script
### Response:
def script(state, host, filename, chdir=None):
'''
Upload and execute a local script on the remote host.
+ filename: local script filename to upload & execute
+ chdir: directory to cd into before executing the script
'''
temp_file = state.get_temp_filename(filename)
yield files.put(state, host, filename, temp_file)
yield chmod(temp_file, '+x')
if chdir:
yield 'cd {0} && {1}'.format(chdir, temp_file)
else:
yield temp_file |
def init_static_combine():
"""
Process static combine, create md5 key according each static filename
"""
from uliweb import settings
from hashlib import md5
import os
d = {}
if settings.get_var('STATIC_COMBINE_CONFIG/enable', False):
for k, v in settings.get('STATIC_COMBINE', {}).items():
key = '_cmb_'+md5(''.join(v)).hexdigest()+os.path.splitext(v[0])[1]
d[key] = v
return d | Process static combine, create md5 key according each static filename | Below is the instruction that describes the task:
### Input:
Process static combine, create md5 key according each static filename
### Response:
def init_static_combine():
"""
Process static combine, create md5 key according each static filename
"""
from uliweb import settings
from hashlib import md5
import os
d = {}
if settings.get_var('STATIC_COMBINE_CONFIG/enable', False):
for k, v in settings.get('STATIC_COMBINE', {}).items():
key = '_cmb_'+md5(''.join(v)).hexdigest()+os.path.splitext(v[0])[1]
d[key] = v
return d |
def interval(coro, interval=1, times=None, loop=None):
"""
Schedules the execution of a coroutine function every `x` amount of
seconds.
The function returns an `asyncio.Task`, which implements also an
`asyncio.Future` interface, allowing the user to cancel the execution
cycle.
This function can be used as decorator.
Arguments:
coro (coroutinefunction): coroutine function to defer.
interval (int/float): number of seconds to repeat the coroutine
execution.
times (int): optional maximum time of executions. Infinite by default.
loop (asyncio.BaseEventLoop, optional): loop to run.
Defaults to asyncio.get_event_loop().
Raises:
TypeError: if coro argument is not a coroutine function.
Returns:
future (asyncio.Task): coroutine wrapped as task future.
Useful for cancellation and state checking.
Usage::
# Usage as function
future = paco.interval(coro, 1)
# Cancel it after a while...
await asyncio.sleep(5)
future.cancel()
# Usage as decorator
@paco.interval(10)
async def metrics():
await send_metrics()
future = await metrics()
"""
assert_corofunction(coro=coro)
# Store maximum allowed number of calls
times = int(times or 0) or float('inf')
@asyncio.coroutine
def schedule(times, *args, **kw):
while times > 0:
# Decrement times counter
times -= 1
# Schedule coroutine
yield from coro(*args, **kw)
yield from asyncio.sleep(interval)
def wrapper(*args, **kw):
return ensure_future(schedule(times, *args, **kw), loop=loop)
return wrapper | Schedules the execution of a coroutine function every `x` amount of
seconds.
The function returns an `asyncio.Task`, which implements also an
`asyncio.Future` interface, allowing the user to cancel the execution
cycle.
This function can be used as decorator.
Arguments:
coro (coroutinefunction): coroutine function to defer.
interval (int/float): number of seconds to repeat the coroutine
execution.
times (int): optional maximum time of executions. Infinite by default.
loop (asyncio.BaseEventLoop, optional): loop to run.
Defaults to asyncio.get_event_loop().
Raises:
TypeError: if coro argument is not a coroutine function.
Returns:
future (asyncio.Task): coroutine wrapped as task future.
Useful for cancellation and state checking.
Usage::
# Usage as function
future = paco.interval(coro, 1)
# Cancel it after a while...
await asyncio.sleep(5)
future.cancel()
# Usage as decorator
@paco.interval(10)
async def metrics():
await send_metrics()
future = await metrics() | Below is the instruction that describes the task:
### Input:
Schedules the execution of a coroutine function every `x` amount of
seconds.
The function returns an `asyncio.Task`, which implements also an
`asyncio.Future` interface, allowing the user to cancel the execution
cycle.
This function can be used as decorator.
Arguments:
coro (coroutinefunction): coroutine function to defer.
interval (int/float): number of seconds to repeat the coroutine
execution.
times (int): optional maximum time of executions. Infinite by default.
loop (asyncio.BaseEventLoop, optional): loop to run.
Defaults to asyncio.get_event_loop().
Raises:
TypeError: if coro argument is not a coroutine function.
Returns:
future (asyncio.Task): coroutine wrapped as task future.
Useful for cancellation and state checking.
Usage::
# Usage as function
future = paco.interval(coro, 1)
# Cancel it after a while...
await asyncio.sleep(5)
future.cancel()
# Usage as decorator
@paco.interval(10)
async def metrics():
await send_metrics()
future = await metrics()
### Response:
def interval(coro, interval=1, times=None, loop=None):
"""
Schedules the execution of a coroutine function every `x` amount of
seconds.
The function returns an `asyncio.Task`, which implements also an
`asyncio.Future` interface, allowing the user to cancel the execution
cycle.
This function can be used as decorator.
Arguments:
coro (coroutinefunction): coroutine function to defer.
interval (int/float): number of seconds to repeat the coroutine
execution.
times (int): optional maximum time of executions. Infinite by default.
loop (asyncio.BaseEventLoop, optional): loop to run.
Defaults to asyncio.get_event_loop().
Raises:
TypeError: if coro argument is not a coroutine function.
Returns:
future (asyncio.Task): coroutine wrapped as task future.
Useful for cancellation and state checking.
Usage::
# Usage as function
future = paco.interval(coro, 1)
# Cancel it after a while...
await asyncio.sleep(5)
future.cancel()
# Usage as decorator
@paco.interval(10)
async def metrics():
await send_metrics()
future = await metrics()
"""
assert_corofunction(coro=coro)
# Store maximum allowed number of calls
times = int(times or 0) or float('inf')
@asyncio.coroutine
def schedule(times, *args, **kw):
while times > 0:
# Decrement times counter
times -= 1
# Schedule coroutine
yield from coro(*args, **kw)
yield from asyncio.sleep(interval)
def wrapper(*args, **kw):
return ensure_future(schedule(times, *args, **kw), loop=loop)
return wrapper |
def notebook_exists(self, notebook_id):
"""Does a notebook exist?"""
if notebook_id not in self.mapping:
return False
path = self.get_path_by_name(self.mapping[notebook_id])
return os.path.isfile(path) | Does a notebook exist? | Below is the instruction that describes the task:
### Input:
Does a notebook exist?
### Response:
def notebook_exists(self, notebook_id):
"""Does a notebook exist?"""
if notebook_id not in self.mapping:
return False
path = self.get_path_by_name(self.mapping[notebook_id])
return os.path.isfile(path) |
def replace_initializer_configuration(self, name, body, **kwargs): # noqa: E501
"""replace_initializer_configuration # noqa: E501
replace the specified InitializerConfiguration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_initializer_configuration(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the InitializerConfiguration (required)
:param V1alpha1InitializerConfiguration body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1alpha1InitializerConfiguration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_initializer_configuration_with_http_info(name, body, **kwargs) # noqa: E501
else:
(data) = self.replace_initializer_configuration_with_http_info(name, body, **kwargs) # noqa: E501
return data | replace_initializer_configuration # noqa: E501
replace the specified InitializerConfiguration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_initializer_configuration(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the InitializerConfiguration (required)
:param V1alpha1InitializerConfiguration body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1alpha1InitializerConfiguration
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
replace_initializer_configuration # noqa: E501
replace the specified InitializerConfiguration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_initializer_configuration(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the InitializerConfiguration (required)
:param V1alpha1InitializerConfiguration body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1alpha1InitializerConfiguration
If the method is called asynchronously,
returns the request thread.
### Response:
def replace_initializer_configuration(self, name, body, **kwargs): # noqa: E501
"""replace_initializer_configuration # noqa: E501
replace the specified InitializerConfiguration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_initializer_configuration(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the InitializerConfiguration (required)
:param V1alpha1InitializerConfiguration body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1alpha1InitializerConfiguration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_initializer_configuration_with_http_info(name, body, **kwargs) # noqa: E501
else:
(data) = self.replace_initializer_configuration_with_http_info(name, body, **kwargs) # noqa: E501
return data |
def get_most_recent_network_by_name(self, name: str) -> Optional[Network]:
"""Get the most recently created network with the given name."""
return self.session.query(Network).filter(Network.name == name).order_by(Network.created.desc()).first() | Get the most recently created network with the given name. | Below is the instruction that describes the task:
### Input:
Get the most recently created network with the given name.
### Response:
def get_most_recent_network_by_name(self, name: str) -> Optional[Network]:
"""Get the most recently created network with the given name."""
return self.session.query(Network).filter(Network.name == name).order_by(Network.created.desc()).first() |
def total_rated_level(octave_frequencies):
"""
Calculates the A-rated total sound pressure level
based on octave band frequencies
"""
sums = 0.0
for band in OCTAVE_BANDS.keys():
if band not in octave_frequencies:
continue
if octave_frequencies[band] is None:
continue
if octave_frequencies[band] == 0:
continue
sums += pow(10.0, ((float(octave_frequencies[band]) + OCTAVE_BANDS[band][1]) / 10.0))
level = 10.0 * math.log10(sums)
return level | Calculates the A-rated total sound pressure level
based on octave band frequencies | Below is the instruction that describes the task:
### Input:
Calculates the A-rated total sound pressure level
based on octave band frequencies
### Response:
def total_rated_level(octave_frequencies):
"""
Calculates the A-rated total sound pressure level
based on octave band frequencies
"""
sums = 0.0
for band in OCTAVE_BANDS.keys():
if band not in octave_frequencies:
continue
if octave_frequencies[band] is None:
continue
if octave_frequencies[band] == 0:
continue
sums += pow(10.0, ((float(octave_frequencies[band]) + OCTAVE_BANDS[band][1]) / 10.0))
level = 10.0 * math.log10(sums)
return level |
def to_fmt(self):
"""
Return an Fmt representation for pretty-printing
"""
params = ""
txt = fmt.sep(" ", ['fun'])
name = self.show_name()
if name != "":
txt.lsdata.append(name)
tparams = []
if self.tparams is not None:
tparams = list(self.tparams)
if self.variadic:
tparams.append('...')
params = '(' + ", ".join(tparams) + ')'
txt.lsdata.append(': ' + params)
txt.lsdata.append('-> ' + self.tret)
return txt | Return an Fmt representation for pretty-printing | Below is the instruction that describes the task:
### Input:
Return an Fmt representation for pretty-printing
### Response:
def to_fmt(self):
"""
Return an Fmt representation for pretty-printing
"""
params = ""
txt = fmt.sep(" ", ['fun'])
name = self.show_name()
if name != "":
txt.lsdata.append(name)
tparams = []
if self.tparams is not None:
tparams = list(self.tparams)
if self.variadic:
tparams.append('...')
params = '(' + ", ".join(tparams) + ')'
txt.lsdata.append(': ' + params)
txt.lsdata.append('-> ' + self.tret)
return txt |
def convert_from_bytes_if_necessary(prefix, suffix):
"""
Depending on how we extract data from pysam we may end up with either
a string or a byte array of nucleotides. For consistency and simplicity,
we want to only use strings in the rest of our code.
"""
if isinstance(prefix, bytes):
prefix = prefix.decode('ascii')
if isinstance(suffix, bytes):
suffix = suffix.decode('ascii')
return prefix, suffix | Depending on how we extract data from pysam we may end up with either
a string or a byte array of nucleotides. For consistency and simplicity,
we want to only use strings in the rest of our code. | Below is the instruction that describes the task:
### Input:
Depending on how we extract data from pysam we may end up with either
a string or a byte array of nucleotides. For consistency and simplicity,
we want to only use strings in the rest of our code.
### Response:
def convert_from_bytes_if_necessary(prefix, suffix):
"""
Depending on how we extract data from pysam we may end up with either
a string or a byte array of nucleotides. For consistency and simplicity,
we want to only use strings in the rest of our code.
"""
if isinstance(prefix, bytes):
prefix = prefix.decode('ascii')
if isinstance(suffix, bytes):
suffix = suffix.decode('ascii')
return prefix, suffix |
def save(self, filename, compressed=True):
""" Save a tensor to disk. """
# check for data
if not self.has_data:
return False
# read ext and save accordingly
_, file_ext = os.path.splitext(filename)
if compressed:
if file_ext != COMPRESSED_TENSOR_EXT:
raise ValueError('Can only save compressed tensor with %s extension' %(COMPRESSED_TENSOR_EXT))
np.savez_compressed(filename,
self.data[:self.cur_index,...])
else:
if file_ext != TENSOR_EXT:
raise ValueError('Can only save tensor with .npy extension')
np.save(filename, self.data[:self.cur_index,...])
return True | Save a tensor to disk. | Below is the instruction that describes the task:
### Input:
Save a tensor to disk.
### Response:
def save(self, filename, compressed=True):
""" Save a tensor to disk. """
# check for data
if not self.has_data:
return False
# read ext and save accordingly
_, file_ext = os.path.splitext(filename)
if compressed:
if file_ext != COMPRESSED_TENSOR_EXT:
raise ValueError('Can only save compressed tensor with %s extension' %(COMPRESSED_TENSOR_EXT))
np.savez_compressed(filename,
self.data[:self.cur_index,...])
else:
if file_ext != TENSOR_EXT:
raise ValueError('Can only save tensor with .npy extension')
np.save(filename, self.data[:self.cur_index,...])
return True |
def compare(self, textOrFingerprint1, textOrFingerprint2):
"""Returns the semantic similarity of texts or fingerprints. Each argument can be eiter a text or a fingerprint.
Args:
textOrFingerprint1, str OR list of integers
textOrFingerprint2, str OR list of integers
Returns:
float: the semantic similarity in the range [0;1]
Raises:
CorticalioException: if the request was not successful
"""
compareList = [self._createDictionary(textOrFingerprint1), self._createDictionary(textOrFingerprint2)]
metric = self._fullClient.compare(json.dumps(compareList))
return metric.cosineSimilarity | Returns the semantic similarity of texts or fingerprints. Each argument can be either a text or a fingerprint.
Args:
textOrFingerprint1, str OR list of integers
textOrFingerprint2, str OR list of integers
Returns:
float: the semantic similarity in the range [0;1]
Raises:
CorticalioException: if the request was not successful | Below is the instruction that describes the task:
### Input:
Returns the semantic similarity of texts or fingerprints. Each argument can be either a text or a fingerprint.
Args:
textOrFingerprint1, str OR list of integers
textOrFingerprint2, str OR list of integers
Returns:
float: the semantic similarity in the range [0;1]
Raises:
CorticalioException: if the request was not successful
### Response:
def compare(self, textOrFingerprint1, textOrFingerprint2):
"""Returns the semantic similarity of texts or fingerprints. Each argument can be eiter a text or a fingerprint.
Args:
textOrFingerprint1, str OR list of integers
textOrFingerprint2, str OR list of integers
Returns:
float: the semantic similarity in the range [0;1]
Raises:
CorticalioException: if the request was not successful
"""
compareList = [self._createDictionary(textOrFingerprint1), self._createDictionary(textOrFingerprint2)]
metric = self._fullClient.compare(json.dumps(compareList))
return metric.cosineSimilarity |
def show_ring(devname):
'''
Queries the specified network device for rx/tx ring parameter information
CLI Example:
.. code-block:: bash
salt '*' ethtool.show_ring <devname>
'''
try:
ring = ethtool.get_ringparam(devname)
except IOError:
log.error('Ring parameters not supported on %s', devname)
return 'Not supported'
ret = {}
for key, value in ring.items():
ret[ethtool_ring_remap[key]] = ring[key]
return ret | Queries the specified network device for rx/tx ring parameter information
CLI Example:
.. code-block:: bash
salt '*' ethtool.show_ring <devname> | Below is the instruction that describes the task:
### Input:
Queries the specified network device for rx/tx ring parameter information
CLI Example:
.. code-block:: bash
salt '*' ethtool.show_ring <devname>
### Response:
def show_ring(devname):
'''
Queries the specified network device for rx/tx ring parameter information
CLI Example:
.. code-block:: bash
salt '*' ethtool.show_ring <devname>
'''
try:
ring = ethtool.get_ringparam(devname)
except IOError:
log.error('Ring parameters not supported on %s', devname)
return 'Not supported'
ret = {}
for key, value in ring.items():
ret[ethtool_ring_remap[key]] = ring[key]
return ret |
def __convert_string(node):
"""Converts a StringProperty node to JSON format."""
converted = __convert_node(node, default_flags=vsflags(VSFlags.UserValue))
return __check_for_flag(converted) | Converts a StringProperty node to JSON format. | Below is the instruction that describes the task:
### Input:
Converts a StringProperty node to JSON format.
### Response:
def __convert_string(node):
"""Converts a StringProperty node to JSON format."""
converted = __convert_node(node, default_flags=vsflags(VSFlags.UserValue))
return __check_for_flag(converted) |
def maps(self):
"""
A dictionary of dictionaries.
Each dictionary defines a map which is used to extend the metadata.
The precise way maps interact with the metadata is defined by `figure.fit._extend_meta`.
That method should be redefined or extended to suit specific use cases.
"""
if not hasattr(self, '_maps'):
maps = {}
maps['tex_symbol'] = {}
maps['siunitx'] = {}
maps['value_transforms'] = {
'__default__': lambda x: round(x, 2),
}
self._maps = maps
return self._maps | A dictionary of dictionaries.
Each dictionary defines a map which is used to extend the metadata.
The precise way maps interact with the metadata is defined by `figure.fit._extend_meta`.
That method should be redefined or extended to suit specific use cases. | Below is the instruction that describes the task:
### Input:
A dictionary of dictionaries.
Each dictionary defines a map which is used to extend the metadata.
The precise way maps interact with the metadata is defined by `figure.fit._extend_meta`.
That method should be redefined or extended to suit specific use cases.
### Response:
def maps(self):
"""
A dictionary of dictionaries.
Each dictionary defines a map which is used to extend the metadata.
The precise way maps interact with the metadata is defined by `figure.fit._extend_meta`.
That method should be redefined or extended to suit specific use cases.
"""
if not hasattr(self, '_maps'):
maps = {}
maps['tex_symbol'] = {}
maps['siunitx'] = {}
maps['value_transforms'] = {
'__default__': lambda x: round(x, 2),
}
self._maps = maps
return self._maps |
def get_method_returning_field_value(self, field_name):
"""
Field values can be obtained from view or core.
"""
return (
super().get_method_returning_field_value(field_name)
or self.core.get_method_returning_field_value(field_name)
) | Field values can be obtained from view or core. | Below is the instruction that describes the task:
### Input:
Field values can be obtained from view or core.
### Response:
def get_method_returning_field_value(self, field_name):
"""
Field values can be obtained from view or core.
"""
return (
super().get_method_returning_field_value(field_name)
or self.core.get_method_returning_field_value(field_name)
) |
def correct(self, z):
'''Correct the given approximate solution ``z`` with respect to the
linear system ``linear_system`` and the deflation space defined by
``U``.'''
c = self.linear_system.Ml*(
self.linear_system.b - self.linear_system.A*z)
c = utils.inner(self.W, c, ip_B=self.ip_B)
if self.Q is not None and self.R is not None:
c = scipy.linalg.solve_triangular(self.R, self.Q.T.conj().dot(c))
if self.WR is not self.VR:
c = self.WR.dot(scipy.linalg.solve_triangular(self.VR, c))
return z + self.W.dot(c) | Correct the given approximate solution ``z`` with respect to the
linear system ``linear_system`` and the deflation space defined by
``U``. | Below is the instruction that describes the task:
### Input:
Correct the given approximate solution ``z`` with respect to the
linear system ``linear_system`` and the deflation space defined by
``U``.
### Response:
def correct(self, z):
'''Correct the given approximate solution ``z`` with respect to the
linear system ``linear_system`` and the deflation space defined by
``U``.'''
c = self.linear_system.Ml*(
self.linear_system.b - self.linear_system.A*z)
c = utils.inner(self.W, c, ip_B=self.ip_B)
if self.Q is not None and self.R is not None:
c = scipy.linalg.solve_triangular(self.R, self.Q.T.conj().dot(c))
if self.WR is not self.VR:
c = self.WR.dot(scipy.linalg.solve_triangular(self.VR, c))
return z + self.W.dot(c) |
def is_fresh(self, freshness):
"""Return False if given freshness value has expired, else True."""
if self.expire_after is None:
return True
return self.freshness() - freshness <= self.expire_after | Return False if given freshness value has expired, else True. | Below is the instruction that describes the task:
### Input:
Return False if given freshness value has expired, else True.
### Response:
def is_fresh(self, freshness):
"""Return False if given freshness value has expired, else True."""
if self.expire_after is None:
return True
return self.freshness() - freshness <= self.expire_after |
def multiply(x1, x2, output_shape=None, name=None):
"""Binary multiplication with broadcasting.
Args:
x1: a Tensor
x2: a Tensor
output_shape: an optional Shape
name: an optional string
Returns:
a Tensor
"""
if not isinstance(x2, Tensor):
return ScalarMultiplyOperation(x1, x2).outputs[0]
with tf.name_scope(name, default_name="mul"):
x1, x2 = binary_arguments_to_tensors(x1, x2)
return einsum(
[x1, x2],
output_shape=_infer_binary_broadcast_shape(
x1.shape, x2.shape, output_shape)) | Binary multiplication with broadcasting.
Args:
x1: a Tensor
x2: a Tensor
output_shape: an optional Shape
name: an optional string
Returns:
a Tensor | Below is the instruction that describes the task:
### Input:
Binary multiplication with broadcasting.
Args:
x1: a Tensor
x2: a Tensor
output_shape: an optional Shape
name: an optional string
Returns:
a Tensor
### Response:
def multiply(x1, x2, output_shape=None, name=None):
"""Binary multiplication with broadcasting.
Args:
x1: a Tensor
x2: a Tensor
output_shape: an optional Shape
name: an optional string
Returns:
a Tensor
"""
if not isinstance(x2, Tensor):
return ScalarMultiplyOperation(x1, x2).outputs[0]
with tf.name_scope(name, default_name="mul"):
x1, x2 = binary_arguments_to_tensors(x1, x2)
return einsum(
[x1, x2],
output_shape=_infer_binary_broadcast_shape(
x1.shape, x2.shape, output_shape)) |
def _bowtie_args_from_config(data):
"""Configurable high level options for bowtie.
"""
config = data['config']
qual_format = config["algorithm"].get("quality_format", "")
if qual_format.lower() == "illumina":
qual_flags = ["--phred64-quals"]
else:
qual_flags = []
multi_mappers = config["algorithm"].get("multiple_mappers", True)
multi_flags = ["-M", 1] if multi_mappers else ["-m", 1]
multi_flags = [] if data["analysis"].lower().startswith("smallrna-seq") else multi_flags
cores = config.get("resources", {}).get("bowtie", {}).get("cores", None)
num_cores = config["algorithm"].get("num_cores", 1)
core_flags = ["-p", str(num_cores)] if num_cores > 1 else []
return core_flags + qual_flags + multi_flags | Configurable high level options for bowtie. | Below is the instruction that describes the task:
### Input:
Configurable high level options for bowtie.
### Response:
def _bowtie_args_from_config(data):
"""Configurable high level options for bowtie.
"""
config = data['config']
qual_format = config["algorithm"].get("quality_format", "")
if qual_format.lower() == "illumina":
qual_flags = ["--phred64-quals"]
else:
qual_flags = []
multi_mappers = config["algorithm"].get("multiple_mappers", True)
multi_flags = ["-M", 1] if multi_mappers else ["-m", 1]
multi_flags = [] if data["analysis"].lower().startswith("smallrna-seq") else multi_flags
cores = config.get("resources", {}).get("bowtie", {}).get("cores", None)
num_cores = config["algorithm"].get("num_cores", 1)
core_flags = ["-p", str(num_cores)] if num_cores > 1 else []
return core_flags + qual_flags + multi_flags |
def cli(ctx, value, metadata=""):
"""Add a canned value
Output:
A dictionary containing canned value description
"""
return ctx.gi.cannedvalues.add_value(value, metadata=metadata) | Add a canned value
Output:
A dictionary containing canned value description | Below is the instruction that describes the task:
### Input:
Add a canned value
Output:
A dictionary containing canned value description
### Response:
def cli(ctx, value, metadata=""):
"""Add a canned value
Output:
A dictionary containing canned value description
"""
return ctx.gi.cannedvalues.add_value(value, metadata=metadata) |
def derivative(self, x, der=1):
""" return the derivative a an array of input values
x : the inputs
der : the order of derivative
"""
from scipy.interpolate import splev
return splev(x, self._sp, der=der) | return the derivative of an array of input values
x : the inputs
der : the order of derivative | Below is the instruction that describes the task:
### Input:
return the derivative of an array of input values
x : the inputs
der : the order of derivative
### Response:
def derivative(self, x, der=1):
""" return the derivative a an array of input values
x : the inputs
der : the order of derivative
"""
from scipy.interpolate import splev
return splev(x, self._sp, der=der) |
def _main(self, client, bucket, key, upload_id, parts, extra_args):
"""
:param client: The client to use when calling CompleteMultipartUpload
:param bucket: The name of the bucket to upload to
:param key: The name of the key to upload to
:param upload_id: The id of the upload
:param parts: A list of parts to use to complete the multipart upload::
[{'Etag': etag_value, 'PartNumber': part_number}, ...]
Each element in the list consists of a return value from
``UploadPartTask.main()``.
:param extra_args: A dictionary of any extra arguments that may be
used in completing the multipart transfer.
"""
client.complete_multipart_upload(
Bucket=bucket, Key=key, UploadId=upload_id,
MultipartUpload={'Parts': parts},
**extra_args) | :param client: The client to use when calling CompleteMultipartUpload
:param bucket: The name of the bucket to upload to
:param key: The name of the key to upload to
:param upload_id: The id of the upload
:param parts: A list of parts to use to complete the multipart upload::
[{'Etag': etag_value, 'PartNumber': part_number}, ...]
Each element in the list consists of a return value from
``UploadPartTask.main()``.
:param extra_args: A dictionary of any extra arguments that may be
used in completing the multipart transfer. | Below is the instruction that describes the task:
### Input:
:param client: The client to use when calling CompleteMultipartUpload
:param bucket: The name of the bucket to upload to
:param key: The name of the key to upload to
:param upload_id: The id of the upload
:param parts: A list of parts to use to complete the multipart upload::
[{'Etag': etag_value, 'PartNumber': part_number}, ...]
Each element in the list consists of a return value from
``UploadPartTask.main()``.
:param extra_args: A dictionary of any extra arguments that may be
used in completing the multipart transfer.
### Response:
def _main(self, client, bucket, key, upload_id, parts, extra_args):
"""
:param client: The client to use when calling CompleteMultipartUpload
:param bucket: The name of the bucket to upload to
:param key: The name of the key to upload to
:param upload_id: The id of the upload
:param parts: A list of parts to use to complete the multipart upload::
[{'Etag': etag_value, 'PartNumber': part_number}, ...]
Each element in the list consists of a return value from
``UploadPartTask.main()``.
:param extra_args: A dictionary of any extra arguments that may be
used in completing the multipart transfer.
"""
client.complete_multipart_upload(
Bucket=bucket, Key=key, UploadId=upload_id,
MultipartUpload={'Parts': parts},
**extra_args) |
def _send_and_wait(self, **kwargs):
"""
Send a frame to either the local ZigBee or a remote device and wait
for a pre-defined amount of time for its response.
"""
frame_id = self.next_frame_id
kwargs.update(dict(frame_id=frame_id))
self._send(**kwargs)
timeout = datetime.now() + const.RX_TIMEOUT
while datetime.now() < timeout:
try:
frame = self._rx_frames.pop(frame_id)
raise_if_error(frame)
return frame
except KeyError:
sleep(0.1)
continue
_LOGGER.exception(
"Did not receive response within configured timeout period.")
raise exceptions.ZigBeeResponseTimeout() | Send a frame to either the local ZigBee or a remote device and wait
for a pre-defined amount of time for its response. | Below is the instruction that describes the task:
### Input:
Send a frame to either the local ZigBee or a remote device and wait
for a pre-defined amount of time for its response.
### Response:
def _send_and_wait(self, **kwargs):
"""
Send a frame to either the local ZigBee or a remote device and wait
for a pre-defined amount of time for its response.
"""
frame_id = self.next_frame_id
kwargs.update(dict(frame_id=frame_id))
self._send(**kwargs)
timeout = datetime.now() + const.RX_TIMEOUT
while datetime.now() < timeout:
try:
frame = self._rx_frames.pop(frame_id)
raise_if_error(frame)
return frame
except KeyError:
sleep(0.1)
continue
_LOGGER.exception(
"Did not receive response within configured timeout period.")
raise exceptions.ZigBeeResponseTimeout() |
def save(self, *args, **kwargs):
"""
call synchronizer "after_external_layer_saved" method
for any additional operation that must be executed after save
"""
after_save = kwargs.pop('after_save', True)
super(LayerExternal, self).save(*args, **kwargs)
# call after_external_layer_saved method of synchronizer
if after_save:
try:
synchronizer = self.synchronizer
except ImproperlyConfigured:
pass
else:
if synchronizer:
synchronizer.after_external_layer_saved(self.config)
# reload schema
self._reload_schema() | call synchronizer "after_external_layer_saved" method
for any additional operation that must be executed after save | Below is the the instruction that describes the task:
### Input:
call synchronizer "after_external_layer_saved" method
for any additional operation that must be executed after save
### Response:
def save(self, *args, **kwargs):
"""
call synchronizer "after_external_layer_saved" method
for any additional operation that must be executed after save
"""
after_save = kwargs.pop('after_save', True)
super(LayerExternal, self).save(*args, **kwargs)
# call after_external_layer_saved method of synchronizer
if after_save:
try:
synchronizer = self.synchronizer
except ImproperlyConfigured:
pass
else:
if synchronizer:
synchronizer.after_external_layer_saved(self.config)
# reload schema
self._reload_schema() |
def patch_network_latency(seconds=0.01):
""" Add random latency to all I/O operations """
# Accept float(0.1), "0.1", "0.1-0.2"
def sleep():
if isinstance(seconds, float):
time.sleep(seconds)
elif isinstance(seconds, basestring):
# pylint: disable=maybe-no-member
if "-" in seconds:
time.sleep(random.uniform(
float(seconds.split("-")[0]),
float(seconds.split("-")[1])
))
else:
time.sleep(float(seconds))
def _patched_method(old_method, *args, **kwargs):
sleep()
return old_method(*args, **kwargs)
socket_methods = [
"send", "sendall", "sendto", "recv", "recvfrom", "recvfrom_into", "recv_into",
"connect", "connect_ex", "close"
]
from socket import socket as _socketmodule
from gevent.socket import socket as _geventmodule
from gevent.ssl import SSLSocket as _sslmodule # pylint: disable=no-name-in-module
for method in socket_methods:
patch_method(_socketmodule, method, _patched_method)
patch_method(_geventmodule, method, _patched_method)
patch_method(_sslmodule, method, _patched_method) | Add random latency to all I/O operations | Below is the instruction that describes the task:
### Input:
Add random latency to all I/O operations
### Response:
def patch_network_latency(seconds=0.01):
""" Add random latency to all I/O operations """
# Accept float(0.1), "0.1", "0.1-0.2"
def sleep():
if isinstance(seconds, float):
time.sleep(seconds)
elif isinstance(seconds, basestring):
# pylint: disable=maybe-no-member
if "-" in seconds:
time.sleep(random.uniform(
float(seconds.split("-")[0]),
float(seconds.split("-")[1])
))
else:
time.sleep(float(seconds))
def _patched_method(old_method, *args, **kwargs):
sleep()
return old_method(*args, **kwargs)
socket_methods = [
"send", "sendall", "sendto", "recv", "recvfrom", "recvfrom_into", "recv_into",
"connect", "connect_ex", "close"
]
from socket import socket as _socketmodule
from gevent.socket import socket as _geventmodule
from gevent.ssl import SSLSocket as _sslmodule # pylint: disable=no-name-in-module
for method in socket_methods:
patch_method(_socketmodule, method, _patched_method)
patch_method(_geventmodule, method, _patched_method)
patch_method(_sslmodule, method, _patched_method) |
def corruptDenseVector(vector, noiseLevel):
"""
Corrupts a binary vector by inverting noiseLevel percent of its bits.
@param vector (array) binary vector to be corrupted
@param noiseLevel (float) amount of noise to be applied on the vector.
"""
size = len(vector)
for i in range(size):
rnd = random.random()
if rnd < noiseLevel:
if vector[i] == 1:
vector[i] = 0
else:
vector[i] = 1 | Corrupts a binary vector by inverting noiseLevel percent of its bits.
@param vector (array) binary vector to be corrupted
@param noiseLevel (float) amount of noise to be applied on the vector. | Below is the instruction that describes the task:
### Input:
Corrupts a binary vector by inverting noiseLevel percent of its bits.
@param vector (array) binary vector to be corrupted
@param noiseLevel (float) amount of noise to be applied on the vector.
### Response:
def corruptDenseVector(vector, noiseLevel):
"""
Corrupts a binary vector by inverting noiseLevel percent of its bits.
@param vector (array) binary vector to be corrupted
@param noiseLevel (float) amount of noise to be applied on the vector.
"""
size = len(vector)
for i in range(size):
rnd = random.random()
if rnd < noiseLevel:
if vector[i] == 1:
vector[i] = 0
else:
vector[i] = 1 |
def check_input_files(headerDir, sourceDir, containers=['vector', 'list', 'set', 'map'],
seqType='both', verbose=False):
"""Checks if source- and header-files, used as input when pre-processing MPL-containers, need fixing."""
# Check the input files for containers in their variadic form.
result1 = False
if seqType == "both" or seqType == "variadic":
if verbose:
print "Check if input files for pre-processing Boost.MPL variadic containers need fixing."
result1 = check_input_files_for_variadic_seq(headerDir, sourceDir)
if verbose:
if result1:
print " At least one input file needs fixing!"
else:
print " No input file needs fixing!"
# Check the input files for containers in their numbered form.
result2 = False
result3 = False
if seqType == "both" or seqType == "numbered":
if verbose:
print "Check input files for pre-processing Boost.MPL numbered containers."
result2 = check_input_files_for_numbered_seq(headerDir, ".hpp", containers)
result3 = check_input_files_for_numbered_seq(sourceDir, ".cpp", containers)
if verbose:
if result2 or result3:
print " At least one input file needs fixing!"
else:
print " No input file needs fixing!"
# Return result.
return result1 or result2 or result3 | Checks if source- and header-files, used as input when pre-processing MPL-containers, need fixing. | Below is the instruction that describes the task:
### Input:
Checks if source- and header-files, used as input when pre-processing MPL-containers, need fixing.
### Response:
def check_input_files(headerDir, sourceDir, containers=['vector', 'list', 'set', 'map'],
seqType='both', verbose=False):
"""Checks if source- and header-files, used as input when pre-processing MPL-containers, need fixing."""
# Check the input files for containers in their variadic form.
result1 = False
if seqType == "both" or seqType == "variadic":
if verbose:
print "Check if input files for pre-processing Boost.MPL variadic containers need fixing."
result1 = check_input_files_for_variadic_seq(headerDir, sourceDir)
if verbose:
if result1:
print " At least one input file needs fixing!"
else:
print " No input file needs fixing!"
# Check the input files for containers in their numbered form.
result2 = False
result3 = False
if seqType == "both" or seqType == "numbered":
if verbose:
print "Check input files for pre-processing Boost.MPL numbered containers."
result2 = check_input_files_for_numbered_seq(headerDir, ".hpp", containers)
result3 = check_input_files_for_numbered_seq(sourceDir, ".cpp", containers)
if verbose:
if result2 or result3:
print " At least one input file needs fixing!"
else:
print " No input file needs fixing!"
# Return result.
return result1 or result2 or result3 |
def random_connection(self):
'''Pick a random living connection'''
# While at the moment there's no need for this to be a context manager
# per se, I would like to use that interface since I anticipate
# adding some wrapping around it at some point.
yield random.choice(
[conn for conn in self.connections() if conn.alive()]) | Pick a random living connection | Below is the instruction that describes the task:
### Input:
Pick a random living connection
### Response:
def random_connection(self):
'''Pick a random living connection'''
# While at the moment there's no need for this to be a context manager
# per se, I would like to use that interface since I anticipate
# adding some wrapping around it at some point.
yield random.choice(
[conn for conn in self.connections() if conn.alive()]) |
def select_if(df, fun):
"""Selects columns where fun(ction) is true
Args:
fun: a function that will be applied to columns
"""
def _filter_f(col):
try:
return fun(df[col])
except:
return False
cols = list(filter(_filter_f, df.columns))
return df[cols] | Selects columns where fun(ction) is true
Args:
fun: a function that will be applied to columns | Below is the instruction that describes the task:
### Input:
Selects columns where fun(ction) is true
Args:
fun: a function that will be applied to columns
### Response:
def select_if(df, fun):
"""Selects columns where fun(ction) is true
Args:
fun: a function that will be applied to columns
"""
def _filter_f(col):
try:
return fun(df[col])
except:
return False
cols = list(filter(_filter_f, df.columns))
return df[cols] |
def drop(self):
"""Drop the table from the database.
Deletes both the schema and all the contents within it.
"""
with self.db.lock:
if self.exists:
self._threading_warn()
self.table.drop(self.db.executable, checkfirst=True)
self._table = None | Drop the table from the database.
Deletes both the schema and all the contents within it. | Below is the instruction that describes the task:
### Input:
Drop the table from the database.
Deletes both the schema and all the contents within it.
### Response:
def drop(self):
    """Drop the table from the database.

    Deletes both the schema and all the contents within it.  Does nothing
    if the table does not exist.  The database lock is held throughout.
    """
    with self.db.lock:
        if self.exists:
            # Presumably warns about unsafe cross-thread use -- confirm
            # against _threading_warn's implementation.
            self._threading_warn()
            # checkfirst=True: emit DROP TABLE only if the table is present.
            self.table.drop(self.db.executable, checkfirst=True)
            # Discard the cached table handle.
            self._table = None |
def get_or_create(self, **kwargs):
    """
    Looks up an object with the given kwargs, creating one if necessary.
    Returns a tuple of (object, created), where created is a boolean
    specifying whether an object was created.

    An optional ``defaults`` dict supplies extra attributes used only
    when creating, never for the lookup itself.
    """
    assert kwargs, \
        'get_or_create() must be passed at least one keyword argument'
    # 'defaults' participates in creation only; remove it from the lookup.
    defaults = kwargs.pop('defaults', {})
    lookup = kwargs.copy()
    #TODO: check fields
    try:
        return self.get(**lookup), False
    except self.model.DoesNotExist:
        # Strip lookup-style keys (containing '__'); they are query filters,
        # not constructor arguments.
        params = dict([(k, v) for k, v in kwargs.items() if '__' not in k])
        params.update(defaults)
        obj = self.model(**params)
        # Point the instance's meta at this manager's connection/index/type
        # (presumably Elasticsearch -- see get_es_connection) before saving.
        meta = obj.get_meta()
        meta.connection = get_es_connection(self.es_url, self.es_kwargs)
        meta.index=self.index
        meta.type=self.type
        obj.save(force=True)
        return obj, True | Looks up an object with the given kwargs, creating one if necessary.
Returns a tuple of (object, created), where created is a boolean
        specifying whether an object was created. | Below is the instruction that describes the task:
### Input:
Looks up an object with the given kwargs, creating one if necessary.
Returns a tuple of (object, created), where created is a boolean
specifying whether an object was created.
### Response:
def get_or_create(self, **kwargs):
    """
    Looks up an object with the given kwargs, creating one if necessary.
    Returns a tuple of (object, created), where created is a boolean
    specifying whether an object was created.

    An optional ``defaults`` dict supplies extra attributes used only
    when creating, never for the lookup itself.
    """
    assert kwargs, \
        'get_or_create() must be passed at least one keyword argument'
    # 'defaults' participates in creation only; remove it from the lookup.
    defaults = kwargs.pop('defaults', {})
    lookup = kwargs.copy()
    #TODO: check fields
    try:
        return self.get(**lookup), False
    except self.model.DoesNotExist:
        # Strip lookup-style keys (containing '__'); they are query filters,
        # not constructor arguments.
        params = dict([(k, v) for k, v in kwargs.items() if '__' not in k])
        params.update(defaults)
        obj = self.model(**params)
        # Point the instance's meta at this manager's connection/index/type
        # (presumably Elasticsearch -- see get_es_connection) before saving.
        meta = obj.get_meta()
        meta.connection = get_es_connection(self.es_url, self.es_kwargs)
        meta.index=self.index
        meta.type=self.type
        obj.save(force=True)
        return obj, True |
def from_string(self, repo, name, string):
    """
    Create a new Item from a data stream.

    :param repo: Repo object used as the new item's parent.
    :param name: Name of item; also stored as the item's path.
    :param string: Raw contents from which the underlying blob is built.
    :return: New Item class instance.
    :raises ItemError: if blob/item construction fails an assertion.
    """
    # NOTE(review): "except AssertionError, e" is Python 2-only syntax;
    # Python 3 requires "except AssertionError as e".
    try:
        log.debug('Creating new item: %s' % name)
        blob = Blob.from_string(string)
        item = Item(parent=repo, sha=blob.sha, path=name)
        item.blob = blob
        return item
    except AssertionError, e:
        raise ItemError(e) | Create a new Item from a data stream.
:param repo: Repo object.
:param name: Name of item.
:param data: Data stream.
        :return: New Item class instance. | Below is the instruction that describes the task:
### Input:
Create a new Item from a data stream.
:param repo: Repo object.
:param name: Name of item.
:param data: Data stream.
:return: New Item class instance.
### Response:
def from_string(self, repo, name, string):
    """
    Create a new Item from a data stream.

    :param repo: Repo object used as the new item's parent.
    :param name: Name of item; also stored as the item's path.
    :param string: Raw contents from which the underlying blob is built.
    :return: New Item class instance.
    :raises ItemError: if blob/item construction fails an assertion.
    """
    # NOTE(review): "except AssertionError, e" is Python 2-only syntax;
    # Python 3 requires "except AssertionError as e".
    try:
        log.debug('Creating new item: %s' % name)
        blob = Blob.from_string(string)
        item = Item(parent=repo, sha=blob.sha, path=name)
        item.blob = blob
        return item
    except AssertionError, e:
        raise ItemError(e) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.