code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def set_cached_commit_times(root_folder, parent_dir, first_commit, commit_times, sorted_relpaths):
"""
Set the cached commit times in a json file at cache_location(root_folder)
We first get what is currently in the cache and either modify the existing
entry for this combo of parent_dir and sorted_relpaths.
Or add to the entries.
We then ensure there's less than 5 entries to keep the cache from growing
too large (arbitrary number is arbitrary).
Finally, we write the cache or issue a warning if we can't.
"""
current = get_all_cached_commit_times(root_folder)
location = cache_location(root_folder)
found = False
for item in current:
if sorted(item.get("sorted_relpaths", [])) == sorted_relpaths and item.get("parent_dir") == parent_dir:
item["commit_times"] = commit_times
item["commit"] = str(first_commit)
found = True
break
if not found:
current.append({"commit": str(first_commit), "parent_dir": parent_dir, "commit_times": commit_times, "sorted_relpaths": sorted_relpaths})
# Make sure it doesn't grow too big....
# Arbitrary number is arbitrary
while len(current) > 5:
current.pop(0)
try:
log.info("Writing gitmit cached commit_times\tlocation=%s", location)
with open(location, "w") as fle:
json.dump(current, fle)
except (TypeError, ValueError, IOError) as error:
log.warning("Failed to dump gitmit mtime cache\tlocation=%s\terror=%s", location, error) | Set the cached commit times in a json file at cache_location(root_folder)
We first get what is currently in the cache and either modify the existing
entry for this combo of parent_dir and sorted_relpaths.
Or add to the entries.
We then ensure there's less than 5 entries to keep the cache from growing
too large (arbitrary number is arbitrary).
Finally, we write the cache or issue a warning if we can't. | Below is the the instruction that describes the task:
### Input:
Set the cached commit times in a json file at cache_location(root_folder)
We first get what is currently in the cache and either modify the existing
entry for this combo of parent_dir and sorted_relpaths.
Or add to the entries.
We then ensure there's less than 5 entries to keep the cache from growing
too large (arbitrary number is arbitrary).
Finally, we write the cache or issue a warning if we can't.
### Response:
def set_cached_commit_times(root_folder, parent_dir, first_commit, commit_times, sorted_relpaths):
"""
Set the cached commit times in a json file at cache_location(root_folder)
We first get what is currently in the cache and either modify the existing
entry for this combo of parent_dir and sorted_relpaths.
Or add to the entries.
We then ensure there's less than 5 entries to keep the cache from growing
too large (arbitrary number is arbitrary).
Finally, we write the cache or issue a warning if we can't.
"""
current = get_all_cached_commit_times(root_folder)
location = cache_location(root_folder)
found = False
for item in current:
if sorted(item.get("sorted_relpaths", [])) == sorted_relpaths and item.get("parent_dir") == parent_dir:
item["commit_times"] = commit_times
item["commit"] = str(first_commit)
found = True
break
if not found:
current.append({"commit": str(first_commit), "parent_dir": parent_dir, "commit_times": commit_times, "sorted_relpaths": sorted_relpaths})
# Make sure it doesn't grow too big....
# Arbitrary number is arbitrary
while len(current) > 5:
current.pop(0)
try:
log.info("Writing gitmit cached commit_times\tlocation=%s", location)
with open(location, "w") as fle:
json.dump(current, fle)
except (TypeError, ValueError, IOError) as error:
log.warning("Failed to dump gitmit mtime cache\tlocation=%s\terror=%s", location, error) |
def partial_safe_wraps(wrapped_func, *args, **kwargs):
"""
A version of `functools.wraps` that is safe to wrap a partial in.
"""
if isinstance(wrapped_func, functools.partial):
return partial_safe_wraps(wrapped_func.func)
else:
return functools.wraps(wrapped_func) | A version of `functools.wraps` that is safe to wrap a partial in. | Below is the the instruction that describes the task:
### Input:
A version of `functools.wraps` that is safe to wrap a partial in.
### Response:
def partial_safe_wraps(wrapped_func, *args, **kwargs):
"""
A version of `functools.wraps` that is safe to wrap a partial in.
"""
if isinstance(wrapped_func, functools.partial):
return partial_safe_wraps(wrapped_func.func)
else:
return functools.wraps(wrapped_func) |
def dayname(year, month, day):
'''
Give the name of the month and day for a given date.
Returns:
tuple month_name, day_name
'''
legal_date(year, month, day)
yearday = (month - 1) * 28 + day
if isleap(year + YEAR_EPOCH - 1):
dname = data.day_names_leap[yearday - 1]
else:
dname = data.day_names[yearday - 1]
return MONTHS[month - 1], dname | Give the name of the month and day for a given date.
Returns:
tuple month_name, day_name | Below is the the instruction that describes the task:
### Input:
Give the name of the month and day for a given date.
Returns:
tuple month_name, day_name
### Response:
def dayname(year, month, day):
'''
Give the name of the month and day for a given date.
Returns:
tuple month_name, day_name
'''
legal_date(year, month, day)
yearday = (month - 1) * 28 + day
if isleap(year + YEAR_EPOCH - 1):
dname = data.day_names_leap[yearday - 1]
else:
dname = data.day_names[yearday - 1]
return MONTHS[month - 1], dname |
def check_authorization(self, access_token):
"""OAuth applications can use this method to check token validity
without hitting normal rate limits because of failed login attempts.
If the token is valid, it will return True, otherwise it will return
False.
:returns: bool
"""
p = self._session.params
auth = (p.get('client_id'), p.get('client_secret'))
if access_token and auth:
url = self._build_url('applications', str(auth[0]), 'tokens',
str(access_token))
resp = self._get(url, auth=auth, params={
'client_id': None, 'client_secret': None
})
return self._boolean(resp, 200, 404)
return False | OAuth applications can use this method to check token validity
without hitting normal rate limits because of failed login attempts.
If the token is valid, it will return True, otherwise it will return
False.
:returns: bool | Below is the the instruction that describes the task:
### Input:
OAuth applications can use this method to check token validity
without hitting normal rate limits because of failed login attempts.
If the token is valid, it will return True, otherwise it will return
False.
:returns: bool
### Response:
def check_authorization(self, access_token):
"""OAuth applications can use this method to check token validity
without hitting normal rate limits because of failed login attempts.
If the token is valid, it will return True, otherwise it will return
False.
:returns: bool
"""
p = self._session.params
auth = (p.get('client_id'), p.get('client_secret'))
if access_token and auth:
url = self._build_url('applications', str(auth[0]), 'tokens',
str(access_token))
resp = self._get(url, auth=auth, params={
'client_id': None, 'client_secret': None
})
return self._boolean(resp, 200, 404)
return False |
def instruction_COM_register(self, opcode, register):
"""
Replaces the contents of accumulator A or B with its logical complement.
source code forms: COMA; COMB
"""
register.set(self.COM(value=register.value)) | Replaces the contents of accumulator A or B with its logical complement.
source code forms: COMA; COMB | Below is the the instruction that describes the task:
### Input:
Replaces the contents of accumulator A or B with its logical complement.
source code forms: COMA; COMB
### Response:
def instruction_COM_register(self, opcode, register):
"""
Replaces the contents of accumulator A or B with its logical complement.
source code forms: COMA; COMB
"""
register.set(self.COM(value=register.value)) |
def reinit_index(index=INDEX_NAME):
"""
Delete and then initialise the given index name
Gets settings if they exist in the mappings module.
https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.client.IndicesClient.create
https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html
https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.client.IndicesClient.delete
https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-index.html
"""
es_conn.indices.delete(index, ignore=404)
try:
es_conn.indices.create(index, INDEX_SETTINGS.get(index, None))
except TransportError as e:
raise Exception('Failed to created index, got: {}'.format(e.error)) | Delete and then initialise the given index name
Gets settings if they exist in the mappings module.
https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.client.IndicesClient.create
https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html
https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.client.IndicesClient.delete
https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-index.html | Below is the the instruction that describes the task:
### Input:
Delete and then initialise the given index name
Gets settings if they exist in the mappings module.
https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.client.IndicesClient.create
https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html
https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.client.IndicesClient.delete
https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-index.html
### Response:
def reinit_index(index=INDEX_NAME):
"""
Delete and then initialise the given index name
Gets settings if they exist in the mappings module.
https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.client.IndicesClient.create
https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html
https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.client.IndicesClient.delete
https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-index.html
"""
es_conn.indices.delete(index, ignore=404)
try:
es_conn.indices.create(index, INDEX_SETTINGS.get(index, None))
except TransportError as e:
raise Exception('Failed to created index, got: {}'.format(e.error)) |
def htmresearchCorePrereleaseInstalled():
"""
Make an attempt to determine if a pre-release version of htmresearch-core is
installed already.
@return: boolean
"""
try:
coreDistribution = pkg_resources.get_distribution("htmresearch-core")
if pkg_resources.parse_version(coreDistribution.version).is_prerelease:
# A pre-release dev version of htmresearch-core is installed.
return True
except pkg_resources.DistributionNotFound:
pass # Silently ignore. The absence of htmresearch-core will be handled by
# setuptools by default
return False | Make an attempt to determine if a pre-release version of htmresearch-core is
installed already.
@return: boolean | Below is the the instruction that describes the task:
### Input:
Make an attempt to determine if a pre-release version of htmresearch-core is
installed already.
@return: boolean
### Response:
def htmresearchCorePrereleaseInstalled():
"""
Make an attempt to determine if a pre-release version of htmresearch-core is
installed already.
@return: boolean
"""
try:
coreDistribution = pkg_resources.get_distribution("htmresearch-core")
if pkg_resources.parse_version(coreDistribution.version).is_prerelease:
# A pre-release dev version of htmresearch-core is installed.
return True
except pkg_resources.DistributionNotFound:
pass # Silently ignore. The absence of htmresearch-core will be handled by
# setuptools by default
return False |
def accelerated_dtw(x, y, dist, warp=1):
"""
Computes Dynamic Time Warping (DTW) of two sequences in a faster way.
Instead of iterating through each element and calculating each distance,
this uses the cdist function from scipy (https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cdist.html)
:param array x: N1*M array
:param array y: N2*M array
:param string or func dist: distance parameter for cdist. When string is given, cdist uses optimized functions for the distance metrics.
If a string is passed, the distance function can be 'braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation', 'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'wminkowski', 'yule'.
:param int warp: how many shifts are computed.
Returns the minimum distance, the cost matrix, the accumulated cost matrix, and the wrap path.
"""
assert len(x)
assert len(y)
if ndim(x) == 1:
x = x.reshape(-1, 1)
if ndim(y) == 1:
y = y.reshape(-1, 1)
r, c = len(x), len(y)
D0 = zeros((r + 1, c + 1))
D0[0, 1:] = inf
D0[1:, 0] = inf
D1 = D0[1:, 1:]
D0[1:, 1:] = cdist(x, y, dist)
C = D1.copy()
for i in range(r):
for j in range(c):
min_list = [D0[i, j]]
for k in range(1, warp + 1):
min_list += [D0[min(i + k, r), j],
D0[i, min(j + k, c)]]
D1[i, j] += min(min_list)
if len(x) == 1:
path = zeros(len(y)), range(len(y))
elif len(y) == 1:
path = range(len(x)), zeros(len(x))
else:
path = _traceback(D0)
return D1[-1, -1] / sum(D1.shape), C, D1, path | Computes Dynamic Time Warping (DTW) of two sequences in a faster way.
Instead of iterating through each element and calculating each distance,
this uses the cdist function from scipy (https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cdist.html)
:param array x: N1*M array
:param array y: N2*M array
:param string or func dist: distance parameter for cdist. When string is given, cdist uses optimized functions for the distance metrics.
If a string is passed, the distance function can be 'braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation', 'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'wminkowski', 'yule'.
:param int warp: how many shifts are computed.
Returns the minimum distance, the cost matrix, the accumulated cost matrix, and the wrap path. | Below is the the instruction that describes the task:
### Input:
Computes Dynamic Time Warping (DTW) of two sequences in a faster way.
Instead of iterating through each element and calculating each distance,
this uses the cdist function from scipy (https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cdist.html)
:param array x: N1*M array
:param array y: N2*M array
:param string or func dist: distance parameter for cdist. When string is given, cdist uses optimized functions for the distance metrics.
If a string is passed, the distance function can be 'braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation', 'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'wminkowski', 'yule'.
:param int warp: how many shifts are computed.
Returns the minimum distance, the cost matrix, the accumulated cost matrix, and the wrap path.
### Response:
def accelerated_dtw(x, y, dist, warp=1):
"""
Computes Dynamic Time Warping (DTW) of two sequences in a faster way.
Instead of iterating through each element and calculating each distance,
this uses the cdist function from scipy (https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cdist.html)
:param array x: N1*M array
:param array y: N2*M array
:param string or func dist: distance parameter for cdist. When string is given, cdist uses optimized functions for the distance metrics.
If a string is passed, the distance function can be 'braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation', 'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'wminkowski', 'yule'.
:param int warp: how many shifts are computed.
Returns the minimum distance, the cost matrix, the accumulated cost matrix, and the wrap path.
"""
assert len(x)
assert len(y)
if ndim(x) == 1:
x = x.reshape(-1, 1)
if ndim(y) == 1:
y = y.reshape(-1, 1)
r, c = len(x), len(y)
D0 = zeros((r + 1, c + 1))
D0[0, 1:] = inf
D0[1:, 0] = inf
D1 = D0[1:, 1:]
D0[1:, 1:] = cdist(x, y, dist)
C = D1.copy()
for i in range(r):
for j in range(c):
min_list = [D0[i, j]]
for k in range(1, warp + 1):
min_list += [D0[min(i + k, r), j],
D0[i, min(j + k, c)]]
D1[i, j] += min(min_list)
if len(x) == 1:
path = zeros(len(y)), range(len(y))
elif len(y) == 1:
path = range(len(x)), zeros(len(x))
else:
path = _traceback(D0)
return D1[-1, -1] / sum(D1.shape), C, D1, path |
def extract(self, content, output):
"""Try to extract tables from an invoice"""
for table in self['tables']:
# First apply default options.
plugin_settings = DEFAULT_OPTIONS.copy()
plugin_settings.update(table)
table = plugin_settings
# Validate settings
assert 'start' in table, 'Table start regex missing'
assert 'end' in table, 'Table end regex missing'
assert 'body' in table, 'Table body regex missing'
start = re.search(table['start'], content)
end = re.search(table['end'], content)
if not start or not end:
logger.warning('no table body found - start %s, end %s', start, end)
continue
table_body = content[start.end(): end.start()]
for line in re.split(table['line_separator'], table_body):
# if the line has empty lines in it , skip them
if not line.strip('').strip('\n') or not line:
continue
match = re.search(table['body'], line)
if match:
for field, value in match.groupdict().items():
# If a field name already exists, do not overwrite it
if field in output:
continue
if field.startswith('date') or field.endswith('date'):
output[field] = self.parse_date(value)
if not output[field]:
logger.error("Date parsing failed on date '%s'", value)
return None
elif field.startswith('amount'):
output[field] = self.parse_number(value)
else:
output[field] = value
logger.debug('ignoring *%s* because it doesn\'t match anything', line) | Try to extract tables from an invoice | Below is the the instruction that describes the task:
### Input:
Try to extract tables from an invoice
### Response:
def extract(self, content, output):
"""Try to extract tables from an invoice"""
for table in self['tables']:
# First apply default options.
plugin_settings = DEFAULT_OPTIONS.copy()
plugin_settings.update(table)
table = plugin_settings
# Validate settings
assert 'start' in table, 'Table start regex missing'
assert 'end' in table, 'Table end regex missing'
assert 'body' in table, 'Table body regex missing'
start = re.search(table['start'], content)
end = re.search(table['end'], content)
if not start or not end:
logger.warning('no table body found - start %s, end %s', start, end)
continue
table_body = content[start.end(): end.start()]
for line in re.split(table['line_separator'], table_body):
# if the line has empty lines in it , skip them
if not line.strip('').strip('\n') or not line:
continue
match = re.search(table['body'], line)
if match:
for field, value in match.groupdict().items():
# If a field name already exists, do not overwrite it
if field in output:
continue
if field.startswith('date') or field.endswith('date'):
output[field] = self.parse_date(value)
if not output[field]:
logger.error("Date parsing failed on date '%s'", value)
return None
elif field.startswith('amount'):
output[field] = self.parse_number(value)
else:
output[field] = value
logger.debug('ignoring *%s* because it doesn\'t match anything', line) |
async def get_user_groups(request):
"""Returns the groups that the user in this request has access to.
This function gets the user id from the auth.get_auth function, and passes
it to the ACL callback function to get the groups.
Args:
request: aiohttp Request object
Returns:
If the ACL callback function returns None, this function returns None.
Otherwise this function returns the sequence of group permissions
provided by the callback, plus the Everyone group. If user_id is not
None, the AuthnticatedUser group and the user_id are added to the
groups returned by the function
Raises:
RuntimeError: If the ACL middleware is not installed
"""
acl_callback = request.get(GROUPS_KEY)
if acl_callback is None:
raise RuntimeError('acl_middleware not installed')
user_id = await get_auth(request)
groups = await acl_callback(user_id)
if groups is None:
return None
user_groups = (Group.AuthenticatedUser, user_id) if user_id is not None else ()
return set(itertools.chain(groups, (Group.Everyone,), user_groups)) | Returns the groups that the user in this request has access to.
This function gets the user id from the auth.get_auth function, and passes
it to the ACL callback function to get the groups.
Args:
request: aiohttp Request object
Returns:
If the ACL callback function returns None, this function returns None.
Otherwise this function returns the sequence of group permissions
provided by the callback, plus the Everyone group. If user_id is not
None, the AuthnticatedUser group and the user_id are added to the
groups returned by the function
Raises:
RuntimeError: If the ACL middleware is not installed | Below is the the instruction that describes the task:
### Input:
Returns the groups that the user in this request has access to.
This function gets the user id from the auth.get_auth function, and passes
it to the ACL callback function to get the groups.
Args:
request: aiohttp Request object
Returns:
If the ACL callback function returns None, this function returns None.
Otherwise this function returns the sequence of group permissions
provided by the callback, plus the Everyone group. If user_id is not
None, the AuthnticatedUser group and the user_id are added to the
groups returned by the function
Raises:
RuntimeError: If the ACL middleware is not installed
### Response:
async def get_user_groups(request):
"""Returns the groups that the user in this request has access to.
This function gets the user id from the auth.get_auth function, and passes
it to the ACL callback function to get the groups.
Args:
request: aiohttp Request object
Returns:
If the ACL callback function returns None, this function returns None.
Otherwise this function returns the sequence of group permissions
provided by the callback, plus the Everyone group. If user_id is not
None, the AuthnticatedUser group and the user_id are added to the
groups returned by the function
Raises:
RuntimeError: If the ACL middleware is not installed
"""
acl_callback = request.get(GROUPS_KEY)
if acl_callback is None:
raise RuntimeError('acl_middleware not installed')
user_id = await get_auth(request)
groups = await acl_callback(user_id)
if groups is None:
return None
user_groups = (Group.AuthenticatedUser, user_id) if user_id is not None else ()
return set(itertools.chain(groups, (Group.Everyone,), user_groups)) |
def calc_circuit_breaker_position(self, debug=False):
""" Calculates the optimal position of a circuit breaker on route.
Parameters
----------
debug: bool, defaults to False
If True, prints process information.
Returns
-------
int
position of circuit breaker on route (index of last node on 1st half-ring preceding the circuit breaker)
Notes
-----
According to planning principles of MV grids, a MV ring is run as two strings (half-rings) separated by a
circuit breaker which is open at normal operation.
Assuming a ring (route which is connected to the root node at either sides), the optimal position of a circuit
breaker is defined as the position (virtual cable) between two nodes where the conveyed current is minimal on
the route. Instead of the peak current, the peak load is used here (assuming a constant voltage).
The circuit breakers are used here for checking tech. constraints only and will be re-located after connection
of satellites and stations in ding0.grid.mv_grid.tools.set_circuit_breakers
References
----------
See Also
--------
ding0.grid.mv_grid.tools.set_circuit_breakers
"""
# TODO: add references (Tao)
# set init value
demand_diff_min = 10e6
# check possible positions in route
for ctr in range(len(self._nodes)):
# split route and calc demand difference
route_demand_part1 = sum([node.demand() for node in self._nodes[0:ctr]])
route_demand_part2 = sum([node.demand() for node in self._nodes[ctr:len(self._nodes)]])
demand_diff = abs(route_demand_part1 - route_demand_part2)
if demand_diff < demand_diff_min:
demand_diff_min = demand_diff
position = ctr
if debug:
logger.debug('sum 1={}'.format(
sum([node.demand() for node in self._nodes[0:position]])))
logger.debug('sum 2={}'.format(sum([node.demand() for node in
self._nodes[
position:len(self._nodes)]])))
logger.debug(
'Position of circuit breaker: {0}-{1} (sumdiff={2})'.format(
self._nodes[position - 1], self._nodes[position],
demand_diff_min))
return position | Calculates the optimal position of a circuit breaker on route.
Parameters
----------
debug: bool, defaults to False
If True, prints process information.
Returns
-------
int
position of circuit breaker on route (index of last node on 1st half-ring preceding the circuit breaker)
Notes
-----
According to planning principles of MV grids, a MV ring is run as two strings (half-rings) separated by a
circuit breaker which is open at normal operation.
Assuming a ring (route which is connected to the root node at either sides), the optimal position of a circuit
breaker is defined as the position (virtual cable) between two nodes where the conveyed current is minimal on
the route. Instead of the peak current, the peak load is used here (assuming a constant voltage).
The circuit breakers are used here for checking tech. constraints only and will be re-located after connection
of satellites and stations in ding0.grid.mv_grid.tools.set_circuit_breakers
References
----------
See Also
--------
ding0.grid.mv_grid.tools.set_circuit_breakers | Below is the the instruction that describes the task:
### Input:
Calculates the optimal position of a circuit breaker on route.
Parameters
----------
debug: bool, defaults to False
If True, prints process information.
Returns
-------
int
position of circuit breaker on route (index of last node on 1st half-ring preceding the circuit breaker)
Notes
-----
According to planning principles of MV grids, a MV ring is run as two strings (half-rings) separated by a
circuit breaker which is open at normal operation.
Assuming a ring (route which is connected to the root node at either sides), the optimal position of a circuit
breaker is defined as the position (virtual cable) between two nodes where the conveyed current is minimal on
the route. Instead of the peak current, the peak load is used here (assuming a constant voltage).
The circuit breakers are used here for checking tech. constraints only and will be re-located after connection
of satellites and stations in ding0.grid.mv_grid.tools.set_circuit_breakers
References
----------
See Also
--------
ding0.grid.mv_grid.tools.set_circuit_breakers
### Response:
def calc_circuit_breaker_position(self, debug=False):
""" Calculates the optimal position of a circuit breaker on route.
Parameters
----------
debug: bool, defaults to False
If True, prints process information.
Returns
-------
int
position of circuit breaker on route (index of last node on 1st half-ring preceding the circuit breaker)
Notes
-----
According to planning principles of MV grids, a MV ring is run as two strings (half-rings) separated by a
circuit breaker which is open at normal operation.
Assuming a ring (route which is connected to the root node at either sides), the optimal position of a circuit
breaker is defined as the position (virtual cable) between two nodes where the conveyed current is minimal on
the route. Instead of the peak current, the peak load is used here (assuming a constant voltage).
The circuit breakers are used here for checking tech. constraints only and will be re-located after connection
of satellites and stations in ding0.grid.mv_grid.tools.set_circuit_breakers
References
----------
See Also
--------
ding0.grid.mv_grid.tools.set_circuit_breakers
"""
# TODO: add references (Tao)
# set init value
demand_diff_min = 10e6
# check possible positions in route
for ctr in range(len(self._nodes)):
# split route and calc demand difference
route_demand_part1 = sum([node.demand() for node in self._nodes[0:ctr]])
route_demand_part2 = sum([node.demand() for node in self._nodes[ctr:len(self._nodes)]])
demand_diff = abs(route_demand_part1 - route_demand_part2)
if demand_diff < demand_diff_min:
demand_diff_min = demand_diff
position = ctr
if debug:
logger.debug('sum 1={}'.format(
sum([node.demand() for node in self._nodes[0:position]])))
logger.debug('sum 2={}'.format(sum([node.demand() for node in
self._nodes[
position:len(self._nodes)]])))
logger.debug(
'Position of circuit breaker: {0}-{1} (sumdiff={2})'.format(
self._nodes[position - 1], self._nodes[position],
demand_diff_min))
return position |
def fixup_offsets(self, new_start, node):
"""Adjust all offsets under node"""
if hasattr(node, 'start'):
node.start += new_start
node.finish += new_start
for n in node:
if hasattr(n, 'offset'):
if hasattr(n, 'start'):
n.start += new_start
n.finish += new_start
else:
self.fixup_offsets(new_start, n)
return | Adjust all offsets under node | Below is the the instruction that describes the task:
### Input:
Adjust all offsets under node
### Response:
def fixup_offsets(self, new_start, node):
"""Adjust all offsets under node"""
if hasattr(node, 'start'):
node.start += new_start
node.finish += new_start
for n in node:
if hasattr(n, 'offset'):
if hasattr(n, 'start'):
n.start += new_start
n.finish += new_start
else:
self.fixup_offsets(new_start, n)
return |
def add_step(self, value_map):
""" Add the values in value_map to the end of the trace. """
if len(self.trace) == 0:
raise PyrtlError('error, simulation trace needs at least 1 signal to track '
'(by default, unnamed signals are not traced -- try either passing '
'a name to a WireVector or setting a "wirevector_subset" option)')
for wire in self.trace:
tracelist = self.trace[wire]
wirevec = self._wires[wire]
tracelist.append(value_map[wirevec]) | Add the values in value_map to the end of the trace. | Below is the the instruction that describes the task:
### Input:
Add the values in value_map to the end of the trace.
### Response:
def add_step(self, value_map):
""" Add the values in value_map to the end of the trace. """
if len(self.trace) == 0:
raise PyrtlError('error, simulation trace needs at least 1 signal to track '
'(by default, unnamed signals are not traced -- try either passing '
'a name to a WireVector or setting a "wirevector_subset" option)')
for wire in self.trace:
tracelist = self.trace[wire]
wirevec = self._wires[wire]
tracelist.append(value_map[wirevec]) |
def to_json(fn, obj):
"""Convenience method to save pyquil.operator_estimation objects as a JSON file.
See :py:func:`read_json`.
"""
with open(fn, 'w') as f:
json.dump(obj, f, cls=OperatorEncoder, indent=2, ensure_ascii=False)
return fn | Convenience method to save pyquil.operator_estimation objects as a JSON file.
See :py:func:`read_json`. | Below is the the instruction that describes the task:
### Input:
Convenience method to save pyquil.operator_estimation objects as a JSON file.
See :py:func:`read_json`.
### Response:
def to_json(fn, obj):
"""Convenience method to save pyquil.operator_estimation objects as a JSON file.
See :py:func:`read_json`.
"""
with open(fn, 'w') as f:
json.dump(obj, f, cls=OperatorEncoder, indent=2, ensure_ascii=False)
return fn |
def set_empty_text(self):
"""Display the empty text
"""
self.buffer.insert_with_tags_by_name(
self.buffer.get_start_iter(),
self.empty_text, 'empty-text') | Display the empty text | Below is the the instruction that describes the task:
### Input:
Display the empty text
### Response:
def set_empty_text(self):
"""Display the empty text
"""
self.buffer.insert_with_tags_by_name(
self.buffer.get_start_iter(),
self.empty_text, 'empty-text') | Display the empty text | Below is the instruction that describes the task:
def add_buffer(self, buf_header, buf_payload):
''' Associate a buffer header and payload with this message.
Args:
buf_header (``JSON``) : a buffer header
buf_payload (``JSON`` or bytes) : a buffer payload
Returns:
None
Raises:
MessageError
'''
if 'num_buffers' in self._header:
self._header['num_buffers'] += 1
else:
self._header['num_buffers'] = 1
self._header_json = None
self._buffers.append((buf_header, buf_payload)) | Associate a buffer header and payload with this message.
Args:
buf_header (``JSON``) : a buffer header
buf_payload (``JSON`` or bytes) : a buffer payload
Returns:
None
Raises:
MessageError | Below is the instruction that describes the task:
### Input:
Associate a buffer header and payload with this message.
Args:
buf_header (``JSON``) : a buffer header
buf_payload (``JSON`` or bytes) : a buffer payload
Returns:
None
Raises:
MessageError
### Response:
def add_buffer(self, buf_header, buf_payload):
''' Associate a buffer header and payload with this message.
Args:
buf_header (``JSON``) : a buffer header
buf_payload (``JSON`` or bytes) : a buffer payload
Returns:
None
Raises:
MessageError
'''
if 'num_buffers' in self._header:
self._header['num_buffers'] += 1
else:
self._header['num_buffers'] = 1
self._header_json = None
self._buffers.append((buf_header, buf_payload)) |
def collect_data(parent_module):
""" Find Picard VariantCallingMetrics reports and parse their data """
data = dict()
for file_meta in parent_module.find_log_files('picard/variant_calling_metrics', filehandles=True):
s_name = None
for header, value in table_in(file_meta['f'], pre_header_string='## METRICS CLASS'):
if header == 'SAMPLE_ALIAS':
s_name = value
if s_name in data:
log.debug("Duplicate sample name found in {}! Overwriting: {}".format(file_meta['fn'], s_name))
data[s_name] = OrderedDict()
else:
data[s_name][header] = value
return data | Find Picard VariantCallingMetrics reports and parse their data | Below is the instruction that describes the task:
### Input:
Find Picard VariantCallingMetrics reports and parse their data
### Response:
def collect_data(parent_module):
""" Find Picard VariantCallingMetrics reports and parse their data """
data = dict()
for file_meta in parent_module.find_log_files('picard/variant_calling_metrics', filehandles=True):
s_name = None
for header, value in table_in(file_meta['f'], pre_header_string='## METRICS CLASS'):
if header == 'SAMPLE_ALIAS':
s_name = value
if s_name in data:
log.debug("Duplicate sample name found in {}! Overwriting: {}".format(file_meta['fn'], s_name))
data[s_name] = OrderedDict()
else:
data[s_name][header] = value
return data |
def prep_models(aryPrfTc, varSdSmthTmp=2.0, lgcPrint=True):
"""
Prepare pRF model time courses.
Parameters
----------
aryPrfTc : np.array
4D numpy array with pRF time course models, with following dimensions:
`aryPrfTc[x-position, y-position, SD, volume]`.
varSdSmthTmp : float
Extent of temporal smoothing that is applied to functional data and
pRF time course models, [SD of Gaussian kernel, in seconds]. If `zero`,
no temporal smoothing is applied.
lgcPrint : boolean
Whether print statements should be executed.
Returns
-------
aryPrfTc : np.array
4D numpy array with prepared pRF time course models, same
dimensions as input (`aryPrfTc[x-position, y-position, SD, volume]`).
"""
if lgcPrint:
print('------Prepare pRF time course models')
# Define temporal smoothing of pRF time course models
def funcSmthTmp(aryPrfTc, varSdSmthTmp, lgcPrint=True):
"""Apply temporal smoothing to fMRI data & pRF time course models.
Parameters
----------
aryPrfTc : np.array
4D numpy array with pRF time course models, with following
dimensions: `aryPrfTc[x-position, y-position, SD, volume]`.
varSdSmthTmp : float, positive
Extent of temporal smoothing that is applied to functional data and
pRF time course models, [SD of Gaussian kernel, in seconds]. If
`zero`, no temporal smoothing is applied.
lgcPrint : boolean
Whether print statements should be executed.
Returns
-------
aryPrfTc : np.array
4D numpy array with prepared pRF time course models, same dimension
as input (`aryPrfTc[x-position, y-position, SD, volume]`).
"""
# adjust the input, if necessary, such that input is 2D, with last
# dim time
tplInpShp = aryPrfTc.shape
aryPrfTc = aryPrfTc.reshape((-1, aryPrfTc.shape[-1]))
# For the filtering to perform well at the ends of the time series, we
# set the method to 'nearest' and place a volume with mean intensity
# (over time) at the beginning and at the end.
aryPrfTcMean = np.mean(aryPrfTc, axis=-1, keepdims=True).reshape(-1, 1)
aryPrfTc = np.concatenate((aryPrfTcMean, aryPrfTc, aryPrfTcMean),
axis=-1)
# In the input data, time goes from left to right. Therefore, we apply
# the filter along axis=1.
aryPrfTc = gaussian_filter1d(aryPrfTc.astype('float32'), varSdSmthTmp,
axis=-1, order=0, mode='nearest',
truncate=4.0)
# Remove mean-intensity volumes at the beginning and at the end:
aryPrfTc = aryPrfTc[..., 1:-1]
# Output array:
return aryPrfTc.reshape(tplInpShp).astype('float16')
# Perform temporal smoothing of pRF time course models
if 0.0 < varSdSmthTmp:
if lgcPrint:
print('---------Temporal smoothing on pRF time course models')
print('------------SD tmp smooth is: ' + str(varSdSmthTmp))
aryPrfTc = funcSmthTmp(aryPrfTc, varSdSmthTmp)
# Z-score the prf time course models
if lgcPrint:
print('---------Zscore the pRF time course models')
# De-mean the prf time course models:
aryPrfTc = np.subtract(aryPrfTc, np.mean(aryPrfTc, axis=-1)[..., None])
# Standardize the prf time course models:
# In order to avoid devision by zero, only divide those voxels with a
# standard deviation greater than zero:
aryTmpStd = np.std(aryPrfTc, axis=-1)
aryTmpLgc = np.greater(aryTmpStd, np.array([0.0]))
aryPrfTc[aryTmpLgc, :] = np.divide(aryPrfTc[aryTmpLgc, :],
aryTmpStd[aryTmpLgc, None])
return aryPrfTc | Prepare pRF model time courses.
Parameters
----------
aryPrfTc : np.array
4D numpy array with pRF time course models, with following dimensions:
`aryPrfTc[x-position, y-position, SD, volume]`.
varSdSmthTmp : float
Extent of temporal smoothing that is applied to functional data and
pRF time course models, [SD of Gaussian kernel, in seconds]. If `zero`,
no temporal smoothing is applied.
lgcPrint : boolean
Whether print statements should be executed.
Returns
-------
aryPrfTc : np.array
4D numpy array with prepared pRF time course models, same
dimensions as input (`aryPrfTc[x-position, y-position, SD, volume]`). | Below is the instruction that describes the task:
### Input:
Prepare pRF model time courses.
Parameters
----------
aryPrfTc : np.array
4D numpy array with pRF time course models, with following dimensions:
`aryPrfTc[x-position, y-position, SD, volume]`.
varSdSmthTmp : float
Extent of temporal smoothing that is applied to functional data and
pRF time course models, [SD of Gaussian kernel, in seconds]. If `zero`,
no temporal smoothing is applied.
lgcPrint : boolean
Whether print statements should be executed.
Returns
-------
aryPrfTc : np.array
4D numpy array with prepared pRF time course models, same
dimensions as input (`aryPrfTc[x-position, y-position, SD, volume]`).
### Response:
def prep_models(aryPrfTc, varSdSmthTmp=2.0, lgcPrint=True):
"""
Prepare pRF model time courses.
Parameters
----------
aryPrfTc : np.array
4D numpy array with pRF time course models, with following dimensions:
`aryPrfTc[x-position, y-position, SD, volume]`.
varSdSmthTmp : float
Extent of temporal smoothing that is applied to functional data and
pRF time course models, [SD of Gaussian kernel, in seconds]. If `zero`,
no temporal smoothing is applied.
lgcPrint : boolean
Whether print statements should be executed.
Returns
-------
aryPrfTc : np.array
4D numpy array with prepared pRF time course models, same
dimensions as input (`aryPrfTc[x-position, y-position, SD, volume]`).
"""
if lgcPrint:
print('------Prepare pRF time course models')
# Define temporal smoothing of pRF time course models
def funcSmthTmp(aryPrfTc, varSdSmthTmp, lgcPrint=True):
"""Apply temporal smoothing to fMRI data & pRF time course models.
Parameters
----------
aryPrfTc : np.array
4D numpy array with pRF time course models, with following
dimensions: `aryPrfTc[x-position, y-position, SD, volume]`.
varSdSmthTmp : float, positive
Extent of temporal smoothing that is applied to functional data and
pRF time course models, [SD of Gaussian kernel, in seconds]. If
`zero`, no temporal smoothing is applied.
lgcPrint : boolean
Whether print statements should be executed.
Returns
-------
aryPrfTc : np.array
4D numpy array with prepared pRF time course models, same dimension
as input (`aryPrfTc[x-position, y-position, SD, volume]`).
"""
# adjust the input, if necessary, such that input is 2D, with last
# dim time
tplInpShp = aryPrfTc.shape
aryPrfTc = aryPrfTc.reshape((-1, aryPrfTc.shape[-1]))
# For the filtering to perform well at the ends of the time series, we
# set the method to 'nearest' and place a volume with mean intensity
# (over time) at the beginning and at the end.
aryPrfTcMean = np.mean(aryPrfTc, axis=-1, keepdims=True).reshape(-1, 1)
aryPrfTc = np.concatenate((aryPrfTcMean, aryPrfTc, aryPrfTcMean),
axis=-1)
# In the input data, time goes from left to right. Therefore, we apply
# the filter along axis=1.
aryPrfTc = gaussian_filter1d(aryPrfTc.astype('float32'), varSdSmthTmp,
axis=-1, order=0, mode='nearest',
truncate=4.0)
# Remove mean-intensity volumes at the beginning and at the end:
aryPrfTc = aryPrfTc[..., 1:-1]
# Output array:
return aryPrfTc.reshape(tplInpShp).astype('float16')
# Perform temporal smoothing of pRF time course models
if 0.0 < varSdSmthTmp:
if lgcPrint:
print('---------Temporal smoothing on pRF time course models')
print('------------SD tmp smooth is: ' + str(varSdSmthTmp))
aryPrfTc = funcSmthTmp(aryPrfTc, varSdSmthTmp)
# Z-score the prf time course models
if lgcPrint:
print('---------Zscore the pRF time course models')
# De-mean the prf time course models:
aryPrfTc = np.subtract(aryPrfTc, np.mean(aryPrfTc, axis=-1)[..., None])
# Standardize the prf time course models:
# In order to avoid devision by zero, only divide those voxels with a
# standard deviation greater than zero:
aryTmpStd = np.std(aryPrfTc, axis=-1)
aryTmpLgc = np.greater(aryTmpStd, np.array([0.0]))
aryPrfTc[aryTmpLgc, :] = np.divide(aryPrfTc[aryTmpLgc, :],
aryTmpStd[aryTmpLgc, None])
return aryPrfTc |
def parse_user_data(variables, raw_user_data, blueprint_name):
"""Parse the given user data and renders it as a template
It supports referencing template variables to create userdata
that's supplemented with information from the stack, as commonly
required when creating EC2 userdata files.
For example:
Given a raw_user_data string: 'open file ${file}'
And a variables dictionary with: {'file': 'test.txt'}
parse_user_data would output: open file test.txt
Args:
variables (dict): variables available to the template
raw_user_data (str): the user_data to be parsed
blueprint_name (str): the name of the blueprint
Returns:
str: The parsed user data, with all the variables values and
refs replaced with their resolved values.
Raises:
InvalidUserdataPlaceholder: Raised when a placeholder name in
raw_user_data is not valid.
E.g ${100} would raise this.
MissingVariable: Raised when a variable is in the raw_user_data that
is not given in the blueprint
"""
variable_values = {}
for key, value in variables.items():
if type(value) is CFNParameter:
variable_values[key] = value.to_parameter_value()
else:
variable_values[key] = value
template = string.Template(raw_user_data)
res = ""
try:
res = template.substitute(variable_values)
except ValueError as exp:
raise InvalidUserdataPlaceholder(blueprint_name, exp.args[0])
except KeyError as key:
raise MissingVariable(blueprint_name, key)
return res | Parse the given user data and renders it as a template
It supports referencing template variables to create userdata
that's supplemented with information from the stack, as commonly
required when creating EC2 userdata files.
For example:
Given a raw_user_data string: 'open file ${file}'
And a variables dictionary with: {'file': 'test.txt'}
parse_user_data would output: open file test.txt
Args:
variables (dict): variables available to the template
raw_user_data (str): the user_data to be parsed
blueprint_name (str): the name of the blueprint
Returns:
str: The parsed user data, with all the variables values and
refs replaced with their resolved values.
Raises:
InvalidUserdataPlaceholder: Raised when a placeholder name in
raw_user_data is not valid.
E.g ${100} would raise this.
MissingVariable: Raised when a variable is in the raw_user_data that
is not given in the blueprint | Below is the instruction that describes the task:
### Input:
Parse the given user data and renders it as a template
It supports referencing template variables to create userdata
that's supplemented with information from the stack, as commonly
required when creating EC2 userdata files.
For example:
Given a raw_user_data string: 'open file ${file}'
And a variables dictionary with: {'file': 'test.txt'}
parse_user_data would output: open file test.txt
Args:
variables (dict): variables available to the template
raw_user_data (str): the user_data to be parsed
blueprint_name (str): the name of the blueprint
Returns:
str: The parsed user data, with all the variables values and
refs replaced with their resolved values.
Raises:
InvalidUserdataPlaceholder: Raised when a placeholder name in
raw_user_data is not valid.
E.g ${100} would raise this.
MissingVariable: Raised when a variable is in the raw_user_data that
is not given in the blueprint
### Response:
def parse_user_data(variables, raw_user_data, blueprint_name):
"""Parse the given user data and renders it as a template
It supports referencing template variables to create userdata
that's supplemented with information from the stack, as commonly
required when creating EC2 userdata files.
For example:
Given a raw_user_data string: 'open file ${file}'
And a variables dictionary with: {'file': 'test.txt'}
parse_user_data would output: open file test.txt
Args:
variables (dict): variables available to the template
raw_user_data (str): the user_data to be parsed
blueprint_name (str): the name of the blueprint
Returns:
str: The parsed user data, with all the variables values and
refs replaced with their resolved values.
Raises:
InvalidUserdataPlaceholder: Raised when a placeholder name in
raw_user_data is not valid.
E.g ${100} would raise this.
MissingVariable: Raised when a variable is in the raw_user_data that
is not given in the blueprint
"""
variable_values = {}
for key, value in variables.items():
if type(value) is CFNParameter:
variable_values[key] = value.to_parameter_value()
else:
variable_values[key] = value
template = string.Template(raw_user_data)
res = ""
try:
res = template.substitute(variable_values)
except ValueError as exp:
raise InvalidUserdataPlaceholder(blueprint_name, exp.args[0])
except KeyError as key:
raise MissingVariable(blueprint_name, key)
return res |
def get_project_metrics(self, project, metric_aggregation_type=None, min_metrics_time=None):
"""GetProjectMetrics.
[Preview API] Gets build metrics for a project.
:param str project: Project ID or project name
:param str metric_aggregation_type: The aggregation type to use (hourly, daily).
:param datetime min_metrics_time: The date from which to calculate metrics.
:rtype: [BuildMetric]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if metric_aggregation_type is not None:
route_values['metricAggregationType'] = self._serialize.url('metric_aggregation_type', metric_aggregation_type, 'str')
query_parameters = {}
if min_metrics_time is not None:
query_parameters['minMetricsTime'] = self._serialize.query('min_metrics_time', min_metrics_time, 'iso-8601')
response = self._send(http_method='GET',
location_id='7433fae7-a6bc-41dc-a6e2-eef9005ce41a',
version='5.0-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[BuildMetric]', self._unwrap_collection(response)) | GetProjectMetrics.
[Preview API] Gets build metrics for a project.
:param str project: Project ID or project name
:param str metric_aggregation_type: The aggregation type to use (hourly, daily).
:param datetime min_metrics_time: The date from which to calculate metrics.
:rtype: [BuildMetric] | Below is the instruction that describes the task:
### Input:
GetProjectMetrics.
[Preview API] Gets build metrics for a project.
:param str project: Project ID or project name
:param str metric_aggregation_type: The aggregation type to use (hourly, daily).
:param datetime min_metrics_time: The date from which to calculate metrics.
:rtype: [BuildMetric]
### Response:
def get_project_metrics(self, project, metric_aggregation_type=None, min_metrics_time=None):
"""GetProjectMetrics.
[Preview API] Gets build metrics for a project.
:param str project: Project ID or project name
:param str metric_aggregation_type: The aggregation type to use (hourly, daily).
:param datetime min_metrics_time: The date from which to calculate metrics.
:rtype: [BuildMetric]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if metric_aggregation_type is not None:
route_values['metricAggregationType'] = self._serialize.url('metric_aggregation_type', metric_aggregation_type, 'str')
query_parameters = {}
if min_metrics_time is not None:
query_parameters['minMetricsTime'] = self._serialize.query('min_metrics_time', min_metrics_time, 'iso-8601')
response = self._send(http_method='GET',
location_id='7433fae7-a6bc-41dc-a6e2-eef9005ce41a',
version='5.0-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[BuildMetric]', self._unwrap_collection(response)) |
def inspect_bucket(bucket):
"""Show all information known on a bucket."""
state = db.db()
found = None
for b in state.buckets():
if b.name == bucket:
found = b
if not found:
click.echo("no bucket named: %s" % bucket)
return
click.echo("Bucket: %s" % found.name)
click.echo("Account: %s" % found.account)
click.echo("Region: %s" % found.region)
click.echo("Created: %s" % found.created)
click.echo("Size: %s" % found.size)
click.echo("Inventory: %s" % found.inventory)
click.echo("Partitions: %s" % found.partitions)
click.echo("Scanned: %0.2f%%" % found.percent_scanned)
click.echo("")
click.echo("Errors")
click.echo("Denied: %s" % found.keys_denied)
click.echo("BErrors: %s" % found.error_count)
click.echo("KErrors: %s" % found.data['keys-error'].get(found.bucket_id, 0))
click.echo("Throttle: %s" % found.data['keys-throttled'].get(found.bucket_id, 0))
click.echo("Missing: %s" % found.data['keys-missing'].get(found.bucket_id, 0))
click.echo("Session: %s" % found.data['keys-sesserr'].get(found.bucket_id, 0))
click.echo("Connection: %s" % found.data['keys-connerr'].get(found.bucket_id, 0))
click.echo("Endpoint: %s" % found.data['keys-enderr'].get(found.bucket_id, 0)) | Show all information known on a bucket. | Below is the instruction that describes the task:
### Input:
Show all information known on a bucket.
### Response:
def inspect_bucket(bucket):
"""Show all information known on a bucket."""
state = db.db()
found = None
for b in state.buckets():
if b.name == bucket:
found = b
if not found:
click.echo("no bucket named: %s" % bucket)
return
click.echo("Bucket: %s" % found.name)
click.echo("Account: %s" % found.account)
click.echo("Region: %s" % found.region)
click.echo("Created: %s" % found.created)
click.echo("Size: %s" % found.size)
click.echo("Inventory: %s" % found.inventory)
click.echo("Partitions: %s" % found.partitions)
click.echo("Scanned: %0.2f%%" % found.percent_scanned)
click.echo("")
click.echo("Errors")
click.echo("Denied: %s" % found.keys_denied)
click.echo("BErrors: %s" % found.error_count)
click.echo("KErrors: %s" % found.data['keys-error'].get(found.bucket_id, 0))
click.echo("Throttle: %s" % found.data['keys-throttled'].get(found.bucket_id, 0))
click.echo("Missing: %s" % found.data['keys-missing'].get(found.bucket_id, 0))
click.echo("Session: %s" % found.data['keys-sesserr'].get(found.bucket_id, 0))
click.echo("Connection: %s" % found.data['keys-connerr'].get(found.bucket_id, 0))
click.echo("Endpoint: %s" % found.data['keys-enderr'].get(found.bucket_id, 0)) |
def set_maintainer(self, maintainer):
# type: (Union[hdx.data.user.User,Dict,str]) -> None
"""Set the dataset's maintainer.
Args:
maintainer (Union[User,Dict,str]): Either a user id or User metadata from a User object or dictionary.
Returns:
None
"""
if isinstance(maintainer, hdx.data.user.User) or isinstance(maintainer, dict):
if 'id' not in maintainer:
maintainer = hdx.data.user.User.read_from_hdx(maintainer['name'], configuration=self.configuration)
maintainer = maintainer['id']
elif not isinstance(maintainer, str):
raise HDXError('Type %s cannot be added as a maintainer!' % type(maintainer).__name__)
if is_valid_uuid(maintainer) is False:
raise HDXError('%s is not a valid user id for a maintainer!' % maintainer)
self.data['maintainer'] = maintainer | Set the dataset's maintainer.
Args:
maintainer (Union[User,Dict,str]): Either a user id or User metadata from a User object or dictionary.
Returns:
None | Below is the instruction that describes the task:
### Input:
Set the dataset's maintainer.
Args:
maintainer (Union[User,Dict,str]): Either a user id or User metadata from a User object or dictionary.
Returns:
None
### Response:
def set_maintainer(self, maintainer):
# type: (Union[hdx.data.user.User,Dict,str]) -> None
"""Set the dataset's maintainer.
Args:
maintainer (Union[User,Dict,str]): Either a user id or User metadata from a User object or dictionary.
Returns:
None
"""
if isinstance(maintainer, hdx.data.user.User) or isinstance(maintainer, dict):
if 'id' not in maintainer:
maintainer = hdx.data.user.User.read_from_hdx(maintainer['name'], configuration=self.configuration)
maintainer = maintainer['id']
elif not isinstance(maintainer, str):
raise HDXError('Type %s cannot be added as a maintainer!' % type(maintainer).__name__)
if is_valid_uuid(maintainer) is False:
raise HDXError('%s is not a valid user id for a maintainer!' % maintainer)
self.data['maintainer'] = maintainer |
def _process_messages(self):
"""Processes messages received from kafka"""
try:
for message in self.consumer:
try:
if message is None:
self.logger.debug("no message")
break
loaded_dict = json.loads(message.value)
self.logger.debug("got valid kafka message")
with self.uuids_lock:
if 'uuid' in loaded_dict:
if loaded_dict['uuid'] in self.uuids and \
self.uuids[loaded_dict['uuid']] != 'poll':
self.logger.debug("Found Kafka message from request")
self.uuids[loaded_dict['uuid']] = loaded_dict
else:
self.logger.debug("Got poll result")
self._send_result_to_redis(loaded_dict)
else:
self.logger.debug("Got message not intended for this process")
except ValueError:
extras = {}
if message is not None:
extras["data"] = message.value
self.logger.warning('Unparseable JSON Received from kafka',
extra=extras)
self._check_kafka_disconnect()
except OffsetOutOfRangeError:
# consumer has no idea where they are
self.consumer.seek_to_end()
self.logger.error("Kafka offset out of range error") | Processes messages received from kafka | Below is the instruction that describes the task:
### Input:
Processes messages received from kafka
### Response:
def _process_messages(self):
"""Processes messages received from kafka"""
try:
for message in self.consumer:
try:
if message is None:
self.logger.debug("no message")
break
loaded_dict = json.loads(message.value)
self.logger.debug("got valid kafka message")
with self.uuids_lock:
if 'uuid' in loaded_dict:
if loaded_dict['uuid'] in self.uuids and \
self.uuids[loaded_dict['uuid']] != 'poll':
self.logger.debug("Found Kafka message from request")
self.uuids[loaded_dict['uuid']] = loaded_dict
else:
self.logger.debug("Got poll result")
self._send_result_to_redis(loaded_dict)
else:
self.logger.debug("Got message not intended for this process")
except ValueError:
extras = {}
if message is not None:
extras["data"] = message.value
self.logger.warning('Unparseable JSON Received from kafka',
extra=extras)
self._check_kafka_disconnect()
except OffsetOutOfRangeError:
# consumer has no idea where they are
self.consumer.seek_to_end()
self.logger.error("Kafka offset out of range error") |
def plot_eeg_erp(all_epochs, conditions=None, times=None, include="all", exclude=None, hemisphere="both", central=True, name=None, colors=None, gfp=False, ci=0.95, ci_alpha=0.333, invert_y=False, linewidth=1, linestyle="-", filter_hfreq=None):
"""
DOCS INCOMPLETE :(
"""
# Preserve original
all_epochs_current = all_epochs.copy()
# Filter using Savitzky-Golay polynomial method
if (filter_hfreq is not None) and (isinstance(filter_hfreq, int)):
for participant, epochs in all_epochs_current.items():
all_epochs_current[participant] = epochs.savgol_filter(filter_hfreq, copy=True)
# Crop
if isinstance(times, list) and len(times) == 2:
for participant, epochs in all_epochs_current.items():
all_epochs_current[participant] = epochs.copy().crop(times[0], times[1])
# Transform to evokeds
all_evokeds = eeg_to_all_evokeds(all_epochs_current, conditions=conditions)
data = {}
for participant, epochs in all_evokeds.items():
for condition, epoch in epochs.items():
data[condition] = []
for participant, epochs in all_evokeds.items():
for condition, epoch in epochs.items():
data[condition].append(epoch)
conditions = list(data.keys())
# Line styles
if isinstance(linestyle, str):
linestyle = [linestyle] * len(conditions)
elif isinstance(linestyle, list) and len(linestyle) >= len(conditions):
pass
elif isinstance(linestyle, dict) and len(linestyle.keys()) >= len(conditions):
linestyle = [linestyle[cond] for cond in conditions]
else:
print("NeuroKit Warning: plot_eeg_erp(): linestyle must be either a str, a list or a dict.")
# Colors
if isinstance(colors, str):
colors = {condition: colors for condition in conditions}
elif isinstance(colors, list) and len(colors) >= len(conditions):
colors= {condition: colors[index] for index, condition in enumerate(conditions)}
elif isinstance(colors, dict) and len(colors.keys()) >= len(conditions):
pass
elif colors is None:
pass
else:
print("NeuroKit Warning: plot_eeg_erp(): colors must be either a str, a list, a dict or None.")
# Modify styles
styles = {}
for index, condition in enumerate(conditions):
styles[condition] = {"linewidth": linewidth, "linestyle": linestyle[index]}
# Select electrodes
picks = mne.pick_types(epoch.info, eeg=True, selection=eeg_select_electrodes(epoch, include=include, exclude=exclude, hemisphere=hemisphere, central=central))
# Plot
try:
plot = mne.viz.plot_compare_evokeds(data, picks=picks, colors=colors, styles=styles, title=name, gfp=gfp, ci=ci, invert_y=invert_y, ci_alpha=ci_alpha)
except TypeError:
print("NeuroKit Warning: plot_eeg_erp(): You're using a version of mne that does not support ci_alpha or ci_method parameters. Leaving defaults.")
plot = mne.viz.plot_compare_evokeds(data, picks=picks, colors=colors, styles=styles, title=name, gfp=gfp, ci=ci, invert_y=invert_y)
return(plot) | DOCS INCOMPLETE :( | Below is the instruction that describes the task:
### Input:
DOCS INCOMPLETE :(
### Response:
def plot_eeg_erp(all_epochs, conditions=None, times=None, include="all", exclude=None, hemisphere="both", central=True, name=None, colors=None, gfp=False, ci=0.95, ci_alpha=0.333, invert_y=False, linewidth=1, linestyle="-", filter_hfreq=None):
"""
DOCS INCOMPLETE :(
"""
# Preserve original
all_epochs_current = all_epochs.copy()
# Filter using Savitzky-Golay polynomial method
if (filter_hfreq is not None) and (isinstance(filter_hfreq, int)):
for participant, epochs in all_epochs_current.items():
all_epochs_current[participant] = epochs.savgol_filter(filter_hfreq, copy=True)
# Crop
if isinstance(times, list) and len(times) == 2:
for participant, epochs in all_epochs_current.items():
all_epochs_current[participant] = epochs.copy().crop(times[0], times[1])
# Transform to evokeds
all_evokeds = eeg_to_all_evokeds(all_epochs_current, conditions=conditions)
data = {}
for participant, epochs in all_evokeds.items():
for condition, epoch in epochs.items():
data[condition] = []
for participant, epochs in all_evokeds.items():
for condition, epoch in epochs.items():
data[condition].append(epoch)
conditions = list(data.keys())
# Line styles
if isinstance(linestyle, str):
linestyle = [linestyle] * len(conditions)
elif isinstance(linestyle, list) and len(linestyle) >= len(conditions):
pass
elif isinstance(linestyle, dict) and len(linestyle.keys()) >= len(conditions):
linestyle = [linestyle[cond] for cond in conditions]
else:
print("NeuroKit Warning: plot_eeg_erp(): linestyle must be either a str, a list or a dict.")
# Colors
if isinstance(colors, str):
colors = {condition: colors for condition in conditions}
elif isinstance(colors, list) and len(colors) >= len(conditions):
colors= {condition: colors[index] for index, condition in enumerate(conditions)}
elif isinstance(colors, dict) and len(colors.keys()) >= len(conditions):
pass
elif colors is None:
pass
else:
print("NeuroKit Warning: plot_eeg_erp(): colors must be either a str, a list, a dict or None.")
# Modify styles
styles = {}
for index, condition in enumerate(conditions):
styles[condition] = {"linewidth": linewidth, "linestyle": linestyle[index]}
# Select electrodes
picks = mne.pick_types(epoch.info, eeg=True, selection=eeg_select_electrodes(epoch, include=include, exclude=exclude, hemisphere=hemisphere, central=central))
# Plot
try:
plot = mne.viz.plot_compare_evokeds(data, picks=picks, colors=colors, styles=styles, title=name, gfp=gfp, ci=ci, invert_y=invert_y, ci_alpha=ci_alpha)
except TypeError:
print("NeuroKit Warning: plot_eeg_erp(): You're using a version of mne that does not support ci_alpha or ci_method parameters. Leaving defaults.")
plot = mne.viz.plot_compare_evokeds(data, picks=picks, colors=colors, styles=styles, title=name, gfp=gfp, ci=ci, invert_y=invert_y)
return(plot) |
def _drop_nodes_from_errorpaths(self, _errors, dp_items, sp_items):
""" Removes nodes by index from an errorpath, relatively to the
basepaths of self.
:param errors: A list of :class:`errors.ValidationError` instances.
:param dp_items: A list of integers, pointing at the nodes to drop from
the :attr:`document_path`.
:param sp_items: Alike ``dp_items``, but for :attr:`schema_path`.
"""
dp_basedepth = len(self.document_path)
sp_basedepth = len(self.schema_path)
for error in _errors:
for i in sorted(dp_items, reverse=True):
error.document_path = \
drop_item_from_tuple(error.document_path, dp_basedepth + i)
for i in sorted(sp_items, reverse=True):
error.schema_path = \
drop_item_from_tuple(error.schema_path, sp_basedepth + i)
if error.child_errors:
self._drop_nodes_from_errorpaths(error.child_errors,
dp_items, sp_items) | Removes nodes by index from an errorpath, relatively to the
basepaths of self.
:param errors: A list of :class:`errors.ValidationError` instances.
:param dp_items: A list of integers, pointing at the nodes to drop from
the :attr:`document_path`.
:param sp_items: Alike ``dp_items``, but for :attr:`schema_path`. | Below is the instruction that describes the task:
### Input:
Removes nodes by index from an errorpath, relatively to the
basepaths of self.
:param errors: A list of :class:`errors.ValidationError` instances.
:param dp_items: A list of integers, pointing at the nodes to drop from
the :attr:`document_path`.
:param sp_items: Alike ``dp_items``, but for :attr:`schema_path`.
### Response:
def _drop_nodes_from_errorpaths(self, _errors, dp_items, sp_items):
""" Removes nodes by index from an errorpath, relatively to the
basepaths of self.
:param errors: A list of :class:`errors.ValidationError` instances.
:param dp_items: A list of integers, pointing at the nodes to drop from
the :attr:`document_path`.
:param sp_items: Alike ``dp_items``, but for :attr:`schema_path`.
"""
dp_basedepth = len(self.document_path)
sp_basedepth = len(self.schema_path)
for error in _errors:
for i in sorted(dp_items, reverse=True):
error.document_path = \
drop_item_from_tuple(error.document_path, dp_basedepth + i)
for i in sorted(sp_items, reverse=True):
error.schema_path = \
drop_item_from_tuple(error.schema_path, sp_basedepth + i)
if error.child_errors:
self._drop_nodes_from_errorpaths(error.child_errors,
dp_items, sp_items) |
def sqs_delete_queue(queue_url, client=None):
"""This deletes an SQS queue given its URL
Parameters
----------
queue_url : str
The SQS URL of the queue to delete.
client : boto3.Client or None
If None, this function will instantiate a new `boto3.Client` object to
use in its operations. Alternatively, pass in an existing `boto3.Client`
instance to re-use it here.
Returns
-------
bool
True if the queue was deleted successfully. False otherwise.
"""
if not client:
client = boto3.client('sqs')
try:
client.delete_queue(QueueUrl=queue_url)
return True
except Exception as e:
LOGEXCEPTION('could not delete the specified queue: %s'
% (queue_url,))
return False | This deletes an SQS queue given its URL
Parameters
----------
queue_url : str
The SQS URL of the queue to delete.
client : boto3.Client or None
If None, this function will instantiate a new `boto3.Client` object to
use in its operations. Alternatively, pass in an existing `boto3.Client`
instance to re-use it here.
Returns
-------
bool
        True if the queue was deleted successfully. False otherwise. | Below is the instruction that describes the task:
### Input:
This deletes an SQS queue given its URL
Parameters
----------
queue_url : str
The SQS URL of the queue to delete.
client : boto3.Client or None
If None, this function will instantiate a new `boto3.Client` object to
use in its operations. Alternatively, pass in an existing `boto3.Client`
instance to re-use it here.
Returns
-------
bool
True if the queue was deleted successfully. False otherwise.
### Response:
def sqs_delete_queue(queue_url, client=None):
"""This deletes an SQS queue given its URL
Parameters
----------
queue_url : str
The SQS URL of the queue to delete.
client : boto3.Client or None
If None, this function will instantiate a new `boto3.Client` object to
use in its operations. Alternatively, pass in an existing `boto3.Client`
instance to re-use it here.
Returns
-------
bool
True if the queue was deleted successfully. False otherwise.
"""
if not client:
client = boto3.client('sqs')
try:
client.delete_queue(QueueUrl=queue_url)
return True
except Exception as e:
LOGEXCEPTION('could not delete the specified queue: %s'
% (queue_url,))
return False |
def _rewrite_paths_in_file(config_file, paths_to_replace):
"""
Rewrite paths in config files to match convention job_xxxx/symlink
Requires path to run_xxxx/input/config_file and a list of paths_to_replace
"""
lines = []
# make a copy of config
import shutil
shutil.copyfile(config_file, str(config_file + '_original'))
with open(config_file) as infile:
for line in infile:
for old_path in paths_to_replace:
if old_path in line:
new_path = os.path.split(old_path)[-1]
line = line.replace(old_path, new_path)
logger.debug("Changed path {0} ---> {1} in file {2}".format(old_path, new_path, config_file))
lines.append(line)
with open(config_file, 'w') as outfile:
for line in lines:
outfile.write(line) | Rewrite paths in config files to match convention job_xxxx/symlink
    Requires path to run_xxxx/input/config_file and a list of paths_to_replace | Below is the instruction that describes the task:
### Input:
Rewrite paths in config files to match convention job_xxxx/symlink
Requires path to run_xxxx/input/config_file and a list of paths_to_replace
### Response:
def _rewrite_paths_in_file(config_file, paths_to_replace):
"""
Rewrite paths in config files to match convention job_xxxx/symlink
Requires path to run_xxxx/input/config_file and a list of paths_to_replace
"""
lines = []
# make a copy of config
import shutil
shutil.copyfile(config_file, str(config_file + '_original'))
with open(config_file) as infile:
for line in infile:
for old_path in paths_to_replace:
if old_path in line:
new_path = os.path.split(old_path)[-1]
line = line.replace(old_path, new_path)
logger.debug("Changed path {0} ---> {1} in file {2}".format(old_path, new_path, config_file))
lines.append(line)
with open(config_file, 'w') as outfile:
for line in lines:
outfile.write(line) |
def __write_text(self, outfile):
    """
    Write text information into file
    This method should be called only from ``write_idat`` method
    or chunk order will be ruined.
    """
    for k, v in self.text.items():
        # Choose the chunk type from how the value encodes: latin-1 fits
        # a tEXt chunk, anything wider goes into an iTXt chunk as UTF-8.
        if not isinstance(v, bytes):
            try:
                international = False
                v = v.encode('latin-1')
            except UnicodeEncodeError:
                international = True
                v = v.encode('utf-8')
        else:
            international = False
        if not isinstance(k, bytes):
            k = strtobytes(k)
        if international:
            # No compress, language tag or translated keyword for now
            # presumably: keyword NUL, compression flag/method, empty
            # language tag NUL, empty translated keyword NUL, then text
            # -- confirm against the PNG iTXt layout.
            write_chunk(outfile, 'iTXt', k + zerobyte +
                        zerobyte + zerobyte +
                        zerobyte + zerobyte + v)
        else:
            write_chunk(outfile, 'tEXt', k + zerobyte + v) | Write text information into file
This method should be called only from ``write_idat`` method
    or chunk order will be ruined. | Below is the instruction that describes the task:
### Input:
Write text information into file
This method should be called only from ``write_idat`` method
or chunk order will be ruined.
### Response:
def __write_text(self, outfile):
"""
Write text information into file
This method should be called only from ``write_idat`` method
or chunk order will be ruined.
"""
for k, v in self.text.items():
if not isinstance(v, bytes):
try:
international = False
v = v.encode('latin-1')
except UnicodeEncodeError:
international = True
v = v.encode('utf-8')
else:
international = False
if not isinstance(k, bytes):
k = strtobytes(k)
if international:
# No compress, language tag or translated keyword for now
write_chunk(outfile, 'iTXt', k + zerobyte +
zerobyte + zerobyte +
zerobyte + zerobyte + v)
else:
write_chunk(outfile, 'tEXt', k + zerobyte + v) |
def read_response(self, delegate: httputil.HTTPMessageDelegate) -> Awaitable[bool]:
"""Read a single HTTP response.
Typical client-mode usage is to write a request using `write_headers`,
`write`, and `finish`, and then call ``read_response``.
:arg delegate: a `.HTTPMessageDelegate`
Returns a `.Future` that resolves to a bool after the full response has
been read. The result is true if the stream is still open.
"""
if self.params.decompress:
delegate = _GzipMessageDelegate(delegate, self.params.chunk_size)
return self._read_message(delegate) | Read a single HTTP response.
Typical client-mode usage is to write a request using `write_headers`,
`write`, and `finish`, and then call ``read_response``.
:arg delegate: a `.HTTPMessageDelegate`
Returns a `.Future` that resolves to a bool after the full response has
    been read. The result is true if the stream is still open. | Below is the instruction that describes the task:
### Input:
Read a single HTTP response.
Typical client-mode usage is to write a request using `write_headers`,
`write`, and `finish`, and then call ``read_response``.
:arg delegate: a `.HTTPMessageDelegate`
Returns a `.Future` that resolves to a bool after the full response has
been read. The result is true if the stream is still open.
### Response:
def read_response(self, delegate: httputil.HTTPMessageDelegate) -> Awaitable[bool]:
"""Read a single HTTP response.
Typical client-mode usage is to write a request using `write_headers`,
`write`, and `finish`, and then call ``read_response``.
:arg delegate: a `.HTTPMessageDelegate`
Returns a `.Future` that resolves to a bool after the full response has
been read. The result is true if the stream is still open.
"""
if self.params.decompress:
delegate = _GzipMessageDelegate(delegate, self.params.chunk_size)
return self._read_message(delegate) |
def _get_video_id(self, url=None):
"""
Extract video id. It will try to avoid making an HTTP request
if it can find the ID in the URL, but otherwise it will try
to scrape it from the HTML document. Returns None in case it's
unable to extract the ID at all.
"""
if url:
html_data = self.http.request("get", url).text
else:
html_data = self.get_urldata()
html_data = self.get_urldata()
match = re.search(r'data-video-id="([0-9]+)"', html_data)
if match:
return match.group(1)
match = re.search(r'data-videoid="([0-9]+)', html_data)
if match:
return match.group(1)
match = re.search(r'"mediaGuid":"([0-9]+)"', html_data)
if match:
return match.group(1)
clips = False
slug = None
match = re.search('params":({.*}),"query', self.get_urldata())
if match:
jansson = json.loads(match.group(1))
if "seasonNumberOrVideoId" in jansson:
season = jansson["seasonNumberOrVideoId"]
match = re.search(r"\w-(\d+)$", season)
if match:
season = match.group(1)
else:
match = self._conentpage(self.get_urldata())
if match: # this only happen on the program page?
janson2 = json.loads(match.group(1))
if janson2["formatPage"]["format"]:
season = janson2["formatPage"]["format"]["seasonNumber"]
return janson2["formatPage"]["format"]["videos"][str(season)]["program"][0]["id"]
return None
if "videoIdOrEpisodeNumber" in jansson:
videp = jansson["videoIdOrEpisodeNumber"]
match = re.search(r'(\w+)-(\d+)', videp)
if match:
episodenr = match.group(2)
else:
episodenr = videp
clips = True
match = re.search(r'(s\w+)-(\d+)', season)
if match:
season = match.group(2)
else:
# sometimes videoIdOrEpisodeNumber does not work.. this is a workaround
match = re.search(r'(episode|avsnitt)-(\d+)', self.url)
if match:
episodenr = match.group(2)
else:
episodenr = season
if "slug" in jansson:
slug = jansson["slug"]
if clips:
return episodenr
else:
match = self._conentpage(self.get_urldata())
if match:
janson = json.loads(match.group(1))
for i in janson["formatPage"]["format"]["videos"].keys():
if "program" in janson["formatPage"]["format"]["videos"][str(i)]:
for n in janson["formatPage"]["format"]["videos"][i]["program"]:
if str(n["episodeNumber"]) and int(episodenr) == n["episodeNumber"] and int(season) == n["seasonNumber"]:
if slug is None or slug == n["formatSlug"]:
return n["id"]
elif n["id"] == episodenr:
return episodenr
parse = urlparse(self.url)
match = re.search(r'/\w+/(\d+)', parse.path)
if match:
return match.group(1)
match = re.search(r'iframe src="http://play.juicyplay.se[^\"]+id=(\d+)', html_data)
if match:
return match.group(1)
match = re.search(r'<meta property="og:image" content="([\S]+)"', html_data)
if match:
return match.group(1).split("/")[-2]
return None | Extract video id. It will try to avoid making an HTTP request
if it can find the ID in the URL, but otherwise it will try
to scrape it from the HTML document. Returns None in case it's
    unable to extract the ID at all. | Below is the instruction that describes the task:
### Input:
Extract video id. It will try to avoid making an HTTP request
if it can find the ID in the URL, but otherwise it will try
to scrape it from the HTML document. Returns None in case it's
unable to extract the ID at all.
### Response:
def _get_video_id(self, url=None):
"""
Extract video id. It will try to avoid making an HTTP request
if it can find the ID in the URL, but otherwise it will try
to scrape it from the HTML document. Returns None in case it's
unable to extract the ID at all.
"""
if url:
html_data = self.http.request("get", url).text
else:
html_data = self.get_urldata()
html_data = self.get_urldata()
match = re.search(r'data-video-id="([0-9]+)"', html_data)
if match:
return match.group(1)
match = re.search(r'data-videoid="([0-9]+)', html_data)
if match:
return match.group(1)
match = re.search(r'"mediaGuid":"([0-9]+)"', html_data)
if match:
return match.group(1)
clips = False
slug = None
match = re.search('params":({.*}),"query', self.get_urldata())
if match:
jansson = json.loads(match.group(1))
if "seasonNumberOrVideoId" in jansson:
season = jansson["seasonNumberOrVideoId"]
match = re.search(r"\w-(\d+)$", season)
if match:
season = match.group(1)
else:
match = self._conentpage(self.get_urldata())
if match: # this only happen on the program page?
janson2 = json.loads(match.group(1))
if janson2["formatPage"]["format"]:
season = janson2["formatPage"]["format"]["seasonNumber"]
return janson2["formatPage"]["format"]["videos"][str(season)]["program"][0]["id"]
return None
if "videoIdOrEpisodeNumber" in jansson:
videp = jansson["videoIdOrEpisodeNumber"]
match = re.search(r'(\w+)-(\d+)', videp)
if match:
episodenr = match.group(2)
else:
episodenr = videp
clips = True
match = re.search(r'(s\w+)-(\d+)', season)
if match:
season = match.group(2)
else:
# sometimes videoIdOrEpisodeNumber does not work.. this is a workaround
match = re.search(r'(episode|avsnitt)-(\d+)', self.url)
if match:
episodenr = match.group(2)
else:
episodenr = season
if "slug" in jansson:
slug = jansson["slug"]
if clips:
return episodenr
else:
match = self._conentpage(self.get_urldata())
if match:
janson = json.loads(match.group(1))
for i in janson["formatPage"]["format"]["videos"].keys():
if "program" in janson["formatPage"]["format"]["videos"][str(i)]:
for n in janson["formatPage"]["format"]["videos"][i]["program"]:
if str(n["episodeNumber"]) and int(episodenr) == n["episodeNumber"] and int(season) == n["seasonNumber"]:
if slug is None or slug == n["formatSlug"]:
return n["id"]
elif n["id"] == episodenr:
return episodenr
parse = urlparse(self.url)
match = re.search(r'/\w+/(\d+)', parse.path)
if match:
return match.group(1)
match = re.search(r'iframe src="http://play.juicyplay.se[^\"]+id=(\d+)', html_data)
if match:
return match.group(1)
match = re.search(r'<meta property="og:image" content="([\S]+)"', html_data)
if match:
return match.group(1).split("/")[-2]
return None |
def _readmodule(module, path, inpackage=None):
'''Do the hard work for readmodule[_ex].
If INPACKAGE is given, it must be the dotted name of the package in
which we are searching for a submodule, and then PATH must be the
package search path; otherwise, we are searching for a top-level
module, and PATH is combined with sys.path.
'''
# Compute the full module name (prepending inpackage if set)
if inpackage is not None:
fullmodule = "%s.%s" % (inpackage, module)
else:
fullmodule = module
# Check in the cache
if fullmodule in _modules:
return _modules[fullmodule]
# Initialize the dict for this module's contents
dict = OrderedDict()
# Check if it is a built-in module; we don't do much for these
if module in sys.builtin_module_names and inpackage is None:
_modules[module] = dict
return dict
# Check for a dotted module name
i = module.rfind('.')
if i >= 0:
package = module[:i]
submodule = module[i + 1:]
parent = _readmodule(package, path, inpackage)
if inpackage is not None:
package = "%s.%s" % (inpackage, package)
return _readmodule(submodule, parent['__path__'], package)
# Search the path for the module
f = None
if inpackage is not None:
f, fname, (_s, _m, ty) = imp.find_module(module, path)
else:
f, fname, (_s, _m, ty) = imp.find_module(module, path + sys.path)
if ty == imp.PKG_DIRECTORY:
dict['__path__'] = [fname]
path = [fname] + path
f, fname, (_s, _m, ty) = imp.find_module('__init__', [fname])
_modules[fullmodule] = dict
if ty != imp.PY_SOURCE:
# not Python source, can't do anything with this module
f.close()
return dict
stack = [] # stack of (class, indent) pairs
g = tokenize.generate_tokens(f.readline)
try:
for tokentype, token, start, _end, _line in g:
if tokentype == DEDENT:
lineno, thisindent = start
# close nested classes and defs
while stack and stack[-1][1] >= thisindent:
del stack[-1]
elif token == 'def':
lineno, thisindent = start
# close previous nested classes and defs
while stack and stack[-1][1] >= thisindent:
del stack[-1]
tokentype, meth_name, start = g.next()[0:3]
if tokentype != NAME:
continue # Syntax error
if stack:
cur_class = stack[-1][0]
if isinstance(cur_class, Class):
# it's a method
cur_class._addmethod(meth_name, lineno)
# else it's a nested def
else:
# it's a function
dict[meth_name] = Function(fullmodule, meth_name,
fname, lineno)
stack.append((None, thisindent)) # Marker for nested fns
elif token == 'class':
lineno, thisindent = start
# close previous nested classes and defs
while stack and stack[-1][1] >= thisindent:
del stack[-1]
tokentype, class_name, start = g.next()[0:3]
if tokentype != NAME:
continue # Syntax error
# parse what follows the class name
tokentype, token, start = g.next()[0:3]
inherit = None
if token == '(':
names = [] # List of superclasses
# there's a list of superclasses
level = 1
super = [] # Tokens making up current superclass
while True:
tokentype, token, start = g.next()[0:3]
if token in (')', ',') and level == 1:
n = "".join(super)
if n in dict:
# we know this super class
n = dict[n]
else:
c = n.split('.')
if len(c) > 1:
# super class is of the form
# module.class: look in module for
# class
m = c[-2]
c = c[-1]
if m in _modules:
d = _modules[m]
if c in d:
n = d[c]
names.append(n)
super = []
if token == '(':
level += 1
elif token == ')':
level -= 1
if level == 0:
break
elif token == ',' and level == 1:
pass
# only use NAME and OP (== dot) tokens for type name
elif tokentype in (NAME, OP) and level == 1:
super.append(token)
# expressions in the base list are not supported
inherit = names
cur_class = Class(fullmodule, class_name, inherit,
fname, lineno)
if not stack:
dict[class_name] = cur_class
stack.append((cur_class, thisindent))
elif token == 'import' and start[1] == 0:
modules = _getnamelist(g)
for mod, _mod2 in modules:
try:
# Recursively read the imported module
if inpackage is None:
_readmodule(mod, path)
else:
try:
_readmodule(mod, path, inpackage)
except ImportError:
_readmodule(mod, [])
except:
# If we can't find or parse the imported module,
# too bad -- don't die here.
pass
elif token == 'from' and start[1] == 0:
mod, token = _getname(g)
if not mod or token != "import":
continue
names = _getnamelist(g)
try:
# Recursively read the imported module
d = _readmodule(mod, path, inpackage)
except:
# If we can't find or parse the imported module,
# too bad -- don't die here.
continue
# add any classes that were defined in the imported module
# to our name space if they were mentioned in the list
for n, n2 in names:
if n in d:
dict[n2 or n] = d[n]
elif n == '*':
# don't add names that start with _
for n in d:
if n[0] != '_':
dict[n] = d[n]
elif tokentype == NAME and start[1] == 0:
name = token
line = _line
tokentype, token = g.next()[0:2]
if tokentype == OP and token == "=":
dict[name] = Global(fullmodule, name, fname, _line)
except StopIteration:
pass
f.close()
return dict | Do the hard work for readmodule[_ex].
If INPACKAGE is given, it must be the dotted name of the package in
which we are searching for a submodule, and then PATH must be the
package search path; otherwise, we are searching for a top-level
    module, and PATH is combined with sys.path. | Below is the instruction that describes the task:
### Input:
Do the hard work for readmodule[_ex].
If INPACKAGE is given, it must be the dotted name of the package in
which we are searching for a submodule, and then PATH must be the
package search path; otherwise, we are searching for a top-level
module, and PATH is combined with sys.path.
### Response:
def _readmodule(module, path, inpackage=None):
'''Do the hard work for readmodule[_ex].
If INPACKAGE is given, it must be the dotted name of the package in
which we are searching for a submodule, and then PATH must be the
package search path; otherwise, we are searching for a top-level
module, and PATH is combined with sys.path.
'''
# Compute the full module name (prepending inpackage if set)
if inpackage is not None:
fullmodule = "%s.%s" % (inpackage, module)
else:
fullmodule = module
# Check in the cache
if fullmodule in _modules:
return _modules[fullmodule]
# Initialize the dict for this module's contents
dict = OrderedDict()
# Check if it is a built-in module; we don't do much for these
if module in sys.builtin_module_names and inpackage is None:
_modules[module] = dict
return dict
# Check for a dotted module name
i = module.rfind('.')
if i >= 0:
package = module[:i]
submodule = module[i + 1:]
parent = _readmodule(package, path, inpackage)
if inpackage is not None:
package = "%s.%s" % (inpackage, package)
return _readmodule(submodule, parent['__path__'], package)
# Search the path for the module
f = None
if inpackage is not None:
f, fname, (_s, _m, ty) = imp.find_module(module, path)
else:
f, fname, (_s, _m, ty) = imp.find_module(module, path + sys.path)
if ty == imp.PKG_DIRECTORY:
dict['__path__'] = [fname]
path = [fname] + path
f, fname, (_s, _m, ty) = imp.find_module('__init__', [fname])
_modules[fullmodule] = dict
if ty != imp.PY_SOURCE:
# not Python source, can't do anything with this module
f.close()
return dict
stack = [] # stack of (class, indent) pairs
g = tokenize.generate_tokens(f.readline)
try:
for tokentype, token, start, _end, _line in g:
if tokentype == DEDENT:
lineno, thisindent = start
# close nested classes and defs
while stack and stack[-1][1] >= thisindent:
del stack[-1]
elif token == 'def':
lineno, thisindent = start
# close previous nested classes and defs
while stack and stack[-1][1] >= thisindent:
del stack[-1]
tokentype, meth_name, start = g.next()[0:3]
if tokentype != NAME:
continue # Syntax error
if stack:
cur_class = stack[-1][0]
if isinstance(cur_class, Class):
# it's a method
cur_class._addmethod(meth_name, lineno)
# else it's a nested def
else:
# it's a function
dict[meth_name] = Function(fullmodule, meth_name,
fname, lineno)
stack.append((None, thisindent)) # Marker for nested fns
elif token == 'class':
lineno, thisindent = start
# close previous nested classes and defs
while stack and stack[-1][1] >= thisindent:
del stack[-1]
tokentype, class_name, start = g.next()[0:3]
if tokentype != NAME:
continue # Syntax error
# parse what follows the class name
tokentype, token, start = g.next()[0:3]
inherit = None
if token == '(':
names = [] # List of superclasses
# there's a list of superclasses
level = 1
super = [] # Tokens making up current superclass
while True:
tokentype, token, start = g.next()[0:3]
if token in (')', ',') and level == 1:
n = "".join(super)
if n in dict:
# we know this super class
n = dict[n]
else:
c = n.split('.')
if len(c) > 1:
# super class is of the form
# module.class: look in module for
# class
m = c[-2]
c = c[-1]
if m in _modules:
d = _modules[m]
if c in d:
n = d[c]
names.append(n)
super = []
if token == '(':
level += 1
elif token == ')':
level -= 1
if level == 0:
break
elif token == ',' and level == 1:
pass
# only use NAME and OP (== dot) tokens for type name
elif tokentype in (NAME, OP) and level == 1:
super.append(token)
# expressions in the base list are not supported
inherit = names
cur_class = Class(fullmodule, class_name, inherit,
fname, lineno)
if not stack:
dict[class_name] = cur_class
stack.append((cur_class, thisindent))
elif token == 'import' and start[1] == 0:
modules = _getnamelist(g)
for mod, _mod2 in modules:
try:
# Recursively read the imported module
if inpackage is None:
_readmodule(mod, path)
else:
try:
_readmodule(mod, path, inpackage)
except ImportError:
_readmodule(mod, [])
except:
# If we can't find or parse the imported module,
# too bad -- don't die here.
pass
elif token == 'from' and start[1] == 0:
mod, token = _getname(g)
if not mod or token != "import":
continue
names = _getnamelist(g)
try:
# Recursively read the imported module
d = _readmodule(mod, path, inpackage)
except:
# If we can't find or parse the imported module,
# too bad -- don't die here.
continue
# add any classes that were defined in the imported module
# to our name space if they were mentioned in the list
for n, n2 in names:
if n in d:
dict[n2 or n] = d[n]
elif n == '*':
# don't add names that start with _
for n in d:
if n[0] != '_':
dict[n] = d[n]
elif tokentype == NAME and start[1] == 0:
name = token
line = _line
tokentype, token = g.next()[0:2]
if tokentype == OP and token == "=":
dict[name] = Global(fullmodule, name, fname, _line)
except StopIteration:
pass
f.close()
return dict |
def dollar_signs(value, failure_string='N/A'):
    """
    Converts an integer into the corresponding number of dollar sign symbols.
    If the submitted value can't be converted to an integer, returns the
    `failure_string` keyword argument.
    Meant to emulate the illustration of price range on Yelp.
    """
    try:
        count = int(value)
    except (TypeError, ValueError):
        # FIX: TypeError covers non-numeric objects such as None; the
        # original only caught ValueError and crashed on those inputs.
        return failure_string
    # String repetition replaces the old manual accumulation loop.
    return '$' * count
If the submitted value isn't a string, returns the `failure_string` keyword
argument.
    Meant to emulate the illustration of price range on Yelp. | Below is the instruction that describes the task:
### Input:
Converts an integer into the corresponding number of dollar sign symbols.
If the submitted value isn't a string, returns the `failure_string` keyword
argument.
Meant to emulate the illustration of price range on Yelp.
### Response:
def dollar_signs(value, failure_string='N/A'):
"""
Converts an integer into the corresponding number of dollar sign symbols.
If the submitted value isn't a string, returns the `failure_string` keyword
argument.
Meant to emulate the illustration of price range on Yelp.
"""
try:
count = int(value)
except ValueError:
return failure_string
string = ''
for i in range(0, count):
string += '$'
return string |
def match_type_by_name(expected_type, actual_type):
    """
    Matches expected type to an actual type.
    :param expected_type: an expected type name to match.
    :param actual_type: an actual type to match defined by type code.
    :return: true if types are matching and false if they don't.
    """
    if expected_type is None:
        return True
    if actual_type is None:
        raise Exception("Actual type cannot be null")
    expected_type = expected_type.lower()
    if actual_type.__name__.lower() == expected_type:
        return True
    elif expected_type == "object":
        return True
    elif expected_type == "int" or expected_type == "integer":
        return issubclass(actual_type, int)  # or issubclass(actual_type, long)
    elif expected_type == "long":
        return issubclass(actual_type, int)
    elif expected_type == "float" or expected_type == "double":
        return issubclass(actual_type, float)
    elif expected_type == "string":
        return issubclass(actual_type, str)  # or issubclass(actual_type, unicode)
    elif expected_type == "bool" or expected_type == "boolean":
        return issubclass(actual_type, bool)
    elif expected_type == "date" or expected_type == "datetime":
        # FIX: was `issubclass(actual_type. datetime.date)` -- a comma
        # mistyped as a period, which raised AttributeError at runtime.
        return issubclass(actual_type, datetime.datetime) or issubclass(actual_type, datetime.date)
    elif expected_type == "timespan" or expected_type == "duration":
        return issubclass(actual_type, int) or issubclass(actual_type, float)
    elif expected_type == "enum":
        return issubclass(actual_type, str) or issubclass(actual_type, int)
    elif expected_type == "map" or expected_type == "dict" or expected_type == "dictionary":
        return issubclass(actual_type, dict)
    elif expected_type == "array" or expected_type == "list":
        return issubclass(actual_type, list) or issubclass(actual_type, tuple) or issubclass(actual_type, set)
    elif expected_type.endswith("[]"):
        # Todo: Check subtype
        return issubclass(actual_type, list) or issubclass(actual_type, tuple) or issubclass(actual_type, set)
    else:
        return False
:param expected_type: an expected type name to match.
:param actual_type: an actual type to match defined by type code.
    :return: true if types are matching and false if they don't. | Below is the instruction that describes the task:
### Input:
Matches expected type to an actual type.
:param expected_type: an expected type name to match.
:param actual_type: an actual type to match defined by type code.
:return: true if types are matching and false if they don't.
### Response:
def match_type_by_name(expected_type, actual_type):
"""
Matches expected type to an actual type.
:param expected_type: an expected type name to match.
:param actual_type: an actual type to match defined by type code.
:return: true if types are matching and false if they don't.
"""
if expected_type == None:
return True
if actual_type == None:
raise Exception("Actual type cannot be null")
expected_type = expected_type.lower()
if actual_type.__name__.lower() == expected_type:
return True
elif expected_type == "object":
return True
elif expected_type == "int" or expected_type == "integer":
return issubclass(actual_type, int) #or issubclass(actual_type, long)
elif expected_type == "long":
return issubclass(actual_type, int)
elif expected_type == "float" or expected_type == "double":
return issubclass(actual_type, float)
elif expected_type == "string":
return issubclass(actual_type, str) #or issubclass(actual_type, unicode)
elif expected_type == "bool" or expected_type == "boolean":
return issubclass(actual_type, bool)
elif expected_type == "date" or expected_type == "datetime":
return issubclass(actual_type, datetime.datetime) or issubclass(actual_type. datetime.date)
elif expected_type == "timespan" or expected_type == "duration":
return issubclass(actual_type, int) or issubclass(actual_type, float)
elif expected_type == "enum":
return issubclass(actual_type, str) or issubclass(actual_type, int)
elif expected_type == "map" or expected_type == "dict" or expected_type == "dictionary":
return issubclass(actual_type, dict)
elif expected_type == "array" or expected_type == "list":
return issubclass(actual_type, list) or issubclass(actual_type, tuple) or issubclass(actual_type, set)
elif expected_type.endswith("[]"):
# Todo: Check subtype
return issubclass(actual_type, list) or issubclass(actual_type, tuple) or issubclass(actual_type, set)
else:
return False |
def stake(confidence, value, tournament):
    """Participate in the staking competition."""
    # Delegates to the numerapi client and echoes the API response.
    click.echo(napi.stake(confidence, value, tournament)) | Participate in the staking competition. | Below is the the instruction that describes the task:
### Input:
Participate in the staking competition.
### Response:
def stake(confidence, value, tournament):
"""Participate in the staking competition."""
click.echo(napi.stake(confidence, value, tournament)) |
def _setup_log_prefix(self, plugin_id=''):
"""Setup custom warning notification."""
self._logger_console_fmtter.prefix = '%s: ' % plugin_id
self._logger_console_fmtter.plugin_id = plugin_id
self._logger_file_fmtter.prefix = '*'
self._logger_file_fmtter.plugin_id = '%s: ' % plugin_id | Setup custom warning notification. | Below is the the instruction that describes the task:
### Input:
Setup custom warning notification.
### Response:
def _setup_log_prefix(self, plugin_id=''):
"""Setup custom warning notification."""
self._logger_console_fmtter.prefix = '%s: ' % plugin_id
self._logger_console_fmtter.plugin_id = plugin_id
self._logger_file_fmtter.prefix = '*'
self._logger_file_fmtter.plugin_id = '%s: ' % plugin_id |
def find_libs():
"""
Run through L{ETREE_MODULES} and find C{ElementTree} implementations so
that any type can be encoded.
We work through the C implementations first, then the pure Python versions.
The downside to this is that B{all} libraries will be imported but I{only}
one is ever used. The libs are small (relatively) and the flexibility that
this gives seems to outweigh the cost. Time will tell.
"""
from pyamf.util import get_module
types = []
mapping = {}
for mod in ETREE_MODULES:
try:
etree = get_module(mod)
except ImportError:
continue
t = _get_etree_type(etree)
types.append(t)
mapping[t] = etree
return tuple(types), mapping | Run through L{ETREE_MODULES} and find C{ElementTree} implementations so
that any type can be encoded.
We work through the C implementations first, then the pure Python versions.
The downside to this is that B{all} libraries will be imported but I{only}
one is ever used. The libs are small (relatively) and the flexibility that
    this gives seems to outweigh the cost. Time will tell. | Below is the instruction that describes the task:
### Input:
Run through L{ETREE_MODULES} and find C{ElementTree} implementations so
that any type can be encoded.
We work through the C implementations first, then the pure Python versions.
The downside to this is that B{all} libraries will be imported but I{only}
one is ever used. The libs are small (relatively) and the flexibility that
this gives seems to outweigh the cost. Time will tell.
### Response:
def find_libs():
"""
Run through L{ETREE_MODULES} and find C{ElementTree} implementations so
that any type can be encoded.
We work through the C implementations first, then the pure Python versions.
The downside to this is that B{all} libraries will be imported but I{only}
one is ever used. The libs are small (relatively) and the flexibility that
this gives seems to outweigh the cost. Time will tell.
"""
from pyamf.util import get_module
types = []
mapping = {}
for mod in ETREE_MODULES:
try:
etree = get_module(mod)
except ImportError:
continue
t = _get_etree_type(etree)
types.append(t)
mapping[t] = etree
return tuple(types), mapping |
def get_content_object(self, page, language, ctype):
"""Gets the latest published :class:`Content <pages.models.Content>`
for a particular page, language and placeholder type."""
params = {
'language': language,
'type': ctype,
'page': None if page is fake_page else page
}
if page.freeze_date:
params['creation_date__lte'] = page.freeze_date
return self.filter(**params).latest() | Gets the latest published :class:`Content <pages.models.Content>`
for a particular page, language and placeholder type. | Below is the the instruction that describes the task:
### Input:
Gets the latest published :class:`Content <pages.models.Content>`
for a particular page, language and placeholder type.
### Response:
def get_content_object(self, page, language, ctype):
"""Gets the latest published :class:`Content <pages.models.Content>`
for a particular page, language and placeholder type."""
params = {
'language': language,
'type': ctype,
'page': None if page is fake_page else page
}
if page.freeze_date:
params['creation_date__lte'] = page.freeze_date
return self.filter(**params).latest() |
def task_loop(tasks, execute, wait=None, store=TaskStore()):
"""
The inner task loop for a task runner.
execute: A function that runs a task. It should take a task as its
sole argument, and may optionally return a TaskResult.
wait: (optional, None) A function to run whenever there aren't any
runnable tasks (but there are still tasks listed as running).
If given, this function should take no arguments, and should
return an iterable of TaskResults.
"""
completed = set()
failed = set()
exceptions = []
def collect(task):
args = []
kwargs = {}
for arg in task.args:
if isinstance(arg, Task):
args.append(store.get(arg.name))
else:
args.append(arg)
for key in task.kwargs:
if isinstance(task.kwargs[key], Task):
kwargs[key] = store.get(task.kwargs[key].name)
else:
kwargs[key] = task.kwargs[key]
return args, kwargs
def complete(scheduler, result):
store.put(result.name, result.data)
scheduler.end_task(result.name, result.successful)
if result.exception:
exceptions.append(result.exception)
with Scheduler(tasks, completed=completed, failed=failed) as scheduler:
while not scheduler.is_finished():
task = scheduler.start_task()
while task is not None:
# Collect any dependent results
args, kwargs = collect(task)
func = partial(task.function, *args, **kwargs)
if task.handler:
func = partial(task.handler, func)
result = execute(func, task.name)
# result exists iff execute is synchroous
if result:
complete(scheduler, result)
task = scheduler.start_task()
if wait:
for result in wait():
complete(scheduler, result)
# TODO: if in debug mode print out all failed tasks?
return Results(completed, failed, exceptions) | The inner task loop for a task runner.
execute: A function that runs a task. It should take a task as its
sole argument, and may optionally return a TaskResult.
wait: (optional, None) A function to run whenever there aren't any
runnable tasks (but there are still tasks listed as running).
If given, this function should take no arguments, and should
return an iterable of TaskResults. | Below is the the instruction that describes the task:
### Input:
The inner task loop for a task runner.
execute: A function that runs a task. It should take a task as its
sole argument, and may optionally return a TaskResult.
wait: (optional, None) A function to run whenever there aren't any
runnable tasks (but there are still tasks listed as running).
If given, this function should take no arguments, and should
return an iterable of TaskResults.
### Response:
def task_loop(tasks, execute, wait=None, store=TaskStore()):
"""
The inner task loop for a task runner.
execute: A function that runs a task. It should take a task as its
sole argument, and may optionally return a TaskResult.
wait: (optional, None) A function to run whenever there aren't any
runnable tasks (but there are still tasks listed as running).
If given, this function should take no arguments, and should
return an iterable of TaskResults.
"""
completed = set()
failed = set()
exceptions = []
def collect(task):
args = []
kwargs = {}
for arg in task.args:
if isinstance(arg, Task):
args.append(store.get(arg.name))
else:
args.append(arg)
for key in task.kwargs:
if isinstance(task.kwargs[key], Task):
kwargs[key] = store.get(task.kwargs[key].name)
else:
kwargs[key] = task.kwargs[key]
return args, kwargs
def complete(scheduler, result):
store.put(result.name, result.data)
scheduler.end_task(result.name, result.successful)
if result.exception:
exceptions.append(result.exception)
with Scheduler(tasks, completed=completed, failed=failed) as scheduler:
while not scheduler.is_finished():
task = scheduler.start_task()
while task is not None:
# Collect any dependent results
args, kwargs = collect(task)
func = partial(task.function, *args, **kwargs)
if task.handler:
func = partial(task.handler, func)
result = execute(func, task.name)
# result exists iff execute is synchroous
if result:
complete(scheduler, result)
task = scheduler.start_task()
if wait:
for result in wait():
complete(scheduler, result)
# TODO: if in debug mode print out all failed tasks?
return Results(completed, failed, exceptions) |
def _ingest_number(self, input_number, path_to_root):
'''
a helper method for ingesting a number
:return: valid_number
'''
valid_number = 0.0
try:
valid_number = self._validate_number(input_number, path_to_root)
except:
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
if 'default_value' in self.keyMap[rules_path_to_root]:
valid_number = self.keyMap[rules_path_to_root]['default_value']
elif 'integer_data' in self.keyMap[rules_path_to_root].keys():
if self.keyMap[rules_path_to_root]['integer_data']:
valid_number = 0
return valid_number | a helper method for ingesting a number
:return: valid_number | Below is the the instruction that describes the task:
### Input:
a helper method for ingesting a number
:return: valid_number
### Response:
def _ingest_number(self, input_number, path_to_root):
'''
a helper method for ingesting a number
:return: valid_number
'''
valid_number = 0.0
try:
valid_number = self._validate_number(input_number, path_to_root)
except:
rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
if 'default_value' in self.keyMap[rules_path_to_root]:
valid_number = self.keyMap[rules_path_to_root]['default_value']
elif 'integer_data' in self.keyMap[rules_path_to_root].keys():
if self.keyMap[rules_path_to_root]['integer_data']:
valid_number = 0
return valid_number |
def touch(path):
"""Unix equivalent *touch*
@src: http://stackoverflow.com/a/1158096"""
import os
try:
OPEN_FUNC(path, 'a+').close()
except IOError:
os.utime(path, None) | Unix equivalent *touch*
@src: http://stackoverflow.com/a/1158096 | Below is the the instruction that describes the task:
### Input:
Unix equivalent *touch*
@src: http://stackoverflow.com/a/1158096
### Response:
def touch(path):
"""Unix equivalent *touch*
@src: http://stackoverflow.com/a/1158096"""
import os
try:
OPEN_FUNC(path, 'a+').close()
except IOError:
os.utime(path, None) |
def GetMessages(self, formatter_mediator, event):
"""Determines the formatted message strings for an event object.
Args:
formatter_mediator (FormatterMediator): mediates the interactions
between formatters and other components, such as storage and Windows
EventLog resources.
event (EventObject): event.
Returns:
tuple(str, str): formatted message string and short message string.
Raises:
WrongFormatter: if the event object cannot be formatted by the formatter.
"""
if self.DATA_TYPE != event.data_type:
raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
event.data_type))
event_values = event.CopyToDict()
trigger_type = event_values.get('trigger_type', None)
if trigger_type is not None:
event_values['trigger_type'] = self._TRIGGER_TYPES.get(
trigger_type, '0x{0:04x}'.format(trigger_type))
return self._ConditionalFormatMessages(event_values) | Determines the formatted message strings for an event object.
Args:
formatter_mediator (FormatterMediator): mediates the interactions
between formatters and other components, such as storage and Windows
EventLog resources.
event (EventObject): event.
Returns:
tuple(str, str): formatted message string and short message string.
Raises:
WrongFormatter: if the event object cannot be formatted by the formatter. | Below is the the instruction that describes the task:
### Input:
Determines the formatted message strings for an event object.
Args:
formatter_mediator (FormatterMediator): mediates the interactions
between formatters and other components, such as storage and Windows
EventLog resources.
event (EventObject): event.
Returns:
tuple(str, str): formatted message string and short message string.
Raises:
WrongFormatter: if the event object cannot be formatted by the formatter.
### Response:
def GetMessages(self, formatter_mediator, event):
"""Determines the formatted message strings for an event object.
Args:
formatter_mediator (FormatterMediator): mediates the interactions
between formatters and other components, such as storage and Windows
EventLog resources.
event (EventObject): event.
Returns:
tuple(str, str): formatted message string and short message string.
Raises:
WrongFormatter: if the event object cannot be formatted by the formatter.
"""
if self.DATA_TYPE != event.data_type:
raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
event.data_type))
event_values = event.CopyToDict()
trigger_type = event_values.get('trigger_type', None)
if trigger_type is not None:
event_values['trigger_type'] = self._TRIGGER_TYPES.get(
trigger_type, '0x{0:04x}'.format(trigger_type))
return self._ConditionalFormatMessages(event_values) |
def _python_rpath(self):
"""The relative path (from environment root) to python."""
# Windows virtualenv installation installs pip to the [Ss]cripts
# folder. Here's a simple check to support:
if sys.platform == 'win32':
return os.path.join('Scripts', 'python.exe')
return os.path.join('bin', 'python') | The relative path (from environment root) to python. | Below is the the instruction that describes the task:
### Input:
The relative path (from environment root) to python.
### Response:
def _python_rpath(self):
"""The relative path (from environment root) to python."""
# Windows virtualenv installation installs pip to the [Ss]cripts
# folder. Here's a simple check to support:
if sys.platform == 'win32':
return os.path.join('Scripts', 'python.exe')
return os.path.join('bin', 'python') |
def auto_reply_message(self):
""" The account's Internal auto reply message. Setting the value will change the auto reply message of the
account, automatically setting the status to enabled (but not altering the schedule). """
if self._auto_reply is None:
r = requests.get('https://outlook.office.com/api/v2.0/me/MailboxSettings/AutomaticRepliesSetting',
headers=self._headers)
check_response(r)
self._auto_reply = r.json().get('InternalReplyMessage')
return self._auto_reply | The account's Internal auto reply message. Setting the value will change the auto reply message of the
account, automatically setting the status to enabled (but not altering the schedule). | Below is the the instruction that describes the task:
### Input:
The account's Internal auto reply message. Setting the value will change the auto reply message of the
account, automatically setting the status to enabled (but not altering the schedule).
### Response:
def auto_reply_message(self):
""" The account's Internal auto reply message. Setting the value will change the auto reply message of the
account, automatically setting the status to enabled (but not altering the schedule). """
if self._auto_reply is None:
r = requests.get('https://outlook.office.com/api/v2.0/me/MailboxSettings/AutomaticRepliesSetting',
headers=self._headers)
check_response(r)
self._auto_reply = r.json().get('InternalReplyMessage')
return self._auto_reply |
def _apply_func_to_list_of_partitions(self, func, partitions, **kwargs):
"""Applies a function to a list of remote partitions.
Note: The main use for this is to preprocess the func.
Args:
func: The func to apply
partitions: The list of partitions
Returns:
A list of BaseFramePartition objects.
"""
preprocessed_func = self.preprocess_func(func)
return [obj.apply(preprocessed_func, **kwargs) for obj in partitions] | Applies a function to a list of remote partitions.
Note: The main use for this is to preprocess the func.
Args:
func: The func to apply
partitions: The list of partitions
Returns:
A list of BaseFramePartition objects. | Below is the the instruction that describes the task:
### Input:
Applies a function to a list of remote partitions.
Note: The main use for this is to preprocess the func.
Args:
func: The func to apply
partitions: The list of partitions
Returns:
A list of BaseFramePartition objects.
### Response:
def _apply_func_to_list_of_partitions(self, func, partitions, **kwargs):
"""Applies a function to a list of remote partitions.
Note: The main use for this is to preprocess the func.
Args:
func: The func to apply
partitions: The list of partitions
Returns:
A list of BaseFramePartition objects.
"""
preprocessed_func = self.preprocess_func(func)
return [obj.apply(preprocessed_func, **kwargs) for obj in partitions] |
def docs():
"""
Docs
"""
with safe_cd(SRC):
with safe_cd("docs"):
my_env = config_pythonpath()
command = "{0} make html".format(PIPENV).strip()
print(command)
execute_with_environment(command, env=my_env) | Docs | Below is the the instruction that describes the task:
### Input:
Docs
### Response:
def docs():
"""
Docs
"""
with safe_cd(SRC):
with safe_cd("docs"):
my_env = config_pythonpath()
command = "{0} make html".format(PIPENV).strip()
print(command)
execute_with_environment(command, env=my_env) |
def _default_output_dir():
"""Default output directory."""
try:
dataset_name = gin.query_parameter("inputs.dataset_name")
except ValueError:
dataset_name = "random"
dir_name = "{model_name}_{dataset_name}_{timestamp}".format(
model_name=gin.query_parameter("train.model").configurable.name,
dataset_name=dataset_name,
timestamp=datetime.datetime.now().strftime("%Y%m%d_%H%M"),
)
dir_path = os.path.join("~", "trax", dir_name)
print()
trax.log("No --output_dir specified")
return dir_path | Default output directory. | Below is the the instruction that describes the task:
### Input:
Default output directory.
### Response:
def _default_output_dir():
"""Default output directory."""
try:
dataset_name = gin.query_parameter("inputs.dataset_name")
except ValueError:
dataset_name = "random"
dir_name = "{model_name}_{dataset_name}_{timestamp}".format(
model_name=gin.query_parameter("train.model").configurable.name,
dataset_name=dataset_name,
timestamp=datetime.datetime.now().strftime("%Y%m%d_%H%M"),
)
dir_path = os.path.join("~", "trax", dir_name)
print()
trax.log("No --output_dir specified")
return dir_path |
def ltcube_sun(self, **kwargs):
""" return the name of a livetime cube file
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
self._replace_none(kwargs_copy)
localpath = NameFactory.ltcubesun_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
return self.fullpath(localpath=localpath)
return localpath | return the name of a livetime cube file | Below is the the instruction that describes the task:
### Input:
return the name of a livetime cube file
### Response:
def ltcube_sun(self, **kwargs):
""" return the name of a livetime cube file
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
self._replace_none(kwargs_copy)
localpath = NameFactory.ltcubesun_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
return self.fullpath(localpath=localpath)
return localpath |
def SelectFont(message="Select a font:", title='FontParts', allFonts=None):
"""
Select a font from all open fonts.
Optionally a `message`, `title` and `allFonts` can be provided.
If `allFonts` is `None` it will list all open fonts.
::
from fontParts.ui import SelectFont
font = SelectFont()
print(font)
"""
return dispatcher["SelectFont"](message=message, title=title, allFonts=allFonts) | Select a font from all open fonts.
Optionally a `message`, `title` and `allFonts` can be provided.
If `allFonts` is `None` it will list all open fonts.
::
from fontParts.ui import SelectFont
font = SelectFont()
print(font) | Below is the the instruction that describes the task:
### Input:
Select a font from all open fonts.
Optionally a `message`, `title` and `allFonts` can be provided.
If `allFonts` is `None` it will list all open fonts.
::
from fontParts.ui import SelectFont
font = SelectFont()
print(font)
### Response:
def SelectFont(message="Select a font:", title='FontParts', allFonts=None):
"""
Select a font from all open fonts.
Optionally a `message`, `title` and `allFonts` can be provided.
If `allFonts` is `None` it will list all open fonts.
::
from fontParts.ui import SelectFont
font = SelectFont()
print(font)
"""
return dispatcher["SelectFont"](message=message, title=title, allFonts=allFonts) |
def relcurveto(self, h1x, h1y, h2x, h2y, x, y):
'''Draws a curve relatively to the last point.
'''
if self._path is None:
raise ShoebotError(_("No current path. Use beginpath() first."))
self._path.relcurveto(h1x, h1y, h2x, h2y, x, y) | Draws a curve relatively to the last point. | Below is the the instruction that describes the task:
### Input:
Draws a curve relatively to the last point.
### Response:
def relcurveto(self, h1x, h1y, h2x, h2y, x, y):
'''Draws a curve relatively to the last point.
'''
if self._path is None:
raise ShoebotError(_("No current path. Use beginpath() first."))
self._path.relcurveto(h1x, h1y, h2x, h2y, x, y) |
def __EncodedAttribute_encode_gray8(self, gray8, width=0, height=0):
"""Encode a 8 bit grayscale image (no compression)
:param gray8: an object containning image information
:type gray8: :py:obj:`str` or :class:`numpy.ndarray` or seq< seq<element> >
:param width: image width. **MUST** be given if gray8 is a string or
if it is a :class:`numpy.ndarray` with ndims != 2.
Otherwise it is calculated internally.
:type width: :py:obj:`int`
:param height: image height. **MUST** be given if gray8 is a string
or if it is a :class:`numpy.ndarray` with ndims != 2.
Otherwise it is calculated internally.
:type height: :py:obj:`int`
.. note::
When :class:`numpy.ndarray` is given:
- gray8 **MUST** be CONTIGUOUS, ALIGNED
- if gray8.ndims != 2, width and height **MUST** be given and
gray8.nbytes **MUST** match width*height
- if gray8.ndims == 2, gray8.itemsize **MUST** be 1 (typically,
gray8.dtype is one of `numpy.dtype.byte`, `numpy.dtype.ubyte`,
`numpy.dtype.int8` or `numpy.dtype.uint8`)
Example::
def read_myattr(self, attr):
enc = tango.EncodedAttribute()
data = numpy.arange(100, dtype=numpy.byte)
data = numpy.array((data,data,data))
enc.encode_gray8(data)
attr.set_value(enc)
"""
self._generic_encode_gray8(gray8, width=width, height=height, format=_ImageFormat.RawImage) | Encode a 8 bit grayscale image (no compression)
:param gray8: an object containning image information
:type gray8: :py:obj:`str` or :class:`numpy.ndarray` or seq< seq<element> >
:param width: image width. **MUST** be given if gray8 is a string or
if it is a :class:`numpy.ndarray` with ndims != 2.
Otherwise it is calculated internally.
:type width: :py:obj:`int`
:param height: image height. **MUST** be given if gray8 is a string
or if it is a :class:`numpy.ndarray` with ndims != 2.
Otherwise it is calculated internally.
:type height: :py:obj:`int`
.. note::
When :class:`numpy.ndarray` is given:
- gray8 **MUST** be CONTIGUOUS, ALIGNED
- if gray8.ndims != 2, width and height **MUST** be given and
gray8.nbytes **MUST** match width*height
- if gray8.ndims == 2, gray8.itemsize **MUST** be 1 (typically,
gray8.dtype is one of `numpy.dtype.byte`, `numpy.dtype.ubyte`,
`numpy.dtype.int8` or `numpy.dtype.uint8`)
Example::
def read_myattr(self, attr):
enc = tango.EncodedAttribute()
data = numpy.arange(100, dtype=numpy.byte)
data = numpy.array((data,data,data))
enc.encode_gray8(data)
attr.set_value(enc) | Below is the the instruction that describes the task:
### Input:
Encode a 8 bit grayscale image (no compression)
:param gray8: an object containning image information
:type gray8: :py:obj:`str` or :class:`numpy.ndarray` or seq< seq<element> >
:param width: image width. **MUST** be given if gray8 is a string or
if it is a :class:`numpy.ndarray` with ndims != 2.
Otherwise it is calculated internally.
:type width: :py:obj:`int`
:param height: image height. **MUST** be given if gray8 is a string
or if it is a :class:`numpy.ndarray` with ndims != 2.
Otherwise it is calculated internally.
:type height: :py:obj:`int`
.. note::
When :class:`numpy.ndarray` is given:
- gray8 **MUST** be CONTIGUOUS, ALIGNED
- if gray8.ndims != 2, width and height **MUST** be given and
gray8.nbytes **MUST** match width*height
- if gray8.ndims == 2, gray8.itemsize **MUST** be 1 (typically,
gray8.dtype is one of `numpy.dtype.byte`, `numpy.dtype.ubyte`,
`numpy.dtype.int8` or `numpy.dtype.uint8`)
Example::
def read_myattr(self, attr):
enc = tango.EncodedAttribute()
data = numpy.arange(100, dtype=numpy.byte)
data = numpy.array((data,data,data))
enc.encode_gray8(data)
attr.set_value(enc)
### Response:
def __EncodedAttribute_encode_gray8(self, gray8, width=0, height=0):
"""Encode a 8 bit grayscale image (no compression)
:param gray8: an object containning image information
:type gray8: :py:obj:`str` or :class:`numpy.ndarray` or seq< seq<element> >
:param width: image width. **MUST** be given if gray8 is a string or
if it is a :class:`numpy.ndarray` with ndims != 2.
Otherwise it is calculated internally.
:type width: :py:obj:`int`
:param height: image height. **MUST** be given if gray8 is a string
or if it is a :class:`numpy.ndarray` with ndims != 2.
Otherwise it is calculated internally.
:type height: :py:obj:`int`
.. note::
When :class:`numpy.ndarray` is given:
- gray8 **MUST** be CONTIGUOUS, ALIGNED
- if gray8.ndims != 2, width and height **MUST** be given and
gray8.nbytes **MUST** match width*height
- if gray8.ndims == 2, gray8.itemsize **MUST** be 1 (typically,
gray8.dtype is one of `numpy.dtype.byte`, `numpy.dtype.ubyte`,
`numpy.dtype.int8` or `numpy.dtype.uint8`)
Example::
def read_myattr(self, attr):
enc = tango.EncodedAttribute()
data = numpy.arange(100, dtype=numpy.byte)
data = numpy.array((data,data,data))
enc.encode_gray8(data)
attr.set_value(enc)
"""
self._generic_encode_gray8(gray8, width=width, height=height, format=_ImageFormat.RawImage) |
def pull_core(self, **kwargs):
"""
Just the core of the pycurl logic.
"""
str_ip = self.str_ip
str_port = self.str_port
verbose = 0
d_msg = {}
for k,v in kwargs.items():
if k == 'ip': str_ip = v
if k == 'port': str_port = v
if k == 'msg': d_msg = v
if k == 'verbose': verbose = v
response = io.BytesIO()
str_query = ''
if len(d_msg):
d_meta = d_msg['meta']
str_query = '?%s' % urllib.parse.urlencode(d_msg)
str_URL = "%s://%s:%s%s%s" % (self.str_protocol, str_ip, str_port, self.str_URL, str_query)
self.dp.qprint(str_URL,
comms = 'tx')
c = pycurl.Curl()
c.setopt(c.URL, str_URL)
# pudb.set_trace()
if self.b_unverifiedCerts:
self.dp.qprint("Making an insecure connection with trusted host")
c.setopt(pycurl.SSL_VERIFYPEER, 0)
c.setopt(pycurl.SSL_VERIFYHOST, 0)
if verbose: c.setopt(c.VERBOSE, 1)
c.setopt(c.FOLLOWLOCATION, 1)
c.setopt(c.WRITEFUNCTION, response.write)
if len(self.str_auth):
self.dp.qprint("Using user:password authentication <%s>" %
self.str_auth)
c.setopt(c.USERPWD, self.str_auth)
elif len(self.str_authToken):
self.dp.qprint("Using token-based authorization <%s>" %
self.str_authToken)
header = 'Authorization: bearer %s' % self.str_authToken
c.setopt(pycurl.HTTPHEADER, [header])
self.dp.qprint("Waiting for PULL response...", level = 1, comms ='status')
c.perform()
c.close()
try:
str_response = response.getvalue().decode()
except:
str_response = response.getvalue()
self.dp.qprint('Incoming transmission received, length = %s' % "{:,}".format(len(str_response)),
level = 1, comms ='rx')
return str_response | Just the core of the pycurl logic. | Below is the the instruction that describes the task:
### Input:
Just the core of the pycurl logic.
### Response:
def pull_core(self, **kwargs):
"""
Just the core of the pycurl logic.
"""
str_ip = self.str_ip
str_port = self.str_port
verbose = 0
d_msg = {}
for k,v in kwargs.items():
if k == 'ip': str_ip = v
if k == 'port': str_port = v
if k == 'msg': d_msg = v
if k == 'verbose': verbose = v
response = io.BytesIO()
str_query = ''
if len(d_msg):
d_meta = d_msg['meta']
str_query = '?%s' % urllib.parse.urlencode(d_msg)
str_URL = "%s://%s:%s%s%s" % (self.str_protocol, str_ip, str_port, self.str_URL, str_query)
self.dp.qprint(str_URL,
comms = 'tx')
c = pycurl.Curl()
c.setopt(c.URL, str_URL)
# pudb.set_trace()
if self.b_unverifiedCerts:
self.dp.qprint("Making an insecure connection with trusted host")
c.setopt(pycurl.SSL_VERIFYPEER, 0)
c.setopt(pycurl.SSL_VERIFYHOST, 0)
if verbose: c.setopt(c.VERBOSE, 1)
c.setopt(c.FOLLOWLOCATION, 1)
c.setopt(c.WRITEFUNCTION, response.write)
if len(self.str_auth):
self.dp.qprint("Using user:password authentication <%s>" %
self.str_auth)
c.setopt(c.USERPWD, self.str_auth)
elif len(self.str_authToken):
self.dp.qprint("Using token-based authorization <%s>" %
self.str_authToken)
header = 'Authorization: bearer %s' % self.str_authToken
c.setopt(pycurl.HTTPHEADER, [header])
self.dp.qprint("Waiting for PULL response...", level = 1, comms ='status')
c.perform()
c.close()
try:
str_response = response.getvalue().decode()
except:
str_response = response.getvalue()
self.dp.qprint('Incoming transmission received, length = %s' % "{:,}".format(len(str_response)),
level = 1, comms ='rx')
return str_response |
def factory(self, url):
'''
Return (expiration, obj) corresponding to provided url, exercising the
cache_policy as necessary.
'''
try:
return self.fetch(url)
except BaseException as exc:
logger.exception('Reppy cache fetch error on %s' % url)
return self.cache_policy.exception(url, exc) | Return (expiration, obj) corresponding to provided url, exercising the
cache_policy as necessary. | Below is the the instruction that describes the task:
### Input:
Return (expiration, obj) corresponding to provided url, exercising the
cache_policy as necessary.
### Response:
def factory(self, url):
'''
Return (expiration, obj) corresponding to provided url, exercising the
cache_policy as necessary.
'''
try:
return self.fetch(url)
except BaseException as exc:
logger.exception('Reppy cache fetch error on %s' % url)
return self.cache_policy.exception(url, exc) |
def get_live_url(con_pool,
method,
host,
url,
headers,
retries=1,
redirect=True,
body=None,
service_name=None):
"""
Return a connection from the pool and perform an HTTP request.
:param con_pool:
is the http connection pool associated with the service
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param host:
the url of the server host.
:param headers:
headers to include with the request
:param body:
the POST, PUT, PATCH body of the request
"""
timeout = con_pool.timeout.read_timeout
start_time = time.time()
response = con_pool.urlopen(method, url, body=body,
headers=headers, redirect=redirect,
retries=retries, timeout=timeout)
request_time = time.time() - start_time
rest_request.send(sender='restclients',
url=url,
request_time=request_time,
hostname=socket.gethostname(),
service_name=service_name)
rest_request_passfail.send(sender='restclients',
url=url,
success=True,
hostname=socket.gethostname(),
service_name=service_name)
return response | Return a connection from the pool and perform an HTTP request.
:param con_pool:
is the http connection pool associated with the service
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param host:
the url of the server host.
:param headers:
headers to include with the request
:param body:
the POST, PUT, PATCH body of the request | Below is the the instruction that describes the task:
### Input:
Return a connection from the pool and perform an HTTP request.
:param con_pool:
is the http connection pool associated with the service
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param host:
the url of the server host.
:param headers:
headers to include with the request
:param body:
the POST, PUT, PATCH body of the request
### Response:
def get_live_url(con_pool,
method,
host,
url,
headers,
retries=1,
redirect=True,
body=None,
service_name=None):
"""
Return a connection from the pool and perform an HTTP request.
:param con_pool:
is the http connection pool associated with the service
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param host:
the url of the server host.
:param headers:
headers to include with the request
:param body:
the POST, PUT, PATCH body of the request
"""
timeout = con_pool.timeout.read_timeout
start_time = time.time()
response = con_pool.urlopen(method, url, body=body,
headers=headers, redirect=redirect,
retries=retries, timeout=timeout)
request_time = time.time() - start_time
rest_request.send(sender='restclients',
url=url,
request_time=request_time,
hostname=socket.gethostname(),
service_name=service_name)
rest_request_passfail.send(sender='restclients',
url=url,
success=True,
hostname=socket.gethostname(),
service_name=service_name)
return response |
def adamic(brands, exemplars):
""" Return the average Adamic/Adar similarity between a brand's followers
and the followers of each exemplar. We approximate the number of followed
accounts per user by only considering those in our brand set."""
print('adamic deprecated...requires loading all brands in memory.')
return
degrees = compute_log_degrees(brands, exemplars)
scores = {}
exemplar_sums = dict([(exemplar, sum(degrees[z] for z in exemplars[exemplar])) for exemplar in exemplars])
for brand in sorted(brands):
brand_sum = sum(degrees[z] for z in brands[brand])
total = 0.
for exemplar in exemplars:
total += sum(degrees[z] for z in brands[brand] & exemplars[exemplar]) / (brand_sum + exemplar_sums[exemplar])
scores[brand] = total / len(exemplars)
return scores | Return the average Adamic/Adar similarity between a brand's followers
and the followers of each exemplar. We approximate the number of followed
accounts per user by only considering those in our brand set. | Below is the the instruction that describes the task:
### Input:
Return the average Adamic/Adar similarity between a brand's followers
and the followers of each exemplar. We approximate the number of followed
accounts per user by only considering those in our brand set.
### Response:
def adamic(brands, exemplars):
""" Return the average Adamic/Adar similarity between a brand's followers
and the followers of each exemplar. We approximate the number of followed
accounts per user by only considering those in our brand set."""
print('adamic deprecated...requires loading all brands in memory.')
return
degrees = compute_log_degrees(brands, exemplars)
scores = {}
exemplar_sums = dict([(exemplar, sum(degrees[z] for z in exemplars[exemplar])) for exemplar in exemplars])
for brand in sorted(brands):
brand_sum = sum(degrees[z] for z in brands[brand])
total = 0.
for exemplar in exemplars:
total += sum(degrees[z] for z in brands[brand] & exemplars[exemplar]) / (brand_sum + exemplar_sums[exemplar])
scores[brand] = total / len(exemplars)
return scores |
def traverse_next(page, nextx, results, tabular_data_headers=[], verbosity=0):
"""
Recursive generator to traverse through the next attribute and \
crawl through the links to be followed.
:param page: The current page being parsed
:param next: The next attribute of the current scraping dict
:param results: The current extracted content, stored in a dict
:return: The extracted content, through a generator
"""
for link in page.extract_links(selector=nextx['follow_link']):
if verbosity > 0:
print('\n')
print(Back.YELLOW + Fore.BLUE + "Loading page ", link.url + Back.RESET + Fore.RESET, end='')
r = results.copy()
for attribute in nextx['scraping'].get('data'):
if attribute['field'] != "":
if verbosity > 1:
print("\nExtracting", attribute['field'], "attribute", sep=' ', end='')
r[attribute['field']] = link.extract_content(**attribute)
if not nextx['scraping'].get('table'):
result_list = [r]
else:
tables = nextx['scraping'].get('table', [])
for table in tables:
table.update({
'result': r,
'verbosity': verbosity
})
table_headers, result_list = link.extract_tabular(**table)
tabular_data_headers.extend(table_headers)
if not nextx['scraping'].get('next'):
for r in result_list:
yield (tabular_data_headers, r)
else:
for nextx2 in nextx['scraping'].get('next'):
for tdh, result in traverse_next(link, nextx2, r, tabular_data_headers=tabular_data_headers, verbosity=verbosity):
yield (tdh, result) | Recursive generator to traverse through the next attribute and \
crawl through the links to be followed.
:param page: The current page being parsed
:param next: The next attribute of the current scraping dict
:param results: The current extracted content, stored in a dict
:return: The extracted content, through a generator | Below is the instruction that describes the task:
### Input:
Recursive generator to traverse through the next attribute and \
crawl through the links to be followed.
:param page: The current page being parsed
:param next: The next attribute of the current scraping dict
:param results: The current extracted content, stored in a dict
:return: The extracted content, through a generator
### Response:
def traverse_next(page, nextx, results, tabular_data_headers=None, verbosity=0):
    """
    Recursive generator to traverse through the next attribute and \
    crawl through the links to be followed.

    :param page: The current page being parsed
    :param nextx: The next attribute of the current scraping dict
    :param results: The current extracted content, stored in a dict
    :param tabular_data_headers: Accumulated table headers; when omitted a
        fresh list is created for each top-level call (recursive calls pass
        the shared list explicitly).
    :param verbosity: 0 = silent, 1 = log page loads, 2 = also log each
        extracted attribute
    :return: The extracted content, through a generator of
        (tabular_data_headers, result) tuples
    """
    # Bug fix: the old signature used a mutable default ([]), which Python
    # evaluates once at definition time -- headers accumulated by one crawl
    # leaked into every subsequent call. A None sentinel gives each
    # top-level call its own accumulator.
    if tabular_data_headers is None:
        tabular_data_headers = []
    for link in page.extract_links(selector=nextx['follow_link']):
        if verbosity > 0:
            print('\n')
            print(Back.YELLOW + Fore.BLUE + "Loading page ", link.url + Back.RESET + Fore.RESET, end='')
        r = results.copy()
        for attribute in nextx['scraping'].get('data'):
            # An empty field name means "do not store this attribute".
            if attribute['field'] != "":
                if verbosity > 1:
                    print("\nExtracting", attribute['field'], "attribute", sep=' ', end='')
                r[attribute['field']] = link.extract_content(**attribute)
        if not nextx['scraping'].get('table'):
            result_list = [r]
        else:
            tables = nextx['scraping'].get('table', [])
            for table in tables:
                table.update({
                    'result': r,
                    'verbosity': verbosity
                })
                table_headers, result_list = link.extract_tabular(**table)
                tabular_data_headers.extend(table_headers)
        if not nextx['scraping'].get('next'):
            # Leaf of the crawl: emit each extracted record.
            for r in result_list:
                yield (tabular_data_headers, r)
        else:
            # Recurse into the nested 'next' specifications.
            for nextx2 in nextx['scraping'].get('next'):
                for tdh, result in traverse_next(link, nextx2, r, tabular_data_headers=tabular_data_headers, verbosity=verbosity):
                    yield (tdh, result)
def _http_req_apply_default_headers(self, request_headers,
content_type, body):
"""Set default values for common HTTP request headers
:param dict request_headers: The HTTP request headers
:param content_type: The mime-type used in the request/response
:type content_type: :py:class:`ietfparse.datastructures.ContentType`
or str
:param mixed body: The request body
:rtype: dict
"""
if not request_headers:
request_headers = {}
request_headers.setdefault(
'Accept', ', '.join([str(ct) for ct in AVAILABLE_CONTENT_TYPES]))
if body:
request_headers.setdefault(
'Content-Type', str(content_type) or str(CONTENT_TYPE_MSGPACK))
if hasattr(self, 'correlation_id'):
request_headers.setdefault(
'Correlation-Id', self.correlation_id)
elif hasattr(self, 'request') and \
self.request.headers.get('Correlation-Id'):
request_headers.setdefault(
'Correlation-Id', self.request.headers['Correlation-Id'])
return request_headers | Set default values for common HTTP request headers
:param dict request_headers: The HTTP request headers
:param content_type: The mime-type used in the request/response
:type content_type: :py:class:`ietfparse.datastructures.ContentType`
or str
:param mixed body: The request body
:rtype: dict | Below is the instruction that describes the task:
### Input:
Set default values for common HTTP request headers
:param dict request_headers: The HTTP request headers
:param content_type: The mime-type used in the request/response
:type content_type: :py:class:`ietfparse.datastructures.ContentType`
or str
:param mixed body: The request body
:rtype: dict
### Response:
def _http_req_apply_default_headers(self, request_headers,
                                    content_type, body):
    """Set default values for common HTTP request headers

    :param dict request_headers: The HTTP request headers
    :param content_type: The mime-type used in the request/response
    :type content_type: :py:class:`ietfparse.datastructures.ContentType`
        or str
    :param mixed body: The request body
    :rtype: dict
    """
    if not request_headers:
        request_headers = {}
    request_headers.setdefault(
        'Accept', ', '.join([str(ct) for ct in AVAILABLE_CONTENT_TYPES]))
    if body:
        # Bug fix: the previous expression
        # ``str(content_type) or str(CONTENT_TYPE_MSGPACK)`` never fell
        # back to msgpack because ``str(None) == 'None'`` is truthy.
        request_headers.setdefault(
            'Content-Type',
            str(content_type) if content_type else str(CONTENT_TYPE_MSGPACK))
    # Propagate an existing correlation id, preferring one already set on
    # this object over one carried in the inbound request headers.
    if hasattr(self, 'correlation_id'):
        request_headers.setdefault(
            'Correlation-Id', self.correlation_id)
    elif hasattr(self, 'request') and \
            self.request.headers.get('Correlation-Id'):
        request_headers.setdefault(
            'Correlation-Id', self.request.headers['Correlation-Id'])
    return request_headers
def score_alignment(a, b, gap_open, gap_extend, matrix):
'''Calculate the alignment score from two aligned sequences.
:param a: The first aligned sequence.
:type a: str
:param b: The second aligned sequence.
:type b: str
:param gap_open: The cost of opening a gap (negative number).
:type gap_open: int
:param gap_extend: The cost of extending an open gap (negative number).
:type gap_extend: int.
:param matrix: A score matrix dictionary name. Examples can be found in
the substitution_matrices module.
'''
al = a
bl = b
l = len(al)
score = 0
assert len(bl) == l, 'Alignment lengths must be the same'
mat = as_ord_matrix(matrix)
gap_started = 0
for i in range(l):
if al[i] == '-' or bl[i] == '-':
score += gap_extend if gap_started else gap_open
gap_started = 1
else:
score += mat[ord(al[i]), ord(bl[i])]
gap_started = 0
return score | Calculate the alignment score from two aligned sequences.
:param a: The first aligned sequence.
:type a: str
:param b: The second aligned sequence.
:type b: str
:param gap_open: The cost of opening a gap (negative number).
:type gap_open: int
:param gap_extend: The cost of extending an open gap (negative number).
:type gap_extend: int.
:param matrix: A score matrix dictionary name. Examples can be found in
the substitution_matrices module. | Below is the instruction that describes the task:
### Input:
Calculate the alignment score from two aligned sequences.
:param a: The first aligned sequence.
:type a: str
:param b: The second aligned sequence.
:type b: str
:param gap_open: The cost of opening a gap (negative number).
:type gap_open: int
:param gap_extend: The cost of extending an open gap (negative number).
:type gap_extend: int.
:param matrix: A score matrix dictionary name. Examples can be found in
the substitution_matrices module.
### Response:
def score_alignment(a, b, gap_open, gap_extend, matrix):
    '''Compute the total score of a pairwise alignment.

    Matched columns are scored via the substitution matrix; a column
    containing a gap character costs ``gap_open`` when it starts a gap run
    and ``gap_extend`` while the run continues.

    :param a: The first aligned sequence.
    :type a: str
    :param b: The second aligned sequence.
    :type b: str
    :param gap_open: The cost of opening a gap (negative number).
    :type gap_open: int
    :param gap_extend: The cost of extending an open gap (negative number).
    :type gap_extend: int.
    :param matrix: A score matrix dictionary name. Examples can be found in
        the substitution_matrices module.
    '''
    length = len(a)
    assert len(b) == length, 'Alignment lengths must be the same'
    score_matrix = as_ord_matrix(matrix)
    total = 0
    in_gap = False
    for char_a, char_b in zip(a, b):
        if '-' in (char_a, char_b):
            total += gap_extend if in_gap else gap_open
            in_gap = True
        else:
            total += score_matrix[ord(char_a), ord(char_b)]
            in_gap = False
    return total
def brighten(self):
"""Brighten the device one step."""
brighten_command = StandardSend(
self._address, COMMAND_LIGHT_BRIGHTEN_ONE_STEP_0X15_0X00)
self._send_method(brighten_command) | Brighten the device one step. | Below is the instruction that describes the task:
### Input:
Brighten the device one step.
### Response:
def brighten(self):
    """Step the device up by a single brightness increment."""
    # Build the standard-length brighten message and hand it off to the
    # send callback configured for this device.
    message = StandardSend(
        self._address, COMMAND_LIGHT_BRIGHTEN_ONE_STEP_0X15_0X00)
    self._send_method(message)
def should_retry_on(self, exception_class, logger=None):
"""
Whether this task should be retried when the given exception occurs.
"""
for n in (self.retry_on or []):
try:
if issubclass(exception_class, import_attribute(n)):
return True
except TaskImportError:
if logger:
logger.error('should_retry_on could not import class',
exception_name=n)
return False | Whether this task should be retried when the given exception occurs. | Below is the instruction that describes the task:
### Input:
Whether this task should be retried when the given exception occurs.
### Response:
def should_retry_on(self, exception_class, logger=None):
    """
    Whether this task should be retried when the given exception occurs.
    """
    # retry_on holds dotted import paths of retryable exception classes;
    # an unset value means nothing is retryable.
    for dotted_name in (self.retry_on or []):
        try:
            retryable = import_attribute(dotted_name)
        except TaskImportError:
            if logger:
                logger.error('should_retry_on could not import class',
                             exception_name=dotted_name)
            continue
        if issubclass(exception_class, retryable):
            return True
    return False
def _resolved_pid(self):
"""Resolve self.pid if it is a fetched pid."""
if not isinstance(self.pid, PersistentIdentifier):
return resolve_pid(self.pid)
return self.pid | Resolve self.pid if it is a fetched pid. | Below is the instruction that describes the task:
### Input:
Resolve self.pid if it is a fetched pid.
### Response:
def _resolved_pid(self):
    """Return self.pid as a PersistentIdentifier, resolving fetched pids."""
    pid = self.pid
    if isinstance(pid, PersistentIdentifier):
        return pid
    return resolve_pid(pid)
def return_handler(module_logger, first_is_session=True):
"""Decorator for VISA library classes.
"""
def _outer(visa_library_method):
def _inner(self, session, *args, **kwargs):
ret_value = visa_library_method(*args, **kwargs)
module_logger.debug('%s%s -> %r',
visa_library_method.__name__,
_args_to_str(args, kwargs),
ret_value)
try:
ret_value = constants.StatusCode(ret_value)
except ValueError:
pass
if first_is_session:
self._last_status = ret_value
self._last_status_in_session[session] = ret_value
if ret_value < 0:
raise VisaIOError(ret_value)
if ret_value in self.issue_warning_on:
if session and ret_value not in self._ignore_warning_in_session[session]:
module_logger.warn(VisaIOWarning(ret_value), stacklevel=2)
return ret_value
return _inner
return _outer | Decorator for VISA library classes. | Below is the instruction that describes the task:
### Input:
Decorator for VISA library classes.
### Response:
def return_handler(module_logger, first_is_session=True):
    """Decorator for VISA library classes.

    Produces a decorator that wraps a low-level VISA library method: the
    integer return code is logged, converted to ``constants.StatusCode``
    when recognised, recorded as the last status (globally and per session
    id when ``first_is_session`` is True), raised as ``VisaIOError`` when
    negative, and logged as a ``VisaIOWarning`` when listed in
    ``self.issue_warning_on``.

    :param module_logger: logger used for the debug/warning output.
    :param first_is_session: when True, treat the wrapped call's ``session``
        argument as a session id for status bookkeeping.
    """
    def _outer(visa_library_method):
        def _inner(self, session, *args, **kwargs):
            # NOTE(review): ``session`` is consumed for bookkeeping only and
            # is NOT forwarded to ``visa_library_method`` -- presumably
            # callers include it in ``args`` when the underlying call needs
            # it; confirm against call sites.
            ret_value = visa_library_method(*args, **kwargs)
            module_logger.debug('%s%s -> %r',
                                visa_library_method.__name__,
                                _args_to_str(args, kwargs),
                                ret_value)
            try:
                # Map raw integers onto the StatusCode enum when possible;
                # unknown codes are passed through unchanged.
                ret_value = constants.StatusCode(ret_value)
            except ValueError:
                pass
            if first_is_session:
                self._last_status = ret_value
                self._last_status_in_session[session] = ret_value
            if ret_value < 0:
                # Negative VISA status codes denote errors.
                raise VisaIOError(ret_value)
            if ret_value in self.issue_warning_on:
                if session and ret_value not in self._ignore_warning_in_session[session]:
                    # NOTE(review): ``Logger.warn`` is a deprecated alias of
                    # ``Logger.warning``; left unchanged here.
                    module_logger.warn(VisaIOWarning(ret_value), stacklevel=2)
            return ret_value
        return _inner
    return _outer
def get_topics(
self,
topic_name=None,
names_only=False,
fetch_partition_state=True,
):
"""Get information on all the available topics.
Topic-data format with fetch_partition_state as False :-
topic_data = {
'version': 1,
'partitions': {
<p_id>: {
replicas: <broker-ids>
}
}
}
Topic-data format with fetch_partition_state as True:-
topic_data = {
'version': 1,
'ctime': <timestamp>,
'partitions': {
<p_id>:{
replicas: [<broker_id>, <broker_id>, ...],
isr: [<broker_id>, <broker_id>, ...],
controller_epoch: <val>,
leader_epoch: <val>,
version: 1,
leader: <broker-id>,
ctime: <timestamp>,
}
}
}
Note: By default we also fetch partition-state which results in
accessing the zookeeper twice. If just partition-replica information is
required fetch_partition_state should be set to False.
"""
try:
topic_ids = [topic_name] if topic_name else self.get_children(
"/brokers/topics",
)
except NoNodeError:
_log.error(
"Cluster is empty."
)
return {}
if names_only:
return topic_ids
topics_data = {}
for topic_id in topic_ids:
try:
topic_info = self.get("/brokers/topics/{id}".format(id=topic_id))
topic_data = load_json(topic_info[0])
topic_ctime = topic_info[1].ctime / 1000.0
topic_data['ctime'] = topic_ctime
except NoNodeError:
_log.info(
"topic '{topic}' not found.".format(topic=topic_id),
)
return {}
# Prepare data for each partition
partitions_data = {}
for p_id, replicas in six.iteritems(topic_data['partitions']):
partitions_data[p_id] = {}
if fetch_partition_state:
# Fetch partition-state from zookeeper
partition_state = self._fetch_partition_state(topic_id, p_id)
partitions_data[p_id] = load_json(partition_state[0])
partitions_data[p_id]['ctime'] = partition_state[1].ctime / 1000.0
else:
# Fetch partition-info from zookeeper
partition_info = self._fetch_partition_info(topic_id, p_id)
partitions_data[p_id]['ctime'] = partition_info.ctime / 1000.0
partitions_data[p_id]['replicas'] = replicas
topic_data['partitions'] = partitions_data
topics_data[topic_id] = topic_data
return topics_data | Get information on all the available topics.
Topic-data format with fetch_partition_state as False :-
topic_data = {
'version': 1,
'partitions': {
<p_id>: {
replicas: <broker-ids>
}
}
}
Topic-data format with fetch_partition_state as True:-
topic_data = {
'version': 1,
'ctime': <timestamp>,
'partitions': {
<p_id>:{
replicas: [<broker_id>, <broker_id>, ...],
isr: [<broker_id>, <broker_id>, ...],
controller_epoch: <val>,
leader_epoch: <val>,
version: 1,
leader: <broker-id>,
ctime: <timestamp>,
}
}
}
Note: By default we also fetch partition-state which results in
accessing the zookeeper twice. If just partition-replica information is
required fetch_partition_state should be set to False. | Below is the instruction that describes the task:
### Input:
Get information on all the available topics.
Topic-data format with fetch_partition_state as False :-
topic_data = {
'version': 1,
'partitions': {
<p_id>: {
replicas: <broker-ids>
}
}
}
Topic-data format with fetch_partition_state as True:-
topic_data = {
'version': 1,
'ctime': <timestamp>,
'partitions': {
<p_id>:{
replicas: [<broker_id>, <broker_id>, ...],
isr: [<broker_id>, <broker_id>, ...],
controller_epoch: <val>,
leader_epoch: <val>,
version: 1,
leader: <broker-id>,
ctime: <timestamp>,
}
}
}
Note: By default we also fetch partition-state which results in
accessing the zookeeper twice. If just partition-replica information is
required fetch_partition_state should be set to False.
### Response:
def get_topics(
    self,
    topic_name=None,
    names_only=False,
    fetch_partition_state=True,
):
    """Get information on all the available topics.
    Topic-data format with fetch_partition_state as False :-
    topic_data = {
        'version': 1,
        'partitions': {
            <p_id>: {
                replicas: <broker-ids>
            }
        }
    }
    Topic-data format with fetch_partition_state as True:-
    topic_data = {
        'version': 1,
        'ctime': <timestamp>,
        'partitions': {
            <p_id>:{
                replicas: [<broker_id>, <broker_id>, ...],
                isr: [<broker_id>, <broker_id>, ...],
                controller_epoch: <val>,
                leader_epoch: <val>,
                version: 1,
                leader: <broker-id>,
                ctime: <timestamp>,
            }
        }
    }
    Note: By default we also fetch partition-state which results in
    accessing the zookeeper twice. If just partition-replica information is
    required fetch_partition_state should be set to False.
    """
    try:
        # When a topic name is given, restrict the scan to that topic;
        # otherwise enumerate every topic registered in zookeeper.
        topic_ids = [topic_name] if topic_name else self.get_children(
            "/brokers/topics",
        )
    except NoNodeError:
        _log.error(
            "Cluster is empty."
        )
        return {}
    if names_only:
        return topic_ids
    topics_data = {}
    for topic_id in topic_ids:
        try:
            topic_info = self.get("/brokers/topics/{id}".format(id=topic_id))
            topic_data = load_json(topic_info[0])
            # znode ctime is in milliseconds; expose seconds.
            topic_ctime = topic_info[1].ctime / 1000.0
            topic_data['ctime'] = topic_ctime
        except NoNodeError:
            _log.info(
                "topic '{topic}' not found.".format(topic=topic_id),
            )
            # NOTE(review): returning {} here discards data already
            # collected for earlier topics; this looks intended for the
            # single-topic path only -- confirm before changing.
            return {}
        # Prepare data for each partition
        partitions_data = {}
        for p_id, replicas in six.iteritems(topic_data['partitions']):
            partitions_data[p_id] = {}
            if fetch_partition_state:
                # Fetch partition-state from zookeeper
                partition_state = self._fetch_partition_state(topic_id, p_id)
                partitions_data[p_id] = load_json(partition_state[0])
                partitions_data[p_id]['ctime'] = partition_state[1].ctime / 1000.0
            else:
                # Fetch partition-info from zookeeper
                partition_info = self._fetch_partition_info(topic_id, p_id)
                partitions_data[p_id]['ctime'] = partition_info.ctime / 1000.0
            partitions_data[p_id]['replicas'] = replicas
        topic_data['partitions'] = partitions_data
        topics_data[topic_id] = topic_data
    return topics_data
def verify(
self, headers, serialized_request_env, deserialized_request_env):
# type: (Dict[str, Any], str, RequestEnvelope) -> None
"""Verify if the input request timestamp is in tolerated limits.
The verify method retrieves the request timestamp and check if
it falls in the limit set by the tolerance, by checking with
the current timestamp in UTC.
:param headers: headers of the input POST request
:type headers: Dict[str, Any]
:param serialized_request_env: raw request envelope in the
input POST request
:type serialized_request_env: str
:param deserialized_request_env: deserialized request envelope
instance of the input POST request
:type deserialized_request_env:
:py:class:`ask_sdk_model.request_envelope.RequestEnvelope`
:raises: :py:class:`VerificationException` if difference between
local timestamp and input request timestamp is more than
specific tolerance limit
"""
local_now = datetime.now(tz.tzutc())
request_timestamp = deserialized_request_env.request.timestamp
if (abs((local_now - request_timestamp).seconds) >
(self._tolerance_in_millis / 1000)):
raise VerificationException("Timestamp verification failed") | Verify if the input request timestamp is in tolerated limits.
The verify method retrieves the request timestamp and check if
it falls in the limit set by the tolerance, by checking with
the current timestamp in UTC.
:param headers: headers of the input POST request
:type headers: Dict[str, Any]
:param serialized_request_env: raw request envelope in the
input POST request
:type serialized_request_env: str
:param deserialized_request_env: deserialized request envelope
instance of the input POST request
:type deserialized_request_env:
:py:class:`ask_sdk_model.request_envelope.RequestEnvelope`
:raises: :py:class:`VerificationException` if difference between
local timestamp and input request timestamp is more than
specific tolerance limit | Below is the instruction that describes the task:
### Input:
Verify if the input request timestamp is in tolerated limits.
The verify method retrieves the request timestamp and check if
it falls in the limit set by the tolerance, by checking with
the current timestamp in UTC.
:param headers: headers of the input POST request
:type headers: Dict[str, Any]
:param serialized_request_env: raw request envelope in the
input POST request
:type serialized_request_env: str
:param deserialized_request_env: deserialized request envelope
instance of the input POST request
:type deserialized_request_env:
:py:class:`ask_sdk_model.request_envelope.RequestEnvelope`
:raises: :py:class:`VerificationException` if difference between
local timestamp and input request timestamp is more than
specific tolerance limit
### Response:
def verify(
        self, headers, serialized_request_env, deserialized_request_env):
    # type: (Dict[str, Any], str, RequestEnvelope) -> None
    """Verify if the input request timestamp is in tolerated limits.

    The verify method retrieves the request timestamp and check if
    it falls in the limit set by the tolerance, by checking with
    the current timestamp in UTC.

    :param headers: headers of the input POST request
    :type headers: Dict[str, Any]
    :param serialized_request_env: raw request envelope in the
        input POST request
    :type serialized_request_env: str
    :param deserialized_request_env: deserialized request envelope
        instance of the input POST request
    :type deserialized_request_env:
        :py:class:`ask_sdk_model.request_envelope.RequestEnvelope`
    :raises: :py:class:`VerificationException` if difference between
        local timestamp and input request timestamp is more than
        specific tolerance limit
    """
    local_now = datetime.now(tz.tzutc())
    request_timestamp = deserialized_request_env.request.timestamp
    # Bug fix: ``timedelta.seconds`` is only the normalized seconds
    # component (0..86399) -- it is wrong for negative deltas (timestamps
    # slightly in the future become ~86399) and ignores whole days.
    # ``total_seconds()`` gives the true signed magnitude of the skew.
    skew_seconds = abs((local_now - request_timestamp).total_seconds())
    if skew_seconds > (self._tolerance_in_millis / 1000):
        raise VerificationException("Timestamp verification failed")
def com_google_fonts_check_metadata_match_name_familyname(family_metadata, font_metadata):
"""METADATA.pb: Check font name is the same as family name."""
if font_metadata.name != family_metadata.name:
yield FAIL, ("METADATA.pb: {}: Family name \"{}\""
" does not match"
" font name: \"{}\"").format(font_metadata.filename,
family_metadata.name,
font_metadata.name)
else:
yield PASS, "Font name is the same as family name." | Below is the instruction that describes the task:
### Input:
METADATA.pb: Check font name is the same as family name.
### Response:
def com_google_fonts_check_metadata_match_name_familyname(family_metadata, font_metadata):
    """METADATA.pb: Check font name is the same as family name."""
    if font_metadata.name == family_metadata.name:
        yield PASS, "Font name is the same as family name."
    else:
        message = ("METADATA.pb: {}: Family name \"{}\""
                   " does not match"
                   " font name: \"{}\"").format(font_metadata.filename,
                                                family_metadata.name,
                                                font_metadata.name)
        yield FAIL, message
def server(request):
"""
Respond to requests for the server's primary web page.
"""
return render_to_response(
'server/index.html', {
'user_url': getViewURL(request, idPage),
'server_xrds_url': getViewURL(request, idpXrds),
},
context_instance=RequestContext(request)) | Respond to requests for the server's primary web page. | Below is the instruction that describes the task:
### Input:
Respond to requests for the server's primary web page.
### Response:
def server(request):
    """
    Respond to requests for the server's primary web page.
    """
    # Assemble the template context up front, then render.
    context = {
        'user_url': getViewURL(request, idPage),
        'server_xrds_url': getViewURL(request, idpXrds),
    }
    return render_to_response(
        'server/index.html', context,
        context_instance=RequestContext(request))
def rmd_options_to_metadata(options):
"""
Parse rmd options and return a metadata dictionary
:param options:
:return:
"""
options = re.split(r'\s|,', options, 1)
if len(options) == 1:
language = options[0]
chunk_options = []
else:
language, others = options
language = language.rstrip(' ,')
others = others.lstrip(' ,')
chunk_options = parse_rmd_options(others)
language = 'R' if language == 'r' else language
metadata = {}
for i, opt in enumerate(chunk_options):
name, value = opt
if i == 0 and name == '':
metadata['name'] = value
continue
else:
if update_metadata_from_rmd_options(name, value, metadata):
continue
try:
metadata[name] = _py_logical_values(value)
continue
except RLogicalValueError:
metadata[name] = value
for name in metadata:
try_eval_metadata(metadata, name)
if ('active' in metadata or metadata.get('run_control', {}).get('frozen') is True) and 'eval' in metadata:
del metadata['eval']
return metadata.get('language') or language, metadata | Parse rmd options and return a metadata dictionary
:param options:
:return: | Below is the instruction that describes the task:
### Input:
Parse rmd options and return a metadata dictionary
:param options:
:return:
### Response:
def rmd_options_to_metadata(options):
    """
    Parse rmd options and return a metadata dictionary
    :param options: raw options string of an R Markdown chunk header,
        starting with the language and optionally followed by
        comma-separated chunk options
    :return: tuple of (language, metadata dict)
    """
    # Split the language token from the rest on the first space or comma.
    # NOTE(review): the positional maxsplit argument of re.split is
    # deprecated since Python 3.13; prefer ``maxsplit=1`` as a keyword.
    options = re.split(r'\s|,', options, 1)
    if len(options) == 1:
        language = options[0]
        chunk_options = []
    else:
        language, others = options
        language = language.rstrip(' ,')
        others = others.lstrip(' ,')
        chunk_options = parse_rmd_options(others)
    # Normalise knitr's lowercase 'r' to the canonical 'R'.
    language = 'R' if language == 'r' else language
    metadata = {}
    for i, opt in enumerate(chunk_options):
        name, value = opt
        if i == 0 and name == '':
            # A nameless first option is the chunk label.
            metadata['name'] = value
            continue
        else:
            # Known option names are mapped by this helper; it returns a
            # truthy value when it consumed the option.
            if update_metadata_from_rmd_options(name, value, metadata):
                continue
            try:
                # Convert R logicals (TRUE/FALSE) to Python booleans;
                # anything else is kept as the raw string.
                metadata[name] = _py_logical_values(value)
                continue
            except RLogicalValueError:
                metadata[name] = value
    for name in metadata:
        try_eval_metadata(metadata, name)
    # An explicitly active/frozen cell makes a stored 'eval' option
    # redundant, so drop it.
    if ('active' in metadata or metadata.get('run_control', {}).get('frozen') is True) and 'eval' in metadata:
        del metadata['eval']
    # The metadata itself may override the language detected above.
    return metadata.get('language') or language, metadata
def create_package(self, assay=None, mass=0.0, P=1.0, T=25.0,
normalise=True):
"""
Create a MaterialPackage based on the specified parameters.
:param assay: Name of the assay to be used to create the package.
:param mass: Package mass. [kg]
:param P: Package pressure. [atm]
:param T: Package temperature. [°C]
:param normalise: Indicates whether the assay must be normalised
before creating the package.
:returns: MaterialPackage object.
"""
if assay is None:
return MaterialPackage(self, self.create_empty_assay(), P, T)
if normalise:
assay_total = self.get_assay_total(assay)
else:
assay_total = 1.0
return MaterialPackage(self, mass * self.converted_assays[assay] /
assay_total, P, T, self._isCoal(assay),
self._get_HHV(assay)) | Create a MaterialPackage based on the specified parameters.
:param assay: Name of the assay to be used to create the package.
:param mass: Package mass. [kg]
:param P: Package pressure. [atm]
:param T: Package temperature. [°C]
:param normalise: Indicates whether the assay must be normalised
before creating the package.
:returns: MaterialPackage object. | Below is the instruction that describes the task:
### Input:
Create a MaterialPackage based on the specified parameters.
:param assay: Name of the assay to be used to create the package.
:param mass: Package mass. [kg]
:param P: Package pressure. [atm]
:param T: Package temperature. [°C]
:param normalise: Indicates whether the assay must be normalised
before creating the package.
:returns: MaterialPackage object.
### Response:
def create_package(self, assay=None, mass=0.0, P=1.0, T=25.0,
                   normalise=True):
    """
    Create a MaterialPackage based on the specified parameters.

    :param assay: Name of the assay to be used to create the package.
    :param mass: Package mass. [kg]
    :param P: Package pressure. [atm]
    :param T: Package temperature. [°C]
    :param normalise: Indicates whether the assay must be normalised
        before creating the package.
    :returns: MaterialPackage object.
    """
    # No assay means an empty package at the requested conditions.
    if assay is None:
        return MaterialPackage(self, self.create_empty_assay(), P, T)
    # Scale the assay composition by the requested mass, normalising by
    # the assay total when asked to.
    total = self.get_assay_total(assay) if normalise else 1.0
    scaled_assay = mass * self.converted_assays[assay] / total
    return MaterialPackage(self, scaled_assay, P, T,
                           self._isCoal(assay), self._get_HHV(assay))
def deployed(name, sourcepath, apppool='', hostheader='', ipaddress='*', port=80, protocol='http', preload=''):
'''
Ensure the website has been deployed.
.. note:
This function only validates against the site name, and will return True even
if the site already exists with a different configuration. It will not modify
the configuration of an existing site.
:param str name: The IIS site name.
:param str sourcepath: The physical path of the IIS site.
:param str apppool: The name of the IIS application pool.
:param str hostheader: The host header of the binding.
:param str ipaddress: The IP address of the binding.
:param str port: The TCP port of the binding.
:param str protocol: The application protocol of the binding.
:param bool preload: Whether Preloading should be enabled
.. note:
If an application pool is specified, and that application pool does not already exist,
it will be created.
Example of usage with only the required arguments. This will default to using the default application pool
assigned by IIS:
.. code-block:: yaml
site0-deployed:
win_iis.deployed:
- name: site0
- sourcepath: C:\\inetpub\\site0
Example of usage specifying all available arguments:
.. code-block:: yaml
site0-deployed:
win_iis.deployed:
- name: site0
- sourcepath: C:\\inetpub\\site0
- apppool: site0
- hostheader: site0.local
- ipaddress: '*'
- port: 443
- protocol: https
- preload: True
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
current_sites = __salt__['win_iis.list_sites']()
if name in current_sites:
ret['comment'] = 'Site already present: {0}'.format(name)
ret['result'] = True
elif __opts__['test']:
ret['comment'] = 'Site will be created: {0}'.format(name)
ret['changes'] = {'old': None,
'new': name}
else:
ret['comment'] = 'Created site: {0}'.format(name)
ret['changes'] = {'old': None,
'new': name}
ret['result'] = __salt__['win_iis.create_site'](name, sourcepath, apppool,
hostheader, ipaddress, port,
protocol, preload)
return ret | Ensure the website has been deployed.
.. note:
This function only validates against the site name, and will return True even
if the site already exists with a different configuration. It will not modify
the configuration of an existing site.
:param str name: The IIS site name.
:param str sourcepath: The physical path of the IIS site.
:param str apppool: The name of the IIS application pool.
:param str hostheader: The host header of the binding.
:param str ipaddress: The IP address of the binding.
:param str port: The TCP port of the binding.
:param str protocol: The application protocol of the binding.
:param bool preload: Whether Preloading should be enabled
.. note:
If an application pool is specified, and that application pool does not already exist,
it will be created.
Example of usage with only the required arguments. This will default to using the default application pool
assigned by IIS:
.. code-block:: yaml
site0-deployed:
win_iis.deployed:
- name: site0
- sourcepath: C:\\inetpub\\site0
Example of usage specifying all available arguments:
.. code-block:: yaml
site0-deployed:
win_iis.deployed:
- name: site0
- sourcepath: C:\\inetpub\\site0
- apppool: site0
- hostheader: site0.local
- ipaddress: '*'
- port: 443
- protocol: https
- preload: True | Below is the instruction that describes the task:
### Input:
Ensure the website has been deployed.
.. note:
This function only validates against the site name, and will return True even
if the site already exists with a different configuration. It will not modify
the configuration of an existing site.
:param str name: The IIS site name.
:param str sourcepath: The physical path of the IIS site.
:param str apppool: The name of the IIS application pool.
:param str hostheader: The host header of the binding.
:param str ipaddress: The IP address of the binding.
:param str port: The TCP port of the binding.
:param str protocol: The application protocol of the binding.
:param bool preload: Whether Preloading should be enabled
.. note:
If an application pool is specified, and that application pool does not already exist,
it will be created.
Example of usage with only the required arguments. This will default to using the default application pool
assigned by IIS:
.. code-block:: yaml
site0-deployed:
win_iis.deployed:
- name: site0
- sourcepath: C:\\inetpub\\site0
Example of usage specifying all available arguments:
.. code-block:: yaml
site0-deployed:
win_iis.deployed:
- name: site0
- sourcepath: C:\\inetpub\\site0
- apppool: site0
- hostheader: site0.local
- ipaddress: '*'
- port: 443
- protocol: https
- preload: True
### Response:
def deployed(name, sourcepath, apppool='', hostheader='', ipaddress='*', port=80, protocol='http', preload=''):
    '''
    Ensure the website has been deployed.

    .. note:
        This state only validates against the site name: it returns True even
        if a site with the same name already exists under a different
        configuration, and it never modifies an existing site.

    :param str name: The IIS site name.
    :param str sourcepath: The physical path of the IIS site.
    :param str apppool: The name of the IIS application pool.
    :param str hostheader: The host header of the binding.
    :param str ipaddress: The IP address of the binding.
    :param str port: The TCP port of the binding.
    :param str protocol: The application protocol of the binding.
    :param bool preload: Whether Preloading should be enabled.

    .. note:
        If an application pool is specified and it does not already exist,
        it will be created.
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # Existence is judged on the site name alone.
    existing_sites = __salt__['win_iis.list_sites']()

    if name in existing_sites:
        ret['result'] = True
        ret['comment'] = 'Site already present: {0}'.format(name)
        return ret

    ret['changes'] = {'old': None,
                      'new': name}
    if __opts__['test']:
        # Dry run: report the pending change without touching IIS.
        ret['comment'] = 'Site will be created: {0}'.format(name)
    else:
        ret['comment'] = 'Created site: {0}'.format(name)
        ret['result'] = __salt__['win_iis.create_site'](name, sourcepath, apppool,
                                                        hostheader, ipaddress, port,
                                                        protocol, preload)
    return ret
def display(self):
"""
Displays connection information to the screen.
"""
if self.toLayer._verbosity > 4:
print("wed: from '" + self.fromLayer.name + "' to '" + self.toLayer.name +"'")
for j in range(self.toLayer.size):
print(self.toLayer.name, "[", j, "]", end=" ")
print('')
for i in range(self.fromLayer.size):
print(self.fromLayer.name, "[", i, "]", ": ", end=" ")
for j in range(self.toLayer.size):
print(self.wed[i][j], end=" ")
print('')
print('')
print("dweight: from '" + self.fromLayer.name + "' to '" + self.toLayer.name +"'")
for j in range(self.toLayer.size):
print(self.toLayer.name, "[", j, "]", end=" ")
print('')
for i in range(self.fromLayer.size):
print(self.fromLayer.name, "[", i, "]", ": ", end=" ")
for j in range(self.toLayer.size):
print(self.dweight[i][j], end=" ")
print('')
print('')
if self.toLayer._verbosity > 2:
print("Weights: from '" + self.fromLayer.name + "' to '" + self.toLayer.name +"'")
print(" ", end=" ")
for j in range(self.toLayer.size):
print(self.toLayer.name, "[", j, "]", end=" ")
print('')
for i in range(self.fromLayer.size):
print(self.fromLayer.name, "[", i, "]", ": ", end=" ")
for j in range(self.toLayer.size):
print(self.weight[i][j], end=" ")
print('')
print('') | Displays connection information to the screen. | Below is the the instruction that describes the task:
### Input:
Displays connection information to the screen.
### Response:
def _print_matrix(title, from_layer, to_layer, matrix, spacer=False):
    """Print one labelled matrix in the connection-display layout.

    Emits a title line, a column-header row of to-layer unit labels, one
    labelled row per from-layer unit, and a trailing blank line -- the exact
    layout the original inline loops produced.

    :param title: label printed before the matrix (e.g. "wed", "Weights")
    :param from_layer: layer providing row labels (`name`, `size`)
    :param to_layer: layer providing column labels (`name`, `size`)
    :param matrix: 2-D indexable of values, shape [from_layer.size][to_layer.size]
    :param spacer: when True, indent the header row to line up with row labels
    """
    print(title + ": from '" + from_layer.name + "' to '" + to_layer.name + "'")
    if spacer:
        # Only the weight table indents its header row.
        print(" ", end=" ")
    for j in range(to_layer.size):
        print(to_layer.name, "[", j, "]", end=" ")
    print('')
    for i in range(from_layer.size):
        print(from_layer.name, "[", i, "]", ": ", end=" ")
        for j in range(to_layer.size):
            print(matrix[i][j], end=" ")
        print('')
    print('')

def display(self):
    """
    Displays connection information to the screen.

    With to-layer verbosity > 4 the wed and dweight matrices are dumped;
    with verbosity > 2 the weight matrix is dumped.  Each matrix is printed
    via the shared _print_matrix helper (the original had the same loop
    triplicated inline).
    """
    if self.toLayer._verbosity > 4:
        _print_matrix("wed", self.fromLayer, self.toLayer, self.wed)
        _print_matrix("dweight", self.fromLayer, self.toLayer, self.dweight)
    if self.toLayer._verbosity > 2:
        _print_matrix("Weights", self.fromLayer, self.toLayer, self.weight, spacer=True)
def hash(self):
"""
Return the hash of the local KV state.
:returns: kv state hash
:rtype: int
"""
hash_request = etcdrpc.HashRequest()
return self.maintenancestub.Hash(hash_request).hash | Return the hash of the local KV state.
:returns: kv state hash
:rtype: int | Below is the the instruction that describes the task:
### Input:
Return the hash of the local KV state.
:returns: kv state hash
:rtype: int
### Response:
def hash(self):
    """
    Return the hash of the local KV state.

    :returns: kv state hash
    :rtype: int
    """
    # etcd computes the KV hash server-side; we only issue the RPC.
    request = etcdrpc.HashRequest()
    response = self.maintenancestub.Hash(request)
    return response.hash
def _get_archive_filelist(filename):
# type: (str) -> List[str]
"""Extract the list of files from a tar or zip archive.
Args:
filename: name of the archive
Returns:
Sorted list of files in the archive, excluding './'
Raises:
ValueError: when the file is neither a zip nor a tar archive
FileNotFoundError: when the provided file does not exist (for Python 3)
IOError: when the provided file does not exist (for Python 2)
"""
names = [] # type: List[str]
if tarfile.is_tarfile(filename):
with tarfile.open(filename) as tar_file:
names = sorted(tar_file.getnames())
elif zipfile.is_zipfile(filename):
with zipfile.ZipFile(filename) as zip_file:
names = sorted(zip_file.namelist())
else:
raise ValueError("Can not get filenames from '{!s}'. "
"Not a tar or zip file".format(filename))
if "./" in names:
names.remove("./")
return names | Extract the list of files from a tar or zip archive.
Args:
filename: name of the archive
Returns:
Sorted list of files in the archive, excluding './'
Raises:
ValueError: when the file is neither a zip nor a tar archive
FileNotFoundError: when the provided file does not exist (for Python 3)
IOError: when the provided file does not exist (for Python 2) | Below is the the instruction that describes the task:
### Input:
Extract the list of files from a tar or zip archive.
Args:
filename: name of the archive
Returns:
Sorted list of files in the archive, excluding './'
Raises:
ValueError: when the file is neither a zip nor a tar archive
FileNotFoundError: when the provided file does not exist (for Python 3)
IOError: when the provided file does not exist (for Python 2)
### Response:
def _get_archive_filelist(filename):
    # type: (str) -> List[str]
    """Extract the list of files from a tar or zip archive.

    Args:
        filename: name of the archive

    Returns:
        Sorted list of files in the archive, excluding './'

    Raises:
        ValueError: when the file is neither a zip nor a tar archive
        FileNotFoundError: when the provided file does not exist (for Python 3)
        IOError: when the provided file does not exist (for Python 2)
    """
    # Probe the archive type first; open with the matching reader.
    if tarfile.is_tarfile(filename):
        with tarfile.open(filename) as archive:
            entries = archive.getnames()
    elif zipfile.is_zipfile(filename):
        with zipfile.ZipFile(filename) as archive:
            entries = archive.namelist()
    else:
        raise ValueError("Can not get filenames from '{!s}'. "
                         "Not a tar or zip file".format(filename))
    entries = sorted(entries)
    # Drop the archive-root entry that some archives carry.
    if "./" in entries:
        entries.remove("./")
    return entries
def extract_secs(self, tx, tx_in_idx):
"""
For a given script solution, iterate yield its sec blobs
"""
sc = tx.SolutionChecker(tx)
tx_context = sc.tx_context_for_idx(tx_in_idx)
# set solution_stack in case there are no results from puzzle_and_solution_iterator
solution_stack = []
for puzzle_script, solution_stack, flags, sighash_f in sc.puzzle_and_solution_iterator(tx_context):
for opcode, data, pc, new_pc in self._script_tools.get_opcodes(puzzle_script):
if data and is_sec(data):
yield data
for data in solution_stack:
if is_sec(data):
yield data | For a given script solution, iterate yield its sec blobs | Below is the the instruction that describes the task:
### Input:
For a given script solution, iterate over and yield its sec blobs
### Response:
def extract_secs(self, tx, tx_in_idx):
    """
    For a given script solution, iterate over and yield its sec blobs.
    """
    checker = tx.SolutionChecker(tx)
    context = checker.tx_context_for_idx(tx_in_idx)
    # Default in case puzzle_and_solution_iterator yields nothing at all.
    solution_stack = []
    for puzzle_script, solution_stack, flags, sighash_f in checker.puzzle_and_solution_iterator(context):
        for opcode, data, pc, new_pc in self._script_tools.get_opcodes(puzzle_script):
            if data and is_sec(data):
                yield data
    # Only the stack left over from the final iteration is scanned here.
    for blob in solution_stack:
        if is_sec(blob):
            yield blob
def dicom_to_nifti(dicom_input, output_file=None):
"""
This is the main dicom to nifti conversion fuction for philips images.
As input philips images are required. It will then determine the type of images and do the correct conversion
Examples: See unit test
:param output_file: file path to the output nifti
:param dicom_input: directory with dicom files for 1 scan
"""
assert common.is_philips(dicom_input)
if common.is_multiframe_dicom(dicom_input):
_assert_explicit_vr(dicom_input)
logger.info('Found multiframe dicom')
if _is_multiframe_4d(dicom_input):
logger.info('Found sequence type: MULTIFRAME 4D')
return _multiframe_to_nifti(dicom_input, output_file)
if _is_multiframe_anatomical(dicom_input):
logger.info('Found sequence type: MULTIFRAME ANATOMICAL')
return _multiframe_to_nifti(dicom_input, output_file)
else:
logger.info('Found singleframe dicom')
grouped_dicoms = _get_grouped_dicoms(dicom_input)
if _is_singleframe_4d(dicom_input):
logger.info('Found sequence type: SINGLEFRAME 4D')
return _singleframe_to_nifti(grouped_dicoms, output_file)
logger.info('Assuming anatomical data')
return convert_generic.dicom_to_nifti(dicom_input, output_file) | This is the main dicom to nifti conversion fuction for philips images.
As input philips images are required. It will then determine the type of images and do the correct conversion
Examples: See unit test
:param output_file: file path to the output nifti
:param dicom_input: directory with dicom files for 1 scan | Below is the the instruction that describes the task:
### Input:
This is the main dicom to nifti conversion function for philips images.
As input philips images are required. It will then determine the type of images and do the correct conversion
Examples: See unit test
:param output_file: file path to the output nifti
:param dicom_input: directory with dicom files for 1 scan
### Response:
def dicom_to_nifti(dicom_input, output_file=None):
    """
    This is the main dicom to nifti conversion function for philips images.
    As input philips images are required. It will then determine the type of images and do the correct conversion

    Examples: See unit test

    :param output_file: file path to the output nifti
    :param dicom_input: directory with dicom files for 1 scan
    """
    # Only Philips data is handled here; other vendors use their own converters.
    assert common.is_philips(dicom_input)
    if common.is_multiframe_dicom(dicom_input):
        # Multiframe (enhanced) DICOM requires an explicit VR transfer syntax.
        _assert_explicit_vr(dicom_input)
        logger.info('Found multiframe dicom')
        if _is_multiframe_4d(dicom_input):
            logger.info('Found sequence type: MULTIFRAME 4D')
            return _multiframe_to_nifti(dicom_input, output_file)
        if _is_multiframe_anatomical(dicom_input):
            logger.info('Found sequence type: MULTIFRAME ANATOMICAL')
            return _multiframe_to_nifti(dicom_input, output_file)
        # NOTE(review): a multiframe scan that is neither 4D nor anatomical
        # falls through here and the function returns None -- confirm this
        # is the intended behaviour for unsupported multiframe sequences.
    else:
        logger.info('Found singleframe dicom')
        grouped_dicoms = _get_grouped_dicoms(dicom_input)
        if _is_singleframe_4d(dicom_input):
            logger.info('Found sequence type: SINGLEFRAME 4D')
            return _singleframe_to_nifti(grouped_dicoms, output_file)
        # Singleframe data that is not 4D is converted as plain anatomical.
        logger.info('Assuming anatomical data')
        return convert_generic.dicom_to_nifti(dicom_input, output_file)
def local_timezone(value):
"""Add the local timezone to `value` to make it aware."""
if hasattr(value, "tzinfo") and value.tzinfo is None:
return value.replace(tzinfo=dateutil.tz.tzlocal())
return value | Add the local timezone to `value` to make it aware. | Below is the the instruction that describes the task:
### Input:
Add the local timezone to `value` to make it aware.
### Response:
def local_timezone(value):
    """Add the local timezone to `value` to make it aware.

    Values without a ``tzinfo`` attribute, and values that already carry a
    timezone, are returned unchanged.
    """
    # Only naive datetime-likes (tzinfo attribute present but None) qualify.
    if getattr(value, "tzinfo", False) is None:
        return value.replace(tzinfo=dateutil.tz.tzlocal())
    return value
def _validate(self):
"""
Do the actual validation
"""
if self.strictness == 'off':
return self.report
handle_inconsistencies(self.page, self.strictness, self.strategy, self.report)
        return self.report | Do the actual validation | Below is the instruction that describes the task:
### Input:
Do the actual validation
### Response:
def _validate(self):
    """
    Do the actual validation.

    Returns the report object; when strictness is 'off' no checks run and
    the (untouched) report is returned as-is.
    """
    if self.strictness != 'off':
        handle_inconsistencies(self.page, self.strictness, self.strategy, self.report)
    return self.report
def delete_work_item(self, id, project=None, destroy=None):
"""DeleteWorkItem.
[Preview API] Deletes the specified work item and sends it to the Recycle Bin, so that it can be restored back, if required. Optionally, if the destroy parameter has been set to true, it destroys the work item permanently. WARNING: If the destroy parameter is set to true, work items deleted by this command will NOT go to recycle-bin and there is no way to restore/recover them after deletion. It is recommended NOT to use this parameter. If you do, please use this parameter with extreme caution.
:param int id: ID of the work item to be deleted
:param str project: Project ID or project name
:param bool destroy: Optional parameter, if set to true, the work item is deleted permanently. Please note: the destroy action is PERMANENT and cannot be undone.
:rtype: :class:`<WorkItemDelete> <azure.devops.v5_1.work-item-tracking.models.WorkItemDelete>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'int')
query_parameters = {}
if destroy is not None:
query_parameters['destroy'] = self._serialize.query('destroy', destroy, 'bool')
response = self._send(http_method='DELETE',
location_id='72c7ddf8-2cdc-4f60-90cd-ab71c14a399b',
version='5.1-preview.3',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('WorkItemDelete', response) | DeleteWorkItem.
[Preview API] Deletes the specified work item and sends it to the Recycle Bin, so that it can be restored back, if required. Optionally, if the destroy parameter has been set to true, it destroys the work item permanently. WARNING: If the destroy parameter is set to true, work items deleted by this command will NOT go to recycle-bin and there is no way to restore/recover them after deletion. It is recommended NOT to use this parameter. If you do, please use this parameter with extreme caution.
:param int id: ID of the work item to be deleted
:param str project: Project ID or project name
:param bool destroy: Optional parameter, if set to true, the work item is deleted permanently. Please note: the destroy action is PERMANENT and cannot be undone.
:rtype: :class:`<WorkItemDelete> <azure.devops.v5_1.work-item-tracking.models.WorkItemDelete>` | Below is the the instruction that describes the task:
### Input:
DeleteWorkItem.
[Preview API] Deletes the specified work item and sends it to the Recycle Bin, so that it can be restored back, if required. Optionally, if the destroy parameter has been set to true, it destroys the work item permanently. WARNING: If the destroy parameter is set to true, work items deleted by this command will NOT go to recycle-bin and there is no way to restore/recover them after deletion. It is recommended NOT to use this parameter. If you do, please use this parameter with extreme caution.
:param int id: ID of the work item to be deleted
:param str project: Project ID or project name
:param bool destroy: Optional parameter, if set to true, the work item is deleted permanently. Please note: the destroy action is PERMANENT and cannot be undone.
:rtype: :class:`<WorkItemDelete> <azure.devops.v5_1.work-item-tracking.models.WorkItemDelete>`
### Response:
def delete_work_item(self, id, project=None, destroy=None):
    """DeleteWorkItem.
    [Preview API] Deletes the specified work item and sends it to the Recycle Bin, so that it can be restored back, if required. Optionally, if the destroy parameter has been set to true, it destroys the work item permanently. WARNING: If the destroy parameter is set to true, work items deleted by this command will NOT go to recycle-bin and there is no way to restore/recover them after deletion. It is recommended NOT to use this parameter. If you do, please use this parameter with extreme caution.
    :param int id: ID of the work item to be deleted
    :param str project: Project ID or project name
    :param bool destroy: Optional parameter, if set to true, the work item is deleted permanently. Please note: the destroy action is PERMANENT and cannot be undone.
    :rtype: :class:`<WorkItemDelete> <azure.devops.v5_1.work-item-tracking.models.WorkItemDelete>`
    """
    # Serialize only the route parameters that were actually supplied.
    route_values = {}
    for key, value, value_type in (('project', project, 'str'), ('id', id, 'int')):
        if value is not None:
            route_values[key] = self._serialize.url(key, value, value_type)
    query_parameters = {}
    if destroy is not None:
        query_parameters['destroy'] = self._serialize.query('destroy', destroy, 'bool')
    response = self._send(http_method='DELETE',
                          location_id='72c7ddf8-2cdc-4f60-90cd-ab71c14a399b',
                          version='5.1-preview.3',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('WorkItemDelete', response)
def authenticate(json_path=None):
"""Gets a Natural Language API parser by authenticating the API.
**This method is deprecated.** Please use :obj:`budou.get_parser` to obtain a
parser instead.
Args:
json_path (:obj:`str`, optional): The file path to the service account's
credentials.
Returns:
Parser. (:obj:`budou.parser.NLAPIParser`)
"""
msg = ('budou.authentication() is deprecated. '
'Please use budou.get_parser() to obtain a parser instead.')
warnings.warn(msg, DeprecationWarning)
parser = get_parser('nlapi', credentials_path=json_path)
return parser | Gets a Natural Language API parser by authenticating the API.
**This method is deprecated.** Please use :obj:`budou.get_parser` to obtain a
parser instead.
Args:
json_path (:obj:`str`, optional): The file path to the service account's
credentials.
Returns:
Parser. (:obj:`budou.parser.NLAPIParser`) | Below is the the instruction that describes the task:
### Input:
Gets a Natural Language API parser by authenticating the API.
**This method is deprecated.** Please use :obj:`budou.get_parser` to obtain a
parser instead.
Args:
json_path (:obj:`str`, optional): The file path to the service account's
credentials.
Returns:
Parser. (:obj:`budou.parser.NLAPIParser`)
### Response:
def authenticate(json_path=None):
    """Gets a Natural Language API parser by authenticating the API.

    **This method is deprecated.** Please use :obj:`budou.get_parser` to
    obtain a parser instead.

    Args:
      json_path (:obj:`str`, optional): The file path to the service
          account's credentials.

    Returns:
      Parser. (:obj:`budou.parser.NLAPIParser`)
    """
    warnings.warn(
        'budou.authentication() is deprecated. '
        'Please use budou.get_parser() to obtain a parser instead.',
        DeprecationWarning)
    return get_parser('nlapi', credentials_path=json_path)
def micro_calc(TP, item):
"""
Calculate PPV_Micro and TPR_Micro.
:param TP: true positive
:type TP:dict
:param item: FN or FP
:type item : dict
:return: PPV_Micro or TPR_Micro as float
"""
try:
TP_sum = sum(TP.values())
item_sum = sum(item.values())
return TP_sum / (TP_sum + item_sum)
except Exception:
return "None" | Calculate PPV_Micro and TPR_Micro.
:param TP: true positive
:type TP:dict
:param item: FN or FP
:type item : dict
:return: PPV_Micro or TPR_Micro as float | Below is the the instruction that describes the task:
### Input:
Calculate PPV_Micro and TPR_Micro.
:param TP: true positive
:type TP:dict
:param item: FN or FP
:type item : dict
:return: PPV_Micro or TPR_Micro as float
### Response:
def micro_calc(TP, item):
    """
    Calculate PPV_Micro and TPR_Micro.

    :param TP: true positive
    :type TP: dict
    :param item: FN or FP
    :type item: dict
    :return: PPV_Micro or TPR_Micro as float
    """
    try:
        true_positives = sum(TP.values())
        return true_positives / (true_positives + sum(item.values()))
    except Exception:
        # pycm convention: any failure (bad input, zero division) yields "None".
        return "None"
def load(self, conn_list, **kwargs):
""" Takes a list of connections and sets them in the manager
args:
conn_list: list of connection defitions
"""
for conn in conn_list:
conn['delay_check'] = kwargs.get('delay_check', False)
self.set_conn(**conn)
if kwargs.get('delay_check'):
test = self.wait_for_conns(**kwargs)
if not test:
log.critical("\n\nEXITING:Unable to establish connections \n"
"%s", test) | Takes a list of connections and sets them in the manager
args:
            conn_list: list of connection definitions | Below is the instruction that describes the task:
### Input:
Takes a list of connections and sets them in the manager
args:
            conn_list: list of connection definitions
### Response:
def load(self, conn_list, **kwargs):
    """ Takes a list of connections and sets them in the manager

    args:
        conn_list: list of connection definitions
    """
    # Each connection dict is tagged (in place) with the delay_check flag.
    delay_check = kwargs.get('delay_check', False)
    for conn in conn_list:
        conn['delay_check'] = delay_check
        self.set_conn(**conn)
    if delay_check:
        test = self.wait_for_conns(**kwargs)
        if not test:
            log.critical("\n\nEXITING:Unable to establish connections \n"
                         "%s", test)
def auth_check(self,
auth_list,
funs,
args,
tgt,
tgt_type='glob',
groups=None,
publish_validate=False,
minions=None,
whitelist=None):
'''
Returns a bool which defines if the requested function is authorized.
Used to evaluate the standard structure under external master
authentication interfaces, like eauth, peer, peer_run, etc.
'''
if self.opts.get('auth.enable_expanded_auth_matching', False):
return self.auth_check_expanded(auth_list, funs, args, tgt, tgt_type, groups, publish_validate)
if publish_validate:
v_tgt_type = tgt_type
if tgt_type.lower() in ('pillar', 'pillar_pcre'):
v_tgt_type = 'pillar_exact'
elif tgt_type.lower() == 'compound':
v_tgt_type = 'compound_pillar_exact'
_res = self.check_minions(tgt, v_tgt_type)
v_minions = set(_res['minions'])
_res = self.check_minions(tgt, tgt_type)
minions = set(_res['minions'])
mismatch = bool(minions.difference(v_minions))
# If the non-exact match gets more minions than the exact match
# then pillar globbing or PCRE is being used, and we have a
# problem
if mismatch:
return False
# compound commands will come in a list so treat everything as a list
if not isinstance(funs, list):
funs = [funs]
args = [args]
try:
for num, fun in enumerate(funs):
if whitelist and fun in whitelist:
return True
for ind in auth_list:
if isinstance(ind, six.string_types):
# Allowed for all minions
if self.match_check(ind, fun):
return True
elif isinstance(ind, dict):
if len(ind) != 1:
# Invalid argument
continue
valid = next(six.iterkeys(ind))
# Check if minions are allowed
if self.validate_tgt(
valid,
tgt,
tgt_type,
minions=minions):
# Minions are allowed, verify function in allowed list
fun_args = args[num]
fun_kwargs = fun_args[-1] if fun_args else None
if isinstance(fun_kwargs, dict) and '__kwarg__' in fun_kwargs:
fun_args = list(fun_args) # copy on modify
del fun_args[-1]
else:
fun_kwargs = None
if self.__fun_check(ind[valid], fun, fun_args, fun_kwargs):
return True
except TypeError:
return False
return False | Returns a bool which defines if the requested function is authorized.
Used to evaluate the standard structure under external master
authentication interfaces, like eauth, peer, peer_run, etc. | Below is the the instruction that describes the task:
### Input:
Returns a bool which defines if the requested function is authorized.
Used to evaluate the standard structure under external master
authentication interfaces, like eauth, peer, peer_run, etc.
### Response:
def auth_check(self,
               auth_list,
               funs,
               args,
               tgt,
               tgt_type='glob',
               groups=None,
               publish_validate=False,
               minions=None,
               whitelist=None):
    '''
    Returns a bool which defines if the requested function is authorized.
    Used to evaluate the standard structure under external master
    authentication interfaces, like eauth, peer, peer_run, etc.

    auth_list -- entries the authenticated user may run; each entry is
        either a function-match string (applies to all minions) or a
        single-key dict mapping a target expression to allowed functions
    funs/args -- requested function name(s) and argument lists (lists when
        the request is a compound command)
    tgt/tgt_type -- requested minion target expression and match type
    publish_validate -- when True, re-resolve the target with the exact
        pillar variant to defeat pillar-glob/PCRE widening
    whitelist -- function names that are always authorized
    '''
    # Optionally delegate to the expanded matcher behind a config flag.
    if self.opts.get('auth.enable_expanded_auth_matching', False):
        return self.auth_check_expanded(auth_list, funs, args, tgt, tgt_type, groups, publish_validate)
    if publish_validate:
        v_tgt_type = tgt_type
        # Pillar-based targeting is re-checked with its exact variant so a
        # glob/PCRE pillar expression cannot widen the minion set.
        if tgt_type.lower() in ('pillar', 'pillar_pcre'):
            v_tgt_type = 'pillar_exact'
        elif tgt_type.lower() == 'compound':
            v_tgt_type = 'compound_pillar_exact'
        _res = self.check_minions(tgt, v_tgt_type)
        v_minions = set(_res['minions'])
        _res = self.check_minions(tgt, tgt_type)
        minions = set(_res['minions'])
        mismatch = bool(minions.difference(v_minions))
        # If the non-exact match gets more minions than the exact match
        # then pillar globbing or PCRE is being used, and we have a
        # problem
        if mismatch:
            return False
    # compound commands will come in a list so treat everything as a list
    if not isinstance(funs, list):
        funs = [funs]
        args = [args]
    try:
        # Authorization succeeds as soon as any requested function matches
        # any auth_list entry; malformed input hits the TypeError below.
        for num, fun in enumerate(funs):
            if whitelist and fun in whitelist:
                return True
            for ind in auth_list:
                if isinstance(ind, six.string_types):
                    # Allowed for all minions
                    if self.match_check(ind, fun):
                        return True
                elif isinstance(ind, dict):
                    if len(ind) != 1:
                        # Invalid argument
                        continue
                    valid = next(six.iterkeys(ind))
                    # Check if minions are allowed
                    if self.validate_tgt(
                        valid,
                        tgt,
                        tgt_type,
                        minions=minions):
                        # Minions are allowed, verify function in allowed list
                        fun_args = args[num]
                        fun_kwargs = fun_args[-1] if fun_args else None
                        # A trailing __kwarg__ dict holds keyword arguments;
                        # strip it from the positional args before checking.
                        if isinstance(fun_kwargs, dict) and '__kwarg__' in fun_kwargs:
                            fun_args = list(fun_args) # copy on modify
                            del fun_args[-1]
                        else:
                            fun_kwargs = None
                        if self.__fun_check(ind[valid], fun, fun_args, fun_kwargs):
                            return True
    except TypeError:
        # Malformed funs/args/auth_list structures fail closed.
        return False
    # Fail closed: nothing matched, so the request is not authorized.
    return False
def solveServiceArea(self,facilities,method="POST",
barriers=None,
polylineBarriers=None,
polygonBarriers=None,
travelMode=None,
attributeParameterValues=None,
defaultBreaks=None,
excludeSourcesFromPolygons=None,
mergeSimilarPolygonRanges=None,
outputLines=None,
outputPolygons=None,
overlapLines=None,
overlapPolygons=None,
splitLinesAtBreaks=None,
splitPolygonsAtBreaks=None,
trimOuterPolygon=None,
trimPolygonDistance=None,
trimPolygonDistanceUnits=None,
returnFacilities=False,
returnBarriers=False,
returnPolylineBarriers=False,
returnPolygonBarriers=False,
outSR=None,
accumulateAttributeNames=None,
impedanceAttributeName=None,
restrictionAttributeNames=None,
restrictUTurns=None,
outputGeometryPrecision=None,
outputGeometryPrecisionUnits='esriUnknownUnits',
useHierarchy=None,
timeOfDay=None,
timeOfDayIsUTC=None,
travelDirection=None,
returnZ=False):
""" The solve service area operation is performed on a network layer
resource of type service area (layerType is esriNAServerServiceArea).
You can provide arguments to the solve service area operation as
query parameters.
Inputs:
facilities - The set of facilities loaded as network locations
during analysis. Facilities can be specified using
a simple comma / semi-colon based syntax or as a
JSON structure. If facilities are not specified,
preloaded facilities from the map document are used
in the analysis. If an empty json object is passed
('{}') preloaded facilities are ignored.
barriers - The set of barriers loaded as network locations during
analysis. Barriers can be specified using a simple
comma/semicolon-based syntax or as a JSON structure.
If barriers are not specified, preloaded barriers from
the map document are used in the analysis. If an empty
json object is passed ('{}'), preloaded barriers are
ignored.
polylineBarriers - The set of polyline barriers loaded as network
locations during analysis. If polyline barriers
are not specified, preloaded polyline barriers
from the map document are used in the analysis.
If an empty json object is passed ('{}'),
preloaded polyline barriers are ignored.
polygonBarriers - The set of polygon barriers loaded as network
locations during analysis. If polygon barriers
are not specified, preloaded polygon barriers
from the map document are used in the analysis.
If an empty json object is passed ('{}'),
preloaded polygon barriers are ignored.
travelMode - Travel modes provide override values that help you
quickly and consistently model a vehicle or mode of
transportation. The chosen travel mode must be
preconfigured on the network dataset that the
service area service references.
attributeParameterValues - A set of attribute parameter values that
can be parameterized to determine which
network elements can be used by a vehicle.
defaultBreaks - A comma-separated list of doubles. The default is
defined in the network analysis layer.
excludeSourcesFromPolygons - A comma-separated list of string names.
The default is defined in the network
analysis layer.
mergeSimilarPolygonRanges - If true, similar ranges will be merged
in the result polygons. The default is
defined in the network analysis layer.
outputLines - The type of lines(s) generated. The default is as
defined in the network analysis layer.
outputPolygons - The type of polygon(s) generated. The default is
as defined in the network analysis layer.
overlapLines - Indicates if the lines should overlap from multiple
facilities. The default is defined in the network
analysis layer.
overlapPolygons - Indicates if the polygons for all facilities
should overlap. The default is defined in the
network analysis layer.
splitLinesAtBreaks - If true, lines will be split at breaks. The
default is defined in the network analysis
layer.
splitPolygonsAtBreaks - If true, polygons will be split at breaks.
The default is defined in the network
analysis layer.
trimOuterPolygon - If true, the outermost polygon (at the maximum
break value) will be trimmed. The default is
defined in the network analysis layer.
trimPolygonDistance - If polygons are being trimmed, provides the
distance to trim. The default is defined in
the network analysis layer.
trimPolygonDistanceUnits - If polygons are being trimmed, specifies
the units of the trimPolygonDistance. The
default is defined in the network analysis
layer.
returnFacilities - If true, facilities will be returned with the
analysis results. Default is false.
returnBarriers - If true, barriers will be returned with the analysis
results. Default is false.
returnPolylineBarriers - If true, polyline barriers will be returned
with the analysis results. Default is false.
returnPolygonBarriers - If true, polygon barriers will be returned
with the analysis results. Default is false.
outSR - The well-known ID of the spatial reference for the geometries
returned with the analysis results. If outSR is not specified,
the geometries are returned in the spatial reference of the map.
accumulateAttributeNames - The list of network attribute names to be
accumulated with the analysis. The default
is as defined in the network analysis layer.
The value should be specified as a comma
separated list of attribute names. You can
also specify a value of none to indicate that
no network attributes should be accumulated.
impedanceAttributeName - The network attribute name to be used as the
impedance attribute in analysis. The default
is as defined in the network analysis layer.
restrictionAttributeNames - The list of network attribute names to be
used as restrictions with the analysis. The
default is as defined in the network analysis
layer. The value should be specified as a
comma separated list of attribute names.
You can also specify a value of none to
indicate that no network attributes should
be used as restrictions.
restrictUTurns - Specifies how U-Turns should be restricted in the
analysis. The default is as defined in the network
analysis layer. Values: esriNFSBAllowBacktrack |
esriNFSBAtDeadEndsOnly | esriNFSBNoBacktrack |
esriNFSBAtDeadEndsAndIntersections
outputGeometryPrecision - The precision of the output geometry after
generalization. If 0, no generalization of
output geometry is performed. The default is
as defined in the network service configuration.
outputGeometryPrecisionUnits - The units of the output geometry precision.
The default value is esriUnknownUnits.
Values: esriUnknownUnits | esriCentimeters |
esriDecimalDegrees | esriDecimeters |
esriFeet | esriInches | esriKilometers |
esriMeters | esriMiles | esriMillimeters |
esriNauticalMiles | esriPoints | esriYards
useHierarchy - If true, the hierarchy attribute for the network should be
used in analysis. The default is as defined in the network
layer. This cannot be used in conjunction with outputLines.
timeOfDay - The date and time at the facility. If travelDirection is set
to esriNATravelDirectionToFacility, the timeOfDay value
specifies the arrival time at the facility. if travelDirection
is set to esriNATravelDirectionFromFacility, the timeOfDay
value is the departure time from the facility. The time zone
for timeOfDay is specified by timeOfDayIsUTC.
timeOfDayIsUTC - The time zone or zones of the timeOfDay parameter. When
set to false, which is the default value, the timeOfDay
parameter refers to the time zone or zones in which the
facilities are located. Therefore, the start or end times
of the service areas are staggered by time zone.
travelDirection - Options for traveling to or from the facility. The
default is defined in the network analysis layer.
returnZ - If true, Z values will be included in saPolygons and saPolylines
geometry if the network dataset is Z-aware.
"""
if not self.layerType == "esriNAServerServiceAreaLayer":
raise ValueError("The solveServiceArea operation is supported on a network "
"layer of Service Area type only")
url = self._url + "/solveServiceArea"
params = {
"f" : "json",
"facilities": facilities
}
if not barriers is None:
params['barriers'] = barriers
if not polylineBarriers is None:
params['polylineBarriers'] = polylineBarriers
if not polygonBarriers is None:
params['polygonBarriers'] = polygonBarriers
if not travelMode is None:
params['travelMode'] = travelMode
if not attributeParameterValues is None:
params['attributeParameterValues'] = attributeParameterValues
if not defaultBreaks is None:
params['defaultBreaks'] = defaultBreaks
if not excludeSourcesFromPolygons is None:
params['excludeSourcesFromPolygons'] = excludeSourcesFromPolygons
if not mergeSimilarPolygonRanges is None:
params['mergeSimilarPolygonRanges'] = mergeSimilarPolygonRanges
if not outputLines is None:
params['outputLines'] = outputLines
if not outputPolygons is None:
params['outputPolygons'] = outputPolygons
if not overlapLines is None:
params['overlapLines'] = overlapLines
if not overlapPolygons is None:
params['overlapPolygons'] = overlapPolygons
if not splitLinesAtBreaks is None:
params['splitLinesAtBreaks'] = splitLinesAtBreaks
if not splitPolygonsAtBreaks is None:
params['splitPolygonsAtBreaks'] = splitPolygonsAtBreaks
if not trimOuterPolygon is None:
params['trimOuterPolygon'] = trimOuterPolygon
if not trimPolygonDistance is None:
params['trimPolygonDistance'] = trimPolygonDistance
if not trimPolygonDistanceUnits is None:
params['trimPolygonDistanceUnits'] = trimPolygonDistanceUnits
if not returnFacilities is None:
params['returnFacilities'] = returnFacilities
if not returnBarriers is None:
params['returnBarriers'] = returnBarriers
if not returnPolylineBarriers is None:
params['returnPolylineBarriers'] = returnPolylineBarriers
if not returnPolygonBarriers is None:
params['returnPolygonBarriers'] = returnPolygonBarriers
if not outSR is None:
params['outSR'] = outSR
if not accumulateAttributeNames is None:
params['accumulateAttributeNames'] = accumulateAttributeNames
if not impedanceAttributeName is None:
params['impedanceAttributeName'] = impedanceAttributeName
if not restrictionAttributeNames is None:
params['restrictionAttributeNames'] = restrictionAttributeNames
if not restrictUTurns is None:
params['restrictUTurns'] = restrictUTurns
if not outputGeometryPrecision is None:
params['outputGeometryPrecision'] = outputGeometryPrecision
if not outputGeometryPrecisionUnits is None:
params['outputGeometryPrecisionUnits'] = outputGeometryPrecisionUnits
if not useHierarchy is None:
params['useHierarchy'] = useHierarchy
if not timeOfDay is None:
params['timeOfDay'] = timeOfDay
if not timeOfDayIsUTC is None:
params['timeOfDayIsUTC'] = timeOfDayIsUTC
if not travelDirection is None:
params['travelDirection'] = travelDirection
if not returnZ is None:
params['returnZ'] = returnZ
if method.lower() == "post":
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
else:
return self._get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port) | The solve service area operation is performed on a network layer
resource of type service area (layerType is esriNAServerServiceArea).
You can provide arguments to the solve service area operation as
query parameters.
Inputs:
facilities - The set of facilities loaded as network locations
during analysis. Facilities can be specified using
a simple comma / semi-colon based syntax or as a
JSON structure. If facilities are not specified,
preloaded facilities from the map document are used
in the analysis. If an empty json object is passed
('{}') preloaded facilities are ignored.
barriers - The set of barriers loaded as network locations during
analysis. Barriers can be specified using a simple
comma/semicolon-based syntax or as a JSON structure.
If barriers are not specified, preloaded barriers from
the map document are used in the analysis. If an empty
json object is passed ('{}'), preloaded barriers are
ignored.
polylineBarriers - The set of polyline barriers loaded as network
locations during analysis. If polyline barriers
are not specified, preloaded polyline barriers
from the map document are used in the analysis.
If an empty json object is passed ('{}'),
preloaded polyline barriers are ignored.
polygonBarriers - The set of polygon barriers loaded as network
locations during analysis. If polygon barriers
are not specified, preloaded polygon barriers
from the map document are used in the analysis.
If an empty json object is passed ('{}'),
preloaded polygon barriers are ignored.
travelMode - Travel modes provide override values that help you
quickly and consistently model a vehicle or mode of
transportation. The chosen travel mode must be
preconfigured on the network dataset that the
service area service references.
attributeParameterValues - A set of attribute parameter values that
can be parameterized to determine which
network elements can be used by a vehicle.
defaultBreaks - A comma-separated list of doubles. The default is
defined in the network analysis layer.
excludeSourcesFromPolygons - A comma-separated list of string names.
The default is defined in the network
analysis layer.
mergeSimilarPolygonRanges - If true, similar ranges will be merged
in the result polygons. The default is
defined in the network analysis layer.
outputLines - The type of lines(s) generated. The default is as
defined in the network analysis layer.
outputPolygons - The type of polygon(s) generated. The default is
as defined in the network analysis layer.
overlapLines - Indicates if the lines should overlap from multiple
facilities. The default is defined in the network
analysis layer.
overlapPolygons - Indicates if the polygons for all facilities
should overlap. The default is defined in the
network analysis layer.
splitLinesAtBreaks - If true, lines will be split at breaks. The
default is defined in the network analysis
layer.
splitPolygonsAtBreaks - If true, polygons will be split at breaks.
The default is defined in the network
analysis layer.
trimOuterPolygon - If true, the outermost polygon (at the maximum
break value) will be trimmed. The default is
defined in the network analysis layer.
trimPolygonDistance - If polygons are being trimmed, provides the
distance to trim. The default is defined in
the network analysis layer.
trimPolygonDistanceUnits - If polygons are being trimmed, specifies
the units of the trimPolygonDistance. The
default is defined in the network analysis
layer.
returnFacilities - If true, facilities will be returned with the
analysis results. Default is false.
returnBarriers - If true, barriers will be returned with the analysis
results. Default is false.
returnPolylineBarriers - If true, polyline barriers will be returned
with the analysis results. Default is false.
returnPolygonBarriers - If true, polygon barriers will be returned
with the analysis results. Default is false.
outSR - The well-known ID of the spatial reference for the geometries
returned with the analysis results. If outSR is not specified,
the geometries are returned in the spatial reference of the map.
accumulateAttributeNames - The list of network attribute names to be
accumulated with the analysis. The default
is as defined in the network analysis layer.
The value should be specified as a comma
separated list of attribute names. You can
also specify a value of none to indicate that
no network attributes should be accumulated.
impedanceAttributeName - The network attribute name to be used as the
impedance attribute in analysis. The default
is as defined in the network analysis layer.
restrictionAttributeNames - The list of network attribute names to be
used as restrictions with the analysis. The
default is as defined in the network analysis
layer. The value should be specified as a
comma separated list of attribute names.
You can also specify a value of none to
indicate that no network attributes should
be used as restrictions.
restrictUTurns - Specifies how U-Turns should be restricted in the
analysis. The default is as defined in the network
analysis layer. Values: esriNFSBAllowBacktrack |
esriNFSBAtDeadEndsOnly | esriNFSBNoBacktrack |
esriNFSBAtDeadEndsAndIntersections
outputGeometryPrecision - The precision of the output geometry after
generalization. If 0, no generalization of
output geometry is performed. The default is
as defined in the network service configuration.
outputGeometryPrecisionUnits - The units of the output geometry precision.
The default value is esriUnknownUnits.
Values: esriUnknownUnits | esriCentimeters |
esriDecimalDegrees | esriDecimeters |
esriFeet | esriInches | esriKilometers |
esriMeters | esriMiles | esriMillimeters |
esriNauticalMiles | esriPoints | esriYards
useHierarchy - If true, the hierarchy attribute for the network should be
used in analysis. The default is as defined in the network
layer. This cannot be used in conjunction with outputLines.
timeOfDay - The date and time at the facility. If travelDirection is set
to esriNATravelDirectionToFacility, the timeOfDay value
specifies the arrival time at the facility. if travelDirection
is set to esriNATravelDirectionFromFacility, the timeOfDay
value is the departure time from the facility. The time zone
for timeOfDay is specified by timeOfDayIsUTC.
timeOfDayIsUTC - The time zone or zones of the timeOfDay parameter. When
set to false, which is the default value, the timeOfDay
parameter refers to the time zone or zones in which the
facilities are located. Therefore, the start or end times
of the service areas are staggered by time zone.
travelDirection - Options for traveling to or from the facility. The
default is defined in the network analysis layer.
returnZ - If true, Z values will be included in saPolygons and saPolylines
geometry if the network dataset is Z-aware. | Below is the instruction that describes the task:
### Input:
The solve service area operation is performed on a network layer
resource of type service area (layerType is esriNAServerServiceArea).
You can provide arguments to the solve service area operation as
query parameters.
Inputs:
facilities - The set of facilities loaded as network locations
during analysis. Facilities can be specified using
a simple comma / semi-colon based syntax or as a
JSON structure. If facilities are not specified,
preloaded facilities from the map document are used
in the analysis. If an empty json object is passed
('{}') preloaded facilities are ignored.
barriers - The set of barriers loaded as network locations during
analysis. Barriers can be specified using a simple
comma/semicolon-based syntax or as a JSON structure.
If barriers are not specified, preloaded barriers from
the map document are used in the analysis. If an empty
json object is passed ('{}'), preloaded barriers are
ignored.
polylineBarriers - The set of polyline barriers loaded as network
locations during analysis. If polyline barriers
are not specified, preloaded polyline barriers
from the map document are used in the analysis.
If an empty json object is passed ('{}'),
preloaded polyline barriers are ignored.
polygonBarriers - The set of polygon barriers loaded as network
locations during analysis. If polygon barriers
are not specified, preloaded polygon barriers
from the map document are used in the analysis.
If an empty json object is passed ('{}'),
preloaded polygon barriers are ignored.
travelMode - Travel modes provide override values that help you
quickly and consistently model a vehicle or mode of
transportation. The chosen travel mode must be
preconfigured on the network dataset that the
service area service references.
attributeParameterValues - A set of attribute parameter values that
can be parameterized to determine which
network elements can be used by a vehicle.
defaultBreaks - A comma-separated list of doubles. The default is
defined in the network analysis layer.
excludeSourcesFromPolygons - A comma-separated list of string names.
The default is defined in the network
analysis layer.
mergeSimilarPolygonRanges - If true, similar ranges will be merged
in the result polygons. The default is
defined in the network analysis layer.
outputLines - The type of lines(s) generated. The default is as
defined in the network analysis layer.
outputPolygons - The type of polygon(s) generated. The default is
as defined in the network analysis layer.
overlapLines - Indicates if the lines should overlap from multiple
facilities. The default is defined in the network
analysis layer.
overlapPolygons - Indicates if the polygons for all facilities
should overlap. The default is defined in the
network analysis layer.
splitLinesAtBreaks - If true, lines will be split at breaks. The
default is defined in the network analysis
layer.
splitPolygonsAtBreaks - If true, polygons will be split at breaks.
The default is defined in the network
analysis layer.
trimOuterPolygon - If true, the outermost polygon (at the maximum
break value) will be trimmed. The default is
defined in the network analysis layer.
trimPolygonDistance - If polygons are being trimmed, provides the
distance to trim. The default is defined in
the network analysis layer.
trimPolygonDistanceUnits - If polygons are being trimmed, specifies
the units of the trimPolygonDistance. The
default is defined in the network analysis
layer.
returnFacilities - If true, facilities will be returned with the
analysis results. Default is false.
returnBarriers - If true, barriers will be returned with the analysis
results. Default is false.
returnPolylineBarriers - If true, polyline barriers will be returned
with the analysis results. Default is false.
returnPolygonBarriers - If true, polygon barriers will be returned
with the analysis results. Default is false.
outSR - The well-known ID of the spatial reference for the geometries
returned with the analysis results. If outSR is not specified,
the geometries are returned in the spatial reference of the map.
accumulateAttributeNames - The list of network attribute names to be
accumulated with the analysis. The default
is as defined in the network analysis layer.
The value should be specified as a comma
separated list of attribute names. You can
also specify a value of none to indicate that
no network attributes should be accumulated.
impedanceAttributeName - The network attribute name to be used as the
impedance attribute in analysis. The default
is as defined in the network analysis layer.
restrictionAttributeNames - The list of network attribute names to be
used as restrictions with the analysis. The
default is as defined in the network analysis
layer. The value should be specified as a
comma separated list of attribute names.
You can also specify a value of none to
indicate that no network attributes should
be used as restrictions.
restrictUTurns - Specifies how U-Turns should be restricted in the
analysis. The default is as defined in the network
analysis layer. Values: esriNFSBAllowBacktrack |
esriNFSBAtDeadEndsOnly | esriNFSBNoBacktrack |
esriNFSBAtDeadEndsAndIntersections
outputGeometryPrecision - The precision of the output geometry after
generalization. If 0, no generalization of
output geometry is performed. The default is
as defined in the network service configuration.
outputGeometryPrecisionUnits - The units of the output geometry precision.
The default value is esriUnknownUnits.
Values: esriUnknownUnits | esriCentimeters |
esriDecimalDegrees | esriDecimeters |
esriFeet | esriInches | esriKilometers |
esriMeters | esriMiles | esriMillimeters |
esriNauticalMiles | esriPoints | esriYards
useHierarchy - If true, the hierarchy attribute for the network should be
used in analysis. The default is as defined in the network
layer. This cannot be used in conjunction with outputLines.
timeOfDay - The date and time at the facility. If travelDirection is set
to esriNATravelDirectionToFacility, the timeOfDay value
specifies the arrival time at the facility. if travelDirection
is set to esriNATravelDirectionFromFacility, the timeOfDay
value is the departure time from the facility. The time zone
for timeOfDay is specified by timeOfDayIsUTC.
timeOfDayIsUTC - The time zone or zones of the timeOfDay parameter. When
set to false, which is the default value, the timeOfDay
parameter refers to the time zone or zones in which the
facilities are located. Therefore, the start or end times
of the service areas are staggered by time zone.
travelDirection - Options for traveling to or from the facility. The
default is defined in the network analysis layer.
returnZ - If true, Z values will be included in saPolygons and saPolylines
geometry if the network dataset is Z-aware.
### Response:
def solveServiceArea(self,facilities,method="POST",
                     barriers=None,
                     polylineBarriers=None,
                     polygonBarriers=None,
                     travelMode=None,
                     attributeParameterValues=None,
                     defaultBreaks=None,
                     excludeSourcesFromPolygons=None,
                     mergeSimilarPolygonRanges=None,
                     outputLines=None,
                     outputPolygons=None,
                     overlapLines=None,
                     overlapPolygons=None,
                     splitLinesAtBreaks=None,
                     splitPolygonsAtBreaks=None,
                     trimOuterPolygon=None,
                     trimPolygonDistance=None,
                     trimPolygonDistanceUnits=None,
                     returnFacilities=False,
                     returnBarriers=False,
                     returnPolylineBarriers=False,
                     returnPolygonBarriers=False,
                     outSR=None,
                     accumulateAttributeNames=None,
                     impedanceAttributeName=None,
                     restrictionAttributeNames=None,
                     restrictUTurns=None,
                     outputGeometryPrecision=None,
                     outputGeometryPrecisionUnits='esriUnknownUnits',
                     useHierarchy=None,
                     timeOfDay=None,
                     timeOfDayIsUTC=None,
                     travelDirection=None,
                     returnZ=False):
    """Perform the solve service area operation on this network layer.

    Only valid on a network layer of service area type (layerType is
    esriNAServerServiceAreaLayer); raises ValueError otherwise. Every
    keyword argument left at None is omitted from the request so the
    default configured on the service / map document applies.

    Inputs:
        facilities - network locations to solve from; simple
            comma/semicolon syntax or a JSON structure. Pass '{}' to
            ignore preloaded facilities.
        method - HTTP verb for the request: "POST" (default) or anything
            else for GET.
        barriers / polylineBarriers / polygonBarriers - barrier locations
            loaded during analysis; pass '{}' to ignore the corresponding
            preloaded barriers.
        travelMode - override values from a travel mode preconfigured on
            the network dataset.
        attributeParameterValues - attribute parameter values determining
            which network elements a vehicle can use.
        defaultBreaks - comma-separated list of doubles.
        excludeSourcesFromPolygons - comma-separated list of source names.
        mergeSimilarPolygonRanges - if true, merge similar ranges in the
            result polygons.
        outputLines / outputPolygons - type of line(s)/polygon(s)
            generated.
        overlapLines / overlapPolygons - whether results from multiple
            facilities may overlap.
        splitLinesAtBreaks / splitPolygonsAtBreaks - if true, split the
            results at break values.
        trimOuterPolygon - if true, trim the outermost polygon (at the
            maximum break value).
        trimPolygonDistance / trimPolygonDistanceUnits - trim distance
            and its units when polygons are being trimmed.
        returnFacilities / returnBarriers / returnPolylineBarriers /
            returnPolygonBarriers - include the respective inputs in the
            analysis results (all default False).
        outSR - well-known ID of the spatial reference for the returned
            geometries; defaults to the map's spatial reference.
        accumulateAttributeNames - comma-separated network attributes to
            accumulate ('none' for no accumulation).
        impedanceAttributeName - network attribute used as impedance.
        restrictionAttributeNames - comma-separated restriction
            attributes ('none' for no restrictions).
        restrictUTurns - U-turn policy: esriNFSBAllowBacktrack |
            esriNFSBAtDeadEndsOnly | esriNFSBNoBacktrack |
            esriNFSBAtDeadEndsAndIntersections.
        outputGeometryPrecision / outputGeometryPrecisionUnits -
            generalization precision of the output geometry and its
            units (default 'esriUnknownUnits'; 0 precision disables
            generalization).
        useHierarchy - if true, use the network hierarchy attribute;
            cannot be combined with outputLines.
        timeOfDay / timeOfDayIsUTC - date/time at the facility
            (arrival or departure depending on travelDirection) and
            whether it is interpreted as UTC.
        travelDirection - travel to or from the facility.
        returnZ - if true, include Z values in saPolygons/saPolylines
            when the network dataset is Z-aware (default False).

    Returns:
        The service response as returned by self._post / self._get.
    """
    if not self.layerType == "esriNAServerServiceAreaLayer":
        raise ValueError("The solveServiceArea operation is supported on a network "
                         "layer of Service Area type only")
    url = self._url + "/solveServiceArea"
    params = {
        "f" : "json",
        "facilities": facilities
    }
    # Optional request parameters: send only the values the caller set so
    # the defaults configured on the service/map document are preserved.
    # NOTE: the boolean flags (returnFacilities, ..., returnZ) default to
    # False rather than None, so they are always sent -- this matches the
    # original per-parameter checks exactly.
    optional = {
        'barriers': barriers,
        'polylineBarriers': polylineBarriers,
        'polygonBarriers': polygonBarriers,
        'travelMode': travelMode,
        'attributeParameterValues': attributeParameterValues,
        'defaultBreaks': defaultBreaks,
        'excludeSourcesFromPolygons': excludeSourcesFromPolygons,
        'mergeSimilarPolygonRanges': mergeSimilarPolygonRanges,
        'outputLines': outputLines,
        'outputPolygons': outputPolygons,
        'overlapLines': overlapLines,
        'overlapPolygons': overlapPolygons,
        'splitLinesAtBreaks': splitLinesAtBreaks,
        'splitPolygonsAtBreaks': splitPolygonsAtBreaks,
        'trimOuterPolygon': trimOuterPolygon,
        'trimPolygonDistance': trimPolygonDistance,
        'trimPolygonDistanceUnits': trimPolygonDistanceUnits,
        'returnFacilities': returnFacilities,
        'returnBarriers': returnBarriers,
        'returnPolylineBarriers': returnPolylineBarriers,
        'returnPolygonBarriers': returnPolygonBarriers,
        'outSR': outSR,
        'accumulateAttributeNames': accumulateAttributeNames,
        'impedanceAttributeName': impedanceAttributeName,
        'restrictionAttributeNames': restrictionAttributeNames,
        'restrictUTurns': restrictUTurns,
        'outputGeometryPrecision': outputGeometryPrecision,
        'outputGeometryPrecisionUnits': outputGeometryPrecisionUnits,
        'useHierarchy': useHierarchy,
        'timeOfDay': timeOfDay,
        'timeOfDayIsUTC': timeOfDayIsUTC,
        'travelDirection': travelDirection,
        'returnZ': returnZ,
    }
    for key, value in optional.items():
        if value is not None:
            params[key] = value
    if method.lower() == "post":
        return self._post(url=url,
                          param_dict=params,
                          securityHandler=self._securityHandler,
                          proxy_url=self._proxy_url,
                          proxy_port=self._proxy_port)
    else:
        return self._get(url=url,
                         param_dict=params,
                         securityHandler=self._securityHandler,
                         proxy_url=self._proxy_url,
                         proxy_port=self._proxy_port)
def enter_pairs(iloc, pnames, snppad, edg, aseqs, asnps, smask, samplecov, locuscov, start):
    """ enters funcs for pairs """
    # Build the printable output string for one paired-end locus and update
    # the per-sample and per-locus coverage counters.
    # NOTE(review): aseqs appears to be indexed (locus, sample, site) and
    # asnps (locus, site, flag) with flag 0 = variable, flag 1 = PIS --
    # confirm against the caller that builds these arrays.
    ## snps was created using only the selected samples.
    LOGGER.info("edges in enter_pairs %s", edg)
    # Slice read 1 by its trimmed edges (edg[0]..edg[1], inclusive).
    seq1 = aseqs[iloc, :, edg[0]:edg[1]+1]
    snp1 = asnps[iloc, edg[0]:edg[1]+1, ]
    ## the 2nd read edges are +5 for the spacer
    seq2 = aseqs[iloc, :, edg[2]:edg[3]+1]
    snp2 = asnps[iloc, edg[2]:edg[3]+1, ]
    ## remove rows with all Ns, seq has only selected samples
    nalln = np.all(seq1 == "N", axis=1)
    ## make mask of removed rows and excluded samples. Use the inverse
    ## of this to save the coverage for samples
    # Boolean OR via '+': True where a sample is all-N OR excluded by smask.
    nsidx = nalln + smask
    LOGGER.info("nsidx %s, nalln %s, smask %s", nsidx, nalln, smask)
    samplecov = samplecov + np.invert(nsidx).astype(np.int32)
    LOGGER.info("samplecov %s", samplecov)
    # idx = number of samples kept at this locus; used as the histogram bin.
    idx = np.sum(np.invert(nsidx).astype(np.int32))
    LOGGER.info("idx %s", idx)
    locuscov[idx] += 1
    ## select the remaining names in order
    seq1 = seq1[~nsidx, ]
    seq2 = seq2[~nsidx, ]
    names = pnames[~nsidx]
    ## save string for printing, excluding names not in samples
    # Reads are joined with an "nnnn" spacer between read 1 and read 2.
    outstr = "\n".join(\
        [name + s1.tostring()+"nnnn"+s2.tostring() for name, s1, s2 in \
         zip(names, seq1, seq2)])
    #LOGGER.info("s1 %s", s1.tostring())
    #LOGGER.info("s2 %s", s2.tostring())
    ## get snp string and add to store
    # '-' marks a variable site, '*' a parsimony-informative site.
    snpstring1 = ["-" if snp1[i, 0] else \
                  "*" if snp1[i, 1] else \
                  " " for i in range(len(snp1))]
    snpstring2 = ["-" if snp2[i, 0] else \
                  "*" if snp2[i, 1] else \
                  " " for i in range(len(snp2))]
    #npis = str(snpstring1+snpstring2).count("*")
    #nvars = str(snpstring1+snpstring2).count("-") + npis
    # Trailer "|<locus id>|" uses the global locus index (chunk offset + iloc).
    outstr += "\n" + snppad + "".join(snpstring1)+\
              "    "+"".join(snpstring2)+"|{}|".format(iloc+start)
    #"|LOCID={},DBID={},NVAR={},NPIS={}|"\
    #.format(1+iloc+start, iloc, nvars, npis)
    return outstr, samplecov, locuscov | enters funcs for pairs | Below is the the instruction that describes the task:
### Input:
enters funcs for pairs
### Response:
def enter_pairs(iloc, pnames, snppad, edg, aseqs, asnps, smask, samplecov, locuscov, start):
""" enters funcs for pairs """
## snps was created using only the selected samples.
LOGGER.info("edges in enter_pairs %s", edg)
seq1 = aseqs[iloc, :, edg[0]:edg[1]+1]
snp1 = asnps[iloc, edg[0]:edg[1]+1, ]
## the 2nd read edges are +5 for the spacer
seq2 = aseqs[iloc, :, edg[2]:edg[3]+1]
snp2 = asnps[iloc, edg[2]:edg[3]+1, ]
## remove rows with all Ns, seq has only selected samples
nalln = np.all(seq1 == "N", axis=1)
## make mask of removed rows and excluded samples. Use the inverse
## of this to save the coverage for samples
nsidx = nalln + smask
LOGGER.info("nsidx %s, nalln %s, smask %s", nsidx, nalln, smask)
samplecov = samplecov + np.invert(nsidx).astype(np.int32)
LOGGER.info("samplecov %s", samplecov)
idx = np.sum(np.invert(nsidx).astype(np.int32))
LOGGER.info("idx %s", idx)
locuscov[idx] += 1
## select the remaining names in order
seq1 = seq1[~nsidx, ]
seq2 = seq2[~nsidx, ]
names = pnames[~nsidx]
## save string for printing, excluding names not in samples
outstr = "\n".join(\
[name + s1.tostring()+"nnnn"+s2.tostring() for name, s1, s2 in \
zip(names, seq1, seq2)])
#LOGGER.info("s1 %s", s1.tostring())
#LOGGER.info("s2 %s", s2.tostring())
## get snp string and add to store
snpstring1 = ["-" if snp1[i, 0] else \
"*" if snp1[i, 1] else \
" " for i in range(len(snp1))]
snpstring2 = ["-" if snp2[i, 0] else \
"*" if snp2[i, 1] else \
" " for i in range(len(snp2))]
#npis = str(snpstring1+snpstring2).count("*")
#nvars = str(snpstring1+snpstring2).count("-") + npis
outstr += "\n" + snppad + "".join(snpstring1)+\
" "+"".join(snpstring2)+"|{}|".format(iloc+start)
#"|LOCID={},DBID={},NVAR={},NPIS={}|"\
#.format(1+iloc+start, iloc, nvars, npis)
return outstr, samplecov, locuscov |
def get_tags(self, repo_slug=None):
""" Get a single repository on Bitbucket and return its tags."""
repo_slug = repo_slug or self.repo_slug or ''
url = self.url('GET_TAGS', username=self.username, repo_slug=repo_slug)
return self.dispatch('GET', url, auth=self.auth) | Get a single repository on Bitbucket and return its tags. | Below is the the instruction that describes the task:
### Input:
Get a single repository on Bitbucket and return its tags.
### Response:
def get_tags(self, repo_slug=None):
""" Get a single repository on Bitbucket and return its tags."""
repo_slug = repo_slug or self.repo_slug or ''
url = self.url('GET_TAGS', username=self.username, repo_slug=repo_slug)
return self.dispatch('GET', url, auth=self.auth) |
def roll(self, shift):
"""shift vector
"""
self._saved = LimitedSizeDict(size_limit=2**5)
new_arr = zeros(len(self), dtype=self.dtype)
if shift < 0:
shift = shift - len(self) * (shift // len(self))
if shift == 0:
return
new_arr[0:shift] = self[len(self)-shift: len(self)]
new_arr[shift:len(self)] = self[0:len(self)-shift]
self._data = new_arr._data | shift vector | Below is the the instruction that describes the task:
### Input:
shift vector
### Response:
def roll(self, shift):
"""shift vector
"""
self._saved = LimitedSizeDict(size_limit=2**5)
new_arr = zeros(len(self), dtype=self.dtype)
if shift < 0:
shift = shift - len(self) * (shift // len(self))
if shift == 0:
return
new_arr[0:shift] = self[len(self)-shift: len(self)]
new_arr[shift:len(self)] = self[0:len(self)-shift]
self._data = new_arr._data |
def create_sciobj(request, sysmeta_pyxb):
"""Create object file and database entries for a new native locally stored (non-
proxied) science object.
This method takes a request object and is only called from the views that
handle:
- MNStorage.create()
- MNStorage.update()
Various sanity checking is performed. Raises D1 exceptions that are returned
directly to the client. Adds create event to the event log.
Preconditions:
- None. This method should check everything.
Postconditions:
- A new file containing sciobj bytes, and models (database rows) for the newly
added object.
"""
pid = d1_common.xml.get_req_val(sysmeta_pyxb.identifier)
set_mn_controlled_values(request, sysmeta_pyxb, is_modification=False)
d1_gmn.app.views.assert_db.is_valid_pid_for_create(pid)
d1_gmn.app.views.assert_sysmeta.sanity(request, sysmeta_pyxb)
if _is_proxy_sciobj(request):
sciobj_url = _get_sciobj_proxy_url(request)
_sanity_check_proxy_url(sciobj_url)
else:
sciobj_url = d1_gmn.app.sciobj_store.get_rel_sciobj_file_url_by_pid(pid)
if not _is_proxy_sciobj(request):
if d1_gmn.app.resource_map.is_resource_map_sysmeta_pyxb(sysmeta_pyxb):
_create_resource_map(pid, request, sysmeta_pyxb, sciobj_url)
else:
_save_sciobj_bytes_from_request(request, pid)
d1_gmn.app.scimeta.assert_valid(sysmeta_pyxb, pid)
d1_gmn.app.sysmeta.create_or_update(sysmeta_pyxb, sciobj_url)
d1_gmn.app.event_log.create(
d1_common.xml.get_req_val(sysmeta_pyxb.identifier),
request,
timestamp=d1_common.date_time.normalize_datetime_to_utc(
sysmeta_pyxb.dateUploaded
),
) | Create object file and database entries for a new native locally stored (non-
proxied) science object.
This method takes a request object and is only called from the views that
handle:
- MNStorage.create()
- MNStorage.update()
Various sanity checking is performed. Raises D1 exceptions that are returned
directly to the client. Adds create event to the event log.
Preconditions:
- None. This method should check everything.
Postconditions:
- A new file containing sciobj bytes, and models (database rows) for the newly
added object. | Below is the instruction that describes the task:
### Input:
Create object file and database entries for a new native locally stored (non-
proxied) science object.
This method takes a request object and is only called from the views that
handle:
- MNStorage.create()
- MNStorage.update()
Various sanity checking is performed. Raises D1 exceptions that are returned
directly to the client. Adds create event to the event log.
Preconditions:
- None. This method should check everything.
Postconditions:
- A new file containing sciobj bytes, and models (database rows) for the newly
added object.
### Response:
def create_sciobj(request, sysmeta_pyxb):
"""Create object file and database entries for a new native locally stored (non-
proxied) science object.
This method takes a request object and is only called from the views that
handle:
- MNStorage.create()
- MNStorage.update()
Various sanity checking is performed. Raises D1 exceptions that are returned
directly to the client. Adds create event to the event log.
Preconditions:
- None. This method should check everything.
Postconditions:
- A new file containing sciobj bytes, and models (database rows) for the newly
added object.
"""
pid = d1_common.xml.get_req_val(sysmeta_pyxb.identifier)
set_mn_controlled_values(request, sysmeta_pyxb, is_modification=False)
d1_gmn.app.views.assert_db.is_valid_pid_for_create(pid)
d1_gmn.app.views.assert_sysmeta.sanity(request, sysmeta_pyxb)
if _is_proxy_sciobj(request):
sciobj_url = _get_sciobj_proxy_url(request)
_sanity_check_proxy_url(sciobj_url)
else:
sciobj_url = d1_gmn.app.sciobj_store.get_rel_sciobj_file_url_by_pid(pid)
if not _is_proxy_sciobj(request):
if d1_gmn.app.resource_map.is_resource_map_sysmeta_pyxb(sysmeta_pyxb):
_create_resource_map(pid, request, sysmeta_pyxb, sciobj_url)
else:
_save_sciobj_bytes_from_request(request, pid)
d1_gmn.app.scimeta.assert_valid(sysmeta_pyxb, pid)
d1_gmn.app.sysmeta.create_or_update(sysmeta_pyxb, sciobj_url)
d1_gmn.app.event_log.create(
d1_common.xml.get_req_val(sysmeta_pyxb.identifier),
request,
timestamp=d1_common.date_time.normalize_datetime_to_utc(
sysmeta_pyxb.dateUploaded
),
) |
def image_field_data(request, include_empty_option=False):
"""Returns a list of tuples of all images.
Generates a sorted list of images available. And returns a list of
(id, name) tuples.
:param request: django http request object
:param include_empty_option: flag to include a empty tuple in the front of
the list
:return: list of (id, name) tuples
"""
try:
images = get_available_images(request, request.user.project_id)
except Exception:
exceptions.handle(request, _('Unable to retrieve images'))
images.sort(key=lambda c: c.name)
images_list = [('', _('Select Image'))]
for image in images:
image_label = u"{} ({})".format(image.name, filesizeformat(image.size))
images_list.append((image.id, image_label))
if not images:
return [("", _("No images available")), ]
return images_list | Returns a list of tuples of all images.
Generates a sorted list of images available. And returns a list of
(id, name) tuples.
:param request: django http request object
:param include_empty_option: flag to include a empty tuple in the front of
the list
:return: list of (id, name) tuples | Below is the instruction that describes the task:
### Input:
Returns a list of tuples of all images.
Generates a sorted list of images available. And returns a list of
(id, name) tuples.
:param request: django http request object
:param include_empty_option: flag to include a empty tuple in the front of
the list
:return: list of (id, name) tuples
### Response:
def image_field_data(request, include_empty_option=False):
"""Returns a list of tuples of all images.
Generates a sorted list of images available. And returns a list of
(id, name) tuples.
:param request: django http request object
:param include_empty_option: flag to include a empty tuple in the front of
the list
:return: list of (id, name) tuples
"""
try:
images = get_available_images(request, request.user.project_id)
except Exception:
exceptions.handle(request, _('Unable to retrieve images'))
images.sort(key=lambda c: c.name)
images_list = [('', _('Select Image'))]
for image in images:
image_label = u"{} ({})".format(image.name, filesizeformat(image.size))
images_list.append((image.id, image_label))
if not images:
return [("", _("No images available")), ]
return images_list |
def add_adaptive_int(self, n):
"""
Add an integer to the stream.
:param int n: integer to add
"""
if n >= Message.big_int:
self.packet.write(max_byte)
self.add_string(util.deflate_long(n))
else:
self.packet.write(struct.pack(">I", n))
return self | Add an integer to the stream.
:param int n: integer to add | Below is the instruction that describes the task:
### Input:
Add an integer to the stream.
:param int n: integer to add
### Response:
def add_adaptive_int(self, n):
"""
Add an integer to the stream.
:param int n: integer to add
"""
if n >= Message.big_int:
self.packet.write(max_byte)
self.add_string(util.deflate_long(n))
else:
self.packet.write(struct.pack(">I", n))
return self |
def simple_pattern_exists_in_gcs(file_pattern, credentials=None):
"""True iff an object exists matching the input GCS pattern.
The GCS pattern must be a full object reference or a "simple pattern" that
conforms to the dsub input and output parameter restrictions:
* No support for **, ? wildcards or [] character ranges
* Wildcards may only appear in the file name
Args:
file_pattern: eg. 'gs://foo/ba*'
credentials: Optional credential to be used to load the file from gcs.
Raises:
ValueError: if file_pattern breaks the rules.
Returns:
True iff a file exists that matches that pattern.
"""
if '*' not in file_pattern:
return _file_exists_in_gcs(file_pattern, credentials)
if not file_pattern.startswith('gs://'):
raise ValueError('file name must start with gs://')
gcs_service = _get_storage_service(credentials)
bucket_name, prefix = file_pattern[len('gs://'):].split('/', 1)
if '*' in bucket_name:
raise ValueError('Wildcards may not appear in the bucket name')
# There is a '*' in prefix because we checked there's one in file_pattern
# and there isn't one in bucket_name. Hence it must be in prefix.
assert '*' in prefix
prefix_no_wildcard = prefix[:prefix.index('*')]
request = gcs_service.objects().list(
bucket=bucket_name, prefix=prefix_no_wildcard)
response = request.execute()
if 'items' not in response:
return False
items_list = [i['name'] for i in response['items']]
return any(fnmatch.fnmatch(i, prefix) for i in items_list) | True iff an object exists matching the input GCS pattern.
The GCS pattern must be a full object reference or a "simple pattern" that
conforms to the dsub input and output parameter restrictions:
* No support for **, ? wildcards or [] character ranges
* Wildcards may only appear in the file name
Args:
file_pattern: eg. 'gs://foo/ba*'
credentials: Optional credential to be used to load the file from gcs.
Raises:
ValueError: if file_pattern breaks the rules.
Returns:
True iff a file exists that matches that pattern. | Below is the instruction that describes the task:
### Input:
True iff an object exists matching the input GCS pattern.
The GCS pattern must be a full object reference or a "simple pattern" that
conforms to the dsub input and output parameter restrictions:
* No support for **, ? wildcards or [] character ranges
* Wildcards may only appear in the file name
Args:
file_pattern: eg. 'gs://foo/ba*'
credentials: Optional credential to be used to load the file from gcs.
Raises:
ValueError: if file_pattern breaks the rules.
Returns:
True iff a file exists that matches that pattern.
### Response:
def simple_pattern_exists_in_gcs(file_pattern, credentials=None):
"""True iff an object exists matching the input GCS pattern.
The GCS pattern must be a full object reference or a "simple pattern" that
conforms to the dsub input and output parameter restrictions:
* No support for **, ? wildcards or [] character ranges
* Wildcards may only appear in the file name
Args:
file_pattern: eg. 'gs://foo/ba*'
credentials: Optional credential to be used to load the file from gcs.
Raises:
ValueError: if file_pattern breaks the rules.
Returns:
True iff a file exists that matches that pattern.
"""
if '*' not in file_pattern:
return _file_exists_in_gcs(file_pattern, credentials)
if not file_pattern.startswith('gs://'):
raise ValueError('file name must start with gs://')
gcs_service = _get_storage_service(credentials)
bucket_name, prefix = file_pattern[len('gs://'):].split('/', 1)
if '*' in bucket_name:
raise ValueError('Wildcards may not appear in the bucket name')
# There is a '*' in prefix because we checked there's one in file_pattern
# and there isn't one in bucket_name. Hence it must be in prefix.
assert '*' in prefix
prefix_no_wildcard = prefix[:prefix.index('*')]
request = gcs_service.objects().list(
bucket=bucket_name, prefix=prefix_no_wildcard)
response = request.execute()
if 'items' not in response:
return False
items_list = [i['name'] for i in response['items']]
return any(fnmatch.fnmatch(i, prefix) for i in items_list) |
def add_2d_call_alignment(self, data):
""" Add the alignment and model_state data table..
:param data: Alignment and model_state table to be written.
"""
path = 'Analyses/{}'.format(self.group_name)
if 'BaseCalled_2D' not in self.handle.handle[path]:
self.handle.add_analysis_subgroup(self.group_name, 'BaseCalled_2D')
path = '{}/BaseCalled_2D'.format(self.group_name)
self.handle.add_analysis_dataset(path, 'Alignment', data) | Add the alignment and model_state data table..
:param data: Alignment and model_state table to be written. | Below is the instruction that describes the task:
### Input:
Add the alignment and model_state data table..
:param data: Alignment and model_state table to be written.
### Response:
def add_2d_call_alignment(self, data):
""" Add the alignment and model_state data table..
:param data: Alignment and model_state table to be written.
"""
path = 'Analyses/{}'.format(self.group_name)
if 'BaseCalled_2D' not in self.handle.handle[path]:
self.handle.add_analysis_subgroup(self.group_name, 'BaseCalled_2D')
path = '{}/BaseCalled_2D'.format(self.group_name)
self.handle.add_analysis_dataset(path, 'Alignment', data) |
def estimate(self):
""" Returns the estimate of the cardinality """
E = self.alpha * float(self.m ** 2) / np.power(2.0, - self.M).sum()
if E <= 2.5 * self.m: # Small range correction
V = self.m - np.count_nonzero(self.M)
return int(self.m * np.log(self.m / float(V))) if V > 0 else int(E)
# intermidiate range correction -> No correction
elif E <= float(long(1) << self.precision) / 30.0:
return int(E)
else:
return int(-(long(1) << self.precision) *
np.log(1.0 - E / (long(1) << self.precision))) | Returns the estimate of the cardinality | Below is the the instruction that describes the task:
### Input:
Returns the estimate of the cardinality
### Response:
def estimate(self):
""" Returns the estimate of the cardinality """
E = self.alpha * float(self.m ** 2) / np.power(2.0, - self.M).sum()
if E <= 2.5 * self.m: # Small range correction
V = self.m - np.count_nonzero(self.M)
return int(self.m * np.log(self.m / float(V))) if V > 0 else int(E)
# intermidiate range correction -> No correction
elif E <= float(long(1) << self.precision) / 30.0:
return int(E)
else:
return int(-(long(1) << self.precision) *
np.log(1.0 - E / (long(1) << self.precision))) |
def minimize(func, bounds=None, nvar=None, args=(), disp=False,
eps=1e-4,
maxf=20000,
maxT=6000,
algmethod=0,
fglobal=-1e100,
fglper=0.01,
volper=-1.0,
sigmaper=-1.0,
**kwargs
):
"""
Solve an optimization problem using the DIRECT (Dividing Rectangles) algorithm.
It can be used to solve general nonlinear programming problems of the form:
.. math::
\min_ {x \in R^n} f(x)
subject to
.. math::
x_L \leq x \leq x_U
Where :math:`x` are the optimization variables (with upper and lower
bounds), :math:`f(x)` is the objective function.
Parameters
----------
func : objective function
called as `func(x, *args)`; does not need to be defined everywhere,
raise an Exception where function is not defined
bounds : array-like
``(min, max)`` pairs for each element in ``x``, defining
the bounds on that parameter.
nvar: integer
Dimensionality of x (only needed if `bounds` is not defined)
eps : float
Ensures sufficient decrease in function value when a new potentially
optimal interval is chosen.
maxf : integer
Approximate upper bound on objective function evaluations.
.. note::
Maximal allowed value is 90000 see documentation of Fortran library.
maxT : integer
Maximum number of iterations.
.. note::
Maximal allowed value is 6000 see documentation of Fortran library.
algmethod : integer
Whether to use the original or modified DIRECT algorithm. Possible values:
* ``algmethod=0`` - use the original DIRECT algorithm
* ``algmethod=1`` - use the modified DIRECT-l algorithm
fglobal : float
Function value of the global optimum. If this value is not known set this
to a very large negative value.
fglper : float
Terminate the optimization when the percent error satisfies:
.. math::
100*(f_{min} - f_{global})/\max(1, |f_{global}|) \leq f_{glper}
volper : float
Terminate the optimization once the volume of a hyperrectangle is less
than volper percent of the original hyperrectangel.
sigmaper : float
Terminate the optimization once the measure of the hyperrectangle is less
than sigmaper.
Returns
-------
res : OptimizeResult
The optimization result represented as a ``OptimizeResult`` object.
Important attributes are: ``x`` the solution array, ``success`` a
Boolean flag indicating if the optimizer exited successfully and
``message`` which describes the cause of the termination. See
`OptimizeResult` for a description of other attributes.
"""
if bounds is None:
l = np.zeros(nvar, dtype=np.float64)
u = np.ones(nvar, dtype=np.float64)
else:
bounds = np.asarray(bounds)
l = bounds[:, 0]
u = bounds[:, 1]
def _objective_wrap(x, iidata, ddata, cdata, n, iisize, idsize, icsize):
"""
Wrap the python objective to comply with the signature required by the
Fortran library.
Returns the function value and a flag indicating whether function is defined.
If function is not defined return np.nan
"""
try:
return func(x, *args), 0
except:
return np.nan, 1
#
# Dummy values so that the python wrapper will comply with the required
# signature of the fortran library.
#
iidata = np.ones(0, dtype=np.int32)
ddata = np.ones(0, dtype=np.float64)
cdata = np.ones([0, 40], dtype=np.uint8)
#
# Call the DIRECT algorithm
#
x, fun, ierror = direct(
_objective_wrap,
eps,
maxf,
maxT,
l,
u,
algmethod,
'dummylogfile',
fglobal,
fglper,
volper,
sigmaper,
iidata,
ddata,
cdata,
disp
)
return OptimizeResult(x=x,fun=fun, status=ierror, success=ierror>0,
message=SUCCESS_MESSAGES[ierror-1] if ierror>0 else ERROR_MESSAGES[abs(ierror)-1]) | Solve an optimization problem using the DIRECT (Dividing Rectangles) algorithm.
It can be used to solve general nonlinear programming problems of the form:
.. math::
\min_ {x \in R^n} f(x)
subject to
.. math::
x_L \leq x \leq x_U
Where :math:`x` are the optimization variables (with upper and lower
bounds), :math:`f(x)` is the objective function.
Parameters
----------
func : objective function
called as `func(x, *args)`; does not need to be defined everywhere,
raise an Exception where function is not defined
bounds : array-like
``(min, max)`` pairs for each element in ``x``, defining
the bounds on that parameter.
nvar: integer
Dimensionality of x (only needed if `bounds` is not defined)
eps : float
Ensures sufficient decrease in function value when a new potentially
optimal interval is chosen.
maxf : integer
Approximate upper bound on objective function evaluations.
.. note::
Maximal allowed value is 90000 see documentation of Fortran library.
maxT : integer
Maximum number of iterations.
.. note::
Maximal allowed value is 6000 see documentation of Fortran library.
algmethod : integer
Whether to use the original or modified DIRECT algorithm. Possible values:
* ``algmethod=0`` - use the original DIRECT algorithm
* ``algmethod=1`` - use the modified DIRECT-l algorithm
fglobal : float
Function value of the global optimum. If this value is not known set this
to a very large negative value.
fglper : float
Terminate the optimization when the percent error satisfies:
.. math::
100*(f_{min} - f_{global})/\max(1, |f_{global}|) \leq f_{glper}
volper : float
Terminate the optimization once the volume of a hyperrectangle is less
than volper percent of the original hyperrectangel.
sigmaper : float
Terminate the optimization once the measure of the hyperrectangle is less
than sigmaper.
Returns
-------
res : OptimizeResult
The optimization result represented as a ``OptimizeResult`` object.
Important attributes are: ``x`` the solution array, ``success`` a
Boolean flag indicating if the optimizer exited successfully and
``message`` which describes the cause of the termination. See
`OptimizeResult` for a description of other attributes. | Below is the instruction that describes the task:
### Input:
Solve an optimization problem using the DIRECT (Dividing Rectangles) algorithm.
It can be used to solve general nonlinear programming problems of the form:
.. math::
\min_ {x \in R^n} f(x)
subject to
.. math::
x_L \leq x \leq x_U
Where :math:`x` are the optimization variables (with upper and lower
bounds), :math:`f(x)` is the objective function.
Parameters
----------
func : objective function
called as `func(x, *args)`; does not need to be defined everywhere,
raise an Exception where function is not defined
bounds : array-like
``(min, max)`` pairs for each element in ``x``, defining
the bounds on that parameter.
nvar: integer
Dimensionality of x (only needed if `bounds` is not defined)
eps : float
Ensures sufficient decrease in function value when a new potentially
optimal interval is chosen.
maxf : integer
Approximate upper bound on objective function evaluations.
.. note::
Maximal allowed value is 90000 see documentation of Fortran library.
maxT : integer
Maximum number of iterations.
.. note::
Maximal allowed value is 6000 see documentation of Fortran library.
algmethod : integer
Whether to use the original or modified DIRECT algorithm. Possible values:
* ``algmethod=0`` - use the original DIRECT algorithm
* ``algmethod=1`` - use the modified DIRECT-l algorithm
fglobal : float
Function value of the global optimum. If this value is not known set this
to a very large negative value.
fglper : float
Terminate the optimization when the percent error satisfies:
.. math::
100*(f_{min} - f_{global})/\max(1, |f_{global}|) \leq f_{glper}
volper : float
Terminate the optimization once the volume of a hyperrectangle is less
than volper percent of the original hyperrectangel.
sigmaper : float
Terminate the optimization once the measure of the hyperrectangle is less
than sigmaper.
Returns
-------
res : OptimizeResult
The optimization result represented as a ``OptimizeResult`` object.
Important attributes are: ``x`` the solution array, ``success`` a
Boolean flag indicating if the optimizer exited successfully and
``message`` which describes the cause of the termination. See
`OptimizeResult` for a description of other attributes.
### Response:
def minimize(func, bounds=None, nvar=None, args=(), disp=False,
eps=1e-4,
maxf=20000,
maxT=6000,
algmethod=0,
fglobal=-1e100,
fglper=0.01,
volper=-1.0,
sigmaper=-1.0,
**kwargs
):
"""
Solve an optimization problem using the DIRECT (Dividing Rectangles) algorithm.
It can be used to solve general nonlinear programming problems of the form:
.. math::
\min_ {x \in R^n} f(x)
subject to
.. math::
x_L \leq x \leq x_U
Where :math:`x` are the optimization variables (with upper and lower
bounds), :math:`f(x)` is the objective function.
Parameters
----------
func : objective function
called as `func(x, *args)`; does not need to be defined everywhere,
raise an Exception where function is not defined
bounds : array-like
``(min, max)`` pairs for each element in ``x``, defining
the bounds on that parameter.
nvar: integer
Dimensionality of x (only needed if `bounds` is not defined)
eps : float
Ensures sufficient decrease in function value when a new potentially
optimal interval is chosen.
maxf : integer
Approximate upper bound on objective function evaluations.
.. note::
Maximal allowed value is 90000 see documentation of Fortran library.
maxT : integer
Maximum number of iterations.
.. note::
Maximal allowed value is 6000 see documentation of Fortran library.
algmethod : integer
Whether to use the original or modified DIRECT algorithm. Possible values:
* ``algmethod=0`` - use the original DIRECT algorithm
* ``algmethod=1`` - use the modified DIRECT-l algorithm
fglobal : float
Function value of the global optimum. If this value is not known set this
to a very large negative value.
fglper : float
Terminate the optimization when the percent error satisfies:
.. math::
100*(f_{min} - f_{global})/\max(1, |f_{global}|) \leq f_{glper}
volper : float
Terminate the optimization once the volume of a hyperrectangle is less
than volper percent of the original hyperrectangel.
sigmaper : float
Terminate the optimization once the measure of the hyperrectangle is less
than sigmaper.
Returns
-------
res : OptimizeResult
The optimization result represented as a ``OptimizeResult`` object.
Important attributes are: ``x`` the solution array, ``success`` a
Boolean flag indicating if the optimizer exited successfully and
``message`` which describes the cause of the termination. See
`OptimizeResult` for a description of other attributes.
"""
if bounds is None:
l = np.zeros(nvar, dtype=np.float64)
u = np.ones(nvar, dtype=np.float64)
else:
bounds = np.asarray(bounds)
l = bounds[:, 0]
u = bounds[:, 1]
def _objective_wrap(x, iidata, ddata, cdata, n, iisize, idsize, icsize):
"""
Wrap the python objective to comply with the signature required by the
Fortran library.
Returns the function value and a flag indicating whether function is defined.
If function is not defined return np.nan
"""
try:
return func(x, *args), 0
except:
return np.nan, 1
#
# Dummy values so that the python wrapper will comply with the required
# signature of the fortran library.
#
iidata = np.ones(0, dtype=np.int32)
ddata = np.ones(0, dtype=np.float64)
cdata = np.ones([0, 40], dtype=np.uint8)
#
# Call the DIRECT algorithm
#
x, fun, ierror = direct(
_objective_wrap,
eps,
maxf,
maxT,
l,
u,
algmethod,
'dummylogfile',
fglobal,
fglper,
volper,
sigmaper,
iidata,
ddata,
cdata,
disp
)
return OptimizeResult(x=x,fun=fun, status=ierror, success=ierror>0,
message=SUCCESS_MESSAGES[ierror-1] if ierror>0 else ERROR_MESSAGES[abs(ierror)-1]) |
async def commit(
        request: web.Request, session: UpdateSession) -> web.Response:
    """ Serves /update/:session/commit """
    # Refuse to commit unless the update session has completed every
    # prior stage; 409 Conflict tells the client the system isn't ready.
    if session.stage != Stages.DONE:
        return web.json_response(
            data={'error': 'not-ready',
                  'message': f'System is not ready to commit the update '
                             f'(currently {session.stage.value.short})'},
            status=409)
    # Unmount the boot partition for the duration of the write so the
    # downloaded boot file can be written to it directly.
    # NOTE(review): the lambda appears to be a no-op progress callback --
    # confirm against write_file's signature.
    with dbus_actions.unmount_boot():
        write_file(os.path.join(session.download_path, BOOT_NAME),
                   constants.BOOT_PARTITION_NAME,
                   lambda x: None)
    # Mark the session so the client knows a restart will apply the update.
    session.set_stage(Stages.READY_FOR_RESTART)
    return web.json_response(
        data=session.state,
        status=200) | Serves /update/:session/commit | Below is the the instruction that describes the task:
### Input:
Serves /update/:session/commit
### Response:
async def commit(
request: web.Request, session: UpdateSession) -> web.Response:
""" Serves /update/:session/commit """
if session.stage != Stages.DONE:
return web.json_response(
data={'error': 'not-ready',
'message': f'System is not ready to commit the update '
f'(currently {session.stage.value.short})'},
status=409)
with dbus_actions.unmount_boot():
write_file(os.path.join(session.download_path, BOOT_NAME),
constants.BOOT_PARTITION_NAME,
lambda x: None)
session.set_stage(Stages.READY_FOR_RESTART)
return web.json_response(
data=session.state,
status=200) |
def list_operations(self, name, filter_, page_size=0, options=None):
"""
Lists operations that match the specified filter in the request. If the
server doesn't support this method, it returns ``UNIMPLEMENTED``.
NOTE: the ``name`` binding below allows API services to override the binding
to use different resource name schemes, such as ``users/*/operations``.
Example:
>>> from google.gapic.longrunning import operations_client
>>> from google.gax import CallOptions, INITIAL_PAGE
>>> api = operations_client.OperationsClient()
>>> name = ''
>>> filter_ = ''
>>>
>>> # Iterate over all results
>>> for element in api.list_operations(name, filter_):
>>> # process element
>>> pass
>>>
>>> # Or iterate over results one page at a time
>>> for page in api.list_operations(name, filter_, options=CallOptions(page_token=INITIAL_PAGE)):
>>> for element in page:
>>> # process element
>>> pass
Args:
name (string): The name of the operation collection.
filter_ (string): The standard list filter.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Returns:
A :class:`google.gax.PageIterator` instance. By default, this
is an iterable of :class:`google.longrunning.operations_pb2.Operation` instances.
This object can also be configured to iterate over the pages
of the response through the `CallOptions` parameter.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = operations_pb2.ListOperationsRequest(
name=name, filter=filter_, page_size=page_size)
return self._list_operations(request, options) | Lists operations that match the specified filter in the request. If the
server doesn't support this method, it returns ``UNIMPLEMENTED``.
NOTE: the ``name`` binding below allows API services to override the binding
to use different resource name schemes, such as ``users/*/operations``.
Example:
>>> from google.gapic.longrunning import operations_client
>>> from google.gax import CallOptions, INITIAL_PAGE
>>> api = operations_client.OperationsClient()
>>> name = ''
>>> filter_ = ''
>>>
>>> # Iterate over all results
>>> for element in api.list_operations(name, filter_):
>>> # process element
>>> pass
>>>
>>> # Or iterate over results one page at a time
>>> for page in api.list_operations(name, filter_, options=CallOptions(page_token=INITIAL_PAGE)):
>>> for element in page:
>>> # process element
>>> pass
Args:
name (string): The name of the operation collection.
filter_ (string): The standard list filter.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Returns:
A :class:`google.gax.PageIterator` instance. By default, this
is an iterable of :class:`google.longrunning.operations_pb2.Operation` instances.
This object can also be configured to iterate over the pages
of the response through the `CallOptions` parameter.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid. | Below is the the instruction that describes the task:
### Input:
Lists operations that match the specified filter in the request. If the
server doesn't support this method, it returns ``UNIMPLEMENTED``.
NOTE: the ``name`` binding below allows API services to override the binding
to use different resource name schemes, such as ``users/*/operations``.
Example:
>>> from google.gapic.longrunning import operations_client
>>> from google.gax import CallOptions, INITIAL_PAGE
>>> api = operations_client.OperationsClient()
>>> name = ''
>>> filter_ = ''
>>>
>>> # Iterate over all results
>>> for element in api.list_operations(name, filter_):
>>> # process element
>>> pass
>>>
>>> # Or iterate over results one page at a time
>>> for page in api.list_operations(name, filter_, options=CallOptions(page_token=INITIAL_PAGE)):
>>> for element in page:
>>> # process element
>>> pass
Args:
name (string): The name of the operation collection.
filter_ (string): The standard list filter.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Returns:
A :class:`google.gax.PageIterator` instance. By default, this
is an iterable of :class:`google.longrunning.operations_pb2.Operation` instances.
This object can also be configured to iterate over the pages
of the response through the `CallOptions` parameter.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
### Response:
def list_operations(self, name, filter_, page_size=0, options=None):
"""
Lists operations that match the specified filter in the request. If the
server doesn't support this method, it returns ``UNIMPLEMENTED``.
NOTE: the ``name`` binding below allows API services to override the binding
to use different resource name schemes, such as ``users/*/operations``.
Example:
>>> from google.gapic.longrunning import operations_client
>>> from google.gax import CallOptions, INITIAL_PAGE
>>> api = operations_client.OperationsClient()
>>> name = ''
>>> filter_ = ''
>>>
>>> # Iterate over all results
>>> for element in api.list_operations(name, filter_):
>>> # process element
>>> pass
>>>
>>> # Or iterate over results one page at a time
>>> for page in api.list_operations(name, filter_, options=CallOptions(page_token=INITIAL_PAGE)):
>>> for element in page:
>>> # process element
>>> pass
Args:
name (string): The name of the operation collection.
filter_ (string): The standard list filter.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Returns:
A :class:`google.gax.PageIterator` instance. By default, this
is an iterable of :class:`google.longrunning.operations_pb2.Operation` instances.
This object can also be configured to iterate over the pages
of the response through the `CallOptions` parameter.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = operations_pb2.ListOperationsRequest(
name=name, filter=filter_, page_size=page_size)
return self._list_operations(request, options) |
def pmt_with_id(self, pmt_id):
"""Get PMT with global pmt_id"""
try:
return self.pmts[self._pmt_index_by_pmt_id[pmt_id]]
except KeyError:
raise KeyError("No PMT found for ID: {0}".format(pmt_id)) | Get PMT with global pmt_id | Below is the the instruction that describes the task:
### Input:
Get PMT with global pmt_id
### Response:
def pmt_with_id(self, pmt_id):
"""Get PMT with global pmt_id"""
try:
return self.pmts[self._pmt_index_by_pmt_id[pmt_id]]
except KeyError:
raise KeyError("No PMT found for ID: {0}".format(pmt_id)) |
def configure_timeindex(self):
""" Construct a DateTimeIndex with the queried temporal resolution,
start- and end_snapshot. """
try:
ormclass = self._mapped['TempResolution']
if self.version:
tr = self.session.query(ormclass).filter(
ormclass.temp_id == self.temp_id).filter(
ormclass.version == self.version).one()
else:
tr = self.session.query(ormclass).filter(
ormclass.temp_id == self.temp_id).one()
except (KeyError, NoResultFound):
print('temp_id %s does not exist.' % self.temp_id)
timeindex = pd.DatetimeIndex(start=tr.start_time,
periods=tr.timesteps,
freq=tr.resolution)
self.timeindex = timeindex[self.start_snapshot - 1: self.end_snapshot]
""" pandas.tseries.index.DateTimeIndex :
Index of snapshots or timesteps. """ | Construct a DateTimeIndex with the queried temporal resolution,
start- and end_snapshot. | Below is the the instruction that describes the task:
### Input:
Construct a DateTimeIndex with the queried temporal resolution,
start- and end_snapshot.
### Response:
def configure_timeindex(self):
""" Construct a DateTimeIndex with the queried temporal resolution,
start- and end_snapshot. """
try:
ormclass = self._mapped['TempResolution']
if self.version:
tr = self.session.query(ormclass).filter(
ormclass.temp_id == self.temp_id).filter(
ormclass.version == self.version).one()
else:
tr = self.session.query(ormclass).filter(
ormclass.temp_id == self.temp_id).one()
except (KeyError, NoResultFound):
print('temp_id %s does not exist.' % self.temp_id)
timeindex = pd.DatetimeIndex(start=tr.start_time,
periods=tr.timesteps,
freq=tr.resolution)
self.timeindex = timeindex[self.start_snapshot - 1: self.end_snapshot]
""" pandas.tseries.index.DateTimeIndex :
Index of snapshots or timesteps. """ |
def frosted_glass_blur(x, severity=1):
"""Frosted glass blurring to images.
Apply frosted glass blurring to images by shuffling pixels locally.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied frosted glass blur.
"""
# sigma, max_delta, iterations
c = [(0.7, 1, 2), (0.9, 2, 1), (1, 2, 3), (1.1, 3, 2), (1.5, 4,
2)][severity - 1]
x = np.uint8(
tfds.core.lazy_imports.skimage.filters.gaussian(
np.array(x) / 255., sigma=c[0], multichannel=True) * 255)
# locally shuffle pixels
for _ in range(c[2]):
for h in range(x.shape[0] - c[1], c[1], -1):
for w in range(x.shape[1] - c[1], c[1], -1):
dx, dy = np.random.randint(-c[1], c[1], size=(2,))
h_prime, w_prime = h + dy, w + dx
# swap
x[h, w], x[h_prime, w_prime] = x[h_prime, w_prime], x[h, w]
x_clip = np.clip(
tfds.core.lazy_imports.skimage.filters.gaussian(
x / 255., sigma=c[0], multichannel=True), 0, 1)
x_clip *= 255
return around_and_astype(x_clip) | Frosted glass blurring to images.
Apply frosted glass blurring to images by shuffling pixels locally.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied frosted glass blur. | Below is the the instruction that describes the task:
### Input:
Frosted glass blurring to images.
Apply frosted glass blurring to images by shuffling pixels locally.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied frosted glass blur.
### Response:
def frosted_glass_blur(x, severity=1):
"""Frosted glass blurring to images.
Apply frosted glass blurring to images by shuffling pixels locally.
Args:
x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
severity: integer, severity of corruption.
Returns:
numpy array, image with uint8 pixels in [0,255]. Applied frosted glass blur.
"""
# sigma, max_delta, iterations
c = [(0.7, 1, 2), (0.9, 2, 1), (1, 2, 3), (1.1, 3, 2), (1.5, 4,
2)][severity - 1]
x = np.uint8(
tfds.core.lazy_imports.skimage.filters.gaussian(
np.array(x) / 255., sigma=c[0], multichannel=True) * 255)
# locally shuffle pixels
for _ in range(c[2]):
for h in range(x.shape[0] - c[1], c[1], -1):
for w in range(x.shape[1] - c[1], c[1], -1):
dx, dy = np.random.randint(-c[1], c[1], size=(2,))
h_prime, w_prime = h + dy, w + dx
# swap
x[h, w], x[h_prime, w_prime] = x[h_prime, w_prime], x[h, w]
x_clip = np.clip(
tfds.core.lazy_imports.skimage.filters.gaussian(
x / 255., sigma=c[0], multichannel=True), 0, 1)
x_clip *= 255
return around_and_astype(x_clip) |
def explicit_indexing_adapter(
key, shape, indexing_support, raw_indexing_method):
"""Support explicit indexing by delegating to a raw indexing method.
Outer and/or vectorized indexers are supported by indexing a second time
with a NumPy array.
Parameters
----------
key : ExplicitIndexer
Explicit indexing object.
shape : Tuple[int, ...]
Shape of the indexed array.
indexing_support : IndexingSupport enum
Form of indexing supported by raw_indexing_method.
raw_indexing_method: callable
Function (like ndarray.__getitem__) that when called with indexing key
in the form of a tuple returns an indexed array.
Returns
-------
Indexing result, in the form of a duck numpy-array.
"""
raw_key, numpy_indices = decompose_indexer(key, shape, indexing_support)
result = raw_indexing_method(raw_key.tuple)
if numpy_indices.tuple:
# index the loaded np.ndarray
result = NumpyIndexingAdapter(np.asarray(result))[numpy_indices]
return result | Support explicit indexing by delegating to a raw indexing method.
Outer and/or vectorized indexers are supported by indexing a second time
with a NumPy array.
Parameters
----------
key : ExplicitIndexer
Explicit indexing object.
shape : Tuple[int, ...]
Shape of the indexed array.
indexing_support : IndexingSupport enum
Form of indexing supported by raw_indexing_method.
raw_indexing_method: callable
Function (like ndarray.__getitem__) that when called with indexing key
in the form of a tuple returns an indexed array.
Returns
-------
Indexing result, in the form of a duck numpy-array. | Below is the the instruction that describes the task:
### Input:
Support explicit indexing by delegating to a raw indexing method.
Outer and/or vectorized indexers are supported by indexing a second time
with a NumPy array.
Parameters
----------
key : ExplicitIndexer
Explicit indexing object.
shape : Tuple[int, ...]
Shape of the indexed array.
indexing_support : IndexingSupport enum
Form of indexing supported by raw_indexing_method.
raw_indexing_method: callable
Function (like ndarray.__getitem__) that when called with indexing key
in the form of a tuple returns an indexed array.
Returns
-------
Indexing result, in the form of a duck numpy-array.
### Response:
def explicit_indexing_adapter(
key, shape, indexing_support, raw_indexing_method):
"""Support explicit indexing by delegating to a raw indexing method.
Outer and/or vectorized indexers are supported by indexing a second time
with a NumPy array.
Parameters
----------
key : ExplicitIndexer
Explicit indexing object.
shape : Tuple[int, ...]
Shape of the indexed array.
indexing_support : IndexingSupport enum
Form of indexing supported by raw_indexing_method.
raw_indexing_method: callable
Function (like ndarray.__getitem__) that when called with indexing key
in the form of a tuple returns an indexed array.
Returns
-------
Indexing result, in the form of a duck numpy-array.
"""
raw_key, numpy_indices = decompose_indexer(key, shape, indexing_support)
result = raw_indexing_method(raw_key.tuple)
if numpy_indices.tuple:
# index the loaded np.ndarray
result = NumpyIndexingAdapter(np.asarray(result))[numpy_indices]
return result |
def _clear(self, fully=True):
"""Delete all but first page from cache. Set keyframe to first page."""
pages = self.pages
if not pages:
return
self._keyframe = pages[0]
if fully:
# delete all but first TiffPage/TiffFrame
for i, page in enumerate(pages[1:]):
if not isinstance(page, inttypes) and page.offset is not None:
pages[i+1] = page.offset
elif TiffFrame is not TiffPage:
# delete only TiffFrames
for i, page in enumerate(pages):
if isinstance(page, TiffFrame) and page.offset is not None:
pages[i] = page.offset
self._cached = False | Delete all but first page from cache. Set keyframe to first page. | Below is the the instruction that describes the task:
### Input:
Delete all but first page from cache. Set keyframe to first page.
### Response:
def _clear(self, fully=True):
"""Delete all but first page from cache. Set keyframe to first page."""
pages = self.pages
if not pages:
return
self._keyframe = pages[0]
if fully:
# delete all but first TiffPage/TiffFrame
for i, page in enumerate(pages[1:]):
if not isinstance(page, inttypes) and page.offset is not None:
pages[i+1] = page.offset
elif TiffFrame is not TiffPage:
# delete only TiffFrames
for i, page in enumerate(pages):
if isinstance(page, TiffFrame) and page.offset is not None:
pages[i] = page.offset
self._cached = False |
def getDatastreamDissemination(self, pid, dsID, asOfDateTime=None, stream=False,
head=False, rqst_headers=None):
"""Get a single datastream on a Fedora object; optionally, get the version
as of a particular date time.
:param pid: object pid
:param dsID: datastream id
:param asOfDateTime: optional datetime; ``must`` be a non-naive datetime
so it can be converted to a date-time format Fedora can understand
:param stream: return a streaming response (default: False); use
is recommended for large datastreams
:param head: return a HEAD request instead of GET (default: False)
:param rqst_headers: request headers to be passed through to Fedora,
such as http range requests
:rtype: :class:`requests.models.Response`
"""
# /objects/{pid}/datastreams/{dsID}/content ? [asOfDateTime] [download]
http_args = {}
if rqst_headers is None:
rqst_headers = {}
if asOfDateTime:
http_args['asOfDateTime'] = datetime_to_fedoratime(asOfDateTime)
url = 'objects/%(pid)s/datastreams/%(dsid)s/content' % \
{'pid': pid, 'dsid': dsID}
if head:
reqmethod = self.head
else:
reqmethod = self.get
return reqmethod(url, params=http_args, stream=stream, headers=rqst_headers) | Get a single datastream on a Fedora object; optionally, get the version
as of a particular date time.
:param pid: object pid
:param dsID: datastream id
:param asOfDateTime: optional datetime; ``must`` be a non-naive datetime
so it can be converted to a date-time format Fedora can understand
:param stream: return a streaming response (default: False); use
is recommended for large datastreams
:param head: return a HEAD request instead of GET (default: False)
:param rqst_headers: request headers to be passed through to Fedora,
such as http range requests
:rtype: :class:`requests.models.Response` | Below is the the instruction that describes the task:
### Input:
Get a single datastream on a Fedora object; optionally, get the version
as of a particular date time.
:param pid: object pid
:param dsID: datastream id
:param asOfDateTime: optional datetime; ``must`` be a non-naive datetime
so it can be converted to a date-time format Fedora can understand
:param stream: return a streaming response (default: False); use
is recommended for large datastreams
:param head: return a HEAD request instead of GET (default: False)
:param rqst_headers: request headers to be passed through to Fedora,
such as http range requests
:rtype: :class:`requests.models.Response`
### Response:
def getDatastreamDissemination(self, pid, dsID, asOfDateTime=None, stream=False,
head=False, rqst_headers=None):
"""Get a single datastream on a Fedora object; optionally, get the version
as of a particular date time.
:param pid: object pid
:param dsID: datastream id
:param asOfDateTime: optional datetime; ``must`` be a non-naive datetime
so it can be converted to a date-time format Fedora can understand
:param stream: return a streaming response (default: False); use
is recommended for large datastreams
:param head: return a HEAD request instead of GET (default: False)
:param rqst_headers: request headers to be passed through to Fedora,
such as http range requests
:rtype: :class:`requests.models.Response`
"""
# /objects/{pid}/datastreams/{dsID}/content ? [asOfDateTime] [download]
http_args = {}
if rqst_headers is None:
rqst_headers = {}
if asOfDateTime:
http_args['asOfDateTime'] = datetime_to_fedoratime(asOfDateTime)
url = 'objects/%(pid)s/datastreams/%(dsid)s/content' % \
{'pid': pid, 'dsid': dsID}
if head:
reqmethod = self.head
else:
reqmethod = self.get
return reqmethod(url, params=http_args, stream=stream, headers=rqst_headers) |
def sample_all(generators, *args, **kwargs):
'''Convert list of audio waveform generators into list of packed sample generators.'''
return [sample(gen, *args, **kwargs) for gen in generators] | Convert list of audio waveform generators into list of packed sample generators. | Below is the the instruction that describes the task:
### Input:
Convert list of audio waveform generators into list of packed sample generators.
### Response:
def sample_all(generators, *args, **kwargs):
'''Convert list of audio waveform generators into list of packed sample generators.'''
return [sample(gen, *args, **kwargs) for gen in generators] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.