code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def data_received(self, data):
"""Handle received data."""
self._data_buffer += data.decode()
if not self._data_buffer.endswith('\r\n'):
return
data = self._data_buffer
self._data_buffer = '' # clear buffer
for cmd in data.strip().split('\r\n'):
data = json.loads(cmd)
if not isinstance(data, list):
data = [data]
for item in data:
self.handle_data(item) | Handle received data. | Below is the the instruction that describes the task:
### Input:
Handle received data.
### Response:
def data_received(self, data):
"""Handle received data."""
self._data_buffer += data.decode()
if not self._data_buffer.endswith('\r\n'):
return
data = self._data_buffer
self._data_buffer = '' # clear buffer
for cmd in data.strip().split('\r\n'):
data = json.loads(cmd)
if not isinstance(data, list):
data = [data]
for item in data:
self.handle_data(item) |
def _cleanup_closed(self) -> None:
"""Double confirmation for transport close.
Some broken ssl servers may leave socket open without proper close.
"""
if self._cleanup_closed_handle:
self._cleanup_closed_handle.cancel()
for transport in self._cleanup_closed_transports:
if transport is not None:
transport.abort()
self._cleanup_closed_transports = []
if not self._cleanup_closed_disabled:
self._cleanup_closed_handle = helpers.weakref_handle(
self, '_cleanup_closed',
self._cleanup_closed_period, self._loop) | Double confirmation for transport close.
Some broken ssl servers may leave socket open without proper close. | Below is the the instruction that describes the task:
### Input:
Double confirmation for transport close.
Some broken ssl servers may leave socket open without proper close.
### Response:
def _cleanup_closed(self) -> None:
"""Double confirmation for transport close.
Some broken ssl servers may leave socket open without proper close.
"""
if self._cleanup_closed_handle:
self._cleanup_closed_handle.cancel()
for transport in self._cleanup_closed_transports:
if transport is not None:
transport.abort()
self._cleanup_closed_transports = []
if not self._cleanup_closed_disabled:
self._cleanup_closed_handle = helpers.weakref_handle(
self, '_cleanup_closed',
self._cleanup_closed_period, self._loop) |
def ready(self):
"""Validate config and connect signals."""
super(ElasticAppConfig, self).ready()
_validate_config(settings.get_setting("strict_validation"))
_connect_signals() | Validate config and connect signals. | Below is the the instruction that describes the task:
### Input:
Validate config and connect signals.
### Response:
def ready(self):
"""Validate config and connect signals."""
super(ElasticAppConfig, self).ready()
_validate_config(settings.get_setting("strict_validation"))
_connect_signals() |
def wait_for_notification(self, notification_class=BaseNotification):
"""Wait for the specified notification to be displayed.
Args:
notification_class (:py:class:`BaseNotification`, optional):
The notification class to wait for. If `None` is specified it
will wait for any notification to be closed. Defaults to
`BaseNotification`.
Returns:
:py:class:`BaseNotification`: Firefox notification.
"""
if notification_class:
if notification_class is BaseNotification:
message = "No notification was shown."
else:
message = "{0} was not shown.".format(notification_class.__name__)
self.wait.until(
lambda _: isinstance(self.notification, notification_class),
message=message,
)
return self.notification
else:
self.wait.until(
lambda _: self.notification is None,
message="Unexpected notification shown.",
) | Wait for the specified notification to be displayed.
Args:
notification_class (:py:class:`BaseNotification`, optional):
The notification class to wait for. If `None` is specified it
will wait for any notification to be closed. Defaults to
`BaseNotification`.
Returns:
:py:class:`BaseNotification`: Firefox notification. | Below is the the instruction that describes the task:
### Input:
Wait for the specified notification to be displayed.
Args:
notification_class (:py:class:`BaseNotification`, optional):
The notification class to wait for. If `None` is specified it
will wait for any notification to be closed. Defaults to
`BaseNotification`.
Returns:
:py:class:`BaseNotification`: Firefox notification.
### Response:
def wait_for_notification(self, notification_class=BaseNotification):
"""Wait for the specified notification to be displayed.
Args:
notification_class (:py:class:`BaseNotification`, optional):
The notification class to wait for. If `None` is specified it
will wait for any notification to be closed. Defaults to
`BaseNotification`.
Returns:
:py:class:`BaseNotification`: Firefox notification.
"""
if notification_class:
if notification_class is BaseNotification:
message = "No notification was shown."
else:
message = "{0} was not shown.".format(notification_class.__name__)
self.wait.until(
lambda _: isinstance(self.notification, notification_class),
message=message,
)
return self.notification
else:
self.wait.until(
lambda _: self.notification is None,
message="Unexpected notification shown.",
) |
def children(self, **kwargs):
"""Retrieve the children of this `Part` as `Partset`.
When you call the :func:`Part.children()` method without any additional filtering options for the children,
the children are cached to help speed up subsequent calls to retrieve the children. The cached children are
returned as a list and not as a `Partset`.
When you *do provide* additional keyword arguments (kwargs) that act as a specific children filter, the
cached children are _not_ used and a separate API call is made to retrieve only those children.
:param kwargs: Additional search arguments to search for, check :class:`pykechain.Client.parts`
for additional info
:type kwargs: dict
:return: a set of `Parts` as a :class:`PartSet`. Will be empty if no children. Will be a `List` if the
children are retrieved from the cached children.
:raises APIError: When an error occurs.
Example
-------
A normal call, which caches all children of the bike. If you call `bike.children` twice only 1 API call is made.
>>> bike = project.part('Bike')
>>> direct_descendants_of_bike = bike.children()
An example with providing additional part search parameters 'name__icontains'. Children are retrieved from the
API, not the bike's internal (already cached in previous example) cache.
>>> bike = project.part('Bike')
>>> wheel_children_of_bike = bike.children(name__icontains='wheel')
"""
if not kwargs:
# no kwargs provided is the default, we aim to cache it.
if not self._cached_children:
self._cached_children = list(self._client.parts(parent=self.id, category=self.category))
return self._cached_children
else:
# if kwargs are provided, we assume no use of cache as specific filtering on the children is performed.
return self._client.parts(parent=self.id, category=self.category, **kwargs) | Retrieve the children of this `Part` as `Partset`.
When you call the :func:`Part.children()` method without any additional filtering options for the children,
the children are cached to help speed up subsequent calls to retrieve the children. The cached children are
returned as a list and not as a `Partset`.
When you *do provide* additional keyword arguments (kwargs) that act as a specific children filter, the
cached children are _not_ used and a separate API call is made to retrieve only those children.
:param kwargs: Additional search arguments to search for, check :class:`pykechain.Client.parts`
for additional info
:type kwargs: dict
:return: a set of `Parts` as a :class:`PartSet`. Will be empty if no children. Will be a `List` if the
children are retrieved from the cached children.
:raises APIError: When an error occurs.
Example
-------
A normal call, which caches all children of the bike. If you call `bike.children` twice only 1 API call is made.
>>> bike = project.part('Bike')
>>> direct_descendants_of_bike = bike.children()
An example with providing additional part search parameters 'name__icontains'. Children are retrieved from the
API, not the bike's internal (already cached in previous example) cache.
>>> bike = project.part('Bike')
>>> wheel_children_of_bike = bike.children(name__icontains='wheel') | Below is the the instruction that describes the task:
### Input:
Retrieve the children of this `Part` as `Partset`.
When you call the :func:`Part.children()` method without any additional filtering options for the children,
the children are cached to help speed up subsequent calls to retrieve the children. The cached children are
returned as a list and not as a `Partset`.
When you *do provide* additional keyword arguments (kwargs) that act as a specific children filter, the
cached children are _not_ used and a separate API call is made to retrieve only those children.
:param kwargs: Additional search arguments to search for, check :class:`pykechain.Client.parts`
for additional info
:type kwargs: dict
:return: a set of `Parts` as a :class:`PartSet`. Will be empty if no children. Will be a `List` if the
children are retrieved from the cached children.
:raises APIError: When an error occurs.
Example
-------
A normal call, which caches all children of the bike. If you call `bike.children` twice only 1 API call is made.
>>> bike = project.part('Bike')
>>> direct_descendants_of_bike = bike.children()
An example with providing additional part search parameters 'name__icontains'. Children are retrieved from the
API, not the bike's internal (already cached in previous example) cache.
>>> bike = project.part('Bike')
>>> wheel_children_of_bike = bike.children(name__icontains='wheel')
### Response:
def children(self, **kwargs):
"""Retrieve the children of this `Part` as `Partset`.
When you call the :func:`Part.children()` method without any additional filtering options for the children,
the children are cached to help speed up subsequent calls to retrieve the children. The cached children are
returned as a list and not as a `Partset`.
When you *do provide* additional keyword arguments (kwargs) that act as a specific children filter, the
cached children are _not_ used and a separate API call is made to retrieve only those children.
:param kwargs: Additional search arguments to search for, check :class:`pykechain.Client.parts`
for additional info
:type kwargs: dict
:return: a set of `Parts` as a :class:`PartSet`. Will be empty if no children. Will be a `List` if the
children are retrieved from the cached children.
:raises APIError: When an error occurs.
Example
-------
A normal call, which caches all children of the bike. If you call `bike.children` twice only 1 API call is made.
>>> bike = project.part('Bike')
>>> direct_descendants_of_bike = bike.children()
An example with providing additional part search parameters 'name__icontains'. Children are retrieved from the
API, not the bike's internal (already cached in previous example) cache.
>>> bike = project.part('Bike')
>>> wheel_children_of_bike = bike.children(name__icontains='wheel')
"""
if not kwargs:
# no kwargs provided is the default, we aim to cache it.
if not self._cached_children:
self._cached_children = list(self._client.parts(parent=self.id, category=self.category))
return self._cached_children
else:
# if kwargs are provided, we assume no use of cache as specific filtering on the children is performed.
return self._client.parts(parent=self.id, category=self.category, **kwargs) |
def check_clean_master(self, commit=False):
"""Perform a sanity check on SCM publishing constraints.
Checks for uncommitted tracked files and ensures we're on an allowed branch configured to push
to an allowed server if `commit` is `True`.
:param bool commit: `True` if a commit is in progress.
:raise TaskError: on failure
"""
if commit:
if self.restrict_push_branches:
branch = self.scm.branch_name
if branch not in self.restrict_push_branches:
raise self.InvalidBranchError('Can only push from {}, currently on branch: {}'
.format(' '.join(sorted(self.restrict_push_branches)),
branch))
if self.restrict_push_urls:
url = self.scm.server_url
if url not in self.restrict_push_urls:
raise self.InvalidRemoteError('Can only push to {}, currently the remote url is: {}'
.format(' '.join(sorted(self.restrict_push_urls)), url))
changed_files = self.scm.changed_files()
if changed_files:
raise self.DirtyWorkspaceError('Can only push from a clean branch, found : {}'
.format(' '.join(changed_files)))
elif self.scm:
self.log.info('Skipping check for a clean {} branch in test mode.'
.format(self.scm.branch_name)) | Perform a sanity check on SCM publishing constraints.
Checks for uncommitted tracked files and ensures we're on an allowed branch configured to push
to an allowed server if `commit` is `True`.
:param bool commit: `True` if a commit is in progress.
:raise TaskError: on failure | Below is the the instruction that describes the task:
### Input:
Perform a sanity check on SCM publishing constraints.
Checks for uncommitted tracked files and ensures we're on an allowed branch configured to push
to an allowed server if `commit` is `True`.
:param bool commit: `True` if a commit is in progress.
:raise TaskError: on failure
### Response:
def check_clean_master(self, commit=False):
"""Perform a sanity check on SCM publishing constraints.
Checks for uncommitted tracked files and ensures we're on an allowed branch configured to push
to an allowed server if `commit` is `True`.
:param bool commit: `True` if a commit is in progress.
:raise TaskError: on failure
"""
if commit:
if self.restrict_push_branches:
branch = self.scm.branch_name
if branch not in self.restrict_push_branches:
raise self.InvalidBranchError('Can only push from {}, currently on branch: {}'
.format(' '.join(sorted(self.restrict_push_branches)),
branch))
if self.restrict_push_urls:
url = self.scm.server_url
if url not in self.restrict_push_urls:
raise self.InvalidRemoteError('Can only push to {}, currently the remote url is: {}'
.format(' '.join(sorted(self.restrict_push_urls)), url))
changed_files = self.scm.changed_files()
if changed_files:
raise self.DirtyWorkspaceError('Can only push from a clean branch, found : {}'
.format(' '.join(changed_files)))
elif self.scm:
self.log.info('Skipping check for a clean {} branch in test mode.'
.format(self.scm.branch_name)) |
def recurse(self, full_matrix=False):
"""
recursion to calculate inverse covariance matrix
Parameters
----------
full_matrix : bool, optional
if True, the entire inverse matrix is calculated. otherwise, only the weighing vector.
"""
for n in self.tree.get_nonterminals(order='postorder'):
n_leaves = len(n._ii)
if full_matrix: M = np.zeros((n_leaves, n_leaves), dtype=float)
r = np.zeros(n_leaves, dtype=float)
c_count = 0
for c in n:
ssq = self.branch_variance(c)
nc = len(c._ii)
if c.is_terminal():
if full_matrix:
M[c_count, c_count] = 1.0/ssq
r[c_count] = 1.0/ssq
else:
if full_matrix:
M[c_count:c_count+nc, c_count:c_count+nc] = c.cinv - ssq*np.outer(c.r,c.r)/(1+ssq*c.s)
r[c_count:c_count+nc] = c.r/(1+ssq*c.s)
c_count += nc
if full_matrix: n.cinv = M
n.r = r #M.sum(axis=1)
n.s = n.r.sum() | recursion to calculate inverse covariance matrix
Parameters
----------
full_matrix : bool, optional
if True, the entire inverse matrix is calculated. otherwise, only the weighing vector. | Below is the the instruction that describes the task:
### Input:
recursion to calculate inverse covariance matrix
Parameters
----------
full_matrix : bool, optional
if True, the entire inverse matrix is calculated. otherwise, only the weighing vector.
### Response:
def recurse(self, full_matrix=False):
"""
recursion to calculate inverse covariance matrix
Parameters
----------
full_matrix : bool, optional
if True, the entire inverse matrix is calculated. otherwise, only the weighing vector.
"""
for n in self.tree.get_nonterminals(order='postorder'):
n_leaves = len(n._ii)
if full_matrix: M = np.zeros((n_leaves, n_leaves), dtype=float)
r = np.zeros(n_leaves, dtype=float)
c_count = 0
for c in n:
ssq = self.branch_variance(c)
nc = len(c._ii)
if c.is_terminal():
if full_matrix:
M[c_count, c_count] = 1.0/ssq
r[c_count] = 1.0/ssq
else:
if full_matrix:
M[c_count:c_count+nc, c_count:c_count+nc] = c.cinv - ssq*np.outer(c.r,c.r)/(1+ssq*c.s)
r[c_count:c_count+nc] = c.r/(1+ssq*c.s)
c_count += nc
if full_matrix: n.cinv = M
n.r = r #M.sum(axis=1)
n.s = n.r.sum() |
def as_json(self, ensure_ascii=False):
"""Property return key-value json-string from __slots__."""
return json.dumps(self.as_dict, ensure_ascii=ensure_ascii) | Property return key-value json-string from __slots__. | Below is the the instruction that describes the task:
### Input:
Property return key-value json-string from __slots__.
### Response:
def as_json(self, ensure_ascii=False):
"""Property return key-value json-string from __slots__."""
return json.dumps(self.as_dict, ensure_ascii=ensure_ascii) |
def fixed_legend_position(self, fixed_legend_position):
"""Sets the fixed_legend_position of this ChartSettings.
Where the fixed legend should be displayed with respect to the chart # noqa: E501
:param fixed_legend_position: The fixed_legend_position of this ChartSettings. # noqa: E501
:type: str
"""
allowed_values = ["RIGHT", "TOP", "LEFT", "BOTTOM"] # noqa: E501
if fixed_legend_position not in allowed_values:
raise ValueError(
"Invalid value for `fixed_legend_position` ({0}), must be one of {1}" # noqa: E501
.format(fixed_legend_position, allowed_values)
)
self._fixed_legend_position = fixed_legend_position | Sets the fixed_legend_position of this ChartSettings.
Where the fixed legend should be displayed with respect to the chart # noqa: E501
:param fixed_legend_position: The fixed_legend_position of this ChartSettings. # noqa: E501
:type: str | Below is the the instruction that describes the task:
### Input:
Sets the fixed_legend_position of this ChartSettings.
Where the fixed legend should be displayed with respect to the chart # noqa: E501
:param fixed_legend_position: The fixed_legend_position of this ChartSettings. # noqa: E501
:type: str
### Response:
def fixed_legend_position(self, fixed_legend_position):
"""Sets the fixed_legend_position of this ChartSettings.
Where the fixed legend should be displayed with respect to the chart # noqa: E501
:param fixed_legend_position: The fixed_legend_position of this ChartSettings. # noqa: E501
:type: str
"""
allowed_values = ["RIGHT", "TOP", "LEFT", "BOTTOM"] # noqa: E501
if fixed_legend_position not in allowed_values:
raise ValueError(
"Invalid value for `fixed_legend_position` ({0}), must be one of {1}" # noqa: E501
.format(fixed_legend_position, allowed_values)
)
self._fixed_legend_position = fixed_legend_position |
def align(self, alignment_tool = 'clustalw', gap_opening_penalty = 0.2, ignore_bad_chains = False):
'''If ignore_bad_chains is True then any chains containing all Xs as the sequence will be silently skipped.
The default behavior is to raise a MalformedSequenceException in this case.'''
if len(self.pdb_chains) > 1:
sa = SequenceAligner(alignment_tool = alignment_tool, gap_opening_penalty = gap_opening_penalty)
for pdb_chain in self.pdb_chains:
sa.add_sequence('%s_%s' % (pdb_chain['pdb_id'], pdb_chain['chain_id']), pdb_chain['sequence'], ignore_bad_chains = ignore_bad_chains)
best_matches = sa.align()
return sa.alignment_output, best_matches
else:
raise Exception('Cannot align sequences - less than two chains were specified.') | If ignore_bad_chains is True then any chains containing all Xs as the sequence will be silently skipped.
The default behavior is to raise a MalformedSequenceException in this case. | Below is the the instruction that describes the task:
### Input:
If ignore_bad_chains is True then any chains containing all Xs as the sequence will be silently skipped.
The default behavior is to raise a MalformedSequenceException in this case.
### Response:
def align(self, alignment_tool = 'clustalw', gap_opening_penalty = 0.2, ignore_bad_chains = False):
'''If ignore_bad_chains is True then any chains containing all Xs as the sequence will be silently skipped.
The default behavior is to raise a MalformedSequenceException in this case.'''
if len(self.pdb_chains) > 1:
sa = SequenceAligner(alignment_tool = alignment_tool, gap_opening_penalty = gap_opening_penalty)
for pdb_chain in self.pdb_chains:
sa.add_sequence('%s_%s' % (pdb_chain['pdb_id'], pdb_chain['chain_id']), pdb_chain['sequence'], ignore_bad_chains = ignore_bad_chains)
best_matches = sa.align()
return sa.alignment_output, best_matches
else:
raise Exception('Cannot align sequences - less than two chains were specified.') |
def is_modified(self):
"""
Returns whether model is modified or not
"""
if len(self.__modified_data__) or len(self.__deleted_fields__):
return True
for value in self.__original_data__.values():
try:
if value.is_modified():
return True
except AttributeError:
pass
return False | Returns whether model is modified or not | Below is the the instruction that describes the task:
### Input:
Returns whether model is modified or not
### Response:
def is_modified(self):
"""
Returns whether model is modified or not
"""
if len(self.__modified_data__) or len(self.__deleted_fields__):
return True
for value in self.__original_data__.values():
try:
if value.is_modified():
return True
except AttributeError:
pass
return False |
def find_contours(array, level,
fully_connected='low', positive_orientation='low'):
"""Find iso-valued contours in a 2D array for a given level value.
Uses the "marching squares" method to compute a the iso-valued contours of
the input 2D array for a particular level value. Array values are linearly
interpolated to provide better precision for the output contours.
Parameters
----------
array : 2D ndarray of double
Input data in which to find contours.
level : float
Value along which to find contours in the array.
fully_connected : str, {'low', 'high'}
Indicates whether array elements below the given level value are to be
considered fully-connected (and hence elements above the value will
only be face connected), or vice-versa. (See notes below for details.)
positive_orientation : either 'low' or 'high'
Indicates whether the output contours will produce positively-oriented
polygons around islands of low- or high-valued elements. If 'low' then
contours will wind counter- clockwise around elements below the
iso-value. Alternately, this means that low-valued elements are always
on the left of the contour. (See below for details.)
Returns
-------
contours : list of (n,2)-ndarrays
Each contour is an ndarray of shape ``(n, 2)``,
consisting of n ``(row, column)`` coordinates along the contour.
Notes
-----
The marching squares algorithm is a special case of the marching cubes
algorithm [1]_. A simple explanation is available here::
http://www.essi.fr/~lingrand/MarchingCubes/algo.html
There is a single ambiguous case in the marching squares algorithm: when
a given ``2 x 2``-element square has two high-valued and two low-valued
elements, each pair diagonally adjacent. (Where high- and low-valued is
with respect to the contour value sought.) In this case, either the
high-valued elements can be 'connected together' via a thin isthmus that
separates the low-valued elements, or vice-versa. When elements are
connected together across a diagonal, they are considered 'fully
connected' (also known as 'face+vertex-connected' or '8-connected'). Only
high-valued or low-valued elements can be fully-connected, the other set
will be considered as 'face-connected' or '4-connected'. By default,
low-valued elements are considered fully-connected; this can be altered
with the 'fully_connected' parameter.
Output contours are not guaranteed to be closed: contours which intersect
the array edge will be left open. All other contours will be closed. (The
closed-ness of a contours can be tested by checking whether the beginning
point is the same as the end point.)
Contours are oriented. By default, array values lower than the contour
value are to the left of the contour and values greater than the contour
value are to the right. This means that contours will wind
counter-clockwise (i.e. in 'positive orientation') around islands of
low-valued pixels. This behavior can be altered with the
'positive_orientation' parameter.
The order of the contours in the output list is determined by the position
of the smallest ``x,y`` (in lexicographical order) coordinate in the
contour. This is a side-effect of how the input array is traversed, but
can be relied upon.
.. warning::
Array coordinates/values are assumed to refer to the *center* of the
array element. Take a simple example input: ``[0, 1]``. The interpolated
position of 0.5 in this array is midway between the 0-element (at
``x=0``) and the 1-element (at ``x=1``), and thus would fall at
``x=0.5``.
This means that to find reasonable contours, it is best to find contours
midway between the expected "light" and "dark" values. In particular,
given a binarized array, *do not* choose to find contours at the low or
high value of the array. This will often yield degenerate contours,
especially around structures that are a single array element wide. Instead
choose a middle value, as above.
References
----------
.. [1] Lorensen, William and Harvey E. Cline. Marching Cubes: A High
Resolution 3D Surface Construction Algorithm. Computer Graphics
(SIGGRAPH 87 Proceedings) 21(4) July 1987, p. 163-170).
Examples
--------
>>> a = np.zeros((3, 3))
>>> a[0, 0] = 1
>>> a
array([[ 1., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 0.]])
>>> find_contours(a, 0.5)
[array([[ 0. , 0.5],
[ 0.5, 0. ]])]
"""
array = np.asarray(array, dtype=np.double)
if array.ndim != 2:
raise ValueError('Only 2D arrays are supported.')
level = float(level)
if (fully_connected not in _param_options or
positive_orientation not in _param_options):
raise ValueError('Parameters "fully_connected" and'
' "positive_orientation" must be either "high" or'
' "low".')
point_list = _find_contours_cy.iterate_and_store(array, level,
fully_connected == 'high')
contours = _assemble_contours(_take_2(point_list))
if positive_orientation == 'high':
contours = [c[::-1] for c in contours]
return contours | Find iso-valued contours in a 2D array for a given level value.
Uses the "marching squares" method to compute a the iso-valued contours of
the input 2D array for a particular level value. Array values are linearly
interpolated to provide better precision for the output contours.
Parameters
----------
array : 2D ndarray of double
Input data in which to find contours.
level : float
Value along which to find contours in the array.
fully_connected : str, {'low', 'high'}
Indicates whether array elements below the given level value are to be
considered fully-connected (and hence elements above the value will
only be face connected), or vice-versa. (See notes below for details.)
positive_orientation : either 'low' or 'high'
Indicates whether the output contours will produce positively-oriented
polygons around islands of low- or high-valued elements. If 'low' then
contours will wind counter- clockwise around elements below the
iso-value. Alternately, this means that low-valued elements are always
on the left of the contour. (See below for details.)
Returns
-------
contours : list of (n,2)-ndarrays
Each contour is an ndarray of shape ``(n, 2)``,
consisting of n ``(row, column)`` coordinates along the contour.
Notes
-----
The marching squares algorithm is a special case of the marching cubes
algorithm [1]_. A simple explanation is available here::
http://www.essi.fr/~lingrand/MarchingCubes/algo.html
There is a single ambiguous case in the marching squares algorithm: when
a given ``2 x 2``-element square has two high-valued and two low-valued
elements, each pair diagonally adjacent. (Where high- and low-valued is
with respect to the contour value sought.) In this case, either the
high-valued elements can be 'connected together' via a thin isthmus that
separates the low-valued elements, or vice-versa. When elements are
connected together across a diagonal, they are considered 'fully
connected' (also known as 'face+vertex-connected' or '8-connected'). Only
high-valued or low-valued elements can be fully-connected, the other set
will be considered as 'face-connected' or '4-connected'. By default,
low-valued elements are considered fully-connected; this can be altered
with the 'fully_connected' parameter.
Output contours are not guaranteed to be closed: contours which intersect
the array edge will be left open. All other contours will be closed. (The
closed-ness of a contours can be tested by checking whether the beginning
point is the same as the end point.)
Contours are oriented. By default, array values lower than the contour
value are to the left of the contour and values greater than the contour
value are to the right. This means that contours will wind
counter-clockwise (i.e. in 'positive orientation') around islands of
low-valued pixels. This behavior can be altered with the
'positive_orientation' parameter.
The order of the contours in the output list is determined by the position
of the smallest ``x,y`` (in lexicographical order) coordinate in the
contour. This is a side-effect of how the input array is traversed, but
can be relied upon.
.. warning::
Array coordinates/values are assumed to refer to the *center* of the
array element. Take a simple example input: ``[0, 1]``. The interpolated
position of 0.5 in this array is midway between the 0-element (at
``x=0``) and the 1-element (at ``x=1``), and thus would fall at
``x=0.5``.
This means that to find reasonable contours, it is best to find contours
midway between the expected "light" and "dark" values. In particular,
given a binarized array, *do not* choose to find contours at the low or
high value of the array. This will often yield degenerate contours,
especially around structures that are a single array element wide. Instead
choose a middle value, as above.
References
----------
.. [1] Lorensen, William and Harvey E. Cline. Marching Cubes: A High
Resolution 3D Surface Construction Algorithm. Computer Graphics
(SIGGRAPH 87 Proceedings) 21(4) July 1987, p. 163-170).
Examples
--------
>>> a = np.zeros((3, 3))
>>> a[0, 0] = 1
>>> a
array([[ 1., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 0.]])
>>> find_contours(a, 0.5)
[array([[ 0. , 0.5],
[ 0.5, 0. ]])] | Below is the the instruction that describes the task:
### Input:
Find iso-valued contours in a 2D array for a given level value.
Uses the "marching squares" method to compute a the iso-valued contours of
the input 2D array for a particular level value. Array values are linearly
interpolated to provide better precision for the output contours.
Parameters
----------
array : 2D ndarray of double
Input data in which to find contours.
level : float
Value along which to find contours in the array.
fully_connected : str, {'low', 'high'}
Indicates whether array elements below the given level value are to be
considered fully-connected (and hence elements above the value will
only be face connected), or vice-versa. (See notes below for details.)
positive_orientation : either 'low' or 'high'
Indicates whether the output contours will produce positively-oriented
polygons around islands of low- or high-valued elements. If 'low' then
contours will wind counter- clockwise around elements below the
iso-value. Alternately, this means that low-valued elements are always
on the left of the contour. (See below for details.)
Returns
-------
contours : list of (n,2)-ndarrays
Each contour is an ndarray of shape ``(n, 2)``,
consisting of n ``(row, column)`` coordinates along the contour.
Notes
-----
The marching squares algorithm is a special case of the marching cubes
algorithm [1]_. A simple explanation is available here::
http://www.essi.fr/~lingrand/MarchingCubes/algo.html
There is a single ambiguous case in the marching squares algorithm: when
a given ``2 x 2``-element square has two high-valued and two low-valued
elements, each pair diagonally adjacent. (Where high- and low-valued is
with respect to the contour value sought.) In this case, either the
high-valued elements can be 'connected together' via a thin isthmus that
separates the low-valued elements, or vice-versa. When elements are
connected together across a diagonal, they are considered 'fully
connected' (also known as 'face+vertex-connected' or '8-connected'). Only
high-valued or low-valued elements can be fully-connected, the other set
will be considered as 'face-connected' or '4-connected'. By default,
low-valued elements are considered fully-connected; this can be altered
with the 'fully_connected' parameter.
Output contours are not guaranteed to be closed: contours which intersect
the array edge will be left open. All other contours will be closed. (The
closed-ness of a contours can be tested by checking whether the beginning
point is the same as the end point.)
Contours are oriented. By default, array values lower than the contour
value are to the left of the contour and values greater than the contour
value are to the right. This means that contours will wind
counter-clockwise (i.e. in 'positive orientation') around islands of
low-valued pixels. This behavior can be altered with the
'positive_orientation' parameter.
The order of the contours in the output list is determined by the position
of the smallest ``x,y`` (in lexicographical order) coordinate in the
contour. This is a side-effect of how the input array is traversed, but
can be relied upon.
.. warning::
Array coordinates/values are assumed to refer to the *center* of the
array element. Take a simple example input: ``[0, 1]``. The interpolated
position of 0.5 in this array is midway between the 0-element (at
``x=0``) and the 1-element (at ``x=1``), and thus would fall at
``x=0.5``.
This means that to find reasonable contours, it is best to find contours
midway between the expected "light" and "dark" values. In particular,
given a binarized array, *do not* choose to find contours at the low or
high value of the array. This will often yield degenerate contours,
especially around structures that are a single array element wide. Instead
choose a middle value, as above.
References
----------
.. [1] Lorensen, William and Harvey E. Cline. Marching Cubes: A High
Resolution 3D Surface Construction Algorithm. Computer Graphics
(SIGGRAPH 87 Proceedings) 21(4) July 1987, p. 163-170).
Examples
--------
>>> a = np.zeros((3, 3))
>>> a[0, 0] = 1
>>> a
array([[ 1., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 0.]])
>>> find_contours(a, 0.5)
[array([[ 0. , 0.5],
[ 0.5, 0. ]])]
### Response:
def find_contours(array, level,
fully_connected='low', positive_orientation='low'):
"""Find iso-valued contours in a 2D array for a given level value.
Uses the "marching squares" method to compute a the iso-valued contours of
the input 2D array for a particular level value. Array values are linearly
interpolated to provide better precision for the output contours.
Parameters
----------
array : 2D ndarray of double
Input data in which to find contours.
level : float
Value along which to find contours in the array.
fully_connected : str, {'low', 'high'}
Indicates whether array elements below the given level value are to be
considered fully-connected (and hence elements above the value will
only be face connected), or vice-versa. (See notes below for details.)
positive_orientation : either 'low' or 'high'
Indicates whether the output contours will produce positively-oriented
polygons around islands of low- or high-valued elements. If 'low' then
contours will wind counter- clockwise around elements below the
iso-value. Alternately, this means that low-valued elements are always
on the left of the contour. (See below for details.)
Returns
-------
contours : list of (n,2)-ndarrays
Each contour is an ndarray of shape ``(n, 2)``,
consisting of n ``(row, column)`` coordinates along the contour.
Notes
-----
The marching squares algorithm is a special case of the marching cubes
algorithm [1]_. A simple explanation is available here::
http://www.essi.fr/~lingrand/MarchingCubes/algo.html
There is a single ambiguous case in the marching squares algorithm: when
a given ``2 x 2``-element square has two high-valued and two low-valued
elements, each pair diagonally adjacent. (Where high- and low-valued is
with respect to the contour value sought.) In this case, either the
high-valued elements can be 'connected together' via a thin isthmus that
separates the low-valued elements, or vice-versa. When elements are
connected together across a diagonal, they are considered 'fully
connected' (also known as 'face+vertex-connected' or '8-connected'). Only
high-valued or low-valued elements can be fully-connected, the other set
will be considered as 'face-connected' or '4-connected'. By default,
low-valued elements are considered fully-connected; this can be altered
with the 'fully_connected' parameter.
Output contours are not guaranteed to be closed: contours which intersect
the array edge will be left open. All other contours will be closed. (The
closed-ness of a contours can be tested by checking whether the beginning
point is the same as the end point.)
Contours are oriented. By default, array values lower than the contour
value are to the left of the contour and values greater than the contour
value are to the right. This means that contours will wind
counter-clockwise (i.e. in 'positive orientation') around islands of
low-valued pixels. This behavior can be altered with the
'positive_orientation' parameter.
The order of the contours in the output list is determined by the position
of the smallest ``x,y`` (in lexicographical order) coordinate in the
contour. This is a side-effect of how the input array is traversed, but
can be relied upon.
.. warning::
Array coordinates/values are assumed to refer to the *center* of the
array element. Take a simple example input: ``[0, 1]``. The interpolated
position of 0.5 in this array is midway between the 0-element (at
``x=0``) and the 1-element (at ``x=1``), and thus would fall at
``x=0.5``.
This means that to find reasonable contours, it is best to find contours
midway between the expected "light" and "dark" values. In particular,
given a binarized array, *do not* choose to find contours at the low or
high value of the array. This will often yield degenerate contours,
especially around structures that are a single array element wide. Instead
choose a middle value, as above.
References
----------
.. [1] Lorensen, William and Harvey E. Cline. Marching Cubes: A High
Resolution 3D Surface Construction Algorithm. Computer Graphics
(SIGGRAPH 87 Proceedings) 21(4) July 1987, p. 163-170).
Examples
--------
>>> a = np.zeros((3, 3))
>>> a[0, 0] = 1
>>> a
array([[ 1., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 0.]])
>>> find_contours(a, 0.5)
[array([[ 0. , 0.5],
[ 0.5, 0. ]])]
"""
array = np.asarray(array, dtype=np.double)
if array.ndim != 2:
raise ValueError('Only 2D arrays are supported.')
level = float(level)
if (fully_connected not in _param_options or
positive_orientation not in _param_options):
raise ValueError('Parameters "fully_connected" and'
' "positive_orientation" must be either "high" or'
' "low".')
point_list = _find_contours_cy.iterate_and_store(array, level,
fully_connected == 'high')
contours = _assemble_contours(_take_2(point_list))
if positive_orientation == 'high':
contours = [c[::-1] for c in contours]
return contours |
def read_config(cls):
""" Setup :attr:`wasp_launcher.apps.WAppsGlobals.log` configuration. Reads defaults and
override it by a file given via :attr:`WConfigApp.__environment_file_var__` environment variable.
After that configuration files are applied from :attr:`WConfigApp.__environment_dir_var__`
:return: None
"""
WAppsGlobals.config = WConfig()
def load(filename):
if os.path.isfile(filename) is False:
raise RuntimeError("Invalid configuration: '%s'" % filename)
WAppsGlobals.config.merge(filename)
WAppsGlobals.log.info('Configuration loaded from file: %s' % os.path.abspath(filename))
load(cls.__configuration_default__)
if cls.__environment_file_var__ in os.environ:
WAppsGlobals.log.info('Variable %s was set' % cls.__environment_file_var__)
load(os.environ[cls.__environment_file_var__])
if cls.__environment_dir_var__ in os.environ:
WAppsGlobals.log.info('Variable %s was set' % cls.__environment_dir_var__)
config_dir = os.environ[cls.__environment_dir_var__]
if os.path.isdir(config_dir) is False:
WAppsGlobals.log.error(
'Invalid configuration directory was specified: "%s"' % config_dir
)
entries = list(os.listdir(config_dir))
entries.sort()
for entry in entries:
config_file = os.path.join(config_dir, entry)
if os.path.isfile(config_file) is False:
WAppsGlobals.log.error(
'Invalid configuration "%s" in the directory "%s"' %
(config_file, config_dir)
)
continue
load(config_file) | Setup :attr:`wasp_launcher.apps.WAppsGlobals.log` configuration. Reads defaults and
override it by a file given via :attr:`WConfigApp.__environment_file_var__` environment variable.
After that configuration files are applied from :attr:`WConfigApp.__environment_dir_var__`
:return: None | Below is the the instruction that describes the task:
### Input:
Setup :attr:`wasp_launcher.apps.WAppsGlobals.log` configuration. Reads defaults and
override it by a file given via :attr:`WConfigApp.__environment_file_var__` environment variable.
After that configuration files are applied from :attr:`WConfigApp.__environment_dir_var__`
:return: None
### Response:
def read_config(cls):
""" Setup :attr:`wasp_launcher.apps.WAppsGlobals.log` configuration. Reads defaults and
override it by a file given via :attr:`WConfigApp.__environment_file_var__` environment variable.
After that configuration files are applied from :attr:`WConfigApp.__environment_dir_var__`
:return: None
"""
WAppsGlobals.config = WConfig()
def load(filename):
if os.path.isfile(filename) is False:
raise RuntimeError("Invalid configuration: '%s'" % filename)
WAppsGlobals.config.merge(filename)
WAppsGlobals.log.info('Configuration loaded from file: %s' % os.path.abspath(filename))
load(cls.__configuration_default__)
if cls.__environment_file_var__ in os.environ:
WAppsGlobals.log.info('Variable %s was set' % cls.__environment_file_var__)
load(os.environ[cls.__environment_file_var__])
if cls.__environment_dir_var__ in os.environ:
WAppsGlobals.log.info('Variable %s was set' % cls.__environment_dir_var__)
config_dir = os.environ[cls.__environment_dir_var__]
if os.path.isdir(config_dir) is False:
WAppsGlobals.log.error(
'Invalid configuration directory was specified: "%s"' % config_dir
)
entries = list(os.listdir(config_dir))
entries.sort()
for entry in entries:
config_file = os.path.join(config_dir, entry)
if os.path.isfile(config_file) is False:
WAppsGlobals.log.error(
'Invalid configuration "%s" in the directory "%s"' %
(config_file, config_dir)
)
continue
load(config_file) |
def mtf_image_transformer_base_imagenet_mp_sp():
"""Model parallel ImageNet parameters."""
hparams = mtf_image_transformer_base_imagenet_mp128()
hparams.mesh_shape = "model:8;batch:4"
hparams.layout = "batch:batch;d_ff:model;num_wblocks:model"
hparams.batch_size = 8
hparams.img_len = 128
hparams.block_length = 128
hparams.attention_type = "local1d_spatial"
return hparams | Model parallel ImageNet parameters. | Below is the the instruction that describes the task:
### Input:
Model parallel ImageNet parameters.
### Response:
def mtf_image_transformer_base_imagenet_mp_sp():
"""Model parallel ImageNet parameters."""
hparams = mtf_image_transformer_base_imagenet_mp128()
hparams.mesh_shape = "model:8;batch:4"
hparams.layout = "batch:batch;d_ff:model;num_wblocks:model"
hparams.batch_size = 8
hparams.img_len = 128
hparams.block_length = 128
hparams.attention_type = "local1d_spatial"
return hparams |
def dl_hosted(
self,
token: dict = None,
resource_link: dict = None,
encode_clean: bool = 1,
proxy_url: str = None,
prot: str = "https",
) -> tuple:
"""Download hosted resource.
:param str token: API auth token
:param dict resource_link: link dictionary
:param bool encode_clean: option to ensure a clean filename and avoid OS errors
:param str proxy_url: proxy to use to download
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs).
Example of resource_link dict:
.. code-block:: json
{
"_id": "g8h9i0j11k12l13m14n15o16p17Q18rS",
"type": "hosted",
"title": "label_of_hosted_file.zip",
"url": "/resources/1a2b3c4d5e6f7g8h9i0j11k12l13m14n/links/g8h9i0j11k12l13m14n15o16p17Q18rS.bin",
"kind": "data",
"actions": ["download", ],
"size": "2253029",
}
"""
# check resource link parameter type
if not isinstance(resource_link, dict):
raise TypeError("Resource link expects a dictionary.")
else:
pass
# check resource link type
if not resource_link.get("type") == "hosted":
raise ValueError(
"Resource link passed is not a hosted one: {}".format(
resource_link.get("type")
)
)
else:
pass
# handling request parameters
payload = {"proxyUrl": proxy_url}
# prepare URL request
hosted_url = "{}://v1.{}.isogeo.com/{}".format(
prot, self.api_url, resource_link.get("url")
)
# send stream request
hosted_req = self.get(
hosted_url,
headers=self.header,
stream=True,
params=payload,
proxies=self.proxies,
verify=self.ssl,
)
# quick check
req_check = checker.check_api_response(hosted_req)
if not req_check:
raise ConnectionError(req_check[1])
else:
pass
# get filename from header
content_disposition = hosted_req.headers.get("Content-Disposition")
if content_disposition:
filename = re.findall("filename=(.+)", content_disposition)[0]
else:
filename = resource_link.get("title")
# remove special characters
if encode_clean:
filename = utils.encoded_words_to_text(filename)
filename = re.sub(r"[^\w\-_\. ]", "", filename)
# well-formed size
in_size = resource_link.get("size")
for size_cat in ("octets", "Ko", "Mo", "Go"):
if in_size < 1024.0:
out_size = "%3.1f %s" % (in_size, size_cat)
in_size /= 1024.0
out_size = "%3.1f %s" % (in_size, " To")
# end of method
return (hosted_req, filename, out_size) | Download hosted resource.
:param str token: API auth token
:param dict resource_link: link dictionary
:param bool encode_clean: option to ensure a clean filename and avoid OS errors
:param str proxy_url: proxy to use to download
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs).
Example of resource_link dict:
.. code-block:: json
{
"_id": "g8h9i0j11k12l13m14n15o16p17Q18rS",
"type": "hosted",
"title": "label_of_hosted_file.zip",
"url": "/resources/1a2b3c4d5e6f7g8h9i0j11k12l13m14n/links/g8h9i0j11k12l13m14n15o16p17Q18rS.bin",
"kind": "data",
"actions": ["download", ],
"size": "2253029",
} | Below is the the instruction that describes the task:
### Input:
Download hosted resource.
:param str token: API auth token
:param dict resource_link: link dictionary
:param bool encode_clean: option to ensure a clean filename and avoid OS errors
:param str proxy_url: proxy to use to download
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs).
Example of resource_link dict:
.. code-block:: json
{
"_id": "g8h9i0j11k12l13m14n15o16p17Q18rS",
"type": "hosted",
"title": "label_of_hosted_file.zip",
"url": "/resources/1a2b3c4d5e6f7g8h9i0j11k12l13m14n/links/g8h9i0j11k12l13m14n15o16p17Q18rS.bin",
"kind": "data",
"actions": ["download", ],
"size": "2253029",
}
### Response:
def dl_hosted(
self,
token: dict = None,
resource_link: dict = None,
encode_clean: bool = 1,
proxy_url: str = None,
prot: str = "https",
) -> tuple:
"""Download hosted resource.
:param str token: API auth token
:param dict resource_link: link dictionary
:param bool encode_clean: option to ensure a clean filename and avoid OS errors
:param str proxy_url: proxy to use to download
:param str prot: https [DEFAULT] or http
(use it only for dev and tracking needs).
Example of resource_link dict:
.. code-block:: json
{
"_id": "g8h9i0j11k12l13m14n15o16p17Q18rS",
"type": "hosted",
"title": "label_of_hosted_file.zip",
"url": "/resources/1a2b3c4d5e6f7g8h9i0j11k12l13m14n/links/g8h9i0j11k12l13m14n15o16p17Q18rS.bin",
"kind": "data",
"actions": ["download", ],
"size": "2253029",
}
"""
# check resource link parameter type
if not isinstance(resource_link, dict):
raise TypeError("Resource link expects a dictionary.")
else:
pass
# check resource link type
if not resource_link.get("type") == "hosted":
raise ValueError(
"Resource link passed is not a hosted one: {}".format(
resource_link.get("type")
)
)
else:
pass
# handling request parameters
payload = {"proxyUrl": proxy_url}
# prepare URL request
hosted_url = "{}://v1.{}.isogeo.com/{}".format(
prot, self.api_url, resource_link.get("url")
)
# send stream request
hosted_req = self.get(
hosted_url,
headers=self.header,
stream=True,
params=payload,
proxies=self.proxies,
verify=self.ssl,
)
# quick check
req_check = checker.check_api_response(hosted_req)
if not req_check:
raise ConnectionError(req_check[1])
else:
pass
# get filename from header
content_disposition = hosted_req.headers.get("Content-Disposition")
if content_disposition:
filename = re.findall("filename=(.+)", content_disposition)[0]
else:
filename = resource_link.get("title")
# remove special characters
if encode_clean:
filename = utils.encoded_words_to_text(filename)
filename = re.sub(r"[^\w\-_\. ]", "", filename)
# well-formed size
in_size = resource_link.get("size")
for size_cat in ("octets", "Ko", "Mo", "Go"):
if in_size < 1024.0:
out_size = "%3.1f %s" % (in_size, size_cat)
in_size /= 1024.0
out_size = "%3.1f %s" % (in_size, " To")
# end of method
return (hosted_req, filename, out_size) |
def json_as_html(self):
""" Print out self.json in a nice way. """
# To avoid circular import
from cspreports import utils
formatted_json = utils.format_report(self.json)
return mark_safe("<pre>\n%s</pre>" % escape(formatted_json)) | Print out self.json in a nice way. | Below is the the instruction that describes the task:
### Input:
Print out self.json in a nice way.
### Response:
def json_as_html(self):
""" Print out self.json in a nice way. """
# To avoid circular import
from cspreports import utils
formatted_json = utils.format_report(self.json)
return mark_safe("<pre>\n%s</pre>" % escape(formatted_json)) |
def infer_process_count(self):
'''Infers the number of CPU cores in the current system, sets the
number of concurrent processes accordingly
'''
try:
self.processes = multiprocessing.cpu_count()
except NotImplementedError:
self._logger.log(
'error',
'Could infer CPU count, setting number of processes back to 4'
)
self.processes = 4 | Infers the number of CPU cores in the current system, sets the
number of concurrent processes accordingly | Below is the the instruction that describes the task:
### Input:
Infers the number of CPU cores in the current system, sets the
number of concurrent processes accordingly
### Response:
def infer_process_count(self):
'''Infers the number of CPU cores in the current system, sets the
number of concurrent processes accordingly
'''
try:
self.processes = multiprocessing.cpu_count()
except NotImplementedError:
self._logger.log(
'error',
'Could infer CPU count, setting number of processes back to 4'
)
self.processes = 4 |
def remove_perm(perm, group):
"""
Removes a permission from a group
"""
if not isinstance(perm, Permission):
try:
app_label, codename = perm.split('.', 1)
except ValueError:
raise ValueError("For global permissions, first argument must be in"
" format: 'app_label.codename' (is %r)" % perm)
perm = Permission.objects.get(content_type__app_label=app_label, codename=codename)
group.permissions.remove(perm)
return | Removes a permission from a group | Below is the the instruction that describes the task:
### Input:
Removes a permission from a group
### Response:
def remove_perm(perm, group):
"""
Removes a permission from a group
"""
if not isinstance(perm, Permission):
try:
app_label, codename = perm.split('.', 1)
except ValueError:
raise ValueError("For global permissions, first argument must be in"
" format: 'app_label.codename' (is %r)" % perm)
perm = Permission.objects.get(content_type__app_label=app_label, codename=codename)
group.permissions.remove(perm)
return |
def _emit_style_tag(self, tag, markup, body):
"""Write the body of a tag and the tokens that should surround it."""
self._emit(tokens.TagOpenOpen(wiki_markup=markup))
self._emit_text(tag)
self._emit(tokens.TagCloseOpen())
self._emit_all(body)
self._emit(tokens.TagOpenClose())
self._emit_text(tag)
self._emit(tokens.TagCloseClose()) | Write the body of a tag and the tokens that should surround it. | Below is the the instruction that describes the task:
### Input:
Write the body of a tag and the tokens that should surround it.
### Response:
def _emit_style_tag(self, tag, markup, body):
"""Write the body of a tag and the tokens that should surround it."""
self._emit(tokens.TagOpenOpen(wiki_markup=markup))
self._emit_text(tag)
self._emit(tokens.TagCloseOpen())
self._emit_all(body)
self._emit(tokens.TagOpenClose())
self._emit_text(tag)
self._emit(tokens.TagCloseClose()) |
def total_length_per_neurite(neurites, neurite_type=NeuriteType.all):
'''Get the path length per neurite in a collection'''
return list(sum(s.length for s in n.iter_sections())
for n in iter_neurites(neurites, filt=is_type(neurite_type))) | Get the path length per neurite in a collection | Below is the the instruction that describes the task:
### Input:
Get the path length per neurite in a collection
### Response:
def total_length_per_neurite(neurites, neurite_type=NeuriteType.all):
'''Get the path length per neurite in a collection'''
return list(sum(s.length for s in n.iter_sections())
for n in iter_neurites(neurites, filt=is_type(neurite_type))) |
def merge(self, merge_id):
"""Get the merge full data"""
path = urijoin(self.base_url,
GitLabClient.PROJECTS, self.owner + '%2F' + self.repository,
GitLabClient.MERGES, merge_id)
response = self.fetch(path)
return response.text | Get the merge full data | Below is the the instruction that describes the task:
### Input:
Get the merge full data
### Response:
def merge(self, merge_id):
"""Get the merge full data"""
path = urijoin(self.base_url,
GitLabClient.PROJECTS, self.owner + '%2F' + self.repository,
GitLabClient.MERGES, merge_id)
response = self.fetch(path)
return response.text |
def create_ellipse_mesh(points,**kwargs):
"""Visualize the ellipse by using the mesh of the points."""
import plotly.graph_objs as go
x,y,z = points.T
return (go.Mesh3d(x=x,y=y,z=z,**kwargs),
go.Scatter3d(x=x, y=y, z=z,
marker=dict(size=0.01),
line=dict(width=2,color='#000000'),
showlegend=False,
hoverinfo='none'
)
) | Visualize the ellipse by using the mesh of the points. | Below is the the instruction that describes the task:
### Input:
Visualize the ellipse by using the mesh of the points.
### Response:
def create_ellipse_mesh(points,**kwargs):
"""Visualize the ellipse by using the mesh of the points."""
import plotly.graph_objs as go
x,y,z = points.T
return (go.Mesh3d(x=x,y=y,z=z,**kwargs),
go.Scatter3d(x=x, y=y, z=z,
marker=dict(size=0.01),
line=dict(width=2,color='#000000'),
showlegend=False,
hoverinfo='none'
)
) |
def db(self):
"""
Get a loaded database session
"""
if self.database is NotImplemented:
self.database = Session
return self.database | Get a loaded database session | Below is the the instruction that describes the task:
### Input:
Get a loaded database session
### Response:
def db(self):
"""
Get a loaded database session
"""
if self.database is NotImplemented:
self.database = Session
return self.database |
def timer(module, name, delta, duration_units='milliseconds'):
"""
Record a timing delta:
::
start_time_s = time.time()
do_some_operation()
end_time_s = time.time()
delta_s = end_time_s - start_time_s
delta_ms = delta_s * 1000
timer(__name__, 'my_timer', delta_ms)
"""
timer = get_metric('timers', module, name, Timer(duration_units))
timer.update(delta) | Record a timing delta:
::
start_time_s = time.time()
do_some_operation()
end_time_s = time.time()
delta_s = end_time_s - start_time_s
delta_ms = delta_s * 1000
timer(__name__, 'my_timer', delta_ms) | Below is the the instruction that describes the task:
### Input:
Record a timing delta:
::
start_time_s = time.time()
do_some_operation()
end_time_s = time.time()
delta_s = end_time_s - start_time_s
delta_ms = delta_s * 1000
timer(__name__, 'my_timer', delta_ms)
### Response:
def timer(module, name, delta, duration_units='milliseconds'):
"""
Record a timing delta:
::
start_time_s = time.time()
do_some_operation()
end_time_s = time.time()
delta_s = end_time_s - start_time_s
delta_ms = delta_s * 1000
timer(__name__, 'my_timer', delta_ms)
"""
timer = get_metric('timers', module, name, Timer(duration_units))
timer.update(delta) |
def minimum(station_code):
"""Extreme Minimum Design Temperature for a location.
Degrees in Celcius
Args:
station_code (str): Weather Station Code
Returns:
float degrees Celcius
"""
temp = None
fin = None
try:
fin = open('%s/%s' % (env.WEATHER_DATA_PATH,
_basename(station_code, 'ddy')))
except IOError:
logger.info("File not found")
download_extract(_eere_url(station_code))
fin = open('%s/%s' % (env.WEATHER_DATA_PATH,
_basename(station_code, 'ddy')))
for line in fin:
value = re.search('Max Drybulb=(-?\\d+\\.\\d*)', line)
if value:
temp = float(value.groups()[0])
if not temp:
try:
fin = open('%s/%s' % (env.WEATHER_DATA_PATH,
_basename(station_code, 'stat')))
for line in fin:
if line.find('Minimum Dry Bulb') is not -1:
return float(line[37:-1].split('\xb0')[0])
except IOError:
pass
if temp:
return temp
else:
raise Exception("Error: Minimum Temperature not found") | Extreme Minimum Design Temperature for a location.
Degrees in Celcius
Args:
station_code (str): Weather Station Code
Returns:
float degrees Celcius | Below is the the instruction that describes the task:
### Input:
Extreme Minimum Design Temperature for a location.
Degrees in Celcius
Args:
station_code (str): Weather Station Code
Returns:
float degrees Celcius
### Response:
def minimum(station_code):
"""Extreme Minimum Design Temperature for a location.
Degrees in Celcius
Args:
station_code (str): Weather Station Code
Returns:
float degrees Celcius
"""
temp = None
fin = None
try:
fin = open('%s/%s' % (env.WEATHER_DATA_PATH,
_basename(station_code, 'ddy')))
except IOError:
logger.info("File not found")
download_extract(_eere_url(station_code))
fin = open('%s/%s' % (env.WEATHER_DATA_PATH,
_basename(station_code, 'ddy')))
for line in fin:
value = re.search('Max Drybulb=(-?\\d+\\.\\d*)', line)
if value:
temp = float(value.groups()[0])
if not temp:
try:
fin = open('%s/%s' % (env.WEATHER_DATA_PATH,
_basename(station_code, 'stat')))
for line in fin:
if line.find('Minimum Dry Bulb') is not -1:
return float(line[37:-1].split('\xb0')[0])
except IOError:
pass
if temp:
return temp
else:
raise Exception("Error: Minimum Temperature not found") |
def token_permission_view(token):
"""Show permission garanted to authorized application token."""
scopes = [current_oauth2server.scopes[x] for x in token.scopes]
return render_template(
"invenio_oauth2server/settings/token_permission_view.html",
token=token,
scopes=scopes,
) | Show permission garanted to authorized application token. | Below is the the instruction that describes the task:
### Input:
Show permission garanted to authorized application token.
### Response:
def token_permission_view(token):
"""Show permission garanted to authorized application token."""
scopes = [current_oauth2server.scopes[x] for x in token.scopes]
return render_template(
"invenio_oauth2server/settings/token_permission_view.html",
token=token,
scopes=scopes,
) |
def name_transfer(self, key, new_address, value=None):
""" Check if this name exists and if it does, find the value field
note that update command needs an arg of <new value>.
in case we're simply transferring, need to obtain old value first
"""
key_details = self.name_show(key)
if 'code' in key_details and key_details.get('code') == -4:
return error_reply("Key does not exist")
# get new 'value' if given, otherwise use the old 'value'
if value is None:
value = json.dumps(key_details['value'])
if not self.unlock_wallet(self.passphrase):
error_reply("Error unlocking wallet", 403)
# transfer the name (underlying call is still name_update)
try:
# update the 'value'
reply = self.obj.name_update(key, value, new_address)
except JSONRPCException as e:
return e.error
return reply | Check if this name exists and if it does, find the value field
note that update command needs an arg of <new value>.
in case we're simply transferring, need to obtain old value first | Below is the the instruction that describes the task:
### Input:
Check if this name exists and if it does, find the value field
note that update command needs an arg of <new value>.
in case we're simply transferring, need to obtain old value first
### Response:
def name_transfer(self, key, new_address, value=None):
""" Check if this name exists and if it does, find the value field
note that update command needs an arg of <new value>.
in case we're simply transferring, need to obtain old value first
"""
key_details = self.name_show(key)
if 'code' in key_details and key_details.get('code') == -4:
return error_reply("Key does not exist")
# get new 'value' if given, otherwise use the old 'value'
if value is None:
value = json.dumps(key_details['value'])
if not self.unlock_wallet(self.passphrase):
error_reply("Error unlocking wallet", 403)
# transfer the name (underlying call is still name_update)
try:
# update the 'value'
reply = self.obj.name_update(key, value, new_address)
except JSONRPCException as e:
return e.error
return reply |
def recsph(rectan):
"""
Convert from rectangular coordinates to spherical coordinates.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/recrad_c.html
:param rectan: Rectangular coordinates of a point.
:type rectan: 3-Element Array of floats
:return:
Distance from the origin,
Angle from the positive Z-axis,
Longitude in radians.
:rtype: tuple
"""
rectan = stypes.toDoubleVector(rectan)
r = ctypes.c_double()
colat = ctypes.c_double()
lon = ctypes.c_double()
libspice.recsph_c(rectan, ctypes.byref(r), ctypes.byref(colat),
ctypes.byref(lon))
return r.value, colat.value, lon.value | Convert from rectangular coordinates to spherical coordinates.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/recrad_c.html
:param rectan: Rectangular coordinates of a point.
:type rectan: 3-Element Array of floats
:return:
Distance from the origin,
Angle from the positive Z-axis,
Longitude in radians.
:rtype: tuple | Below is the the instruction that describes the task:
### Input:
Convert from rectangular coordinates to spherical coordinates.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/recrad_c.html
:param rectan: Rectangular coordinates of a point.
:type rectan: 3-Element Array of floats
:return:
Distance from the origin,
Angle from the positive Z-axis,
Longitude in radians.
:rtype: tuple
### Response:
def recsph(rectan):
"""
Convert from rectangular coordinates to spherical coordinates.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/recrad_c.html
:param rectan: Rectangular coordinates of a point.
:type rectan: 3-Element Array of floats
:return:
Distance from the origin,
Angle from the positive Z-axis,
Longitude in radians.
:rtype: tuple
"""
rectan = stypes.toDoubleVector(rectan)
r = ctypes.c_double()
colat = ctypes.c_double()
lon = ctypes.c_double()
libspice.recsph_c(rectan, ctypes.byref(r), ctypes.byref(colat),
ctypes.byref(lon))
return r.value, colat.value, lon.value |
def shard_stores(self, index=None, params=None):
"""
Provides store information for shard copies of indices. Store
information reports on which nodes shard copies exist, the shard copy
version, indicating how recent they are, and any exceptions encountered
while opening the shard index or from earlier engine failure.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-shards-stores.html>`_
:arg index: A comma-separated list of index names; use `_all` or empty
string to perform the operation on all indices
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to concrete
indices that are open, closed or both., default 'open', valid
choices are: 'open', 'closed', 'none', 'all'
:arg ignore_unavailable: Whether specified concrete indices should be
ignored when unavailable (missing or closed)
:arg operation_threading: TODO: ?
:arg status: A comma-separated list of statuses used to filter on shards
to get store information for, valid choices are: 'green', 'yellow',
'red', 'all'
"""
return self.transport.perform_request(
"GET", _make_path(index, "_shard_stores"), params=params
) | Provides store information for shard copies of indices. Store
information reports on which nodes shard copies exist, the shard copy
version, indicating how recent they are, and any exceptions encountered
while opening the shard index or from earlier engine failure.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-shards-stores.html>`_
:arg index: A comma-separated list of index names; use `_all` or empty
string to perform the operation on all indices
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to concrete
indices that are open, closed or both., default 'open', valid
choices are: 'open', 'closed', 'none', 'all'
:arg ignore_unavailable: Whether specified concrete indices should be
ignored when unavailable (missing or closed)
:arg operation_threading: TODO: ?
:arg status: A comma-separated list of statuses used to filter on shards
to get store information for, valid choices are: 'green', 'yellow',
'red', 'all' | Below is the the instruction that describes the task:
### Input:
Provides store information for shard copies of indices. Store
information reports on which nodes shard copies exist, the shard copy
version, indicating how recent they are, and any exceptions encountered
while opening the shard index or from earlier engine failure.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-shards-stores.html>`_
:arg index: A comma-separated list of index names; use `_all` or empty
string to perform the operation on all indices
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to concrete
indices that are open, closed or both., default 'open', valid
choices are: 'open', 'closed', 'none', 'all'
:arg ignore_unavailable: Whether specified concrete indices should be
ignored when unavailable (missing or closed)
:arg operation_threading: TODO: ?
:arg status: A comma-separated list of statuses used to filter on shards
to get store information for, valid choices are: 'green', 'yellow',
'red', 'all'
### Response:
def shard_stores(self, index=None, params=None):
"""
Provides store information for shard copies of indices. Store
information reports on which nodes shard copies exist, the shard copy
version, indicating how recent they are, and any exceptions encountered
while opening the shard index or from earlier engine failure.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-shards-stores.html>`_
:arg index: A comma-separated list of index names; use `_all` or empty
string to perform the operation on all indices
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to concrete
indices that are open, closed or both., default 'open', valid
choices are: 'open', 'closed', 'none', 'all'
:arg ignore_unavailable: Whether specified concrete indices should be
ignored when unavailable (missing or closed)
:arg operation_threading: TODO: ?
:arg status: A comma-separated list of statuses used to filter on shards
to get store information for, valid choices are: 'green', 'yellow',
'red', 'all'
"""
return self.transport.perform_request(
"GET", _make_path(index, "_shard_stores"), params=params
) |
def branch(self):
'''
:param branch:
Checks out specified branch (tracking if it exists on remote).
If set to ``None``, 'master' will be checked out
:returns:
The current branch
(This could also be 'master (Detatched-Head)' - Be warned)
'''
branch = self._get_branch().get('stdout')
if branch:
return ''.join(
[b for b in branch if '*' in b]
).replace('*', '').strip() | :param branch:
Checks out specified branch (tracking if it exists on remote).
If set to ``None``, 'master' will be checked out
:returns:
The current branch
(This could also be 'master (Detatched-Head)' - Be warned) | Below is the instruction that describes the task:
### Input:
:param branch:
Checks out specified branch (tracking if it exists on remote).
If set to ``None``, 'master' will be checked out
:returns:
The current branch
(This could also be 'master (Detatched-Head)' - Be warned)
### Response:
def branch(self):
'''
:param branch:
Checks out specified branch (tracking if it exists on remote).
If set to ``None``, 'master' will be checked out
:returns:
The current branch
(This could also be 'master (Detatched-Head)' - Be warned)
'''
branch = self._get_branch().get('stdout')
if branch:
return ''.join(
[b for b in branch if '*' in b]
).replace('*', '').strip() |
def estimate_column_scales(
self,
X_centered,
row_scales):
"""
column_scale[j] ** 2 =
mean{i in observed[:, j]}{
(X[i, j] - row_center[i] - column_center[j]) ** 2
-------------------------------------------------
row_scale[i] ** 2
}
"""
n_rows, n_cols = X_centered.shape
row_scales = np.asarray(row_scales)
if len(row_scales) != n_rows:
raise ValueError("Expected length %s, got shape %s" % (
n_rows, row_scales.shape,))
column_variances = np.nanmean(
X_centered ** 2 / (row_scales ** 2).reshape((n_rows, 1)),
axis=0)
column_variances[column_variances == 0] = 1.0
assert len(column_variances) == n_cols, "%d != %d" % (
len(column_variances),
n_cols)
return np.sqrt(column_variances) | column_scale[j] ** 2 =
mean{i in observed[:, j]}{
(X[i, j] - row_center[i] - column_center[j]) ** 2
-------------------------------------------------
row_scale[i] ** 2
} | Below is the the instruction that describes the task:
### Input:
column_scale[j] ** 2 =
mean{i in observed[:, j]}{
(X[i, j] - row_center[i] - column_center[j]) ** 2
-------------------------------------------------
row_scale[i] ** 2
}
### Response:
def estimate_column_scales(
self,
X_centered,
row_scales):
"""
column_scale[j] ** 2 =
mean{i in observed[:, j]}{
(X[i, j] - row_center[i] - column_center[j]) ** 2
-------------------------------------------------
row_scale[i] ** 2
}
"""
n_rows, n_cols = X_centered.shape
row_scales = np.asarray(row_scales)
if len(row_scales) != n_rows:
raise ValueError("Expected length %s, got shape %s" % (
n_rows, row_scales.shape,))
column_variances = np.nanmean(
X_centered ** 2 / (row_scales ** 2).reshape((n_rows, 1)),
axis=0)
column_variances[column_variances == 0] = 1.0
assert len(column_variances) == n_cols, "%d != %d" % (
len(column_variances),
n_cols)
return np.sqrt(column_variances) |
def _print_bits(self,norm=2.3, height=8.0):
"""
m._print_bits(,norm=2.3, height=8.0) -- Print a text-rendering of the Motif Logo
norm -- maximum number of bits to show
height -- number of lines of text to use to render logo
"""
bits = []
tots = []
str = []
for i in range(self.width):
D = {}
tot = 0
for letter in ['A', 'C', 'T', 'G']:
if self.logP:
Pij = pow(2.0, self.logP[i][letter])
else:
Pij = pow(2.0, self.ll[i][letter]) * self.background[letter]
if Pij > 0.01:
'''Old'''
D[letter] = Pij * self.ll[i][letter]
#'''new'''
#Q = self.background[letter]
#D[letter] = ( Pij * math.log(Pij) - Pij * math.log(Q) ) / math.log(2.0)
'''for both old and new'''
tot = tot + D[letter]
bits.append(D)
tots.append(tot)
for i in range(self.width):
s = []
_l = bits[i].keys()
_l.sort(lambda x,y,D=bits[i]: cmp(D[y],D[x]))
for key in _l:
for j in range(int(bits[i][key] / norm * height)):
s.append(key)
str.append(''.join(s))
fmt = '%%%ds'%height
print '# %s'%('-'*self.width)
for h in range(int(height)):
sys.stdout.write("# ")
for i in range(self.width):
sys.stdout.write((fmt%str[i])[h])
if h == 0:
sys.stdout.write(' -- %4.2f bits\n'%norm)
elif h == height-1:
sys.stdout.write(' -- %4.2f bits\n'%(norm/height))
else:
sys.stdout.write('\n')
print '# %s'%('-'*self.width)
print '# %s'%self.oneletter | m._print_bits(,norm=2.3, height=8.0) -- Print a text-rendering of the Motif Logo
norm -- maximum number of bits to show
height -- number of lines of text to use to render logo | Below is the instruction that describes the task:
### Input:
m._print_bits(,norm=2.3, height=8.0) -- Print a text-rendering of the Motif Logo
norm -- maximum number of bits to show
height -- number of lines of text to use to render logo
### Response:
def _print_bits(self,norm=2.3, height=8.0):
"""
m._print_bits(,norm=2.3, height=8.0) -- Print a text-rendering of the Motif Logo
norm -- maximum number of bits to show
height -- number of lines of text to use to render logo
"""
bits = []
tots = []
str = []
for i in range(self.width):
D = {}
tot = 0
for letter in ['A', 'C', 'T', 'G']:
if self.logP:
Pij = pow(2.0, self.logP[i][letter])
else:
Pij = pow(2.0, self.ll[i][letter]) * self.background[letter]
if Pij > 0.01:
'''Old'''
D[letter] = Pij * self.ll[i][letter]
#'''new'''
#Q = self.background[letter]
#D[letter] = ( Pij * math.log(Pij) - Pij * math.log(Q) ) / math.log(2.0)
'''for both old and new'''
tot = tot + D[letter]
bits.append(D)
tots.append(tot)
for i in range(self.width):
s = []
_l = bits[i].keys()
_l.sort(lambda x,y,D=bits[i]: cmp(D[y],D[x]))
for key in _l:
for j in range(int(bits[i][key] / norm * height)):
s.append(key)
str.append(''.join(s))
fmt = '%%%ds'%height
print '# %s'%('-'*self.width)
for h in range(int(height)):
sys.stdout.write("# ")
for i in range(self.width):
sys.stdout.write((fmt%str[i])[h])
if h == 0:
sys.stdout.write(' -- %4.2f bits\n'%norm)
elif h == height-1:
sys.stdout.write(' -- %4.2f bits\n'%(norm/height))
else:
sys.stdout.write('\n')
print '# %s'%('-'*self.width)
print '# %s'%self.oneletter |
def despike(df, n1=2, n2=20, block=100, keep=0):
"""
Wild Edit Seabird-like function. Passes with Standard deviation
`n1` and `n2` with window size `block`.
"""
if isinstance(df, pd.Series):
new_df = _despike(df, n1=n1, n2=n2, block=block, keep=keep)
else:
new_df = df.apply(_despike, n1=n1, n2=n2, block=block, keep=keep)
return new_df | Wild Edit Seabird-like function. Passes with Standard deviation
`n1` and `n2` with window size `block`. | Below is the instruction that describes the task:
### Input:
Wild Edit Seabird-like function. Passes with Standard deviation
`n1` and `n2` with window size `block`.
### Response:
def despike(df, n1=2, n2=20, block=100, keep=0):
"""
Wild Edit Seabird-like function. Passes with Standard deviation
`n1` and `n2` with window size `block`.
"""
if isinstance(df, pd.Series):
new_df = _despike(df, n1=n1, n2=n2, block=block, keep=keep)
else:
new_df = df.apply(_despike, n1=n1, n2=n2, block=block, keep=keep)
return new_df |
def _load(self, **kwargs):
"""Must check if rule actually exists before proceeding with load."""
if self._check_existence_by_collection(
self._meta_data['container'], kwargs['name']):
return super(Rules, self)._load(**kwargs)
msg = 'The rule named, {}, does not exist on the device.'.format(
kwargs['name'])
raise NonExtantPolicyRule(msg) | Must check if rule actually exists before proceeding with load. | Below is the the instruction that describes the task:
### Input:
Must check if rule actually exists before proceeding with load.
### Response:
def _load(self, **kwargs):
"""Must check if rule actually exists before proceeding with load."""
if self._check_existence_by_collection(
self._meta_data['container'], kwargs['name']):
return super(Rules, self)._load(**kwargs)
msg = 'The rule named, {}, does not exist on the device.'.format(
kwargs['name'])
raise NonExtantPolicyRule(msg) |
def _gather_configs_in(directory):
""" Return list of fully qualified python filenames in the given dir """
try:
return sorted([
os.path.join(directory, fname)
for fname in os.listdir(directory)
if fname.endswith('.py')
])
except OSError:
return [] | Return list of fully qualified python filenames in the given dir | Below is the the instruction that describes the task:
### Input:
Return list of fully qualified python filenames in the given dir
### Response:
def _gather_configs_in(directory):
""" Return list of fully qualified python filenames in the given dir """
try:
return sorted([
os.path.join(directory, fname)
for fname in os.listdir(directory)
if fname.endswith('.py')
])
except OSError:
return [] |
def __send_buffer(self):
"""
Sends the contents of self.__out_buffer to serial device
:return: Number of bytes written
"""
bytes_written = self.serial.write(self.__out_buffer.raw)
if self.DEBUG_MODE:
print("Wrote: '{}'".format(binascii.hexlify(self.__out_buffer.raw)))
if bytes_written != len(self.__out_buffer):
raise IOError("{} bytes written for output buffer of size {}".format(bytes_written,
len(self.__out_buffer)))
return bytes_written | Sends the contents of self.__out_buffer to serial device
:return: Number of bytes written | Below is the instruction that describes the task:
### Input:
Sends the contents of self.__out_buffer to serial device
:return: Number of bytes written
### Response:
def __send_buffer(self):
"""
Sends the contents of self.__out_buffer to serial device
:return: Number of bytes written
"""
bytes_written = self.serial.write(self.__out_buffer.raw)
if self.DEBUG_MODE:
print("Wrote: '{}'".format(binascii.hexlify(self.__out_buffer.raw)))
if bytes_written != len(self.__out_buffer):
raise IOError("{} bytes written for output buffer of size {}".format(bytes_written,
len(self.__out_buffer)))
return bytes_written |
def rotateInZMat(theta_deg):
"""Rotate a vector theta degrees around the z-axis
Equivalent to yaw left
Rotates the vector in the sense that the x-axis is rotated
towards the y-axis. If looking along the z-axis (which is
not the way you usually look at it), the vector rotates
clockwise.
If sitting on the vector [1,0,0], the rotation is towards the left
Input:
theta_deg (float) Angle through which vectors should be
rotated in degrees
Returns:
A matrix
To rotate a vector, premultiply by this matrix.
To rotate the coord sys underneath the vector, post multiply
"""
ct = np.cos( np.radians(theta_deg))
st = np.sin( np.radians(theta_deg))
rMat = np.array([ [ ct, -st, 0],
[ st, ct, 0],
[ 0, 0, 1],
])
return rMat | Rotate a vector theta degrees around the z-axis
Equivalent to yaw left
Rotates the vector in the sense that the x-axis is rotated
towards the y-axis. If looking along the z-axis (which is
not the way you usually look at it), the vector rotates
clockwise.
If sitting on the vector [1,0,0], the rotation is towards the left
Input:
theta_deg (float) Angle through which vectors should be
rotated in degrees
Returns:
A matrix
To rotate a vector, premultiply by this matrix.
To rotate the coord sys underneath the vector, post multiply | Below is the instruction that describes the task:
### Input:
Rotate a vector theta degrees around the z-axis
Equivalent to yaw left
Rotates the vector in the sense that the x-axis is rotated
towards the y-axis. If looking along the z-axis (which is
not the way you usually look at it), the vector rotates
clockwise.
If sitting on the vector [1,0,0], the rotation is towards the left
Input:
theta_deg (float) Angle through which vectors should be
rotated in degrees
Returns:
A matrix
To rotate a vector, premultiply by this matrix.
To rotate the coord sys underneath the vector, post multiply
### Response:
def rotateInZMat(theta_deg):
"""Rotate a vector theta degrees around the z-axis
Equivalent to yaw left
Rotates the vector in the sense that the x-axis is rotated
towards the y-axis. If looking along the z-axis (which is
not the way you usually look at it), the vector rotates
clockwise.
If sitting on the vector [1,0,0], the rotation is towards the left
Input:
theta_deg (float) Angle through which vectors should be
rotated in degrees
Returns:
A matrix
To rotate a vector, premultiply by this matrix.
To rotate the coord sys underneath the vector, post multiply
"""
ct = np.cos( np.radians(theta_deg))
st = np.sin( np.radians(theta_deg))
rMat = np.array([ [ ct, -st, 0],
[ st, ct, 0],
[ 0, 0, 1],
])
return rMat |
def get_price_id_list(self, package_keyname, item_keynames, core=None):
"""Converts a list of item keynames to a list of price IDs.
This function is used to convert a list of item keynames into
a list of price IDs that are used in the Product_Order verifyOrder()
and placeOrder() functions.
:param str package_keyname: The package associated with the prices
:param list item_keynames: A list of item keyname strings
:param str core: preset guest core capacity.
:returns: A list of price IDs associated with the given item
keynames in the given package
"""
mask = 'id, itemCategory, keyName, prices[categories]'
items = self.list_items(package_keyname, mask=mask)
prices = []
category_dict = {"gpu0": -1, "pcie_slot0": -1}
for item_keyname in item_keynames:
try:
# Need to find the item in the package that has a matching
# keyName with the current item we are searching for
matching_item = [i for i in items
if i['keyName'] == item_keyname][0]
except IndexError:
raise exceptions.SoftLayerError(
"Item {} does not exist for package {}".format(item_keyname,
package_keyname))
# we want to get the price ID that has no location attached to it,
# because that is the most generic price. verifyOrder/placeOrder
# can take that ID and create the proper price for us in the location
# in which the order is made
item_category = matching_item['itemCategory']['categoryCode']
if item_category not in category_dict:
price_id = self.get_item_price_id(core, matching_item['prices'])
else:
# GPU and PCIe items has two generic prices and they are added to the list
# according to the number of items in the order.
category_dict[item_category] += 1
category_code = item_category[:-1] + str(category_dict[item_category])
price_id = [p['id'] for p in matching_item['prices']
if not p['locationGroupId']
and p['categories'][0]['categoryCode'] == category_code][0]
prices.append(price_id)
return prices | Converts a list of item keynames to a list of price IDs.
This function is used to convert a list of item keynames into
a list of price IDs that are used in the Product_Order verifyOrder()
and placeOrder() functions.
:param str package_keyname: The package associated with the prices
:param list item_keynames: A list of item keyname strings
:param str core: preset guest core capacity.
:returns: A list of price IDs associated with the given item
keynames in the given package | Below is the instruction that describes the task:
### Input:
Converts a list of item keynames to a list of price IDs.
This function is used to convert a list of item keynames into
a list of price IDs that are used in the Product_Order verifyOrder()
and placeOrder() functions.
:param str package_keyname: The package associated with the prices
:param list item_keynames: A list of item keyname strings
:param str core: preset guest core capacity.
:returns: A list of price IDs associated with the given item
keynames in the given package
### Response:
def get_price_id_list(self, package_keyname, item_keynames, core=None):
"""Converts a list of item keynames to a list of price IDs.
This function is used to convert a list of item keynames into
a list of price IDs that are used in the Product_Order verifyOrder()
and placeOrder() functions.
:param str package_keyname: The package associated with the prices
:param list item_keynames: A list of item keyname strings
:param str core: preset guest core capacity.
:returns: A list of price IDs associated with the given item
keynames in the given package
"""
mask = 'id, itemCategory, keyName, prices[categories]'
items = self.list_items(package_keyname, mask=mask)
prices = []
category_dict = {"gpu0": -1, "pcie_slot0": -1}
for item_keyname in item_keynames:
try:
# Need to find the item in the package that has a matching
# keyName with the current item we are searching for
matching_item = [i for i in items
if i['keyName'] == item_keyname][0]
except IndexError:
raise exceptions.SoftLayerError(
"Item {} does not exist for package {}".format(item_keyname,
package_keyname))
# we want to get the price ID that has no location attached to it,
# because that is the most generic price. verifyOrder/placeOrder
# can take that ID and create the proper price for us in the location
# in which the order is made
item_category = matching_item['itemCategory']['categoryCode']
if item_category not in category_dict:
price_id = self.get_item_price_id(core, matching_item['prices'])
else:
# GPU and PCIe items has two generic prices and they are added to the list
# according to the number of items in the order.
category_dict[item_category] += 1
category_code = item_category[:-1] + str(category_dict[item_category])
price_id = [p['id'] for p in matching_item['prices']
if not p['locationGroupId']
and p['categories'][0]['categoryCode'] == category_code][0]
prices.append(price_id)
return prices |
def initialize_snapshot(self):
""" Copy the DAG and validate """
logger.debug('Initializing DAG snapshot for job {0}'.format(self.name))
if self.snapshot is not None:
logging.warn("Attempting to initialize DAG snapshot without " +
"first destroying old snapshot.")
snapshot_to_validate = deepcopy(self.graph)
is_valid, reason = self.validate(snapshot_to_validate)
if not is_valid:
raise DagobahError(reason)
self.snapshot = snapshot_to_validate | Copy the DAG and validate | Below is the the instruction that describes the task:
### Input:
Copy the DAG and validate
### Response:
def initialize_snapshot(self):
""" Copy the DAG and validate """
logger.debug('Initializing DAG snapshot for job {0}'.format(self.name))
if self.snapshot is not None:
logging.warn("Attempting to initialize DAG snapshot without " +
"first destroying old snapshot.")
snapshot_to_validate = deepcopy(self.graph)
is_valid, reason = self.validate(snapshot_to_validate)
if not is_valid:
raise DagobahError(reason)
self.snapshot = snapshot_to_validate |
def ipcidr(self, *args):
"""Returns a random address from within the given cidr notation
IPCIDR:cidr
%{IPCIDR:10.0.0.0/8} -> ''
"""
call_args = list(args)
return self.random.choice(IPNetwork(call_args.pop(0))) | Returns a random address from within the given cidr notation
IPCIDR:cidr
%{IPCIDR:10.0.0.0/8} -> '' | Below is the instruction that describes the task:
### Input:
Returns a random address from within the given cidr notation
IPCIDR:cidr
%{IPCIDR:10.0.0.0/8} -> ''
### Response:
def ipcidr(self, *args):
"""Returns a random address from within the given cidr notation
IPCIDR:cidr
%{IPCIDR:10.0.0.0/8} -> ''
"""
call_args = list(args)
return self.random.choice(IPNetwork(call_args.pop(0))) |
def serialize(obj, **options):
'''
Serialize Python data to JSON.
:param obj: the data structure to serialize
:param options: options given to lower json/simplejson module.
'''
try:
if 'fp' in options:
return salt.utils.json.dump(obj, _json_module=_json, **options)
else:
return salt.utils.json.dumps(obj, _json_module=_json, **options)
except Exception as error:
raise SerializationError(error) | Serialize Python data to JSON.
:param obj: the data structure to serialize
:param options: options given to lower json/simplejson module. | Below is the instruction that describes the task:
### Input:
Serialize Python data to JSON.
:param obj: the data structure to serialize
:param options: options given to lower json/simplejson module.
### Response:
def serialize(obj, **options):
'''
Serialize Python data to JSON.
:param obj: the data structure to serialize
:param options: options given to lower json/simplejson module.
'''
try:
if 'fp' in options:
return salt.utils.json.dump(obj, _json_module=_json, **options)
else:
return salt.utils.json.dumps(obj, _json_module=_json, **options)
except Exception as error:
raise SerializationError(error) |
def verify_recipient(self, recipient):
"""
Verify that I'm the recipient of the assertion
:param recipient: A URI specifying the entity or location to which an
attesting entity can present the assertion.
:return: True/False
"""
if not self.conv_info:
return True
_info = self.conv_info
try:
if recipient == _info['entity_id']:
return True
except KeyError:
pass
try:
if recipient in self.return_addrs:
return True
except KeyError:
pass
return False | Verify that I'm the recipient of the assertion
:param recipient: A URI specifying the entity or location to which an
attesting entity can present the assertion.
:return: True/False | Below is the instruction that describes the task:
### Input:
Verify that I'm the recipient of the assertion
:param recipient: A URI specifying the entity or location to which an
attesting entity can present the assertion.
:return: True/False
### Response:
def verify_recipient(self, recipient):
"""
Verify that I'm the recipient of the assertion
:param recipient: A URI specifying the entity or location to which an
attesting entity can present the assertion.
:return: True/False
"""
if not self.conv_info:
return True
_info = self.conv_info
try:
if recipient == _info['entity_id']:
return True
except KeyError:
pass
try:
if recipient in self.return_addrs:
return True
except KeyError:
pass
return False |
def template(template_name, *, app_key=APP_KEY, encoding='utf-8', status=200):
"""
Decorator compatible with aiohttp_apiset router
"""
def wrapper(func):
@functools.wraps(func)
async def wrapped(*args, **kwargs):
if asyncio.iscoroutinefunction(func):
coro = func
else:
coro = asyncio.coroutine(func)
context = await coro(*args, **kwargs)
if isinstance(context, web.StreamResponse):
return context
if 'request' in kwargs:
request = kwargs['request']
elif not args:
request = None
warnings.warn("Request not detected")
elif isinstance(args[0], AbstractView):
request = args[0].request
else:
request = args[-1]
response = render_template(template_name, request, context,
app_key=app_key, encoding=encoding)
response.set_status(status)
return response
return wrapped
return wrapper | Decorator compatible with aiohttp_apiset router | Below is the the instruction that describes the task:
### Input:
Decorator compatible with aiohttp_apiset router
### Response:
def template(template_name, *, app_key=APP_KEY, encoding='utf-8', status=200):
"""
Decorator compatible with aiohttp_apiset router
"""
def wrapper(func):
@functools.wraps(func)
async def wrapped(*args, **kwargs):
if asyncio.iscoroutinefunction(func):
coro = func
else:
coro = asyncio.coroutine(func)
context = await coro(*args, **kwargs)
if isinstance(context, web.StreamResponse):
return context
if 'request' in kwargs:
request = kwargs['request']
elif not args:
request = None
warnings.warn("Request not detected")
elif isinstance(args[0], AbstractView):
request = args[0].request
else:
request = args[-1]
response = render_template(template_name, request, context,
app_key=app_key, encoding=encoding)
response.set_status(status)
return response
return wrapped
return wrapper |
def run(self):
"""Compile libfaketime."""
if sys.platform == "linux" or sys.platform == "linux2":
libname = 'libfaketime.so.1'
libnamemt = 'libfaketimeMT.so.1'
elif sys.platform == "darwin":
libname = 'libfaketime.1.dylib'
libnamemt = 'libfaketimeMT.1.dylib'
else:
sys.stderr.write("WARNING : libfaketime does not support platform {}\n".format(sys.platform))
sys.stderr.flush()
return
faketime_lib = join('faketime', libname)
faketime_lib_mt = join('faketime', libnamemt)
self.my_outputs = []
setup_py_directory = dirname(realpath(__file__))
faketime_directory = join(setup_py_directory, "faketime")
os.chdir(faketime_directory)
if sys.platform == "linux" or sys.platform == "linux2":
subprocess.check_call(['make',])
else:
os.chdir(setup_py_directory)
if "10.12" in subprocess.check_output(["sw_vers", "-productVersion"]).decode('utf8'):
self.copy_file(
join('faketime', "libfaketime.c.sierra"),
join('faketime', "libfaketime.c")
)
os.chdir(faketime_directory)
subprocess.check_call(['make', '-f', 'Makefile.OSX'])
os.chdir(setup_py_directory)
dest = join(self.install_purelib, dirname(faketime_lib))
dest_mt = join(self.install_purelib, dirname(faketime_lib_mt))
try:
os.makedirs(dest)
except OSError as e:
if e.errno != 17:
raise
self.copy_file(faketime_lib, dest)
if exists(faketime_lib_mt):
self.copy_file(faketime_lib_mt, dest_mt)
self.my_outputs.append(join(dest, libname))
install.run(self) | Compile libfaketime. | Below is the the instruction that describes the task:
### Input:
Compile libfaketime.
### Response:
def run(self):
"""Compile libfaketime."""
if sys.platform == "linux" or sys.platform == "linux2":
libname = 'libfaketime.so.1'
libnamemt = 'libfaketimeMT.so.1'
elif sys.platform == "darwin":
libname = 'libfaketime.1.dylib'
libnamemt = 'libfaketimeMT.1.dylib'
else:
sys.stderr.write("WARNING : libfaketime does not support platform {}\n".format(sys.platform))
sys.stderr.flush()
return
faketime_lib = join('faketime', libname)
faketime_lib_mt = join('faketime', libnamemt)
self.my_outputs = []
setup_py_directory = dirname(realpath(__file__))
faketime_directory = join(setup_py_directory, "faketime")
os.chdir(faketime_directory)
if sys.platform == "linux" or sys.platform == "linux2":
subprocess.check_call(['make',])
else:
os.chdir(setup_py_directory)
if "10.12" in subprocess.check_output(["sw_vers", "-productVersion"]).decode('utf8'):
self.copy_file(
join('faketime', "libfaketime.c.sierra"),
join('faketime', "libfaketime.c")
)
os.chdir(faketime_directory)
subprocess.check_call(['make', '-f', 'Makefile.OSX'])
os.chdir(setup_py_directory)
dest = join(self.install_purelib, dirname(faketime_lib))
dest_mt = join(self.install_purelib, dirname(faketime_lib_mt))
try:
os.makedirs(dest)
except OSError as e:
if e.errno != 17:
raise
self.copy_file(faketime_lib, dest)
if exists(faketime_lib_mt):
self.copy_file(faketime_lib_mt, dest_mt)
self.my_outputs.append(join(dest, libname))
install.run(self) |
def find_root_path(absolute_path, relative_path):
"""
Return the root path of a path relative to an absolute path.
Example:
@param absolute_path: an absolute path that is ended by the specified
relative path.
@param relative_path: a relative path that ends the specified absolute
path.
@return: the root path of the relative path.
"""
_absolute_path = os.path.normpath(absolute_path)
_relative_path = os.path.normpath(relative_path)
index = _absolute_path.rfind(_relative_path)
if index == -1 or len(_relative_path) + index < len(_absolute_path):
raise ValueError('The relative path does not end the specified absolute path')
return _absolute_path[:index] | Return the root path of a path relative to an absolute path.
Example:
@param absolute_path: an absolute path that is ended by the specified
relative path.
@param relative_path: a relative path that ends the specified absolute
path.
@return: the root path of the relative path. | Below is the instruction that describes the task:
### Input:
Return the root path of a path relative to an absolute path.
Example:
@param absolute_path: an absolute path that is ended by the specified
relative path.
@param relative_path: a relative path that ends the specified absolute
path.
@return: the root path of the relative path.
### Response:
def find_root_path(absolute_path, relative_path):
"""
Return the root path of a path relative to an absolute path.
Example:
@param absolute_path: an absolute path that is ended by the specified
relative path.
@param relative_path: a relative path that ends the specified absolute
path.
@return: the root path of the relative path.
"""
_absolute_path = os.path.normpath(absolute_path)
_relative_path = os.path.normpath(relative_path)
index = _absolute_path.rfind(_relative_path)
if index == -1 or len(_relative_path) + index < len(_absolute_path):
raise ValueError('The relative path does not end the specified absolute path')
return _absolute_path[:index] |
def head(self, display=True, html=None):
"""Return the header stats of this dataset. If in IPython, this will
be formatted to HTML. Otherwise returns a console friendly string"""
# Generate the output
if html:
fmt = ""
# HTML version
fmt += "\n"
fmt += "<table>\n"
fmt += "<tr><th>{}</th><th>Information</th></tr>\n".format(type(self).__name__)
row = "<tr><td>{}</td><td>{}</td></tr>\n"
# now make a call on the object to get its attributes as a list of len 2 tuples
for attr in self._get_attrs():
try:
fmt += row.format(attr[0], attr[2].format(*attr[1]))
except:
fmt += row.format(attr[0], attr[2].format(attr[1]))
fmt += row.format('N Scalars', self.n_scalars)
fmt += "</table>\n"
fmt += "\n"
if display:
from IPython.display import display, HTML
display(HTML(fmt))
return
return fmt
# Otherwise return a string that is Python console friendly
fmt = "{} ({})\n".format(type(self).__name__, hex(id(self)))
# now make a call on the object to get its attributes as a list of len 2 tuples
row = " {}:\t{}\n"
for attr in self._get_attrs():
try:
fmt += row.format(attr[0], attr[2].format(*attr[1]))
except:
fmt += row.format(attr[0], attr[2].format(attr[1]))
fmt += row.format('N Scalars', self.n_scalars)
return fmt | Return the header stats of this dataset. If in IPython, this will
be formatted to HTML. Otherwise returns a console friendly string | Below is the instruction that describes the task:
### Input:
Return the header stats of this dataset. If in IPython, this will
be formatted to HTML. Otherwise returns a console friendly string
### Response:
def head(self, display=True, html=None):
"""Return the header stats of this dataset. If in IPython, this will
be formatted to HTML. Otherwise returns a console friendly string"""
# Generate the output
if html:
fmt = ""
# HTML version
fmt += "\n"
fmt += "<table>\n"
fmt += "<tr><th>{}</th><th>Information</th></tr>\n".format(type(self).__name__)
row = "<tr><td>{}</td><td>{}</td></tr>\n"
# now make a call on the object to get its attributes as a list of len 2 tuples
for attr in self._get_attrs():
try:
fmt += row.format(attr[0], attr[2].format(*attr[1]))
except:
fmt += row.format(attr[0], attr[2].format(attr[1]))
fmt += row.format('N Scalars', self.n_scalars)
fmt += "</table>\n"
fmt += "\n"
if display:
from IPython.display import display, HTML
display(HTML(fmt))
return
return fmt
# Otherwise return a string that is Python console friendly
fmt = "{} ({})\n".format(type(self).__name__, hex(id(self)))
# now make a call on the object to get its attributes as a list of len 2 tuples
row = " {}:\t{}\n"
for attr in self._get_attrs():
try:
fmt += row.format(attr[0], attr[2].format(*attr[1]))
except:
fmt += row.format(attr[0], attr[2].format(attr[1]))
fmt += row.format('N Scalars', self.n_scalars)
return fmt |
def with_random_weights(cls, options):
"""
Initialize from a list of options with random weights.
The weights assigned to each object are uniformally random
integers between ``1`` and ``len(options)``
Args:
options (list): The list of options of any type this object
can return with the ``get()`` method.
Returns:
SoftOptions: A newly constructed instance
"""
return cls([(value, random.randint(1, len(options)))
for value in options]) | Initialize from a list of options with random weights.
The weights assigned to each object are uniformly random
integers between ``1`` and ``len(options)``
Args:
options (list): The list of options of any type this object
can return with the ``get()`` method.
Returns:
SoftOptions: A newly constructed instance | Below is the the instruction that describes the task:
### Input:
Initialize from a list of options with random weights.
The weights assigned to each object are uniformly random
integers between ``1`` and ``len(options)``
Args:
options (list): The list of options of any type this object
can return with the ``get()`` method.
Returns:
SoftOptions: A newly constructed instance
### Response:
def with_random_weights(cls, options):
    """
    Build an instance from ``options`` with randomly chosen weights.

    Each option receives a uniformly random integer weight between
    ``1`` and ``len(options)``.

    Args:
        options (list): The candidate values, of any type, that the
            resulting object can return from its ``get()`` method.

    Returns:
        SoftOptions: A newly constructed instance
    """
    upper = len(options)
    weighted = []
    for option in options:
        weighted.append((option, random.randint(1, upper)))
    return cls(weighted)
def delete_model(self, **kwargs):
"""Delete model.
Parameters
-----------
kwargs : logging information
Find items to delete, leave it empty to delete all log.
"""
self._fill_project_info(kwargs)
self.db.Model.delete_many(kwargs)
logging.info("[Database] Delete Model SUCCESS") | Delete model.
Parameters
-----------
kwargs : logging information
Find items to delete, leave it empty to delete all log. | Below is the the instruction that describes the task:
### Input:
Delete model.
Parameters
-----------
kwargs : logging information
Find items to delete, leave it empty to delete all log.
### Response:
def delete_model(self, **kwargs):
    """Delete model records matching the given filter.
    Parameters
    -----------
    kwargs : logging information
        Find items to delete; leave it empty to delete all model records.
    """
    # Scope the filter to the current project before issuing the delete.
    self._fill_project_info(kwargs)
    # NOTE(review): with empty ``kwargs`` this removes *every* model
    # document in the collection -- presumably intentional, but verify.
    self.db.Model.delete_many(kwargs)
    logging.info("[Database] Delete Model SUCCESS")
def get_link_density(node, node_text=None):
"""
Computes the ratio for text in given node and text in links
contained in the node. It is computed from number of
characters in the texts.
:parameter Element node:
HTML element in which links density is computed.
:parameter string node_text:
Text content of given node if it was obtained before.
:returns float:
Returns value of computed 0 <= density <= 1, where 0 means
no links and 1 means that node contains only links.
"""
if node_text is None:
node_text = node.text_content()
node_text = normalize_whitespace(node_text.strip())
text_length = len(node_text)
if text_length == 0:
return 0.0
links_length = sum(map(_get_normalized_text_length, node.findall(".//a")))
# Give 50 bonus chars worth of length for each img.
# Tweaking this 50 down a notch should help if we hit false positives.
img_bonuses = 50 * len(node.findall(".//img"))
links_length = max(0, links_length - img_bonuses)
return links_length / text_length | Computes the ratio for text in given node and text in links
contained in the node. It is computed from number of
characters in the texts.
:parameter Element node:
HTML element in which links density is computed.
:parameter string node_text:
Text content of given node if it was obtained before.
:returns float:
Returns value of computed 0 <= density <= 1, where 0 means
no links and 1 means that node contains only links. | Below is the the instruction that describes the task:
### Input:
Computes the ratio for text in given node and text in links
contained in the node. It is computed from number of
characters in the texts.
:parameter Element node:
HTML element in which links density is computed.
:parameter string node_text:
Text content of given node if it was obtained before.
:returns float:
Returns value of computed 0 <= density <= 1, where 0 means
no links and 1 means that node contains only links.
### Response:
def get_link_density(node, node_text=None):
    """
    Compute the share of a node's text that sits inside ``<a>`` links,
    measured in characters.

    :parameter Element node:
        HTML element whose link density is computed.
    :parameter string node_text:
        Pre-computed text content of ``node``, if available.
    :returns float:
        A value with 0 <= density <= 1, where 0 means no link text at
        all and 1 means the node's text consists entirely of links.
    """
    text = node_text if node_text is not None else node.text_content()
    total_chars = len(normalize_whitespace(text.strip()))
    if not total_chars:
        return 0.0
    link_chars = sum(_get_normalized_text_length(a) for a in node.findall(".//a"))
    # Each image earns a 50-character discount; lower this if link-heavy
    # pages with many images start producing false positives.
    link_chars -= 50 * len(node.findall(".//img"))
    return max(0, link_chars) / total_chars
def has_valid_padding(value, bits=7):
"""Whether the padding bits are all zero"""
assert bits <= 8
mask = (((1 << (8 - bits)) - 1) << bits)
if isinstance(value, integer_types):
while value:
if value & mask:
return False
value >>= 8
elif isinstance(value, bytes):
for byte in bytearray(value):
if byte & mask:
return False
else:
raise TypeError
return True | Whether the padding bits are all zero | Below is the the instruction that describes the task:
### Input:
Whether the padding bits are all zero
### Response:
def has_valid_padding(value, bits=7):
    """Return True when every padding bit of ``value`` is zero.

    ``bits`` is the number of payload bits per byte; the remaining high
    bits of each byte are padding and must all be clear.  ``value`` may
    be an (arbitrarily long) integer or a byte string.
    """
    assert bits <= 8
    # Mask selecting the top (8 - bits) padding bits of a byte.
    padding_mask = (((1 << (8 - bits)) - 1) << bits)
    if isinstance(value, integer_types):
        remaining = value
        while remaining:
            if remaining & padding_mask:
                return False
            remaining >>= 8
        return True
    if isinstance(value, bytes):
        return not any(byte & padding_mask for byte in bytearray(value))
    raise TypeError
def _get_first_part_id(self, assessment_id):
"""This session implemenation assumes all items are assigned to the first assessment part"""
if assessment_id not in self._first_part_index:
self._first_part_index[assessment_id] = get_first_part_id_for_assessment(
assessment_id,
runtime=self._runtime,
proxy=self._proxy,
create=True,
bank_id=self._catalog_id)
return self._first_part_index[assessment_id] | This session implemenation assumes all items are assigned to the first assessment part | Below is the the instruction that describes the task:
### Input:
This session implementation assumes all items are assigned to the first assessment part
### Response:
def _get_first_part_id(self, assessment_id):
    """Look up (and cache) the first assessment part id for ``assessment_id``.

    This session implementation assumes all items are assigned to the
    first assessment part.
    """
    # Cache misses trigger a lookup; ``create=True`` lazily builds the
    # part the first time an assessment is seen.
    if assessment_id not in self._first_part_index:
        self._first_part_index[assessment_id] = get_first_part_id_for_assessment(
            assessment_id,
            runtime=self._runtime,
            proxy=self._proxy,
            create=True,
            bank_id=self._catalog_id)
    return self._first_part_index[assessment_id]
def set_wsgi_params(self, module=None, callable_name=None, env_strategy=None):
"""Set wsgi related parameters.
:param str|unicode module:
* load .wsgi file as the Python application
* load a WSGI module as the application.
.. note:: The module (sans ``.py``) must be importable, ie. be in ``PYTHONPATH``.
Examples:
* mypackage.my_wsgi_module -- read from `application` attr of mypackage/my_wsgi_module.py
* mypackage.my_wsgi_module:my_app -- read from `my_app` attr of mypackage/my_wsgi_module.py
:param str|unicode callable_name: Set WSGI callable name. Default: application.
:param str|unicode env_strategy: Strategy for allocating/deallocating
the WSGI env, can be:
* ``cheat`` - preallocates the env dictionary on uWSGI startup and clears it
after each request. Default behaviour for uWSGI <= 2.0.x
* ``holy`` - creates and destroys the environ dictionary at each request.
Default behaviour for uWSGI >= 2.1
"""
module = module or ''
if '/' in module:
self._set('wsgi-file', module, condition=module)
else:
self._set('wsgi', module, condition=module)
self._set('callable', callable_name)
self._set('wsgi-env-behaviour', env_strategy)
return self._section | Set wsgi related parameters.
:param str|unicode module:
* load .wsgi file as the Python application
* load a WSGI module as the application.
.. note:: The module (sans ``.py``) must be importable, ie. be in ``PYTHONPATH``.
Examples:
* mypackage.my_wsgi_module -- read from `application` attr of mypackage/my_wsgi_module.py
* mypackage.my_wsgi_module:my_app -- read from `my_app` attr of mypackage/my_wsgi_module.py
:param str|unicode callable_name: Set WSGI callable name. Default: application.
:param str|unicode env_strategy: Strategy for allocating/deallocating
the WSGI env, can be:
* ``cheat`` - preallocates the env dictionary on uWSGI startup and clears it
after each request. Default behaviour for uWSGI <= 2.0.x
* ``holy`` - creates and destroys the environ dictionary at each request.
Default behaviour for uWSGI >= 2.1 | Below is the the instruction that describes the task:
### Input:
Set wsgi related parameters.
:param str|unicode module:
* load .wsgi file as the Python application
* load a WSGI module as the application.
.. note:: The module (sans ``.py``) must be importable, ie. be in ``PYTHONPATH``.
Examples:
* mypackage.my_wsgi_module -- read from `application` attr of mypackage/my_wsgi_module.py
* mypackage.my_wsgi_module:my_app -- read from `my_app` attr of mypackage/my_wsgi_module.py
:param str|unicode callable_name: Set WSGI callable name. Default: application.
:param str|unicode env_strategy: Strategy for allocating/deallocating
the WSGI env, can be:
* ``cheat`` - preallocates the env dictionary on uWSGI startup and clears it
after each request. Default behaviour for uWSGI <= 2.0.x
* ``holy`` - creates and destroys the environ dictionary at each request.
Default behaviour for uWSGI >= 2.1
### Response:
def set_wsgi_params(self, module=None, callable_name=None, env_strategy=None):
    """Configure WSGI-related uWSGI options.

    :param str|unicode module: Either a filesystem path to a ``.wsgi``
        file (detected by the presence of ``/``) or a dotted module path
        importable from ``PYTHONPATH``, optionally with a ``:callable``
        suffix.

        Examples:
            * mypackage.my_wsgi_module -- read from `application` attr of mypackage/my_wsgi_module.py
            * mypackage.my_wsgi_module:my_app -- read from `my_app` attr of mypackage/my_wsgi_module.py

    :param str|unicode callable_name: Name of the WSGI callable inside
        the module. Default: application.

    :param str|unicode env_strategy: How the WSGI ``environ`` dict is
        allocated per request:

        * ``cheat`` - preallocate once at startup and clear it after each
          request. Default behaviour for uWSGI <= 2.0.x
        * ``holy`` - create and destroy the dict on every request.
          Default behaviour for uWSGI >= 2.1
    """
    module = module or ''
    # A path-like value (contains '/') is loaded as a .wsgi file,
    # anything else as an importable module.
    option = 'wsgi-file' if '/' in module else 'wsgi'
    self._set(option, module, condition=module)
    self._set('callable', callable_name)
    self._set('wsgi-env-behaviour', env_strategy)
    return self._section
def union(self, *args):
"""
Produce an array that contains the union: each distinct element
from all of the passed-in arrays.
"""
# setobj = set(self.obj)
# for i, v in enumerate(args):
# setobj = setobj + set(args[i])
# return self._wrap(self._clean._toOriginal(setobj))
args = list(args)
args.insert(0, self.obj)
return self._wrap(_.uniq(self._flatten(args, True, []))) | Produce an array that contains the union: each distinct element
from all of the passed-in arrays. | Below is the the instruction that describes the task:
### Input:
Produce an array that contains the union: each distinct element
from all of the passed-in arrays.
### Response:
def union(self, *args):
    """
    Produce an array that contains the union: each distinct element
    from all of the passed-in arrays.
    """
    # Flatten ``self.obj`` together with every passed-in array one level,
    # then drop duplicates (first occurrence wins) via ``_.uniq``.
    arrays = [self.obj] + list(args)
    return self._wrap(_.uniq(self._flatten(arrays, True, [])))
def covariance_matrix(self,x,y,names=None,cov=None):
"""build a pyemu.Cov instance from GeoStruct
Parameters
----------
x : (iterable of floats)
x-coordinate locations
y : (iterable of floats)
y-coordinate locations
names : (iterable of str)
names of location. If None, cov must not be None. Default is None.
cov : (pyemu.Cov) instance
an existing Cov instance. The contribution of this GeoStruct is added
to cov. If cov is None, names must not be None. Default is None
Returns
-------
cov : pyemu.Cov
the covariance matrix implied by this GeoStruct for the x,y pairs.
cov has row and column names supplied by the names argument unless
the "cov" argument was passed.
Note
----
either "names" or "cov" must be passed. If "cov" is passed, cov.shape
must equal len(x) and len(y).
Example
-------
``>>>pp_df = pyemu.pp_utils.pp_file_to_dataframe("hkpp.dat")``
``>>>cov = gs.covariance_matrix(pp_df.x,pp_df.y,pp_df.name)``
"""
if not isinstance(x,np.ndarray):
x = np.array(x)
if not isinstance(y,np.ndarray):
y = np.array(y)
assert x.shape[0] == y.shape[0]
if names is not None:
assert x.shape[0] == len(names)
c = np.zeros((len(names),len(names)))
np.fill_diagonal(c,self.nugget)
cov = Cov(x=c,names=names)
elif cov is not None:
assert cov.shape[0] == x.shape[0]
names = cov.row_names
c = np.zeros((len(names),1))
c += self.nugget
cont = Cov(x=c,names=names,isdiagonal=True)
cov += cont
else:
raise Exception("GeoStruct.covariance_matrix() requires either " +
"names or cov arg")
for v in self.variograms:
v.covariance_matrix(x,y,cov=cov)
return cov | build a pyemu.Cov instance from GeoStruct
Parameters
----------
x : (iterable of floats)
x-coordinate locations
y : (iterable of floats)
y-coordinate locations
names : (iterable of str)
names of location. If None, cov must not be None. Default is None.
cov : (pyemu.Cov) instance
an existing Cov instance. The contribution of this GeoStruct is added
to cov. If cov is None, names must not be None. Default is None
Returns
-------
cov : pyemu.Cov
the covariance matrix implied by this GeoStruct for the x,y pairs.
cov has row and column names supplied by the names argument unless
the "cov" argument was passed.
Note
----
either "names" or "cov" must be passed. If "cov" is passed, cov.shape
must equal len(x) and len(y).
Example
-------
``>>>pp_df = pyemu.pp_utils.pp_file_to_dataframe("hkpp.dat")``
``>>>cov = gs.covariance_matrix(pp_df.x,pp_df.y,pp_df.name)`` | Below is the the instruction that describes the task:
### Input:
build a pyemu.Cov instance from GeoStruct
Parameters
----------
x : (iterable of floats)
x-coordinate locations
y : (iterable of floats)
y-coordinate locations
names : (iterable of str)
names of location. If None, cov must not be None. Default is None.
cov : (pyemu.Cov) instance
an existing Cov instance. The contribution of this GeoStruct is added
to cov. If cov is None, names must not be None. Default is None
Returns
-------
cov : pyemu.Cov
the covariance matrix implied by this GeoStruct for the x,y pairs.
cov has row and column names supplied by the names argument unless
the "cov" argument was passed.
Note
----
either "names" or "cov" must be passed. If "cov" is passed, cov.shape
must equal len(x) and len(y).
Example
-------
``>>>pp_df = pyemu.pp_utils.pp_file_to_dataframe("hkpp.dat")``
``>>>cov = gs.covariance_matrix(pp_df.x,pp_df.y,pp_df.name)``
### Response:
def covariance_matrix(self,x,y,names=None,cov=None):
    """build a pyemu.Cov instance from GeoStruct
    Parameters
    ----------
    x : (iterable of floats)
        x-coordinate locations
    y : (iterable of floats)
        y-coordinate locations
    names : (iterable of str)
        names of location. If None, cov must not be None. Default is None.
    cov : (pyemu.Cov) instance
        an existing Cov instance. The contribution of this GeoStruct is added
        to cov. If cov is None, names must not be None. Default is None
    Returns
    -------
    cov : pyemu.Cov
        the covariance matrix implied by this GeoStruct for the x,y pairs.
        cov has row and column names supplied by the names argument unless
        the "cov" argument was passed.
    Note
    ----
    either "names" or "cov" must be passed. If "cov" is passed, cov.shape
    must equal len(x) and len(y).
    Example
    -------
    ``>>>pp_df = pyemu.pp_utils.pp_file_to_dataframe("hkpp.dat")``
    ``>>>cov = gs.covariance_matrix(pp_df.x,pp_df.y,pp_df.name)``
    """
    # Coerce coordinate inputs to arrays so the shape checks below hold.
    if not isinstance(x,np.ndarray):
        x = np.array(x)
    if not isinstance(y,np.ndarray):
        y = np.array(y)
    assert x.shape[0] == y.shape[0]
    if names is not None:
        assert x.shape[0] == len(names)
        # Fresh matrix: start from the nugget on the diagonal only.
        c = np.zeros((len(names),len(names)))
        np.fill_diagonal(c,self.nugget)
        cov = Cov(x=c,names=names)
    elif cov is not None:
        assert cov.shape[0] == x.shape[0]
        names = cov.row_names
        # Existing matrix: add the nugget as a diagonal contribution.
        c = np.zeros((len(names),1))
        c += self.nugget
        cont = Cov(x=c,names=names,isdiagonal=True)
        cov += cont
    else:
        raise Exception("GeoStruct.covariance_matrix() requires either " +
                        "names or cov arg")
    # Accumulate each variogram's contribution into ``cov`` in place.
    for v in self.variograms:
        v.covariance_matrix(x,y,cov=cov)
    return cov
def seasonal_subset(dataframe,
months='all'):
'''Get the seasonal data.
Parameters
----------
dataframe : pd.DataFrame
months: int, str
Months to use for statistics, or 'all' for 1-12 (default='all')
'''
if isinstance(months, str) and months == 'all':
months = np.arange(12) + 1
for month_num, month in enumerate(months):
df_cur = dataframe[dataframe.index.month == month]
if month_num == 0:
df = df_cur
else:
df = df.append(df_cur)
return df.sort_index() | Get the seasonal data.
Parameters
----------
dataframe : pd.DataFrame
months: int, str
Months to use for statistics, or 'all' for 1-12 (default='all') | Below is the the instruction that describes the task:
### Input:
Get the seasonal data.
Parameters
----------
dataframe : pd.DataFrame
months: int, str
Months to use for statistics, or 'all' for 1-12 (default='all')
### Response:
def seasonal_subset(dataframe,
                    months='all'):
    '''Get the seasonal data.

    Parameters
    ----------
    dataframe : pd.DataFrame
        Frame indexed by a ``DatetimeIndex``.
    months : sequence of int or str
        Months (1-12) to keep, or 'all' for 1-12 (default='all').
        Duplicate entries in ``months`` are ignored.

    Returns
    -------
    pd.DataFrame
        The rows falling in the requested months, sorted by index.
        Empty ``months`` yields an empty frame.
    '''
    if isinstance(months, str) and months == 'all':
        months = np.arange(12) + 1
    # Boolean mask replaces the per-month ``DataFrame.append`` loop,
    # which was deprecated (removed in pandas 2.0) and raised
    # UnboundLocalError when ``months`` was empty.
    mask = dataframe.index.month.isin(list(months))
    return dataframe[mask].sort_index()
def get_frame_locals(stepback=0):
"""Returns locals dictionary from a given frame.
:param int stepback:
:rtype: dict
"""
with Frame(stepback=stepback) as frame:
locals_dict = frame.f_locals
return locals_dict | Returns locals dictionary from a given frame.
:param int stepback:
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Returns locals dictionary from a given frame.
:param int stepback:
:rtype: dict
### Response:
def get_frame_locals(stepback=0):
    """Return the locals dictionary of a frame ``stepback`` levels up.

    :param int stepback: how many frames to walk back from the caller.
    :rtype: dict
    """
    with Frame(stepback=stepback) as frame:
        return frame.f_locals
def export(self, nidm_version, export_dir):
"""
Create prov entities and activities.
"""
self.add_attributes({
PROV['type']: self.type,
NIDM_DIMENSIONS_IN_VOXELS: json.dumps(self.dimensions.tolist()),
NIDM_NUMBER_OF_DIMENSIONS: self.number_of_dimensions,
NIDM_VOXEL_TO_WORLD_MAPPING:
json.dumps(self.voxel_to_world.tolist()),
NIDM_IN_WORLD_COORDINATE_SYSTEM: self.coordinate_system,
NIDM_VOXEL_UNITS: json.dumps(self.units),
NIDM_VOXEL_SIZE: json.dumps(self.voxel_size.tolist()),
PROV['label']: self.label}) | Create prov entities and activities. | Below is the the instruction that describes the task:
### Input:
Create prov entities and activities.
### Response:
def export(self, nidm_version, export_dir):
    """
    Create prov entities and activities.

    Attaches this object's coordinate-space description (dimensions,
    voxel-to-world mapping, units, voxel size, label) as PROV/NIDM
    attributes via ``add_attributes``.
    """
    # NOTE(review): ``nidm_version`` and ``export_dir`` are unused in this
    # body -- presumably kept for interface parity with other exporters;
    # confirm against sibling ``export`` implementations.
    self.add_attributes({
        PROV['type']: self.type,
        NIDM_DIMENSIONS_IN_VOXELS: json.dumps(self.dimensions.tolist()),
        NIDM_NUMBER_OF_DIMENSIONS: self.number_of_dimensions,
        NIDM_VOXEL_TO_WORLD_MAPPING:
            json.dumps(self.voxel_to_world.tolist()),
        NIDM_IN_WORLD_COORDINATE_SYSTEM: self.coordinate_system,
        NIDM_VOXEL_UNITS: json.dumps(self.units),
        NIDM_VOXEL_SIZE: json.dumps(self.voxel_size.tolist()),
        PROV['label']: self.label})
async def _setops(self, name, valu, editatom, init=False):
'''
Generate operations to set a property on a node.
'''
prop = self.form.prop(name)
if prop is None:
if self.snap.strict:
raise s_exc.NoSuchProp(name=name)
await self.snap.warn(f'NoSuchProp: name={name}')
return False
if self.isrunt:
if prop.info.get('ro'):
raise s_exc.IsRuntForm(mesg='Cannot set read-only props on runt nodes',
form=self.form.full, prop=name, valu=valu)
return await self.snap.core.runRuntPropSet(self, prop, valu)
curv = self.props.get(name)
# normalize the property value...
try:
norm, info = prop.type.norm(valu)
except Exception as e:
mesg = f'Bad property value: {prop.full}={valu!r}'
return await self.snap._raiseOnStrict(s_exc.BadPropValu, mesg, name=prop.name, valu=valu, emesg=str(e))
# do we already have the value?
if curv == norm:
return False
if curv is not None and not init:
if prop.info.get('ro'):
if self.snap.strict:
raise s_exc.ReadOnlyProp(name=prop.full)
# not setting a set-once prop unless we are init...
await self.snap.warn(f'ReadOnlyProp: name={prop.full}')
return False
# check for type specific merging...
norm = prop.type.merge(curv, norm)
if curv == norm:
return False
sops = prop.getSetOps(self.buid, norm)
editatom.sops.extend(sops)
# self.props[prop.name] = norm
editatom.npvs.append((self, prop, curv, norm))
# do we have any auto nodes to add?
auto = self.snap.model.form(prop.type.name)
if auto is not None:
buid = s_common.buid((auto.name, norm))
await self.snap._addNodeFnibOps((auto, norm, info, buid), editatom)
# does the type think we have special auto nodes to add?
# ( used only for adds which do not meet the above block )
for autoname, autovalu in info.get('adds', ()):
auto = self.snap.model.form(autoname)
autonorm, autoinfo = auto.type.norm(autovalu)
buid = s_common.buid((auto.name, autonorm))
await self.snap._addNodeFnibOps((auto, autovalu, autoinfo, buid), editatom)
# do we need to set any sub props?
subs = info.get('subs')
if subs is not None:
for subname, subvalu in subs.items():
full = prop.name + ':' + subname
subprop = self.form.prop(full)
if subprop is None:
continue
await self._setops(full, subvalu, editatom, init=init)
return True | Generate operations to set a property on a node. | Below is the the instruction that describes the task:
### Input:
Generate operations to set a property on a node.
### Response:
async def _setops(self, name, valu, editatom, init=False):
    '''
    Generate operations to set a property on a node.

    Normalizes ``valu`` for property ``name`` and queues the resulting
    storage operations on ``editatom`` (nothing is written directly
    here).  Returns True when the property value will change, False
    otherwise.
    '''
    prop = self.form.prop(name)
    if prop is None:
        if self.snap.strict:
            raise s_exc.NoSuchProp(name=name)
        await self.snap.warn(f'NoSuchProp: name={name}')
        return False
    if self.isrunt:
        # runt (runtime-only) nodes delegate property sets to the core
        if prop.info.get('ro'):
            raise s_exc.IsRuntForm(mesg='Cannot set read-only props on runt nodes',
                                   form=self.form.full, prop=name, valu=valu)
        return await self.snap.core.runRuntPropSet(self, prop, valu)
    curv = self.props.get(name)
    # normalize the property value...
    try:
        norm, info = prop.type.norm(valu)
    except Exception as e:
        mesg = f'Bad property value: {prop.full}={valu!r}'
        return await self.snap._raiseOnStrict(s_exc.BadPropValu, mesg, name=prop.name, valu=valu, emesg=str(e))
    # do we already have the value?
    if curv == norm:
        return False
    if curv is not None and not init:
        if prop.info.get('ro'):
            if self.snap.strict:
                raise s_exc.ReadOnlyProp(name=prop.full)
            # not setting a set-once prop unless we are init...
            await self.snap.warn(f'ReadOnlyProp: name={prop.full}')
            return False
        # check for type specific merging...
        norm = prop.type.merge(curv, norm)
        if curv == norm:
            return False
    sops = prop.getSetOps(self.buid, norm)
    editatom.sops.extend(sops)
    # self.props[prop.name] = norm
    # record (node, prop, old, new) so the editatom can fire change events
    editatom.npvs.append((self, prop, curv, norm))
    # do we have any auto nodes to add?
    auto = self.snap.model.form(prop.type.name)
    if auto is not None:
        buid = s_common.buid((auto.name, norm))
        await self.snap._addNodeFnibOps((auto, norm, info, buid), editatom)
    # does the type think we have special auto nodes to add?
    # ( used only for adds which do not meet the above block )
    for autoname, autovalu in info.get('adds', ()):
        auto = self.snap.model.form(autoname)
        autonorm, autoinfo = auto.type.norm(autovalu)
        buid = s_common.buid((auto.name, autonorm))
        await self.snap._addNodeFnibOps((auto, autovalu, autoinfo, buid), editatom)
    # do we need to set any sub props?
    subs = info.get('subs')
    if subs is not None:
        for subname, subvalu in subs.items():
            full = prop.name + ':' + subname
            subprop = self.form.prop(full)
            if subprop is None:
                continue
            # recurse: sub-properties go through the same set machinery
            await self._setops(full, subvalu, editatom, init=init)
    return True
def samplewise_norm(
x, rescale=None, samplewise_center=False, samplewise_std_normalization=False, channel_index=2, epsilon=1e-7
):
"""Normalize an image by rescale, samplewise centering and samplewise centering in order.
Parameters
-----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
rescale : float
Rescaling factor. If None or 0, no rescaling is applied, otherwise we multiply the data by the value provided (before applying any other transformation)
samplewise_center : boolean
If True, set each sample mean to 0.
samplewise_std_normalization : boolean
If True, divide each input by its std.
epsilon : float
A small position value for dividing standard deviation.
Returns
-------
numpy.array
A processed image.
Examples
--------
>>> x = samplewise_norm(x, samplewise_center=True, samplewise_std_normalization=True)
>>> print(x.shape, np.mean(x), np.std(x))
(160, 176, 1), 0.0, 1.0
Notes
------
When samplewise_center and samplewise_std_normalization are True.
- For greyscale image, every pixels are subtracted and divided by the mean and std of whole image.
- For RGB image, every pixels are subtracted and divided by the mean and std of this pixel i.e. the mean and std of a pixel is 0 and 1.
"""
if rescale:
x *= rescale
if x.shape[channel_index] == 1:
# greyscale
if samplewise_center:
x = x - np.mean(x)
if samplewise_std_normalization:
x = x / np.std(x)
return x
elif x.shape[channel_index] == 3:
# rgb
if samplewise_center:
x = x - np.mean(x, axis=channel_index, keepdims=True)
if samplewise_std_normalization:
x = x / (np.std(x, axis=channel_index, keepdims=True) + epsilon)
return x
else:
raise Exception("Unsupported channels %d" % x.shape[channel_index]) | Normalize an image by rescale, samplewise centering and samplewise centering in order.
Parameters
-----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
rescale : float
Rescaling factor. If None or 0, no rescaling is applied, otherwise we multiply the data by the value provided (before applying any other transformation)
samplewise_center : boolean
If True, set each sample mean to 0.
samplewise_std_normalization : boolean
If True, divide each input by its std.
epsilon : float
A small position value for dividing standard deviation.
Returns
-------
numpy.array
A processed image.
Examples
--------
>>> x = samplewise_norm(x, samplewise_center=True, samplewise_std_normalization=True)
>>> print(x.shape, np.mean(x), np.std(x))
(160, 176, 1), 0.0, 1.0
Notes
------
When samplewise_center and samplewise_std_normalization are True.
- For greyscale image, every pixels are subtracted and divided by the mean and std of whole image.
- For RGB image, every pixels are subtracted and divided by the mean and std of this pixel i.e. the mean and std of a pixel is 0 and 1. | Below is the the instruction that describes the task:
### Input:
Normalize an image by rescale, samplewise centering and samplewise centering in order.
Parameters
-----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
rescale : float
Rescaling factor. If None or 0, no rescaling is applied, otherwise we multiply the data by the value provided (before applying any other transformation)
samplewise_center : boolean
If True, set each sample mean to 0.
samplewise_std_normalization : boolean
If True, divide each input by its std.
epsilon : float
A small position value for dividing standard deviation.
Returns
-------
numpy.array
A processed image.
Examples
--------
>>> x = samplewise_norm(x, samplewise_center=True, samplewise_std_normalization=True)
>>> print(x.shape, np.mean(x), np.std(x))
(160, 176, 1), 0.0, 1.0
Notes
------
When samplewise_center and samplewise_std_normalization are True.
- For greyscale image, every pixels are subtracted and divided by the mean and std of whole image.
- For RGB image, every pixels are subtracted and divided by the mean and std of this pixel i.e. the mean and std of a pixel is 0 and 1.
### Response:
def samplewise_norm(
    x, rescale=None, samplewise_center=False, samplewise_std_normalization=False, channel_index=2, epsilon=1e-7
):
    """Normalize an image by rescale, samplewise centering and samplewise
    std-normalization, in that order.

    Parameters
    -----------
    x : numpy.array
        An image with dimension of [row, col, channel] (default).
    rescale : float
        Rescaling factor. If None or 0, no rescaling is applied, otherwise we
        multiply the data by the value provided (before applying any other
        transformation).
    samplewise_center : boolean
        If True, set each sample mean to 0.
    samplewise_std_normalization : boolean
        If True, divide each input by its std.
    channel_index : int
        Axis holding the channels (default 2, i.e. channels-last).
    epsilon : float
        A small positive value added to the std before dividing, avoiding
        division by zero on constant images.

    Returns
    -------
    numpy.array
        A processed image. The input array is never modified in place.

    Raises
    ------
    ValueError
        If the channel axis holds neither 1 (greyscale) nor 3 (RGB) channels.

    Examples
    --------
    >>> x = samplewise_norm(x, samplewise_center=True, samplewise_std_normalization=True)
    >>> print(x.shape, np.mean(x), np.std(x))
    (160, 176, 1), 0.0, 1.0

    Notes
    ------
    When samplewise_center and samplewise_std_normalization are True.
    - For greyscale image, every pixel is subtracted and divided by the mean and std of the whole image.
    - For RGB image, every pixel is subtracted and divided by the mean and std of that pixel, i.e. the mean and std of a pixel become 0 and 1.
    """
    if rescale:
        # Out-of-place multiply: the previous ``x *= rescale`` mutated the
        # caller's array and failed on integer dtypes with a float factor.
        x = x * rescale
    if x.shape[channel_index] == 1:
        # greyscale: normalize against the statistics of the whole image
        if samplewise_center:
            x = x - np.mean(x)
        if samplewise_std_normalization:
            # epsilon added for consistency with the RGB branch and to
            # avoid division by zero on constant images
            x = x / (np.std(x) + epsilon)
        return x
    elif x.shape[channel_index] == 3:
        # rgb: normalize each pixel across its channel values
        if samplewise_center:
            x = x - np.mean(x, axis=channel_index, keepdims=True)
        if samplewise_std_normalization:
            x = x / (np.std(x, axis=channel_index, keepdims=True) + epsilon)
        return x
    else:
        # ValueError is a subclass of Exception, so callers catching the
        # previous bare ``Exception`` still work.
        raise ValueError("Unsupported channels %d" % x.shape[channel_index])
def _setup_trunk(self, trunk, vlan_id=None):
"""Sets up VLAN trunk and updates the trunk status."""
LOG.info('Binding trunk port: %s.', trunk)
try:
# bind sub_ports to host.
self._trunk_rpc.update_subport_bindings(self._context,
trunk.sub_ports)
vlan_trunk = [s.segmentation_id for s in trunk.sub_ports]
self._set_port_vlan(trunk.port_id, vlan_id, vlan_trunk)
self._trunk_rpc.update_trunk_status(self._context, trunk.id,
t_const.ACTIVE_STATUS)
except Exception:
# something broke
LOG.exception("Failure setting up subports for %s", trunk.port_id)
self._trunk_rpc.update_trunk_status(self._context, trunk.id,
t_const.DEGRADED_STATUS) | Sets up VLAN trunk and updates the trunk status. | Below is the the instruction that describes the task:
### Input:
Sets up VLAN trunk and updates the trunk status.
### Response:
def _setup_trunk(self, trunk, vlan_id=None):
    """Bind a trunk's subports, program its VLANs, and report status.

    On success the trunk is marked ACTIVE; any failure is logged and the
    trunk is marked DEGRADED instead of propagating the exception.
    """
    LOG.info('Binding trunk port: %s.', trunk)
    try:
        # bind sub_ports to host.
        self._trunk_rpc.update_subport_bindings(self._context,
                                                trunk.sub_ports)
        # Program the parent port with the native VLAN plus all subport
        # segmentation ids.
        vlan_trunk = [s.segmentation_id for s in trunk.sub_ports]
        self._set_port_vlan(trunk.port_id, vlan_id, vlan_trunk)
        self._trunk_rpc.update_trunk_status(self._context, trunk.id,
                                            t_const.ACTIVE_STATUS)
    except Exception:
        # something broke
        LOG.exception("Failure setting up subports for %s", trunk.port_id)
        self._trunk_rpc.update_trunk_status(self._context, trunk.id,
                                            t_const.DEGRADED_STATUS)
def clear_input_score_start_range(self):
"""Clears the input score start.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.grading.GradeSystemForm.clear_lowest_numeric_score
if (self.get_input_score_start_range_metadata().is_read_only() or
self.get_input_score_start_range_metadata().is_required()):
raise errors.NoAccess()
self._my_map['inputScoreStartRange'] = self._input_score_start_range_default | Clears the input score start.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.* | Below is the the instruction that describes the task:
### Input:
Clears the input score start.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
### Response:
def clear_input_score_start_range(self):
    """Reset the input score start range to its default value.

    raise: NoAccess - ``Metadata.isRequired()`` or
           ``Metadata.isReadOnly()`` is ``true``
    *compliance: mandatory -- This method must be implemented.*
    """
    metadata = self.get_input_score_start_range_metadata()
    # Read-only or required values may not be cleared.
    if metadata.is_read_only() or metadata.is_required():
        raise errors.NoAccess()
    self._my_map['inputScoreStartRange'] = self._input_score_start_range_default
def dynamic_presence(self):
"""
Determine presence based on bed heating level and end presence
time reported by the api.
Idea originated from Alex Lee Yuk Cheung SmartThings Code.
"""
# self.heating_stats()
if not self.presence:
if self.heating_level > 50:
# Can likely make this better
if not self.now_heating:
self.presence = True
elif self.heating_level - self.target_heating_level >= 8:
self.presence = True
elif self.heating_level > 25:
# Catch rising edge
if self.past_heating_level(0) - self.past_heating_level(1) >= 2 \
and self.past_heating_level(1) - self.past_heating_level(2) >= 2 \
and self.past_heating_level(2) - self.past_heating_level(3) >= 2:
# Values are increasing so we are likely in bed
if not self.now_heating:
self.presence = True
elif self.heating_level - self.target_heating_level >= 8:
self.presence = True
elif self.presence:
if self.heating_level <= 15:
# Failsafe, very slow
self.presence = False
elif self.heating_level < 50:
if self.past_heating_level(0) - self.past_heating_level(1) < 0 \
and self.past_heating_level(1) - self.past_heating_level(2) < 0 \
and self.past_heating_level(2) - self.past_heating_level(3) < 0:
# Values are decreasing so we are likely out of bed
self.presence = False
# Last seen can lag real-time by up to 35min so this is
# mostly a backup to using the heat values.
# seen_delta = datetime.fromtimestamp(time.time()) \
# - datetime.strptime(self.last_seen, '%Y-%m-%dT%H:%M:%S')
# _LOGGER.debug('%s Last seen time delta: %s', self.side,
# seen_delta.total_seconds())
# if self.presence and seen_delta.total_seconds() > 2100:
# self.presence = False
_LOGGER.debug('%s Presence Results: %s', self.side, self.presence) | Determine presence based on bed heating level and end presence
time reported by the api.
Idea originated from Alex Lee Yuk Cheung SmartThings Code. | Below is the the instruction that describes the task:
### Input:
Determine presence based on bed heating level and end presence
time reported by the api.
Idea originated from Alex Lee Yuk Cheung SmartThings Code.
### Response:
def dynamic_presence(self):
    """
    Determine presence based on bed heating level and end presence
    time reported by the api.
    Idea originated from Alex Lee Yuk Cheung SmartThings Code.
    """
    # NOTE(review): the thresholds (50 / 25 / 15, overshoot 8, slope 2)
    # appear to be raw device heating-level units -- confirm against the
    # Eight Sleep API before tuning.
    # self.heating_stats()
    if not self.presence:
        # Not currently marked present: look for evidence someone got in.
        if self.heating_level > 50:
            # Can likely make this better
            if not self.now_heating:
                # Level is high without active heating -> body heat.
                self.presence = True
            elif self.heating_level - self.target_heating_level >= 8:
                # Heating, but level overshoots the target by 8+,
                # which implies extra (body) heat is being added.
                self.presence = True
        elif self.heating_level > 25:
            # Catch rising edge
            if self.past_heating_level(0) - self.past_heating_level(1) >= 2 \
                and self.past_heating_level(1) - self.past_heating_level(2) >= 2 \
                and self.past_heating_level(2) - self.past_heating_level(3) >= 2:
                # Values are increasing so we are likely in bed
                if not self.now_heating:
                    self.presence = True
                elif self.heating_level - self.target_heating_level >= 8:
                    self.presence = True
    # Currently marked present: look for evidence of leaving the bed.
    elif self.presence:
        if self.heating_level <= 15:
            # Failsafe, very slow
            self.presence = False
        elif self.heating_level < 50:
            if self.past_heating_level(0) - self.past_heating_level(1) < 0 \
                and self.past_heating_level(1) - self.past_heating_level(2) < 0 \
                and self.past_heating_level(2) - self.past_heating_level(3) < 0:
                # Values are decreasing so we are likely out of bed
                self.presence = False
        # Last seen can lag real-time by up to 35min so this is
        # mostly a backup to using the heat values.
        # seen_delta = datetime.fromtimestamp(time.time()) \
        #     - datetime.strptime(self.last_seen, '%Y-%m-%dT%H:%M:%S')
        # _LOGGER.debug('%s Last seen time delta: %s', self.side,
        #               seen_delta.total_seconds())
        # if self.presence and seen_delta.total_seconds() > 2100:
        #     self.presence = False
    _LOGGER.debug('%s Presence Results: %s', self.side, self.presence)
def print_generated_python(
var_name: str = _PRINT_GENERATED_PY_VAR_NAME, core_ns_name: str = CORE_NS
) -> bool:
"""Return the value of the `*print-generated-python*` dynamic variable."""
ns_sym = sym.Symbol(var_name, ns=core_ns_name)
return (
Maybe(Var.find(ns_sym))
.map(lambda v: v.value)
.or_else_raise(lambda: RuntimeException(f"Dynamic Var {ns_sym} not bound!"))
    ) | Return the value of the `*print-generated-python*` dynamic variable. | Below is the instruction that describes the task:
### Input:
Return the value of the `*print-generated-python*` dynamic variable.
### Response:
def print_generated_python(
    var_name: str = _PRINT_GENERATED_PY_VAR_NAME, core_ns_name: str = CORE_NS
) -> bool:
    """Return the value of the `*print-generated-python*` dynamic variable.

    Raises ``RuntimeException`` when the Var is not bound in the given
    namespace.
    """
    ns_sym = sym.Symbol(var_name, ns=core_ns_name)
    # Keep the Maybe chain: an unbound Var AND a bound-to-None value
    # both surface through or_else_raise.
    maybe_value = Maybe(Var.find(ns_sym)).map(lambda var: var.value)
    return maybe_value.or_else_raise(
        lambda: RuntimeException(f"Dynamic Var {ns_sym} not bound!")
    )
def _initialize_initial_state_fluents(self):
'''Returns the initial state-fluents instantiated.'''
state_fluents = self.rddl.domain.state_fluents
initializer = self.rddl.instance.init_state
self.initial_state_fluents = self._initialize_pvariables(
state_fluents,
self.rddl.domain.state_fluent_ordering,
initializer)
        return self.initial_state_fluents | Returns the initial state-fluents instantiated. | Below is the instruction that describes the task:
### Input:
Returns the initial state-fluents instantiated.
### Response:
def _initialize_initial_state_fluents(self):
    """Instantiate the initial state-fluents and cache them on ``self``.

    Returns:
        The instantiated initial state-fluents.
    """
    domain = self.rddl.domain
    # Pull the initializer from the instance section of the RDDL model.
    self.initial_state_fluents = self._initialize_pvariables(
        domain.state_fluents,
        domain.state_fluent_ordering,
        self.rddl.instance.init_state)
    return self.initial_state_fluents
async def _async_loop(self, urls):
"""Asynchronous internal method used to request multiple URLs
Args:
urls (list): URLs to fetch
Returns:
responses (obj): All URL requests' response coroutines
"""
results = []
async with aiohttp.ClientSession(
connector=aiohttp.TCPConnector(ssl=False)
) as session:
for url in urls:
result = asyncio.ensure_future(self._get_async(url, session))
results.append(result)
responses = await asyncio.gather(*results)
return responses | Asynchronous internal method used to request multiple URLs
Args:
urls (list): URLs to fetch
Returns:
responses (obj): All URL requests' response coroutines | Below is the the instruction that describes the task:
### Input:
Asynchronous internal method used to request multiple URLs
Args:
urls (list): URLs to fetch
Returns:
responses (obj): All URL requests' response coroutines
### Response:
async def _async_loop(self, urls):
    """Fetch several URLs concurrently over one shared session.

    Args:
        urls (list): URLs to fetch
    Returns:
        responses (obj): gathered response objects, one per URL
    """
    # A single session (SSL verification disabled) is shared by all
    # requests and closed once every fetch has completed.
    connector = aiohttp.TCPConnector(ssl=False)
    async with aiohttp.ClientSession(connector=connector) as session:
        tasks = [asyncio.ensure_future(self._get_async(url, session))
                 for url in urls]
        return await asyncio.gather(*tasks)
def get_option_choices(opt_name, opt_value, default_value, all_choices):
"""
Generate possible choices for the option `opt_name`
limited to `opt_value` value with default value
as `default_value`
"""
choices = []
if isinstance(opt_value, six.string_types):
choices = [opt_value]
elif isinstance(opt_value, (list, tuple)):
choices = list(opt_value)
elif opt_value is None:
choices = default_value
else:
raise InvalidOption('Option %s has invalid'
' value: %s' % (opt_name, opt_value))
if 'all' in choices:
choices = all_choices
for item in choices:
if item not in all_choices:
raise InvalidOption('Choices of option %s contains invalid'
' item: %s' % (opt_name, item))
return choices | Generate possible choices for the option `opt_name`
limited to `opt_value` value with default value
as `default_value` | Below is the the instruction that describes the task:
### Input:
Generate possible choices for the option `opt_name`
limited to `opt_value` value with default value
as `default_value`
### Response:
def get_option_choices(opt_name, opt_value, default_value, all_choices):
    """
    Resolve the value of option `opt_name` into an explicit choice list.

    A string becomes a one-item list, a list/tuple is copied, and ``None``
    falls back to `default_value`.  The special item ``'all'`` expands the
    result to `all_choices`.  Raises ``InvalidOption`` for any other value
    type or for an item not present in `all_choices`.
    """
    if opt_value is None:
        choices = default_value
    elif isinstance(opt_value, six.string_types):
        choices = [opt_value]
    elif isinstance(opt_value, (list, tuple)):
        choices = list(opt_value)
    else:
        raise InvalidOption('Option %s has invalid'
                            ' value: %s' % (opt_name, opt_value))
    if 'all' in choices:
        choices = all_choices
    # Reject the first item that is not a recognised choice.
    for item in choices:
        if item in all_choices:
            continue
        raise InvalidOption('Choices of option %s contains invalid'
                            ' item: %s' % (opt_name, item))
    return choices
def logs(ctx, past, follow, hide_time):
"""Get job logs.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples:
\b
```bash
$ polyaxon job -j 2 logs
```
\b
```bash
$ polyaxon job logs
```
"""
user, project_name, _job = get_job_or_local(ctx.obj.get('project'), ctx.obj.get('job'))
if past:
try:
response = PolyaxonClient().job.logs(
user, project_name, _job, stream=False)
get_logs_handler(handle_job_info=False,
show_timestamp=not hide_time,
stream=False)(response.content.decode().split('\n'))
print()
if not follow:
return
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
if not follow:
Printer.print_error('Could not get logs for job `{}`.'.format(_job))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
try:
PolyaxonClient().job.logs(
user,
project_name,
_job,
message_handler=get_logs_handler(handle_job_info=False, show_timestamp=not hide_time))
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not get logs for job `{}`.'.format(_job))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1) | Get job logs.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples:
\b
```bash
$ polyaxon job -j 2 logs
```
\b
```bash
$ polyaxon job logs
``` | Below is the the instruction that describes the task:
### Input:
Get job logs.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples:
\b
```bash
$ polyaxon job -j 2 logs
```
\b
```bash
$ polyaxon job logs
```
### Response:
def logs(ctx, past, follow, hide_time):
    """Get job logs.
    Uses [Caching](/references/polyaxon-cli/#caching)
    Examples:
    \b
    ```bash
    $ polyaxon job -j 2 logs
    ```
    \b
    ```bash
    $ polyaxon job logs
    ```
    """
    # Resolve user/project/job from the CLI context, falling back to the
    # locally cached values when not passed explicitly.
    user, project_name, _job = get_job_or_local(ctx.obj.get('project'), ctx.obj.get('job'))
    if past:
        try:
            # Fetch historical logs in a single non-streaming response.
            response = PolyaxonClient().job.logs(
                user, project_name, _job, stream=False)
            get_logs_handler(handle_job_info=False,
                             show_timestamp=not hide_time,
                             stream=False)(response.content.decode().split('\n'))
            print()
            # Without --follow there is nothing more to do.
            if not follow:
                return
        except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
            # When following, a failure to fetch past logs is non-fatal:
            # fall through and try the streaming endpoint below.
            if not follow:
                Printer.print_error('Could not get logs for job `{}`.'.format(_job))
                Printer.print_error('Error message `{}`.'.format(e))
                sys.exit(1)
    # Stream live logs, feeding each message through the display handler.
    try:
        PolyaxonClient().job.logs(
            user,
            project_name,
            _job,
            message_handler=get_logs_handler(handle_job_info=False, show_timestamp=not hide_time))
    except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
        Printer.print_error('Could not get logs for job `{}`.'.format(_job))
        Printer.print_error('Error message `{}`.'.format(e))
        sys.exit(1)
def split_Text( text, file_name, verbose = True ):
''' Tokenizes the *text* (from *file_name*) into sentences, and if the number of
sentences exceeds *max_sentences*, splits the text into smaller texts.
Returns a list containing the original text (if no splitting was required),
or a list containing results of the splitting (smaller texts);
'''
if verbose:
print(' processing '+file_name+' ... ', end="" )
# Tokenize text into sentences
start = timer()
text = text.tokenize_sentences()
all_sentences = len(text[SENTENCES])
end = timer()
if verbose:
print(' (tok time: '+format_time( end-start )+')', end="" )
if all_sentences > max_sentences:
# Acquire spans of length *max_sentences* from the text
start = timer()
i = 0
spans = []
len_total = 0
while i < all_sentences:
startSent = text[SENTENCES][i]
endSent = text[SENTENCES][min(i+(max_sentences-1), all_sentences-1)]
span = (startSent[START], endSent[END])
len_total += (span[1]-span[0])
spans.append(span)
i += max_sentences
# Divide the text into spans
text_spans = text.texts_from_spans(spans)
assert len(text.text) >= len_total, '(!) Total spans_len must be =< than text_len: '+str(len_total)+'/'+str(len(text.text))
new_texts = []
for i, small_text in enumerate( text_spans ):
newText = Text( small_text )
for key in text.keys():
if key != TEXT and key != SENTENCES and key != PARAGRAPHS:
newText[key] = text[key]
newText['_text_split_id'] = i
newText['_text_split_origin'] = str(spans[i]) # Convert it to string; Otherwise, split_by(*) may mistakenly consider
# it a layer and may run into error while trying to split it;
newText['_text_split_file'] = file_name
#print( json.dumps(newText) )
new_texts.append( newText )
end = timer()
if verbose:
print(' (split time: '+format_time( end-start )+')', end="" )
print(' (sents: '+str(all_sentences)+', new_texts:'+str(len(new_texts))+')', end="")
print()
return new_texts
else:
if verbose:
print(' (sents: '+str(all_sentences)+', no_split)', end=" \n")
return [text] | Tokenizes the *text* (from *file_name*) into sentences, and if the number of
sentences exceeds *max_sentences*, splits the text into smaller texts.
Returns a list containing the original text (if no splitting was required),
or a list containing results of the splitting (smaller texts); | Below is the the instruction that describes the task:
### Input:
Tokenizes the *text* (from *file_name*) into sentences, and if the number of
sentences exceeds *max_sentences*, splits the text into smaller texts.
Returns a list containing the original text (if no splitting was required),
or a list containing results of the splitting (smaller texts);
### Response:
def split_Text( text, file_name, verbose = True ):
    ''' Tokenizes the *text* (from *file_name*) into sentences, and if the number of
        sentences exceeds *max_sentences*, splits the text into smaller texts.
        Returns a list containing the original text (if no splitting was required),
        or a list containing results of the splitting (smaller texts);
    '''
    # NOTE(review): relies on module-level names: max_sentences, timer,
    # format_time, Text, and the layer constants SENTENCES/START/END/TEXT/
    # PARAGRAPHS (presumably from estnltk) -- confirm at module scope.
    if verbose:
        print(' processing '+file_name+' ... ', end="" )
    # Tokenize text into sentences
    start = timer()
    text = text.tokenize_sentences()
    all_sentences = len(text[SENTENCES])
    end = timer()
    if verbose:
        print(' (tok time: '+format_time( end-start )+')', end="" )
    if all_sentences > max_sentences:
        # Acquire spans of length *max_sentences* from the text
        start = timer()
        i = 0
        spans = []
        len_total = 0
        while i < all_sentences:
            # Each span runs from the first sentence of the batch to the
            # last one (clamped to the final sentence of the text).
            startSent = text[SENTENCES][i]
            endSent = text[SENTENCES][min(i+(max_sentences-1), all_sentences-1)]
            span = (startSent[START], endSent[END])
            len_total += (span[1]-span[0])
            spans.append(span)
            i += max_sentences
        # Divide the text into spans
        text_spans = text.texts_from_spans(spans)
        # Sanity check: the spans must not cover more characters than exist.
        assert len(text.text) >= len_total, '(!) Total spans_len must be =< than text_len: '+str(len_total)+'/'+str(len(text.text))
        new_texts = []
        for i, small_text in enumerate( text_spans ):
            newText = Text( small_text )
            # Carry over all metadata except the layers invalidated by the
            # split (raw text, sentence and paragraph annotations).
            for key in text.keys():
                if key != TEXT and key != SENTENCES and key != PARAGRAPHS:
                    newText[key] = text[key]
            # Record provenance so the pieces can be traced back later.
            newText['_text_split_id'] = i
            newText['_text_split_origin'] = str(spans[i]) # Convert it to string; Otherwise, split_by(*) may mistakenly consider
                                                          # it a layer and may run into error while trying to split it;
            newText['_text_split_file'] = file_name
            #print( json.dumps(newText) )
            new_texts.append( newText )
        end = timer()
        if verbose:
            print(' (split time: '+format_time( end-start )+')', end="" )
            print(' (sents: '+str(all_sentences)+', new_texts:'+str(len(new_texts))+')', end="")
            print()
        return new_texts
    else:
        # Small enough: return the (now sentence-tokenized) text as-is.
        if verbose:
            print(' (sents: '+str(all_sentences)+', no_split)', end=" \n")
        return [text]
def check_schema(self):
"""Check the schema exists and matches configuration"""
if self.valid_schema:
return
config = self.config
metadata = self.metadata()
if 'current_version' not in metadata:
raise GaugedSchemaError('Gauged schema not found, '
'try a gauged.sync()')
if metadata['current_version'] != Gauged.VERSION:
msg = 'The schema is version %s while this Gauged is version %s. '
msg += 'Try upgrading Gauged and/or running gauged_migrate.py'
msg = msg % (metadata['current_version'], Gauged.VERSION)
raise GaugedVersionMismatchError(msg)
expected_block_size = '%s/%s' % (config.block_size, config.resolution)
block_size = '%s/%s' % (metadata['block_size'], metadata['resolution'])
if block_size != expected_block_size:
msg = 'Expected %s and got %s' % (expected_block_size, block_size)
warn(msg, GaugedBlockSizeMismatch)
self.valid_schema = True | Check the schema exists and matches configuration | Below is the the instruction that describes the task:
### Input:
Check the schema exists and matches configuration
### Response:
def check_schema(self):
    """Validate the Gauged schema version and block configuration.

    A no-op after the first successful validation.  Raises on a missing
    or version-mismatched schema; a block-size mismatch only warns.
    """
    if self.valid_schema:
        return
    metadata = self.metadata()
    if 'current_version' not in metadata:
        raise GaugedSchemaError(
            'Gauged schema not found, try a gauged.sync()')
    schema_version = metadata['current_version']
    if schema_version != Gauged.VERSION:
        raise GaugedVersionMismatchError(
            'The schema is version %s while this Gauged is version %s. '
            'Try upgrading Gauged and/or running gauged_migrate.py'
            % (schema_version, Gauged.VERSION))
    config = self.config
    expected_block_size = '%s/%s' % (config.block_size, config.resolution)
    actual_block_size = '%s/%s' % (metadata['block_size'],
                                   metadata['resolution'])
    if actual_block_size != expected_block_size:
        warn('Expected %s and got %s' % (expected_block_size,
                                         actual_block_size),
             GaugedBlockSizeMismatch)
    self.valid_schema = True
def post_url(self):
"""
Determine which page this post lives on within the topic
and return link to anchor within that page
"""
topic = self.topic
topic_page = topic.post_set.filter(id__lt=self.id).count() / get_paginate_by() + 1
return "{0}page{1}/#post-{2}".format(topic.get_short_url(), topic_page, self.id) | Determine which page this post lives on within the topic
        and return link to anchor within that page | Below is the instruction that describes the task:
### Input:
Determine which page this post lives on within the topic
and return link to anchor within that page
### Response:
def post_url(self):
    """
    Determine which page this post lives on within the topic
    and return link to anchor within that page
    """
    topic = self.topic
    # Count the posts that precede this one to find its page.  Use floor
    # division: under Python 3, true division would make topic_page a
    # float and produce broken URLs such as 'page1.5/' (identical result
    # to '/' under Python 2 integer division).
    topic_page = topic.post_set.filter(id__lt=self.id).count() // get_paginate_by() + 1
    return "{0}page{1}/#post-{2}".format(topic.get_short_url(), topic_page, self.id)
def create_app_factory(app_name, config_loader=None,
extension_entry_points=None, extensions=None,
blueprint_entry_points=None, blueprints=None,
converter_entry_points=None, converters=None,
wsgi_factory=None, **app_kwargs):
"""Create a Flask application factory.
The application factory will load Flask extensions and blueprints specified
using both entry points and directly in the arguments. Loading order of
entry points are not guaranteed and can happen in any order.
:param app_name: Flask application name.
:param config_loader: Callable which will be invoked on application
creation in order to load the Flask configuration. See example below.
:param extension_entry_points: List of entry points, which specifies Flask
extensions that will be initialized only by passing in the Flask
application object
:param extensions: List of Flask extensions that can be initialized only by
passing in the Flask application object.
:param blueprint_entry_points: List of entry points, which specifies
Blueprints that will be registered on the Flask application.
:param blueprints: List of Blueprints that will be registered on the
Flask application.
:param converter_entry_points: List of entry points, which specifies
Werkzeug URL map converters that will be added to
``app.url_map.converters``.
:param converters: Map of Werkzeug URL map converter classes that will
be added to ``app.url_map.converters``.
:param wsgi_factory: A callable that will be passed the Flask application
object in order to overwrite the default WSGI application (e.g. to
install ``DispatcherMiddleware``).
:param app_kwargs: Keyword arguments passed to :py:meth:`base_app`.
:returns: Flask application factory.
Example of a configuration loader:
.. code-block:: python
def my_config_loader(app, **kwargs):
app.config.from_module('mysite.config')
app.config.update(**kwargs)
.. note::
`Invenio-Config <https://pythonhosted.org/invenio-config>`_ provides a
factory creating default configuration loader (see
:func:`invenio_config.utils.create_config_loader`) which is sufficient
for most cases.
Example of a WSGI factory:
.. code-block:: python
def my_wsgi_factory(app):
return DispatcherMiddleware(app.wsgi_app, {'/api': api_app})
.. versionadded: 1.0.0
"""
def _create_app(**kwargs):
app = base_app(app_name, **app_kwargs)
app_created.send(_create_app, app=app)
debug = kwargs.get('debug')
if debug is not None:
app.debug = debug
# Load configuration
if config_loader:
config_loader(app, **kwargs)
# Load URL converters.
converter_loader(
app,
entry_points=converter_entry_points,
modules=converters,
)
# Load application based on entrypoints.
app_loader(
app,
entry_points=extension_entry_points,
modules=extensions,
)
# Load blueprints
blueprint_loader(
app,
entry_points=blueprint_entry_points,
modules=blueprints,
)
app_loaded.send(_create_app, app=app)
# Replace WSGI application using factory if provided (e.g. to install
# WSGI middleware).
if wsgi_factory:
app.wsgi_app = wsgi_factory(app, **kwargs)
return app
return _create_app | Create a Flask application factory.
The application factory will load Flask extensions and blueprints specified
using both entry points and directly in the arguments. Loading order of
entry points are not guaranteed and can happen in any order.
:param app_name: Flask application name.
:param config_loader: Callable which will be invoked on application
creation in order to load the Flask configuration. See example below.
:param extension_entry_points: List of entry points, which specifies Flask
extensions that will be initialized only by passing in the Flask
application object
:param extensions: List of Flask extensions that can be initialized only by
passing in the Flask application object.
:param blueprint_entry_points: List of entry points, which specifies
Blueprints that will be registered on the Flask application.
:param blueprints: List of Blueprints that will be registered on the
Flask application.
:param converter_entry_points: List of entry points, which specifies
Werkzeug URL map converters that will be added to
``app.url_map.converters``.
:param converters: Map of Werkzeug URL map converter classes that will
be added to ``app.url_map.converters``.
:param wsgi_factory: A callable that will be passed the Flask application
object in order to overwrite the default WSGI application (e.g. to
install ``DispatcherMiddleware``).
:param app_kwargs: Keyword arguments passed to :py:meth:`base_app`.
:returns: Flask application factory.
Example of a configuration loader:
.. code-block:: python
def my_config_loader(app, **kwargs):
app.config.from_module('mysite.config')
app.config.update(**kwargs)
.. note::
`Invenio-Config <https://pythonhosted.org/invenio-config>`_ provides a
factory creating default configuration loader (see
:func:`invenio_config.utils.create_config_loader`) which is sufficient
for most cases.
Example of a WSGI factory:
.. code-block:: python
def my_wsgi_factory(app):
return DispatcherMiddleware(app.wsgi_app, {'/api': api_app})
.. versionadded: 1.0.0 | Below is the the instruction that describes the task:
### Input:
Create a Flask application factory.
The application factory will load Flask extensions and blueprints specified
using both entry points and directly in the arguments. Loading order of
entry points are not guaranteed and can happen in any order.
:param app_name: Flask application name.
:param config_loader: Callable which will be invoked on application
creation in order to load the Flask configuration. See example below.
:param extension_entry_points: List of entry points, which specifies Flask
extensions that will be initialized only by passing in the Flask
application object
:param extensions: List of Flask extensions that can be initialized only by
passing in the Flask application object.
:param blueprint_entry_points: List of entry points, which specifies
Blueprints that will be registered on the Flask application.
:param blueprints: List of Blueprints that will be registered on the
Flask application.
:param converter_entry_points: List of entry points, which specifies
Werkzeug URL map converters that will be added to
``app.url_map.converters``.
:param converters: Map of Werkzeug URL map converter classes that will
be added to ``app.url_map.converters``.
:param wsgi_factory: A callable that will be passed the Flask application
object in order to overwrite the default WSGI application (e.g. to
install ``DispatcherMiddleware``).
:param app_kwargs: Keyword arguments passed to :py:meth:`base_app`.
:returns: Flask application factory.
Example of a configuration loader:
.. code-block:: python
def my_config_loader(app, **kwargs):
app.config.from_module('mysite.config')
app.config.update(**kwargs)
.. note::
`Invenio-Config <https://pythonhosted.org/invenio-config>`_ provides a
factory creating default configuration loader (see
:func:`invenio_config.utils.create_config_loader`) which is sufficient
for most cases.
Example of a WSGI factory:
.. code-block:: python
def my_wsgi_factory(app):
return DispatcherMiddleware(app.wsgi_app, {'/api': api_app})
.. versionadded: 1.0.0
### Response:
def create_app_factory(app_name, config_loader=None,
                       extension_entry_points=None, extensions=None,
                       blueprint_entry_points=None, blueprints=None,
                       converter_entry_points=None, converters=None,
                       wsgi_factory=None, **app_kwargs):
    """Create a Flask application factory.
    The application factory will load Flask extensions and blueprints specified
    using both entry points and directly in the arguments. Loading order of
    entry points are not guaranteed and can happen in any order.
    :param app_name: Flask application name.
    :param config_loader: Callable which will be invoked on application
        creation in order to load the Flask configuration. See example below.
    :param extension_entry_points: List of entry points, which specifies Flask
        extensions that will be initialized only by passing in the Flask
        application object
    :param extensions: List of Flask extensions that can be initialized only by
        passing in the Flask application object.
    :param blueprint_entry_points: List of entry points, which specifies
        Blueprints that will be registered on the Flask application.
    :param blueprints: List of Blueprints that will be registered on the
        Flask application.
    :param converter_entry_points: List of entry points, which specifies
        Werkzeug URL map converters that will be added to
        ``app.url_map.converters``.
    :param converters: Map of Werkzeug URL map converter classes that will
        be added to ``app.url_map.converters``.
    :param wsgi_factory: A callable that will be passed the Flask application
        object in order to overwrite the default WSGI application (e.g. to
        install ``DispatcherMiddleware``).
    :param app_kwargs: Keyword arguments passed to :py:meth:`base_app`.
    :returns: Flask application factory.
    Example of a configuration loader:
    .. code-block:: python
        def my_config_loader(app, **kwargs):
            app.config.from_module('mysite.config')
            app.config.update(**kwargs)
    .. note::
        `Invenio-Config <https://pythonhosted.org/invenio-config>`_ provides a
        factory creating default configuration loader (see
        :func:`invenio_config.utils.create_config_loader`) which is sufficient
        for most cases.
    Example of a WSGI factory:
    .. code-block:: python
        def my_wsgi_factory(app):
            return DispatcherMiddleware(app.wsgi_app, {'/api': api_app})
    .. versionadded: 1.0.0
    """
    def _create_app(**kwargs):
        # Build the bare application, then announce it so subscribers can
        # hook in before any configuration or extension loading happens.
        app = base_app(app_name, **app_kwargs)
        app_created.send(_create_app, app=app)
        # An explicit ``debug`` kwarg overrides the configured default.
        debug = kwargs.get('debug')
        if debug is not None:
            app.debug = debug
        # Load configuration
        if config_loader:
            config_loader(app, **kwargs)
        # Load URL converters.
        # Converters are registered before extensions/blueprints so that
        # URL rules defined by them can already use the converters.
        converter_loader(
            app,
            entry_points=converter_entry_points,
            modules=converters,
        )
        # Load application based on entrypoints.
        app_loader(
            app,
            entry_points=extension_entry_points,
            modules=extensions,
        )
        # Load blueprints
        blueprint_loader(
            app,
            entry_points=blueprint_entry_points,
            modules=blueprints,
        )
        # Signal that the app is fully assembled.
        app_loaded.send(_create_app, app=app)
        # Replace WSGI application using factory if provided (e.g. to install
        # WSGI middleware).
        if wsgi_factory:
            app.wsgi_app = wsgi_factory(app, **kwargs)
        return app
    return _create_app
def counter_multi(self, kvs, initial=None, delta=1, ttl=0):
"""Perform counter operations on multiple items
:param kvs: Keys to operate on. See below for more options
:param initial: Initial value to use for all keys.
:param delta: Delta value for all keys.
:param ttl: Expiration value to use for all keys
:return: A :class:`~.MultiResult` containing :class:`~.ValueResult`
values
The `kvs` can be a:
- Iterable of keys
.. code-block:: python
cb.counter_multi((k1, k2))
- A dictionary mapping a key to its delta
.. code-block:: python
cb.counter_multi({
k1: 42,
k2: 99
})
- A dictionary mapping a key to its additional options
.. code-block:: python
cb.counter_multi({
k1: {'delta': 42, 'initial': 9, 'ttl': 300},
k2: {'delta': 99, 'initial': 4, 'ttl': 700}
})
When using a dictionary, you can override settings for each key on
a per-key basis (for example, the initial value). Global settings
(global here means something passed as a parameter to the method)
will take effect for those values which do not have a given option
specified.
"""
return _Base.counter_multi(self, kvs, initial=initial, delta=delta,
ttl=ttl) | Perform counter operations on multiple items
:param kvs: Keys to operate on. See below for more options
:param initial: Initial value to use for all keys.
:param delta: Delta value for all keys.
:param ttl: Expiration value to use for all keys
:return: A :class:`~.MultiResult` containing :class:`~.ValueResult`
values
The `kvs` can be a:
- Iterable of keys
.. code-block:: python
cb.counter_multi((k1, k2))
- A dictionary mapping a key to its delta
.. code-block:: python
cb.counter_multi({
k1: 42,
k2: 99
})
- A dictionary mapping a key to its additional options
.. code-block:: python
cb.counter_multi({
k1: {'delta': 42, 'initial': 9, 'ttl': 300},
k2: {'delta': 99, 'initial': 4, 'ttl': 700}
})
When using a dictionary, you can override settings for each key on
a per-key basis (for example, the initial value). Global settings
(global here means something passed as a parameter to the method)
will take effect for those values which do not have a given option
specified. | Below is the the instruction that describes the task:
### Input:
Perform counter operations on multiple items
:param kvs: Keys to operate on. See below for more options
:param initial: Initial value to use for all keys.
:param delta: Delta value for all keys.
:param ttl: Expiration value to use for all keys
:return: A :class:`~.MultiResult` containing :class:`~.ValueResult`
values
The `kvs` can be a:
- Iterable of keys
.. code-block:: python
cb.counter_multi((k1, k2))
- A dictionary mapping a key to its delta
.. code-block:: python
cb.counter_multi({
k1: 42,
k2: 99
})
- A dictionary mapping a key to its additional options
.. code-block:: python
cb.counter_multi({
k1: {'delta': 42, 'initial': 9, 'ttl': 300},
k2: {'delta': 99, 'initial': 4, 'ttl': 700}
})
When using a dictionary, you can override settings for each key on
a per-key basis (for example, the initial value). Global settings
(global here means something passed as a parameter to the method)
will take effect for those values which do not have a given option
specified.
### Response:
def counter_multi(self, kvs, initial=None, delta=1, ttl=0):
    """Atomically adjust the counters for several keys in one call.

    ``kvs`` may be given three ways:

    * an iterable of keys, e.g. ``cb.counter_multi((k1, k2))``;
    * a dict mapping each key to its own delta, e.g.
      ``cb.counter_multi({k1: 42, k2: 99})``;
    * a dict mapping each key to a dict of per-key options, e.g.
      ``cb.counter_multi({k1: {'delta': 42, 'initial': 9, 'ttl': 300},
      k2: {'delta': 99, 'initial': 4, 'ttl': 700}})``.

    Per-key options take precedence over the method-level defaults
    below; the defaults apply to any key that does not override them.

    :param kvs: Keys to operate on (see above).
    :param initial: Default initial value for keys without their own.
    :param delta: Default delta for keys without their own.
    :param ttl: Default expiration for keys without their own.
    :return: A :class:`~.MultiResult` containing :class:`~.ValueResult`
        values
    """
    # Forward the method-level defaults to the base implementation.
    defaults = {'initial': initial, 'delta': delta, 'ttl': ttl}
    return _Base.counter_multi(self, kvs, **defaults)
def from_file(cls, path=None):
"""Read a config file and instantiate the RCParser.
Create new :class:`configparser.ConfigParser` for the given **path**
and instantiate the :class:`RCParser` with the ConfigParser as
:attr:`config` attribute.
If the **path** doesn't exist, raise :exc:`ConfigFileError`.
Otherwise return a new :class:`RCParser` instance.
:param path:
Optional path to the config file to parse.
If not given, use ``'~/.pypirc'``.
"""
path = path or cls.CONFIG_PATH
if not os.path.exists(path):
error = 'Config file not found: {0!r}'.format(path)
raise ConfigFileError(error)
config = read_config(path)
return cls(config) | Read a config file and instantiate the RCParser.
Create new :class:`configparser.ConfigParser` for the given **path**
and instantiate the :class:`RCParser` with the ConfigParser as
:attr:`config` attribute.
If the **path** doesn't exist, raise :exc:`ConfigFileError`.
Otherwise return a new :class:`RCParser` instance.
:param path:
Optional path to the config file to parse.
If not given, use ``'~/.pypirc'``. | Below is the the instruction that describes the task:
### Input:
Read a config file and instantiate the RCParser.
Create new :class:`configparser.ConfigParser` for the given **path**
and instantiate the :class:`RCParser` with the ConfigParser as
:attr:`config` attribute.
If the **path** doesn't exist, raise :exc:`ConfigFileError`.
Otherwise return a new :class:`RCParser` instance.
:param path:
Optional path to the config file to parse.
If not given, use ``'~/.pypirc'``.
### Response:
def from_file(cls, path=None):
    """Build an :class:`RCParser` from a config file on disk.

    Parses the file at *path* (default: ``cls.CONFIG_PATH``, i.e.
    ``'~/.pypirc'``) into a :class:`configparser.ConfigParser` and
    wraps it in a new :class:`RCParser`.

    :param path: Optional path of the config file to parse.
    :raises ConfigFileError: if the file does not exist.
    :return: A new :class:`RCParser` instance.
    """
    target = path if path else cls.CONFIG_PATH
    if not os.path.exists(target):
        raise ConfigFileError('Config file not found: {0!r}'.format(target))
    return cls(read_config(target))
def _get_structure(self):
"""
Get the structure we are going to work with.
:return: The structure we have to work with.
:rtype: dict
"""
# We initiate an empty variable which is going to save the location of
# file we are going to download.
structure_file = ""
# We initiate the variable which will save the request instance.
req = ""
if PyFunceble.path.isfile(self.structure):
# The structure path file exist.
# We set it as the destination file.
structure_file = self.structure
elif PyFunceble.path.isfile(self.base + "dir_structure_production.json"):
# * The structure path file does not exist.
# but
# * The production structure path file exist.
# We set it as the destination file
structure_file = self.base + "dir_structure_production.json"
else:
# * The structure path file does not exist.
# and
# * The production structure path file does not exist.
if "dev" not in PyFunceble.VERSION:
# `dev` is not into the local version name.
# We get the production file from the master branch.
req = PyFunceble.requests.get(
PyFunceble.LINKS["dir_structure"].replace("dev", "master")
)
else:
# `dev` is into the local version name.
# We get the production file from the dev branch.
req = PyFunceble.requests.get(
PyFunceble.LINKS["dir_structure"].replace("master", "dev")
)
if structure_file.endswith("_production.json"):
# The destination is the production file.
# And we return the updated the structure from the last read file.
# (with the names from the configuration file).
return self._update_structure_from_config(
Dict().from_json(File(structure_file).read())
)
# The destination is not the production file.
if structure_file.endswith(".json"):
# The destination ends with `.json`.
# And we return the updated the structure from the given file.
# (with the names from the configuration file).
return self._update_structure_from_config(
Dict().from_json(File(structure_file).read())
)
# The destination does not ends with `.json`.
# We return the updated the structure from the link we previously got.
# (with the names from the configuration file).
return self._update_structure_from_config(Dict().from_json(req.text)) | Get the structure we are going to work with.
:return: The structure we have to work with.
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Get the structure we are going to work with.
:return: The structure we have to work with.
:rtype: dict
### Response:
def _get_structure(self):
    """
    Get the structure we are going to work with.

    Prefers a local structure file; only when none exists do we fetch
    the production structure from the repository branch matching the
    local version.

    :return: The structure we have to work with.
    :rtype: dict
    """
    # Location of the local structure file we are going to read (if any).
    structure_file = ""
    # Request instance used only when we have to download the structure.
    req = ""
    if PyFunceble.path.isfile(self.structure):
        # The structure path file exists: use it as the source.
        structure_file = self.structure
    elif PyFunceble.path.isfile(self.base + "dir_structure_production.json"):
        # No structure file, but the local production structure file
        # exists: fall back to it.
        structure_file = self.base + "dir_structure_production.json"
    else:
        # Neither local file exists: download the production file from
        # the branch matching the local version (`master` vs `dev`).
        if "dev" not in PyFunceble.VERSION:
            req = PyFunceble.requests.get(
                PyFunceble.LINKS["dir_structure"].replace("dev", "master")
            )
        else:
            req = PyFunceble.requests.get(
                PyFunceble.LINKS["dir_structure"].replace("master", "dev")
            )
    if structure_file.endswith(".json"):
        # A local file was selected above (every selectable local file
        # ends with `.json`). Parse it and update the structure with
        # the names from the configuration file.
        # NOTE: the previous implementation special-cased
        # "_production.json" first, but both branches were identical,
        # so they are merged into this single check.
        return self._update_structure_from_config(
            Dict().from_json(File(structure_file).read())
        )
    # No local file: parse the structure we downloaded instead.
    # (with the names from the configuration file).
    return self._update_structure_from_config(Dict().from_json(req.text))
def log_error(self, callback, error=None):
""" Log the error that occurred when running the given callback. """
print("Uncaught error during callback: {}".format(callback))
print("Error: {}".format(error)) | Log the error that occurred when running the given callback. | Below is the the instruction that describes the task:
### Input:
Log the error that occurred when running the given callback.
### Response:
def log_error(self, callback, error=None):
    """Report an error raised while running *callback* on stdout."""
    for message in ("Uncaught error during callback: {}".format(callback),
                    "Error: {}".format(error)):
        print(message)
def get(self, *args, **kwargs):
"""
An interface for get requests that handles errors more gracefully to
prevent data loss
"""
try:
req_func = self.session.get if self.session else requests.get
req = req_func(*args, **kwargs)
req.raise_for_status()
self.failed_last = False
return req
except requests.exceptions.RequestException as e:
self.log_error(e)
for i in range(1, self.num_retries):
sleep_time = self.retry_rate * i
self.log_function("Retrying in %s seconds" % sleep_time)
self._sleep(sleep_time)
try:
req = requests.get(*args, **kwargs)
req.raise_for_status()
self.log_function("New request successful")
return req
except requests.exceptions.RequestException:
self.log_function("New request failed")
# Allows for the api to ignore one potentially bad request
if not self.failed_last:
self.failed_last = True
raise ApiError(e)
else:
raise FatalApiError(e) | An interface for get requests that handles errors more gracefully to
prevent data loss | Below is the the instruction that describes the task:
### Input:
An interface for get requests that handles errors more gracefully to
prevent data loss
### Response:
def get(self, *args, **kwargs):
    """
    An interface for get requests that handles errors more gracefully to
    prevent data loss.

    Issues the request (through ``self.session`` when one is set) and,
    on failure, retries up to ``self.num_retries - 1`` times with a
    linearly increasing back-off of ``self.retry_rate * i`` seconds.

    :raises ApiError: when all attempts fail and the previous call
        succeeded (lets the caller ignore one potentially bad request).
    :raises FatalApiError: when the previous call also failed.
    """
    # Honor the configured session (cookies, auth, headers) if present.
    req_func = self.session.get if self.session else requests.get
    try:
        req = req_func(*args, **kwargs)
        req.raise_for_status()
        self.failed_last = False
        return req
    except requests.exceptions.RequestException as e:
        self.log_error(e)
        for i in range(1, self.num_retries):
            sleep_time = self.retry_rate * i
            self.log_function("Retrying in %s seconds" % sleep_time)
            self._sleep(sleep_time)
            try:
                # BUG FIX: retries previously always called
                # requests.get directly, silently bypassing the
                # configured session used on the first attempt.
                req = req_func(*args, **kwargs)
                req.raise_for_status()
                self.log_function("New request successful")
                return req
            except requests.exceptions.RequestException:
                self.log_function("New request failed")
        # Allows for the api to ignore one potentially bad request
        if not self.failed_last:
            self.failed_last = True
            raise ApiError(e)
        else:
            raise FatalApiError(e)
def get_objectives_by_ids(self, objective_ids):
"""Gets an ``ObjectiveList`` corresponding to the given ``IdList``.
In plenary mode, the returned list contains all of the
objectives specified in the ``Id`` list, in the order of the
list, including duplicates, or an error results if an ``Id`` in
the supplied list is not found or inaccessible. Otherwise,
inaccessible ``Objectives`` may be omitted from the list and may
present the elements in any order including returning a unique
set.
arg: objective_ids (osid.id.IdList): the list of ``Ids`` to
retrieve
return: (osid.learning.ObjectiveList) - the returned
``Objective`` list
raise: NotFound - an ``Id was`` not found
raise: NullArgument - ``objective_ids`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.get_resources_by_ids
# NOTE: This implementation currently ignores plenary view
collection = JSONClientValidated('learning',
collection='Objective',
runtime=self._runtime)
object_id_list = []
for i in objective_ids:
object_id_list.append(ObjectId(self._get_id(i, 'learning').get_identifier()))
result = collection.find(
dict({'_id': {'$in': object_id_list}},
**self._view_filter()))
result = list(result)
sorted_result = []
for object_id in object_id_list:
for object_map in result:
if object_map['_id'] == object_id:
sorted_result.append(object_map)
break
return objects.ObjectiveList(sorted_result, runtime=self._runtime, proxy=self._proxy) | Gets an ``ObjectiveList`` corresponding to the given ``IdList``.
In plenary mode, the returned list contains all of the
objectives specified in the ``Id`` list, in the order of the
list, including duplicates, or an error results if an ``Id`` in
the supplied list is not found or inaccessible. Otherwise,
inaccessible ``Objectives`` may be omitted from the list and may
present the elements in any order including returning a unique
set.
arg: objective_ids (osid.id.IdList): the list of ``Ids`` to
retrieve
return: (osid.learning.ObjectiveList) - the returned
``Objective`` list
raise: NotFound - an ``Id was`` not found
raise: NullArgument - ``objective_ids`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | Below is the the instruction that describes the task:
### Input:
Gets an ``ObjectiveList`` corresponding to the given ``IdList``.
In plenary mode, the returned list contains all of the
objectives specified in the ``Id`` list, in the order of the
list, including duplicates, or an error results if an ``Id`` in
the supplied list is not found or inaccessible. Otherwise,
inaccessible ``Objectives`` may be omitted from the list and may
present the elements in any order including returning a unique
set.
arg: objective_ids (osid.id.IdList): the list of ``Ids`` to
retrieve
return: (osid.learning.ObjectiveList) - the returned
``Objective`` list
raise: NotFound - an ``Id was`` not found
raise: NullArgument - ``objective_ids`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
### Response:
def get_objectives_by_ids(self, objective_ids):
    """Return the ``Objective`` objects matching the given ``IdList``.

    In plenary mode the result contains every requested objective, in
    request order and including duplicates, and an error results when
    any ``Id`` in the supplied list is unknown or inaccessible.
    Otherwise, inaccessible ``Objectives`` may simply be omitted and
    the elements may come back in any order, possibly as a unique set.

    arg: objective_ids (osid.id.IdList): the list of ``Ids`` to
        retrieve
    return: (osid.learning.ObjectiveList) - the returned
        ``Objective`` list
    raise: NotFound - an ``Id was`` not found
    raise: NullArgument - ``objective_ids`` is ``null``
    raise: OperationFailed - unable to complete request
    raise: PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceLookupSession.get_resources_by_ids
    # NOTE: This implementation currently ignores plenary view
    collection = JSONClientValidated('learning',
                                     collection='Objective',
                                     runtime=self._runtime)
    # Translate every incoming osid Id into a Mongo ObjectId.
    requested_ids = [
        ObjectId(self._get_id(objective_id, 'learning').get_identifier())
        for objective_id in objective_ids
    ]
    found = list(collection.find(
        dict({'_id': {'$in': requested_ids}},
             **self._view_filter())))
    # Re-order the results to match the order of the requested ids,
    # keeping the first match for each id.
    ordered = []
    for requested_id in requested_ids:
        for record in found:
            if record['_id'] == requested_id:
                ordered.append(record)
                break
    return objects.ObjectiveList(ordered, runtime=self._runtime, proxy=self._proxy)
def add_ti_txt(self, lines, overwrite=False):
"""Add given TI-TXT string `lines`. Set `overwrite` to ``True`` to
allow already added data to be overwritten.
"""
address = None
eof_found = False
for line in StringIO(lines):
# Abort if data is found after end of file.
if eof_found:
raise Error("bad file terminator")
line = line.strip()
if len(line) < 1:
raise Error("bad line length")
if line[0] == 'q':
eof_found = True
elif line[0] == '@':
try:
address = int(line[1:], 16)
except ValueError:
raise Error("bad section address")
else:
# Try to decode the data.
try:
data = bytearray(binascii.unhexlify(line.replace(' ', '')))
except (TypeError, binascii.Error):
raise Error("bad data")
size = len(data)
# Check that there are correct number of bytes per
# line. There should TI_TXT_BYTES_PER_LINE. Only
# exception is last line of section which may be
# shorter.
if size > TI_TXT_BYTES_PER_LINE:
raise Error("bad line length")
if address is None:
raise Error("missing section address")
self._segments.add(_Segment(address,
address + size,
data,
self.word_size_bytes),
overwrite)
if size == TI_TXT_BYTES_PER_LINE:
address += size
else:
address = None
if not eof_found:
raise Error("missing file terminator") | Add given TI-TXT string `lines`. Set `overwrite` to ``True`` to
allow already added data to be overwritten. | Below is the the instruction that describes the task:
### Input:
Add given TI-TXT string `lines`. Set `overwrite` to ``True`` to
allow already added data to be overwritten.
### Response:
def add_ti_txt(self, lines, overwrite=False):
    """Add given TI-TXT string `lines`. Set `overwrite` to ``True`` to
    allow already added data to be overwritten.

    The TI-TXT format consists of ``@ADDR`` section headers, lines of
    space-separated hex byte pairs, and a final ``q`` terminator line.

    :param lines: The TI-TXT document as a single string.
    :param overwrite: Allow new data to overwrite existing segments.
    :raises Error: on malformed input (data after the terminator, bad
        line length, bad section address, bad hex data, data before
        any section address, or a missing terminator).
    """
    # Current load address; None until a section header is seen (and
    # again after a short line, which must end its section).
    address = None
    eof_found = False
    for line in StringIO(lines):
        # Abort if data is found after end of file.
        if eof_found:
            raise Error("bad file terminator")
        line = line.strip()
        if len(line) < 1:
            raise Error("bad line length")
        if line[0] == 'q':
            # 'q' marks end of file; any later content is an error.
            eof_found = True
        elif line[0] == '@':
            # Section header: the rest of the line is a hex address.
            try:
                address = int(line[1:], 16)
            except ValueError:
                raise Error("bad section address")
        else:
            # Try to decode the data.
            try:
                data = bytearray(binascii.unhexlify(line.replace(' ', '')))
            except (TypeError, binascii.Error):
                raise Error("bad data")
            size = len(data)
            # Check that there are correct number of bytes per
            # line. There should TI_TXT_BYTES_PER_LINE. Only
            # exception is last line of section which may be
            # shorter.
            if size > TI_TXT_BYTES_PER_LINE:
                raise Error("bad line length")
            if address is None:
                raise Error("missing section address")
            self._segments.add(_Segment(address,
                                        address + size,
                                        data,
                                        self.word_size_bytes),
                               overwrite)
            if size == TI_TXT_BYTES_PER_LINE:
                # Full line: next data line continues contiguously.
                address += size
            else:
                # Short line closes the section; require a new '@'.
                address = None
    if not eof_found:
        raise Error("missing file terminator")
def truncate_label(cls, label):
"""
In the case that a label exceeds the max length supported by the engine,
this method is used to construct a deterministic and unique label based on
an md5 hash.
"""
label = hashlib.md5(label.encode('utf-8')).hexdigest()
# truncate hash if it exceeds max length
if cls.max_column_name_length and len(label) > cls.max_column_name_length:
label = label[:cls.max_column_name_length]
return label | In the case that a label exceeds the max length supported by the engine,
this method is used to construct a deterministic and unique label based on
an md5 hash. | Below is the the instruction that describes the task:
### Input:
In the case that a label exceeds the max length supported by the engine,
this method is used to construct a deterministic and unique label based on
an md5 hash.
### Response:
def truncate_label(cls, label):
    """Return a deterministic, unique replacement for an over-long label.

    When a label exceeds the maximum length supported by the engine,
    the engine cannot use it verbatim, so we substitute the md5 hex
    digest of the label instead. The digest itself is clipped to
    ``cls.max_column_name_length`` when that limit is set and shorter
    than the 32-character digest.
    """
    digest = hashlib.md5(label.encode('utf-8')).hexdigest()
    limit = cls.max_column_name_length
    # Clip the digest too, in case the engine limit is shorter than 32.
    return digest[:limit] if limit and len(digest) > limit else digest
def save_object(self, obj):
"""
Save object to disk as JSON.
Generally shouldn't be called directly.
"""
obj.pre_save(self.jurisdiction.jurisdiction_id)
filename = '{0}_{1}.json'.format(obj._type, obj._id).replace('/', '-')
self.info('save %s %s as %s', obj._type, obj, filename)
self.debug(json.dumps(OrderedDict(sorted(obj.as_dict().items())),
cls=utils.JSONEncoderPlus, indent=4, separators=(',', ': ')))
self.output_names[obj._type].add(filename)
with open(os.path.join(self.datadir, filename), 'w') as f:
json.dump(obj.as_dict(), f, cls=utils.JSONEncoderPlus)
# validate after writing, allows for inspection on failure
try:
obj.validate()
except ValueError as ve:
if self.strict_validation:
raise ve
else:
self.warning(ve)
# after saving and validating, save subordinate objects
for obj in obj._related:
self.save_object(obj) | Save object to disk as JSON.
Generally shouldn't be called directly. | Below is the the instruction that describes the task:
### Input:
Save object to disk as JSON.
Generally shouldn't be called directly.
### Response:
def save_object(self, obj):
    """
    Save *obj* to disk as JSON, then recursively save its related objects.

    Generally shouldn't be called directly.

    :param obj: a scraped object exposing ``pre_save``, ``as_dict``,
        ``validate``, ``_type``, ``_id`` and ``_related``.
    :raises ValueError: when validation fails and
        ``self.strict_validation`` is set; otherwise failures are only
        logged as warnings.
    """
    obj.pre_save(self.jurisdiction.jurisdiction_id)
    # '/' is not a legal filename character, so flatten it to '-'.
    filename = '{0}_{1}.json'.format(obj._type, obj._id).replace('/', '-')
    self.info('save %s %s as %s', obj._type, obj, filename)
    self.debug(json.dumps(OrderedDict(sorted(obj.as_dict().items())),
               cls=utils.JSONEncoderPlus, indent=4, separators=(',', ': ')))
    self.output_names[obj._type].add(filename)
    with open(os.path.join(self.datadir, filename), 'w') as f:
        json.dump(obj.as_dict(), f, cls=utils.JSONEncoderPlus)
    # validate after writing, allows for inspection on failure
    try:
        obj.validate()
    except ValueError as ve:
        if self.strict_validation:
            raise ve
        else:
            self.warning(ve)
    # after saving and validating, save subordinate objects
    # (NOTE: the loop variable deliberately shadows the outer `obj`.)
    for obj in obj._related:
        self.save_object(obj)
def notify_all(self):
"""wake all waiting greenlets
:raises:
`RuntimeError` if the underlying lock hasn't been
:meth:`acquired <Lock.acquire>`
"""
if not self._is_owned():
raise RuntimeError("cannot wait on un-acquired lock")
scheduler.state.awoken_from_events.update(x[0] for x in self._waiters)
self._waiters.clear() | wake all waiting greenlets
:raises:
`RuntimeError` if the underlying lock hasn't been
:meth:`acquired <Lock.acquire>` | Below is the the instruction that describes the task:
### Input:
wake all waiting greenlets
:raises:
`RuntimeError` if the underlying lock hasn't been
:meth:`acquired <Lock.acquire>`
### Response:
def notify_all(self):
    """wake all waiting greenlets
    :raises:
        `RuntimeError` if the underlying lock hasn't been
        :meth:`acquired <Lock.acquire>`
    """
    if not self._is_owned():
        raise RuntimeError("cannot wait on un-acquired lock")
    # Each waiter entry's first element appears to be the blocked
    # greenlet -- TODO(review): confirm against the _waiters producers.
    # Marking them all "awoken" hands them back to the scheduler.
    scheduler.state.awoken_from_events.update(x[0] for x in self._waiters)
    self._waiters.clear()
def delete(self, ip_dest, next_hop, **kwargs):
"""Delete a static route
Args:
ip_dest (string): The ip address of the destination in the
form of A.B.C.D/E
next_hop (string): The next hop interface or ip address
**kwargs['next_hop_ip'] (string): The next hop address on
destination interface
**kwargs['distance'] (string): Administrative distance for this
route
**kwargs['tag'] (string): Route tag
**kwargs['route_name'] (string): Route name
Returns:
True if the operation succeeds, otherwise False.
"""
# Call _set_route with the delete flag set to True
kwargs.update({'delete': True})
return self._set_route(ip_dest, next_hop, **kwargs) | Delete a static route
Args:
ip_dest (string): The ip address of the destination in the
form of A.B.C.D/E
next_hop (string): The next hop interface or ip address
**kwargs['next_hop_ip'] (string): The next hop address on
destination interface
**kwargs['distance'] (string): Administrative distance for this
route
**kwargs['tag'] (string): Route tag
**kwargs['route_name'] (string): Route name
Returns:
True if the operation succeeds, otherwise False. | Below is the the instruction that describes the task:
### Input:
Delete a static route
Args:
ip_dest (string): The ip address of the destination in the
form of A.B.C.D/E
next_hop (string): The next hop interface or ip address
**kwargs['next_hop_ip'] (string): The next hop address on
destination interface
**kwargs['distance'] (string): Administrative distance for this
route
**kwargs['tag'] (string): Route tag
**kwargs['route_name'] (string): Route name
Returns:
True if the operation succeeds, otherwise False.
### Response:
def delete(self, ip_dest, next_hop, **kwargs):
    """Remove a static route from the configuration.

    Args:
        ip_dest (string): The ip address of the destination in the
            form of A.B.C.D/E
        next_hop (string): The next hop interface or ip address
        **kwargs['next_hop_ip'] (string): The next hop address on
            destination interface
        **kwargs['distance'] (string): Administrative distance for this
            route
        **kwargs['tag'] (string): Route tag
        **kwargs['route_name'] (string): Route name

    Returns:
        True if the operation succeeds, otherwise False.
    """
    # Flag the request as a removal and reuse the shared route setter.
    kwargs['delete'] = True
    return self._set_route(ip_dest, next_hop, **kwargs)
def new_post(GITDIRECTORY=CONFIG['output_to'], kind=KINDS['writing']): # pragma: no coverage # noqa
"""
This function should create a template for a new post with a title
read from the user input.
Most other fields should be defaults.
TODO: update this function
"""
title = input("Give the title of the post: ")
while ':' in title:
title = input("Give the title of the post (':' not allowed): ")
author = CONFIG['author']
date = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d')
tags = input("Give the tags, separated by ', ':")
published = 'yes'
chronological = 'yes'
summary = ("summary: Type your summary here.")
# make file name
fname = os.path.join(os.getcwd(), 'content', kind['name_plural'],
datetime.datetime.strftime(datetime.datetime.now(),
'%Y'),
date + '-' + title.replace(' ', '-') + '.markdown')
# first post every year need to create a new directory
if not os.path.exists(os.path.dirname(fname)):
os.makedirs(os.path.dirname(fname))
with open(fname, 'w') as npost:
npost.write('---\n')
npost.write('title: %s\n' % title)
npost.write('author: %s\n' % author)
npost.write('published: %s\n' % date)
npost.write('tags: %s\n' % tags)
npost.write('public: %s\n' % published)
npost.write('chronological: %s\n' % chronological)
npost.write('kind: %s\n' % kind['name'])
npost.write('%s\n' % summary)
npost.write('---\n')
os.system('%s %s' % (CONFIG['editor'], fname)) | This function should create a template for a new post with a title
read from the user input.
Most other fields should be defaults.
TODO: update this function | Below is the the instruction that describes the task:
### Input:
This function should create a template for a new post with a title
read from the user input.
Most other fields should be defaults.
TODO: update this function
### Response:
def new_post(GITDIRECTORY=CONFIG['output_to'], kind=KINDS['writing']): # pragma: no coverage # noqa
    """
    This function should create a template for a new post with a title
    read from the user input.
    Most other fields should be defaults.
    TODO: update this function

    :param GITDIRECTORY: output directory (currently unused here).
    :param kind: post kind dict with ``name`` / ``name_plural`` keys.
    """
    title = input("Give the title of the post: ")
    # ':' would break the YAML-style front matter, so re-prompt.
    while ':' in title:
        title = input("Give the title of the post (':' not allowed): ")
    author = CONFIG['author']
    date = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d')
    tags = input("Give the tags, separated by ', ':")
    published = 'yes'
    chronological = 'yes'
    summary = ("summary: Type your summary here.")
    # make file name: content/<kind>/<year>/<date>-<slug>.markdown
    fname = os.path.join(os.getcwd(), 'content', kind['name_plural'],
                         datetime.datetime.strftime(datetime.datetime.now(),
                                                    '%Y'),
                         date + '-' + title.replace(' ', '-') + '.markdown')
    # first post every year need to create a new directory
    if not os.path.exists(os.path.dirname(fname)):
        os.makedirs(os.path.dirname(fname))
    with open(fname, 'w') as npost:
        npost.write('---\n')
        npost.write('title: %s\n' % title)
        npost.write('author: %s\n' % author)
        npost.write('published: %s\n' % date)
        npost.write('tags: %s\n' % tags)
        npost.write('public: %s\n' % published)
        npost.write('chronological: %s\n' % chronological)
        npost.write('kind: %s\n' % kind['name'])
        npost.write('%s\n' % summary)
        npost.write('---\n')
    # NOTE(review): fname derives from a user-typed title and is passed
    # to a shell via os.system -- spaces are replaced but other shell
    # metacharacters are not; consider subprocess.run([...], shell=False).
    os.system('%s %s' % (CONFIG['editor'], fname))
def mapReduce(mapFunc, reductionFunc, *iterables, **kwargs):
"""Exectues the :meth:`~scoop.futures.map` function and then applies a
reduction function to its result. The reduction function will cumulatively
merge the results of the map function in order to get a single final value.
This call is blocking.
:param mapFunc: Any picklable callable object (function or class object
with *__call__* method); this object will be called to execute the
Futures. The callable must return a value.
:param reductionFunc: Any picklable callable object (function or class
object with *__call__* method); this object will be called to reduce
pairs of Futures results. The callable must support two parameters and
return a single value.
:param iterables: Iterable objects; each will be zipped to form an iterable
of arguments tuples that will be passed to the callable object as a
separate Future.
:param timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
:returns: A single value."""
return submit(
_recursiveReduce,
mapFunc,
reductionFunc,
False,
*iterables
).result() | Exectues the :meth:`~scoop.futures.map` function and then applies a
reduction function to its result. The reduction function will cumulatively
merge the results of the map function in order to get a single final value.
This call is blocking.
:param mapFunc: Any picklable callable object (function or class object
with *__call__* method); this object will be called to execute the
Futures. The callable must return a value.
:param reductionFunc: Any picklable callable object (function or class
object with *__call__* method); this object will be called to reduce
pairs of Futures results. The callable must support two parameters and
return a single value.
:param iterables: Iterable objects; each will be zipped to form an iterable
of arguments tuples that will be passed to the callable object as a
separate Future.
:param timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
:returns: A single value. | Below is the the instruction that describes the task:
### Input:
Exectues the :meth:`~scoop.futures.map` function and then applies a
reduction function to its result. The reduction function will cumulatively
merge the results of the map function in order to get a single final value.
This call is blocking.
:param mapFunc: Any picklable callable object (function or class object
with *__call__* method); this object will be called to execute the
Futures. The callable must return a value.
:param reductionFunc: Any picklable callable object (function or class
object with *__call__* method); this object will be called to reduce
pairs of Futures results. The callable must support two parameters and
return a single value.
:param iterables: Iterable objects; each will be zipped to form an iterable
of arguments tuples that will be passed to the callable object as a
separate Future.
:param timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
:returns: A single value.
### Response:
def mapReduce(mapFunc, reductionFunc, *iterables, **kwargs):
    """Executes the :meth:`~scoop.futures.map` function and then applies a
    reduction function to its result. The reduction function will cumulatively
    merge the results of the map function in order to get a single final value.
    This call is blocking.
    :param mapFunc: Any picklable callable object (function or class object
        with *__call__* method); this object will be called to execute the
        Futures. The callable must return a value.
    :param reductionFunc: Any picklable callable object (function or class
        object with *__call__* method); this object will be called to reduce
        pairs of Futures results. The callable must support two parameters and
        return a single value.
    :param iterables: Iterable objects; each will be zipped to form an iterable
        of arguments tuples that will be passed to the callable object as a
        separate Future.
    :param timeout: The maximum number of seconds to wait. If None, then there
        is no limit on the wait time.
    :returns: A single value."""
    # NOTE(review): `timeout` is documented above but **kwargs is never
    # inspected and nothing is forwarded to .result(), so the timeout is
    # currently ignored -- confirm whether it should be passed through.
    return submit(
        _recursiveReduce,
        mapFunc,
        reductionFunc,
        False,
        *iterables
    ).result()
def _create_sequences(self):
'''Get all of the Sequences - Rosetta, ATOM, SEQRES, FASTA, UniParc.'''
# Create the Rosetta sequences and the maps from the Rosetta sequences to the ATOM sequences
try:
self.pdb.construct_pdb_to_rosetta_residue_map(self.rosetta_scripts_path, rosetta_database_path = self.rosetta_database_path, cache_dir = self.cache_dir)
except PDBMissingMainchainAtomsException:
self.pdb_to_rosetta_residue_map_error = True
# Get all the Sequences
if self.pdb_id not in do_not_use_the_sequence_aligner:
self.uniparc_sequences = self.PDB_UniParc_SA.uniparc_sequences
else:
self.uniparc_sequences = self.sifts.get_uniparc_sequences()
self.fasta_sequences = self.FASTA.get_sequences(self.pdb_id)
self.seqres_sequences = self.pdb.seqres_sequences
self.atom_sequences = self.pdb.atom_sequences
if self.pdb_to_rosetta_residue_map_error:
self.rosetta_sequences = {}
for c in self.atom_sequences.keys():
self.rosetta_sequences[c] = Sequence()
else:
self.rosetta_sequences = self.pdb.rosetta_sequences
# Update the chain types for the UniParc sequences
uniparc_pdb_chain_mapping = {}
if self.pdb_id not in do_not_use_the_sequence_aligner:
for pdb_chain_id, matches in self.PDB_UniParc_SA.clustal_matches.iteritems():
if matches:
# we are not guaranteed to have a match e.g. the short chain J in 1A2C, chimeras, etc.
uniparc_chain_id = matches.keys()[0]
assert(len(matches) == 1)
uniparc_pdb_chain_mapping[uniparc_chain_id] = uniparc_pdb_chain_mapping.get(uniparc_chain_id, [])
uniparc_pdb_chain_mapping[uniparc_chain_id].append(pdb_chain_id)
else:
for pdb_chain_id, uniparc_chain_ids in self.sifts.get_pdb_chain_to_uniparc_id_map().iteritems():
for uniparc_chain_id in uniparc_chain_ids:
uniparc_pdb_chain_mapping[uniparc_chain_id] = uniparc_pdb_chain_mapping.get(uniparc_chain_id, [])
uniparc_pdb_chain_mapping[uniparc_chain_id].append(pdb_chain_id)
for uniparc_chain_id, pdb_chain_ids in uniparc_pdb_chain_mapping.iteritems():
sequence_type = set([self.seqres_sequences[p].sequence_type for p in pdb_chain_ids])
assert(len(sequence_type) == 1)
sequence_type = sequence_type.pop()
assert(self.uniparc_sequences[uniparc_chain_id].sequence_type == None)
self.uniparc_sequences[uniparc_chain_id].set_type(sequence_type)
for p in pdb_chain_ids:
self.pdb_chain_to_uniparc_chain_mapping[p] = uniparc_chain_id
# Update the chain types for the FASTA sequences
for chain_id, sequence in self.seqres_sequences.iteritems():
self.fasta_sequences[chain_id].set_type(sequence.sequence_type) | Get all of the Sequences - Rosetta, ATOM, SEQRES, FASTA, UniParc. | Below is the the instruction that describes the task:
### Input:
Get all of the Sequences - Rosetta, ATOM, SEQRES, FASTA, UniParc.
### Response:
def _create_sequences(self):
'''Get all of the Sequences - Rosetta, ATOM, SEQRES, FASTA, UniParc.'''
# Create the Rosetta sequences and the maps from the Rosetta sequences to the ATOM sequences
try:
self.pdb.construct_pdb_to_rosetta_residue_map(self.rosetta_scripts_path, rosetta_database_path = self.rosetta_database_path, cache_dir = self.cache_dir)
except PDBMissingMainchainAtomsException:
self.pdb_to_rosetta_residue_map_error = True
# Get all the Sequences
if self.pdb_id not in do_not_use_the_sequence_aligner:
self.uniparc_sequences = self.PDB_UniParc_SA.uniparc_sequences
else:
self.uniparc_sequences = self.sifts.get_uniparc_sequences()
self.fasta_sequences = self.FASTA.get_sequences(self.pdb_id)
self.seqres_sequences = self.pdb.seqres_sequences
self.atom_sequences = self.pdb.atom_sequences
if self.pdb_to_rosetta_residue_map_error:
self.rosetta_sequences = {}
for c in self.atom_sequences.keys():
self.rosetta_sequences[c] = Sequence()
else:
self.rosetta_sequences = self.pdb.rosetta_sequences
# Update the chain types for the UniParc sequences
uniparc_pdb_chain_mapping = {}
if self.pdb_id not in do_not_use_the_sequence_aligner:
for pdb_chain_id, matches in self.PDB_UniParc_SA.clustal_matches.iteritems():
if matches:
# we are not guaranteed to have a match e.g. the short chain J in 1A2C, chimeras, etc.
uniparc_chain_id = matches.keys()[0]
assert(len(matches) == 1)
uniparc_pdb_chain_mapping[uniparc_chain_id] = uniparc_pdb_chain_mapping.get(uniparc_chain_id, [])
uniparc_pdb_chain_mapping[uniparc_chain_id].append(pdb_chain_id)
else:
for pdb_chain_id, uniparc_chain_ids in self.sifts.get_pdb_chain_to_uniparc_id_map().iteritems():
for uniparc_chain_id in uniparc_chain_ids:
uniparc_pdb_chain_mapping[uniparc_chain_id] = uniparc_pdb_chain_mapping.get(uniparc_chain_id, [])
uniparc_pdb_chain_mapping[uniparc_chain_id].append(pdb_chain_id)
for uniparc_chain_id, pdb_chain_ids in uniparc_pdb_chain_mapping.iteritems():
sequence_type = set([self.seqres_sequences[p].sequence_type for p in pdb_chain_ids])
assert(len(sequence_type) == 1)
sequence_type = sequence_type.pop()
assert(self.uniparc_sequences[uniparc_chain_id].sequence_type == None)
self.uniparc_sequences[uniparc_chain_id].set_type(sequence_type)
for p in pdb_chain_ids:
self.pdb_chain_to_uniparc_chain_mapping[p] = uniparc_chain_id
# Update the chain types for the FASTA sequences
for chain_id, sequence in self.seqres_sequences.iteritems():
self.fasta_sequences[chain_id].set_type(sequence.sequence_type) |
def _skip_spaces(string, idx):
# type: (str, int) -> int
"""
Retrieves the next non-space character after idx index in the given string
:param string: The string to look into
:param idx: The base search index
:return: The next non-space character index, -1 if not found
"""
i = idx
for char in string[idx:]:
if not char.isspace():
return i
i += 1
return -1 | Retrieves the next non-space character after idx index in the given string
:param string: The string to look into
:param idx: The base search index
:return: The next non-space character index, -1 if not found | Below is the the instruction that describes the task:
### Input:
Retrieves the next non-space character after idx index in the given string
:param string: The string to look into
:param idx: The base search index
:return: The next non-space character index, -1 if not found
### Response:
def _skip_spaces(string, idx):
# type: (str, int) -> int
"""
Retrieves the next non-space character after idx index in the given string
:param string: The string to look into
:param idx: The base search index
:return: The next non-space character index, -1 if not found
"""
i = idx
for char in string[idx:]:
if not char.isspace():
return i
i += 1
return -1 |
def authorize_password(self, client_id, username, password):
"""Authorize to platform as regular user
You must provide a valid client_id (same as web application),
your password and your username. Username and password is not stored in
client but refresh token is stored. The only valid scope for this
authorization is "regular_user".
:param client_id: Valid client_id
:type client_id: String
:param username: User email
:type username: String
:param password: User password
:type password: String
"""
self.auth_data = {
"grant_type": "password",
"username": username,
"password": password,
"client_id": client_id,
"scope": ["regular_user"]
}
self._do_authorize() | Authorize to platform as regular user
You must provide a valid client_id (same as web application),
your password and your username. Username and password is not stored in
client but refresh token is stored. The only valid scope for this
authorization is "regular_user".
:param client_id: Valid client_id
:type client_id: String
:param username: User email
:type username: String
:param password: User password
:type password: String | Below is the the instruction that describes the task:
### Input:
Authorize to platform as regular user
You must provide a valid client_id (same as web application),
your password and your username. Username and password is not stored in
client but refresh token is stored. The only valid scope for this
authorization is "regular_user".
:param client_id: Valid client_id
:type client_id: String
:param username: User email
:type username: String
:param password: User password
:type password: String
### Response:
def authorize_password(self, client_id, username, password):
"""Authorize to platform as regular user
You must provide a valid client_id (same as web application),
your password and your username. Username and password is not stored in
client but refresh token is stored. The only valid scope for this
authorization is "regular_user".
:param client_id: Valid client_id
:type client_id: String
:param username: User email
:type username: String
:param password: User password
:type password: String
"""
self.auth_data = {
"grant_type": "password",
"username": username,
"password": password,
"client_id": client_id,
"scope": ["regular_user"]
}
self._do_authorize() |
def create_topic(self, project, topic, fail_if_exists=False):
"""Creates a Pub/Sub topic, if it does not already exist.
:param project: the GCP project ID in which to create
the topic
:type project: str
:param topic: the Pub/Sub topic name to create; do not
include the ``projects/{project}/topics/`` prefix.
:type topic: str
:param fail_if_exists: if set, raise an exception if the topic
already exists
:type fail_if_exists: bool
"""
service = self.get_conn()
full_topic = _format_topic(project, topic)
try:
service.projects().topics().create(
name=full_topic, body={}).execute(num_retries=self.num_retries)
except HttpError as e:
# Status code 409 indicates that the topic already exists.
if str(e.resp['status']) == '409':
message = 'Topic already exists: {}'.format(full_topic)
self.log.warning(message)
if fail_if_exists:
raise PubSubException(message)
else:
raise PubSubException(
'Error creating topic {}'.format(full_topic), e) | Creates a Pub/Sub topic, if it does not already exist.
:param project: the GCP project ID in which to create
the topic
:type project: str
:param topic: the Pub/Sub topic name to create; do not
include the ``projects/{project}/topics/`` prefix.
:type topic: str
:param fail_if_exists: if set, raise an exception if the topic
already exists
:type fail_if_exists: bool | Below is the the instruction that describes the task:
### Input:
Creates a Pub/Sub topic, if it does not already exist.
:param project: the GCP project ID in which to create
the topic
:type project: str
:param topic: the Pub/Sub topic name to create; do not
include the ``projects/{project}/topics/`` prefix.
:type topic: str
:param fail_if_exists: if set, raise an exception if the topic
already exists
:type fail_if_exists: bool
### Response:
def create_topic(self, project, topic, fail_if_exists=False):
"""Creates a Pub/Sub topic, if it does not already exist.
:param project: the GCP project ID in which to create
the topic
:type project: str
:param topic: the Pub/Sub topic name to create; do not
include the ``projects/{project}/topics/`` prefix.
:type topic: str
:param fail_if_exists: if set, raise an exception if the topic
already exists
:type fail_if_exists: bool
"""
service = self.get_conn()
full_topic = _format_topic(project, topic)
try:
service.projects().topics().create(
name=full_topic, body={}).execute(num_retries=self.num_retries)
except HttpError as e:
# Status code 409 indicates that the topic already exists.
if str(e.resp['status']) == '409':
message = 'Topic already exists: {}'.format(full_topic)
self.log.warning(message)
if fail_if_exists:
raise PubSubException(message)
else:
raise PubSubException(
'Error creating topic {}'.format(full_topic), e) |
def set_table_acl(self, table_name, signed_identifiers=None, timeout=None):
'''
Sets stored access policies for the table that may be used with Shared
Access Signatures.
When you set permissions for a table, the existing permissions are replaced.
To update the table’s permissions, call :func:`~get_table_acl` to fetch
all access policies associated with the table, modify the access policy
that you wish to change, and then call this function with the complete
set of data to perform the update.
When you establish a stored access policy on a table, it may take up to
30 seconds to take effect. During this interval, a shared access signature
that is associated with the stored access policy will throw an
:class:`AzureHttpError` until the access policy becomes active.
:param str table_name:
The name of an existing table.
:param signed_identifiers:
A dictionary of access policies to associate with the table. The
dictionary may contain up to 5 elements. An empty dictionary
will clear the access policies set on the service.
:type signed_identifiers: dict of str to :class:`~azure.storage.models.AccessPolicy`
:param int timeout:
The server timeout, expressed in seconds.
'''
_validate_not_none('table_name', table_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _to_str(table_name)
request.query = [
('comp', 'acl'),
('timeout', _int_to_str(timeout)),
]
request.body = _get_request_body(
_convert_signed_identifiers_to_xml(signed_identifiers))
self._perform_request(request) | Sets stored access policies for the table that may be used with Shared
Access Signatures.
When you set permissions for a table, the existing permissions are replaced.
To update the table’s permissions, call :func:`~get_table_acl` to fetch
all access policies associated with the table, modify the access policy
that you wish to change, and then call this function with the complete
set of data to perform the update.
When you establish a stored access policy on a table, it may take up to
30 seconds to take effect. During this interval, a shared access signature
that is associated with the stored access policy will throw an
:class:`AzureHttpError` until the access policy becomes active.
:param str table_name:
The name of an existing table.
:param signed_identifiers:
A dictionary of access policies to associate with the table. The
dictionary may contain up to 5 elements. An empty dictionary
will clear the access policies set on the service.
:type signed_identifiers: dict of str to :class:`~azure.storage.models.AccessPolicy`
:param int timeout:
The server timeout, expressed in seconds. | Below is the the instruction that describes the task:
### Input:
Sets stored access policies for the table that may be used with Shared
Access Signatures.
When you set permissions for a table, the existing permissions are replaced.
To update the table’s permissions, call :func:`~get_table_acl` to fetch
all access policies associated with the table, modify the access policy
that you wish to change, and then call this function with the complete
set of data to perform the update.
When you establish a stored access policy on a table, it may take up to
30 seconds to take effect. During this interval, a shared access signature
that is associated with the stored access policy will throw an
:class:`AzureHttpError` until the access policy becomes active.
:param str table_name:
The name of an existing table.
:param signed_identifiers:
A dictionary of access policies to associate with the table. The
dictionary may contain up to 5 elements. An empty dictionary
will clear the access policies set on the service.
:type signed_identifiers: dict of str to :class:`~azure.storage.models.AccessPolicy`
:param int timeout:
The server timeout, expressed in seconds.
### Response:
def set_table_acl(self, table_name, signed_identifiers=None, timeout=None):
'''
Sets stored access policies for the table that may be used with Shared
Access Signatures.
When you set permissions for a table, the existing permissions are replaced.
To update the table’s permissions, call :func:`~get_table_acl` to fetch
all access policies associated with the table, modify the access policy
that you wish to change, and then call this function with the complete
set of data to perform the update.
When you establish a stored access policy on a table, it may take up to
30 seconds to take effect. During this interval, a shared access signature
that is associated with the stored access policy will throw an
:class:`AzureHttpError` until the access policy becomes active.
:param str table_name:
The name of an existing table.
:param signed_identifiers:
A dictionary of access policies to associate with the table. The
dictionary may contain up to 5 elements. An empty dictionary
will clear the access policies set on the service.
:type signed_identifiers: dict of str to :class:`~azure.storage.models.AccessPolicy`
:param int timeout:
The server timeout, expressed in seconds.
'''
_validate_not_none('table_name', table_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _to_str(table_name)
request.query = [
('comp', 'acl'),
('timeout', _int_to_str(timeout)),
]
request.body = _get_request_body(
_convert_signed_identifiers_to_xml(signed_identifiers))
self._perform_request(request) |
def web_address(self):
"""
Return the url of the web server or None if not running
"""
port = self._current_web_port()
address = self.address or '127.0.0.1'
if port is None:
return None
return 'http://{0}:{1}/'.format(
address if address and not is_boot2docker() else docker_host(),
port) | Return the url of the web server or None if not running | Below is the the instruction that describes the task:
### Input:
Return the url of the web server or None if not running
### Response:
def web_address(self):
"""
Return the url of the web server or None if not running
"""
port = self._current_web_port()
address = self.address or '127.0.0.1'
if port is None:
return None
return 'http://{0}:{1}/'.format(
address if address and not is_boot2docker() else docker_host(),
port) |
def _from_specs(self, dims, spacing=(1.0,1.0,1.0), origin=(0.0, 0.0, 0.0)):
"""
Create VTK image data directly from numpy arrays. A uniform grid is
defined by the node spacings for each axis (uniform along each
individual axis) and the number of nodes on each axis. These are
relative to a specified origin (default is ``(0.0, 0.0, 0.0)``).
Parameters
----------
dims : tuple(int)
Length 3 tuple of ints specifying how many nodes along each axis
spacing : tuple(float)
Length 3 tuple of floats/ints specifying the node spacings for each axis
origin : tuple(float)
Length 3 tuple of floats/ints specifying minimum value for each axis
"""
xn, yn, zn = dims[0], dims[1], dims[2]
xs, ys, zs = spacing[0], spacing[1], spacing[2]
xo, yo, zo = origin[0], origin[1], origin[2]
self.SetDimensions(xn, yn, zn)
self.SetOrigin(xo, yo, zo)
self.SetSpacing(xs, ys, zs) | Create VTK image data directly from numpy arrays. A uniform grid is
defined by the node spacings for each axis (uniform along each
individual axis) and the number of nodes on each axis. These are
relative to a specified origin (default is ``(0.0, 0.0, 0.0)``).
Parameters
----------
dims : tuple(int)
Length 3 tuple of ints specifying how many nodes along each axis
spacing : tuple(float)
Length 3 tuple of floats/ints specifying the node spacings for each axis
origin : tuple(float)
Length 3 tuple of floats/ints specifying minimum value for each axis | Below is the the instruction that describes the task:
### Input:
Create VTK image data directly from numpy arrays. A uniform grid is
defined by the node spacings for each axis (uniform along each
individual axis) and the number of nodes on each axis. These are
relative to a specified origin (default is ``(0.0, 0.0, 0.0)``).
Parameters
----------
dims : tuple(int)
Length 3 tuple of ints specifying how many nodes along each axis
spacing : tuple(float)
Length 3 tuple of floats/ints specifying the node spacings for each axis
origin : tuple(float)
Length 3 tuple of floats/ints specifying minimum value for each axis
### Response:
def _from_specs(self, dims, spacing=(1.0,1.0,1.0), origin=(0.0, 0.0, 0.0)):
"""
Create VTK image data directly from numpy arrays. A uniform grid is
defined by the node spacings for each axis (uniform along each
individual axis) and the number of nodes on each axis. These are
relative to a specified origin (default is ``(0.0, 0.0, 0.0)``).
Parameters
----------
dims : tuple(int)
Length 3 tuple of ints specifying how many nodes along each axis
spacing : tuple(float)
Length 3 tuple of floats/ints specifying the node spacings for each axis
origin : tuple(float)
Length 3 tuple of floats/ints specifying minimum value for each axis
"""
xn, yn, zn = dims[0], dims[1], dims[2]
xs, ys, zs = spacing[0], spacing[1], spacing[2]
xo, yo, zo = origin[0], origin[1], origin[2]
self.SetDimensions(xn, yn, zn)
self.SetOrigin(xo, yo, zo)
self.SetSpacing(xs, ys, zs) |
def lnlike(self, p):
"""Log-likelihood of model at given parameters
:param p:
mass, log10(age), feh, [distance, A_V (extinction)].
Final two should only be provided if ``self.fit_for_distance``
is ``True``; that is, apparent magnitudes are provided.
:return:
log-likelihood. Will be -np.inf if values out of range.
"""
if not self._props_cleaned:
self._clean_props()
if not self.use_emcee:
fit_for_distance = True
mass, age, feh, dist, AV = (p[0], p[1], p[2], p[3], p[4])
else:
if len(p)==5:
fit_for_distance = True
mass,age,feh,dist,AV = p
elif len(p)==3:
fit_for_distance = False
mass,age,feh = p
if mass < self.ic.minmass or mass > self.ic.maxmass \
or age < self.ic.minage or age > self.ic.maxage \
or feh < self.ic.minfeh or feh > self.ic.maxfeh:
return -np.inf
if fit_for_distance:
if dist < 0 or AV < 0 or dist > self.max_distance:
return -np.inf
if AV > self.maxAV:
return -np.inf
if self.min_logg is not None:
logg = self.ic.logg(mass,age,feh)
if logg < self.min_logg:
return -np.inf
logl = 0
for prop in self.properties.keys():
try:
val,err = self.properties[prop]
except TypeError:
#property not appropriate for fitting (e.g. no error provided)
continue
if prop in self.ic.bands:
if not fit_for_distance:
raise ValueError('must fit for mass, age, feh, dist, A_V if apparent magnitudes provided.')
mod = self.ic.mag[prop](mass,age,feh) + 5*np.log10(dist) - 5
A = AV*EXTINCTION[prop]
mod += A
elif re.search('delta_',prop):
continue
elif prop=='feh':
mod = feh
elif prop=='parallax':
mod = 1./dist * 1000
else:
mod = getattr(self.ic,prop)(mass,age,feh)
logl += -(val-mod)**2/(2*err**2) + np.log(1/(err*np.sqrt(2*np.pi)))
if np.isnan(logl):
logl = -np.inf
return logl | Log-likelihood of model at given parameters
:param p:
mass, log10(age), feh, [distance, A_V (extinction)].
Final two should only be provided if ``self.fit_for_distance``
is ``True``; that is, apparent magnitudes are provided.
:return:
log-likelihood. Will be -np.inf if values out of range. | Below is the the instruction that describes the task:
### Input:
Log-likelihood of model at given parameters
:param p:
mass, log10(age), feh, [distance, A_V (extinction)].
Final two should only be provided if ``self.fit_for_distance``
is ``True``; that is, apparent magnitudes are provided.
:return:
log-likelihood. Will be -np.inf if values out of range.
### Response:
def lnlike(self, p):
"""Log-likelihood of model at given parameters
:param p:
mass, log10(age), feh, [distance, A_V (extinction)].
Final two should only be provided if ``self.fit_for_distance``
is ``True``; that is, apparent magnitudes are provided.
:return:
log-likelihood. Will be -np.inf if values out of range.
"""
if not self._props_cleaned:
self._clean_props()
if not self.use_emcee:
fit_for_distance = True
mass, age, feh, dist, AV = (p[0], p[1], p[2], p[3], p[4])
else:
if len(p)==5:
fit_for_distance = True
mass,age,feh,dist,AV = p
elif len(p)==3:
fit_for_distance = False
mass,age,feh = p
if mass < self.ic.minmass or mass > self.ic.maxmass \
or age < self.ic.minage or age > self.ic.maxage \
or feh < self.ic.minfeh or feh > self.ic.maxfeh:
return -np.inf
if fit_for_distance:
if dist < 0 or AV < 0 or dist > self.max_distance:
return -np.inf
if AV > self.maxAV:
return -np.inf
if self.min_logg is not None:
logg = self.ic.logg(mass,age,feh)
if logg < self.min_logg:
return -np.inf
logl = 0
for prop in self.properties.keys():
try:
val,err = self.properties[prop]
except TypeError:
#property not appropriate for fitting (e.g. no error provided)
continue
if prop in self.ic.bands:
if not fit_for_distance:
raise ValueError('must fit for mass, age, feh, dist, A_V if apparent magnitudes provided.')
mod = self.ic.mag[prop](mass,age,feh) + 5*np.log10(dist) - 5
A = AV*EXTINCTION[prop]
mod += A
elif re.search('delta_',prop):
continue
elif prop=='feh':
mod = feh
elif prop=='parallax':
mod = 1./dist * 1000
else:
mod = getattr(self.ic,prop)(mass,age,feh)
logl += -(val-mod)**2/(2*err**2) + np.log(1/(err*np.sqrt(2*np.pi)))
if np.isnan(logl):
logl = -np.inf
return logl |
def _instructions(self, time: int = 0) -> Iterable[Tuple[int, 'Instruction']]:
"""Iterable for flattening Schedule tree.
Args:
time: Shifted time due to parent
Yields:
Tuple[int, ScheduleComponent]: Tuple containing time `ScheduleComponent` starts
at and the flattened `ScheduleComponent`.
"""
for insert_time, child_sched in self.children:
yield from child_sched._instructions(time + insert_time) | Iterable for flattening Schedule tree.
Args:
time: Shifted time due to parent
Yields:
Tuple[int, ScheduleComponent]: Tuple containing time `ScheduleComponent` starts
at and the flattened `ScheduleComponent`. | Below is the the instruction that describes the task:
### Input:
Iterable for flattening Schedule tree.
Args:
time: Shifted time due to parent
Yields:
Tuple[int, ScheduleComponent]: Tuple containing time `ScheduleComponent` starts
at and the flattened `ScheduleComponent`.
### Response:
def _instructions(self, time: int = 0) -> Iterable[Tuple[int, 'Instruction']]:
"""Iterable for flattening Schedule tree.
Args:
time: Shifted time due to parent
Yields:
Tuple[int, ScheduleComponent]: Tuple containing time `ScheduleComponent` starts
at and the flattened `ScheduleComponent`.
"""
for insert_time, child_sched in self.children:
yield from child_sched._instructions(time + insert_time) |
def pdf_link(self, link_f, y, Y_metadata=None):
"""
Likelihood function given link(f)
.. math::
\\ln p(y_{i}|\\lambda(f_{i})) = -\\frac{N \\ln 2\\pi}{2} - \\frac{\\ln |K|}{2} - \\frac{(y_{i} - \\lambda(f_{i}))^{T}\\sigma^{-2}(y_{i} - \\lambda(f_{i}))}{2}
:param link_f: latent variables link(f)
:type link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata not used in gaussian
:returns: likelihood evaluated for this point
:rtype: float
"""
#Assumes no covariance, exp, sum, log for numerical stability
return np.exp(np.sum(np.log(stats.norm.pdf(y, link_f, np.sqrt(self.variance))))) | Likelihood function given link(f)
.. math::
\\ln p(y_{i}|\\lambda(f_{i})) = -\\frac{N \\ln 2\\pi}{2} - \\frac{\\ln |K|}{2} - \\frac{(y_{i} - \\lambda(f_{i}))^{T}\\sigma^{-2}(y_{i} - \\lambda(f_{i}))}{2}
:param link_f: latent variables link(f)
:type link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata not used in gaussian
:returns: likelihood evaluated for this point
:rtype: float | Below is the the instruction that describes the task:
### Input:
Likelihood function given link(f)
.. math::
\\ln p(y_{i}|\\lambda(f_{i})) = -\\frac{N \\ln 2\\pi}{2} - \\frac{\\ln |K|}{2} - \\frac{(y_{i} - \\lambda(f_{i}))^{T}\\sigma^{-2}(y_{i} - \\lambda(f_{i}))}{2}
:param link_f: latent variables link(f)
:type link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata not used in gaussian
:returns: likelihood evaluated for this point
:rtype: float
### Response:
def pdf_link(self, link_f, y, Y_metadata=None):
"""
Likelihood function given link(f)
.. math::
\\ln p(y_{i}|\\lambda(f_{i})) = -\\frac{N \\ln 2\\pi}{2} - \\frac{\\ln |K|}{2} - \\frac{(y_{i} - \\lambda(f_{i}))^{T}\\sigma^{-2}(y_{i} - \\lambda(f_{i}))}{2}
:param link_f: latent variables link(f)
:type link_f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata not used in gaussian
:returns: likelihood evaluated for this point
:rtype: float
"""
#Assumes no covariance, exp, sum, log for numerical stability
return np.exp(np.sum(np.log(stats.norm.pdf(y, link_f, np.sqrt(self.variance))))) |
def set_status(self, status):
"""
Updates the status text
Args:
status (int): The offline/starting/online status of Modis
0: offline, 1: starting, 2: online
"""
text = ""
colour = "#FFFFFF"
if status == 0:
text = "OFFLINE"
colour = "#EF9A9A"
elif status == 1:
text = "STARTING"
colour = "#FFE082"
elif status == 2:
text = "ONLINE"
colour = "#A5D6A7"
self.status.set(text)
self.statusbar.config(background=colour) | Updates the status text
Args:
status (int): The offline/starting/online status of Modis
0: offline, 1: starting, 2: online | Below is the the instruction that describes the task:
### Input:
Updates the status text
Args:
status (int): The offline/starting/online status of Modis
0: offline, 1: starting, 2: online
### Response:
def set_status(self, status):
"""
Updates the status text
Args:
status (int): The offline/starting/online status of Modis
0: offline, 1: starting, 2: online
"""
text = ""
colour = "#FFFFFF"
if status == 0:
text = "OFFLINE"
colour = "#EF9A9A"
elif status == 1:
text = "STARTING"
colour = "#FFE082"
elif status == 2:
text = "ONLINE"
colour = "#A5D6A7"
self.status.set(text)
self.statusbar.config(background=colour) |
def resample(source_area, data, destination_area,
resampler=None, **kwargs):
"""Do the resampling."""
if 'resampler_class' in kwargs:
import warnings
warnings.warn("'resampler_class' is deprecated, use 'resampler'",
DeprecationWarning)
resampler = kwargs.pop('resampler_class')
if not isinstance(resampler, BaseResampler):
# we don't use the first argument (cache key)
_, resampler_instance = prepare_resampler(source_area,
destination_area,
resampler)
else:
resampler_instance = resampler
if isinstance(data, list):
res = [resampler_instance.resample(ds, **kwargs) for ds in data]
else:
res = resampler_instance.resample(data, **kwargs)
return res | Do the resampling. | Below is the the instruction that describes the task:
### Input:
Do the resampling.
### Response:
def resample(source_area, data, destination_area,
resampler=None, **kwargs):
"""Do the resampling."""
if 'resampler_class' in kwargs:
import warnings
warnings.warn("'resampler_class' is deprecated, use 'resampler'",
DeprecationWarning)
resampler = kwargs.pop('resampler_class')
if not isinstance(resampler, BaseResampler):
# we don't use the first argument (cache key)
_, resampler_instance = prepare_resampler(source_area,
destination_area,
resampler)
else:
resampler_instance = resampler
if isinstance(data, list):
res = [resampler_instance.resample(ds, **kwargs) for ds in data]
else:
res = resampler_instance.resample(data, **kwargs)
return res |
def main(arguments=None):
"""
*The main function used when ``cl_utils.py`` is run as a single script from the cl, or when installed as a cl command*
"""
# setup the command-line util settings
su = tools(
arguments=arguments,
docString=__doc__,
logLevel="WARNING",
options_first=False,
projectName="transientNamer"
)
arguments, settings, log, dbConn = su.setup()
# tab completion for raw_input
readline.set_completer_delims(' \t\n;')
readline.parse_and_bind("tab: complete")
readline.set_completer(tab_complete)
# unpack remaining cl arguments using `exec` to setup the variable names
# automatically
for arg, val in arguments.iteritems():
if arg[0] == "-":
varname = arg.replace("-", "") + "Flag"
else:
varname = arg.replace("<", "").replace(">", "")
if isinstance(val, str) or isinstance(val, unicode):
exec(varname + " = '%s'" % (val,))
else:
exec(varname + " = %s" % (val,))
if arg == "--dbConn":
dbConn = val
log.debug('%s = %s' % (varname, val,))
## START LOGGING ##
startTime = times.get_now_sql_datetime()
log.info(
'--- STARTING TO RUN THE cl_utils.py AT %s' %
(startTime,))
if search or new or cone:
if ra:
tns = transientNamer.search(
log=log,
ra=ra,
dec=dec,
radiusArcsec=arcsecRadius,
comments=withCommentsFlag
)
if name:
tns = transientNamer.search(
log=log,
name=name,
comments=withCommentsFlag
)
if discInLastDays:
tns = transientNamer.search(
log=log,
discInLastDays=discInLastDays,
comments=withCommentsFlag
)
# Recursively create missing directories
if outputFlag and not os.path.exists(outputFlag):
os.makedirs(outputFlag)
if tableNamePrefix:
sources, phot, spec, files = tns.mysql(
tableNamePrefix=tableNamePrefix, dirPath=outputFlag)
numSources = len(sources.split("\n")) - 1
elif not render or render == "table":
sources, phot, spec, files = tns.table(dirPath=outputFlag)
numSources = len(sources.split("\n")) - 4
elif render == "csv":
sources, phot, spec, files = tns.csv(dirPath=outputFlag)
numSources = len(sources.split("\n")) - 1
elif render == "json":
sources, phot, spec, files = tns.json(dirPath=outputFlag)
numSources = len(sources.split("{")) - 1
elif render == "yaml":
sources, phot, spec, files = tns.yaml(dirPath=outputFlag)
numSources = len(sources.split("\n-"))
elif render == "markdown":
sources, phot, spec, files = tns.markdown(dirPath=outputFlag)
numSources = len(sources.split("\n")) - 2
if numSources == 1:
print "%(numSources)s transient found" % locals()
elif numSources > 1:
print "%(numSources)s transients found" % locals()
if not outputFlag:
print "\n# Matched Transients"
print sources
print "\n# Transient Photometry"
print phot
print "\n# Transient Spectra"
print spec
print "\n# Transient Supplementary Files"
print files
print "\n# Original TNS Search URL"
print tns.url
# CALL FUNCTIONS/OBJECTS
if "dbConn" in locals() and dbConn:
dbConn.commit()
dbConn.close()
## FINISH LOGGING ##
endTime = times.get_now_sql_datetime()
runningTime = times.calculate_time_difference(startTime, endTime)
log.info('-- FINISHED ATTEMPT TO RUN THE cl_utils.py AT %s (RUNTIME: %s) --' %
(endTime, runningTime, ))
return | *The main function used when ``cl_utils.py`` is run as a single script from the cl, or when installed as a cl command* | Below is the the instruction that describes the task:
### Input:
*The main function used when ``cl_utils.py`` is run as a single script from the cl, or when installed as a cl command*
### Response:
def main(arguments=None):
"""
*The main function used when ``cl_utils.py`` is run as a single script from the cl, or when installed as a cl command*
"""
# setup the command-line util settings
su = tools(
arguments=arguments,
docString=__doc__,
logLevel="WARNING",
options_first=False,
projectName="transientNamer"
)
arguments, settings, log, dbConn = su.setup()
# tab completion for raw_input
readline.set_completer_delims(' \t\n;')
readline.parse_and_bind("tab: complete")
readline.set_completer(tab_complete)
# unpack remaining cl arguments using `exec` to setup the variable names
# automatically
for arg, val in arguments.iteritems():
if arg[0] == "-":
varname = arg.replace("-", "") + "Flag"
else:
varname = arg.replace("<", "").replace(">", "")
if isinstance(val, str) or isinstance(val, unicode):
exec(varname + " = '%s'" % (val,))
else:
exec(varname + " = %s" % (val,))
if arg == "--dbConn":
dbConn = val
log.debug('%s = %s' % (varname, val,))
## START LOGGING ##
startTime = times.get_now_sql_datetime()
log.info(
'--- STARTING TO RUN THE cl_utils.py AT %s' %
(startTime,))
if search or new or cone:
if ra:
tns = transientNamer.search(
log=log,
ra=ra,
dec=dec,
radiusArcsec=arcsecRadius,
comments=withCommentsFlag
)
if name:
tns = transientNamer.search(
log=log,
name=name,
comments=withCommentsFlag
)
if discInLastDays:
tns = transientNamer.search(
log=log,
discInLastDays=discInLastDays,
comments=withCommentsFlag
)
# Recursively create missing directories
if outputFlag and not os.path.exists(outputFlag):
os.makedirs(outputFlag)
if tableNamePrefix:
sources, phot, spec, files = tns.mysql(
tableNamePrefix=tableNamePrefix, dirPath=outputFlag)
numSources = len(sources.split("\n")) - 1
elif not render or render == "table":
sources, phot, spec, files = tns.table(dirPath=outputFlag)
numSources = len(sources.split("\n")) - 4
elif render == "csv":
sources, phot, spec, files = tns.csv(dirPath=outputFlag)
numSources = len(sources.split("\n")) - 1
elif render == "json":
sources, phot, spec, files = tns.json(dirPath=outputFlag)
numSources = len(sources.split("{")) - 1
elif render == "yaml":
sources, phot, spec, files = tns.yaml(dirPath=outputFlag)
numSources = len(sources.split("\n-"))
elif render == "markdown":
sources, phot, spec, files = tns.markdown(dirPath=outputFlag)
numSources = len(sources.split("\n")) - 2
if numSources == 1:
print "%(numSources)s transient found" % locals()
elif numSources > 1:
print "%(numSources)s transients found" % locals()
if not outputFlag:
print "\n# Matched Transients"
print sources
print "\n# Transient Photometry"
print phot
print "\n# Transient Spectra"
print spec
print "\n# Transient Supplementary Files"
print files
print "\n# Original TNS Search URL"
print tns.url
# CALL FUNCTIONS/OBJECTS
if "dbConn" in locals() and dbConn:
dbConn.commit()
dbConn.close()
## FINISH LOGGING ##
endTime = times.get_now_sql_datetime()
runningTime = times.calculate_time_difference(startTime, endTime)
log.info('-- FINISHED ATTEMPT TO RUN THE cl_utils.py AT %s (RUNTIME: %s) --' %
(endTime, runningTime, ))
return |
def find_elements_by_class_name(self, name):
"""
Finds elements by class name.
:Args:
- name: The class name of the elements to find.
:Returns:
- list of WebElement - a list with elements if any was found. An
empty list if not
:Usage:
::
elements = driver.find_elements_by_class_name('foo')
"""
return self.find_elements(by=By.CLASS_NAME, value=name) | Finds elements by class name.
:Args:
- name: The class name of the elements to find.
:Returns:
- list of WebElement - a list with elements if any was found. An
empty list if not
:Usage:
::
elements = driver.find_elements_by_class_name('foo') | Below is the instruction that describes the task:
### Input:
Finds elements by class name.
:Args:
- name: The class name of the elements to find.
:Returns:
- list of WebElement - a list with elements if any was found. An
empty list if not
:Usage:
::
elements = driver.find_elements_by_class_name('foo')
### Response:
def find_elements_by_class_name(self, name):
"""
Finds elements by class name.
:Args:
- name: The class name of the elements to find.
:Returns:
- list of WebElement - a list with elements if any was found. An
empty list if not
:Usage:
::
elements = driver.find_elements_by_class_name('foo')
"""
return self.find_elements(by=By.CLASS_NAME, value=name) |
def add_reaction(self, reaction_id):
"""Add reaction to model"""
if reaction_id in self._reaction_set:
return
reaction = self._database.get_reaction(reaction_id)
self._reaction_set.add(reaction_id)
for compound, _ in reaction.compounds:
        self._compound_set.add(compound) | Add reaction to model | Below is the instruction that describes the task:
### Input:
Add reaction to model
### Response:
def add_reaction(self, reaction_id):
"""Add reaction to model"""
if reaction_id in self._reaction_set:
return
reaction = self._database.get_reaction(reaction_id)
self._reaction_set.add(reaction_id)
for compound, _ in reaction.compounds:
self._compound_set.add(compound) |
def from_json(cls, key):
"""Creates a RFC 7517 JWK from the standard JSON format.
:param key: The RFC 7517 representation of a JWK.
"""
obj = cls()
try:
jkey = json_decode(key)
except Exception as e: # pylint: disable=broad-except
raise InvalidJWKValue(e)
obj.import_key(**jkey)
return obj | Creates a RFC 7517 JWK from the standard JSON format.
:param key: The RFC 7517 representation of a JWK. | Below is the instruction that describes the task:
### Input:
Creates a RFC 7517 JWK from the standard JSON format.
:param key: The RFC 7517 representation of a JWK.
### Response:
def from_json(cls, key):
"""Creates a RFC 7517 JWK from the standard JSON format.
:param key: The RFC 7517 representation of a JWK.
"""
obj = cls()
try:
jkey = json_decode(key)
except Exception as e: # pylint: disable=broad-except
raise InvalidJWKValue(e)
obj.import_key(**jkey)
return obj |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.