code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def schema():
"""Provide schema for shell configuration."""
return Schema({
'script': And(Or(type(' '), type(u' ')), len),
Optional('title', default=''): str,
Optional('model', default={}): {Optional(And(str, len)): object},
Optional('env', default={}): {Optional(And(str, len)): And(str, len)},
Optional('item', default=None): object,
Optional('dry_run', default=False): bool,
Optional('debug', default=False): bool,
Optional('strict', default=False): bool,
Optional('variables', default={}): {
Optional(And(Or(type(' '), type(u' ')), len, Regex(r'([a-zA-Z][_a-zA-Z]*)'))):
Or(type(' '), type(u' '))
},
Optional('temporary_scripts_path', default=''): Or(type(''), type(u'')),
Optional('internal', default=False): bool
}) | Provide schema for shell configuration. | Below is the instruction that describes the task:
### Input:
Provide schema for shell configuration.
### Response:
def schema():
"""Provide schema for shell configuration."""
return Schema({
'script': And(Or(type(' '), type(u' ')), len),
Optional('title', default=''): str,
Optional('model', default={}): {Optional(And(str, len)): object},
Optional('env', default={}): {Optional(And(str, len)): And(str, len)},
Optional('item', default=None): object,
Optional('dry_run', default=False): bool,
Optional('debug', default=False): bool,
Optional('strict', default=False): bool,
Optional('variables', default={}): {
Optional(And(Or(type(' '), type(u' ')), len, Regex(r'([a-zA-Z][_a-zA-Z]*)'))):
Or(type(' '), type(u' '))
},
Optional('temporary_scripts_path', default=''): Or(type(''), type(u'')),
Optional('internal', default=False): bool
}) |
def cast(attrs, inputs, proto_obj):
""" Cast input to a given dtype"""
try:
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
except ImportError:
raise ImportError("Onnx and protobuf need to be installed. "
+ "Instructions to install - https://github.com/onnx/onnx")
new_attrs = translation_utils._fix_attribute_names(attrs, {'to' : 'dtype'})
new_attrs['dtype'] = TENSOR_TYPE_TO_NP_TYPE[int(new_attrs['dtype'])]
return 'cast', new_attrs, inputs | Cast input to a given dtype | Below is the instruction that describes the task:
### Input:
Cast input to a given dtype
### Response:
def cast(attrs, inputs, proto_obj):
""" Cast input to a given dtype"""
try:
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
except ImportError:
raise ImportError("Onnx and protobuf need to be installed. "
+ "Instructions to install - https://github.com/onnx/onnx")
new_attrs = translation_utils._fix_attribute_names(attrs, {'to' : 'dtype'})
new_attrs['dtype'] = TENSOR_TYPE_TO_NP_TYPE[int(new_attrs['dtype'])]
return 'cast', new_attrs, inputs |
def capitalized_words_percent(s):
'''Returns capitalized words percent.'''
s = to_unicode(s, precise=True)
words = re.split('\s', s)
words = [w for w in words if w.strip()]
words = [w for w in words if len(w) > 2]
capitalized_words_counter = 0
valid_words_counter = 0
for word in words:
if not INVALID_WORD_START.match(word):
valid_words_counter += 1
if word[0].isupper() and not word[1].isupper():
capitalized_words_counter += 1
if valid_words_counter > 0 and len(words) > 1:
return 100 * float(capitalized_words_counter) / valid_words_counter
return 0 | Returns capitalized words percent. | Below is the instruction that describes the task:
### Input:
Returns capitalized words percent.
### Response:
def capitalized_words_percent(s):
'''Returns capitalized words percent.'''
s = to_unicode(s, precise=True)
words = re.split('\s', s)
words = [w for w in words if w.strip()]
words = [w for w in words if len(w) > 2]
capitalized_words_counter = 0
valid_words_counter = 0
for word in words:
if not INVALID_WORD_START.match(word):
valid_words_counter += 1
if word[0].isupper() and not word[1].isupper():
capitalized_words_counter += 1
if valid_words_counter > 0 and len(words) > 1:
return 100 * float(capitalized_words_counter) / valid_words_counter
return 0 |
def _get_response_frame(self, uuid):
"""Get a response frame.
:param str uuid: Rpc Identifier
:return:
"""
frame = None
frames = self._response.get(uuid, None)
if frames:
frame = frames.pop(0)
return frame | Get a response frame.
:param str uuid: Rpc Identifier
:return: | Below is the instruction that describes the task:
### Input:
Get a response frame.
:param str uuid: Rpc Identifier
:return:
### Response:
def _get_response_frame(self, uuid):
"""Get a response frame.
:param str uuid: Rpc Identifier
:return:
"""
frame = None
frames = self._response.get(uuid, None)
if frames:
frame = frames.pop(0)
return frame |
def to_representation(self, failure_line):
"""
Manually add matches our wrapper of the TLEMetadata -> TLE relation.
I could not work out how to do this multiple relation jump with DRF (or
even if it was possible) so using this manual method instead.
"""
try:
matches = failure_line.error.matches.all()
except AttributeError: # failure_line.error can return None
matches = []
tle_serializer = TextLogErrorMatchSerializer(matches, many=True)
classified_failures = models.ClassifiedFailure.objects.filter(error_matches__in=matches)
cf_serializer = ClassifiedFailureSerializer(classified_failures, many=True)
response = super().to_representation(failure_line)
response['matches'] = tle_serializer.data
response['classified_failures'] = cf_serializer.data
return response | Manually add matches our wrapper of the TLEMetadata -> TLE relation.
I could not work out how to do this multiple relation jump with DRF (or
even if it was possible) so using this manual method instead. | Below is the instruction that describes the task:
### Input:
Manually add matches our wrapper of the TLEMetadata -> TLE relation.
I could not work out how to do this multiple relation jump with DRF (or
even if it was possible) so using this manual method instead.
### Response:
def to_representation(self, failure_line):
"""
Manually add matches our wrapper of the TLEMetadata -> TLE relation.
I could not work out how to do this multiple relation jump with DRF (or
even if it was possible) so using this manual method instead.
"""
try:
matches = failure_line.error.matches.all()
except AttributeError: # failure_line.error can return None
matches = []
tle_serializer = TextLogErrorMatchSerializer(matches, many=True)
classified_failures = models.ClassifiedFailure.objects.filter(error_matches__in=matches)
cf_serializer = ClassifiedFailureSerializer(classified_failures, many=True)
response = super().to_representation(failure_line)
response['matches'] = tle_serializer.data
response['classified_failures'] = cf_serializer.data
return response |
def setHoverBackground( self, column, brush ):
"""
Returns the brush to use when coloring when the user hovers over
the item for the given column.
:param column | <int>
brush | <QtGui.QBrush)
"""
self._hoverBackground[column] = QtGui.QBrush(brush) | Returns the brush to use when coloring when the user hovers over
the item for the given column.
:param column | <int>
brush | <QtGui.QBrush) | Below is the instruction that describes the task:
### Input:
Returns the brush to use when coloring when the user hovers over
the item for the given column.
:param column | <int>
brush | <QtGui.QBrush)
### Response:
def setHoverBackground( self, column, brush ):
"""
Returns the brush to use when coloring when the user hovers over
the item for the given column.
:param column | <int>
brush | <QtGui.QBrush)
"""
self._hoverBackground[column] = QtGui.QBrush(brush) |
def updateproduct(self, category_id, product, position=None):
"""
Update assigned product
:param category_id: ID of a category
:param product: ID or Code of the product
:param position: Position of product in category
:return: boolean
"""
return bool(self.call(
'catalog_category.updateProduct', [category_id, product, position])
) | Update assigned product
:param category_id: ID of a category
:param product: ID or Code of the product
:param position: Position of product in category
:return: boolean | Below is the instruction that describes the task:
### Input:
Update assigned product
:param category_id: ID of a category
:param product: ID or Code of the product
:param position: Position of product in category
:return: boolean
### Response:
def updateproduct(self, category_id, product, position=None):
"""
Update assigned product
:param category_id: ID of a category
:param product: ID or Code of the product
:param position: Position of product in category
:return: boolean
"""
return bool(self.call(
'catalog_category.updateProduct', [category_id, product, position])
) |
def file_flags(self):
"""Return the file flags attribute of the BFD file being processed."""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.FILE_FLAGS) | Return the file flags attribute of the BFD file being processed. | Below is the instruction that describes the task:
### Input:
Return the file flags attribute of the BFD file being processed.
### Response:
def file_flags(self):
"""Return the file flags attribute of the BFD file being processed."""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.FILE_FLAGS) |
def to_utc_date(date):
"""Convert a datetime object from local to UTC format
>>> import datetime
>>> d = datetime.datetime(2017, 8, 15, 18, 24, 31)
>>> to_utc_date(d)
datetime.datetime(2017, 8, 16, 1, 24, 31)
Args:
date (`datetime`): Input datetime object
Returns:
`datetime`
"""
return datetime.utcfromtimestamp(float(date.strftime('%s'))).replace(tzinfo=None) if date else None | Convert a datetime object from local to UTC format
>>> import datetime
>>> d = datetime.datetime(2017, 8, 15, 18, 24, 31)
>>> to_utc_date(d)
datetime.datetime(2017, 8, 16, 1, 24, 31)
Args:
date (`datetime`): Input datetime object
Returns:
`datetime` | Below is the instruction that describes the task:
### Input:
Convert a datetime object from local to UTC format
>>> import datetime
>>> d = datetime.datetime(2017, 8, 15, 18, 24, 31)
>>> to_utc_date(d)
datetime.datetime(2017, 8, 16, 1, 24, 31)
Args:
date (`datetime`): Input datetime object
Returns:
`datetime`
### Response:
def to_utc_date(date):
"""Convert a datetime object from local to UTC format
>>> import datetime
>>> d = datetime.datetime(2017, 8, 15, 18, 24, 31)
>>> to_utc_date(d)
datetime.datetime(2017, 8, 16, 1, 24, 31)
Args:
date (`datetime`): Input datetime object
Returns:
`datetime`
"""
return datetime.utcfromtimestamp(float(date.strftime('%s'))).replace(tzinfo=None) if date else None |
def load_label(self, idx):
"""
Load label image as 1 x height x width integer array of label indices.
The leading singleton dimension is required by the loss.
The full 400 labels are translated to the 59 class task labels.
"""
label_400 = scipy.io.loadmat('{}/trainval/{}.mat'.format(self.context_dir, idx))['LabelMap']
label = np.zeros_like(label_400, dtype=np.uint8)
for idx, l in enumerate(self.labels_59):
idx_400 = self.labels_400.index(l) + 1
label[label_400 == idx_400] = idx + 1
label = label[np.newaxis, ...]
return label | Load label image as 1 x height x width integer array of label indices.
The leading singleton dimension is required by the loss.
The full 400 labels are translated to the 59 class task labels. | Below is the instruction that describes the task:
### Input:
Load label image as 1 x height x width integer array of label indices.
The leading singleton dimension is required by the loss.
The full 400 labels are translated to the 59 class task labels.
### Response:
def load_label(self, idx):
"""
Load label image as 1 x height x width integer array of label indices.
The leading singleton dimension is required by the loss.
The full 400 labels are translated to the 59 class task labels.
"""
label_400 = scipy.io.loadmat('{}/trainval/{}.mat'.format(self.context_dir, idx))['LabelMap']
label = np.zeros_like(label_400, dtype=np.uint8)
for idx, l in enumerate(self.labels_59):
idx_400 = self.labels_400.index(l) + 1
label[label_400 == idx_400] = idx + 1
label = label[np.newaxis, ...]
return label |
def apply(self, func, shortcut=False, args=(), **kwargs):
"""Apply a function over each array in the group and concatenate them
together into a new array.
`func` is called like `func(ar, *args, **kwargs)` for each array `ar`
in this group.
Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how
to stack together the array. The rule is:
1. If the dimension along which the group coordinate is defined is
still in the first grouped array after applying `func`, then stack
over this dimension.
2. Otherwise, stack over the new dimension given by name of this
grouping (the argument to the `groupby` function).
Parameters
----------
func : function
Callable to apply to each array.
shortcut : bool, optional
Whether or not to shortcut evaluation under the assumptions that:
(1) The action of `func` does not depend on any of the array
metadata (attributes or coordinates) but only on the data and
dimensions.
(2) The action of `func` creates arrays with homogeneous metadata,
that is, with the same dimensions and attributes.
If these conditions are satisfied `shortcut` provides significant
speedup. This should be the case for many common groupby operations
(e.g., applying numpy ufuncs).
args : tuple, optional
Positional arguments passed to `func`.
**kwargs
Used to call `func(ar, **kwargs)` for each array `ar`.
Returns
-------
applied : DataArray or DataArray
The result of splitting, applying and combining this array.
"""
if shortcut:
grouped = self._iter_grouped_shortcut()
else:
grouped = self._iter_grouped()
applied = (maybe_wrap_array(arr, func(arr, *args, **kwargs))
for arr in grouped)
return self._combine(applied, shortcut=shortcut) | Apply a function over each array in the group and concatenate them
together into a new array.
`func` is called like `func(ar, *args, **kwargs)` for each array `ar`
in this group.
Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how
to stack together the array. The rule is:
1. If the dimension along which the group coordinate is defined is
still in the first grouped array after applying `func`, then stack
over this dimension.
2. Otherwise, stack over the new dimension given by name of this
grouping (the argument to the `groupby` function).
Parameters
----------
func : function
Callable to apply to each array.
shortcut : bool, optional
Whether or not to shortcut evaluation under the assumptions that:
(1) The action of `func` does not depend on any of the array
metadata (attributes or coordinates) but only on the data and
dimensions.
(2) The action of `func` creates arrays with homogeneous metadata,
that is, with the same dimensions and attributes.
If these conditions are satisfied `shortcut` provides significant
speedup. This should be the case for many common groupby operations
(e.g., applying numpy ufuncs).
args : tuple, optional
Positional arguments passed to `func`.
**kwargs
Used to call `func(ar, **kwargs)` for each array `ar`.
Returns
-------
applied : DataArray or DataArray
The result of splitting, applying and combining this array. | Below is the the instruction that describes the task:
### Input:
Apply a function over each array in the group and concatenate them
together into a new array.
`func` is called like `func(ar, *args, **kwargs)` for each array `ar`
in this group.
Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how
to stack together the array. The rule is:
1. If the dimension along which the group coordinate is defined is
still in the first grouped array after applying `func`, then stack
over this dimension.
2. Otherwise, stack over the new dimension given by name of this
grouping (the argument to the `groupby` function).
Parameters
----------
func : function
Callable to apply to each array.
shortcut : bool, optional
Whether or not to shortcut evaluation under the assumptions that:
(1) The action of `func` does not depend on any of the array
metadata (attributes or coordinates) but only on the data and
dimensions.
(2) The action of `func` creates arrays with homogeneous metadata,
that is, with the same dimensions and attributes.
If these conditions are satisfied `shortcut` provides significant
speedup. This should be the case for many common groupby operations
(e.g., applying numpy ufuncs).
args : tuple, optional
Positional arguments passed to `func`.
**kwargs
Used to call `func(ar, **kwargs)` for each array `ar`.
Returns
-------
applied : DataArray or DataArray
The result of splitting, applying and combining this array.
### Response:
def apply(self, func, shortcut=False, args=(), **kwargs):
"""Apply a function over each array in the group and concatenate them
together into a new array.
`func` is called like `func(ar, *args, **kwargs)` for each array `ar`
in this group.
Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how
to stack together the array. The rule is:
1. If the dimension along which the group coordinate is defined is
still in the first grouped array after applying `func`, then stack
over this dimension.
2. Otherwise, stack over the new dimension given by name of this
grouping (the argument to the `groupby` function).
Parameters
----------
func : function
Callable to apply to each array.
shortcut : bool, optional
Whether or not to shortcut evaluation under the assumptions that:
(1) The action of `func` does not depend on any of the array
metadata (attributes or coordinates) but only on the data and
dimensions.
(2) The action of `func` creates arrays with homogeneous metadata,
that is, with the same dimensions and attributes.
If these conditions are satisfied `shortcut` provides significant
speedup. This should be the case for many common groupby operations
(e.g., applying numpy ufuncs).
args : tuple, optional
Positional arguments passed to `func`.
**kwargs
Used to call `func(ar, **kwargs)` for each array `ar`.
Returns
-------
applied : DataArray or DataArray
The result of splitting, applying and combining this array.
"""
if shortcut:
grouped = self._iter_grouped_shortcut()
else:
grouped = self._iter_grouped()
applied = (maybe_wrap_array(arr, func(arr, *args, **kwargs))
for arr in grouped)
return self._combine(applied, shortcut=shortcut) |
def get(self, key):
"""
Get a key and its CAS value from server. If the value isn't cached, return
(None, None).
:param key: Key's name
:type key: six.string_types
:return: Returns (value, cas).
:rtype: object
"""
logger.debug('Getting key %s', key)
data = struct.pack(self.HEADER_STRUCT +
self.COMMANDS['get']['struct'] % (len(key)),
self.MAGIC['request'],
self.COMMANDS['get']['command'],
len(key), 0, 0, 0, len(key), 0, 0, str_to_bytes(key))
self._send(data)
(magic, opcode, keylen, extlen, datatype, status, bodylen, opaque,
cas, extra_content) = self._get_response()
logger.debug('Value Length: %d. Body length: %d. Data type: %d',
extlen, bodylen, datatype)
if status != self.STATUS['success']:
if status == self.STATUS['key_not_found']:
logger.debug('Key not found. Message: %s', extra_content)
return None, None
if status == self.STATUS['server_disconnected']:
return None, None
raise MemcachedException('Code: %d Message: %s' % (status, extra_content), status)
flags, value = struct.unpack('!L%ds' % (bodylen - 4, ), extra_content)
return self.deserialize(value, flags), cas | Get a key and its CAS value from server. If the value isn't cached, return
(None, None).
:param key: Key's name
:type key: six.string_types
:return: Returns (value, cas).
:rtype: object | Below is the instruction that describes the task:
### Input:
Get a key and its CAS value from server. If the value isn't cached, return
(None, None).
:param key: Key's name
:type key: six.string_types
:return: Returns (value, cas).
:rtype: object
### Response:
def get(self, key):
"""
Get a key and its CAS value from server. If the value isn't cached, return
(None, None).
:param key: Key's name
:type key: six.string_types
:return: Returns (value, cas).
:rtype: object
"""
logger.debug('Getting key %s', key)
data = struct.pack(self.HEADER_STRUCT +
self.COMMANDS['get']['struct'] % (len(key)),
self.MAGIC['request'],
self.COMMANDS['get']['command'],
len(key), 0, 0, 0, len(key), 0, 0, str_to_bytes(key))
self._send(data)
(magic, opcode, keylen, extlen, datatype, status, bodylen, opaque,
cas, extra_content) = self._get_response()
logger.debug('Value Length: %d. Body length: %d. Data type: %d',
extlen, bodylen, datatype)
if status != self.STATUS['success']:
if status == self.STATUS['key_not_found']:
logger.debug('Key not found. Message: %s', extra_content)
return None, None
if status == self.STATUS['server_disconnected']:
return None, None
raise MemcachedException('Code: %d Message: %s' % (status, extra_content), status)
flags, value = struct.unpack('!L%ds' % (bodylen - 4, ), extra_content)
return self.deserialize(value, flags), cas |
def encode_chr(chromosome):
"""Encodes chromosomes.
:param chromosome: the chromosome to encode.
:type chromosome: str
:returns: the encoded chromosome as :py:class:`int`.
It changes ``X``, ``Y``, ``XY`` and ``MT`` to ``23``, ``24``, ``25`` and
``26``, respectively. It changes everything else as :py:class:`int`.
If :py:class:`ValueError` is raised, then :py:class:`ProgramError` is
also raised. If a chromosome as a value below 1 or above 26, a
:py:class:`ProgramError` is raised.
.. testsetup::
from pyGenClean.SexCheck.gender_plot import encode_chr
.. doctest::
>>> [encode_chr(str(i)) for i in range(0, 11)]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
>>> [encode_chr(str(i)) for i in range(11, 21)]
[11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
>>> [encode_chr(str(i)) for i in range(21, 27)]
[21, 22, 23, 24, 25, 26]
>>> [encode_chr(i) for i in ["X", "Y", "XY", "MT"]]
[23, 24, 25, 26]
>>> encode_chr("27")
Traceback (most recent call last):
...
ProgramError: 27: invalid chromosome
>>> encode_chr("XX")
Traceback (most recent call last):
...
ProgramError: XX: invalid chromosome
"""
if chromosome == "X":
return 23
if chromosome == "Y":
return 24
if chromosome == "XY":
return 25
if chromosome == "MT":
return 26
try:
new_chromosome = int(chromosome)
if new_chromosome < 0 or new_chromosome > 26:
msg = "{}: invalid chromosome".format(chromosome)
raise ProgramError(msg)
return new_chromosome
except ValueError:
msg = "{}: invalid chromosome".format(chromosome)
raise ProgramError(msg) | Encodes chromosomes.
:param chromosome: the chromosome to encode.
:type chromosome: str
:returns: the encoded chromosome as :py:class:`int`.
It changes ``X``, ``Y``, ``XY`` and ``MT`` to ``23``, ``24``, ``25`` and
``26``, respectively. It changes everything else as :py:class:`int`.
If :py:class:`ValueError` is raised, then :py:class:`ProgramError` is
also raised. If a chromosome as a value below 1 or above 26, a
:py:class:`ProgramError` is raised.
.. testsetup::
from pyGenClean.SexCheck.gender_plot import encode_chr
.. doctest::
>>> [encode_chr(str(i)) for i in range(0, 11)]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
>>> [encode_chr(str(i)) for i in range(11, 21)]
[11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
>>> [encode_chr(str(i)) for i in range(21, 27)]
[21, 22, 23, 24, 25, 26]
>>> [encode_chr(i) for i in ["X", "Y", "XY", "MT"]]
[23, 24, 25, 26]
>>> encode_chr("27")
Traceback (most recent call last):
...
ProgramError: 27: invalid chromosome
>>> encode_chr("XX")
Traceback (most recent call last):
...
ProgramError: XX: invalid chromosome | Below is the instruction that describes the task:
### Input:
Encodes chromosomes.
:param chromosome: the chromosome to encode.
:type chromosome: str
:returns: the encoded chromosome as :py:class:`int`.
It changes ``X``, ``Y``, ``XY`` and ``MT`` to ``23``, ``24``, ``25`` and
``26``, respectively. It changes everything else as :py:class:`int`.
If :py:class:`ValueError` is raised, then :py:class:`ProgramError` is
also raised. If a chromosome as a value below 1 or above 26, a
:py:class:`ProgramError` is raised.
.. testsetup::
from pyGenClean.SexCheck.gender_plot import encode_chr
.. doctest::
>>> [encode_chr(str(i)) for i in range(0, 11)]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
>>> [encode_chr(str(i)) for i in range(11, 21)]
[11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
>>> [encode_chr(str(i)) for i in range(21, 27)]
[21, 22, 23, 24, 25, 26]
>>> [encode_chr(i) for i in ["X", "Y", "XY", "MT"]]
[23, 24, 25, 26]
>>> encode_chr("27")
Traceback (most recent call last):
...
ProgramError: 27: invalid chromosome
>>> encode_chr("XX")
Traceback (most recent call last):
...
ProgramError: XX: invalid chromosome
### Response:
def encode_chr(chromosome):
"""Encodes chromosomes.
:param chromosome: the chromosome to encode.
:type chromosome: str
:returns: the encoded chromosome as :py:class:`int`.
It changes ``X``, ``Y``, ``XY`` and ``MT`` to ``23``, ``24``, ``25`` and
``26``, respectively. It changes everything else as :py:class:`int`.
If :py:class:`ValueError` is raised, then :py:class:`ProgramError` is
also raised. If a chromosome as a value below 1 or above 26, a
:py:class:`ProgramError` is raised.
.. testsetup::
from pyGenClean.SexCheck.gender_plot import encode_chr
.. doctest::
>>> [encode_chr(str(i)) for i in range(0, 11)]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
>>> [encode_chr(str(i)) for i in range(11, 21)]
[11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
>>> [encode_chr(str(i)) for i in range(21, 27)]
[21, 22, 23, 24, 25, 26]
>>> [encode_chr(i) for i in ["X", "Y", "XY", "MT"]]
[23, 24, 25, 26]
>>> encode_chr("27")
Traceback (most recent call last):
...
ProgramError: 27: invalid chromosome
>>> encode_chr("XX")
Traceback (most recent call last):
...
ProgramError: XX: invalid chromosome
"""
if chromosome == "X":
return 23
if chromosome == "Y":
return 24
if chromosome == "XY":
return 25
if chromosome == "MT":
return 26
try:
new_chromosome = int(chromosome)
if new_chromosome < 0 or new_chromosome > 26:
msg = "{}: invalid chromosome".format(chromosome)
raise ProgramError(msg)
return new_chromosome
except ValueError:
msg = "{}: invalid chromosome".format(chromosome)
raise ProgramError(msg) |
def df_weighted_average_grouped(dataframe, groupe, varlist):
'''
Agrège les résultats de weighted_average_grouped() en une unique dataframe pour la liste de variable 'varlist'.
'''
return DataFrame(
dict([
(var, collapse(dataframe, groupe, var)) for var in varlist
])
) | Agrège les résultats de weighted_average_grouped() en une unique dataframe pour la liste de variable 'varlist'. | Below is the instruction that describes the task:
### Input:
Agrège les résultats de weighted_average_grouped() en une unique dataframe pour la liste de variable 'varlist'.
### Response:
def df_weighted_average_grouped(dataframe, groupe, varlist):
'''
Agrège les résultats de weighted_average_grouped() en une unique dataframe pour la liste de variable 'varlist'.
'''
return DataFrame(
dict([
(var, collapse(dataframe, groupe, var)) for var in varlist
])
) |
def _chunk_getitem(self, chunk_coords, chunk_selection, out, out_selection,
drop_axes=None, fields=None):
"""Obtain part or whole of a chunk.
Parameters
----------
chunk_coords : tuple of ints
Indices of the chunk.
chunk_selection : selection
Location of region within the chunk to extract.
out : ndarray
Array to store result in.
out_selection : selection
Location of region within output array to store results in.
drop_axes : tuple of ints
Axes to squeeze out of the chunk.
fields
TODO
"""
assert len(chunk_coords) == len(self._cdata_shape)
# obtain key for chunk
ckey = self._chunk_key(chunk_coords)
try:
# obtain compressed data for chunk
cdata = self.chunk_store[ckey]
except KeyError:
# chunk not initialized
if self._fill_value is not None:
if fields:
fill_value = self._fill_value[fields]
else:
fill_value = self._fill_value
out[out_selection] = fill_value
else:
if (isinstance(out, np.ndarray) and
not fields and
is_contiguous_selection(out_selection) and
is_total_slice(chunk_selection, self._chunks) and
not self._filters and
self._dtype != object):
dest = out[out_selection]
write_direct = (
dest.flags.writeable and (
(self._order == 'C' and dest.flags.c_contiguous) or
(self._order == 'F' and dest.flags.f_contiguous)
)
)
if write_direct:
# optimization: we want the whole chunk, and the destination is
# contiguous, so we can decompress directly from the chunk
# into the destination array
if self._compressor:
self._compressor.decode(cdata, dest)
else:
chunk = ensure_ndarray(cdata).view(self._dtype)
chunk = chunk.reshape(self._chunks, order=self._order)
np.copyto(dest, chunk)
return
# decode chunk
chunk = self._decode_chunk(cdata)
# select data from chunk
if fields:
chunk = chunk[fields]
tmp = chunk[chunk_selection]
if drop_axes:
tmp = np.squeeze(tmp, axis=drop_axes)
# store selected data in output
out[out_selection] = tmp | Obtain part or whole of a chunk.
Parameters
----------
chunk_coords : tuple of ints
Indices of the chunk.
chunk_selection : selection
Location of region within the chunk to extract.
out : ndarray
Array to store result in.
out_selection : selection
Location of region within output array to store results in.
drop_axes : tuple of ints
Axes to squeeze out of the chunk.
fields
TODO | Below is the instruction that describes the task:
### Input:
Obtain part or whole of a chunk.
Parameters
----------
chunk_coords : tuple of ints
Indices of the chunk.
chunk_selection : selection
Location of region within the chunk to extract.
out : ndarray
Array to store result in.
out_selection : selection
Location of region within output array to store results in.
drop_axes : tuple of ints
Axes to squeeze out of the chunk.
fields
TODO
### Response:
def _chunk_getitem(self, chunk_coords, chunk_selection, out, out_selection,
drop_axes=None, fields=None):
"""Obtain part or whole of a chunk.
Parameters
----------
chunk_coords : tuple of ints
Indices of the chunk.
chunk_selection : selection
Location of region within the chunk to extract.
out : ndarray
Array to store result in.
out_selection : selection
Location of region within output array to store results in.
drop_axes : tuple of ints
Axes to squeeze out of the chunk.
fields
TODO
"""
assert len(chunk_coords) == len(self._cdata_shape)
# obtain key for chunk
ckey = self._chunk_key(chunk_coords)
try:
# obtain compressed data for chunk
cdata = self.chunk_store[ckey]
except KeyError:
# chunk not initialized
if self._fill_value is not None:
if fields:
fill_value = self._fill_value[fields]
else:
fill_value = self._fill_value
out[out_selection] = fill_value
else:
if (isinstance(out, np.ndarray) and
not fields and
is_contiguous_selection(out_selection) and
is_total_slice(chunk_selection, self._chunks) and
not self._filters and
self._dtype != object):
dest = out[out_selection]
write_direct = (
dest.flags.writeable and (
(self._order == 'C' and dest.flags.c_contiguous) or
(self._order == 'F' and dest.flags.f_contiguous)
)
)
if write_direct:
# optimization: we want the whole chunk, and the destination is
# contiguous, so we can decompress directly from the chunk
# into the destination array
if self._compressor:
self._compressor.decode(cdata, dest)
else:
chunk = ensure_ndarray(cdata).view(self._dtype)
chunk = chunk.reshape(self._chunks, order=self._order)
np.copyto(dest, chunk)
return
# decode chunk
chunk = self._decode_chunk(cdata)
# select data from chunk
if fields:
chunk = chunk[fields]
tmp = chunk[chunk_selection]
if drop_axes:
tmp = np.squeeze(tmp, axis=drop_axes)
# store selected data in output
out[out_selection] = tmp |
def init_shutit_map(self):
"""Initializes the module map of shutit based on the modules
we have gathered.
Checks we have core modules
Checks for duplicate module details.
Sets up common config.
Sets up map of modules.
"""
shutit_global.shutit_global_object.yield_to_draw()
modules = self.shutit_modules
# Have we got anything to process outside of special modules?
if len([mod for mod in modules if mod.run_order > 0]) < 1:
self.log(modules,level=logging.DEBUG)
path = ':'.join(self.host['shutit_module_path'])
self.log('\nIf you are new to ShutIt, see:\n\n\thttp://ianmiell.github.io/shutit/\n\nor try running\n\n\tshutit skeleton\n\n',level=logging.INFO)
if path == '':
self.fail('No ShutIt modules aside from core ones found and no ShutIt module path given.\nDid you set --shutit_module_path/-m wrongly?\n') # pragma: no cover
elif path == '.':
self.fail('No modules aside from core ones found and no ShutIt module path given apart from default (.).\n\n- Did you set --shutit_module_path/-m?\n- Is there a STOP* file in your . dir?') # pragma: no cover
else:
self.fail('No modules aside from core ones found and no ShutIt modules in path:\n\n' + path + '\n\nor their subfolders. Check your --shutit_module_path/-m setting and check that there are ShutIt modules below without STOP* files in any relevant directories.') # pragma: no cover
self.log('PHASE: base setup', level=logging.DEBUG)
run_orders = {}
has_core_module = False
for module in modules:
assert isinstance(module, ShutItModule), shutit_util.print_debug()
if module.module_id in self.shutit_map:
self.fail('Duplicated module id: ' + module.module_id + '\n\nYou may want to check your --shutit_module_path setting') # pragma: no cover
if module.run_order in run_orders:
self.fail('Duplicate run order: ' + str(module.run_order) + ' for ' + module.module_id + ' and ' + run_orders[module.run_order].module_id + '\n\nYou may want to check your --shutit_module_path setting') # pragma: no cover
if module.run_order == 0:
has_core_module = True
self.shutit_map[module.module_id] = run_orders[module.run_order] = module
self.shutit_file_map[module.module_id] = get_module_file(self, module)
if not has_core_module:
self.fail('No module with run_order=0 specified! This is required.') | Initializes the module map of shutit based on the modules
we have gathered.
Checks we have core modules
Checks for duplicate module details.
Sets up common config.
Sets up map of modules. | Below is the the instruction that describes the task:
### Input:
Initializes the module map of shutit based on the modules
we have gathered.
Checks we have core modules
Checks for duplicate module details.
Sets up common config.
Sets up map of modules.
### Response:
def init_shutit_map(self):
"""Initializes the module map of shutit based on the modules
we have gathered.
Checks we have core modules
Checks for duplicate module details.
Sets up common config.
Sets up map of modules.
"""
shutit_global.shutit_global_object.yield_to_draw()
modules = self.shutit_modules
# Have we got anything to process outside of special modules?
if len([mod for mod in modules if mod.run_order > 0]) < 1:
self.log(modules,level=logging.DEBUG)
path = ':'.join(self.host['shutit_module_path'])
self.log('\nIf you are new to ShutIt, see:\n\n\thttp://ianmiell.github.io/shutit/\n\nor try running\n\n\tshutit skeleton\n\n',level=logging.INFO)
if path == '':
self.fail('No ShutIt modules aside from core ones found and no ShutIt module path given.\nDid you set --shutit_module_path/-m wrongly?\n') # pragma: no cover
elif path == '.':
self.fail('No modules aside from core ones found and no ShutIt module path given apart from default (.).\n\n- Did you set --shutit_module_path/-m?\n- Is there a STOP* file in your . dir?') # pragma: no cover
else:
self.fail('No modules aside from core ones found and no ShutIt modules in path:\n\n' + path + '\n\nor their subfolders. Check your --shutit_module_path/-m setting and check that there are ShutIt modules below without STOP* files in any relevant directories.') # pragma: no cover
self.log('PHASE: base setup', level=logging.DEBUG)
run_orders = {}
has_core_module = False
for module in modules:
assert isinstance(module, ShutItModule), shutit_util.print_debug()
if module.module_id in self.shutit_map:
self.fail('Duplicated module id: ' + module.module_id + '\n\nYou may want to check your --shutit_module_path setting') # pragma: no cover
if module.run_order in run_orders:
self.fail('Duplicate run order: ' + str(module.run_order) + ' for ' + module.module_id + ' and ' + run_orders[module.run_order].module_id + '\n\nYou may want to check your --shutit_module_path setting') # pragma: no cover
if module.run_order == 0:
has_core_module = True
self.shutit_map[module.module_id] = run_orders[module.run_order] = module
self.shutit_file_map[module.module_id] = get_module_file(self, module)
if not has_core_module:
self.fail('No module with run_order=0 specified! This is required.') |
def eventFilter(self, obj, event):
"""Override eventFilter to catch resize event."""
if obj == self.dataTable and event.type() == QEvent.Resize:
self._resizeVisibleColumnsToContents()
return False | Override eventFilter to catch resize event. | Below is the the instruction that describes the task:
### Input:
Override eventFilter to catch resize event.
### Response:
def eventFilter(self, obj, event):
"""Override eventFilter to catch resize event."""
if obj == self.dataTable and event.type() == QEvent.Resize:
self._resizeVisibleColumnsToContents()
return False |
def safe_call(func, *args, **kwargs):
"""
安全调用
"""
try:
return func(*args, **kwargs)
except Exception as e:
logger.error('exc occur. e: %s, func: %s', e, func, exc_info=True)
# 调用方可以通过 isinstance(e, BaseException) 来判断是否发生了异常
return e | 安全调用 | Below is the the instruction that describes the task:
### Input:
安全调用
### Response:
def safe_call(func, *args, **kwargs):
"""
安全调用
"""
try:
return func(*args, **kwargs)
except Exception as e:
logger.error('exc occur. e: %s, func: %s', e, func, exc_info=True)
# 调用方可以通过 isinstance(e, BaseException) 来判断是否发生了异常
return e |
def create_endpoint(self, config, wait_for_completion=True,
check_interval=30, max_ingestion_time=None):
"""
Create an endpoint
:param config: the config for endpoint
:type config: dict
:param wait_for_completion: if the program should keep running until job finishes
:type wait_for_completion: bool
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:type check_interval: int
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:type max_ingestion_time: int
:return: A response to endpoint creation
"""
response = self.get_conn().create_endpoint(**config)
if wait_for_completion:
self.check_status(config['EndpointName'],
'EndpointStatus',
self.describe_endpoint,
check_interval, max_ingestion_time,
non_terminal_states=self.endpoint_non_terminal_states
)
return response | Create an endpoint
:param config: the config for endpoint
:type config: dict
:param wait_for_completion: if the program should keep running until job finishes
:type wait_for_completion: bool
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:type check_interval: int
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:type max_ingestion_time: int
:return: A response to endpoint creation | Below is the the instruction that describes the task:
### Input:
Create an endpoint
:param config: the config for endpoint
:type config: dict
:param wait_for_completion: if the program should keep running until job finishes
:type wait_for_completion: bool
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:type check_interval: int
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:type max_ingestion_time: int
:return: A response to endpoint creation
### Response:
def create_endpoint(self, config, wait_for_completion=True,
check_interval=30, max_ingestion_time=None):
"""
Create an endpoint
:param config: the config for endpoint
:type config: dict
:param wait_for_completion: if the program should keep running until job finishes
:type wait_for_completion: bool
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:type check_interval: int
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:type max_ingestion_time: int
:return: A response to endpoint creation
"""
response = self.get_conn().create_endpoint(**config)
if wait_for_completion:
self.check_status(config['EndpointName'],
'EndpointStatus',
self.describe_endpoint,
check_interval, max_ingestion_time,
non_terminal_states=self.endpoint_non_terminal_states
)
return response |
def print(self, indent=0):
"""Print self with optional indent."""
text = (
'{indent}{magenta}{name}{none} ({dim}{cls}{none}, '
'default {dim}{default}{none})'
).format(
indent=' ' * indent,
dim=Style.DIM,
magenta=Fore.MAGENTA,
none=Style.RESET_ALL,
name=self.name,
cls=self.cls,
default=self.default
)
if self.description:
text += ':\n' + pretty_description(self.description,
indent=indent + 2)
print(text) | Print self with optional indent. | Below is the the instruction that describes the task:
### Input:
Print self with optional indent.
### Response:
def print(self, indent=0):
"""Print self with optional indent."""
text = (
'{indent}{magenta}{name}{none} ({dim}{cls}{none}, '
'default {dim}{default}{none})'
).format(
indent=' ' * indent,
dim=Style.DIM,
magenta=Fore.MAGENTA,
none=Style.RESET_ALL,
name=self.name,
cls=self.cls,
default=self.default
)
if self.description:
text += ':\n' + pretty_description(self.description,
indent=indent + 2)
print(text) |
def update_alias(self, addressid, data):
"""Update alias address"""
return self.api_call(
ENDPOINTS['aliases']['update'],
dict(addressid=addressid),
body=data) | Update alias address | Below is the the instruction that describes the task:
### Input:
Update alias address
### Response:
def update_alias(self, addressid, data):
"""Update alias address"""
return self.api_call(
ENDPOINTS['aliases']['update'],
dict(addressid=addressid),
body=data) |
def geturi(self, key, **kwargs):
"""
Gets the setting value as a :class:`urllib.parse.ParseResult`.
:rtype: urllib.parse.ParseResult
"""
return self.get(key, cast_func=urlparse, **kwargs) | Gets the setting value as a :class:`urllib.parse.ParseResult`.
:rtype: urllib.parse.ParseResult | Below is the the instruction that describes the task:
### Input:
Gets the setting value as a :class:`urllib.parse.ParseResult`.
:rtype: urllib.parse.ParseResult
### Response:
def geturi(self, key, **kwargs):
"""
Gets the setting value as a :class:`urllib.parse.ParseResult`.
:rtype: urllib.parse.ParseResult
"""
return self.get(key, cast_func=urlparse, **kwargs) |
def record(self):
# type: () -> bytes
'''
A method to generate a string representing this El Torito Entry.
Parameters:
None.
Returns:
String representing this El Torito Entry.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('El Torito Entry not yet initialized')
return struct.pack(self.FMT, self.boot_indicator, self.boot_media_type,
self.load_segment, self.system_type, 0,
self.sector_count, self.load_rba,
self.selection_criteria_type,
self.selection_criteria) | A method to generate a string representing this El Torito Entry.
Parameters:
None.
Returns:
String representing this El Torito Entry. | Below is the the instruction that describes the task:
### Input:
A method to generate a string representing this El Torito Entry.
Parameters:
None.
Returns:
String representing this El Torito Entry.
### Response:
def record(self):
# type: () -> bytes
'''
A method to generate a string representing this El Torito Entry.
Parameters:
None.
Returns:
String representing this El Torito Entry.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('El Torito Entry not yet initialized')
return struct.pack(self.FMT, self.boot_indicator, self.boot_media_type,
self.load_segment, self.system_type, 0,
self.sector_count, self.load_rba,
self.selection_criteria_type,
self.selection_criteria) |
def _compute_term2(self, C, mag, r):
"""
This computes the term f2 equation 8 Drouet & Cotton (2015)
"""
return (C['c4'] + C['c5'] * mag) * \
np.log(np.sqrt(r**2 + C['c6']**2)) + C['c7'] * r | This computes the term f2 equation 8 Drouet & Cotton (2015) | Below is the the instruction that describes the task:
### Input:
This computes the term f2 equation 8 Drouet & Cotton (2015)
### Response:
def _compute_term2(self, C, mag, r):
"""
This computes the term f2 equation 8 Drouet & Cotton (2015)
"""
return (C['c4'] + C['c5'] * mag) * \
np.log(np.sqrt(r**2 + C['c6']**2)) + C['c7'] * r |
def _load_secrets(self):
'''load the secrets credentials file with the Globus OAuthTokenResponse
'''
# Second priority: load from cache
self.auth = self._get_and_update_setting('GLOBUS_AUTH_RESPONSE')
self.transfer = self._get_and_update_setting('GLOBUS_TRANSFER_RESPONSE') | load the secrets credentials file with the Globus OAuthTokenResponse | Below is the the instruction that describes the task:
### Input:
load the secrets credentials file with the Globus OAuthTokenResponse
### Response:
def _load_secrets(self):
'''load the secrets credentials file with the Globus OAuthTokenResponse
'''
# Second priority: load from cache
self.auth = self._get_and_update_setting('GLOBUS_AUTH_RESPONSE')
self.transfer = self._get_and_update_setting('GLOBUS_TRANSFER_RESPONSE') |
def get_node(self, node_id):
"""
Returns a Node instance.
:param node_id: Node identifier
:returns: Node instance
"""
try:
UUID(node_id, version=4)
except ValueError:
raise aiohttp.web.HTTPBadRequest(text="Node ID {} is not a valid UUID".format(node_id))
for node in self._nodes:
if node.id == node_id:
return node
raise aiohttp.web.HTTPNotFound(text="Node ID {} doesn't exist".format(node_id)) | Returns a Node instance.
:param node_id: Node identifier
:returns: Node instance | Below is the the instruction that describes the task:
### Input:
Returns a Node instance.
:param node_id: Node identifier
:returns: Node instance
### Response:
def get_node(self, node_id):
"""
Returns a Node instance.
:param node_id: Node identifier
:returns: Node instance
"""
try:
UUID(node_id, version=4)
except ValueError:
raise aiohttp.web.HTTPBadRequest(text="Node ID {} is not a valid UUID".format(node_id))
for node in self._nodes:
if node.id == node_id:
return node
raise aiohttp.web.HTTPNotFound(text="Node ID {} doesn't exist".format(node_id)) |
def copy(self):
"""
Copy constructor.
"""
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True) | Copy constructor. | Below is the the instruction that describes the task:
### Input:
Copy constructor.
### Response:
def copy(self):
"""
Copy constructor.
"""
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True) |
def authenticate_account(self, identifiers, authc_token, second_factor_token=None):
"""
:type identifiers: SimpleIdentifierCollection or None
:returns: account_id (identifiers) if the account authenticates
:rtype: SimpleIdentifierCollection
"""
msg = ("Authentication submission received for authentication "
"token [" + str(authc_token) + "]")
logger.debug(msg)
# the following conditions verify correct authentication sequence
if not getattr(authc_token, 'identifier', None):
if not identifiers:
msg = "Authentication must be performed in expected sequence."
raise InvalidAuthenticationSequenceException(msg)
authc_token.identifier = identifiers.primary_identifier
# add token metadata before sending it onward:
authc_token.token_info = token_info[authc_token.__class__]
try:
account = self.do_authenticate_account(authc_token)
if (account is None):
msg2 = ("No account returned by any configured realms for "
"submitted authentication token [{0}]".
format(authc_token))
raise AccountException(msg2)
except AdditionalAuthenticationRequired as exc:
if second_factor_token:
return self.authenticate_account(exc.account_id, second_factor_token, None)
self.notify_event(authc_token.identifier, 'AUTHENTICATION.PROGRESS')
raise exc # the security_manager saves subject identifiers
except AccountException:
self.notify_event(authc_token.identifier,
'AUTHENTICATION.ACCOUNT_NOT_FOUND')
raise
except LockedAccountException:
self.notify_event(authc_token.identifier, 'AUTHENTICATION.FAILED')
self.notify_event(authc_token.identifier, 'AUTHENTICATION.ACCOUNT_LOCKED')
raise
except IncorrectCredentialsException as exc:
self.notify_event(authc_token.identifier, 'AUTHENTICATION.FAILED')
self.validate_locked(authc_token, exc.failed_attempts)
# this won't be called if the Account is locked:
raise IncorrectCredentialsException
self.notify_event(account['account_id'].primary_identifier,
'AUTHENTICATION.SUCCEEDED')
return account['account_id'] | :type identifiers: SimpleIdentifierCollection or None
:returns: account_id (identifiers) if the account authenticates
:rtype: SimpleIdentifierCollection | Below is the the instruction that describes the task:
### Input:
:type identifiers: SimpleIdentifierCollection or None
:returns: account_id (identifiers) if the account authenticates
:rtype: SimpleIdentifierCollection
### Response:
def authenticate_account(self, identifiers, authc_token, second_factor_token=None):
"""
:type identifiers: SimpleIdentifierCollection or None
:returns: account_id (identifiers) if the account authenticates
:rtype: SimpleIdentifierCollection
"""
msg = ("Authentication submission received for authentication "
"token [" + str(authc_token) + "]")
logger.debug(msg)
# the following conditions verify correct authentication sequence
if not getattr(authc_token, 'identifier', None):
if not identifiers:
msg = "Authentication must be performed in expected sequence."
raise InvalidAuthenticationSequenceException(msg)
authc_token.identifier = identifiers.primary_identifier
# add token metadata before sending it onward:
authc_token.token_info = token_info[authc_token.__class__]
try:
account = self.do_authenticate_account(authc_token)
if (account is None):
msg2 = ("No account returned by any configured realms for "
"submitted authentication token [{0}]".
format(authc_token))
raise AccountException(msg2)
except AdditionalAuthenticationRequired as exc:
if second_factor_token:
return self.authenticate_account(exc.account_id, second_factor_token, None)
self.notify_event(authc_token.identifier, 'AUTHENTICATION.PROGRESS')
raise exc # the security_manager saves subject identifiers
except AccountException:
self.notify_event(authc_token.identifier,
'AUTHENTICATION.ACCOUNT_NOT_FOUND')
raise
except LockedAccountException:
self.notify_event(authc_token.identifier, 'AUTHENTICATION.FAILED')
self.notify_event(authc_token.identifier, 'AUTHENTICATION.ACCOUNT_LOCKED')
raise
except IncorrectCredentialsException as exc:
self.notify_event(authc_token.identifier, 'AUTHENTICATION.FAILED')
self.validate_locked(authc_token, exc.failed_attempts)
# this won't be called if the Account is locked:
raise IncorrectCredentialsException
self.notify_event(account['account_id'].primary_identifier,
'AUTHENTICATION.SUCCEEDED')
return account['account_id'] |
def is_parameter(self):
"""Whether this is a function parameter."""
return (isinstance(self.scope, CodeFunction)
and self in self.scope.parameters) | Whether this is a function parameter. | Below is the the instruction that describes the task:
### Input:
Whether this is a function parameter.
### Response:
def is_parameter(self):
"""Whether this is a function parameter."""
return (isinstance(self.scope, CodeFunction)
and self in self.scope.parameters) |
def _updateWidgets(self):
""" Updates the combo and spin boxes given the new rti or axes.
Emits the sigContentsChanged signal.
"""
row = 0
model = self.tree.model()
# Create path label
nodePath = '' if self.rti is None else self.rti.nodePath
pathItem = QtGui.QStandardItem(nodePath)
pathItem.setToolTip(nodePath)
pathItem.setEditable(False)
if self.rti is not None:
pathItem.setIcon(self.rti.decoration)
model.setItem(row, 0, pathItem)
self._deleteSpinBoxes(row)
self._populateComboBoxes(row)
self._createSpinBoxes(row)
self._updateRtiInfo()
self.tree.resizeColumnsToContents(startCol=self.COL_FIRST_COMBO)
logger.debug("{} sigContentsChanged signal (_updateWidgets)"
.format("Blocked" if self.signalsBlocked() else "Emitting"))
self.sigContentsChanged.emit(UpdateReason.RTI_CHANGED) | Updates the combo and spin boxes given the new rti or axes.
Emits the sigContentsChanged signal. | Below is the the instruction that describes the task:
### Input:
Updates the combo and spin boxes given the new rti or axes.
Emits the sigContentsChanged signal.
### Response:
def _updateWidgets(self):
""" Updates the combo and spin boxes given the new rti or axes.
Emits the sigContentsChanged signal.
"""
row = 0
model = self.tree.model()
# Create path label
nodePath = '' if self.rti is None else self.rti.nodePath
pathItem = QtGui.QStandardItem(nodePath)
pathItem.setToolTip(nodePath)
pathItem.setEditable(False)
if self.rti is not None:
pathItem.setIcon(self.rti.decoration)
model.setItem(row, 0, pathItem)
self._deleteSpinBoxes(row)
self._populateComboBoxes(row)
self._createSpinBoxes(row)
self._updateRtiInfo()
self.tree.resizeColumnsToContents(startCol=self.COL_FIRST_COMBO)
logger.debug("{} sigContentsChanged signal (_updateWidgets)"
.format("Blocked" if self.signalsBlocked() else "Emitting"))
self.sigContentsChanged.emit(UpdateReason.RTI_CHANGED) |
def ed25519_public_key_to_string(key):
"""Convert an ed25519 public key to a base64-encoded string.
Args:
key (Ed25519PublicKey): the key to write to the file.
Returns:
str: the key representation as a str
"""
return base64.b64encode(key.public_bytes(
encoding=serialization.Encoding.Raw,
format=serialization.PublicFormat.Raw,
), None).decode('utf-8') | Convert an ed25519 public key to a base64-encoded string.
Args:
key (Ed25519PublicKey): the key to write to the file.
Returns:
str: the key representation as a str | Below is the the instruction that describes the task:
### Input:
Convert an ed25519 public key to a base64-encoded string.
Args:
key (Ed25519PublicKey): the key to write to the file.
Returns:
str: the key representation as a str
### Response:
def ed25519_public_key_to_string(key):
"""Convert an ed25519 public key to a base64-encoded string.
Args:
key (Ed25519PublicKey): the key to write to the file.
Returns:
str: the key representation as a str
"""
return base64.b64encode(key.public_bytes(
encoding=serialization.Encoding.Raw,
format=serialization.PublicFormat.Raw,
), None).decode('utf-8') |
def append_metrics(self, metrics, df_name):
"""Append new metrics to selected dataframes.
Parameters
----------
metrics : metric.EvalMetric
New metrics to be added.
df_name : str
Name of the dataframe to be modified.
"""
dataframe = self._dataframes[df_name]
_add_new_columns(dataframe, metrics)
dataframe.loc[len(dataframe)] = metrics | Append new metrics to selected dataframes.
Parameters
----------
metrics : metric.EvalMetric
New metrics to be added.
df_name : str
Name of the dataframe to be modified. | Below is the the instruction that describes the task:
### Input:
Append new metrics to selected dataframes.
Parameters
----------
metrics : metric.EvalMetric
New metrics to be added.
df_name : str
Name of the dataframe to be modified.
### Response:
def append_metrics(self, metrics, df_name):
"""Append new metrics to selected dataframes.
Parameters
----------
metrics : metric.EvalMetric
New metrics to be added.
df_name : str
Name of the dataframe to be modified.
"""
dataframe = self._dataframes[df_name]
_add_new_columns(dataframe, metrics)
dataframe.loc[len(dataframe)] = metrics |
def _datapaths(self):
"""Returns a simple key-value map for easy access to data paths"""
paths = { }
try:
data = self._config['data']
for k in data:
paths[k] = data[k]['path']
except KeyError as e:
raise AitConfigMissing(e.message)
except Exception as e:
raise AitConfigError('Error reading data paths: %s' % e)
return paths | Returns a simple key-value map for easy access to data paths | Below is the the instruction that describes the task:
### Input:
Returns a simple key-value map for easy access to data paths
### Response:
def _datapaths(self):
"""Returns a simple key-value map for easy access to data paths"""
paths = { }
try:
data = self._config['data']
for k in data:
paths[k] = data[k]['path']
except KeyError as e:
raise AitConfigMissing(e.message)
except Exception as e:
raise AitConfigError('Error reading data paths: %s' % e)
return paths |
def debug_generate(self, debug_generator, *gen_args, **gen_kwargs):
'''
Used for efficient debug logging, where the actual message isn't evaluated unless it
will actually be accepted by the logger.
'''
if self.isEnabledFor(logging.DEBUG):
message = debug_generator(*gen_args, **gen_kwargs)
# Allow for content filtering to skip logging
if message is not None:
return self.debug(message) | Used for efficient debug logging, where the actual message isn't evaluated unless it
will actually be accepted by the logger. | Below is the the instruction that describes the task:
### Input:
Used for efficient debug logging, where the actual message isn't evaluated unless it
will actually be accepted by the logger.
### Response:
def debug_generate(self, debug_generator, *gen_args, **gen_kwargs):
'''
Used for efficient debug logging, where the actual message isn't evaluated unless it
will actually be accepted by the logger.
'''
if self.isEnabledFor(logging.DEBUG):
message = debug_generator(*gen_args, **gen_kwargs)
# Allow for content filtering to skip logging
if message is not None:
return self.debug(message) |
def neg_loglikelihood(y, mean, scale, shape, skewness):
""" Negative loglikelihood function
Parameters
----------
y : np.ndarray
univariate time series
mean : np.ndarray
array of location parameters for the Poisson distribution
scale : float
scale parameter for the Poisson distribution
shape : float
tail thickness parameter for the Poisson distribution
skewness : float
skewness parameter for the Poisson distribution
Returns
----------
- Negative loglikelihood of the Poisson family
"""
return -np.sum(-mean + np.log(mean)*y - sp.gammaln(y + 1)) | Negative loglikelihood function
Parameters
----------
y : np.ndarray
univariate time series
mean : np.ndarray
array of location parameters for the Poisson distribution
scale : float
scale parameter for the Poisson distribution
shape : float
tail thickness parameter for the Poisson distribution
skewness : float
skewness parameter for the Poisson distribution
Returns
----------
- Negative loglikelihood of the Poisson family | Below is the the instruction that describes the task:
### Input:
Negative loglikelihood function
Parameters
----------
y : np.ndarray
univariate time series
mean : np.ndarray
array of location parameters for the Poisson distribution
scale : float
scale parameter for the Poisson distribution
shape : float
tail thickness parameter for the Poisson distribution
skewness : float
skewness parameter for the Poisson distribution
Returns
----------
- Negative loglikelihood of the Poisson family
### Response:
def neg_loglikelihood(y, mean, scale, shape, skewness):
""" Negative loglikelihood function
Parameters
----------
y : np.ndarray
univariate time series
mean : np.ndarray
array of location parameters for the Poisson distribution
scale : float
scale parameter for the Poisson distribution
shape : float
tail thickness parameter for the Poisson distribution
skewness : float
skewness parameter for the Poisson distribution
Returns
----------
- Negative loglikelihood of the Poisson family
"""
return -np.sum(-mean + np.log(mean)*y - sp.gammaln(y + 1)) |
def push_notebook(document=None, state=None, handle=None):
''' Update Bokeh plots in a Jupyter notebook output cells with new data
or property values.
When working the the notebook, the ``show`` function can be passed the
argument ``notebook_handle=True``, which will cause it to return a
handle object that can be used to update the Bokeh output later. When
``push_notebook`` is called, any property updates (e.g. plot titles or
data source values, etc.) since the last call to ``push_notebook`` or
the original ``show`` call are applied to the Bokeh output in the
previously rendered Jupyter output cell.
Several example notebooks can be found in the GitHub repository in
the :bokeh-tree:`examples/howto/notebook_comms` directory.
Args:
document (Document, optional) :
A :class:`~bokeh.document.Document` to push from. If None,
uses ``curdoc()``. (default: None)
state (State, optional) :
A :class:`State` object. If None, then the current default
state (set by ``output_file``, etc.) is used. (default: None)
Returns:
None
Examples:
Typical usage is typically similar to this:
.. code-block:: python
from bokeh.plotting import figure
from bokeh.io import output_notebook, push_notebook, show
output_notebook()
plot = figure()
plot.circle([1,2,3], [4,6,5])
handle = show(plot, notebook_handle=True)
# Update the plot title in the earlier cell
plot.title.text = "New Title"
push_notebook(handle=handle)
'''
from ..protocol import Protocol
if state is None:
state = curstate()
if not document:
document = state.document
if not document:
warn("No document to push")
return
if handle is None:
handle = state.last_comms_handle
if not handle:
warn("Cannot find a last shown plot to update. Call output_notebook() and show(..., notebook_handle=True) before push_notebook()")
return
events = list(handle.doc._held_events)
# This is to avoid having an exception raised for attempting to create a
# PATCH-DOC with no events. In the notebook, we just want to silently
# ignore calls to push_notebook when there are no new events
if len(events) == 0:
return
handle.doc._held_events = []
msg = Protocol("1.0").create("PATCH-DOC", events)
handle.comms.send(msg.header_json)
handle.comms.send(msg.metadata_json)
handle.comms.send(msg.content_json)
for header, payload in msg.buffers:
handle.comms.send(json.dumps(header))
handle.comms.send(buffers=[payload]) | Update Bokeh plots in a Jupyter notebook output cells with new data
or property values.
When working the the notebook, the ``show`` function can be passed the
argument ``notebook_handle=True``, which will cause it to return a
handle object that can be used to update the Bokeh output later. When
``push_notebook`` is called, any property updates (e.g. plot titles or
data source values, etc.) since the last call to ``push_notebook`` or
the original ``show`` call are applied to the Bokeh output in the
previously rendered Jupyter output cell.
Several example notebooks can be found in the GitHub repository in
the :bokeh-tree:`examples/howto/notebook_comms` directory.
Args:
document (Document, optional) :
A :class:`~bokeh.document.Document` to push from. If None,
uses ``curdoc()``. (default: None)
state (State, optional) :
A :class:`State` object. If None, then the current default
state (set by ``output_file``, etc.) is used. (default: None)
Returns:
None
Examples:
Typical usage is typically similar to this:
.. code-block:: python
from bokeh.plotting import figure
from bokeh.io import output_notebook, push_notebook, show
output_notebook()
plot = figure()
plot.circle([1,2,3], [4,6,5])
handle = show(plot, notebook_handle=True)
# Update the plot title in the earlier cell
plot.title.text = "New Title"
push_notebook(handle=handle) | Below is the the instruction that describes the task:
### Input:
Update Bokeh plots in a Jupyter notebook output cells with new data
or property values.
When working the the notebook, the ``show`` function can be passed the
argument ``notebook_handle=True``, which will cause it to return a
handle object that can be used to update the Bokeh output later. When
``push_notebook`` is called, any property updates (e.g. plot titles or
data source values, etc.) since the last call to ``push_notebook`` or
the original ``show`` call are applied to the Bokeh output in the
previously rendered Jupyter output cell.
Several example notebooks can be found in the GitHub repository in
the :bokeh-tree:`examples/howto/notebook_comms` directory.
Args:
document (Document, optional) :
A :class:`~bokeh.document.Document` to push from. If None,
uses ``curdoc()``. (default: None)
state (State, optional) :
A :class:`State` object. If None, then the current default
state (set by ``output_file``, etc.) is used. (default: None)
Returns:
None
Examples:
Typical usage is typically similar to this:
.. code-block:: python
from bokeh.plotting import figure
from bokeh.io import output_notebook, push_notebook, show
output_notebook()
plot = figure()
plot.circle([1,2,3], [4,6,5])
handle = show(plot, notebook_handle=True)
# Update the plot title in the earlier cell
plot.title.text = "New Title"
push_notebook(handle=handle)
### Response:
def push_notebook(document=None, state=None, handle=None):
''' Update Bokeh plots in a Jupyter notebook output cells with new data
or property values.
When working the the notebook, the ``show`` function can be passed the
argument ``notebook_handle=True``, which will cause it to return a
handle object that can be used to update the Bokeh output later. When
``push_notebook`` is called, any property updates (e.g. plot titles or
data source values, etc.) since the last call to ``push_notebook`` or
the original ``show`` call are applied to the Bokeh output in the
previously rendered Jupyter output cell.
Several example notebooks can be found in the GitHub repository in
the :bokeh-tree:`examples/howto/notebook_comms` directory.
Args:
document (Document, optional) :
A :class:`~bokeh.document.Document` to push from. If None,
uses ``curdoc()``. (default: None)
state (State, optional) :
A :class:`State` object. If None, then the current default
state (set by ``output_file``, etc.) is used. (default: None)
Returns:
None
Examples:
Typical usage is typically similar to this:
.. code-block:: python
from bokeh.plotting import figure
from bokeh.io import output_notebook, push_notebook, show
output_notebook()
plot = figure()
plot.circle([1,2,3], [4,6,5])
handle = show(plot, notebook_handle=True)
# Update the plot title in the earlier cell
plot.title.text = "New Title"
push_notebook(handle=handle)
'''
from ..protocol import Protocol
if state is None:
state = curstate()
if not document:
document = state.document
if not document:
warn("No document to push")
return
if handle is None:
handle = state.last_comms_handle
if not handle:
warn("Cannot find a last shown plot to update. Call output_notebook() and show(..., notebook_handle=True) before push_notebook()")
return
events = list(handle.doc._held_events)
# This is to avoid having an exception raised for attempting to create a
# PATCH-DOC with no events. In the notebook, we just want to silently
# ignore calls to push_notebook when there are no new events
if len(events) == 0:
return
handle.doc._held_events = []
msg = Protocol("1.0").create("PATCH-DOC", events)
handle.comms.send(msg.header_json)
handle.comms.send(msg.metadata_json)
handle.comms.send(msg.content_json)
for header, payload in msg.buffers:
handle.comms.send(json.dumps(header))
handle.comms.send(buffers=[payload]) |
def choice_README(self):
"""View README file
"""
README = ReadSBo(self.sbo_url).readme("README")
fill = self.fill_pager(README)
self.pager(README + fill) | View README file | Below is the the instruction that describes the task:
### Input:
View README file
### Response:
def choice_README(self):
"""View README file
"""
README = ReadSBo(self.sbo_url).readme("README")
fill = self.fill_pager(README)
self.pager(README + fill) |
def get_session(self, key):
"""
:returns: DelegatingSession
"""
# a SimpleSession:
session = self.session_handler.do_get_session(key)
if (session):
return self.create_exposed_session(session, key)
else:
return None | :returns: DelegatingSession | Below is the the instruction that describes the task:
### Input:
:returns: DelegatingSession
### Response:
def get_session(self, key):
"""
:returns: DelegatingSession
"""
# a SimpleSession:
session = self.session_handler.do_get_session(key)
if (session):
return self.create_exposed_session(session, key)
else:
return None |
def _generate_base_svm_regression_spec(model):
"""
Takes an SVM regression model produces a starting spec using the parts.
that are shared between all SVMs.
"""
if not(_HAS_SKLEARN):
raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.')
spec = _Model_pb2.Model()
spec.specificationVersion = SPECIFICATION_VERSION
svm = spec.supportVectorRegressor
_set_kernel(model, svm)
svm.rho = -model.intercept_[0]
for i in range(len(model._dual_coef_)):
for cur_alpha in model._dual_coef_[i]:
svm.coefficients.alpha.append(cur_alpha)
for cur_src_vector in model.support_vectors_:
cur_dest_vector = svm.denseSupportVectors.vectors.add()
for i in cur_src_vector:
cur_dest_vector.values.append(i)
return spec | Takes an SVM regression model produces a starting spec using the parts.
that are shared between all SVMs. | Below is the the instruction that describes the task:
### Input:
Takes an SVM regression model produces a starting spec using the parts.
that are shared between all SVMs.
### Response:
def _generate_base_svm_regression_spec(model):
"""
Takes an SVM regression model produces a starting spec using the parts.
that are shared between all SVMs.
"""
if not(_HAS_SKLEARN):
raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.')
spec = _Model_pb2.Model()
spec.specificationVersion = SPECIFICATION_VERSION
svm = spec.supportVectorRegressor
_set_kernel(model, svm)
svm.rho = -model.intercept_[0]
for i in range(len(model._dual_coef_)):
for cur_alpha in model._dual_coef_[i]:
svm.coefficients.alpha.append(cur_alpha)
for cur_src_vector in model.support_vectors_:
cur_dest_vector = svm.denseSupportVectors.vectors.add()
for i in cur_src_vector:
cur_dest_vector.values.append(i)
return spec |
def _add_genotype_calls(self, variant_obj, variant_line, case_obj):
"""Add the genotype calls for the variant
Args:
variant_obj (puzzle.models.Variant)
variant_dict (dict): A variant dictionary
case_obj (puzzle.models.Case)
"""
variant_line = variant_line.split('\t')
#if there is gt calls we have no individuals to add
if len(variant_line) > 8:
gt_format = variant_line[8].split(':')
for individual in case_obj.individuals:
sample_id = individual.ind_id
index = individual.ind_index
gt_call = variant_line[9+index].split(':')
raw_call = dict(zip(gt_format, gt_call))
genotype = Genotype(**raw_call)
variant_obj.add_individual(puzzle_genotype(
sample_id = sample_id,
genotype = genotype.genotype,
case_id = case_obj.name,
phenotype = individual.phenotype,
ref_depth = genotype.ref_depth,
alt_depth = genotype.alt_depth,
genotype_quality = genotype.genotype_quality,
depth = genotype.depth_of_coverage,
supporting_evidence = genotype.supporting_evidence,
pe_support = genotype.pe_support,
sr_support = genotype.sr_support,
)) | Add the genotype calls for the variant
Args:
variant_obj (puzzle.models.Variant)
variant_dict (dict): A variant dictionary
case_obj (puzzle.models.Case) | Below is the the instruction that describes the task:
### Input:
Add the genotype calls for the variant
Args:
variant_obj (puzzle.models.Variant)
variant_dict (dict): A variant dictionary
case_obj (puzzle.models.Case)
### Response:
def _add_genotype_calls(self, variant_obj, variant_line, case_obj):
"""Add the genotype calls for the variant
Args:
variant_obj (puzzle.models.Variant)
variant_dict (dict): A variant dictionary
case_obj (puzzle.models.Case)
"""
variant_line = variant_line.split('\t')
#if there is gt calls we have no individuals to add
if len(variant_line) > 8:
gt_format = variant_line[8].split(':')
for individual in case_obj.individuals:
sample_id = individual.ind_id
index = individual.ind_index
gt_call = variant_line[9+index].split(':')
raw_call = dict(zip(gt_format, gt_call))
genotype = Genotype(**raw_call)
variant_obj.add_individual(puzzle_genotype(
sample_id = sample_id,
genotype = genotype.genotype,
case_id = case_obj.name,
phenotype = individual.phenotype,
ref_depth = genotype.ref_depth,
alt_depth = genotype.alt_depth,
genotype_quality = genotype.genotype_quality,
depth = genotype.depth_of_coverage,
supporting_evidence = genotype.supporting_evidence,
pe_support = genotype.pe_support,
sr_support = genotype.sr_support,
)) |
def _get_loggers():
"""Return list of Logger classes."""
from .. import loader
modules = loader.get_package_modules('logger')
return list(loader.get_plugins(modules, [_Logger])) | Return list of Logger classes. | Below is the the instruction that describes the task:
### Input:
Return list of Logger classes.
### Response:
def _get_loggers():
"""Return list of Logger classes."""
from .. import loader
modules = loader.get_package_modules('logger')
return list(loader.get_plugins(modules, [_Logger])) |
def find_by_reference_ids(reference_ids, _connection=None, page_size=100,
page_number=0, sort_by=enums.DEFAULT_SORT_BY,
sort_order=enums.DEFAULT_SORT_ORDER):
"""
List all videos identified by a list of reference ids
"""
if not isinstance(reference_ids, (list, tuple)):
err = "Video.find_by_reference_ids expects an iterable argument"
raise exceptions.PyBrightcoveError(err)
ids = ','.join(reference_ids)
return connection.ItemResultSet(
'find_videos_by_reference_ids', Video, _connection, page_size,
page_number, sort_by, sort_order, reference_ids=ids) | List all videos identified by a list of reference ids | Below is the the instruction that describes the task:
### Input:
List all videos identified by a list of reference ids
### Response:
def find_by_reference_ids(reference_ids, _connection=None, page_size=100,
page_number=0, sort_by=enums.DEFAULT_SORT_BY,
sort_order=enums.DEFAULT_SORT_ORDER):
"""
List all videos identified by a list of reference ids
"""
if not isinstance(reference_ids, (list, tuple)):
err = "Video.find_by_reference_ids expects an iterable argument"
raise exceptions.PyBrightcoveError(err)
ids = ','.join(reference_ids)
return connection.ItemResultSet(
'find_videos_by_reference_ids', Video, _connection, page_size,
page_number, sort_by, sort_order, reference_ids=ids) |
def extract_examples_from_readme_rst(indent=' '):
"""Extract examples from this project's README.rst file.
Parameters
----------
indent: str
Prepend each line with this string. Should contain some number of spaces.
Returns
-------
str
The examples.
Notes
-----
Quite fragile, depends on named labels inside the README.rst file.
"""
curr_dir = os.path.dirname(os.path.abspath(__file__))
readme_path = os.path.join(curr_dir, '..', 'README.rst')
try:
with open(readme_path) as fin:
lines = list(fin)
start = lines.index('.. _doctools_before_examples:\n')
end = lines.index(".. _doctools_after_examples:\n")
lines = lines[start+4:end-2]
return ''.join([indent + re.sub('^ ', '', l) for l in lines])
except Exception:
return indent + 'See README.rst' | Extract examples from this project's README.rst file.
Parameters
----------
indent: str
Prepend each line with this string. Should contain some number of spaces.
Returns
-------
str
The examples.
Notes
-----
Quite fragile, depends on named labels inside the README.rst file. | Below is the the instruction that describes the task:
### Input:
Extract examples from this project's README.rst file.
Parameters
----------
indent: str
Prepend each line with this string. Should contain some number of spaces.
Returns
-------
str
The examples.
Notes
-----
Quite fragile, depends on named labels inside the README.rst file.
### Response:
def extract_examples_from_readme_rst(indent=' '):
"""Extract examples from this project's README.rst file.
Parameters
----------
indent: str
Prepend each line with this string. Should contain some number of spaces.
Returns
-------
str
The examples.
Notes
-----
Quite fragile, depends on named labels inside the README.rst file.
"""
curr_dir = os.path.dirname(os.path.abspath(__file__))
readme_path = os.path.join(curr_dir, '..', 'README.rst')
try:
with open(readme_path) as fin:
lines = list(fin)
start = lines.index('.. _doctools_before_examples:\n')
end = lines.index(".. _doctools_after_examples:\n")
lines = lines[start+4:end-2]
return ''.join([indent + re.sub('^ ', '', l) for l in lines])
except Exception:
return indent + 'See README.rst' |
def evaluate(self, parameters):
"""Returns False if there's no match, or whatever the ParameterCondition evaluates to (recursively applied!)"""
if self.match(parameters):
if isinstance(self.then, ParameterCondition):
#recursive parametercondition
return self.then.evaluate(parameters)
else:
return self.then
elif self.otherwise:
if isinstance(self.otherwise, ParameterCondition):
#recursive else
return self.otherwise.evaluate(parameters)
else:
return self.otherwise
return False | Returns False if there's no match, or whatever the ParameterCondition evaluates to (recursively applied!) | Below is the the instruction that describes the task:
### Input:
Returns False if there's no match, or whatever the ParameterCondition evaluates to (recursively applied!)
### Response:
def evaluate(self, parameters):
"""Returns False if there's no match, or whatever the ParameterCondition evaluates to (recursively applied!)"""
if self.match(parameters):
if isinstance(self.then, ParameterCondition):
#recursive parametercondition
return self.then.evaluate(parameters)
else:
return self.then
elif self.otherwise:
if isinstance(self.otherwise, ParameterCondition):
#recursive else
return self.otherwise.evaluate(parameters)
else:
return self.otherwise
return False |
def getK1(self, type='simu'):
""" get quad k1 value
:param type: 'simu' or 'online'
:return: quad strength,i.e. k1
"""
if type == 'ctrl':
pv = self.ctrlinfo.get('k1')['pv']
rval = epics.caget(pv)
if rval is None:
val = self.getConfig(type='simu')['k1']
else:
val = self.unitTrans(rval, direction='+')
return val
else:
return self.getConfig(type='simu')['k1'] | get quad k1 value
:param type: 'simu' or 'online'
:return: quad strength,i.e. k1 | Below is the the instruction that describes the task:
### Input:
get quad k1 value
:param type: 'simu' or 'online'
:return: quad strength,i.e. k1
### Response:
def getK1(self, type='simu'):
""" get quad k1 value
:param type: 'simu' or 'online'
:return: quad strength,i.e. k1
"""
if type == 'ctrl':
pv = self.ctrlinfo.get('k1')['pv']
rval = epics.caget(pv)
if rval is None:
val = self.getConfig(type='simu')['k1']
else:
val = self.unitTrans(rval, direction='+')
return val
else:
return self.getConfig(type='simu')['k1'] |
def load_from_json(data):
"""
Load a :class:`ApplicationResponse` from a dictionary or string (that
will be parsed as json).
"""
if isinstance(data, str):
data = json.loads(data)
items = [Item.load_from_json(a) for a in data['items']] if data['items'] is not None else []
return ApplicationResponse(
data['title'], data['uri'], data['service_url'],
data['success'], data['has_references'], data['count'], items
) | Load a :class:`ApplicationResponse` from a dictionary or string (that
will be parsed as json). | Below is the the instruction that describes the task:
### Input:
Load a :class:`ApplicationResponse` from a dictionary or string (that
will be parsed as json).
### Response:
def load_from_json(data):
"""
Load a :class:`ApplicationResponse` from a dictionary or string (that
will be parsed as json).
"""
if isinstance(data, str):
data = json.loads(data)
items = [Item.load_from_json(a) for a in data['items']] if data['items'] is not None else []
return ApplicationResponse(
data['title'], data['uri'], data['service_url'],
data['success'], data['has_references'], data['count'], items
) |
def full_name(self):
"""Fully-qualified name of this variable.
Example:
``projects/my-project/configs/my-config``
:rtype: str
:returns: The full name based on project and config names.
:raises: :class:`ValueError` if the config is missing a name.
"""
if not self.name:
raise ValueError("Missing config name.")
return "projects/%s/configs/%s" % (self._client.project, self.name) | Fully-qualified name of this variable.
Example:
``projects/my-project/configs/my-config``
:rtype: str
:returns: The full name based on project and config names.
:raises: :class:`ValueError` if the config is missing a name. | Below is the the instruction that describes the task:
### Input:
Fully-qualified name of this variable.
Example:
``projects/my-project/configs/my-config``
:rtype: str
:returns: The full name based on project and config names.
:raises: :class:`ValueError` if the config is missing a name.
### Response:
def full_name(self):
"""Fully-qualified name of this variable.
Example:
``projects/my-project/configs/my-config``
:rtype: str
:returns: The full name based on project and config names.
:raises: :class:`ValueError` if the config is missing a name.
"""
if not self.name:
raise ValueError("Missing config name.")
return "projects/%s/configs/%s" % (self._client.project, self.name) |
def queryset(self, request, queryset):
"""
Returns the filtered queryset based on the value
provided in the query string and retrievable via
`self.value()`.
"""
if self.value() is not None:
if self.value() > 0:
return queryset.filter(parent_process_id=self.value()) | Returns the filtered queryset based on the value
provided in the query string and retrievable via
`self.value()`. | Below is the the instruction that describes the task:
### Input:
Returns the filtered queryset based on the value
provided in the query string and retrievable via
`self.value()`.
### Response:
def queryset(self, request, queryset):
"""
Returns the filtered queryset based on the value
provided in the query string and retrievable via
`self.value()`.
"""
if self.value() is not None:
if self.value() > 0:
return queryset.filter(parent_process_id=self.value()) |
def make_basic_table(self, file_type):
""" Create table of key-value items in 'file_type'.
"""
table_data = {sample: items['kv']
for sample, items
in self.mod_data[file_type].items()
}
table_headers = {}
for column_header, (description, header_options) in file_types[file_type]['kv_descriptions'].items():
table_headers[column_header] = {
'rid': '{}_{}_bbmstheader'.format(file_type, column_header),
'title': column_header,
'description': description,
}
table_headers[column_header].update(header_options)
tconfig = {
'id': file_type + '_bbm_table',
'namespace': 'BBTools'
}
for sample in table_data:
for key, value in table_data[sample].items():
try:
table_data[sample][key] = float(value)
except ValueError:
pass
return table.plot(table_data, table_headers, tconfig) | Create table of key-value items in 'file_type'. | Below is the the instruction that describes the task:
### Input:
Create table of key-value items in 'file_type'.
### Response:
def make_basic_table(self, file_type):
""" Create table of key-value items in 'file_type'.
"""
table_data = {sample: items['kv']
for sample, items
in self.mod_data[file_type].items()
}
table_headers = {}
for column_header, (description, header_options) in file_types[file_type]['kv_descriptions'].items():
table_headers[column_header] = {
'rid': '{}_{}_bbmstheader'.format(file_type, column_header),
'title': column_header,
'description': description,
}
table_headers[column_header].update(header_options)
tconfig = {
'id': file_type + '_bbm_table',
'namespace': 'BBTools'
}
for sample in table_data:
for key, value in table_data[sample].items():
try:
table_data[sample][key] = float(value)
except ValueError:
pass
return table.plot(table_data, table_headers, tconfig) |
def get_symbol(num_classes, num_layers, image_shape, num_group=32, conv_workspace=256, dtype='float32', **kwargs):
"""
Adapted from https://github.com/tornadomeet/ResNet/blob/master/train_resnet.py
Original author Wei Wu
"""
image_shape = [int(l) for l in image_shape.split(',')]
(nchannel, height, width) = image_shape
if height <= 32:
num_stages = 3
if (num_layers-2) % 9 == 0 and num_layers >= 164:
per_unit = [(num_layers-2)//9]
filter_list = [16, 64, 128, 256]
bottle_neck = True
elif (num_layers-2) % 6 == 0 and num_layers < 164:
per_unit = [(num_layers-2)//6]
filter_list = [16, 16, 32, 64]
bottle_neck = False
else:
raise ValueError("no experiments done on num_layers {}, you can do it yourself".format(num_layers))
units = per_unit * num_stages
else:
if num_layers >= 50:
filter_list = [64, 256, 512, 1024, 2048]
bottle_neck = True
else:
filter_list = [64, 64, 128, 256, 512]
bottle_neck = False
num_stages = 4
if num_layers == 18:
units = [2, 2, 2, 2]
elif num_layers == 34:
units = [3, 4, 6, 3]
elif num_layers == 50:
units = [3, 4, 6, 3]
elif num_layers == 101:
units = [3, 4, 23, 3]
elif num_layers == 152:
units = [3, 8, 36, 3]
elif num_layers == 200:
units = [3, 24, 36, 3]
elif num_layers == 269:
units = [3, 30, 48, 8]
else:
raise ValueError("no experiments done on num_layers {}, you can do it yourself".format(num_layers))
return resnext(units = units,
num_stages = num_stages,
filter_list = filter_list,
num_classes = num_classes,
num_group = num_group,
image_shape = image_shape,
bottle_neck = bottle_neck,
workspace = conv_workspace,
dtype = dtype) | Adapted from https://github.com/tornadomeet/ResNet/blob/master/train_resnet.py
Original author Wei Wu | Below is the the instruction that describes the task:
### Input:
Adapted from https://github.com/tornadomeet/ResNet/blob/master/train_resnet.py
Original author Wei Wu
### Response:
def get_symbol(num_classes, num_layers, image_shape, num_group=32, conv_workspace=256, dtype='float32', **kwargs):
"""
Adapted from https://github.com/tornadomeet/ResNet/blob/master/train_resnet.py
Original author Wei Wu
"""
image_shape = [int(l) for l in image_shape.split(',')]
(nchannel, height, width) = image_shape
if height <= 32:
num_stages = 3
if (num_layers-2) % 9 == 0 and num_layers >= 164:
per_unit = [(num_layers-2)//9]
filter_list = [16, 64, 128, 256]
bottle_neck = True
elif (num_layers-2) % 6 == 0 and num_layers < 164:
per_unit = [(num_layers-2)//6]
filter_list = [16, 16, 32, 64]
bottle_neck = False
else:
raise ValueError("no experiments done on num_layers {}, you can do it yourself".format(num_layers))
units = per_unit * num_stages
else:
if num_layers >= 50:
filter_list = [64, 256, 512, 1024, 2048]
bottle_neck = True
else:
filter_list = [64, 64, 128, 256, 512]
bottle_neck = False
num_stages = 4
if num_layers == 18:
units = [2, 2, 2, 2]
elif num_layers == 34:
units = [3, 4, 6, 3]
elif num_layers == 50:
units = [3, 4, 6, 3]
elif num_layers == 101:
units = [3, 4, 23, 3]
elif num_layers == 152:
units = [3, 8, 36, 3]
elif num_layers == 200:
units = [3, 24, 36, 3]
elif num_layers == 269:
units = [3, 30, 48, 8]
else:
raise ValueError("no experiments done on num_layers {}, you can do it yourself".format(num_layers))
return resnext(units = units,
num_stages = num_stages,
filter_list = filter_list,
num_classes = num_classes,
num_group = num_group,
image_shape = image_shape,
bottle_neck = bottle_neck,
workspace = conv_workspace,
dtype = dtype) |
def fetch(self, *args, **kwargs):
"""同步服务器的 Notification 数据
"""
response = client.get('/tables/Notifications/{0}'.format(self.id))
self._update_data(response.json()) | 同步服务器的 Notification 数据 | Below is the the instruction that describes the task:
### Input:
同步服务器的 Notification 数据
### Response:
def fetch(self, *args, **kwargs):
"""同步服务器的 Notification 数据
"""
response = client.get('/tables/Notifications/{0}'.format(self.id))
self._update_data(response.json()) |
def Analyze(self, hashes):
"""Looks up hashes in VirusTotal using the VirusTotal HTTP API.
The API is documented here:
https://www.virustotal.com/en/documentation/public-api/
Args:
hashes (list[str]): hashes to look up.
Returns:
list[HashAnalysis]: analysis results.
Raises:
RuntimeError: If the VirusTotal API key has not been set.
"""
if not self._api_key:
raise RuntimeError('No API key specified for VirusTotal lookup.')
hash_analyses = []
json_response = self._QueryHashes(hashes) or []
# VirusTotal returns a dictionary when a single hash is queried
# and a list when multiple hashes are queried.
if isinstance(json_response, dict):
json_response = [json_response]
for result in json_response:
resource = result['resource']
hash_analysis = interface.HashAnalysis(resource, result)
hash_analyses.append(hash_analysis)
return hash_analyses | Looks up hashes in VirusTotal using the VirusTotal HTTP API.
The API is documented here:
https://www.virustotal.com/en/documentation/public-api/
Args:
hashes (list[str]): hashes to look up.
Returns:
list[HashAnalysis]: analysis results.
Raises:
RuntimeError: If the VirusTotal API key has not been set. | Below is the the instruction that describes the task:
### Input:
Looks up hashes in VirusTotal using the VirusTotal HTTP API.
The API is documented here:
https://www.virustotal.com/en/documentation/public-api/
Args:
hashes (list[str]): hashes to look up.
Returns:
list[HashAnalysis]: analysis results.
Raises:
RuntimeError: If the VirusTotal API key has not been set.
### Response:
def Analyze(self, hashes):
"""Looks up hashes in VirusTotal using the VirusTotal HTTP API.
The API is documented here:
https://www.virustotal.com/en/documentation/public-api/
Args:
hashes (list[str]): hashes to look up.
Returns:
list[HashAnalysis]: analysis results.
Raises:
RuntimeError: If the VirusTotal API key has not been set.
"""
if not self._api_key:
raise RuntimeError('No API key specified for VirusTotal lookup.')
hash_analyses = []
json_response = self._QueryHashes(hashes) or []
# VirusTotal returns a dictionary when a single hash is queried
# and a list when multiple hashes are queried.
if isinstance(json_response, dict):
json_response = [json_response]
for result in json_response:
resource = result['resource']
hash_analysis = interface.HashAnalysis(resource, result)
hash_analyses.append(hash_analysis)
return hash_analyses |
def run_top_task(self, task_name=None, sort=None, **kwargs):
"""Finds and runs a pending task that in the first of the sorting list.
Parameters
-----------
task_name : str
The task name.
sort : List of tuple
PyMongo sort comment, search "PyMongo find one sorting" and `collection level operations <http://api.mongodb.com/python/current/api/pymongo/collection.html>`__ for more details.
kwargs : other parameters
Users customized parameters such as description, version number.
Examples
---------
Monitors the database and pull tasks to run
>>> while True:
>>> print("waiting task from distributor")
>>> db.run_top_task(task_name='mnist', sort=[("time", -1)])
>>> time.sleep(1)
Returns
--------
boolean : True for success, False for fail.
"""
if not isinstance(task_name, str): # is None:
raise Exception("task_name should be string")
self._fill_project_info(kwargs)
kwargs.update({'status': 'pending'})
# find task and set status to running
task = self.db.Task.find_one_and_update(kwargs, {'$set': {'status': 'running'}}, sort=sort)
try:
# get task info e.g. hyper parameters, python script
if task is None:
logging.info("[Database] Find Task FAIL: key: {} sort: {}".format(task_name, sort))
return False
else:
logging.info("[Database] Find Task SUCCESS: key: {} sort: {}".format(task_name, sort))
_datetime = task['time']
_script = task['script']
_id = task['_id']
_hyper_parameters = task['hyper_parameters']
_saved_result_keys = task['saved_result_keys']
logging.info(" hyper parameters:")
for key in _hyper_parameters:
globals()[key] = _hyper_parameters[key]
logging.info(" {}: {}".format(key, _hyper_parameters[key]))
# run task
s = time.time()
logging.info("[Database] Start Task: key: {} sort: {} push time: {}".format(task_name, sort, _datetime))
_script = _script.decode('utf-8')
with tf.Graph().as_default(): # as graph: # clear all TF graphs
exec(_script, globals())
# set status to finished
_ = self.db.Task.find_one_and_update({'_id': _id}, {'$set': {'status': 'finished'}})
# return results
__result = {}
for _key in _saved_result_keys:
logging.info(" result: {}={} {}".format(_key, globals()[_key], type(globals()[_key])))
__result.update({"%s" % _key: globals()[_key]})
_ = self.db.Task.find_one_and_update(
{
'_id': _id
}, {'$set': {
'result': __result
}}, return_document=pymongo.ReturnDocument.AFTER
)
logging.info(
"[Database] Finished Task: task_name - {} sort: {} push time: {} took: {}s".
format(task_name, sort, _datetime,
time.time() - s)
)
return True
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
logging.info("{} {} {} {} {}".format(exc_type, exc_obj, fname, exc_tb.tb_lineno, e))
logging.info("[Database] Fail to run task")
# if fail, set status back to pending
_ = self.db.Task.find_one_and_update({'_id': _id}, {'$set': {'status': 'pending'}})
return False | Finds and runs a pending task that in the first of the sorting list.
Parameters
-----------
task_name : str
The task name.
sort : List of tuple
PyMongo sort comment, search "PyMongo find one sorting" and `collection level operations <http://api.mongodb.com/python/current/api/pymongo/collection.html>`__ for more details.
kwargs : other parameters
Users customized parameters such as description, version number.
Examples
---------
Monitors the database and pull tasks to run
>>> while True:
>>> print("waiting task from distributor")
>>> db.run_top_task(task_name='mnist', sort=[("time", -1)])
>>> time.sleep(1)
Returns
--------
boolean : True for success, False for fail. | Below is the the instruction that describes the task:
### Input:
Finds and runs a pending task that in the first of the sorting list.
Parameters
-----------
task_name : str
The task name.
sort : List of tuple
PyMongo sort comment, search "PyMongo find one sorting" and `collection level operations <http://api.mongodb.com/python/current/api/pymongo/collection.html>`__ for more details.
kwargs : other parameters
Users customized parameters such as description, version number.
Examples
---------
Monitors the database and pull tasks to run
>>> while True:
>>> print("waiting task from distributor")
>>> db.run_top_task(task_name='mnist', sort=[("time", -1)])
>>> time.sleep(1)
Returns
--------
boolean : True for success, False for fail.
### Response:
def run_top_task(self, task_name=None, sort=None, **kwargs):
"""Finds and runs a pending task that in the first of the sorting list.
Parameters
-----------
task_name : str
The task name.
sort : List of tuple
PyMongo sort comment, search "PyMongo find one sorting" and `collection level operations <http://api.mongodb.com/python/current/api/pymongo/collection.html>`__ for more details.
kwargs : other parameters
Users customized parameters such as description, version number.
Examples
---------
Monitors the database and pull tasks to run
>>> while True:
>>> print("waiting task from distributor")
>>> db.run_top_task(task_name='mnist', sort=[("time", -1)])
>>> time.sleep(1)
Returns
--------
boolean : True for success, False for fail.
"""
if not isinstance(task_name, str): # is None:
raise Exception("task_name should be string")
self._fill_project_info(kwargs)
kwargs.update({'status': 'pending'})
# find task and set status to running
task = self.db.Task.find_one_and_update(kwargs, {'$set': {'status': 'running'}}, sort=sort)
try:
# get task info e.g. hyper parameters, python script
if task is None:
logging.info("[Database] Find Task FAIL: key: {} sort: {}".format(task_name, sort))
return False
else:
logging.info("[Database] Find Task SUCCESS: key: {} sort: {}".format(task_name, sort))
_datetime = task['time']
_script = task['script']
_id = task['_id']
_hyper_parameters = task['hyper_parameters']
_saved_result_keys = task['saved_result_keys']
logging.info(" hyper parameters:")
for key in _hyper_parameters:
globals()[key] = _hyper_parameters[key]
logging.info(" {}: {}".format(key, _hyper_parameters[key]))
# run task
s = time.time()
logging.info("[Database] Start Task: key: {} sort: {} push time: {}".format(task_name, sort, _datetime))
_script = _script.decode('utf-8')
with tf.Graph().as_default(): # as graph: # clear all TF graphs
exec(_script, globals())
# set status to finished
_ = self.db.Task.find_one_and_update({'_id': _id}, {'$set': {'status': 'finished'}})
# return results
__result = {}
for _key in _saved_result_keys:
logging.info(" result: {}={} {}".format(_key, globals()[_key], type(globals()[_key])))
__result.update({"%s" % _key: globals()[_key]})
_ = self.db.Task.find_one_and_update(
{
'_id': _id
}, {'$set': {
'result': __result
}}, return_document=pymongo.ReturnDocument.AFTER
)
logging.info(
"[Database] Finished Task: task_name - {} sort: {} push time: {} took: {}s".
format(task_name, sort, _datetime,
time.time() - s)
)
return True
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
logging.info("{} {} {} {} {}".format(exc_type, exc_obj, fname, exc_tb.tb_lineno, e))
logging.info("[Database] Fail to run task")
# if fail, set status back to pending
_ = self.db.Task.find_one_and_update({'_id': _id}, {'$set': {'status': 'pending'}})
return False |
def add_version_pattern(self, m):
"""For QR codes with a version 7 or higher, a special pattern
specifying the code's version is required.
For further information see:
http://www.thonky.com/qr-code-tutorial/format-version-information/#example-of-version-7-information-string
"""
if self.version < 7:
return
#Get the bit fields for this code's version
#We will iterate across the string, the bit string
#needs the least significant digit in the zero-th position
field = iter(tables.version_pattern[self.version][::-1])
#Where to start placing the pattern
start = len(m)-11
#The version pattern is pretty odd looking
for i in range(6):
#The pattern is three modules wide
for j in range(start, start+3):
bit = int(next(field))
#Bottom Left
m[i][j] = bit
#Upper right
m[j][i] = bit | For QR codes with a version 7 or higher, a special pattern
specifying the code's version is required.
For further information see:
    http://www.thonky.com/qr-code-tutorial/format-version-information/#example-of-version-7-information-string | Below is the instruction that describes the task:
### Input:
For QR codes with a version 7 or higher, a special pattern
specifying the code's version is required.
For further information see:
http://www.thonky.com/qr-code-tutorial/format-version-information/#example-of-version-7-information-string
### Response:
def add_version_pattern(self, m):
"""For QR codes with a version 7 or higher, a special pattern
specifying the code's version is required.
For further information see:
http://www.thonky.com/qr-code-tutorial/format-version-information/#example-of-version-7-information-string
"""
if self.version < 7:
return
#Get the bit fields for this code's version
#We will iterate across the string, the bit string
#needs the least significant digit in the zero-th position
field = iter(tables.version_pattern[self.version][::-1])
#Where to start placing the pattern
start = len(m)-11
#The version pattern is pretty odd looking
for i in range(6):
#The pattern is three modules wide
for j in range(start, start+3):
bit = int(next(field))
#Bottom Left
m[i][j] = bit
#Upper right
m[j][i] = bit |
def _get_pillar_cfg(pillar_key,
pillarenv=None,
saltenv=None):
'''
Retrieve the pillar data from the right environment.
'''
pillar_cfg = __salt__['pillar.get'](pillar_key,
pillarenv=pillarenv,
saltenv=saltenv)
    return pillar_cfg | Retrieve the pillar data from the right environment. | Below is the instruction that describes the task:
### Input:
Retrieve the pillar data from the right environment.
### Response:
def _get_pillar_cfg(pillar_key,
pillarenv=None,
saltenv=None):
'''
Retrieve the pillar data from the right environment.
'''
pillar_cfg = __salt__['pillar.get'](pillar_key,
pillarenv=pillarenv,
saltenv=saltenv)
return pillar_cfg |
def draw(self, mode="triangle_strip"):
""" Draw collection """
gl.glDepthMask(gl.GL_FALSE)
Collection.draw(self, mode)
    gl.glDepthMask(gl.GL_TRUE) | Draw collection | Below is the instruction that describes the task:
### Input:
Draw collection
### Response:
def draw(self, mode="triangle_strip"):
""" Draw collection """
gl.glDepthMask(gl.GL_FALSE)
Collection.draw(self, mode)
gl.glDepthMask(gl.GL_TRUE) |
def set_account_username(account, old_username, new_username):
""" Account's username was changed. """
for datastore in _get_datastores():
        datastore.set_account_username(account, old_username, new_username) | Account's username was changed. | Below is the instruction that describes the task:
### Input:
Account's username was changed.
### Response:
def set_account_username(account, old_username, new_username):
""" Account's username was changed. """
for datastore in _get_datastores():
datastore.set_account_username(account, old_username, new_username) |
def feed_batch(self, batch_info, data, target):
""" Run single batch of data """
data, target = data.to(self.device), target.to(self.device)
output, loss = self.model.loss(data, target)
# Store extra batch information for calculation of the statistics
batch_info['data'] = data
batch_info['target'] = target
batch_info['output'] = output
batch_info['loss'] = loss
    return loss | Run single batch of data | Below is the instruction that describes the task:
### Input:
Run single batch of data
### Response:
def feed_batch(self, batch_info, data, target):
""" Run single batch of data """
data, target = data.to(self.device), target.to(self.device)
output, loss = self.model.loss(data, target)
# Store extra batch information for calculation of the statistics
batch_info['data'] = data
batch_info['target'] = target
batch_info['output'] = output
batch_info['loss'] = loss
return loss |
def get_exchange(self, vhost, name):
"""
Gets a single exchange which requires a vhost and name.
:param string vhost: The vhost containing the target exchange
:param string name: The name of the exchange
:returns: dict
"""
vhost = quote(vhost, '')
name = quote(name, '')
path = Client.urls['exchange_by_name'] % (vhost, name)
exch = self._call(path, 'GET')
return exch | Gets a single exchange which requires a vhost and name.
:param string vhost: The vhost containing the target exchange
:param string name: The name of the exchange
    :returns: dict | Below is the instruction that describes the task:
### Input:
Gets a single exchange which requires a vhost and name.
:param string vhost: The vhost containing the target exchange
:param string name: The name of the exchange
:returns: dict
### Response:
def get_exchange(self, vhost, name):
"""
Gets a single exchange which requires a vhost and name.
:param string vhost: The vhost containing the target exchange
:param string name: The name of the exchange
:returns: dict
"""
vhost = quote(vhost, '')
name = quote(name, '')
path = Client.urls['exchange_by_name'] % (vhost, name)
exch = self._call(path, 'GET')
return exch |
def plot_summary_figure(info_df, summary_df, color_list, symbol_list,
selected_summaries,
batch_dir, batch_name, plot_style=None, show=False,
save=True,
figure_type=None):
"""Create a figure with summary graphs.
Args:
info_df: the pandas DataFrame with info about the runs.
summary_df: a pandas DataFrame with the summary data.
color_list: a list of colors to use (one pr. group)
symbol_list: a list of symbols to use (one pr. cell in largest group)
selected_summaries: a list of the selected summaries to plot
batch_dir: path to the folder where the figure should be saved.
batch_name: the batch name.
plot_style: the matplotlib plot-style to use.
show: show the figure if True.
save: save the figure if True.
figure_type: a string for selecting type of figure to make.
"""
figure_type_object = figure_types[figure_type]
logger.debug("creating figure ({})".format(figure_type))
standard_fig, ax = plt.subplots(nrows=figure_type_object.rows,
ncols=figure_type_object.columns,
sharex=True)
ce_ax = figure_type_object.retrieve_axis("ce_ax", ax)
cap_ax = figure_type_object.retrieve_axis("cap_ax", ax)
ir_ax = figure_type_object.retrieve_axis("ir_ax", ax)
ev_ax = figure_type_object.retrieve_axis("ev_ax", ax)
# pick data (common for all plot types)
# could include a if cd_ax: pick_summary_data...
ce_df = pick_summary_data("coulombic_efficiency", summary_df,
selected_summaries)
cc_df = pick_summary_data("charge_capacity", summary_df, selected_summaries)
dc_df = pick_summary_data("discharge_capacity", summary_df,
selected_summaries)
# generate labels
ce_labels = [info_df.get_value(filename, "labels") for filename in
ce_df.columns.get_level_values(0)]
# adding charge/discharge label
ce_labels.extend(["", "discharge", "charge"])
# plot ce
list_of_lines, plot_style = plot_summary_data(ce_ax, ce_df, info_df=info_df,
color_list=color_list,
symbol_list=symbol_list,
is_charge=False,
plot_style=plot_style)
ce_ax.set_ylabel("Coulombic\nefficiency\n(%)")
ce_ax.locator_params(nbins=5)
# adding charge/discharge label
color = plot_style["color"]
markersize = plot_style["markersize"]
open_label = mpl.lines.Line2D([], [], color=color, marker='s',
markeredgecolor=color, markerfacecolor='none',
markersize=markersize)
closed_label = mpl.lines.Line2D([], [], color=color, marker='s',
markeredgecolor=color,
markerfacecolor=color,
markersize=markersize)
no_label = mpl.lines.Line2D([], [], color='none', marker='s', markersize=0)
list_of_lines.extend([no_label, closed_label, open_label])
# plotting capacity (common)
plot_summary_data(cap_ax, cc_df, is_charge=True, info_df=info_df,
color_list=color_list,
symbol_list=symbol_list, plot_style=plot_style)
plot_summary_data(cap_ax, dc_df, is_charge=False, info_df=info_df,
color_list=color_list,
symbol_list=symbol_list, plot_style=plot_style)
cap_ax.set_ylabel("Capacity\n(mAh/g)")
cap_ax.locator_params(nbins=4)
# plotting ir data (common)
plot_ir_charge, plot_ir_discharge = figure_type_object.ir_selector
if plot_ir_charge:
irc_df = pick_summary_data("ir_charge", summary_df, selected_summaries)
plot_summary_data(ir_ax, irc_df, is_charge=True, info_df=info_df,
color_list=color_list,
symbol_list=symbol_list, plot_style=plot_style)
if plot_ir_discharge:
ird_df = pick_summary_data("ir_discharge", summary_df,
selected_summaries)
plot_summary_data(ir_ax, ird_df, is_charge=False, info_df=info_df,
color_list=color_list,
symbol_list=symbol_list, plot_style=plot_style)
ir_ax.set_ylabel("Internal\nresistance\n(Ohms)")
ir_ax.set_xlabel("Cycle number")
ir_ax.locator_params(axis="y", nbins=4)
ir_ax.locator_params(axis="x", nbins=10)
# should use MaxNLocator here instead
# pick data (not common for all plot types)
if ev_ax is not None:
plot_ev_charge, plot_ev_discharge = figure_type_object\
.end_voltage_selector
if plot_ev_charge:
evc_df = pick_summary_data("end_voltage_charge", summary_df,
selected_summaries)
plot_summary_data(ev_ax, evc_df, is_charge=True, info_df=info_df,
color_list=color_list,
symbol_list=symbol_list, plot_style=plot_style)
if plot_ev_discharge:
evd_df = pick_summary_data("end_voltage_discharge", summary_df,
selected_summaries)
plot_summary_data(ev_ax, evd_df, is_charge=False, info_df=info_df,
color_list=color_list,
symbol_list=symbol_list, plot_style=plot_style)
ev_ax.set_ylabel("End\nvoltage\n(V)")
ev_ax.locator_params(axis="y", nbins=4)
# tweaking
plt.subplots_adjust(left=0.07, right=0.93, top=0.9, wspace=0.25,
hspace=0.15)
# adding legend
logger.debug("trying to add legends " + str(ce_labels))
standard_fig.legend(handles=list_of_lines, labels=ce_labels,
bbox_to_anchor=(1.02, 1.1), loc=2,
# bbox_transform=plt.gcf().transFigure,
bbox_transform=ce_ax.transAxes,
numpoints=1,
ncol=1, labelspacing=0.,
prop={"size": 10})
# plt.tight_layout()
if save:
extension = prms.Batch["fig_extension"]
dpi = prms.Batch["dpi"]
figure_file_name = os.path.join(batch_dir, "%splot_%s.%s" % (
figure_type, batch_name, extension))
standard_fig.savefig(figure_file_name, dpi=dpi, bbox_inches='tight')
if show:
plt.show()
return standard_fig, (ce_ax, cap_ax, ir_ax) | Create a figure with summary graphs.
Args:
info_df: the pandas DataFrame with info about the runs.
summary_df: a pandas DataFrame with the summary data.
color_list: a list of colors to use (one pr. group)
symbol_list: a list of symbols to use (one pr. cell in largest group)
selected_summaries: a list of the selected summaries to plot
batch_dir: path to the folder where the figure should be saved.
batch_name: the batch name.
plot_style: the matplotlib plot-style to use.
show: show the figure if True.
save: save the figure if True.
        figure_type: a string for selecting type of figure to make. | Below is the instruction that describes the task:
### Input:
Create a figure with summary graphs.
Args:
info_df: the pandas DataFrame with info about the runs.
summary_df: a pandas DataFrame with the summary data.
color_list: a list of colors to use (one pr. group)
symbol_list: a list of symbols to use (one pr. cell in largest group)
selected_summaries: a list of the selected summaries to plot
batch_dir: path to the folder where the figure should be saved.
batch_name: the batch name.
plot_style: the matplotlib plot-style to use.
show: show the figure if True.
save: save the figure if True.
figure_type: a string for selecting type of figure to make.
### Response:
def plot_summary_figure(info_df, summary_df, color_list, symbol_list,
selected_summaries,
batch_dir, batch_name, plot_style=None, show=False,
save=True,
figure_type=None):
"""Create a figure with summary graphs.
Args:
info_df: the pandas DataFrame with info about the runs.
summary_df: a pandas DataFrame with the summary data.
color_list: a list of colors to use (one pr. group)
symbol_list: a list of symbols to use (one pr. cell in largest group)
selected_summaries: a list of the selected summaries to plot
batch_dir: path to the folder where the figure should be saved.
batch_name: the batch name.
plot_style: the matplotlib plot-style to use.
show: show the figure if True.
save: save the figure if True.
figure_type: a string for selecting type of figure to make.
"""
figure_type_object = figure_types[figure_type]
logger.debug("creating figure ({})".format(figure_type))
standard_fig, ax = plt.subplots(nrows=figure_type_object.rows,
ncols=figure_type_object.columns,
sharex=True)
ce_ax = figure_type_object.retrieve_axis("ce_ax", ax)
cap_ax = figure_type_object.retrieve_axis("cap_ax", ax)
ir_ax = figure_type_object.retrieve_axis("ir_ax", ax)
ev_ax = figure_type_object.retrieve_axis("ev_ax", ax)
# pick data (common for all plot types)
# could include a if cd_ax: pick_summary_data...
ce_df = pick_summary_data("coulombic_efficiency", summary_df,
selected_summaries)
cc_df = pick_summary_data("charge_capacity", summary_df, selected_summaries)
dc_df = pick_summary_data("discharge_capacity", summary_df,
selected_summaries)
# generate labels
ce_labels = [info_df.get_value(filename, "labels") for filename in
ce_df.columns.get_level_values(0)]
# adding charge/discharge label
ce_labels.extend(["", "discharge", "charge"])
# plot ce
list_of_lines, plot_style = plot_summary_data(ce_ax, ce_df, info_df=info_df,
color_list=color_list,
symbol_list=symbol_list,
is_charge=False,
plot_style=plot_style)
ce_ax.set_ylabel("Coulombic\nefficiency\n(%)")
ce_ax.locator_params(nbins=5)
# adding charge/discharge label
color = plot_style["color"]
markersize = plot_style["markersize"]
open_label = mpl.lines.Line2D([], [], color=color, marker='s',
markeredgecolor=color, markerfacecolor='none',
markersize=markersize)
closed_label = mpl.lines.Line2D([], [], color=color, marker='s',
markeredgecolor=color,
markerfacecolor=color,
markersize=markersize)
no_label = mpl.lines.Line2D([], [], color='none', marker='s', markersize=0)
list_of_lines.extend([no_label, closed_label, open_label])
# plotting capacity (common)
plot_summary_data(cap_ax, cc_df, is_charge=True, info_df=info_df,
color_list=color_list,
symbol_list=symbol_list, plot_style=plot_style)
plot_summary_data(cap_ax, dc_df, is_charge=False, info_df=info_df,
color_list=color_list,
symbol_list=symbol_list, plot_style=plot_style)
cap_ax.set_ylabel("Capacity\n(mAh/g)")
cap_ax.locator_params(nbins=4)
# plotting ir data (common)
plot_ir_charge, plot_ir_discharge = figure_type_object.ir_selector
if plot_ir_charge:
irc_df = pick_summary_data("ir_charge", summary_df, selected_summaries)
plot_summary_data(ir_ax, irc_df, is_charge=True, info_df=info_df,
color_list=color_list,
symbol_list=symbol_list, plot_style=plot_style)
if plot_ir_discharge:
ird_df = pick_summary_data("ir_discharge", summary_df,
selected_summaries)
plot_summary_data(ir_ax, ird_df, is_charge=False, info_df=info_df,
color_list=color_list,
symbol_list=symbol_list, plot_style=plot_style)
ir_ax.set_ylabel("Internal\nresistance\n(Ohms)")
ir_ax.set_xlabel("Cycle number")
ir_ax.locator_params(axis="y", nbins=4)
ir_ax.locator_params(axis="x", nbins=10)
# should use MaxNLocator here instead
# pick data (not common for all plot types)
if ev_ax is not None:
plot_ev_charge, plot_ev_discharge = figure_type_object\
.end_voltage_selector
if plot_ev_charge:
evc_df = pick_summary_data("end_voltage_charge", summary_df,
selected_summaries)
plot_summary_data(ev_ax, evc_df, is_charge=True, info_df=info_df,
color_list=color_list,
symbol_list=symbol_list, plot_style=plot_style)
if plot_ev_discharge:
evd_df = pick_summary_data("end_voltage_discharge", summary_df,
selected_summaries)
plot_summary_data(ev_ax, evd_df, is_charge=False, info_df=info_df,
color_list=color_list,
symbol_list=symbol_list, plot_style=plot_style)
ev_ax.set_ylabel("End\nvoltage\n(V)")
ev_ax.locator_params(axis="y", nbins=4)
# tweaking
plt.subplots_adjust(left=0.07, right=0.93, top=0.9, wspace=0.25,
hspace=0.15)
# adding legend
logger.debug("trying to add legends " + str(ce_labels))
standard_fig.legend(handles=list_of_lines, labels=ce_labels,
bbox_to_anchor=(1.02, 1.1), loc=2,
# bbox_transform=plt.gcf().transFigure,
bbox_transform=ce_ax.transAxes,
numpoints=1,
ncol=1, labelspacing=0.,
prop={"size": 10})
# plt.tight_layout()
if save:
extension = prms.Batch["fig_extension"]
dpi = prms.Batch["dpi"]
figure_file_name = os.path.join(batch_dir, "%splot_%s.%s" % (
figure_type, batch_name, extension))
standard_fig.savefig(figure_file_name, dpi=dpi, bbox_inches='tight')
if show:
plt.show()
return standard_fig, (ce_ax, cap_ax, ir_ax) |
def convert_mrf_to_syntax_mrf( mrf_lines, conversion_rules ):
''' Converts given lines from Filosoft's mrf format to syntactic analyzer's
format, using the morph-category conversion rules from conversion_rules,
and punctuation via method _convert_punctuation();
As a result of conversion, the input list mrf_lines will be modified,
and also returned after a successful conversion;
Morph-category conversion rules should be loaded via method
load_fs_mrf_to_syntax_mrf_translation_rules( rulesFile ),
usually from a file named 'tmorftrtabel.txt';
Note that the resulting list of lines likely has more lines than the
original list had, because the conversion often requires that the
original Filosoft's analysis is expanded into multiple analyses
suitable for the syntactic analyzer;
'''
i = 0
while ( i < len(mrf_lines) ):
line = mrf_lines[i]
if line.startswith(' '): # only consider lines of analysis
# 1) Convert punctuation
if _punctOrAbbrev.search(line):
mrf_lines[i] = _convert_punctuation( line )
if '_Y_' not in line:
i += 1
continue
# 2) Convert morphological analyses that have a form specified
withFormMatch = _morfWithForm.search(line)
if withFormMatch:
root = withFormMatch.group(1)
pos = withFormMatch.group(2)
formStr = withFormMatch.group(3)
forms = formStr.split(',')
all_new_lines = []
for form in forms:
morphKey = pos+' '+form.strip()
if morphKey in conversion_rules:
newlines = [ ' '+root+' //'+_esc_que_mark(r)+' //' for r in conversion_rules[morphKey] ]
all_new_lines.extend( newlines )
if all_new_lines:
del mrf_lines[i]
for newline in all_new_lines:
mrf_lines.insert(i, newline)
i += len(newlines)
continue
else:
withoutFormMatch = _morfWithoutForm.search(line)
if withoutFormMatch:
# 3) Convert morphological analyses that have only POS specified
root = withoutFormMatch.group(1)
pos = withoutFormMatch.group(2)
morphKey = pos
all_new_lines = []
if morphKey in conversion_rules:
newlines = [ ' '+root+' //'+_esc_que_mark(r)+' //' for r in conversion_rules[morphKey] ]
all_new_lines.extend( newlines )
if all_new_lines:
del mrf_lines[i]
for newline in all_new_lines:
mrf_lines.insert(i, newline)
i += len(newlines)
continue
i += 1
return mrf_lines | Converts given lines from Filosoft's mrf format to syntactic analyzer's
format, using the morph-category conversion rules from conversion_rules,
and punctuation via method _convert_punctuation();
As a result of conversion, the input list mrf_lines will be modified,
and also returned after a successful conversion;
Morph-category conversion rules should be loaded via method
load_fs_mrf_to_syntax_mrf_translation_rules( rulesFile ),
usually from a file named 'tmorftrtabel.txt';
Note that the resulting list of lines likely has more lines than the
original list had, because the conversion often requires that the
original Filosoft's analysis is expanded into multiple analyses
        suitable for the syntactic analyzer; | Below is the instruction that describes the task:
### Input:
Converts given lines from Filosoft's mrf format to syntactic analyzer's
format, using the morph-category conversion rules from conversion_rules,
and punctuation via method _convert_punctuation();
As a result of conversion, the input list mrf_lines will be modified,
and also returned after a successful conversion;
Morph-category conversion rules should be loaded via method
load_fs_mrf_to_syntax_mrf_translation_rules( rulesFile ),
usually from a file named 'tmorftrtabel.txt';
Note that the resulting list of lines likely has more lines than the
original list had, because the conversion often requires that the
original Filosoft's analysis is expanded into multiple analyses
suitable for the syntactic analyzer;
### Response:
def convert_mrf_to_syntax_mrf( mrf_lines, conversion_rules ):
''' Converts given lines from Filosoft's mrf format to syntactic analyzer's
format, using the morph-category conversion rules from conversion_rules,
and punctuation via method _convert_punctuation();
As a result of conversion, the input list mrf_lines will be modified,
and also returned after a successful conversion;
Morph-category conversion rules should be loaded via method
load_fs_mrf_to_syntax_mrf_translation_rules( rulesFile ),
usually from a file named 'tmorftrtabel.txt';
Note that the resulting list of lines likely has more lines than the
original list had, because the conversion often requires that the
original Filosoft's analysis is expanded into multiple analyses
suitable for the syntactic analyzer;
'''
i = 0
while ( i < len(mrf_lines) ):
line = mrf_lines[i]
if line.startswith(' '): # only consider lines of analysis
# 1) Convert punctuation
if _punctOrAbbrev.search(line):
mrf_lines[i] = _convert_punctuation( line )
if '_Y_' not in line:
i += 1
continue
# 2) Convert morphological analyses that have a form specified
withFormMatch = _morfWithForm.search(line)
if withFormMatch:
root = withFormMatch.group(1)
pos = withFormMatch.group(2)
formStr = withFormMatch.group(3)
forms = formStr.split(',')
all_new_lines = []
for form in forms:
morphKey = pos+' '+form.strip()
if morphKey in conversion_rules:
newlines = [ ' '+root+' //'+_esc_que_mark(r)+' //' for r in conversion_rules[morphKey] ]
all_new_lines.extend( newlines )
if all_new_lines:
del mrf_lines[i]
for newline in all_new_lines:
mrf_lines.insert(i, newline)
i += len(newlines)
continue
else:
withoutFormMatch = _morfWithoutForm.search(line)
if withoutFormMatch:
# 3) Convert morphological analyses that have only POS specified
root = withoutFormMatch.group(1)
pos = withoutFormMatch.group(2)
morphKey = pos
all_new_lines = []
if morphKey in conversion_rules:
newlines = [ ' '+root+' //'+_esc_que_mark(r)+' //' for r in conversion_rules[morphKey] ]
all_new_lines.extend( newlines )
if all_new_lines:
del mrf_lines[i]
for newline in all_new_lines:
mrf_lines.insert(i, newline)
i += len(newlines)
continue
i += 1
return mrf_lines |
def prettyPrintSequence(self, sequence, verbosity=1):
"""
Pretty print a sequence.
@param sequence (list) Sequence
@param verbosity (int) Verbosity level
@return (string) Pretty-printed text
"""
text = ""
for i in xrange(len(sequence)):
pattern = sequence[i]
if pattern == None:
text += "<reset>"
if i < len(sequence) - 1:
text += "\n"
else:
text += self.patternMachine.prettyPrintPattern(pattern,
verbosity=verbosity)
return text | Pretty print a sequence.
@param sequence (list) Sequence
@param verbosity (int) Verbosity level
    @return (string) Pretty-printed text | Below is the instruction that describes the task:
### Input:
Pretty print a sequence.
@param sequence (list) Sequence
@param verbosity (int) Verbosity level
@return (string) Pretty-printed text
### Response:
def prettyPrintSequence(self, sequence, verbosity=1):
"""
Pretty print a sequence.
@param sequence (list) Sequence
@param verbosity (int) Verbosity level
@return (string) Pretty-printed text
"""
text = ""
for i in xrange(len(sequence)):
pattern = sequence[i]
if pattern == None:
text += "<reset>"
if i < len(sequence) - 1:
text += "\n"
else:
text += self.patternMachine.prettyPrintPattern(pattern,
verbosity=verbosity)
return text |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self, 'normalized') and self.normalized is not None:
_dict['normalized'] = self.normalized
if hasattr(self, 'verb') and self.verb is not None:
_dict['verb'] = self.verb._to_dict()
        return _dict | Return a json dictionary representing this model. | Below is the instruction that describes the task:
### Input:
Return a json dictionary representing this model.
### Response:
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self, 'normalized') and self.normalized is not None:
_dict['normalized'] = self.normalized
if hasattr(self, 'verb') and self.verb is not None:
_dict['verb'] = self.verb._to_dict()
return _dict |
def getMissingTexture(self):
"""
Returns a texture to be used as a placeholder for missing textures.
A default missing texture file is provided in the assets folder of the source distribution.
It consists of a simple checkerboard pattern of purple and black, this image may be copied to any project using peng3d for similar behavior.
If this texture cannot be found, a pattern is created in-memory, simply a solid square of purple.
This texture will also be cached separately from other textures.
"""
if self.missingTexture is None:
if self.resourceExists(self.missingtexturename,".png"):
self.missingTexture = pyglet.image.load(self.resourceNameToPath(self.missingtexturename,".png"))
return self.missingTexture
else: # Falls back to create pattern in-memory
self.missingTexture = pyglet.image.create(1,1,pyglet.image.SolidColorImagePattern([255,0,255,255]))
return self.missingTexture
else:
return self.missingTexture | Returns a texture to be used as a placeholder for missing textures.
A default missing texture file is provided in the assets folder of the source distribution.
It consists of a simple checkerboard pattern of purple and black, this image may be copied to any project using peng3d for similar behavior.
If this texture cannot be found, a pattern is created in-memory, simply a solid square of purple.
    This texture will also be cached separately from other textures. | Below is the instruction that describes the task:
### Input:
Returns a texture to be used as a placeholder for missing textures.
A default missing texture file is provided in the assets folder of the source distribution.
It consists of a simple checkerboard pattern of purple and black, this image may be copied to any project using peng3d for similar behavior.
If this texture cannot be found, a pattern is created in-memory, simply a solid square of purple.
This texture will also be cached separately from other textures.
### Response:
def getMissingTexture(self):
"""
Returns a texture to be used as a placeholder for missing textures.
A default missing texture file is provided in the assets folder of the source distribution.
It consists of a simple checkerboard pattern of purple and black, this image may be copied to any project using peng3d for similar behavior.
If this texture cannot be found, a pattern is created in-memory, simply a solid square of purple.
This texture will also be cached separately from other textures.
"""
if self.missingTexture is None:
if self.resourceExists(self.missingtexturename,".png"):
self.missingTexture = pyglet.image.load(self.resourceNameToPath(self.missingtexturename,".png"))
return self.missingTexture
else: # Falls back to create pattern in-memory
self.missingTexture = pyglet.image.create(1,1,pyglet.image.SolidColorImagePattern([255,0,255,255]))
return self.missingTexture
else:
return self.missingTexture |
def select(self, filter_by=None):
"""
Parameters
----------
filter_by: callable, default None
Callable must take one argument (a record of table), and return True to keep record, or False to skip it.
Example : .select(lambda x: x.name == "my_name").
If None, records are not filtered.
Returns
-------
Queryset instance, containing all selected records.
"""
records = self._records.values() if filter_by is None else filter(filter_by, self._records.values())
return Queryset(self, records=records) | Parameters
----------
filter_by: callable, default None
Callable must take one argument (a record of table), and return True to keep record, or False to skip it.
Example : .select(lambda x: x.name == "my_name").
If None, records are not filtered.
Returns
-------
    Queryset instance, containing all selected records. | Below is the instruction that describes the task:
### Input:
Parameters
----------
filter_by: callable, default None
Callable must take one argument (a record of table), and return True to keep record, or False to skip it.
Example : .select(lambda x: x.name == "my_name").
If None, records are not filtered.
Returns
-------
Queryset instance, containing all selected records.
### Response:
def select(self, filter_by=None):
"""
Parameters
----------
filter_by: callable, default None
Callable must take one argument (a record of table), and return True to keep record, or False to skip it.
Example : .select(lambda x: x.name == "my_name").
If None, records are not filtered.
Returns
-------
Queryset instance, containing all selected records.
"""
records = self._records.values() if filter_by is None else filter(filter_by, self._records.values())
return Queryset(self, records=records) |
async def _connect_one(self, remote_address):
'''Connect to the proxy and perform a handshake requesting a connection.
Return the open socket on success, or the exception on failure.
'''
loop = asyncio.get_event_loop()
for info in await loop.getaddrinfo(str(self.address.host), self.address.port,
type=socket.SOCK_STREAM):
# This object has state so is only good for one connection
client = self.protocol(remote_address, self.auth)
sock = socket.socket(family=info[0])
try:
# A non-blocking socket is required by loop socket methods
sock.setblocking(False)
await loop.sock_connect(sock, info[4])
await self._handshake(client, sock, loop)
self.peername = sock.getpeername()
return sock
except (OSError, SOCKSProtocolError) as e:
exception = e
# Don't close the socket because of an asyncio bug
# see https://github.com/kyuupichan/aiorpcX/issues/8
return exception | Connect to the proxy and perform a handshake requesting a connection.
Return the open socket on success, or the exception on failure. | Below is the instruction that describes the task:
### Input:
Connect to the proxy and perform a handshake requesting a connection.
Return the open socket on success, or the exception on failure.
### Response:
async def _connect_one(self, remote_address):
'''Connect to the proxy and perform a handshake requesting a connection.
Return the open socket on success, or the exception on failure.
'''
loop = asyncio.get_event_loop()
for info in await loop.getaddrinfo(str(self.address.host), self.address.port,
type=socket.SOCK_STREAM):
# This object has state so is only good for one connection
client = self.protocol(remote_address, self.auth)
sock = socket.socket(family=info[0])
try:
# A non-blocking socket is required by loop socket methods
sock.setblocking(False)
await loop.sock_connect(sock, info[4])
await self._handshake(client, sock, loop)
self.peername = sock.getpeername()
return sock
except (OSError, SOCKSProtocolError) as e:
exception = e
# Don't close the socket because of an asyncio bug
# see https://github.com/kyuupichan/aiorpcX/issues/8
return exception |
def listen_loop(self, address, family, internal=False):
"""
Accepts incoming connections.
"""
try:
sock = eventlet.listen(address, family)
except socket.error, e:
if e.errno == errno.EADDRINUSE:
logging.critical("Cannot listen on (%s, %s): already in use" % (address, family))
raise
elif e.errno == errno.EACCES and address[1] <= 1024:
logging.critical("Cannot listen on (%s, %s) (you might need to launch as root)" % (address, family))
return
logging.critical("Cannot listen on (%s, %s): %s" % (address, family, e))
return
# Sleep to ensure we've dropped privileges by the time we start serving
eventlet.sleep(0.5)
# Start serving
logging.info("Listening for requests on %s" % (address, ))
try:
eventlet.serve(
sock,
lambda sock, addr: self.handle(sock, addr, internal),
concurrency = 10000,
)
finally:
sock.close() | Accepts incoming connections. | Below is the instruction that describes the task:
### Input:
Accepts incoming connections.
### Response:
def listen_loop(self, address, family, internal=False):
"""
Accepts incoming connections.
"""
try:
sock = eventlet.listen(address, family)
except socket.error, e:
if e.errno == errno.EADDRINUSE:
logging.critical("Cannot listen on (%s, %s): already in use" % (address, family))
raise
elif e.errno == errno.EACCES and address[1] <= 1024:
logging.critical("Cannot listen on (%s, %s) (you might need to launch as root)" % (address, family))
return
logging.critical("Cannot listen on (%s, %s): %s" % (address, family, e))
return
# Sleep to ensure we've dropped privileges by the time we start serving
eventlet.sleep(0.5)
# Start serving
logging.info("Listening for requests on %s" % (address, ))
try:
eventlet.serve(
sock,
lambda sock, addr: self.handle(sock, addr, internal),
concurrency = 10000,
)
finally:
sock.close() |
def agg_linear_trend(self, x, param=None):
"""
As in tsfresh `agg_inear_trend <https://github.com/blue-yonder/tsfresh/blob/master/tsfresh/\
feature_extraction/feature_calculators.py#L1727>`_
Calculates a linear least-squares regression for values of the time series that were aggregated over chunks\
versus the sequence from 0 up to the number of chunks minus one.
This feature assumes the signal to be uniformly sampled. It will not use the time stamps to fit the model.
The parameters attr controls which of the characteristics are returned. Possible extracted attributes are\
"pvalue", "rvalue", "intercept", "slope", "stderr", see the documentation of linregress for more \
information.
The chunksize is regulated by "chunk_len". It specifies how many time series values are in each chunk.
Further, the aggregation function is controlled by "f_agg", which can use "max", "min" or , "mean", "median"
:param x: the time series to calculate the feature of
:type x: pandas.Series
:param param: contains dictionaries {"attr": x, "chunk_len": l, "f_agg": f} with x, f a str and l an int
:type param: list
:return: the different feature values
:rtype: pandas.Series
"""
if param is None:
param = [{'attr': 'intercept', 'chunk_len': 5, 'f_agg': 'min'},
{'attr': 'rvalue', 'chunk_len': 10, 'f_agg': 'var'},
{'attr': 'intercept', 'chunk_len': 10, 'f_agg': 'min'}]
agg = feature_calculators.agg_linear_trend(x, param)
logging.debug("agg linear trend by tsfresh calculated")
return list(agg) | As in tsfresh `agg_inear_trend <https://github.com/blue-yonder/tsfresh/blob/master/tsfresh/\
feature_extraction/feature_calculators.py#L1727>`_
Calculates a linear least-squares regression for values of the time series that were aggregated over chunks\
versus the sequence from 0 up to the number of chunks minus one.
This feature assumes the signal to be uniformly sampled. It will not use the time stamps to fit the model.
The parameters attr controls which of the characteristics are returned. Possible extracted attributes are\
"pvalue", "rvalue", "intercept", "slope", "stderr", see the documentation of linregress for more \
information.
The chunksize is regulated by "chunk_len". It specifies how many time series values are in each chunk.
Further, the aggregation function is controlled by "f_agg", which can use "max", "min" or , "mean", "median"
:param x: the time series to calculate the feature of
:type x: pandas.Series
:param param: contains dictionaries {"attr": x, "chunk_len": l, "f_agg": f} with x, f a str and l an int
:type param: list
:return: the different feature values
:rtype: pandas.Series | Below is the instruction that describes the task:
### Input:
As in tsfresh `agg_inear_trend <https://github.com/blue-yonder/tsfresh/blob/master/tsfresh/\
feature_extraction/feature_calculators.py#L1727>`_
Calculates a linear least-squares regression for values of the time series that were aggregated over chunks\
versus the sequence from 0 up to the number of chunks minus one.
This feature assumes the signal to be uniformly sampled. It will not use the time stamps to fit the model.
The parameters attr controls which of the characteristics are returned. Possible extracted attributes are\
"pvalue", "rvalue", "intercept", "slope", "stderr", see the documentation of linregress for more \
information.
The chunksize is regulated by "chunk_len". It specifies how many time series values are in each chunk.
Further, the aggregation function is controlled by "f_agg", which can use "max", "min" or , "mean", "median"
:param x: the time series to calculate the feature of
:type x: pandas.Series
:param param: contains dictionaries {"attr": x, "chunk_len": l, "f_agg": f} with x, f a str and l an int
:type param: list
:return: the different feature values
:rtype: pandas.Series
### Response:
def agg_linear_trend(self, x, param=None):
"""
As in tsfresh `agg_inear_trend <https://github.com/blue-yonder/tsfresh/blob/master/tsfresh/\
feature_extraction/feature_calculators.py#L1727>`_
Calculates a linear least-squares regression for values of the time series that were aggregated over chunks\
versus the sequence from 0 up to the number of chunks minus one.
This feature assumes the signal to be uniformly sampled. It will not use the time stamps to fit the model.
The parameters attr controls which of the characteristics are returned. Possible extracted attributes are\
"pvalue", "rvalue", "intercept", "slope", "stderr", see the documentation of linregress for more \
information.
The chunksize is regulated by "chunk_len". It specifies how many time series values are in each chunk.
Further, the aggregation function is controlled by "f_agg", which can use "max", "min" or , "mean", "median"
:param x: the time series to calculate the feature of
:type x: pandas.Series
:param param: contains dictionaries {"attr": x, "chunk_len": l, "f_agg": f} with x, f a str and l an int
:type param: list
:return: the different feature values
:rtype: pandas.Series
"""
if param is None:
param = [{'attr': 'intercept', 'chunk_len': 5, 'f_agg': 'min'},
{'attr': 'rvalue', 'chunk_len': 10, 'f_agg': 'var'},
{'attr': 'intercept', 'chunk_len': 10, 'f_agg': 'min'}]
agg = feature_calculators.agg_linear_trend(x, param)
logging.debug("agg linear trend by tsfresh calculated")
return list(agg) |
def _full_sub_array(data_obj, xj_path, create_dict_path):
"""Retrieves all array or dictionary elements for '*' JSON path marker.
:param dict|list data_obj: The current data object.
:param str xj_path: A json path.
:param bool create_dict_path create a dict path.
:return: tuple with two values: first is a result and second
a boolean flag telling if this value exists or not.
"""
if isinstance(data_obj, list):
if xj_path:
res = []
for d in data_obj:
val, exists = path_lookup(d, xj_path, create_dict_path)
if exists:
res.append(val)
return tuple(res), True
else:
return tuple(data_obj), True
elif isinstance(data_obj, dict):
if xj_path:
res = []
for d in data_obj.values():
val, exists = path_lookup(d, xj_path, create_dict_path)
if exists:
res.append(val)
return tuple(res), True
else:
return tuple(data_obj.values()), True
else:
return None, False | Retrieves all array or dictionary elements for '*' JSON path marker.
:param dict|list data_obj: The current data object.
:param str xj_path: A json path.
:param bool create_dict_path create a dict path.
:return: tuple with two values: first is a result and second
a boolean flag telling if this value exists or not. | Below is the instruction that describes the task:
### Input:
Retrieves all array or dictionary elements for '*' JSON path marker.
:param dict|list data_obj: The current data object.
:param str xj_path: A json path.
:param bool create_dict_path create a dict path.
:return: tuple with two values: first is a result and second
a boolean flag telling if this value exists or not.
### Response:
def _full_sub_array(data_obj, xj_path, create_dict_path):
"""Retrieves all array or dictionary elements for '*' JSON path marker.
:param dict|list data_obj: The current data object.
:param str xj_path: A json path.
:param bool create_dict_path create a dict path.
:return: tuple with two values: first is a result and second
a boolean flag telling if this value exists or not.
"""
if isinstance(data_obj, list):
if xj_path:
res = []
for d in data_obj:
val, exists = path_lookup(d, xj_path, create_dict_path)
if exists:
res.append(val)
return tuple(res), True
else:
return tuple(data_obj), True
elif isinstance(data_obj, dict):
if xj_path:
res = []
for d in data_obj.values():
val, exists = path_lookup(d, xj_path, create_dict_path)
if exists:
res.append(val)
return tuple(res), True
else:
return tuple(data_obj.values()), True
else:
return None, False |
def ListRecursivelyViaGlobbing(top):
"""Recursively lists all files within the directory.
This method does not list subdirectories (in addition to regular files), and
the file paths are all absolute. If the directory does not exist, this yields
nothing.
This method does so by glob-ing deeper and deeper directories, ie
foo/*, foo/*/*, foo/*/*/* and so on until all files are listed. All file
paths are absolute, and this method lists subdirectories too.
For certain file systems, globbing via this method may prove significantly
faster than recursively walking a directory. Specifically, TF file systems
that implement TensorFlow's FileSystem.GetMatchingPaths method could save
costly disk reads by using this method. However, for other file systems, this
method might prove slower because the file system performs a walk per call to
glob (in which case it might as well just perform 1 walk).
Args:
top: A path to a directory.
Yields:
A (dir_path, file_paths) tuple for each directory/subdirectory.
"""
current_glob_string = os.path.join(_EscapeGlobCharacters(top), '*')
level = 0
while True:
logger.info('GlobAndListFiles: Starting to glob level %d', level)
glob = tf.io.gfile.glob(current_glob_string)
logger.info(
'GlobAndListFiles: %d files glob-ed at level %d', len(glob), level)
if not glob:
# This subdirectory level lacks files. Terminate.
return
# Map subdirectory to a list of files.
pairs = collections.defaultdict(list)
for file_path in glob:
pairs[os.path.dirname(file_path)].append(file_path)
for dir_name, file_paths in six.iteritems(pairs):
yield (dir_name, tuple(file_paths))
if len(pairs) == 1:
# If at any point the glob returns files that are all in a single
# directory, replace the current globbing path with that directory as the
# literal prefix. This should improve efficiency in cases where a single
# subdir is significantly deeper than the rest of the sudirs.
current_glob_string = os.path.join(list(pairs.keys())[0], '*')
# Iterate to the next level of subdirectories.
current_glob_string = os.path.join(current_glob_string, '*')
level += 1 | Recursively lists all files within the directory.
This method does not list subdirectories (in addition to regular files), and
the file paths are all absolute. If the directory does not exist, this yields
nothing.
This method does so by glob-ing deeper and deeper directories, ie
foo/*, foo/*/*, foo/*/*/* and so on until all files are listed. All file
paths are absolute, and this method lists subdirectories too.
For certain file systems, globbing via this method may prove significantly
faster than recursively walking a directory. Specifically, TF file systems
that implement TensorFlow's FileSystem.GetMatchingPaths method could save
costly disk reads by using this method. However, for other file systems, this
method might prove slower because the file system performs a walk per call to
glob (in which case it might as well just perform 1 walk).
Args:
top: A path to a directory.
Yields:
A (dir_path, file_paths) tuple for each directory/subdirectory. | Below is the instruction that describes the task:
### Input:
Recursively lists all files within the directory.
This method does not list subdirectories (in addition to regular files), and
the file paths are all absolute. If the directory does not exist, this yields
nothing.
This method does so by glob-ing deeper and deeper directories, ie
foo/*, foo/*/*, foo/*/*/* and so on until all files are listed. All file
paths are absolute, and this method lists subdirectories too.
For certain file systems, globbing via this method may prove significantly
faster than recursively walking a directory. Specifically, TF file systems
that implement TensorFlow's FileSystem.GetMatchingPaths method could save
costly disk reads by using this method. However, for other file systems, this
method might prove slower because the file system performs a walk per call to
glob (in which case it might as well just perform 1 walk).
Args:
top: A path to a directory.
Yields:
A (dir_path, file_paths) tuple for each directory/subdirectory.
### Response:
def ListRecursivelyViaGlobbing(top):
"""Recursively lists all files within the directory.
This method does not list subdirectories (in addition to regular files), and
the file paths are all absolute. If the directory does not exist, this yields
nothing.
This method does so by glob-ing deeper and deeper directories, ie
foo/*, foo/*/*, foo/*/*/* and so on until all files are listed. All file
paths are absolute, and this method lists subdirectories too.
For certain file systems, globbing via this method may prove significantly
faster than recursively walking a directory. Specifically, TF file systems
that implement TensorFlow's FileSystem.GetMatchingPaths method could save
costly disk reads by using this method. However, for other file systems, this
method might prove slower because the file system performs a walk per call to
glob (in which case it might as well just perform 1 walk).
Args:
top: A path to a directory.
Yields:
A (dir_path, file_paths) tuple for each directory/subdirectory.
"""
current_glob_string = os.path.join(_EscapeGlobCharacters(top), '*')
level = 0
while True:
logger.info('GlobAndListFiles: Starting to glob level %d', level)
glob = tf.io.gfile.glob(current_glob_string)
logger.info(
'GlobAndListFiles: %d files glob-ed at level %d', len(glob), level)
if not glob:
# This subdirectory level lacks files. Terminate.
return
# Map subdirectory to a list of files.
pairs = collections.defaultdict(list)
for file_path in glob:
pairs[os.path.dirname(file_path)].append(file_path)
for dir_name, file_paths in six.iteritems(pairs):
yield (dir_name, tuple(file_paths))
if len(pairs) == 1:
# If at any point the glob returns files that are all in a single
# directory, replace the current globbing path with that directory as the
# literal prefix. This should improve efficiency in cases where a single
# subdir is significantly deeper than the rest of the sudirs.
current_glob_string = os.path.join(list(pairs.keys())[0], '*')
# Iterate to the next level of subdirectories.
current_glob_string = os.path.join(current_glob_string, '*')
level += 1 |
def instance_cache(cls, func):
""" Save the cache to `self`
This decorator take it for granted that the decorated function
is a method. The first argument of the function is `self`.
:param func: function to decorate
:return: the decorator
"""
@functools.wraps(func)
def func_wrapper(*args, **kwargs):
if not args:
raise ValueError('`self` is not available.')
else:
the_self = args[0]
func_key = cls.get_key(func)
val_cache = cls.get_self_cache(the_self, func_key)
lock = cls.get_self_cache_lock(the_self, func_key)
return cls._get_value_from_cache(
func, val_cache, lock, *args, **kwargs)
return func_wrapper | Save the cache to `self`
This decorator take it for granted that the decorated function
is a method. The first argument of the function is `self`.
:param func: function to decorate
:return: the decorator | Below is the instruction that describes the task:
### Input:
Save the cache to `self`
This decorator take it for granted that the decorated function
is a method. The first argument of the function is `self`.
:param func: function to decorate
:return: the decorator
### Response:
def instance_cache(cls, func):
""" Save the cache to `self`
This decorator take it for granted that the decorated function
is a method. The first argument of the function is `self`.
:param func: function to decorate
:return: the decorator
"""
@functools.wraps(func)
def func_wrapper(*args, **kwargs):
if not args:
raise ValueError('`self` is not available.')
else:
the_self = args[0]
func_key = cls.get_key(func)
val_cache = cls.get_self_cache(the_self, func_key)
lock = cls.get_self_cache_lock(the_self, func_key)
return cls._get_value_from_cache(
func, val_cache, lock, *args, **kwargs)
return func_wrapper |
def libvlc_event_type_name(event_type):
'''Get an event's type name.
@param event_type: the desired event.
'''
f = _Cfunctions.get('libvlc_event_type_name', None) or \
_Cfunction('libvlc_event_type_name', ((1,),), None,
ctypes.c_char_p, ctypes.c_uint)
return f(event_type) | Get an event's type name.
@param event_type: the desired event. | Below is the instruction that describes the task:
### Input:
Get an event's type name.
@param event_type: the desired event.
### Response:
def libvlc_event_type_name(event_type):
'''Get an event's type name.
@param event_type: the desired event.
'''
f = _Cfunctions.get('libvlc_event_type_name', None) or \
_Cfunction('libvlc_event_type_name', ((1,),), None,
ctypes.c_char_p, ctypes.c_uint)
return f(event_type) |
def addtoindex(self,norecurse=None):
"""Makes sure this element (and all subelements), are properly added to the index"""
if not norecurse: norecurse = (Word, Morpheme, Phoneme)
if self.id:
self.doc.index[self.id] = self
for e in self.data:
if all([not isinstance(e, C) for C in norecurse]):
try:
e.addtoindex(norecurse)
except AttributeError:
pass | Makes sure this element (and all subelements), are properly added to the index | Below is the instruction that describes the task:
### Input:
Makes sure this element (and all subelements), are properly added to the index
### Response:
def addtoindex(self,norecurse=None):
"""Makes sure this element (and all subelements), are properly added to the index"""
if not norecurse: norecurse = (Word, Morpheme, Phoneme)
if self.id:
self.doc.index[self.id] = self
for e in self.data:
if all([not isinstance(e, C) for C in norecurse]):
try:
e.addtoindex(norecurse)
except AttributeError:
pass |
def transform(self, value, **kwargs):
"""
Primary function which handles recursively transforming
values via their serializers
"""
if value is None:
return None
objid = id(value)
if objid in self.context:
return '<...>'
self.context.add(objid)
try:
for serializer in self.serializers:
try:
if serializer.can(value):
return serializer.serialize(value, **kwargs)
except Exception as e:
logger.exception(e)
return text_type(type(value))
# if all else fails, lets use the repr of the object
try:
return repr(value)
except Exception as e:
logger.exception(e)
# It's common case that a model's __unicode__ definition
# may try to query the database which if it was not
# cleaned up correctly, would hit a transaction aborted
# exception
return text_type(type(value))
finally:
self.context.remove(objid) | Primary function which handles recursively transforming
values via their serializers | Below is the instruction that describes the task:
### Input:
Primary function which handles recursively transforming
values via their serializers
### Response:
def transform(self, value, **kwargs):
"""
Primary function which handles recursively transforming
values via their serializers
"""
if value is None:
return None
objid = id(value)
if objid in self.context:
return '<...>'
self.context.add(objid)
try:
for serializer in self.serializers:
try:
if serializer.can(value):
return serializer.serialize(value, **kwargs)
except Exception as e:
logger.exception(e)
return text_type(type(value))
# if all else fails, lets use the repr of the object
try:
return repr(value)
except Exception as e:
logger.exception(e)
# It's common case that a model's __unicode__ definition
# may try to query the database which if it was not
# cleaned up correctly, would hit a transaction aborted
# exception
return text_type(type(value))
finally:
self.context.remove(objid) |
def full(self, asvector=False):
"""Returns full array (uncompressed).
.. warning::
TT compression allows to keep in memory tensors much larger than ones PC can handle in
raw format. Therefore this function is quite unsafe; use it at your own risk.
:returns: numpy.ndarray -- full tensor.
"""
# Generate correct size vector
sz = self.n.copy()
if self.r[0] > 1:
sz = _np.concatenate(([self.r[0]], sz))
if self.r[self.d] > 1:
sz = _np.concatenate(([self.r[self.d]], sz))
if (_np.iscomplex(self.core).any()):
a = _tt_f90.tt_f90.ztt_to_full(
self.n, self.r, self.ps, self.core, _np.prod(sz))
else:
a = _tt_f90.tt_f90.dtt_to_full(
self.n, self.r, self.ps, _np.real(
self.core), _np.prod(sz))
a = a.reshape(sz, order='F')
if asvector:
a=a.flatten(order='F')
return a | Returns full array (uncompressed).
.. warning::
TT compression allows to keep in memory tensors much larger than ones PC can handle in
raw format. Therefore this function is quite unsafe; use it at your own risk.
:returns: numpy.ndarray -- full tensor. | Below is the instruction that describes the task:
### Input:
Returns full array (uncompressed).
.. warning::
TT compression allows to keep in memory tensors much larger than ones PC can handle in
raw format. Therefore this function is quite unsafe; use it at your own risk.
:returns: numpy.ndarray -- full tensor.
### Response:
def full(self, asvector=False):
"""Returns full array (uncompressed).
.. warning::
TT compression allows to keep in memory tensors much larger than ones PC can handle in
raw format. Therefore this function is quite unsafe; use it at your own risk.
:returns: numpy.ndarray -- full tensor.
"""
# Generate correct size vector
sz = self.n.copy()
if self.r[0] > 1:
sz = _np.concatenate(([self.r[0]], sz))
if self.r[self.d] > 1:
sz = _np.concatenate(([self.r[self.d]], sz))
if (_np.iscomplex(self.core).any()):
a = _tt_f90.tt_f90.ztt_to_full(
self.n, self.r, self.ps, self.core, _np.prod(sz))
else:
a = _tt_f90.tt_f90.dtt_to_full(
self.n, self.r, self.ps, _np.real(
self.core), _np.prod(sz))
a = a.reshape(sz, order='F')
if asvector:
a=a.flatten(order='F')
return a |
def calculate_integral_over_T(self, T1, T2, method):
r'''Method to calculate the integral of a property over temperature
with respect to temperature, using a specified method. Uses SciPy's
`quad` function to perform the integral, with no options.
This method can be overwritten by subclasses who may perfer to add
analytical methods for some or all methods as this is much faster.
If the calculation does not succeed, returns the actual error
encountered.
Parameters
----------
T1 : float
Lower limit of integration, [K]
T2 : float
Upper limit of integration, [K]
method : str
Method for which to find the integral
Returns
-------
integral : float
Calculated integral of the property over the given range,
[`units`]
'''
return float(quad(lambda T: self.calculate(T, method)/T, T1, T2)[0]) | r'''Method to calculate the integral of a property over temperature
with respect to temperature, using a specified method. Uses SciPy's
`quad` function to perform the integral, with no options.
This method can be overwritten by subclasses who may perfer to add
analytical methods for some or all methods as this is much faster.
If the calculation does not succeed, returns the actual error
encountered.
Parameters
----------
T1 : float
Lower limit of integration, [K]
T2 : float
Upper limit of integration, [K]
method : str
Method for which to find the integral
Returns
-------
integral : float
Calculated integral of the property over the given range,
[`units`] | Below is the instruction that describes the task:
### Input:
r'''Method to calculate the integral of a property over temperature
with respect to temperature, using a specified method. Uses SciPy's
`quad` function to perform the integral, with no options.
This method can be overwritten by subclasses who may perfer to add
analytical methods for some or all methods as this is much faster.
If the calculation does not succeed, returns the actual error
encountered.
Parameters
----------
T1 : float
Lower limit of integration, [K]
T2 : float
Upper limit of integration, [K]
method : str
Method for which to find the integral
Returns
-------
integral : float
Calculated integral of the property over the given range,
[`units`]
### Response:
def calculate_integral_over_T(self, T1, T2, method):
r'''Method to calculate the integral of a property over temperature
with respect to temperature, using a specified method. Uses SciPy's
`quad` function to perform the integral, with no options.
This method can be overwritten by subclasses who may perfer to add
analytical methods for some or all methods as this is much faster.
If the calculation does not succeed, returns the actual error
encountered.
Parameters
----------
T1 : float
Lower limit of integration, [K]
T2 : float
Upper limit of integration, [K]
method : str
Method for which to find the integral
Returns
-------
integral : float
Calculated integral of the property over the given range,
[`units`]
'''
return float(quad(lambda T: self.calculate(T, method)/T, T1, T2)[0]) |
def getVideoStreamTextureSize(self, nDeviceIndex, eFrameType):
"""Gets size of the image frame."""
fn = self.function_table.getVideoStreamTextureSize
pTextureBounds = VRTextureBounds_t()
pnWidth = c_uint32()
pnHeight = c_uint32()
result = fn(nDeviceIndex, eFrameType, byref(pTextureBounds), byref(pnWidth), byref(pnHeight))
return result, pTextureBounds, pnWidth.value, pnHeight.value | Gets size of the image frame. | Below is the instruction that describes the task:
### Input:
Gets size of the image frame.
### Response:
def getVideoStreamTextureSize(self, nDeviceIndex, eFrameType):
"""Gets size of the image frame."""
fn = self.function_table.getVideoStreamTextureSize
pTextureBounds = VRTextureBounds_t()
pnWidth = c_uint32()
pnHeight = c_uint32()
result = fn(nDeviceIndex, eFrameType, byref(pTextureBounds), byref(pnWidth), byref(pnHeight))
return result, pTextureBounds, pnWidth.value, pnHeight.value |
def without(self, other):
"""
Subtract another Region by performing a difference operation on their pixlists.
Requires both regions to have the same maxdepth.
Parameters
----------
other : :class:`AegeanTools.regions.Region`
The region to be combined.
"""
# work only on the lowest level
# TODO: Allow this to be done for regions with different depths.
if not (self.maxdepth == other.maxdepth): raise AssertionError("Regions must have the same maxdepth")
self._demote_all()
opd = set(other.get_demoted())
self.pixeldict[self.maxdepth].difference_update(opd)
self._renorm()
return | Subtract another Region by performing a difference operation on their pixlists.
Requires both regions to have the same maxdepth.
Parameters
----------
other : :class:`AegeanTools.regions.Region`
The region to be combined. | Below is the instruction that describes the task:
### Input:
Subtract another Region by performing a difference operation on their pixlists.
Requires both regions to have the same maxdepth.
Parameters
----------
other : :class:`AegeanTools.regions.Region`
The region to be combined.
### Response:
def without(self, other):
"""
Subtract another Region by performing a difference operation on their pixlists.
Requires both regions to have the same maxdepth.
Parameters
----------
other : :class:`AegeanTools.regions.Region`
The region to be combined.
"""
# work only on the lowest level
# TODO: Allow this to be done for regions with different depths.
if not (self.maxdepth == other.maxdepth): raise AssertionError("Regions must have the same maxdepth")
self._demote_all()
opd = set(other.get_demoted())
self.pixeldict[self.maxdepth].difference_update(opd)
self._renorm()
return |
def delete_file(self, path, prefixed_path, source_storage):
"""
Checks if the target file should be deleted if it already exists
"""
if isinstance(self.storage, CumulusStorage):
if self.storage.exists(prefixed_path):
try:
etag = self.storage._get_object(prefixed_path).etag
digest = "{0}".format(hashlib.md5(source_storage.open(path).read()).hexdigest())
if etag == digest:
self.log(u"Skipping '{0}' (not modified based on file hash)".format(path))
return False
except:
raise
return super(Command, self).delete_file(path, prefixed_path, source_storage) | Checks if the target file should be deleted if it already exists | Below is the instruction that describes the task:
### Input:
Checks if the target file should be deleted if it already exists
### Response:
def delete_file(self, path, prefixed_path, source_storage):
"""
Checks if the target file should be deleted if it already exists
"""
if isinstance(self.storage, CumulusStorage):
if self.storage.exists(prefixed_path):
try:
etag = self.storage._get_object(prefixed_path).etag
digest = "{0}".format(hashlib.md5(source_storage.open(path).read()).hexdigest())
if etag == digest:
self.log(u"Skipping '{0}' (not modified based on file hash)".format(path))
return False
except:
raise
return super(Command, self).delete_file(path, prefixed_path, source_storage) |
def display(self, filename=None):
"""Displays/opens the doc using the OS's default application."""
if filename is None:
filename = 'display_temp.svg'
self.save(filename)
open_in_browser(filename) | Displays/opens the doc using the OS's default application. | Below is the the instruction that describes the task:
### Input:
Displays/opens the doc using the OS's default application.
### Response:
def display(self, filename=None):
"""Displays/opens the doc using the OS's default application."""
if filename is None:
filename = 'display_temp.svg'
self.save(filename)
open_in_browser(filename) |
def resize(self, width, height):
'Set the canvas size in pixels'
self.canvas.SetInitialSize(wx.Size(width, height))
self.window.GetSizer().Fit(self.window) | Set the canvas size in pixels | Below is the the instruction that describes the task:
### Input:
Set the canvas size in pixels
### Response:
def resize(self, width, height):
'Set the canvas size in pixels'
self.canvas.SetInitialSize(wx.Size(width, height))
self.window.GetSizer().Fit(self.window) |
def validate_args(self, qubits: Sequence[Qid]) -> None:
"""Checks if this gate can be applied to the given qubits.
By default checks if input is of type Qid and qubit count.
Child classes can override.
Args:
qubits: The collection of qubits to potentially apply the gate to.
Throws:
ValueError: The gate can't be applied to the qubits.
"""
if len(qubits) == 0:
raise ValueError(
"Applied a gate to an empty set of qubits. Gate: {}".format(
repr(self)))
if len(qubits) != self.num_qubits():
raise ValueError(
'Wrong number of qubits for <{!r}>. '
'Expected {} qubits but got <{!r}>.'.format(
self,
self.num_qubits(),
qubits))
if any([not isinstance(qubit, Qid)
for qubit in qubits]):
raise ValueError(
'Gate was called with type different than Qid.') | Checks if this gate can be applied to the given qubits.
By default checks if input is of type Qid and qubit count.
Child classes can override.
Args:
qubits: The collection of qubits to potentially apply the gate to.
Throws:
ValueError: The gate can't be applied to the qubits. | Below is the the instruction that describes the task:
### Input:
Checks if this gate can be applied to the given qubits.
By default checks if input is of type Qid and qubit count.
Child classes can override.
Args:
qubits: The collection of qubits to potentially apply the gate to.
Throws:
ValueError: The gate can't be applied to the qubits.
### Response:
def validate_args(self, qubits: Sequence[Qid]) -> None:
"""Checks if this gate can be applied to the given qubits.
By default checks if input is of type Qid and qubit count.
Child classes can override.
Args:
qubits: The collection of qubits to potentially apply the gate to.
Throws:
ValueError: The gate can't be applied to the qubits.
"""
if len(qubits) == 0:
raise ValueError(
"Applied a gate to an empty set of qubits. Gate: {}".format(
repr(self)))
if len(qubits) != self.num_qubits():
raise ValueError(
'Wrong number of qubits for <{!r}>. '
'Expected {} qubits but got <{!r}>.'.format(
self,
self.num_qubits(),
qubits))
if any([not isinstance(qubit, Qid)
for qubit in qubits]):
raise ValueError(
'Gate was called with type different than Qid.') |
def collect_hunt_results(self, hunt):
"""Download current set of files in results.
Args:
hunt: The GRR hunt object to download files from.
Returns:
list: tuples containing:
str: human-readable description of the source of the collection. For
example, the name of the source host.
str: path to the collected data.
Raises:
ValueError: if approval is needed and approvers were not specified.
"""
if not os.path.isdir(self.output_path):
os.makedirs(self.output_path)
output_file_path = os.path.join(
self.output_path, '.'.join((self.hunt_id, 'zip')))
if os.path.exists(output_file_path):
print('{0:s} already exists: Skipping'.format(output_file_path))
return None
self._check_approval_wrapper(
hunt, self._get_and_write_archive, hunt, output_file_path)
results = self._extract_hunt_results(output_file_path)
print('Wrote results of {0:s} to {1:s}'.format(
hunt.hunt_id, output_file_path))
return results | Download current set of files in results.
Args:
hunt: The GRR hunt object to download files from.
Returns:
list: tuples containing:
str: human-readable description of the source of the collection. For
example, the name of the source host.
str: path to the collected data.
Raises:
ValueError: if approval is needed and approvers were not specified. | Below is the the instruction that describes the task:
### Input:
Download current set of files in results.
Args:
hunt: The GRR hunt object to download files from.
Returns:
list: tuples containing:
str: human-readable description of the source of the collection. For
example, the name of the source host.
str: path to the collected data.
Raises:
ValueError: if approval is needed and approvers were not specified.
### Response:
def collect_hunt_results(self, hunt):
"""Download current set of files in results.
Args:
hunt: The GRR hunt object to download files from.
Returns:
list: tuples containing:
str: human-readable description of the source of the collection. For
example, the name of the source host.
str: path to the collected data.
Raises:
ValueError: if approval is needed and approvers were not specified.
"""
if not os.path.isdir(self.output_path):
os.makedirs(self.output_path)
output_file_path = os.path.join(
self.output_path, '.'.join((self.hunt_id, 'zip')))
if os.path.exists(output_file_path):
print('{0:s} already exists: Skipping'.format(output_file_path))
return None
self._check_approval_wrapper(
hunt, self._get_and_write_archive, hunt, output_file_path)
results = self._extract_hunt_results(output_file_path)
print('Wrote results of {0:s} to {1:s}'.format(
hunt.hunt_id, output_file_path))
return results |
def crop(self, start_timestamp, end_timestamp):
"""
Return a new TimeSeries object contains all the timstamps and values within
the specified range.
:param int start_timestamp: the start timestamp value
:param int end_timestamp: the end timestamp value
:return: :class:`TimeSeries` object.
"""
output = {}
for key, value in self.items():
if key >= start_timestamp and key <= end_timestamp:
output[key] = value
if output:
return TimeSeries(output)
else:
raise ValueError('TimeSeries data was empty or invalid.') | Return a new TimeSeries object contains all the timstamps and values within
the specified range.
:param int start_timestamp: the start timestamp value
:param int end_timestamp: the end timestamp value
:return: :class:`TimeSeries` object. | Below is the the instruction that describes the task:
### Input:
Return a new TimeSeries object contains all the timstamps and values within
the specified range.
:param int start_timestamp: the start timestamp value
:param int end_timestamp: the end timestamp value
:return: :class:`TimeSeries` object.
### Response:
def crop(self, start_timestamp, end_timestamp):
"""
Return a new TimeSeries object contains all the timstamps and values within
the specified range.
:param int start_timestamp: the start timestamp value
:param int end_timestamp: the end timestamp value
:return: :class:`TimeSeries` object.
"""
output = {}
for key, value in self.items():
if key >= start_timestamp and key <= end_timestamp:
output[key] = value
if output:
return TimeSeries(output)
else:
raise ValueError('TimeSeries data was empty or invalid.') |
def send(self, content_type='HTML'):
""" Takes the recipients, body, and attachments of the Message and sends.
Args:
content_type: Can either be 'HTML' or 'Text', defaults to HTML.
"""
payload = self.api_representation(content_type)
endpoint = 'https://outlook.office.com/api/v1.0/me/sendmail'
self._make_api_call('post', endpoint=endpoint, data=json.dumps(payload)) | Takes the recipients, body, and attachments of the Message and sends.
Args:
content_type: Can either be 'HTML' or 'Text', defaults to HTML. | Below is the the instruction that describes the task:
### Input:
Takes the recipients, body, and attachments of the Message and sends.
Args:
content_type: Can either be 'HTML' or 'Text', defaults to HTML.
### Response:
def send(self, content_type='HTML'):
""" Takes the recipients, body, and attachments of the Message and sends.
Args:
content_type: Can either be 'HTML' or 'Text', defaults to HTML.
"""
payload = self.api_representation(content_type)
endpoint = 'https://outlook.office.com/api/v1.0/me/sendmail'
self._make_api_call('post', endpoint=endpoint, data=json.dumps(payload)) |
def repeater(pipe, how_many=2):
''' this function repeats each value in the pipeline however many times you need '''
r = range(how_many)
for i in pipe:
for _ in r:
yield i | this function repeats each value in the pipeline however many times you need | Below is the the instruction that describes the task:
### Input:
this function repeats each value in the pipeline however many times you need
### Response:
def repeater(pipe, how_many=2):
''' this function repeats each value in the pipeline however many times you need '''
r = range(how_many)
for i in pipe:
for _ in r:
yield i |
def override(self, value):
"""Temporarily overrides the old value with the new one."""
if self._value is not value:
return _ScopedValueOverrideContext(self, value)
else:
return empty_context | Temporarily overrides the old value with the new one. | Below is the the instruction that describes the task:
### Input:
Temporarily overrides the old value with the new one.
### Response:
def override(self, value):
"""Temporarily overrides the old value with the new one."""
if self._value is not value:
return _ScopedValueOverrideContext(self, value)
else:
return empty_context |
def draw_triangles(tile_x, tile_y, tile_size, split, top_color, bottom_color,
draw):
"""Draws a triangle on each half of the tile with the given coordinates
and size.
"""
assert split in ('right', 'left')
# The four corners of this tile
nw = (tile_x, tile_y)
ne = (tile_x + tile_size - 1, tile_y)
se = (tile_x + tile_size - 1, tile_y + tile_size)
sw = (tile_x, tile_y + tile_size)
if split == 'left':
# top right triangle
draw_triangle(nw, ne, se, top_color, draw)
# bottom left triangle
draw_triangle(nw, sw, se, bottom_color, draw)
else:
# top left triangle
draw_triangle(sw, nw, ne, top_color, draw)
# bottom right triangle
draw_triangle(sw, se, ne, bottom_color, draw) | Draws a triangle on each half of the tile with the given coordinates
and size. | Below is the the instruction that describes the task:
### Input:
Draws a triangle on each half of the tile with the given coordinates
and size.
### Response:
def draw_triangles(tile_x, tile_y, tile_size, split, top_color, bottom_color,
draw):
"""Draws a triangle on each half of the tile with the given coordinates
and size.
"""
assert split in ('right', 'left')
# The four corners of this tile
nw = (tile_x, tile_y)
ne = (tile_x + tile_size - 1, tile_y)
se = (tile_x + tile_size - 1, tile_y + tile_size)
sw = (tile_x, tile_y + tile_size)
if split == 'left':
# top right triangle
draw_triangle(nw, ne, se, top_color, draw)
# bottom left triangle
draw_triangle(nw, sw, se, bottom_color, draw)
else:
# top left triangle
draw_triangle(sw, nw, ne, top_color, draw)
# bottom right triangle
draw_triangle(sw, se, ne, bottom_color, draw) |
def start_event_loop_qt4(app=None):
"""Start the qt4 event loop in a consistent manner."""
if app is None:
app = get_app_qt4([''])
if not is_event_loop_running_qt4(app):
app._in_event_loop = True
app.exec_()
app._in_event_loop = False
else:
app._in_event_loop = True | Start the qt4 event loop in a consistent manner. | Below is the the instruction that describes the task:
### Input:
Start the qt4 event loop in a consistent manner.
### Response:
def start_event_loop_qt4(app=None):
"""Start the qt4 event loop in a consistent manner."""
if app is None:
app = get_app_qt4([''])
if not is_event_loop_running_qt4(app):
app._in_event_loop = True
app.exec_()
app._in_event_loop = False
else:
app._in_event_loop = True |
def rbac_policy_get(request, policy_id, **kwargs):
"""Get RBAC policy for a given policy id."""
policy = neutronclient(request).show_rbac_policy(
policy_id, **kwargs).get('rbac_policy')
return RBACPolicy(policy) | Get RBAC policy for a given policy id. | Below is the the instruction that describes the task:
### Input:
Get RBAC policy for a given policy id.
### Response:
def rbac_policy_get(request, policy_id, **kwargs):
"""Get RBAC policy for a given policy id."""
policy = neutronclient(request).show_rbac_policy(
policy_id, **kwargs).get('rbac_policy')
return RBACPolicy(policy) |
def from_yaml(cls, data: str, force_snake_case=True, force_cast: bool=False, restrict: bool=True) -> T:
"""From yaml string to instance
:param data: Yaml string
:param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: Instance
Usage:
>>> from owlmixin.samples import Human
>>> human: Human = Human.from_yaml('''
... id: 1
... name: Tom
... favorites:
... - name: Apple
... names_by_lang:
... en: Apple
... de: Apfel
... - name: Orange
... ''')
>>> human.id
1
>>> human.name
'Tom'
>>> human.favorites[0].names_by_lang.get()["de"]
'Apfel'
"""
return cls.from_dict(util.load_yaml(data),
force_snake_case=force_snake_case,
force_cast=force_cast,
restrict=restrict) | From yaml string to instance
:param data: Yaml string
:param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: Instance
Usage:
>>> from owlmixin.samples import Human
>>> human: Human = Human.from_yaml('''
... id: 1
... name: Tom
... favorites:
... - name: Apple
... names_by_lang:
... en: Apple
... de: Apfel
... - name: Orange
... ''')
>>> human.id
1
>>> human.name
'Tom'
>>> human.favorites[0].names_by_lang.get()["de"]
'Apfel' | Below is the the instruction that describes the task:
### Input:
From yaml string to instance
:param data: Yaml string
:param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: Instance
Usage:
>>> from owlmixin.samples import Human
>>> human: Human = Human.from_yaml('''
... id: 1
... name: Tom
... favorites:
... - name: Apple
... names_by_lang:
... en: Apple
... de: Apfel
... - name: Orange
... ''')
>>> human.id
1
>>> human.name
'Tom'
>>> human.favorites[0].names_by_lang.get()["de"]
'Apfel'
### Response:
def from_yaml(cls, data: str, force_snake_case=True, force_cast: bool=False, restrict: bool=True) -> T:
"""From yaml string to instance
:param data: Yaml string
:param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True
:param force_cast: Cast forcibly if True
:param restrict: Prohibit extra parameters if True
:return: Instance
Usage:
>>> from owlmixin.samples import Human
>>> human: Human = Human.from_yaml('''
... id: 1
... name: Tom
... favorites:
... - name: Apple
... names_by_lang:
... en: Apple
... de: Apfel
... - name: Orange
... ''')
>>> human.id
1
>>> human.name
'Tom'
>>> human.favorites[0].names_by_lang.get()["de"]
'Apfel'
"""
return cls.from_dict(util.load_yaml(data),
force_snake_case=force_snake_case,
force_cast=force_cast,
restrict=restrict) |
def then(self, success=None, failure=None):
"""A utility method to add success and/or failure callback to the
promise which will also return another promise in the process.
"""
rv = Promise()
def on_success(v):
try:
rv.resolve(success(v))
except Exception as e:
rv.reject(e)
def on_failure(r):
try:
rv.resolve(failure(r))
except Exception as e:
rv.reject(e)
self.done(on_success, on_failure)
return rv | A utility method to add success and/or failure callback to the
promise which will also return another promise in the process. | Below is the the instruction that describes the task:
### Input:
A utility method to add success and/or failure callback to the
promise which will also return another promise in the process.
### Response:
def then(self, success=None, failure=None):
"""A utility method to add success and/or failure callback to the
promise which will also return another promise in the process.
"""
rv = Promise()
def on_success(v):
try:
rv.resolve(success(v))
except Exception as e:
rv.reject(e)
def on_failure(r):
try:
rv.resolve(failure(r))
except Exception as e:
rv.reject(e)
self.done(on_success, on_failure)
return rv |
def read_names(rows, source_id=1):
"""Return an iterator of rows ready to insert into table
"names". Adds columns "is_primary" (identifying the primary name
for each tax_id with a vaule of 1) and "is_classified" (always None).
* rows - iterator of lists (eg, output from read_archive or read_dmp)
* unclassified_regex - a compiled re matching "unclassified" names
From the NCBI docs:
Taxonomy names file (names.dmp):
tax_id -- the id of node associated with this name
name_txt -- name itself
unique name -- the unique variant of this name if name not unique
name class -- (synonym, common name, ...)
"""
ncbi_keys = ['tax_id', 'tax_name', 'unique_name', 'name_class']
extra_keys = ['source_id', 'is_primary', 'is_classified']
# is_classified applies to species only; we will set this value
# later
is_classified = None
tax_id = ncbi_keys.index('tax_id')
tax_name = ncbi_keys.index('tax_name')
unique_name = ncbi_keys.index('unique_name')
name_class = ncbi_keys.index('name_class')
yield ncbi_keys + extra_keys
for tid, grp in itertools.groupby(rows, itemgetter(tax_id)):
# confirm that each tax_id has exactly one scientific name
num_primary = 0
for r in grp:
is_primary = r[name_class] == 'scientific name'
# fix primary key uniqueness violation
if r[unique_name]:
r[tax_name] = r[unique_name]
num_primary += is_primary
yield (r + [source_id, is_primary, is_classified])
assert num_primary == 1 | Return an iterator of rows ready to insert into table
"names". Adds columns "is_primary" (identifying the primary name
for each tax_id with a vaule of 1) and "is_classified" (always None).
* rows - iterator of lists (eg, output from read_archive or read_dmp)
* unclassified_regex - a compiled re matching "unclassified" names
From the NCBI docs:
Taxonomy names file (names.dmp):
tax_id -- the id of node associated with this name
name_txt -- name itself
unique name -- the unique variant of this name if name not unique
name class -- (synonym, common name, ...) | Below is the the instruction that describes the task:
### Input:
Return an iterator of rows ready to insert into table
"names". Adds columns "is_primary" (identifying the primary name
for each tax_id with a vaule of 1) and "is_classified" (always None).
* rows - iterator of lists (eg, output from read_archive or read_dmp)
* unclassified_regex - a compiled re matching "unclassified" names
From the NCBI docs:
Taxonomy names file (names.dmp):
tax_id -- the id of node associated with this name
name_txt -- name itself
unique name -- the unique variant of this name if name not unique
name class -- (synonym, common name, ...)
### Response:
def read_names(rows, source_id=1):
"""Return an iterator of rows ready to insert into table
"names". Adds columns "is_primary" (identifying the primary name
for each tax_id with a vaule of 1) and "is_classified" (always None).
* rows - iterator of lists (eg, output from read_archive or read_dmp)
* unclassified_regex - a compiled re matching "unclassified" names
From the NCBI docs:
Taxonomy names file (names.dmp):
tax_id -- the id of node associated with this name
name_txt -- name itself
unique name -- the unique variant of this name if name not unique
name class -- (synonym, common name, ...)
"""
ncbi_keys = ['tax_id', 'tax_name', 'unique_name', 'name_class']
extra_keys = ['source_id', 'is_primary', 'is_classified']
# is_classified applies to species only; we will set this value
# later
is_classified = None
tax_id = ncbi_keys.index('tax_id')
tax_name = ncbi_keys.index('tax_name')
unique_name = ncbi_keys.index('unique_name')
name_class = ncbi_keys.index('name_class')
yield ncbi_keys + extra_keys
for tid, grp in itertools.groupby(rows, itemgetter(tax_id)):
# confirm that each tax_id has exactly one scientific name
num_primary = 0
for r in grp:
is_primary = r[name_class] == 'scientific name'
# fix primary key uniqueness violation
if r[unique_name]:
r[tax_name] = r[unique_name]
num_primary += is_primary
yield (r + [source_id, is_primary, is_classified])
assert num_primary == 1 |
def compute(self, write_to_tar=True):
"""Perform all desired calculations on the data and save externally."""
data = self._get_all_data(self.start_date, self.end_date)
logging.info('Computing timeseries for {0} -- '
'{1}.'.format(self.start_date, self.end_date))
full, full_dt = self._compute_full_ts(data)
full_out = self._full_to_yearly_ts(full, full_dt)
reduced = self._apply_all_time_reductions(full_out)
logging.info("Writing desired gridded outputs to disk.")
for dtype_time, data in reduced.items():
data = _add_metadata_as_attrs(data, self.var.units,
self.var.description,
self.dtype_out_vert)
self.save(data, dtype_time, dtype_out_vert=self.dtype_out_vert,
save_files=True, write_to_tar=write_to_tar)
return self | Perform all desired calculations on the data and save externally. | Below is the the instruction that describes the task:
### Input:
Perform all desired calculations on the data and save externally.
### Response:
def compute(self, write_to_tar=True):
"""Perform all desired calculations on the data and save externally."""
data = self._get_all_data(self.start_date, self.end_date)
logging.info('Computing timeseries for {0} -- '
'{1}.'.format(self.start_date, self.end_date))
full, full_dt = self._compute_full_ts(data)
full_out = self._full_to_yearly_ts(full, full_dt)
reduced = self._apply_all_time_reductions(full_out)
logging.info("Writing desired gridded outputs to disk.")
for dtype_time, data in reduced.items():
data = _add_metadata_as_attrs(data, self.var.units,
self.var.description,
self.dtype_out_vert)
self.save(data, dtype_time, dtype_out_vert=self.dtype_out_vert,
save_files=True, write_to_tar=write_to_tar)
return self |
def execute_streaming_sql(
self,
session,
sql,
transaction=None,
params=None,
param_types=None,
resume_token=None,
query_mode=None,
partition_token=None,
seqno=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Like ``ExecuteSql``, except returns the result set as a stream. Unlike
``ExecuteSql``, there is no limit on the size of the returned result
set. However, no individual row in the result set can exceed 100 MiB,
and no column value can exceed 10 MiB.
Example:
>>> from google.cloud import spanner_v1
>>>
>>> client = spanner_v1.SpannerClient()
>>>
>>> session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]')
>>>
>>> # TODO: Initialize `sql`:
>>> sql = ''
>>>
>>> for element in client.execute_streaming_sql(session, sql):
... # process element
... pass
Args:
session (str): Required. The session in which the SQL query should be performed.
sql (str): Required. The SQL string.
transaction (Union[dict, ~google.cloud.spanner_v1.types.TransactionSelector]): The transaction to use. If none is provided, the default is a
temporary read-only transaction with strong concurrency.
The transaction to use.
For queries, if none is provided, the default is a temporary read-only
transaction with strong concurrency.
Standard DML statements require a ReadWrite transaction. Single-use
transactions are not supported (to avoid replay). The caller must
either supply an existing transaction ID or begin a new transaction.
Partitioned DML requires an existing PartitionedDml transaction ID.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.spanner_v1.types.TransactionSelector`
params (Union[dict, ~google.cloud.spanner_v1.types.Struct]): The SQL string can contain parameter placeholders. A parameter
placeholder consists of ``'@'`` followed by the parameter name.
Parameter names consist of any combination of letters, numbers, and
underscores.
Parameters can appear anywhere that a literal value is expected. The
same parameter name can be used more than once, for example:
``"WHERE id > @msg_id AND id < @msg_id + 100"``
It is an error to execute an SQL statement with unbound parameters.
Parameter values are specified using ``params``, which is a JSON object
whose keys are parameter names, and whose values are the corresponding
parameter values.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.spanner_v1.types.Struct`
param_types (dict[str -> Union[dict, ~google.cloud.spanner_v1.types.Type]]): It is not always possible for Cloud Spanner to infer the right SQL type
from a JSON value. For example, values of type ``BYTES`` and values of
type ``STRING`` both appear in ``params`` as JSON strings.
In these cases, ``param_types`` can be used to specify the exact SQL
type for some or all of the SQL statement parameters. See the definition
of ``Type`` for more information about SQL types.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.spanner_v1.types.Type`
resume_token (bytes): If this request is resuming a previously interrupted SQL statement
execution, ``resume_token`` should be copied from the last
``PartialResultSet`` yielded before the interruption. Doing this enables
the new SQL statement execution to resume where the last one left off.
The rest of the request parameters must exactly match the request that
yielded this token.
query_mode (~google.cloud.spanner_v1.types.QueryMode): Used to control the amount of debugging information returned in
``ResultSetStats``. If ``partition_token`` is set, ``query_mode`` can
only be set to ``QueryMode.NORMAL``.
partition_token (bytes): If present, results will be restricted to the specified partition
previously created using PartitionQuery(). There must be an exact match
for the values of fields common to this message and the
PartitionQueryRequest message used to create this partition\_token.
seqno (long): A per-transaction sequence number used to identify this request. This
makes each request idempotent such that if the request is received multiple
times, at most one will succeed.
The sequence number must be monotonically increasing within the
transaction. If a request arrives for the first time with an out-of-order
sequence number, the transaction may be aborted. Replays of previously
handled requests will yield the same response as the first execution.
Required for DML statements. Ignored for queries.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
Iterable[~google.cloud.spanner_v1.types.PartialResultSet].
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "execute_streaming_sql" not in self._inner_api_calls:
self._inner_api_calls[
"execute_streaming_sql"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.execute_streaming_sql,
default_retry=self._method_configs["ExecuteStreamingSql"].retry,
default_timeout=self._method_configs["ExecuteStreamingSql"].timeout,
client_info=self._client_info,
)
request = spanner_pb2.ExecuteSqlRequest(
session=session,
sql=sql,
transaction=transaction,
params=params,
param_types=param_types,
resume_token=resume_token,
query_mode=query_mode,
partition_token=partition_token,
seqno=seqno,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("session", session)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["execute_streaming_sql"](
request, retry=retry, timeout=timeout, metadata=metadata
) | Like ``ExecuteSql``, except returns the result set as a stream. Unlike
``ExecuteSql``, there is no limit on the size of the returned result
set. However, no individual row in the result set can exceed 100 MiB,
and no column value can exceed 10 MiB.
Example:
>>> from google.cloud import spanner_v1
>>>
>>> client = spanner_v1.SpannerClient()
>>>
>>> session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]')
>>>
>>> # TODO: Initialize `sql`:
>>> sql = ''
>>>
>>> for element in client.execute_streaming_sql(session, sql):
... # process element
... pass
Args:
session (str): Required. The session in which the SQL query should be performed.
sql (str): Required. The SQL string.
transaction (Union[dict, ~google.cloud.spanner_v1.types.TransactionSelector]): The transaction to use. If none is provided, the default is a
temporary read-only transaction with strong concurrency.
The transaction to use.
For queries, if none is provided, the default is a temporary read-only
transaction with strong concurrency.
Standard DML statements require a ReadWrite transaction. Single-use
transactions are not supported (to avoid replay). The caller must
either supply an existing transaction ID or begin a new transaction.
Partitioned DML requires an existing PartitionedDml transaction ID.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.spanner_v1.types.TransactionSelector`
params (Union[dict, ~google.cloud.spanner_v1.types.Struct]): The SQL string can contain parameter placeholders. A parameter
placeholder consists of ``'@'`` followed by the parameter name.
Parameter names consist of any combination of letters, numbers, and
underscores.
Parameters can appear anywhere that a literal value is expected. The
same parameter name can be used more than once, for example:
``"WHERE id > @msg_id AND id < @msg_id + 100"``
It is an error to execute an SQL statement with unbound parameters.
Parameter values are specified using ``params``, which is a JSON object
whose keys are parameter names, and whose values are the corresponding
parameter values.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.spanner_v1.types.Struct`
param_types (dict[str -> Union[dict, ~google.cloud.spanner_v1.types.Type]]): It is not always possible for Cloud Spanner to infer the right SQL type
from a JSON value. For example, values of type ``BYTES`` and values of
type ``STRING`` both appear in ``params`` as JSON strings.
In these cases, ``param_types`` can be used to specify the exact SQL
type for some or all of the SQL statement parameters. See the definition
of ``Type`` for more information about SQL types.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.spanner_v1.types.Type`
resume_token (bytes): If this request is resuming a previously interrupted SQL statement
execution, ``resume_token`` should be copied from the last
``PartialResultSet`` yielded before the interruption. Doing this enables
the new SQL statement execution to resume where the last one left off.
The rest of the request parameters must exactly match the request that
yielded this token.
query_mode (~google.cloud.spanner_v1.types.QueryMode): Used to control the amount of debugging information returned in
``ResultSetStats``. If ``partition_token`` is set, ``query_mode`` can
only be set to ``QueryMode.NORMAL``.
partition_token (bytes): If present, results will be restricted to the specified partition
previously created using PartitionQuery(). There must be an exact match
for the values of fields common to this message and the
PartitionQueryRequest message used to create this partition\_token.
seqno (long): A per-transaction sequence number used to identify this request. This
makes each request idempotent such that if the request is received multiple
times, at most one will succeed.
The sequence number must be monotonically increasing within the
transaction. If a request arrives for the first time with an out-of-order
sequence number, the transaction may be aborted. Replays of previously
handled requests will yield the same response as the first execution.
Required for DML statements. Ignored for queries.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
Iterable[~google.cloud.spanner_v1.types.PartialResultSet].
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid. | Below is the the instruction that describes the task:
### Input:
Like ``ExecuteSql``, except returns the result set as a stream. Unlike
``ExecuteSql``, there is no limit on the size of the returned result
set. However, no individual row in the result set can exceed 100 MiB,
and no column value can exceed 10 MiB.
Example:
>>> from google.cloud import spanner_v1
>>>
>>> client = spanner_v1.SpannerClient()
>>>
>>> session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]')
>>>
>>> # TODO: Initialize `sql`:
>>> sql = ''
>>>
>>> for element in client.execute_streaming_sql(session, sql):
... # process element
... pass
Args:
session (str): Required. The session in which the SQL query should be performed.
sql (str): Required. The SQL string.
transaction (Union[dict, ~google.cloud.spanner_v1.types.TransactionSelector]): The transaction to use. If none is provided, the default is a
temporary read-only transaction with strong concurrency.
The transaction to use.
For queries, if none is provided, the default is a temporary read-only
transaction with strong concurrency.
Standard DML statements require a ReadWrite transaction. Single-use
transactions are not supported (to avoid replay). The caller must
either supply an existing transaction ID or begin a new transaction.
Partitioned DML requires an existing PartitionedDml transaction ID.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.spanner_v1.types.TransactionSelector`
params (Union[dict, ~google.cloud.spanner_v1.types.Struct]): The SQL string can contain parameter placeholders. A parameter
placeholder consists of ``'@'`` followed by the parameter name.
Parameter names consist of any combination of letters, numbers, and
underscores.
Parameters can appear anywhere that a literal value is expected. The
same parameter name can be used more than once, for example:
``"WHERE id > @msg_id AND id < @msg_id + 100"``
It is an error to execute an SQL statement with unbound parameters.
Parameter values are specified using ``params``, which is a JSON object
whose keys are parameter names, and whose values are the corresponding
parameter values.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.spanner_v1.types.Struct`
param_types (dict[str -> Union[dict, ~google.cloud.spanner_v1.types.Type]]): It is not always possible for Cloud Spanner to infer the right SQL type
from a JSON value. For example, values of type ``BYTES`` and values of
type ``STRING`` both appear in ``params`` as JSON strings.
In these cases, ``param_types`` can be used to specify the exact SQL
type for some or all of the SQL statement parameters. See the definition
of ``Type`` for more information about SQL types.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.spanner_v1.types.Type`
resume_token (bytes): If this request is resuming a previously interrupted SQL statement
execution, ``resume_token`` should be copied from the last
``PartialResultSet`` yielded before the interruption. Doing this enables
the new SQL statement execution to resume where the last one left off.
The rest of the request parameters must exactly match the request that
yielded this token.
query_mode (~google.cloud.spanner_v1.types.QueryMode): Used to control the amount of debugging information returned in
``ResultSetStats``. If ``partition_token`` is set, ``query_mode`` can
only be set to ``QueryMode.NORMAL``.
partition_token (bytes): If present, results will be restricted to the specified partition
previously created using PartitionQuery(). There must be an exact match
for the values of fields common to this message and the
PartitionQueryRequest message used to create this partition\_token.
seqno (long): A per-transaction sequence number used to identify this request. This
makes each request idempotent such that if the request is received multiple
times, at most one will succeed.
The sequence number must be monotonically increasing within the
transaction. If a request arrives for the first time with an out-of-order
sequence number, the transaction may be aborted. Replays of previously
handled requests will yield the same response as the first execution.
Required for DML statements. Ignored for queries.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
Iterable[~google.cloud.spanner_v1.types.PartialResultSet].
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
### Response:
def execute_streaming_sql(
    self,
    session,
    sql,
    transaction=None,
    params=None,
    param_types=None,
    resume_token=None,
    query_mode=None,
    partition_token=None,
    seqno=None,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """Execute an SQL statement, returning the result set as a stream.

    Like ``ExecuteSql``, except there is no limit on the size of the
    returned result set. However, no individual row in the result set can
    exceed 100 MiB, and no column value can exceed 10 MiB.

    Example:
        >>> from google.cloud import spanner_v1
        >>> client = spanner_v1.SpannerClient()
        >>> session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]')
        >>> sql = ''
        >>> for element in client.execute_streaming_sql(session, sql):
        ...     pass

    Args:
        session (str): Required. The session in which the SQL query should
            be performed.
        sql (str): Required. The SQL string.
        transaction (Union[dict, ~google.cloud.spanner_v1.types.TransactionSelector]):
            The transaction to use. For queries, if none is provided, the
            default is a temporary read-only transaction with strong
            concurrency. Standard DML statements require a ReadWrite
            transaction; single-use transactions are not supported (to
            avoid replay). Partitioned DML requires an existing
            PartitionedDml transaction ID. If a dict is provided, it must
            be of the same form as the protobuf message
            :class:`~google.cloud.spanner_v1.types.TransactionSelector`.
        params (Union[dict, ~google.cloud.spanner_v1.types.Struct]):
            Values for parameter placeholders (``'@'`` followed by the
            parameter name) appearing in ``sql``. It is an error to
            execute an SQL statement with unbound parameters. If a dict is
            provided, it must be of the same form as the protobuf message
            :class:`~google.cloud.spanner_v1.types.Struct`.
        param_types (dict[str -> Union[dict, ~google.cloud.spanner_v1.types.Type]]):
            Explicit SQL types for some or all parameters, for cases where
            the type cannot be inferred from the JSON value (for example,
            ``BYTES`` and ``STRING`` values both appear as JSON strings).
            Dict values must match
            :class:`~google.cloud.spanner_v1.types.Type`.
        resume_token (bytes): If resuming a previously interrupted SQL
            statement execution, the token copied from the last
            ``PartialResultSet`` yielded before the interruption. All
            other request parameters must exactly match the request that
            yielded this token.
        query_mode (~google.cloud.spanner_v1.types.QueryMode): Controls
            the amount of debugging information returned in
            ``ResultSetStats``. If ``partition_token`` is set, only
            ``QueryMode.NORMAL`` is allowed.
        partition_token (bytes): If present, restricts results to the
            partition previously created using ``PartitionQuery()``.
        seqno (long): A per-transaction, monotonically increasing sequence
            number that makes the request idempotent; replays of handled
            requests yield the same response as the first execution.
            Required for DML statements, ignored for queries.
        retry (Optional[google.api_core.retry.Retry]): Retry policy; if
            ``None`` is specified, requests will not be retried.
        timeout (Optional[float]): Time, in seconds, to wait for the
            request to complete. With ``retry``, applies to each attempt.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional
            metadata that is provided to the method.

    Returns:
        Iterable[~google.cloud.spanner_v1.types.PartialResultSet].

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request
            failed for any reason.
        google.api_core.exceptions.RetryError: If the request failed due
            to a retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Wrap the transport method to add retry and timeout logic.
    # The wrapped callable is cached per client instance so the wrapping
    # cost is paid only on the first call.
    if "execute_streaming_sql" not in self._inner_api_calls:
        self._inner_api_calls[
            "execute_streaming_sql"
        ] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.execute_streaming_sql,
            default_retry=self._method_configs["ExecuteStreamingSql"].retry,
            default_timeout=self._method_configs["ExecuteStreamingSql"].timeout,
            client_info=self._client_info,
        )
    request = spanner_pb2.ExecuteSqlRequest(
        session=session,
        sql=sql,
        transaction=transaction,
        params=params,
        param_types=param_types,
        resume_token=resume_token,
        query_mode=query_mode,
        partition_token=partition_token,
        seqno=seqno,
    )
    if metadata is None:
        metadata = []
    # Copy so the caller's sequence is never mutated by the append below.
    metadata = list(metadata)
    # Attach a gRPC routing header keyed on the session path; this is
    # best-effort (generated code guards the construction with
    # AttributeError and simply skips the header on failure).
    try:
        routing_header = [("session", session)]
    except AttributeError:
        pass
    else:
        routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
            routing_header
        )
        metadata.append(routing_metadata)
    return self._inner_api_calls["execute_streaming_sql"](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
def install_package(self, client, package):
    """Install ``package`` on the instance over SSH.

    Builds the distro-specific install command, runs it through the
    sudo wrapper, and returns the command output.  Any failure is
    re-raised as :class:`IpaDistroException`.
    """
    cmd = "{sudo} '{install} {package}'".format(
        sudo=self.get_sudo_exec_wrapper(),
        install=self.get_install_cmd(),
        package=package,
    )
    try:
        return ipa_utils.execute_ssh_command(client, cmd)
    except Exception as error:
        raise IpaDistroException(
            'An error occurred installing package {package} '
            'on instance: {error}'.format(
                package=package,
                error=error
            )
        )
### Input:
Install package on instance.
### Response:
def install_package(self, client, package):
"""Install package on instance."""
install_cmd = "{sudo} '{install} {package}'".format(
sudo=self.get_sudo_exec_wrapper(),
install=self.get_install_cmd(),
package=package
)
try:
out = ipa_utils.execute_ssh_command(
client,
install_cmd
)
except Exception as error:
raise IpaDistroException(
'An error occurred installing package {package} '
'on instance: {error}'.format(
package=package,
error=error
)
)
else:
return out |
def validate_yubikey_otp(self, params):
    """
    Validate YubiKey OTP using YubiHSM internal database.

    Builds a YK-VAL compatible response dict (``vres``) and returns it
    signed via ``make_otp_response``.  ``params`` appears to be a parsed
    query string whose values are lists (see ``params["otp"][0]`` and
    ``params["nonce"][0]`` below).
    """
    # vres accumulates the YK-VAL style response fields; once "status"
    # is set to an error, the HSM validation step below is skipped.
    vres = {}
    from_key = params["otp"][0]
    # Reject anything that does not match the expected OTP alphabet/shape.
    if not re.match(ykotp_valid_input, from_key):
        self.log_error("IN: %s, Invalid OTP" % (from_key))
        vres["status"] = "BAD_OTP"
    else:
        vres["otp"] = from_key
    if not "nonce" in params:
        self.log_error("IN: %s, no nonce" % (from_key))
        vres["status"] = "MISSING_PARAMETER"
    else:
        nonce = params["nonce"][0]
        # Nonce must be 16-40 characters long.
        if len(nonce) < 16 or len(nonce) > 40:
            self.log_error("IN: %s, bad nonce : %s" % (from_key, nonce))
            vres["status"] = "MISSING_PARAMETER"
        else:
            vres["nonce"] = nonce
    # NOTE(review): other parameters are accessed as lists
    # (params["otp"][0]), but here params["sl"] is compared to plain
    # strings, so this condition may always be true when 'sl' is
    # supplied -- confirm against the query parser used upstream.
    if "sl" in params and not (params["sl"] == "100" or params["sl"] == "secure"):
        self.log_error("IN: %s, sync level unsupported" % (from_key))
        vres["status"] = "BACKEND_ERROR"
    sig, client_key, = check_signature(params)
    if sig != True:
        self.log_error("IN: %s, signature validation error" % (from_key))
        if client_key == None:
            vres["status"] = "NO_SUCH_CLIENT"
            # To be compatible with YK-VAL, we will sign this response using a null key
            client_key = chr(0)
        else:
            vres["status"] = "BAD_SIGNATURE"
    # Only consult the YubiHSM if no error status was recorded above.
    if "status" not in vres:
        try:
            res = pyhsm.yubikey.validate_otp(hsm, from_key)
            vres.update({"status": "OK",
                         "sessioncounter": str(res.use_ctr), # known confusion
                         "sessionuse": str(res.session_ctr), # known confusion
                         # ts_high/ts_low combine into the device timer
                         # value; dividing by 8 presumably converts the
                         # 8 Hz YubiKey timer to seconds -- TODO confirm.
                         "timestamp": str((res.ts_high << 16 | res.ts_low) / 8)
                         }
                        )
            if "sl" in params:
                vres["sl"] = "100"
            if "timestamp" in params:
                vres["t"] = time.strftime("%FT%TZ0000", time.gmtime())
        except pyhsm.exception.YHSM_CommandFailed, e:  # Python 2 syntax
            # Map HSM status codes to YK-VAL protocol status strings.
            if e.status == pyhsm.defines.YSM_ID_NOT_FOUND:
                vres["status"] = "BAD_OTP"
            elif e.status == pyhsm.defines.YSM_OTP_REPLAY:
                vres["status"] = "REPLAYED_OTP"
            elif e.status == pyhsm.defines.YSM_OTP_INVALID:
                vres["status"] = "BAD_OTP"
            else:
                vres["status"] = "BACKEND_ERROR"
            self.log_error("IN: %s, validation result %s (replying %s)" % (from_key, pyhsm.defines.status2str(e.status), vres["status"]))
    return make_otp_response(vres, client_key)
### Input:
Validate YubiKey OTP using YubiHSM internal database.
### Response:
def validate_yubikey_otp(self, params):
"""
Validate YubiKey OTP using YubiHSM internal database.
"""
vres = {}
from_key = params["otp"][0]
if not re.match(ykotp_valid_input, from_key):
self.log_error("IN: %s, Invalid OTP" % (from_key))
vres["status"] = "BAD_OTP"
else:
vres["otp"] = from_key
if not "nonce" in params:
self.log_error("IN: %s, no nonce" % (from_key))
vres["status"] = "MISSING_PARAMETER"
else:
nonce = params["nonce"][0]
if len(nonce) < 16 or len(nonce) > 40:
self.log_error("IN: %s, bad nonce : %s" % (from_key, nonce))
vres["status"] = "MISSING_PARAMETER"
else:
vres["nonce"] = nonce
if "sl" in params and not (params["sl"] == "100" or params["sl"] == "secure"):
self.log_error("IN: %s, sync level unsupported" % (from_key))
vres["status"] = "BACKEND_ERROR"
sig, client_key, = check_signature(params)
if sig != True:
self.log_error("IN: %s, signature validation error" % (from_key))
if client_key == None:
vres["status"] = "NO_SUCH_CLIENT"
# To be compatible with YK-VAL, we will sign this response using a null key
client_key = chr(0)
else:
vres["status"] = "BAD_SIGNATURE"
if "status" not in vres:
try:
res = pyhsm.yubikey.validate_otp(hsm, from_key)
vres.update({"status": "OK",
"sessioncounter": str(res.use_ctr), # known confusion
"sessionuse": str(res.session_ctr), # known confusion
"timestamp": str((res.ts_high << 16 | res.ts_low) / 8)
}
)
if "sl" in params:
vres["sl"] = "100"
if "timestamp" in params:
vres["t"] = time.strftime("%FT%TZ0000", time.gmtime())
except pyhsm.exception.YHSM_CommandFailed, e:
if e.status == pyhsm.defines.YSM_ID_NOT_FOUND:
vres["status"] = "BAD_OTP"
elif e.status == pyhsm.defines.YSM_OTP_REPLAY:
vres["status"] = "REPLAYED_OTP"
elif e.status == pyhsm.defines.YSM_OTP_INVALID:
vres["status"] = "BAD_OTP"
else:
vres["status"] = "BACKEND_ERROR"
self.log_error("IN: %s, validation result %s (replying %s)" % (from_key, pyhsm.defines.status2str(e.status), vres["status"]))
return make_otp_response(vres, client_key) |
async def cli(self):
    """Enter commands in a simple CLI.

    Reads commands from stdin in a loop until the user types ``exit``.
    The ``cli`` command itself is rejected since a CLI is already running.
    """
    print('Enter commands and press enter')
    print('Type help for help and exit to quit')
    while True:
        command = await _read_input(self.loop, 'pyatv> ')
        if command.lower() == 'exit':
            break
        elif command == 'cli':
            # Fixed typo in user-facing message ("availble").
            print('Command not available here')
            continue
        await _handle_device_command(
            self.args, command, self.atv, self.loop)
### Input:
Enter commands in a simple CLI.
### Response:
async def cli(self):
"""Enter commands in a simple CLI."""
print('Enter commands and press enter')
print('Type help for help and exit to quit')
while True:
command = await _read_input(self.loop, 'pyatv> ')
if command.lower() == 'exit':
break
elif command == 'cli':
            print('Command not available here')
continue
await _handle_device_command(
self.args, command, self.atv, self.loop) |
def get_aws_index(self):
    """
    Returns tile index on AWS. If ``aws_index`` was not set during class
    initialization it will be determined according to existing tiles on
    AWS and cached on the instance for subsequent calls.

    :return: Index of tile on AWS
    :rtype: int
    :raises ValueError: if no tile info exists for the given name and time
    """
    if self.aws_index is not None:
        return self.aws_index
    tile_info_list = get_tile_info(self.tile_name, self.datetime, all_tiles=True)
    if not tile_info_list:
        raise ValueError('Cannot find aws_index for specified tile and time')
    if self.data_source is DataSource.SENTINEL2_L2A:
        # L2A products may have several candidate indices; probe them in
        # ascending order and keep the first whose tile info is actually
        # retrievable.
        for tile_info in sorted(tile_info_list, key=self._parse_aws_index):
            try:
                self.aws_index = self._parse_aws_index(tile_info)
                self.get_tile_info()
                return self.aws_index
            except AwsDownloadFailedException:
                pass
    # Fall back to the first candidate. Bug fix: cache the result so the
    # memoization guard above works and repeated calls do not re-query AWS.
    self.aws_index = self._parse_aws_index(tile_info_list[0])
    return self.aws_index
according to existing tiles on AWS.
:return: Index of tile on AWS
:rtype: int | Below is the the instruction that describes the task:
### Input:
Returns tile index on AWS. If `tile_index` was not set during class initialization it will be determined
according to existing tiles on AWS.
:return: Index of tile on AWS
:rtype: int
### Response:
def get_aws_index(self):
"""
Returns tile index on AWS. If `tile_index` was not set during class initialization it will be determined
according to existing tiles on AWS.
:return: Index of tile on AWS
:rtype: int
"""
if self.aws_index is not None:
return self.aws_index
tile_info_list = get_tile_info(self.tile_name, self.datetime, all_tiles=True)
if not tile_info_list:
raise ValueError('Cannot find aws_index for specified tile and time')
if self.data_source is DataSource.SENTINEL2_L2A:
for tile_info in sorted(tile_info_list, key=self._parse_aws_index):
try:
self.aws_index = self._parse_aws_index(tile_info)
self.get_tile_info()
return self.aws_index
except AwsDownloadFailedException:
pass
return self._parse_aws_index(tile_info_list[0]) |
def ProfileRunValidationOutputFromOptions(feed, options):
    """Run RunValidationOutputFromOptions under cProfile.

    Prints CPU time, virtual memory size and a cumulative-time profile
    report, then returns the exit code from RunValidationOutputFromOptions.
    """
    import cProfile
    import pstats
    # runctx will modify a dict, but not locals(). We need a way to get rv back.
    locals_for_exec = locals()
    cProfile.runctx('rv = RunValidationOutputFromOptions(feed, options)',
                    globals(), locals_for_exec, 'validate-stats')
    # Only available on Unix, http://docs.python.org/lib/module-resource.html
    import resource
    print("Time: %d seconds" % (
        resource.getrusage(resource.RUSAGE_SELF).ru_utime +
        resource.getrusage(resource.RUSAGE_SELF).ru_stime))

    # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/286222
    # http://aspn.activestate.com/ASPN/Cookbook/ "The recipes are freely
    # available for review and use."
    def _VmB(VmKey):
        """Return size for *VmKey* from /proc status, in bytes."""
        _proc_status = '/proc/%d/status' % os.getpid()
        _scale = {'kB': 1024.0, 'mB': 1024.0 * 1024.0,
                  'KB': 1024.0, 'MB': 1024.0 * 1024.0}
        # Read pseudo file /proc/<pid>/status. Bug fixes: narrowed the
        # bare except, removed the unreachable `return 0` that followed
        # the raise, and used `with` so the handle is closed on error.
        try:
            with open(_proc_status) as t:
                v = t.read()
        except (IOError, OSError):
            raise Exception("no proc file %s" % _proc_status)
        # Locate the VmKey line, e.g. 'VmRSS: 9999 kB\n ...'
        try:
            i = v.index(VmKey)
            v = v[i:].split(None, 3)  # whitespace
        except ValueError:
            return 0  # key not present
        if len(v) < 3:
            raise Exception("%s" % v)
        # Convert the Vm value to bytes using its unit suffix.
        return int(float(v[1]) * _scale[v[2]])

    # I ran this on over a hundred GTFS files, comparing VmSize to VmRSS
    # (resident set size). The difference was always under 2% or 3MB.
    print("Virtual Memory Size: %d bytes" % _VmB('VmSize:'))
    # Output report of where CPU time was spent.
    p = pstats.Stats('validate-stats')
    p.strip_dirs()
    p.sort_stats('cumulative').print_stats(30)
    p.sort_stats('cumulative').print_callers(30)
    return locals_for_exec['rv']
### Input:
Run RunValidationOutputFromOptions, print profile and return exit code.
### Response:
def ProfileRunValidationOutputFromOptions(feed, options):
"""Run RunValidationOutputFromOptions, print profile and return exit code."""
import cProfile
import pstats
# runctx will modify a dict, but not locals(). We need a way to get rv back.
locals_for_exec = locals()
cProfile.runctx('rv = RunValidationOutputFromOptions(feed, options)',
globals(), locals_for_exec, 'validate-stats')
# Only available on Unix, http://docs.python.org/lib/module-resource.html
import resource
print("Time: %d seconds" % (
resource.getrusage(resource.RUSAGE_SELF).ru_utime +
resource.getrusage(resource.RUSAGE_SELF).ru_stime))
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/286222
# http://aspn.activestate.com/ASPN/Cookbook/ "The recipes are freely
# available for review and use."
def _VmB(VmKey):
"""Return size from proc status in bytes."""
_proc_status = '/proc/%d/status' % os.getpid()
_scale = {'kB': 1024.0, 'mB': 1024.0*1024.0,
'KB': 1024.0, 'MB': 1024.0*1024.0}
# get pseudo file /proc/<pid>/status
try:
t = open(_proc_status)
v = t.read()
t.close()
except:
raise Exception("no proc file %s" % _proc_status)
return 0 # non-Linux?
# get VmKey line e.g. 'VmRSS: 9999 kB\n ...'
try:
i = v.index(VmKey)
v = v[i:].split(None, 3) # whitespace
except:
return 0 # v is empty
if len(v) < 3:
raise Exception("%s" % v)
return 0 # invalid format?
# convert Vm value to bytes
return int(float(v[1]) * _scale[v[2]])
# I ran this on over a hundred GTFS files, comparing VmSize to VmRSS
# (resident set size). The difference was always under 2% or 3MB.
print("Virtual Memory Size: %d bytes" % _VmB('VmSize:'))
# Output report of where CPU time was spent.
p = pstats.Stats('validate-stats')
p.strip_dirs()
p.sort_stats('cumulative').print_stats(30)
p.sort_stats('cumulative').print_callers(30)
return locals_for_exec['rv'] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.