code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def add_to_buffer(self, content, read_position):
    """Prepend newly read bytes to the working buffer.

    Args:
        content (bytes): data to be added to the working BufferWorkSpace buffer.
        read_position (int): file offset the data was read from.
    """
    self.read_position = read_position
    existing = self.read_buffer
    self.read_buffer = content if existing is None else content + existing
|
Add additional bytes content as read from the read_position.
Args:
content (bytes): data to be added to the working BufferWorkSpace buffer.
read_position (int): where in the file pointer the data was read from.
|
def serve_get(self, path, **params):
    """Dispatch a GET request to the registered callback for *path*.

    Looks up a GET callback registered for the given HTTP path and invokes
    it as ``callback(path, **params)``; params include the
    BaseHTTPRequestHandler instance.  The callback must return a tuple
    ``(code, content, content_type)``.  When several registrations match,
    the longest match wins (matching is anchored at the start of the path
    and delegated to ``_match_path``).  Returns ``None`` when *path* is
    ``None`` or no registered callback is willing to handle it.
    """
    if path is None:
        return None
    handler = self._match_path(path, self.get_registrations)
    if handler is None:
        return None
    return handler(path, **params)
|
Find a GET callback for the given HTTP path, call it and return the
results. The callback is called with two arguments, the path used to
match it, and params which include the BaseHTTPRequestHandler instance.
The callback must return a tuple:
(code, content, content_type)
If multiple registrations match the path, the one with the longest
matching text will be used. Matches are always anchored at the start
of the path.
None is returned if no registered callback is willing to handle a path.
|
def __stringify_predicate(predicate):
    """Build a human-readable description of *predicate* via reflection.

    Extracts the callable's name from the predicate's source text and, when
    the predicate takes arguments, walks the outer stack frames looking for
    the frame that mentions that name so its argument values can be reported.

    NOTE(review): this assumes ``getsource`` returns a line shaped like
    ``<lhs> = <name>(...)`` (the name is taken as the third space-separated
    token) -- fragile for other layouts; confirm against actual call sites.
    """
    # Third whitespace-separated token of the source line, e.g. the callable
    # in "foo = some_pred(x)," with any trailing comma stripped.
    funname = getsource(predicate).strip().split(' ')[2].rstrip(',')
    params = 'None'
    # if args dig in the stack
    if '()' not in funname:
        stack = getouterframes(currentframe())
        for frame in range(0, len(stack)):
            if funname in str(stack[frame]):
                # Capture the argument values recorded in the matching frame.
                _, _, _, params = getargvalues(stack[frame][0])
    return "function: {} params: {}".format(funname, params)
|
Reflection of function name and parameters of the predicate being used.
|
def newPage(doc, pno=-1, width=595, height=842):
    """Insert a new page into *doc* at position *pno* and return the page object."""
    dimensions = {"width": width, "height": height}
    doc._newPage(pno, **dimensions)
    return doc[pno]
|
Create and return a new page object.
|
def all_casings(input_string):
    """Yield every upper/lower-case permutation of *input_string*.

    Characters without a case distinction (digits, punctuation) pass
    through unchanged.  A pretty algorithm, via @Amber:
    http://stackoverflow.com/questions/6792803/finding-all-possible-case-permutations-in-python
    """
    if not input_string:
        yield ""
        return
    head, tail = input_string[:1], input_string[1:]
    caseless = head.lower() == head.upper()
    for suffix in all_casings(tail):
        if caseless:
            yield head + suffix
        else:
            yield head.lower() + suffix
            yield head.upper() + suffix
|
Permute all casings of a given string.
A pretty algorithm, via @Amber
http://stackoverflow.com/questions/6792803/finding-all-possible-case-permutations-in-python
|
def fitness(self, parsimony_coefficient=None):
    """Return the parsimony-penalized fitness of the program.

    Parameters
    ----------
    parsimony_coefficient : float, optional
        If automatic parsimony is being used, the computed value according
        to the population. Otherwise the initialized value is used.

    Returns
    -------
    fitness : float
        The penalized fitness of the program.
    """
    coeff = (self.parsimony_coefficient
             if parsimony_coefficient is None
             else parsimony_coefficient)
    # Penalty grows with program length, signed by the metric direction.
    return self.raw_fitness_ - coeff * len(self.program) * self.metric.sign
|
Evaluate the penalized fitness of the program according to X, y.
Parameters
----------
parsimony_coefficient : float, optional
If automatic parsimony is being used, the computed value according
to the population. Otherwise the initialized value is used.
Returns
-------
fitness : float
The penalized fitness of the program.
|
def _request(self, buf, properties, date=None):
    """Send an annotation request to the CoreNLP server.

    :param (str | bytes) buf: raw text (or serialized protobuf) payload for
        the CoreNLP server to parse
    :param (dict) properties: properties that the server expects
    :param (str) date: reference date of document, used by server to set
        docDate - expects YYYY-MM-DD
    :return: the completed request result
    :raises TimeoutException: if the server reports the request timed out
    :raises AnnotationException: for any other HTTP error from the server
    """
    self.ensure_alive()
    try:
        # Content type depends on whether the payload is plain text or a
        # serialized protobuf document.
        input_format = properties.get("inputFormat", "text")
        if input_format == "text":
            ctype = "text/plain; charset=utf-8"
        elif input_format == "serialized":
            ctype = "application/x-protobuf"
        else:
            raise ValueError("Unrecognized inputFormat " + input_format)
        if date:
            params = {'properties': str(properties),'date': str(date)}
        else:
            params = {'properties': str(properties)}
        # assumes self.timeout is in milliseconds (requests wants seconds,
        # and the wait is twice the configured budget) -- TODO confirm
        r = requests.post(self.endpoint,
                          params=params,
                          data=buf, headers={'content-type': ctype},
                          timeout=(self.timeout*2)/1000)
        r.raise_for_status()
        return r
    except requests.HTTPError as e:
        # r is bound here: raise_for_status is the only statement that can
        # raise HTTPError, and it runs after r is assigned.
        if r.text == "CoreNLP request timed out. Your document may be too long.":
            raise TimeoutException(r.text)
        else:
            raise AnnotationException(r.text)
|
Send a request to the CoreNLP server.
:param (str | unicode) text: raw text for the CoreNLPServer to parse
:param (dict) properties: properties that the server expects
:param (str) date: reference date of document, used by server to set docDate - expects YYYY-MM-DD
:return: request result
|
def disable(cls, args):
    """Disable subcommand: disable every enabled NApp or just the given ones."""
    mgr = NAppsManager()
    # Either all currently enabled NApps, or the ones named on the CLI.
    targets = mgr.get_enabled() if args['all'] else args['<napp>']
    for napp in targets:
        mgr.set_napp(*napp)
        LOG.info('NApp %s:', mgr.napp_id)
        cls.disable_napp(mgr)
|
Disable subcommand.
|
def _config_profile_list(self):
"""Get list of supported config profile from DCNM."""
url = self._cfg_profile_list_url
payload = {}
try:
res = self._send_request('GET', url, payload, 'config-profile')
if res and res.status_code in self._resp_ok:
return res.json()
except dexc.DfaClientRequestFailed:
LOG.error("Failed to send request to DCNM.")
|
Get list of supported config profile from DCNM.
|
def sample(self, size=1):
    """Draw samples of the Bernoulli random variable.

    Parameters
    ----------
    size : int
        The number of samples to generate.

    Returns
    -------
    :obj:`numpy.ndarray` of int or int
        The samples; a bare scalar (not wrapped in an array) when
        ``size == 1``.
    """
    draws = scipy.stats.bernoulli.rvs(self.p, size=size)
    return draws[0] if size == 1 else draws
|
Generate samples of the random variable.
Parameters
----------
size : int
The number of samples to generate.
Returns
-------
:obj:`numpy.ndarray` of int or int
The samples of the random variable. If `size == 1`, then
the returned value will not be wrapped in an array.
|
def GammaContrast(gamma=1, per_channel=False, name=None, deterministic=False, random_state=None):
    """
    Adjust contrast by scaling each pixel value to ``255 * ((I_ij/255)**gamma)``.

    Values in the range ``gamma=(0.5, 2.0)`` seem to be sensible.

    dtype support::

        See :func:`imgaug.augmenters.contrast.adjust_contrast_gamma`.

    Parameters
    ----------
    gamma : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Exponent for the contrast adjustment. Higher values darken the image.
        A number is used as-is for all images; a tuple ``(a, b)`` samples
        uniformly from ``[a, b]`` per image; a list samples randomly from the
        list per image; a StochasticParameter is sampled per image.

    per_channel : bool or float, optional
        Whether to use the same value for all channels (False) or to sample a
        new value for each channel (True). A float ``p`` means `per_channel`
        is treated as True for ``p`` percent of all images.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Returns
    -------
    _ContrastFuncWrapper
        Augmenter to perform gamma contrast adjustment.
    """
    gamma_param = iap.handle_continuous_param(
        gamma, "gamma", value_range=None, tuple_to_uniform=True,
        list_to_choice=True)
    uint_dtypes = ["uint8", "uint16", "uint32", "uint64"]
    int_dtypes = ["int8", "int16", "int32", "int64"]
    float_dtypes = ["float16", "float32", "float64"]
    return _ContrastFuncWrapper(
        adjust_contrast_gamma,
        [gamma_param],
        per_channel,
        dtypes_allowed=uint_dtypes + int_dtypes + float_dtypes,
        dtypes_disallowed=["float96", "float128", "float256", "bool"],
        name=ia.caller_name() if name is None else name,
        deterministic=deterministic,
        random_state=random_state
    )
|
Adjust contrast by scaling each pixel value to ``255 * ((I_ij/255)**gamma)``.
Values in the range ``gamma=(0.5, 2.0)`` seem to be sensible.
dtype support::
See :func:`imgaug.augmenters.contrast.adjust_contrast_gamma`.
Parameters
----------
gamma : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
Exponent for the contrast adjustment. Higher values darken the image.
* If a number, then that value will be used for all images.
* If a tuple ``(a, b)``, then a value from the range ``[a, b]`` will be used per image.
* If a list, then a random value will be sampled from that list per image.
* If a StochasticParameter, then a value will be sampled per image from that parameter.
per_channel : bool or float, optional
Whether to use the same value for all channels (False) or to sample a new value for each
channel (True). If this value is a float ``p``, then for ``p`` percent of all images `per_channel`
will be treated as True, otherwise as False.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Returns
-------
_ContrastFuncWrapper
Augmenter to perform gamma contrast adjustment.
|
def set_value(self, value, timeout):
    """Store a new cached value and push back its expiration.

    :param value: a new cached value.
    :param timeout: an expiration timeout in milliseconds from now.
    """
    self.value = value
    now_ms = time.perf_counter() * 1000
    self.expiration = now_ms + timeout
|
Sets a new value and extends its expiration.
:param value: a new cached value.
:param timeout: a expiration timeout in milliseconds.
|
def l2_regularizer(decay, name_filter='weights'):
    """Create an l2 regularizer that scales ``tf.nn.l2_loss`` by *decay*."""
    def _scaled_l2(tensor):
        return tf.nn.l2_loss(tensor) * decay
    return regularizer('l2_regularizer', _scaled_l2, name_filter=name_filter)
|
Create an l2 regularizer.
|
def gen_accept(id_, keysize=2048, force=False):
    r'''
    Generate a key pair, then accept the public key on the master.

    Returns the key pair in a dict; only the public key is preserved on the
    master.

    id\_
        The name of the minion for which to generate a key pair.

    keysize
        The size of the key pair to generate. Must be ``2048`` (the default)
        or greater; smaller values are rounded up to ``2048``.

    force
        By default, if a public key has already been accepted for the given
        minion, an empty dictionary is returned and no new key is created.
        Set to ``True`` to overwrite the minion's previously accepted key.

    .. code-block:: python

        >>> wheel.cmd('key.gen_accept', ['foo'])
        {'pub': '-----BEGIN PUBLIC KEY-----\n...\n-----END PUBLIC KEY-----',
         'priv': '-----BEGIN RSA PRIVATE KEY-----\n...\n-----END RSA PRIVATE KEY-----'}

    We can now see that the ``foo`` minion's key has been accepted:

    .. code-block:: python

        >>> wheel.cmd('key.list', ['accepted'])
        {'minions': ['foo', 'minion1', 'minion2', 'minion3']}
    '''
    id_ = clean.id(id_)
    ret = gen(id_, keysize)
    acc_path = os.path.join(__opts__['pki_dir'], 'minions', id_)
    # Respect an already-accepted key unless the caller forces an overwrite.
    if os.path.isfile(acc_path) and not force:
        return {}
    with salt.utils.files.fopen(acc_path, 'w+') as fp_:
        fp_.write(salt.utils.stringutils.to_str(ret['pub']))
    return ret
|
r'''
Generate a key pair then accept the public key. This function returns the
key pair in a dict, only the public key is preserved on the master. Returns
a dictionary.
id\_
The name of the minion for which to generate a key pair.
keysize
The size of the key pair to generate. The size must be ``2048``, which
is the default, or greater. If set to a value less than ``2048``, the
key size will be rounded up to ``2048``.
force
If a public key has already been accepted for the given minion on the
master, then the gen_accept function will return an empty dictionary
and not create a new key. This is the default behavior. If ``force``
is set to ``True``, then the minion's previously accepted key will be
overwritten.
.. code-block:: python
>>> wheel.cmd('key.gen_accept', ['foo'])
{'pub': '-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBC
...
BBPfamX9gGPQTpN9e8HwcZjXQnmg8OrcUl10WHw09SDWLOlnW+ueTWugEQpPt\niQIDAQAB\n
-----END PUBLIC KEY-----',
'priv': '-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEA42Kf+w9XeZWgguzv
...
QH3/W74X1+WTBlx4R2KGLYBiH+bCCFEQ/Zvcu4Xp4bIOPtRKozEQ==\n
-----END RSA PRIVATE KEY-----'}
We can now see that the ``foo`` minion's key has been accepted by the master:
.. code-block:: python
>>> wheel.cmd('key.list', ['accepted'])
{'minions': ['foo', 'minion1', 'minion2', 'minion3']}
|
def parse_valu(text, off=0):
    '''
    Parse the special syntax for the right side of equals in a macro.

    Returns a ``(valu, off)`` tuple where valu may be a list (parenthesized),
    a string (quoted), an int (when the raw token parses as one), or the raw
    token text.
    '''
    _, off = nom(text, off, whites)
    # Parenthesized values are lists; quoted values are strings.
    if nextchar(text, off, '('):
        return parse_list(text, off)
    if isquote(text, off):
        return parse_string(text, off)
    # Unquoted tokens are bounded by whitespace and storm syntax
    # characters ( ) , =
    token, off = meh(text, off, valmeh)
    # Backward compatibility: try the token as an int (base auto-detected);
    # fall back to the raw string when that fails.
    try:
        return int(token, 0), off
    except ValueError:
        return token, off
|
Special syntax for the right side of equals in a macro
|
def import_locations(self, data):
    """Parse geonames.org country database exports.

    ``import_locations()`` returns a list of :class:`trigpoints.Trigpoint`
    objects generated from the data exported by geonames.org_.

    It expects data files in the geonames tab separated format; such files
    can be downloaded from the geonames.org_ site on their
    `database export page`_.

    Args:
        data (iter): geonames.org locations data to read
    Returns:
        list: geonames.org identifiers with :class:`Location` objects
    Raises:
        FileFormatError: Unknown file format

    .. _geonames.org: http://www.geonames.org/
    .. _database export page: http://download.geonames.org/export/dump/
    """
    self._data = data
    # Column names follow the geonames.org dump schema, in file order.
    field_names = ('geonameid', 'name', 'asciiname', 'alt_names',
                   'latitude', 'longitude', 'feature_class',
                   'feature_code', 'country', 'alt_country', 'admin1',
                   'admin2', 'admin3', 'admin4', 'population', 'altitude',
                   'gtopo30', 'tzname', 'modified_date')
    # Per-field converters; the *_or_none helpers map empty strings to None.
    comma_split = lambda s: s.split(',')
    date_parse = lambda s: datetime.date(*map(int, s.split('-')))
    or_none = lambda x, s: x(s) if s else None
    str_or_none = lambda s: or_none(str, s)
    float_or_none = lambda s: or_none(float, s)
    int_or_none = lambda s: or_none(int, s)
    tz_parse = lambda s: self.timezones[s][0] if self.timezones else None
    # One parser per field, positionally aligned with field_names.
    field_parsers = (int_or_none, str_or_none, str_or_none, comma_split,
                     float_or_none, float_or_none, str_or_none,
                     str_or_none, str_or_none, comma_split, str_or_none,
                     str_or_none, str_or_none, str_or_none, int_or_none,
                     int_or_none, int_or_none, tz_parse, date_parse)
    # NOTE(review): the delimiter appears here as a literal space, but the
    # documented format is tab separated -- confirm the original used "\t"
    # (possibly mangled in transit).
    data = utils.prepare_csv_read(data, field_names, delimiter=r" ")
    for row in data:
        try:
            # Convert each raw string field with its matching parser.
            for name, parser in zip(field_names, field_parsers):
                row[name] = parser(row[name])
        except ValueError:
            raise utils.FileFormatError('geonames.org')
        self.append(Location(**row))
|
Parse geonames.org country database exports.
``import_locations()`` returns a list of :class:`trigpoints.Trigpoint`
objects generated from the data exported by geonames.org_.
It expects data files in the following tab separated format::
2633441 Afon Wyre Afon Wyre River Wayrai,River Wyrai,Wyre 52.3166667 -4.1666667 H STM GB GB 00 0 -9999 Europe/London 1994-01-13
2633442 Wyre Wyre Viera 59.1166667 -2.9666667 T ISL GB GB V9 0 1 Europe/London 2004-09-24
2633443 Wraysbury Wraysbury Wyrardisbury 51.45 -0.55 P PPL GB P9 0 28 Europe/London 2006-08-21
Files containing the data in this format can be downloaded from the
geonames.org_ site in their `database export page`_.
Files downloaded from the geonames site when processed by
``import_locations()`` will return ``list`` objects of the following
style::
[Location(2633441, "Afon Wyre", "Afon Wyre",
['River Wayrai', 'River Wyrai', 'Wyre'],
52.3166667, -4.1666667, "H", "STM", "GB", ['GB'], "00",
None, None, None, 0, None, -9999, "Europe/London",
datetime.date(1994, 1, 13)),
Location(2633442, "Wyre", "Wyre", ['Viera'], 59.1166667,
-2.9666667, "T", "ISL", "GB", ['GB'], "V9", None, None,
None, 0, None, 1, "Europe/London",
datetime.date(2004, 9, 24)),
Location(2633443, "Wraysbury", "Wraysbury", ['Wyrardisbury'],
51.45, -0.55, "P", "PPL", "GB", None, "P9", None, None,
None, 0, None, 28, "Europe/London",
datetime.date(2006, 8, 21))]
Args:
data (iter): geonames.org locations data to read
Returns:
list: geonames.org identifiers with :class:`Location` objects
Raises:
FileFormatError: Unknown file format
.. _geonames.org: http://www.geonames.org/
.. _database export page: http://download.geonames.org/export/dump/
|
def portdate(port_number, date=None, return_format=None):
    """Information about a particular port at a particular date.

    If the date is omitted, today's date is used.

    :param port_number: a string or integer port number
    :param date: an optional string in 'Y-M-D' format or datetime.date() object
    """
    uri = 'portdate/{number}'.format(number=port_number)
    if date:
        # A datetime.date formats itself; plain strings are used as-is.
        try:
            date_part = date.strftime("%Y-%m-%d")
        except AttributeError:
            date_part = date
        uri = '/'.join([uri, date_part])
    response = _get(uri, return_format)
    if 'bad port number' in str(response):
        raise Error('Bad port number, {number}'.format(number=port_number))
    return response
|
Information about a particular port at a particular date.
If the date is omitted, today's date is used.
:param port_number: a string or integer port number
:param date: an optional string in 'Y-M-D' format or datetime.date() object
|
def _GetTSKPartitionIdentifiers(self, scan_node):
    """Determines the TSK partition identifiers.

    Args:
        scan_node (SourceScanNode): scan node.

    Returns:
        list[str]: TSK partition identifiers.

    Raises:
        ScannerError: if the format of or within the source is not supported
            or the scan node is invalid or if the volume for a specific
            identifier cannot be retrieved.
        UserAbort: if the user requested to abort.
    """
    if not scan_node or not scan_node.path_spec:
        raise errors.ScannerError('Invalid scan node.')
    volume_system = tsk_volume_system.TSKVolumeSystem()
    volume_system.Open(scan_node.path_spec)
    volume_identifiers = self._source_scanner.GetVolumeIdentifiers(
        volume_system)
    if not volume_identifiers:
        return []
    # A single partition needs no user interaction.
    if len(volume_identifiers) == 1:
        return volume_identifiers
    # Multiple partitions: a mediator is required to ask which to use.
    if not self._mediator:
        raise errors.ScannerError(
            'Unable to proceed. Partitions found but no mediator to determine '
            'how they should be used.')
    try:
        volume_identifiers = self._mediator.GetPartitionIdentifiers(
            volume_system, volume_identifiers)
    except KeyboardInterrupt:
        raise errors.UserAbort('File system scan aborted.')
    # Normalize the selection to 'p'-prefixed identifiers, e.g. 'p1'.
    return self._NormalizedVolumeIdentifiers(
        volume_system, volume_identifiers, prefix='p')
|
Determines the TSK partition identifiers.
Args:
scan_node (SourceScanNode): scan node.
Returns:
list[str]: TSK partition identifiers.
Raises:
ScannerError: if the format of or within the source is not supported or
the scan node is invalid or if the volume for a specific identifier
cannot be retrieved.
UserAbort: if the user requested to abort.
|
def vofile(filename, **kwargs):
    """
    Open and return a handle on a VOSpace data connection.

    If a file matching the basename of *filename* is readable in the current
    directory, that local copy is opened instead of contacting VOSpace.

    @param filename: VOSpace path of the file to open
    @param kwargs: extra options forwarded to client.open ('view' defaults to 'data')
    @return: a readable file-like handle
    """
    local_name = os.path.basename(filename)
    if os.access(local_name, os.R_OK):
        return open(local_name, 'r')
    kwargs.setdefault('view', 'data')
    return client.open(filename, **kwargs)
|
Open and return a handle on a VOSpace data connection
@param filename:
@param kwargs:
@return:
|
def hybrid_forward(self, F, inputs, token_types, valid_length=None, masked_positions=None):
    # pylint: disable=arguments-differ
    # pylint: disable=unused-argument
    """Generate the representation given the inputs.

    This is used in training or fine-tuning a static (hybridized) BERT model.

    Depending on the configured flags, the returned tuple contains, in order:
    the encoded sequence(s), the attention outputs (if any), the pooled
    output, the next-sentence classifier output, and the masked-LM decoder
    output.  A bare value is returned when only one output is produced.
    """
    outputs = []
    seq_out, attention_out = self._encode_sequence(F, inputs, token_types, valid_length)
    outputs.append(seq_out)
    if self.encoder._output_all_encodings:
        # seq_out is a list of per-layer encodings; pool/decode on the last.
        assert isinstance(seq_out, list)
        output = seq_out[-1]
    else:
        output = seq_out
    if attention_out:
        outputs.append(attention_out)
    if self._use_pooler:
        # Pooled representation feeds the next-sentence classifier, so the
        # classifier branch only applies when pooling is enabled.
        pooled_out = self._apply_pooling(output)
        outputs.append(pooled_out)
        if self._use_classifier:
            next_sentence_classifier_out = self.classifier(pooled_out)
            outputs.append(next_sentence_classifier_out)
    if self._use_decoder:
        assert masked_positions is not None, \
            'masked_positions tensor is required for decoding masked language model'
        decoder_out = self._decode(output, masked_positions)
        outputs.append(decoder_out)
    return tuple(outputs) if len(outputs) > 1 else outputs[0]
|
Generate the representation given the inputs.
This is used in training or fine-tuning a static (hybridized) BERT model.
|
def find_first_fit(unoccupied_columns, row, row_length):
    """
    Return the column offset at which the row's items first fit.

    Each free column is tried as the position for the row's first item; the
    corresponding offset is returned as soon as ``check_columns_fit``
    accepts it.

    Raises:
        ValueError: if the row cannot be placed in any free column.
    """
    for free_col in unoccupied_columns:
        # The offset is such that the first item goes in the free column.
        first_item_x = row[0][0]
        offset = free_col - first_item_x
        if check_columns_fit(unoccupied_columns, row, offset, row_length):
            return offset
    # Fix: error message previously read "cannot bossily fit" (typo).
    raise ValueError("Row cannot possibly fit in %r: %r"
                     % (list(unoccupied_columns.keys()), row))
|
Finds the first index that the row's items can fit.
|
def _to_numeric_float(number, nums_int):
"""
Transforms a string into a float.
The nums_int parameter indicates the number of characters, starting from
the left, to be used for the integer value. All the remaining ones will be
used for the decimal value.
:param number: string with the number
:param nums_int: characters, counting from the left, for the integer value
:return: a float created from the string
"""
index_end = len(number) - nums_int
return float(number[:nums_int] + '.' + number[-index_end:])
|
Transforms a string into a float.
The nums_int parameter indicates the number of characters, starting from
the left, to be used for the integer value. All the remaining ones will be
used for the decimal value.
:param number: string with the number
:param nums_int: characters, counting from the left, for the integer value
:return: a float created from the string
|
def unmarshal_json(obj, cls, allow_extra_keys=True, ctor=None):
    """ Unmarshal a JSON object @obj into an instance of @cls.

    Thin wrapper over ``unmarshal_dict``.

    Args:
        obj: dict, A JSON object
        cls: type, The class to unmarshal into
        allow_extra_keys: bool, False to raise an exception when extra
            keys are present, True to ignore
        ctor: None-or-static-method: Use this method as the
            constructor instead of __init__
    Returns:
        instance of @cls
    Raises:
        ExtraKeysError: If allow_extra_keys == False, and extra keys
            are present in @obj and not in @cls.__init__
        ValueError: If @cls.__init__ does not contain a self argument
    """
    return unmarshal_dict(obj, cls, allow_extra_keys, ctor=ctor)
|
Unmarshal @obj into @cls
Args:
obj: dict, A JSON object
cls: type, The class to unmarshal into
allow_extra_keys: bool, False to raise an exception when extra
keys are present, True to ignore
ctor: None-or-static-method: Use this method as the
constructor instead of __init__
Returns:
instance of @cls
Raises:
ExtraKeysError: If allow_extra_keys == False, and extra keys
are present in @obj and not in @cls.__init__
ValueError: If @cls.__init__ does not contain a self argument
|
def tostring(self, fully_qualified=True, pretty_print=True, encoding="UTF-8"):
    """
    Serialize this METS document and return it as a string.

    To write to file, see :meth:`write`.

    The default encoding is ``UTF-8``; pass ``encoding="unicode"`` to get a
    unicode string instead.

    :return: String of this document
    """
    root = self.serialize(fully_qualified=fully_qualified)
    serialize_kwargs = {"pretty_print": pretty_print, "encoding": encoding}
    # An XML declaration only makes sense for encoded (byte) output.
    if encoding != "unicode":
        serialize_kwargs["xml_declaration"] = True
    return etree.tostring(root, **serialize_kwargs)
|
Serialize and return a string of this METS document.
To write to file, see :meth:`write`.
The default encoding is ``UTF-8``. This method will return a unicode
string when ``encoding`` is set to ``unicode``.
:return: String of this document
|
def sigma_filter(filename, region, step_size, box_size, shape, domask, sid):
    """
    Calculate the background and rms for a sub region of an image. The results are
    written to shared memory - irms and ibkg.

    Parameters
    ----------
    filename : string
        Fits file to open
    region : list
        Region within the fits file that is to be processed. (row_min, row_max).
    step_size : (int, int)
        The filtering step size
    box_size : (int, int)
        The size of the box over which the filter is applied (each step).
    shape : tuple
        The shape of the fits image
    domask : bool
        If true then copy the data mask to the output.
    sid : int
        The stripe number

    Returns
    -------
    None
    """
    ymin, ymax = region
    logging.debug('rows {0}-{1} starting at {2}'.format(ymin, ymax, strftime("%Y-%m-%d %H:%M:%S", gmtime())))
    # cut out the region of interest plus 1/2 the box size, but clip to the image size
    data_row_min = max(0, ymin - box_size[0]//2)
    data_row_max = min(shape[0], ymax + box_size[0]//2)
    # Figure out how many axes are in the datafile
    NAXIS = fits.getheader(filename)["NAXIS"]
    # Read only the rows we need; extra leading axes (freq/stokes) are
    # indexed away for 3- and 4-dimensional files.
    with fits.open(filename, memmap=True) as a:
        if NAXIS == 2:
            data = a[0].section[data_row_min:data_row_max, 0:shape[1]]
        elif NAXIS == 3:
            data = a[0].section[0, data_row_min:data_row_max, 0:shape[1]]
        elif NAXIS == 4:
            data = a[0].section[0, 0, data_row_min:data_row_max, 0:shape[1]]
        else:
            logging.error("Too many NAXIS for me {0}".format(NAXIS))
            logging.error("fix your file to be more sane")
            raise Exception("Too many NAXIS")
    row_len = shape[1]
    logging.debug('data size is {0}'.format(data.shape))

    def box(r, c):
        """
        calculate the boundaries of the box centered at r,c
        with size = box_size
        """
        r_min = max(0, r - box_size[0] // 2)
        r_max = min(data.shape[0] - 1, r + box_size[0] // 2)
        c_min = max(0, c - box_size[1] // 2)
        c_max = min(data.shape[1] - 1, c + box_size[1] // 2)
        return r_min, r_max, c_min, c_max

    # set up a grid of rows/cols at which we will compute the bkg/rms
    rows = list(range(ymin-data_row_min, ymax-data_row_min, step_size[0]))
    rows.append(ymax-data_row_min)
    cols = list(range(0, shape[1], step_size[1]))
    cols.append(shape[1])
    # store the computed bkg/rms in this smaller array
    vals = np.zeros(shape=(len(rows),len(cols)))
    # First pass: background estimate per grid point.  From usage,
    # sigmaclip returns a (background, rms)-style pair for the clipped data.
    for i, row in enumerate(rows):
        for j, col in enumerate(cols):
            r_min, r_max, c_min, c_max = box(row, col)
            new = data[r_min:r_max, c_min:c_max]
            new = np.ravel(new)
            bkg, _ = sigmaclip(new, 3, 3)
            vals[i,j] = bkg
    # indices of all the pixels within our region
    gr, gc = np.mgrid[ymin-data_row_min:ymax-data_row_min, 0:shape[1]]
    logging.debug("Interpolating bkg to sharemem")
    # NOTE(review): ibkg/irms/barrier/bkg_events/mask_events are
    # module-level shared-memory objects -- confirm against module setup.
    ifunc = RegularGridInterpolator((rows, cols), vals)
    # Interpolate the coarse grid back to full resolution, one row at a
    # time, writing into the flat shared-memory background array.
    for i in range(gr.shape[0]):
        row = np.array(ifunc((gr[i], gc[i])), dtype=np.float32)
        start_idx = np.ravel_multi_index((ymin+i, 0), shape)
        end_idx = start_idx + row_len
        ibkg[start_idx:end_idx] = row  # np.ctypeslib.as_ctypes(row)
    del ifunc
    logging.debug(" ... done writing bkg")
    # signal that the bkg is done for this region, and wait for neighbours
    barrier(bkg_events, sid)
    logging.debug("{0} background subtraction".format(sid))
    # Subtract the (neighbour-complete) background from our working data.
    for i in range(data_row_max - data_row_min):
        start_idx = np.ravel_multi_index((data_row_min + i, 0), shape)
        end_idx = start_idx + row_len
        data[i, :] = data[i, :] - ibkg[start_idx:end_idx]
    # reset/recycle the vals array
    vals[:] = 0
    # Second pass: rms estimate per grid point on background-subtracted data.
    for i, row in enumerate(rows):
        for j, col in enumerate(cols):
            r_min, r_max, c_min, c_max = box(row, col)
            new = data[r_min:r_max, c_min:c_max]
            new = np.ravel(new)
            _ , rms = sigmaclip(new, 3, 3)
            vals[i,j] = rms
    logging.debug("Interpolating rm to sharemem rms")
    ifunc = RegularGridInterpolator((rows, cols), vals)
    for i in range(gr.shape[0]):
        row = np.array(ifunc((gr[i], gc[i])), dtype=np.float32)
        start_idx = np.ravel_multi_index((ymin+i, 0), shape)
        end_idx = start_idx + row_len
        irms[start_idx:end_idx] = row  # np.ctypeslib.as_ctypes(row)
    del ifunc
    logging.debug(" .. done writing rms")
    if domask:
        # Wait for all stripes to finish writing before masking.
        barrier(mask_events, sid)
        logging.debug("applying mask")
        # Propagate non-finite pixels in the input to NaN in both outputs.
        for i in range(gr.shape[0]):
            mask = np.where(np.bitwise_not(np.isfinite(data[i + ymin-data_row_min,:])))[0]
            for j in mask:
                idx = np.ravel_multi_index((i + ymin,j),shape)
                ibkg[idx] = np.nan
                irms[idx] = np.nan
        logging.debug(" ... done applying mask")
    logging.debug('rows {0}-{1} finished at {2}'.format(ymin, ymax, strftime("%Y-%m-%d %H:%M:%S", gmtime())))
    return
|
Calculate the background and rms for a sub region of an image. The results are
written to shared memory - irms and ibkg.
Parameters
----------
filename : string
Fits file to open
region : list
Region within the fits file that is to be processed. (row_min, row_max).
step_size : (int, int)
The filtering step size
box_size : (int, int)
The size of the box over which the filter is applied (each step).
shape : tuple
The shape of the fits image
domask : bool
If true then copy the data mask to the output.
sid : int
The stripe number
Returns
-------
None
|
def get_clipboard_text_and_convert(paste_list=False):
    u"""Fetch text from the clipboard.

    When paste_list is True and the text contains tabs, convert the
    tab-separated data to a list of lists rendered as source text, enclosed
    in array() when all elements are numeric, with whitespace stripped.
    """
    txt = GetClipboardText()
    if not txt:
        return txt
    if paste_list and u"\t" in txt:
        array, flag = make_list_of_list(txt)
        txt = repr(array) if flag else u"array(%s)"%repr(array)
        txt = u"".join([c for c in txt if c not in u" \t\r\n"])
    return txt
|
u"""Get txt from clipboard. if paste_list==True the convert tab separated
data to list of lists. Enclose list of list in array() if all elements are
numeric
|
def image_id_from_registry(image_name):
    """Resolve a docker image name to a digest-pinned identifier.

    Queries the (public or private) registry v2 manifest endpoint via a
    HEAD request and returns ``registry/repository@<digest>``, or None
    on any request failure.
    """
    registry, repository, tag = parse(image_name)
    try:
        token = auth_token(registry, repository).get("token")
        # dockerhub serves its registry API from a different hostname
        if registry == "index.docker.io":
            registry = "registry-1.docker.io"
        res = requests.head(
            "https://{}/v2/{}/manifests/{}".format(registry, repository, tag),
            headers={
                "Authorization": "Bearer {}".format(token),
                "Accept": "application/vnd.docker.distribution.manifest.v2+json"
            }, timeout=5)
        res.raise_for_status()
    except requests.RequestException as exc:
        # BUG FIX: the original logged `res`, which is unbound (NameError)
        # when auth_token or the connection fails before `res` is assigned.
        log.error("Received {} when attempting to get digest for {}".format(
            exc, image_name))
        return None
    return "@".join([registry + "/" + repository, res.headers["Docker-Content-Digest"]])
|
Get the docker id from a public or private registry
|
def acceptRecord(self, item):
    """
    Closes the tree popup and makes the given item's record current.

    :param item | tree item whose record() becomes the current record
    """
    selected = item.record()
    popup = self.treePopupWidget()
    popup.close()
    self.setCurrentRecord(selected)
|
Closes the tree popup and sets the current record.
:param record | <orb.Table>
|
def _publish_message(host, amqp_settings, routing_key, data):
    """Publish an AMQP message.

    The special host "stdout" short-circuits publication and prints the
    payload instead (useful for debugging).

    Returns:
        bool: True if message was sent successfully.
    """
    if host == "stdout":
        print("Published to %s: %s" % (routing_key, data))
        return True
    # connect to the broker; omitted settings fall back to amqp defaults
    try:
        conn = Connection(**remove_nones(
            host=host,
            userid=amqp_settings.get("userid"),
            password=amqp_settings.get("password"),
            connect_timeout=amqp_settings.get("connect_timeout")
        ))
    except socket.error as e:
        print_error("Cannot connect to the message broker: %s" % (e))
        return False
    channel = conn.channel()
    # build the JSON message
    message = basic_message.Message(**remove_nones(
        body=json.dumps(data),
        delivery_mode=amqp_settings.get("message_delivery_mode"),
        content_type="application/json",
        content_encoding="utf-8"
    ))
    # publish the message
    try:
        channel.basic_publish(
            message, amqp_settings["exchange_name"], routing_key)
    except Exception as e:
        print_error("Failed to publish message: %s" % (e))
        return False
    return True
|
Publish an AMQP message.
Returns:
bool: True if message was sent successfully.
|
def _add_generic(self, start_node, type_name, group_type_name, args, kwargs,
                 add_prefix=True, check_naming=True):
    """Adds a given item to the tree irrespective of the subtree.

    Infers the subtree from the arguments.

    :param start_node: The parental node the adding was initiated from

    :param type_name:
        The type of the new instance. Whether it is a parameter, parameter group, config,
        config group, etc. See the name of the corresponding constants at the top of this
        python module.

    :param group_type_name:
        Type of the subbranch. i.e. whether the item is added to the 'parameters',
        'results' etc. These subbranch types are named as the group names
        (e.g. 'PARAMETER_GROUP') in order to have less constants.
        For all constants used see beginning of this python module.

    :param args:
        Arguments specifying how the item is added.

        If len(args)==1 and the argument is a given instance of a result or parameter,
        this one is added to the tree.

        Otherwise it is checked if the first argument is a class specifying how to
        construct a new item and the second argument is the name of the new class.

        If the first argument is not a class but a string, the string is assumed to be
        the name of the new instance.

        Additional args are later on used for the construction of the instance.

    :param kwargs:
        Additional keyword arguments that might be handed over to the instance constructor.

    :param add_prefix:
        If a prefix group, i.e. `results`, `config`, etc. should be added

    :param check_naming:
        If it should be checked for correct namings, can be set to ``False`` if data is loaded
        and we know that all names are correct.

    :return: The new added instance
    """
    args = list(args)
    create_new = True
    name = ''
    instance = None
    constructor = None
    add_link = type_name == LINK
    # First check if the item is already a given instance or we want to add a link
    if add_link:
        # links are given as (link_name, instance_to_link_to)
        name = args[0]
        instance = args[1]
        create_new = False
    elif len(args) == 1 and len(kwargs) == 0:
        item = args[0]
        try:
            name = item.v_full_name
            instance = item
            create_new = False
        except AttributeError:
            # not an existing leaf/group instance; fall through to construction
            pass
    # If the item is not an instance yet, check if args[0] is a class and args[1] is
    # a string describing the new name of the instance.
    # If args[0] is not a class it is assumed to be the name of the new instance.
    if create_new:
        if len(args) > 0 and inspect.isclass(args[0]):
            constructor = args.pop(0)
        if len(args) > 0 and isinstance(args[0], str):
            name = args.pop(0)
        elif 'name' in kwargs:
            name = kwargs.pop('name')
        elif 'full_name' in kwargs:
            name = kwargs.pop('full_name')
        else:
            raise ValueError('Could not determine a name of the new item you want to add. '
                             'Either pass the name as positional argument or as a keyword '
                             'argument `name`.')
    split_names = name.split('.')
    if check_naming:
        # expand shortcuts and wildcards in every path component
        for idx, name in enumerate(split_names):
            translated_shortcut, name = self._translate_shortcut(name)
            replaced, name = self._replace_wildcards(name)
            if translated_shortcut or replaced:
                split_names[idx] = name
        # First check if the naming of the new item is appropriate
        faulty_names = self._check_names(split_names, start_node)
        if faulty_names:
            full_name = '.'.join(split_names)
            raise ValueError(
                'Your Parameter/Result/Node `%s` contains the following not admissible names: '
                '%s please choose other names.' % (full_name, faulty_names))
    if add_link:
        # links may only point to existing, non-root nodes inside this trajectory
        if instance is None:
            raise ValueError('You must provide an instance to link to!')
        if instance.v_is_root:
            raise ValueError('You cannot create a link to the root node')
        if start_node.v_is_root and name in SUBTREE_MAPPING:
            raise ValueError('`%s` is a reserved name for a group under root.' % name)
        if not self._root_instance.f_contains(instance, with_links=False, shortcuts=False):
            raise ValueError('You can only link to items within the trajectory tree!')
    # Check if the name fulfils the prefix conditions, if not change the name accordingly.
    if add_prefix:
        split_names = self._add_prefix(split_names, start_node, group_type_name)
    if group_type_name == GROUP:
        add_leaf = type_name != group_type_name and not add_link
        # If this is equal we add a group node
        group_type_name, type_name = self._determine_types(start_node, split_names[0],
                                                           add_leaf, add_link)
    # Check if we are allowed to add the data
    if self._root_instance._is_run and type_name in SENSITIVE_TYPES:
        raise TypeError('You are not allowed to add config or parameter data or groups '
                        'during a single run.')
    return self._add_to_tree(start_node, split_names, type_name, group_type_name, instance,
                             constructor, args, kwargs)
|
Adds a given item to the tree irrespective of the subtree.
Infers the subtree from the arguments.
:param start_node: The parental node the adding was initiated from
:param type_name:
The type of the new instance. Whether it is a parameter, parameter group, config,
config group, etc. See the name of the corresponding constants at the top of this
python module.
:param group_type_name:
Type of the subbranch. i.e. whether the item is added to the 'parameters',
'results' etc. These subbranch types are named as the group names
(e.g. 'PARAMETER_GROUP') in order to have less constants.
For all constants used see beginning of this python module.
:param args:
Arguments specifying how the item is added.
If len(args)==1 and the argument is a given instance of a result or parameter,
this one is added to the tree.
Otherwise it is checked if the first argument is a class specifying how to
construct a new item and the second argument is the name of the new class.
If the first argument is not a class but a string, the string is assumed to be
the name of the new instance.
Additional args are later on used for the construction of the instance.
:param kwargs:
Additional keyword arguments that might be handed over to the instance constructor.
:param add_prefix:
If a prefix group, i.e. `results`, `config`, etc. should be added
:param check_naming:
If it should be checked for correct namings, can be set to ``False`` if data is loaded
and we know that all names are correct.
:return: The new added instance
|
def rotate_v1(array, k):
    """
    Rotate the array right by 'k' positions, one step at a time.

    Works on a copy and returns it (the input list is left unmodified,
    despite the original docstring's claim of in-place mutation).
    T(n) - O(n*k)
    :type array: List[int]
    :type k: int
    :rtype: List[int]
    """
    array = array[:]  # operate on a copy
    n = len(array)
    if n == 0:
        return array  # nothing to rotate; avoids IndexError below
    # rotating n times is the identity, so only k % n rotations are needed
    for _ in range(k % n):
        temp = array[n - 1]
        for j in range(n - 1, 0, -1):
            array[j] = array[j - 1]
        array[0] = temp
    return array
|
Rotate the entire array 'k' times
T(n)- O(nk)
:type array: List[int]
:type k: int
:rtype: void Do not return anything, modify array in-place instead.
|
def update(self):
    """Re-persist this orm's current field values.

    Requires a primary key; raises ValueError when none is set.

    :return: bool, True if the row was updated (and this instance was
        re-populated from the query's fields), False otherwise.
    """
    fields = self.depopulate(True)
    q = self.query
    q.set_fields(fields)
    pk = self.pk
    if not pk:
        raise ValueError("You cannot update without a primary key")
    q.is_field(self.schema.pk.name, pk)
    if not q.update():
        return False
    self._populate(q.fields)
    return True
|
re-persist the updated field values of this orm that has a primary key
|
def generate_monthly(rain_day_threshold, day_end_hour, use_dst,
                     daily_data, monthly_data, process_from):
    """Generate monthly summaries from daily data.

    Incrementally extends ``monthly_data`` in place with one summary per
    (meteorological) month computed from ``daily_data``.

    :param rain_day_threshold: rainfall threshold forwarded to ``MonthAcc``.
    :param day_end_hour: local hour at which a day ends; values >= 12 shift
        the month start to the last day of the previous month.
    :param use_dst: whether local times observe daylight saving.
    :param daily_data: source store of daily summaries.
    :param monthly_data: destination store, truncated from ``start`` and
        regenerated.
    :param process_from: optional earliest timestamp to (re)process.
    :return: the timestamp processing started from, or None when there is
        no daily data to process.
    """
    # resume after the last stored monthly record, unless process_from
    # asks for an earlier restart
    start = monthly_data.before(datetime.max)
    if start is None:
        start = datetime.min
    start = daily_data.after(start + SECOND)
    if process_from:
        if start:
            start = min(start, process_from)
        else:
            start = process_from
    if start is None:
        return start
    # set start to start of first day of month (local time)
    start = timezone.local_replace(
        start, use_dst=use_dst, day=1, hour=day_end_hour, minute=0, second=0)
    if day_end_hour >= 12:
        # month actually starts on the last day of previous month
        start -= DAY
    # stored summaries from start onwards are stale; they get regenerated
    del monthly_data[start:]
    stop = daily_data.before(datetime.max)
    if stop is None:
        return None
    acc = MonthAcc(rain_day_threshold)
    def monthlygen(inputdata):
        """Internal generator function"""
        month_start = start
        count = 0
        while month_start <= stop:
            count += 1
            # log at INFO every twelfth month to show progress
            if count % 12 == 0:
                logger.info("monthly: %s", month_start.isoformat(' '))
            else:
                logger.debug("monthly: %s", month_start.isoformat(' '))
            # step one calendar month forward; adding a week before the
            # replace() avoids day-of-month overflow at month ends
            month_end = month_start + WEEK
            if month_end.month < 12:
                month_end = month_end.replace(month=month_end.month+1)
            else:
                month_end = month_end.replace(month=1, year=month_end.year+1)
            month_end = month_end - WEEK
            if use_dst:
                # month might straddle summer time start or end
                month_end = timezone.local_replace(
                    month_end + HOURx3, use_dst=use_dst, hour=day_end_hour)
            acc.reset()
            for data in inputdata[month_start:month_end]:
                acc.add_daily(data)
            new_data = acc.result()
            if new_data:
                new_data['start'] = month_start
                yield new_data
            month_start = month_end
    monthly_data.update(monthlygen(daily_data))
    return start
|
Generate monthly summaries from daily data.
|
def get_lines_from_file(filename, lineno, context_lines, loader=None, module_name=None):
    """
    Returns context_lines before and after lineno from file.

    Returns (pre_context, context_line, post_context), or
    (None, None, None) when the source cannot be read.
    """
    lineno = lineno - 1  # convert to 0-based index
    lower_bound = max(0, lineno - context_lines)
    upper_bound = lineno + context_lines
    # prefer the loader (e.g. zipimport) over reading the file directly
    if loader is not None and hasattr(loader, "get_source"):
        result = get_source_lines_from_loader(loader, module_name, lineno, lower_bound, upper_bound)
        if result is not None:
            return result
    # NOTE: the original kept a dead `source = None` variable guarding an
    # always-true `if source is None:`; it has been removed.
    try:
        with open(filename, "rb") as file_obj:
            encoding = "utf8"
            # try to find encoding of source file by "coding" header
            # if none is found, utf8 is used as a fallback
            for line in itertools.islice(file_obj, 0, 2):
                match = _coding_re.search(line.decode("utf8"))
                if match:
                    encoding = match.group(1)
                    break
            file_obj.seek(0)
            lines = [
                compat.text_type(line, encoding, "replace")
                for line in itertools.islice(file_obj, lower_bound, upper_bound + 1)
            ]
            offset = lineno - lower_bound
            return (
                [l.strip("\r\n") for l in lines[0:offset]],
                lines[offset].strip("\r\n"),
                [l.strip("\r\n") for l in lines[offset + 1 :]] if len(lines) > offset else [],
            )
    except (OSError, IOError, IndexError):
        # unreadable file or lineno past end-of-file
        pass
    return None, None, None
|
Returns context_lines before and after lineno from file.
Returns (pre_context_lineno, pre_context, context_line, post_context).
|
def store(self, database, validate=True, role=None):
    """Store the document in the given database.

    :param database: the `Database` object source for storing the document.
    :param validate: when True (the default), validate the document first.
    :param role: optional serialization role forwarded to `to_primitive`.
    :return: an updated instance of `Document` / self.
    """
    if validate:
        self.validate()
    payload = self.to_primitive(role=role)
    self._id, self._rev = database.save(payload)
    return self
|
Store the document in the given database.
:param database: the `Database` object source for storing the document.
:return: an updated instance of `Document` / self.
|
def batch_run(self, *commands):
    """ Run batch of commands in sequence.

    Input is positional arguments with (function pointer, *args) tuples.

    Instead of repeating each command `repeat_commands` times before
    moving on, the whole sequence is executed once per repeat round.
    This gets the first command to every group quickly; the remaining
    rounds act as retries. For example, with three repeats and 13
    commands at ~100ms each, total time is unchanged (~3.9s) but every
    group has received its command once within ~1.3s.

    Usage:
        led.batch_run((led.set_color, "red", 1), (led.set_brightness, 10, 1), (led.set_color, "white", 3), ...)
    """
    original_retries = self.repeat_commands
    # temporarily disable per-command repeats; we repeat the whole batch
    self.repeat_commands = 1
    try:
        for _ in range(original_retries):
            for command in commands:
                func = command[0]
                args = command[1:]
                func(*args)
    finally:
        # BUG FIX: restore even if a command raises, so repeat_commands
        # is not left stuck at 1 for subsequent calls.
        self.repeat_commands = original_retries
|
Run batch of commands in sequence.
Input is positional arguments with (function pointer, *args) tuples.
This method is useful for executing commands to multiple groups with retries,
without having too long delays. For example,
- Set group 1 to red and brightness to 10%
- Set group 2 to red and brightness to 10%
- Set group 3 to white and brightness to 100%
- Turn off group 4
With three repeats, running these consecutively takes approximately 100ms * 13 commands * 3 times = 3.9 seconds.
With batch_run, execution takes same time, but first loop - each command is sent once to every group -
is finished within 1.3 seconds. After that, each command is repeated two times. Most of the time, this ensures
slightly faster changes for each group.
Usage:
led.batch_run((led.set_color, "red", 1), (led.set_brightness, 10, 1), (led.set_color, "white", 3), ...)
|
def _get_schema_loader(self, strict=False):
    """Return a ``schema.load_schema`` closure bound to this Opsview version.

    :param strict: forwarded to ``schema.load_schema``.
    """
    return functools.partial(
        schema.load_schema,
        version=self.version,
        strict=strict,
    )
|
Gets a closure for schema.load_schema with the correct/current
Opsview version
|
def readin_rho(filename, rhofile=True, aniso=False):
    """Read in the values of the resistivity in Ohmm.

    Two formats are supported: a rho-file (linear values) or a mag-file
    (log10 values, converted back to linear here).

    :param filename: path to read; when None, defaults to 'rho/rho.dat'
        for rho-files and to the file named by ``read_iter()`` otherwise.
    :param rhofile: True for rho-file format, False for mag-file format.
    :param aniso: if True, read the anisotropic column sets.
    """
    # column selection: [rho-file columns, mag-file columns]
    if aniso:
        cols = [[0, 1, 2], [2, 3, 4]]
    else:
        cols = [0, 2]
    if rhofile:
        path = filename if filename is not None else 'rho/rho.dat'
        with open(path, 'r') as fid:
            mag = np.loadtxt(fid, skiprows=1, usecols=(cols[0]))
    else:
        path = filename if filename is not None else read_iter()
        with open(path, 'r') as fid:
            # mag-files store log10 resistivities
            mag = np.power(10, np.loadtxt(fid, skiprows=1, usecols=(cols[1])))
    return mag
|
Read in the values of the resistivity in Ohmm.
The format is variable: rho-file or mag-file.
|
def sighash(self, sighash_type, index=0, joinsplit=False, script_code=None,
            anyone_can_pay=False, prevout_value=None):
    '''
    Compute the ZIP243 signature hash for this transaction.

    ZIP243
    https://github.com/zcash/zips/blob/master/zip-0243.rst

    :param sighash_type: base SIGHASH type flag.
    :param index: index of the tx input being signed (ignored for joinsplits).
    :param joinsplit: True when signing a joinsplit rather than a tx input.
    :param script_code: script code of the spent output — presumably
        required when joinsplit is False (TODO confirm at call sites).
    :param anyone_can_pay: whether to apply SIGHASH_ANYONECANPAY; not
        permitted together with joinsplit.
    :param prevout_value: serialized value of the spent output —
        presumably required when joinsplit is False.
    :return: 32-byte BLAKE2b digest personalised with 'ZcashSigHash'
        plus the Sapling consensus branch id bytes.
    '''
    if joinsplit and anyone_can_pay:
        raise ValueError('ANYONECANPAY can\'t be used with joinsplits')
    # assemble the ZIP243 hash preimage in specification order
    data = z.ZcashByteData()
    data += self.header
    data += self.group_id
    data += self._hash_prevouts(anyone_can_pay)
    data += self._hash_sequence(sighash_type, anyone_can_pay)
    data += self._hash_outputs(sighash_type, index)
    data += self._hash_joinsplits()
    data += self._hash_shielded_spends()
    data += self._hash_shielded_outputs()
    data += self.lock_time
    data += self.expiry_height
    data += self.value_balance
    if anyone_can_pay:
        sighash_type = sighash_type | shared.SIGHASH_ANYONECANPAY
    # the (possibly modified) sighash type is always serialized
    data += utils.i2le_padded(sighash_type, 4)
    if not joinsplit:
        # per-input fields only apply when signing a transparent input
        data += self.tx_ins[index].outpoint
        data += script_code
        data += prevout_value
        data += self.tx_ins[index].sequence
    return utils.blake2b(
        data=data.to_bytes(),
        digest_size=32,
        person=b'ZcashSigHash' + bytes.fromhex('bb09b876'))
|
ZIP243
https://github.com/zcash/zips/blob/master/zip-0243.rst
|
def _validate_caller_vcf(call_vcf, truth_vcf, callable_bed, svcaller, work_dir, data):
    """Validate a caller VCF against truth within callable regions using SURVIVOR.

    Both inputs are normalized with `_prep_vcf`, merged with SURVIVOR merge
    (https://github.com/fritzsedlazeck/SURVIVOR/), and the merged calls are
    summarized into per-caller comparison statistics.
    """
    sample = dd.get_sample_name(data)
    stats = _calculate_comparison_stats(truth_vcf)
    prepped_call = _prep_vcf(call_vcf, callable_bed, sample, sample,
                             stats, work_dir, data)
    truth_sample = vcfutils.get_samples(truth_vcf)[0]
    prepped_truth = _prep_vcf(truth_vcf, callable_bed, truth_sample,
                              "%s-truth" % sample, stats, work_dir, data)
    merged = _survivor_merge(prepped_call, prepped_truth, stats, work_dir, data)
    return _comparison_stats_from_merge(merged, stats, svcaller, data)
|
Validate a caller VCF against truth within callable regions using SURVIVOR.
Combines files with SURVIVOR merge and counts (https://github.com/fritzsedlazeck/SURVIVOR/)
|
def _ValidateDataTypeDefinition(cls, data_type_definition):
"""Validates the data type definition.
Args:
data_type_definition (DataTypeDefinition): data type definition.
Raises:
ValueError: if the data type definition is not considered valid.
"""
if not cls._IsIdentifier(data_type_definition.name):
raise ValueError(
'Data type definition name: {0!s} not a valid identifier'.format(
data_type_definition.name))
if keyword.iskeyword(data_type_definition.name):
raise ValueError(
'Data type definition name: {0!s} matches keyword'.format(
data_type_definition.name))
members = getattr(data_type_definition, 'members', None)
if not members:
raise ValueError(
'Data type definition name: {0!s} missing members'.format(
data_type_definition.name))
defined_attribute_names = set()
for member_definition in members:
attribute_name = member_definition.name
if not cls._IsIdentifier(attribute_name):
raise ValueError('Attribute name: {0!s} not a valid identifier'.format(
attribute_name))
if attribute_name.startswith('_'):
raise ValueError('Attribute name: {0!s} starts with underscore'.format(
attribute_name))
if keyword.iskeyword(attribute_name):
raise ValueError('Attribute name: {0!s} matches keyword'.format(
attribute_name))
if attribute_name in defined_attribute_names:
raise ValueError('Attribute name: {0!s} already defined'.format(
attribute_name))
defined_attribute_names.add(attribute_name)
|
Validates the data type definition.
Args:
data_type_definition (DataTypeDefinition): data type definition.
Raises:
ValueError: if the data type definition is not considered valid.
|
def decode_bbox_target(box_predictions, anchors):
    """
    Decode (tx, ty, tw, th) regression logits back into absolute boxes.

    Args:
        box_predictions: (..., 4), logits
        anchors: (..., 4), floatbox. Must have the same shape

    Returns:
        box_decoded: (..., 4), float32. With the same shape.
    """
    orig_shape = tf.shape(anchors)
    # reshape to (-1, 2, 2) so the (tx,ty) and (tw,th) pairs can be split
    pred_pairs = tf.reshape(box_predictions, (-1, 2, 2))
    txty, twth = tf.split(pred_pairs, 2, axis=1)
    # each is (...)x1x2
    anchor_pairs = tf.reshape(anchors, (-1, 2, 2))
    anchors_min, anchors_max = tf.split(anchor_pairs, 2, axis=1)
    anchor_sizes = anchors_max - anchors_min
    anchor_centers = (anchors_max + anchors_min) * 0.5
    # clamp log-size so exp() cannot exceed the maximum preprocessed size
    clip = np.log(config.PREPROC.MAX_SIZE / 16.)
    decoded_sizes = tf.exp(tf.minimum(twth, clip)) * anchor_sizes
    decoded_centers = txty * anchor_sizes + anchor_centers
    top_left = decoded_centers - decoded_sizes * 0.5
    bottom_right = decoded_centers + decoded_sizes * 0.5  # (...)x1x2
    decoded = tf.concat([top_left, bottom_right], axis=-2)
    return tf.reshape(decoded, orig_shape)
|
Args:
box_predictions: (..., 4), logits
anchors: (..., 4), floatbox. Must have the same shape
Returns:
box_decoded: (..., 4), float32. With the same shape.
|
async def serve(
    app: ASGIFramework,
    config: Config,
    *,
    task_status: trio._core._run._TaskStatus = trio.TASK_STATUS_IGNORED,
) -> None:
    """Serve an ASGI framework app given the config.

    This allows for a programmatic way to serve an ASGI framework, it
    can be used via,

    .. code-block:: python

        trio.run(partial(serve, app, config))

    It is assumed that the event-loop is configured before calling
    this function, therefore configuration values that relate to loop
    setup or process setup are ignored.
    """
    # These settings only apply to the CLI/worker entry points; warn so
    # users know they are ignored here. (Typo fix: "affect" -> "effect".)
    if config.debug:
        warnings.warn("The config `debug` has no effect when using serve", Warning)
    if config.workers != 1:
        warnings.warn("The config `workers` has no effect when using serve", Warning)
    if config.worker_class != "asyncio":
        warnings.warn("The config `worker_class` has no effect when using serve", Warning)
    await worker_serve(app, config, task_status=task_status)
|
Serve an ASGI framework app given the config.
This allows for a programmatic way to serve an ASGI framework, it
can be used via,
.. code-block:: python
trio.run(partial(serve, app, config))
It is assumed that the event-loop is configured before calling
this function, therefore configuration values that relate to loop
setup or process setup are ignored.
|
def gene_tree(
        self,
        scale_to=None,
        population_size=1,
        trim_names=True,
):
    """ Using the current tree object as a species tree, generate a gene
    tree using the constrained Kingman coalescent process from dendropy. The
    species tree should probably be a valid, ultrametric tree, generated by
    some pure birth, birth-death or coalescent process, but no checks are
    made. Optional kwargs are: -- scale_to, which is a floating point value
    to scale the total tree tip-to-root length to, -- population_size, which
    is a floating point value which all branch lengths will be divided by to
    convert them to coalescent units, and -- trim_names, boolean, defaults
    to true, trims off the number which dendropy appends to the sequence
    name

    :return: dict with keys 'gene_tree' (the simulated gene tree wrapped
        in this tree's class) and 'species_tree' (the species tree used).
    """
    # use the stored template species tree, or simulate one (Yule process)
    tree = self.template or self.yule()
    # constrained_kingman_tree expects each species leaf to carry num_genes
    for leaf in tree._tree.leaf_node_iter():
        leaf.num_genes = 1
    dfr = tree._tree.seed_node.distance_from_root()
    dft = tree._tree.seed_node.distance_from_tip()
    tree_height = dfr + dft
    if scale_to:
        # pick the population size that rescales total height to scale_to
        population_size = tree_height / scale_to
    # branch lengths are divided by pop_size to convert to coalescent units
    for edge in tree._tree.preorder_edge_iter():
        edge.pop_size = population_size
    gene_tree = dpy.simulate.treesim.constrained_kingman_tree(tree._tree)[0]
    if trim_names:
        # drop the numeric suffix dendropy appends to taxon labels
        for leaf in gene_tree.leaf_node_iter():
            leaf.taxon.label = leaf.taxon.label.replace('\'', '').split('_')[0]
    # Dendropy changed its API
    return {'gene_tree': tree.__class__(gene_tree.as_string('newick', suppress_rooting=True).strip(';\n') + ';'),
            'species_tree': tree}
|
Using the current tree object as a species tree, generate a gene
tree using the constrained Kingman coalescent process from dendropy. The
species tree should probably be a valid, ultrametric tree, generated by
some pure birth, birth-death or coalescent process, but no checks are
made. Optional kwargs are: -- scale_to, which is a floating point value
to scale the total tree tip-to-root length to, -- population_size, which
is a floating point value which all branch lengths will be divided by to
convert them to coalescent units, and -- trim_names, boolean, defaults
to true, trims off the number which dendropy appends to the sequence
name
|
def get_plugin(self, name):
    """
    Get a plugin by its name from the plugins loaded for the current namespace

    :param name: plugin name to look up
    :return: the first plugin whose ``name`` matches, or None
    """
    return next((plugin for plugin in self._plugins if plugin.name == name), None)
|
Get a plugin by its name from the plugins loaded for the current namespace
:param name:
:return:
|
def parse_def(self, text):
    """Parse a function definition and populate this object's fields.

    Resets the instance, then extracts the indentation, the annotated
    return type (if any), and the raw argument text, delegating argument
    splitting and typing to the helper methods.
    """
    self.__init__()
    if not is_start_of_function(text):
        return
    self.func_indent = get_indent(text)
    # flatten the (possibly multi-line) definition onto one line
    text = text.strip().replace('\r\n', '').replace('\n', '')
    return_match = re.search(r'->[ ]*([a-zA-Z0-9_,()\[\] ]*):$', text)
    if return_match:
        self.return_type_annotated = return_match.group(1)
        text_end = text.rfind(return_match.group(0))
    else:
        self.return_type_annotated = None
        text_end = len(text)
    # argument text sits between the first '(' and the last ')' before
    # any return annotation
    args_start = text.find('(') + 1
    args_end = text.rfind(')', args_start, text_end)
    self.args_text = text[args_start:args_end]
    args_list = self.split_args_text_to_list(self.args_text)
    if args_list is not None:
        self.has_info = True
        self.split_arg_to_name_type_value(args_list)
|
Parse the function definition text.
|
def draw(self, viewer):
    """General draw method for RGB image types.

    Note that actual insertion of the image into the output is
    handled in `draw_image()`
    """
    cache = self.get_cache(viewer)
    if not cache.drawn:
        # first time through: flag as drawn and request a redraw pass
        cache.drawn = True
        viewer.redraw(whence=2)
    cpoints = self.get_cpoints(viewer)
    cr = viewer.renderer.setup_cr(self)
    if self.linewidth <= 0:
        return  # no border requested
    # draw optional border (and caps, if enabled)
    cr.draw_polygon(cpoints)
    if self.showcap:
        self.draw_caps(cr, self.cap, cpoints)
|
General draw method for RGB image types.
Note that actual insertion of the image into the output is
handled in `draw_image()`
|
def make_geohash_tables(table,listofprecisions,**kwargs):
    '''
    Aggregate a geohash table into coarser-precision geohash tables.

    For each requested precision, geohashes are truncated, grouped and
    summed, bounding points are re-derived per truncated geohash, and the
    result is either collected (return_squares) or written to
    'squares<precision>.csv'.

    NOTE(review): Python 2 only (`iteritems`, `print` statement) and
    legacy pandas (`DataFrame.sort`).

    sort_by - field to sort by for each group
    return_squares - boolean arg if true returns a list of squares instead of writing out to table
    '''
    return_squares = False
    sort_by = 'COUNT'
    # logic for accepting kwarg inputs
    for key,value in kwargs.iteritems():
        if key == 'sort_by':
            sort_by = value
        if key == 'return_squares':
            return_squares = value
    # getting header
    header = df2list(table)[0]
    # getting columns (presumably the value columns after the geometry
    # columns — TODO confirm the fixed offset of 10)
    columns = header[10:]
    # getting original table
    originaltable = table
    if not sort_by == 'COUNT':
        originaltable = originaltable.sort([sort_by],ascending=[0])
    # handle the largest precision values (finest geohashes) first
    listofprecisions = sorted(listofprecisions,reverse=True)
    # making total table to hold a list of dfs
    if return_squares == True and listofprecisions[-1] == 8:
        total_list = [table]
    elif return_squares == True:
        total_list = []
    for row in listofprecisions:
        precision = int(row)
        table = originaltable
        # truncate each geohash to the target precision and aggregate
        table['GEOHASH'] = table.GEOHASH.str[:precision]
        table = table[['GEOHASH','COUNT']+columns].groupby(['GEOHASH'],sort=True).sum()
        table = table.sort([sort_by],ascending=[0])
        table = table.reset_index()
        newsquares = [header]
        # iterating through each square here
        for row in df2list(table)[1:]:
            # getting points
            points = get_points_geohash(row[0])
            # making new row
            newrow = [row[0]] + points + row[1:]
            # appending to newsquares
            newsquares.append(newrow)
        # taking newsquares to dataframe
        table = list2df(newsquares)
        if return_squares == True:
            total_list.append(table)
        else:
            table.to_csv('squares' + str(precision) + '.csv',index=False)
    if return_squares == True:
        return total_list
    else:
        print 'Wrote output squares tables to csv files.'
|
sort_by - field to sort by for each group
return_squares - boolean arg if true returns a list of squares instead of writing out to table
|
def ckf_transform(Xs, Q):
    """
    Compute mean and covariance of array of cubature points.

    Parameters
    ----------
    Xs : ndarray
        Cubature points, shape (m, n): m points of dimension n.
    Q : ndarray
        Noise covariance, shape (n, n).

    Returns
    -------
    mean : ndarray
        mean of the cubature points, shape (n, 1)
    variance: ndarray
        covariance matrix of the cubature points, shape (n, n)
    """
    m, _ = Xs.shape
    # mean as a column vector, matching the original (n, 1) shape
    x = np.sum(Xs, 0)[:, None] / m
    xf = x.flatten()
    # P = (1/m) * sum_k outer(Xs[k], Xs[k]) - outer(mean, mean) + Q.
    # The sum of outer products is vectorized as Xs.T @ Xs, replacing the
    # original O(m) Python loop with a single matrix product.
    P = (Xs.T @ Xs) / m - np.outer(xf, xf) + Q
    return x, P
|
Compute mean and covariance of array of cubature points.
Parameters
----------
Xs : ndarray
Cubature points
Q : ndarray
Noise covariance
Returns
-------
mean : ndarray
mean of the cubature points
variance: ndarray
covariance matrix of the cubature points
|
def add_team_repo(repo_name, team_name, profile="github", permission=None):
    '''
    Adds a repository to a team with team_name.

    repo_name
        The name of the repository to add.

    team_name
        The name of the team of which to add the repository.

    profile
        The name of the profile configuration to use. Defaults to ``github``.

    permission
        The permission for team members within the repository, can be 'pull',
        'push' or 'admin'. If not specified, the default permission specified on
        the team will be used.

        .. versionadded:: 2017.7.0

    CLI Example:

    .. code-block:: bash

        salt myminion github.add_team_repo 'my_repo' 'team_name'

    .. versionadded:: 2016.11.0
    '''
    team = get_team(team_name, profile=profile)
    if not team:
        log.error('Team %s does not exist', team_name)
        return False
    # BUG FIX: keep the raw id around. Below, `team` is rebound to a
    # PyGithub Team object, which is not subscriptable, so the original
    # `team['id']` in the exception handler would itself raise.
    team_id = team['id']
    try:
        client = _get_client(profile)
        organization = client.get_organization(
            _get_config_value(profile, 'org_name')
        )
        team = organization.get_team(team_id)
        repo = organization.get_repo(repo_name)
    except UnknownObjectException:
        log.exception('Resource not found: %s', team_id)
        return False
    params = None
    if permission is not None:
        params = {'permission': permission}
    headers, data = team._requester.requestJsonAndCheck(
        "PUT",
        team.url + "/repos/" + repo._identity,
        input=params
    )
    # Try to refresh cache
    list_team_repos(team_name, profile=profile, ignore_cache=True)
    return True
|
Adds a repository to a team with team_name.
repo_name
The name of the repository to add.
team_name
The name of the team of which to add the repository.
profile
The name of the profile configuration to use. Defaults to ``github``.
permission
The permission for team members within the repository, can be 'pull',
'push' or 'admin'. If not specified, the default permission specified on
the team will be used.
.. versionadded:: 2017.7.0
CLI Example:
.. code-block:: bash
salt myminion github.add_team_repo 'my_repo' 'team_name'
.. versionadded:: 2016.11.0
|
def PILTowx(pimg):
    '''convert a PIL Image to a wx image'''
    from MAVProxy.modules.lib.wx_loader import wx
    width, height = pimg.size
    wimg = wx.EmptyImage(width, height)
    rgb = pimg.convert('RGB')
    try:
        wimg.SetData(rgb.tobytes())
    except NotImplementedError:
        # old, removed method:
        wimg.SetData(rgb.tostring())
    return wimg
|
convert a PIL Image to a wx image
|
def _what_default(self, pronunciation):
"""Provide the default prediction of the what task.
This function is used to predict the probability of a given pronunciation being reported for a given token.
:param pronunciation: The list or array of confusion probabilities at each index
"""
token_default = self['metadata']['token_default']['what']
index_count = 2*len(pronunciation) + 1
predictions = {}
for i in range(index_count):
index_predictions = {}
if i % 2 == 0:
index_predictions.update(token_default['0'])
else:
presented_phoneme = pronunciation[int((i-1)/2)]
index_predictions[presented_phoneme] = token_default['1']['=']
index_predictions['*'] = token_default['1']['*']
index_predictions[''] = token_default['1']['']
predictions['{}'.format(i)] = index_predictions
return predictions
|
Provide the default prediction of the what task.
This function is used to predict the probability of a given pronunciation being reported for a given token.
:param pronunciation: The list or array of confusion probabilities at each index
|
def purge_stream(self, stream_id, remove_definition=False, sandbox=None):
    """
    Purge the stream

    :param stream_id: The stream identifier
    :param remove_definition: Whether to remove the stream definition as well
    :param sandbox: The sandbox for this stream
    :return: None
    :raises: NotImplementedError
    """
    # TODO: Add time interval to this
    if sandbox is not None:
        raise NotImplementedError
    try:
        stream = self.streams[stream_id]
    except KeyError:
        raise StreamNotFoundError("Stream with id '{}' not found".format(stream_id))
    query = stream_id.as_raw()
    # delete all stored instances for this stream
    with switch_db(StreamInstanceModel, 'hyperstream'):
        StreamInstanceModel.objects(__raw__=query).delete()
    # nothing is stored any more, so reset the stream's computed state
    stream.calculated_intervals = TimeIntervals([])
    if remove_definition:
        with switch_db(StreamDefinitionModel, 'hyperstream'):
            StreamDefinitionModel.objects(__raw__=query).delete()
    logging.info("Purged stream {}".format(stream_id))
|
Purge the stream
:param stream_id: The stream identifier
:param remove_definition: Whether to remove the stream definition as well
:param sandbox: The sandbox for this stream
:return: None
:raises: NotImplementedError
|
def is_empty(self):
    """Returns True if this node has no children, or if all of its children are ParseNode instances
    and are empty.
    """
    # Vacuously true for a childless node; otherwise every child must be an
    # empty ParseNode.
    for child in self.children:
        if not (isinstance(child, ParseNode) and child.is_empty):
            return False
    return True
|
Returns True if this node has no children, or if all of its children are ParseNode instances
and are empty.
|
def join(chord_root, quality='', extensions=None, bass=''):
    r"""Join the parts of a chord into a complete chord label.
    Parameters
    ----------
    chord_root : str
        Root pitch class of the chord, e.g. 'C', 'Eb'
    quality : str
        Quality of the chord, e.g. 'maj', 'hdim7'
        (Default value = '')
    extensions : list
        Any added or absent scaled degrees for this chord, e.g. ['4', '\*3']
        (Default value = None)
    bass : str
        Scale degree of the bass note, e.g. '5'.
        (Default value = '')
    Returns
    -------
    chord_label : str
        A complete chord label.
    """
    # Assemble the label piecewise: root, then ':quality', then '(ext,...)',
    # then '/bass' (a bass of '1' is the root and is therefore omitted).
    pieces = [chord_root]
    if quality or extensions:
        pieces.append(":%s" % quality)
    if extensions:
        pieces.append("(%s)" % ",".join(extensions))
    if bass and bass != '1':
        pieces.append("/%s" % bass)
    chord_label = "".join(pieces)
    validate_chord_label(chord_label)
    return chord_label
|
r"""Join the parts of a chord into a complete chord label.
Parameters
----------
chord_root : str
Root pitch class of the chord, e.g. 'C', 'Eb'
quality : str
Quality of the chord, e.g. 'maj', 'hdim7'
(Default value = '')
extensions : list
Any added or absent scaled degrees for this chord, e.g. ['4', '\*3']
(Default value = None)
bass : str
Scale degree of the bass note, e.g. '5'.
(Default value = '')
Returns
-------
chord_label : str
A complete chord label.
|
def get_all_alert(self, **kwargs):  # noqa: E501
    """Get all alerts for a customer  # noqa: E501
    # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_all_alert(async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param int offset:
    :param int limit:
    :return: ResponseContainerPagedAlert
    If the method is called asynchronously,
    returns the request thread.
    """
    # Always request the payload only. The *_with_http_info variant already
    # returns a request thread when async_req is set and the data otherwise,
    # so its result can be returned directly in both modes.
    kwargs['_return_http_data_only'] = True
    return self.get_all_alert_with_http_info(**kwargs)  # noqa: E501
|
Get all alerts for a customer # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all_alert(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int offset:
:param int limit:
:return: ResponseContainerPagedAlert
If the method is called asynchronously,
returns the request thread.
|
def remove(self, element, multiplicity=None):
    """Removes an element from the multiset.
    If no multiplicity is specified, the element is completely removed from the multiset:
    >>> ms = Multiset('aabbbc')
    >>> ms.remove('a')
    2
    >>> sorted(ms)
    ['b', 'b', 'b', 'c']
    If the multiplicity is given, it is subtracted from the element's multiplicity in the multiset:
    >>> ms.remove('b', 2)
    3
    >>> sorted(ms)
    ['b', 'c']
    It is not an error to remove more elements than are in the set:
    >>> ms.remove('b', 2)
    1
    >>> sorted(ms)
    ['c']
    This extends the :meth:`MutableSet.remove` signature to allow specifying the multiplicity.
    Args:
        element:
            The element to remove from the multiset.
        multiplicity:
            An optional multiplicity i.e. count of elements to remove.
    Returns:
        The multiplicity of the element in the multiset before
        the removal.
    Raises:
        KeyError: if the element is not contained in the set. Use :meth:`discard` if
            you do not want an exception to be raised.
        ValueError: if the multiplicity is negative.
    """
    _elements = self._elements
    if element not in _elements:
        # Include the missing element in the exception for a useful message.
        raise KeyError(element)
    # Membership was just checked, so a plain lookup is safe here.
    old_multiplicity = _elements[element]
    if multiplicity is None or multiplicity >= old_multiplicity:
        # Remove the element entirely (also covers removing "too many").
        del _elements[element]
        self._total -= old_multiplicity
    elif multiplicity < 0:
        raise ValueError("Multiplicity must not be negative")
    elif multiplicity > 0:
        # Partial removal: decrement the count; multiplicity == 0 is a no-op.
        _elements[element] -= multiplicity
        self._total -= multiplicity
    return old_multiplicity
|
Removes an element from the multiset.
If no multiplicity is specified, the element is completely removed from the multiset:
>>> ms = Multiset('aabbbc')
>>> ms.remove('a')
2
>>> sorted(ms)
['b', 'b', 'b', 'c']
If the multiplicity is given, it is subtracted from the element's multiplicity in the multiset:
>>> ms.remove('b', 2)
3
>>> sorted(ms)
['b', 'c']
It is not an error to remove more elements than are in the set:
>>> ms.remove('b', 2)
1
>>> sorted(ms)
['c']
This extends the :meth:`MutableSet.remove` signature to allow specifying the multiplicity.
Args:
element:
The element to remove from the multiset.
multiplicity:
An optional multiplicity i.e. count of elements to remove.
Returns:
The multiplicity of the element in the multiset before
the removal.
Raises:
KeyError: if the element is not contained in the set. Use :meth:`discard` if
you do not want an exception to be raised.
|
def resolve_memory_access(self, tb, x86_mem_operand):
    """Return operand memory access translation.

    Emits REIL instructions into *tb* that compute the effective address
    ``base + index*scale + displacement [+ segment base]`` and returns the
    operand (register or immediate) holding the final address.

    :param tb: translation builder providing temporaries (``tb.temporal``)
        and the REIL instruction builder (``tb._builder``).
    :param x86_mem_operand: x86 memory operand with ``base``, ``index``,
        ``scale``, ``displacement`` and ``segment`` attributes.
    """
    size = self.__get_memory_access_size(x86_mem_operand)
    addr = None
    # Start with the base register, if the operand has one.
    if x86_mem_operand.base:
        addr = ReilRegisterOperand(x86_mem_operand.base, size)
    # Add index*scale; a scale of 0 means no index contribution.
    if x86_mem_operand.index and x86_mem_operand.scale != 0x0:
        index = ReilRegisterOperand(x86_mem_operand.index, size)
        scale = ReilImmediateOperand(x86_mem_operand.scale, size)
        scaled_index = tb.temporal(size)
        tb.add(tb._builder.gen_mul(index, scale, scaled_index))
        if addr:
            tmp = tb.temporal(size)
            tb.add(tb._builder.gen_add(addr, scaled_index, tmp))
            addr = tmp
        else:
            addr = scaled_index
    # Fold in the displacement; when nothing else contributed, the
    # displacement alone (possibly 0) becomes the address.
    if x86_mem_operand.displacement != 0x0:
        disp = ReilImmediateOperand(x86_mem_operand.displacement, size)
        if addr:
            tmp = tb.temporal(size)
            tb.add(tb._builder.gen_add(addr, disp, tmp))
            addr = tmp
        else:
            addr = disp
    else:
        if not addr:
            disp = ReilImmediateOperand(x86_mem_operand.displacement, size)
            addr = disp
    # TODO Improve this code and add support for the rest of the segments.
    # Only fs/gs segment overrides are honored, via pseudo-registers that
    # hold the segment base addresses.
    if x86_mem_operand.segment in ["gs", "fs"]:
        seg_base_addr_map = {
            "gs": "gs_base_addr",
            "fs": "fs_base_addr",
        }
        seg_base = ReilRegisterOperand(seg_base_addr_map[x86_mem_operand.segment], size)
        if addr:
            tmp = tb.temporal(size)
            tb.add(tb._builder.gen_add(addr, seg_base, tmp))
            addr = tmp
        else:
            addr = seg_base
    return addr
|
Return operand memory access translation.
|
def change_dir(self, session, path):
    """
    Changes the working directory

    :param session: Shell session used to print the new directory or the error
    :param path: Target directory, or "-" to return to the previous directory
    """
    if path == "-":
        # Shell-like behavior: "-" jumps back to the previously visited
        # directory (or stays put if none was recorded yet).
        path = self._previous_path or "."
    try:
        previous = os.getcwd()
        # os.chdir raises OSError on failure (IOError is only an alias of
        # OSError on Python 3, so catch OSError to also cover Python 2).
        os.chdir(path)
    except OSError as ex:
        # Can't change directory
        session.write_line("Error changing directory: {0}", ex)
    else:
        # Store previous path only on success
        self._previous_path = previous
        session.write_line(os.getcwd())
|
Changes the working directory
|
def LogoOverlay(sites, overlayfile, overlay, nperline, sitewidth, rmargin, logoheight, barheight, barspacing, fix_limits={}, fixlongname=False, overlay_cmap=None, underlay=False, scalebar=False):
    """Makes overlay for *LogoPlot*.
    This function creates colored bars overlay bars showing up to two
    properties.
    The trick of this function is to create the bars the right
    size so they align when they overlay the logo plot.
    CALLING VARIABLES:
    * *sites* : same as the variable of this name used by *LogoPlot*.
    * *overlayfile* is a string giving the name of created PDF file containing
    the overlay. It must end in the extension ``.pdf``.
    * *overlay* : same as the variable of this name used by *LogoPlot*.
    * *nperline* : same as the variable of this name used by *LogoPlot*.
    * *sitewidth* is the width of each site in points.
    * *rmargin* is the right margin in points.
    * *logoheight* is the total height of each logo row in points.
    * *barheight* is the total height of each bar in points.
    * *barspacing* is the vertical spacing between bars in points.
    * *fix_limits* has the same meaning of the variable of this name used by *LogoPlot*.
      (NOTE(review): the default is a shared mutable dict; it is only read
      here, never mutated, so the shared default is harmless.)
    * *fixlongname* has the same meaning of the variable of this name used by *LogoPlot*.
    * *overlay_cmap* has the same meaning of the variable of this name used by *LogoPlot*.
    * *underlay* is a bool. If `True`, make an underlay rather than an overlay.
    * *scalebar*: if not `False`, is 2-tuple `(scalebarheight, scalebarlabel)`
    where `scalebarheight` is in points.
    """
    if os.path.splitext(overlayfile)[1] != '.pdf':
        raise ValueError("overlayfile must end in .pdf: %s" % overlayfile)
    if not overlay_cmap:
        (cmap, mapping_d, mapper) = KyteDoolittleColorMapping()
    else:
        mapper = pylab.cm.ScalarMappable(cmap=overlay_cmap)
        cmap = mapper.get_cmap()
    pts_per_inch = 72.0 # to convert between points and inches
    # some general properties of the plot
    matplotlib.rc('text', usetex=True)
    matplotlib.rc('xtick', labelsize=8)
    matplotlib.rc('xtick', direction='out')
    matplotlib.rc('ytick', direction='out')
    matplotlib.rc('axes', linewidth=0.5)
    matplotlib.rc('ytick.major', size=3)
    matplotlib.rc('xtick.major', size=2.5)
    # define sizes (still in points)
    colorbar_bmargin = 20 # margin below color bars in points
    colorbar_tmargin = 15 # margin above color bars in points
    nlines = int(math.ceil(len(sites) / float(nperline)))
    lmargin = 25 # left margin in points
    barwidth = nperline * sitewidth
    figwidth = lmargin + rmargin + barwidth
    # Total height: all logo rows (each with its overlay bars), one row of
    # color bars with margins, plus extra bar rows when drawing an underlay.
    figheight = nlines * (logoheight + len(overlay) * (barheight +
            barspacing)) + (barheight + colorbar_bmargin + colorbar_tmargin) + (
            int(underlay) * len(overlay) * (barheight + barspacing))
    # set up the figure and axes
    fig = pylab.figure(figsize=(figwidth / pts_per_inch, figheight / pts_per_inch))
    # determine property types
    # For each overlay property, classify it as 'wildtype' (one-letter
    # strings), 'discrete' (other strings) or 'continuous' (numbers), and
    # record the value range / category list used for coloring.
    prop_types = {}
    for (prop_d, shortname, longname) in overlay:
        if shortname == longname == 'wildtype':
            assert all([(isinstance(prop, str) and len(prop) == 1) for
                    prop in prop_d.values()]), 'prop_d does not give letters'
            proptype = 'wildtype'
            (vmin, vmax) = (0, 1) # not used, but need to be assigned
            propcategories = None # not used, but needs to be assigned
        elif all([isinstance(prop, str) for prop in prop_d.values()]):
            proptype = 'discrete'
            propcategories = list(set(prop_d.values()))
            propcategories.sort()
            (vmin, vmax) = (0, len(propcategories) - 1)
        elif all ([isinstance(prop, (int, float)) for prop in prop_d.values()]):
            proptype = 'continuous'
            propcategories = None
            (vmin, vmax) = (min(prop_d.values()), max(prop_d.values()))
            # If vmin is slightly greater than zero, set it to zero. This helps for RSA properties.
            if vmin >= 0 and vmin / float(vmax - vmin) < 0.05:
                vmin = 0.0
            # And if vmax is just a bit less than one, set it to that...
            if 0.9 <= vmax <= 1.0:
                vmax = 1.0
        else:
            raise ValueError("Property %s is neither continuous or discrete. Values are:\n%s" % (shortname, str(prop_d.items())))
        if shortname in fix_limits:
            (vmin, vmax) = (min(fix_limits[shortname][0]), max(fix_limits[shortname][0]))
        assert vmin < vmax, "vmin >= vmax, did you incorrectly use fix_vmin and fix_vmax?"
        prop_types[shortname] = (proptype, vmin, vmax, propcategories)
    assert len(prop_types) == len(overlay), "Not as many property types as overlays. Did you give the same name (shortname) to multiple properties in the overlay?"
    # loop over each line of the multi-lined plot
    # prop_image keeps the last imshow handle per property so the color bars
    # below can be built from it.
    prop_image = {}
    for iline in range(nlines):
        isites = sites[iline * nperline : min(len(sites), (iline + 1) * nperline)]
        xlength = len(isites) * sitewidth
        logo_ax = pylab.axes([lmargin / figwidth, ((nlines - iline - 1) * (logoheight + len(overlay) * (barspacing + barheight))) / figheight, xlength / figwidth, logoheight / figheight], frameon=False)
        logo_ax.yaxis.set_ticks_position('none')
        logo_ax.xaxis.set_ticks_position('none')
        pylab.yticks([])
        pylab.xlim(0.5, len(isites) + 0.5)
        pylab.xticks([])
        for (iprop, (prop_d, shortname, longname)) in enumerate(overlay):
            (proptype, vmin, vmax, propcategories) = prop_types[shortname]
            # Bars sit above the logo row for an overlay, below it for an
            # underlay; the y-offset formula encodes both cases.
            prop_ax = pylab.axes([
                    lmargin / figwidth,
                    ((nlines - iline - 1) * (logoheight +
                    len(overlay) * (barspacing + barheight)) +
                    (1 - int(underlay)) * logoheight + int(underlay) *
                    barspacing + iprop * (barspacing + barheight))
                    / figheight,
                    xlength / figwidth,
                    barheight / figheight],
                    frameon=(proptype != 'wildtype'))
            prop_ax.xaxis.set_ticks_position('none')
            pylab.xticks([])
            pylab.xlim((0, len(isites)))
            pylab.ylim(-0.5, 0.5)
            if proptype == 'wildtype':
                # Wildtype "bars" are just the letters, no colored image.
                pylab.yticks([])
                prop_ax.yaxis.set_ticks_position('none')
                for (isite, site) in enumerate(isites):
                    pylab.text(isite + 0.5, -0.5, prop_d[site], size=9,
                            horizontalalignment='center', family='monospace')
                continue
            pylab.yticks([0], [shortname], size=8)
            prop_ax.yaxis.set_ticks_position('left')
            propdata = pylab.zeros(shape=(1, len(isites)))
            propdata[ : ] = pylab.nan # set to nan for all entries
            for (isite, site) in enumerate(isites):
                if site in prop_d:
                    if proptype == 'continuous':
                        propdata[(0, isite)] = prop_d[site]
                    elif proptype == 'discrete':
                        propdata[(0, isite)] = propcategories.index(prop_d[site])
                    else:
                        raise ValueError('neither continuous nor discrete')
            prop_image[shortname] = pylab.imshow(propdata, interpolation='nearest', aspect='auto', extent=[0, len(isites), 0.5, -0.5], cmap=cmap, vmin=vmin, vmax=vmax)
            pylab.yticks([0], [shortname], size=8)
    # set up colorbar axes, then color bars
    # Wildtype properties get no color bar; the scale bar (if any) takes one
    # extra slot.
    ncolorbars = len([p for p in prop_types.values() if p[0] != 'wildtype'])
    if scalebar:
        ncolorbars += 1
    if ncolorbars == 1:
        colorbarwidth = 0.4
        colorbarspacingwidth = 1.0 - colorbarwidth
    elif ncolorbars:
        colorbarspacingfrac = 0.5 # space between color bars is this fraction of bar width
        colorbarwidth = 1.0 / (ncolorbars * (1.0 + colorbarspacingfrac)) # width of color bars in fraction of figure width
        colorbarspacingwidth = colorbarwidth * colorbarspacingfrac # width of color bar spacing in fraction of figure width
    # bottom of color bars
    ybottom = 1.0 - (colorbar_tmargin + barheight) / figheight
    propnames = {}
    # icolorbar starts at -1 so that the first pass through the loop can draw
    # the optional scale bar before the per-property color bars.
    icolorbar = -1
    icolorbarshift = 0
    while icolorbar < len(overlay):
        if icolorbar == -1:
            # show scale bar if being used
            icolorbar += 1
            if scalebar:
                (scalebarheight, scalebarlabel) = scalebar
                xleft = (colorbarspacingwidth * 0.5 + icolorbar *
                        (colorbarwidth + colorbarspacingwidth))
                ytop = 1 - colorbar_tmargin / figheight
                scalebarheightfrac = scalebarheight / figheight
                # follow here for fig axes: https://stackoverflow.com/a/5022412
                fullfigax = pylab.axes([0, 0, 1, 1], facecolor=(1, 1, 1, 0))
                fullfigax.axvline(x=xleft, ymin=ytop - scalebarheightfrac,
                        ymax=ytop, color='black', linewidth=1.5)
                pylab.text(xleft + 0.005, ytop - scalebarheightfrac / 2.0,
                        scalebarlabel, verticalalignment='center',
                        horizontalalignment='left',
                        transform=fullfigax.transAxes)
            continue
        (prop_d, shortname, longname) = overlay[icolorbar]
        icolorbar += 1
        (proptype, vmin, vmax, propcategories) = prop_types[shortname]
        if proptype == 'wildtype':
            # No color bar for wildtype; remember the skip so later bars shift left.
            icolorbarshift += 1
            continue
        if shortname == longname or not longname:
            propname = shortname
        elif fixlongname:
            propname = longname
        else:
            propname = "%s (%s)" % (longname, shortname)
        colorbar_ax = pylab.axes([colorbarspacingwidth * 0.5 + (icolorbar - icolorbarshift - int(not bool(scalebar))) * (colorbarwidth + colorbarspacingwidth), ybottom, colorbarwidth, barheight / figheight], frameon=True)
        colorbar_ax.xaxis.set_ticks_position('bottom')
        colorbar_ax.yaxis.set_ticks_position('none')
        pylab.xticks([])
        pylab.yticks([])
        pylab.title(propname, size=9)
        if proptype == 'continuous':
            cb = pylab.colorbar(prop_image[shortname], cax=colorbar_ax, orientation='horizontal')
            # if range is close to zero to one, manually set tics to 0, 0.5, 1. This helps for RSA
            if -0.1 <= vmin <= 0 and 1.0 <= vmax <= 1.15:
                cb.set_ticks([0, 0.5, 1])
                cb.set_ticklabels(['0', '0.5', '1'])
            # if it seems plausible, set integer ticks
            if 4 < (vmax - vmin) <= 11:
                fixedticks = [itick for itick in range(int(vmin), int(vmax) + 1)]
                cb.set_ticks(fixedticks)
                cb.set_ticklabels([str(itick) for itick in fixedticks])
        elif proptype == 'discrete':
            cb = pylab.colorbar(prop_image[shortname], cax=colorbar_ax, orientation='horizontal', boundaries=[i for i in range(len(propcategories) + 1)], values=[i for i in range(len(propcategories))])
            cb.set_ticks([i + 0.5 for i in range(len(propcategories))])
            cb.set_ticklabels(propcategories)
        else:
            raise ValueError("Invalid proptype")
        if shortname in fix_limits:
            (ticklocs, ticknames) = fix_limits[shortname]
            cb.set_ticks(ticklocs)
            cb.set_ticklabels(ticknames)
    # save the plot
    pylab.savefig(overlayfile, transparent=True)
|
Makes overlay for *LogoPlot*.
This function creates colored bars overlay bars showing up to two
properties.
The trick of this function is to create the bars the right
size so they align when they overlay the logo plot.
CALLING VARIABLES:
* *sites* : same as the variable of this name used by *LogoPlot*.
* *overlayfile* is a string giving the name of created PDF file containing
the overlay. It must end in the extension ``.pdf``.
* *overlay* : same as the variable of this name used by *LogoPlot*.
* *nperline* : same as the variable of this name used by *LogoPlot*.
* *sitewidth* is the width of each site in points.
* *rmargin* is the right margin in points.
* *logoheight* is the total height of each logo row in points.
* *barheight* is the total height of each bar in points.
* *barspacing* is the vertical spacing between bars in points.
* *fix_limits* has the same meaning of the variable of this name used by *LogoPlot*.
* *fixlongname* has the same meaning of the variable of this name used by *LogoPlot*.
* *overlay_cmap* has the same meaning of the variable of this name used by *LogoPlot*.
* *underlay* is a bool. If `True`, make an underlay rather than an overlay.
* *scalebar*: if not `False`, is the 2-tuple `(scalebarheight, scalebarlabel)`
where `scalebarheight` is in points.
|
def _encode_sequence(self, inputs, token_types, valid_length=None):
"""Generate the representation given the input sequences.
This is used for pre-training or fine-tuning a BERT model.
"""
# embedding
word_embedding = self.word_embed(inputs)
type_embedding = self.token_type_embed(token_types)
embedding = word_embedding + type_embedding
# encoding
outputs, additional_outputs = self.encoder(embedding, None, valid_length)
return outputs, additional_outputs
|
Generate the representation given the input sequences.
This is used for pre-training or fine-tuning a BERT model.
|
def clear(self):
    'od.clear() -> None. Remove all items from od.'
    try:
        # Break the circular references between the doubly-linked-list
        # nodes so they can be reclaimed without the cycle collector.
        for node in self.__map.itervalues():
            del node[:]
        # Reset the sentinel root node to point at itself (empty list).
        root = self.__root
        root[:] = [root, root, None]
        self.__map.clear()
    except AttributeError:
        # Presumably guards against clear() being called before __init__
        # has created __map/__root (e.g. during unpickling) -- the dict
        # itself is still cleared below. TODO confirm.
        pass
    dict.clear(self)
|
od.clear() -> None. Remove all items from od.
|
def well(self, idx) -> Well:
    """Deprecated---use result of `wells` or `wells_by_index`"""
    # Dispatch on the index type: string names go through the name mapping,
    # integers index positionally; anything else is unsupported.
    if isinstance(idx, str):
        return self.wells_by_index()[idx]
    if isinstance(idx, int):
        return self._wells[idx]
    return NotImplemented
|
Deprecated---use result of `wells` or `wells_by_index`
|
def determine_override_options(selected_options: tuple, override_opts: DictLike, set_of_possible_options: tuple = ()) -> Dict[str, Any]:
    """ Recursively extract the dict described in override_options().
    In particular, this searches for selected options in the override_opts dict. It stores only
    the override options that are selected.
    Args:
        selected_options: The options selected for this analysis, in the order defined used
            with ``override_options()`` and in the configuration file.
        override_opts: dict-like object returned by ruamel.yaml which contains the options that
            should be used to override the configuration options.
        set_of_possible_options (tuple of enums): Possible options for the override value categories.
    """
    # Compare by string form, since only some options are already strings
    # (others are enums whose str() must be used instead).
    selected_names = {str(opt) for opt in selected_options}
    overrides: Dict[str, Any] = {}
    for option in override_opts:
        option_name = str(option)
        if option_name in selected_names:
            # A selected category: recurse into its sub-dict and merge.
            overrides.update(determine_override_options(selected_options, override_opts[option], set_of_possible_options))
            continue
        logger.debug(f"override_opts: {override_opts}")
        # A key matching a possible-but-unselected option (by name or value)
        # belongs to another analysis configuration and must be ignored.
        found_as_possible_option = any(
            option_name in {str(opt) for opt in possible_options}
            for possible_options in set_of_possible_options
        )
        if found_as_possible_option:
            logger.debug(f"Found option \"{option}\" as possible option, so skipping!")
        else:
            # Not a category at all: it is an actual override value to keep.
            logger.debug(f"Storing override option \"{option}\", with value \"{override_opts[option]}\"")
            overrides[option] = override_opts[option]
    return overrides
|
Recursively extract the dict described in override_options().
In particular, this searches for selected options in the override_opts dict. It stores only
the override options that are selected.
Args:
selected_options: The options selected for this analysis, in the order defined used
with ``override_options()`` and in the configuration file.
override_opts: dict-like object returned by ruamel.yaml which contains the options that
should be used to override the configuration options.
set_of_possible_options (tuple of enums): Possible options for the override value categories.
|
def broadcast_1d_array(arr, ndim, axis=1):
    """
    Broadcast 1-d array `arr` to `ndim` dimensions on the first axis
    (`axis`=0) or on the last axis (`axis`=1).
    Useful for 'outer' calculations involving 1-d arrays that are related to
    different axes on a multidimensional grid.
    """
    # Insert singleton dimensions one at a time, always at the same position:
    # axis=0 prepends them, axis=1 nests them just after the first dimension.
    # When ndim <= 1 the input is returned unchanged.
    remaining = ndim - 1
    expanded = arr
    while remaining > 0:
        expanded = np.expand_dims(expanded, axis=axis)
        remaining -= 1
    return expanded
|
Broadcast 1-d array `arr` to `ndim` dimensions on the first axis
(`axis`=0) or on the last axis (`axis`=1).
Useful for 'outer' calculations involving 1-d arrays that are related to
different axes on a multidimensional grid.
|
def read_tpld_stats(self):
    """
    :return: dictionary {tpld index {group name {stat name: value}}}.
        See XenaTpld.stats_captions.
    """
    # Collect per-TPLD statistics, preserving the order of self.tplds.
    return OrderedDict((tpld, tpld.read_stats()) for tpld in self.tplds.values())
|
:return: dictionary {tpld index {group name {stat name: value}}}.
See XenaTpld.stats_captions.
|
def format_out_of_country_keeping_alpha_chars(numobj, region_calling_from):
    """Formats a phone number for out-of-country dialing purposes.
    Note that in this version, if the number was entered originally using
    alpha characters and this version of the number is stored in raw_input,
    this representation of the number will be used rather than the digit
    representation. Grouping information, as specified by characters such as
    "-" and " ", will be retained.
    Caveats:
    - This will not produce good results if the country calling code is both
      present in the raw input _and_ is the start of the national
      number. This is not a problem in the regions which typically use alpha
      numbers.
    - This will also not produce good results if the raw input has any
      grouping information within the first three digits of the national
      number, and if the function needs to strip preceding digits/words in
      the raw input before these digits. Normally people group the first
      three digits together so this is not a huge problem - and will be fixed
      if it proves to be so.
    Arguments:
    numobj -- The phone number that needs to be formatted.
    region_calling_from -- The region where the call is being placed.
    Returns the formatted phone number
    """
    num_raw_input = numobj.raw_input
    # If there is no raw input, then we can't keep alpha characters because there aren't any.
    # In this case, we return format_out_of_country_calling_number.
    if num_raw_input is None or len(num_raw_input) == 0:
        return format_out_of_country_calling_number(numobj, region_calling_from)
    country_code = numobj.country_code
    # With an invalid country code there is nothing sensible to format;
    # return the raw input untouched.
    if not _has_valid_country_calling_code(country_code):
        return num_raw_input
    # Strip any prefix such as country calling code, IDD, that was present. We
    # do this by comparing the number in raw_input with the parsed number. To
    # do this, first we normalize punctuation. We retain number grouping
    # symbols such as " " only.
    num_raw_input = _normalize_helper(num_raw_input,
                                      _ALL_PLUS_NUMBER_GROUPING_SYMBOLS,
                                      True)
    # Now we trim everything before the first three digits in the parsed
    # number. We choose three because all valid alpha numbers have 3 digits at
    # the start - if it does not, then we don't trim anything at
    # all. Similarly, if the national number was less than three digits, we
    # don't trim anything at all.
    national_number = national_significant_number(numobj)
    if len(national_number) > 3:
        first_national_number_digit = num_raw_input.find(national_number[:3])
        if first_national_number_digit != -1:
            num_raw_input = num_raw_input[first_national_number_digit:]
    metadata_for_region_calling_from = PhoneMetadata.metadata_for_region(region_calling_from.upper(), None)
    # NANPA-to-NANPA calls are dialed as "1 <number>".
    if country_code == _NANPA_COUNTRY_CODE:
        if is_nanpa_country(region_calling_from):
            return unicod(country_code) + U_SPACE + num_raw_input
    elif (metadata_for_region_calling_from is not None and
          country_code == country_code_for_region(region_calling_from)):
        # Domestic call: format nationally, preserving the user's grouping.
        formatting_pattern = _choose_formatting_pattern_for_number(metadata_for_region_calling_from.number_format,
                                                                   national_number)
        if formatting_pattern is None:
            # If no pattern above is matched, we format the original input
            return num_raw_input
        new_format = _copy_number_format(formatting_pattern)
        # The first group is the first group of digits that the user
        # wrote together.
        new_format.pattern = u("(\\d+)(.*)")
        # Here we just concatenate them back together after the national
        # prefix has been fixed.
        new_format.format = u(r"\1\2")
        # Now we format using this pattern instead of the default pattern,
        # but with the national prefix prefixed if necessary.
        # This will not work in the cases where the pattern (and not the
        # leading digits) decide whether a national prefix needs to be used,
        # since we have overridden the pattern to match anything, but that is
        # not the case in the metadata to date.
        return _format_nsn_using_pattern(num_raw_input,
                                         new_format,
                                         PhoneNumberFormat.NATIONAL)
    i18n_prefix_for_formatting = U_EMPTY_STRING
    # If an unsupported region-calling-from is entered, or a country with
    # multiple international prefixes, the international format of the number
    # is returned, unless there is a preferred international prefix.
    if metadata_for_region_calling_from is not None:
        international_prefix = metadata_for_region_calling_from.international_prefix
        i18n_match = fullmatch(_SINGLE_INTERNATIONAL_PREFIX, international_prefix)
        if i18n_match:
            i18n_prefix_for_formatting = international_prefix
        else:
            i18n_prefix_for_formatting = metadata_for_region_calling_from.preferred_international_prefix
    region_code = region_code_for_country_code(country_code)
    # Metadata cannot be None because the country calling code is valid.
    metadata_for_region = PhoneMetadata.metadata_for_region_or_calling_code(country_code, region_code)
    formatted_number = _maybe_append_formatted_extension(numobj,
                                                         metadata_for_region,
                                                         PhoneNumberFormat.INTERNATIONAL,
                                                         num_raw_input)
    if i18n_prefix_for_formatting:
        formatted_number = (i18n_prefix_for_formatting + U_SPACE +
                            unicod(country_code) + U_SPACE + formatted_number)
    else:
        # Invalid region entered as country-calling-from (so no metadata was
        # found for it) or the region chosen has multiple international
        # dialling prefixes.
        formatted_number = _prefix_number_with_country_calling_code(country_code,
                                                                    PhoneNumberFormat.INTERNATIONAL,
                                                                    formatted_number)
    return formatted_number
|
Formats a phone number for out-of-country dialing purposes.
Note that in this version, if the number was entered originally using
alpha characters and this version of the number is stored in raw_input,
this representation of the number will be used rather than the digit
representation. Grouping information, as specified by characters such as
"-" and " ", will be retained.
Caveats:
- This will not produce good results if the country calling code is both
present in the raw input _and_ is the start of the national
number. This is not a problem in the regions which typically use alpha
numbers.
- This will also not produce good results if the raw input has any
grouping information within the first three digits of the national
number, and if the function needs to strip preceding digits/words in
the raw input before these digits. Normally people group the first
three digits together so this is not a huge problem - and will be fixed
if it proves to be so.
Arguments:
numobj -- The phone number that needs to be formatted.
region_calling_from -- The region where the call is being placed.
Returns the formatted phone number
|
def decrypt(*args, **kwargs):
    """ Decrypts legacy or spec-compliant JOSE token.
    First attempts to decrypt the token in a legacy mode
    (https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-19).
    If it is not a valid legacy token then attempts to decrypt it in a
    spec-compliant way (http://tools.ietf.org/html/rfc7519)

    :raises NotYetValid, Expired: propagated unchanged from the legacy
        decryption -- the token itself was decrypted successfully.
    """
    try:
        return legacy_decrypt(*args, **kwargs)
    except (NotYetValid, Expired):
        # these should be raised immediately.
        # The token has been decrypted successfully to get to here.
        # decrypting using `legacy_decrypt` will not help things.
        # Bare `raise` re-raises with the original traceback intact
        # (`raise e` would rewrite it, losing context on Python 2).
        raise
    except (Error, ValueError):
        # Not a valid legacy token; fall back to the spec-compliant format.
        return spec_compliant_decrypt(*args, **kwargs)
|
Decrypts legacy or spec-compliant JOSE token.
First attempts to decrypt the token in a legacy mode
(https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-19).
If it is not a valid legacy token then attempts to decrypt it in a
spec-compliant way (http://tools.ietf.org/html/rfc7519)
|
def stats(self) -> pd.DataFrame:
    """Statistics about flights contained in the structure.
    Useful for a meaningful representation.
    """
    # Group by flight_id when available, otherwise fall back to the
    # (icao24, callsign) pair that identifies a flight.
    group_key = "flight_id" if self.flight_ids is not None else ["icao24", "callsign"]
    counts = self.data.groupby(group_key)[["timestamp"]].count()
    # Rename the counted column and present the largest flights first.
    return counts.rename(columns={"timestamp": "count"}).sort_values("count", ascending=False)
|
Statistics about flights contained in the structure.
Useful for a meaningful representation.
|
def cross_entropy_reward_loss(logits, actions, rewards, name=None):
    """Calculate the loss for Policy Gradient Network.
    Parameters
    ----------
    logits : tensor
        The network outputs without softmax. This function implements softmax inside.
    actions : tensor or placeholder
        The agent actions.
    rewards : tensor or placeholder
        The rewards.
    Returns
    --------
    Tensor
        The TensorFlow loss function.
    Examples
    ----------
    >>> states_batch_pl = tf.placeholder(tf.float32, shape=[None, D])
    >>> network = InputLayer(states_batch_pl, name='input')
    >>> network = DenseLayer(network, n_units=H, act=tf.nn.relu, name='relu1')
    >>> network = DenseLayer(network, n_units=3, name='out')
    >>> probs = network.outputs
    >>> sampling_prob = tf.nn.softmax(probs)
    >>> actions_batch_pl = tf.placeholder(tf.int32, shape=[None])
    >>> discount_rewards_batch_pl = tf.placeholder(tf.float32, shape=[None])
    >>> loss = tl.rein.cross_entropy_reward_loss(probs, actions_batch_pl, discount_rewards_batch_pl)
    >>> train_op = tf.train.RMSPropOptimizer(learning_rate, decay_rate).minimize(loss)
    """
    # Per-example cross entropy; softmax over the logits is applied inside TF.
    per_example_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=actions, logits=logits, name=name)
    # Weight each example's loss by its (discounted) reward, then sum.
    weighted_loss = tf.multiply(per_example_loss, rewards)
    return tf.reduce_sum(weighted_loss)
|
Calculate the loss for Policy Gradient Network.
Parameters
----------
logits : tensor
The network outputs without softmax. This function implements softmax inside.
actions : tensor or placeholder
The agent actions.
rewards : tensor or placeholder
The rewards.
Returns
--------
Tensor
The TensorFlow loss function.
Examples
----------
>>> states_batch_pl = tf.placeholder(tf.float32, shape=[None, D])
>>> network = InputLayer(states_batch_pl, name='input')
>>> network = DenseLayer(network, n_units=H, act=tf.nn.relu, name='relu1')
>>> network = DenseLayer(network, n_units=3, name='out')
>>> probs = network.outputs
>>> sampling_prob = tf.nn.softmax(probs)
>>> actions_batch_pl = tf.placeholder(tf.int32, shape=[None])
>>> discount_rewards_batch_pl = tf.placeholder(tf.float32, shape=[None])
>>> loss = tl.rein.cross_entropy_reward_loss(probs, actions_batch_pl, discount_rewards_batch_pl)
>>> train_op = tf.train.RMSPropOptimizer(learning_rate, decay_rate).minimize(loss)
|
def call(self, itemMethod):
    """
    Invoke the given bound item method in the batch process.

    Return a Deferred which fires when the method has been invoked.
    """
    # Unpack the bound method into its item and its plain function name
    # (Python 2 bound-method attributes: im_self / im_func.func_name).
    target = itemMethod.im_self
    methodName = itemMethod.im_func.func_name
    # Describe the invocation as a command object, then forward it to the
    # batch process once the controller hands us one.
    command = CallItemMethod(storepath=target.store.dbdir,
                             storeid=target.storeID,
                             method=methodName)
    return self.batchController.getProcess().addCallback(command.do)
|
Invoke the given bound item method in the batch process.
Return a Deferred which fires when the method has been invoked.
|
def mark_validation(institute_id, case_name, variant_id):
    """Mark a variant as sanger validated.

    Reads the validation type from the submitted form, records it on the
    variant, and redirects back to the referring page (or the variant page).
    """
    user_obj = store.user(current_user.email)
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    variant_obj = store.variant(variant_id)
    # An empty form value means "no validation type selected".
    validate_type = request.form['type'] or None
    link = url_for('variants.variant', institute_id=institute_id,
                   case_name=case_name, variant_id=variant_id)
    store.validate(institute_obj, case_obj, user_obj, link, variant_obj,
                   validate_type)
    return redirect(request.referrer or link)
|
Mark a variant as sanger validated.
|
def _get_variant_effect(cls, variants, ref_sequence):
    '''Classify the combined effect of the variants that lie in one codon.

    variants = list of variants in the same codon.
    returns a tuple (effect_type, description, aa_start) describing the
    type of variant (cannot handle more than one indel in the same codon).
    '''
    assert len(variants) != 0
    # Mixed variant types (e.g. SNP + indel) in one codon are not classified.
    var_types = [x.var_type for x in variants]
    if len(set(var_types)) != 1:
        return 'MULTIPLE', '.', '.'
    var_type = var_types[0]
    # All variants must be on the given reference sequence and share one codon.
    assert set([x.ref_name for x in variants]) == set([ref_sequence.id])
    codon_starts = [AssemblyVariants._get_codon_start(0, x.ref_start) for x in variants]
    assert len(set(codon_starts)) == 1
    codon_start = codon_starts[0]
    # 0-based amino acid index of the affected codon.
    aa_start = codon_start // 3
    ref_codon = pyfastaq.sequences.Fasta('codon', ref_sequence[codon_start:codon_start+3])
    ref_aa = ref_codon.translate()
    if var_type == pymummer.variant.SNP:
        # Apply every SNP in the codon, then compare translations.
        new_codon = list(ref_codon.seq)
        for v in variants:
            new_codon[v.ref_start - codon_start] = v.qry_base
        new_codon = pyfastaq.sequences.Fasta('new', ''.join(new_codon))
        qry_aa = new_codon.translate()
        if ref_aa.seq == qry_aa.seq:
            return ('SYN', '.', aa_start)
        elif qry_aa.seq == '*':
            # SNP introduces a stop codon -> truncation.
            return ('TRUNC', ref_aa.seq + str(aa_start + 1) + 'trunc', aa_start)
        else:
            return ('NONSYN', ref_aa.seq + str(aa_start + 1) + qry_aa.seq, aa_start)
    elif var_type in [pymummer.variant.INS, pymummer.variant.DEL]:
        # More than one indel per codon is not handled.
        if len(variants) > 1:
            return 'INDELS', '.', aa_start
        var = variants[0]
        # The inserted/deleted bases come from the query or reference side.
        if var_type == pymummer.variant.INS:
            new_seq = pyfastaq.sequences.Fasta('seq', var.qry_base)
        else:
            new_seq = pyfastaq.sequences.Fasta('seq', var.ref_base)
        # Length not a multiple of 3 -> frameshift.
        if len(new_seq) % 3 != 0:
            return ('FSHIFT', ref_aa.seq + str(aa_start + 1) + 'fs', aa_start)
        new_seq_aa = new_seq.translate()
        if '*' in new_seq_aa.seq:
            # In-frame indel containing a stop codon -> truncation.
            return ('TRUNC', ref_aa.seq + str(aa_start + 1) + 'trunc', aa_start)
        elif var_type == pymummer.variant.INS:
            # In-frame insertion: report between this codon and the next.
            ref_codon_after_ins = pyfastaq.sequences.Fasta('codon', ref_sequence[codon_start+3:codon_start+6])
            aa_after_ins = ref_codon_after_ins.translate()
            return ('INS', ref_aa.seq + str(aa_start + 1) + '_' + aa_after_ins.seq + str(aa_start + 2) + 'ins' + new_seq_aa.seq , aa_start)
        else:
            if len(new_seq) == 3:
                # Single whole-codon deletion.
                return ('DEL', ref_aa.seq + str(aa_start + 1) + 'del', aa_start)
            else:
                # Multi-codon in-frame deletion spanning into the next codon.
                assert len(new_seq) % 3 == 0
                ref_codon_after_ins = pyfastaq.sequences.Fasta('codon', ref_sequence[codon_start+3:codon_start+6])
                aa_after_ins = ref_codon_after_ins.translate()
                return ('DEL', ref_aa.seq + str(aa_start + 1)+ '_' + aa_after_ins.seq + str(aa_start + 2) + 'del', aa_start)
    else:
        return ('UNKNOWN', '.', aa_start)
|
variants = list of variants in the same codon.
returns type of variant (cannot handle more than one indel in the same codon).
|
def get_groups(self, username):
    """ Get a user's groups

    :param username: 'key' attribute of the user
    :type username: string
    :rtype: list of groups
    :raises UserDoesntExist: if ``username`` is not a known user
    """
    try:
        return self.users[username]['groups']
    except KeyError:
        # Only a missing user (or missing 'groups' entry) means the user
        # does not exist; catching bare Exception here would also mask
        # genuine bugs (e.g. self.users being the wrong type).
        raise UserDoesntExist(username, self.backend_name)
|
Get a user's groups
:param username: 'key' attribute of the user
:type username: string
:rtype: list of groups
|
def submit(self, command, blocksize, job_name="parsl.auto"):
    ''' Submits the command onto an Local Resource Manager job of blocksize parallel elements.
    Submit returns an ID that corresponds to the task that was just submitted.

    If tasks_per_node < 1 : ! This is illegal. tasks_per_node should be integer
    If tasks_per_node == 1:
         A single node is provisioned
    If tasks_per_node > 1 :
         tasks_per_node * blocksize number of nodes are provisioned.

    Args:
         - command :(String) Commandline invocation to be made on the remote side.
         - blocksize :(float)

    Kwargs:
         - job_name (String): Name for job, must be unique

    Returns:
         - None: At capacity, cannot provision more
         - job_id: (string) Identifier for the job
    '''
    if self.provisioned_blocks >= self.max_blocks:
        # Logger.warn is deprecated; warning is the supported spelling.
        logger.warning("[%s] at capacity, cannot add more blocks now", self.label)
        return None

    # Note: Fix this later to avoid confusing behavior.
    # We should always allocate blocks in integer counts of node_granularity
    if blocksize < self.nodes_per_block:
        blocksize = self.nodes_per_block

    # Set job name: timestamp suffix keeps names unique per submission.
    job_name = "parsl.{0}.{1}".format(job_name, time.time())

    # Set script path
    script_path = "{0}/{1}.submit".format(self.script_dir, job_name)
    script_path = os.path.abspath(script_path)

    logger.debug("Requesting blocksize:%s nodes_per_block:%s tasks_per_node:%s", blocksize, self.nodes_per_block,
                 self.tasks_per_node)

    # Assemble the template substitutions for the submit script.
    job_config = {}
    # TODO : script_path might need to change to accommodate script dir set via channels
    job_config["submit_script_dir"] = self.channel.script_dir
    job_config["nodes"] = self.nodes_per_block
    job_config["task_blocks"] = self.nodes_per_block * self.tasks_per_node
    job_config["nodes_per_block"] = self.nodes_per_block
    job_config["tasks_per_node"] = self.tasks_per_node
    job_config["walltime"] = self.walltime
    job_config["overrides"] = self.overrides
    job_config["user_script"] = command

    # Wrap the command with the launcher so it fans out across tasks/nodes.
    job_config["user_script"] = self.launcher(command,
                                              self.tasks_per_node,
                                              self.nodes_per_block)

    logger.debug("Writing submit script")
    self._write_submit_script(template_string, script_path, job_name, job_config)

    # Stage the script to the remote side via the channel.
    channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)

    # Optional qsub flags for queue and accounting.
    submit_options = ''
    if self.queue is not None:
        submit_options = '{0} -q {1}'.format(submit_options, self.queue)
    if self.account is not None:
        submit_options = '{0} -A {1}'.format(submit_options, self.account)

    launch_cmd = "qsub {0} {1}".format(submit_options, channel_script_path)
    retcode, stdout, stderr = self.channel.execute_wait(launch_cmd, 10)

    job_id = None
    if retcode == 0:
        # qsub prints the new job id; take the last non-empty line.
        for line in stdout.split('\n'):
            if line.strip():
                job_id = line.strip()
                self.resources[job_id] = {'job_id': job_id, 'status': 'PENDING', 'blocksize': blocksize}
    else:
        message = "Command '{}' failed with return code {}".format(launch_cmd, retcode)
        if (stdout is not None) and (stderr is not None):
            message += "\nstderr:{}\nstdout{}".format(stderr.strip(), stdout.strip())
        logger.error(message)
    return job_id
|
Submits the command onto an Local Resource Manager job of blocksize parallel elements.
Submit returns an ID that corresponds to the task that was just submitted.
If tasks_per_node < 1 : ! This is illegal. tasks_per_node should be integer
If tasks_per_node == 1:
A single node is provisioned
If tasks_per_node > 1 :
tasks_per_node * blocksize number of nodes are provisioned.
Args:
- command :(String) Commandline invocation to be made on the remote side.
- blocksize :(float)
Kwargs:
- job_name (String): Name for job, must be unique
Returns:
- None: At capacity, cannot provision more
- job_id: (string) Identifier for the job
|
def _init_scratch(self):
    """Initializes a scratch pad equal in size to the wavefunction."""
    shape = (self._num_shards, self._shard_size)
    scratch = np.zeros(shape, dtype=np.complex64)
    # Register the scratch buffer (viewed as float32 pairs) with the shared
    # memory manager and remember the handle for later lookups.
    handle = mem_manager.SharedMemManager.create_array(
        scratch.view(dtype=np.float32))
    self._shared_mem_dict['scratch_handle'] = handle
|
Initializes a scratch pad equal in size to the wavefunction.
|
def SetColor(self, color):
    """
    Set the fill, line, and marker color attributes with one method call.

    *color* may be any color understood by ROOT or matplotlib.
    For full documentation of accepted *color* arguments, see
    :class:`rootpy.plotting.style.Color`.
    """
    # Apply the same color to each attribute setter, preserving the
    # fill -> line -> marker order.
    for setter in (self.SetFillColor, self.SetLineColor, self.SetMarkerColor):
        setter(color)
|
*color* may be any color understood by ROOT or matplotlib.
Set all color attributes with one method call.
For full documentation of accepted *color* arguments, see
:class:`rootpy.plotting.style.Color`.
|
def alias_field(model, field):
    """
    Return the prefix name of a field
    """
    parts = field.split(LOOKUP_SEP)
    # Walk down the relation chain so `model` ends up as the model that
    # actually owns the final field component.
    for relation in parts[:-1]:
        model = associate_model(model, relation)
    return model.__name__ + "-" + parts[-1]
|
Return the prefix name of a field
|
def get_contingency_tables(self):
    """
    Create an Array of ContingencyTable objects for each probability threshold.

    Returns:
        Array of ContingencyTable objects
    """
    tables = [ContingencyTable(*row) for row in self.contingency_tables.values]
    return np.array(tables)
|
Create an Array of ContingencyTable objects for each probability threshold.
Returns:
Array of ContingencyTable objects
|
def driver_name(self):
    """
    Returns the name of the motor driver that loaded this device. See the list
    of [supported devices] for a list of drivers.
    """
    # get_attr_string returns an (attribute-handle, value) pair; cache the
    # refreshed handle on the instance and hand back just the value.
    attr_handle, value = self.get_attr_string(self._driver_name, 'driver_name')
    self._driver_name = attr_handle
    return value
|
Returns the name of the motor driver that loaded this device. See the list
of [supported devices] for a list of drivers.
|
def _hash_categorical(c, encoding, hash_key):
    """
    Hash a Categorical by hashing its categories, and then mapping the codes
    to the hashes

    Parameters
    ----------
    c : Categorical
    encoding : string, default 'utf8'
    hash_key : string key to encode, default to _default_hash_key

    Returns
    -------
    ndarray of hashed values array, same size as len(c)
    """
    # Hash the categories once (ExtensionArrays coerced to ndarray); the
    # codes then simply index into this table of hashes.
    category_values = np.asarray(c.categories.values)
    category_hashes = hash_array(category_values, encoding, hash_key,
                                 categorize=False)

    mask = c.isna()
    if not len(category_hashes):
        result = np.zeros(len(mask), dtype='uint64')
    else:
        result = category_hashes.take(c.codes)

    # we have uint64, as we don't directly support missing values
    # we don't want to use take_nd which will coerce to float
    # instead, directly construct the result with a
    # max(np.uint64) as the missing value indicator
    #
    # TODO: GH 15362
    if mask.any():
        result[mask] = np.iinfo(np.uint64).max

    return result
|
Hash a Categorical by hashing its categories, and then mapping the codes
to the hashes
Parameters
----------
c : Categorical
encoding : string, default 'utf8'
hash_key : string key to encode, default to _default_hash_key
Returns
-------
ndarray of hashed values array, same size as len(c)
|
def _fs_match(pattern, filename, sep, follow, symlinks):
    """
    Match path against the pattern.

    Since `globstar` doesn't match symlinks (unless `FOLLOW` is enabled), we must look for symlinks.
    If we identify a symlink in a `globstar` match, we know this result should not actually match.

    ``symlinks`` is a dict cache mapping path -> bool ("is a symlink") so
    repeated matches do not re-stat the same directories.
    """
    matched = False
    base = None

    m = pattern.fullmatch(filename)
    if m:
        matched = True
        # Lets look at the captured `globstar` groups and see if that part of the path
        # contains symlinks.
        if not follow:
            groups = m.groups()
            last = len(groups)
            for i, star in enumerate(m.groups(), 1):
                if star:
                    parts = star.strip(sep).split(sep)
                    if base is None:
                        # Prefix of the path up to this globstar capture.
                        base = filename[:m.start(i)]
                    for part in parts:
                        base = os.path.join(base, part)
                        # Cache hit: reuse the previous islink() answer.
                        is_link = symlinks.get(base, None)
                        if is_link is not None:
                            matched = not is_link
                        elif i != last or os.path.isdir(base):
                            # The final non-directory component of the last
                            # group is not checked (it cannot be traversed).
                            is_link = os.path.islink(base)
                            symlinks[base] = is_link
                            matched = not is_link
                        if not matched:
                            # A symlink inside the globstar span kills the match.
                            break
                    # NOTE(review): this stops after the first non-empty
                    # globstar group when it still matches — verify handling
                    # of patterns with multiple `**` segments upstream.
                    if matched:
                        break
    return matched
|
Match path against the pattern.
Since `globstar` doesn't match symlinks (unless `FOLLOW` is enabled), we must look for symlinks.
If we identify a symlink in a `globstar` match, we know this result should not actually match.
|
def commit(
    self,
    confirm=False,
    confirm_delay=None,
    check=False,
    comment="",
    and_quit=False,
    delay_factor=1,
):
    """
    Commit the candidate configuration.

    Commit the entered configuration. Raise an error and return the failure
    if the commit fails.

    Automatically enters configuration mode.

    default:
        command_string = commit
    check and (confirm or confirm_delay or comment):
        Exception
    confirm_delay and no confirm:
        Exception
    confirm:
        confirm_delay option
        comment option
        command_string = commit confirmed or commit confirmed <confirm_delay>
    check:
        command_string = commit check

    :param confirm: Issue a confirmed commit that auto-rolls-back unless confirmed.
    :param confirm_delay: Minutes before an unconfirmed commit rolls back.
    :param check: Validate the candidate configuration without committing.
    :param comment: Commit log comment (must not contain double quotes).
    :param and_quit: Leave configuration mode as part of the commit.
    :param delay_factor: Multiplier applied to command timing.
    """
    delay_factor = self.select_delay_factor(delay_factor)

    # Argument combinations that make no sense on the device are rejected early.
    if check and (confirm or confirm_delay or comment):
        raise ValueError("Invalid arguments supplied with commit check")

    if confirm_delay and not confirm:
        raise ValueError(
            "Invalid arguments supplied to commit method both confirm and check"
        )

    # Select proper command string based on arguments provided
    command_string = "commit"
    # Marker expected in the device output when the operation succeeds.
    commit_marker = "Commit complete."
    if check:
        command_string = "commit check"
        commit_marker = "Validation complete"
    elif confirm:
        if confirm_delay:
            command_string = "commit confirmed " + str(confirm_delay)
        else:
            command_string = "commit confirmed"
        commit_marker = "commit confirmed will be automatically rolled back in"

    # wrap the comment in quotes
    if comment:
        if '"' in comment:
            raise ValueError("Invalid comment contains double quote")
        comment = '"{0}"'.format(comment)
        command_string += " comment " + comment

    if and_quit:
        command_string += " and-quit"

    # Enter config mode (if necessary)
    output = self.config_mode()
    # and_quit will get out of config mode on commit
    if and_quit:
        # After and-quit the device returns to the operational prompt,
        # so wait for the base prompt instead of the config prompt.
        prompt = self.base_prompt
        output += self.send_command_expect(
            command_string,
            expect_string=prompt,
            strip_prompt=True,
            strip_command=True,
            delay_factor=delay_factor,
        )
    else:
        output += self.send_command_expect(
            command_string,
            strip_prompt=True,
            strip_command=True,
            delay_factor=delay_factor,
        )

    if commit_marker not in output:
        raise ValueError(
            "Commit failed with the following errors:\n\n{0}".format(output)
        )
    return output
|
Commit the candidate configuration.
Commit the entered configuration. Raise an error and return the failure
if the commit fails.
Automatically enters configuration mode
default:
command_string = commit
check and (confirm or confirm_delay or comment):
Exception
confirm_delay and no confirm:
Exception
confirm:
confirm_delay option
comment option
command_string = commit confirmed or commit confirmed <confirm_delay>
check:
command_string = commit check
|
def apply_integer_offsets(image2d, offx, offy):
    """Apply global (integer) offsets to image.

    Parameters
    ----------
    image2d : numpy array
        Input image
    offx : int
        Offset in the X direction (must be integer).
    offy : int
        Offset in the Y direction (must be integer).

    Returns
    -------
    image2d_shifted : numpy array
        Shifted image; regions vacated by the shift are zero-filled.

    Raises
    ------
    ValueError
        If either offset is not an integer.
    """
    # protections: accept python ints and numpy integer scalars
    # (the original `type(x) != int` check rejected np.int64 etc.)
    if not isinstance(offx, (int, np.integer)) or \
            not isinstance(offy, (int, np.integer)):
        raise ValueError('Invalid non-integer offsets')

    # image dimensions
    naxis2, naxis1 = image2d.shape

    # initialize output image
    image2d_shifted = np.zeros((naxis2, naxis1))

    # For a shift s, the destination slice is [max(0, s) : (s if s < 0 else None)]
    # and the source slice uses -s, so positive/negative shifts both work.
    def _stop(s):
        """Slice stop for shift s (None means 'to the end')."""
        return s if s < 0 else None

    def _start(s):
        """Slice start for shift s (clamped at zero)."""
        return max(0, s)

    # shift image
    image2d_shifted[_start(offy):_stop(offy), _start(offx):_stop(offx)] = \
        image2d[_start(-offy):_stop(-offy), _start(-offx):_stop(-offx)]

    return image2d_shifted
|
Apply global (integer) offsets to image.
Parameters
----------
image2d : numpy array
Input image
offx : int
Offset in the X direction (must be integer).
offy : int
Offset in the Y direction (must be integer).
Returns
-------
image2d_shifted : numpy array
Shifted image
|
def wait_and_ignore(condition, timeout=WTF_TIMEOUT_MANAGER.NORMAL, sleep=0.5):
    '''
    Waits wrapper that'll wait for the condition to become true, but will
    not error if the condition isn't met.

    Args:
        condition (lambda) - Lambda expression to wait for to evaluate to True.

    Kwargs:
        timeout (number) : Maximum number of seconds to wait.
        sleep (number) : Sleep time to wait between iterations.

    Example::

        wait_and_ignore(lambda: driver.find_element_by_id("success").is_displayed(),
                        timeout=30,
                        sleep=0.5)

    is equivalent to::

        end_time = datetime.now() + timedelta(seconds=30)
        while datetime.now() < end_time:
            try:
                if driver.find_element_by_id("success").is_displayed():
                    break;
            except:
                pass
            time.sleep(0.5)
    '''
    try:
        return wait_until(condition, timeout, sleep)
    except Exception:
        # Timeouts (and any error raised by the condition itself) are
        # deliberately swallowed. A bare `except:` would also trap
        # KeyboardInterrupt/SystemExit, so catch Exception instead.
        pass
|
Waits wrapper that'll wait for the condition to become true, but will
not error if the condition isn't met.
Args:
condition (lambda) - Lambda expression to wait for to evaluate to True.
Kwargs:
timeout (number) : Maximum number of seconds to wait.
sleep (number) : Sleep time to wait between iterations.
Example::
wait_and_ignore(lambda: driver.find_element_by_id("success").is_displayed(),
timeout=30,
sleep=0.5)
is equivalent to::
end_time = datetime.now() + timedelta(seconds=30)
while datetime.now() < end_time:
try:
if driver.find_element_by_id("success").is_displayed():
break;
except:
pass
time.sleep(0.5)
|
def visit_Expr(self, node: AST, dfltChaining: bool = True) -> str:
    """Return representation of nested expression."""
    # An Expr node is a bare expression statement; its representation is
    # simply that of the wrapped value.
    inner = node.value
    return self.visit(inner)
|
Return representation of nested expression.
|
def _dict_native_ok(d):
    """
    This checks if a dictionary can be saved natively as HDF5 groups.
    If it can't, it will be pickled.
    """
    # Natively storable: fewer than 256 entries and every key is a string.
    return len(d) < 256 and all(
        isinstance(key, six.string_types) for key in d)
|
This checks if a dictionary can be saved natively as HDF5 groups.
If it can't, it will be pickled.
|
def wait(self, condition, interval, *args):
    """
    :Description: Create an interval in vm.window, will clear interval after condition met.
    :param condition: Condition in javascript to pass to interval.
    :example: '$el.innerText == "cheesecake"'
    :example: '$el[0].disabled && $el[1].disabled'
    :type condition: string
    :param interval: Time in milliseconds to execute interval.
    :type interval: int or float
    :param *args: WebElement or selector of condition element.
    :type *args: tuple
    :return: string
    """
    # Short random window-scoped handle names (e.g. "$1a2b3c4d").
    hid = lambda: '$' + str(uuid.uuid1())[:8]
    handle = hid()
    if len(args):
        element_handle = hid()
        self.browser.execute_script(
            'window["{}"] = [];'.format(element_handle)
        )  # create element container in window scope
        for el in args:
            if isinstance(el, string_types):
                # assume selector
                self.browser.execute_script('window["{}"].push({});'.format(
                    element_handle, 'function() { return document.querySelector("%s") }' % el))
            else:
                # assume web element
                self.browser.execute_script(
                    'window["{}"].push(arguments[0]);'.format(element_handle), el)
        if len(args) == 1:
            condition = condition.replace('$el', 'window["{}"][0]{}'.format(
                element_handle, '()' if isinstance(args[0], string_types) else ''))
        else:
            regex = r'(\$el\[([0-9]{0,3})\])'
            results = re.findall(regex, condition)  # [('$el[0]', '0'), ('$el[1]', '1'), ...]
            for result in results:
                # The regex guarantees the captured index is only digits, so
                # parse with int() instead of eval() — eval would execute
                # arbitrary code if the pattern were ever loosened.
                pos = int(result[1])
                if pos + 1 <= len(args):
                    condition = condition.replace(result[0], 'window["{}"][{}]{}'.format(
                        element_handle, pos, '()' if isinstance(args[pos], string_types) else ''))
        self.browser.execute_script(
            'window["%s"]=window.setInterval(function(){if(%s){ \
            (window.clearInterval(window["%s"])||true)&&(window["%s"]=-1); \
            delete window["%s"];}}, %s)' % (handle, condition, handle, handle, \
            element_handle, interval))  # create interval
    else:
        self.browser.execute_script(
            'window["%s"]=window.setInterval(function(){if(%s){ \
            (window.clearInterval(window["%s"])||true)&&(window["%s"]=-1);}}, %s)' % (
            handle, condition, handle, handle, interval))  # create interval
    return handle
|
:Description: Create an interval in vm.window, will clear interval after condition met.
:param condition: Condition in javascript to pass to interval.
:example: '$el.innerText == "cheesecake"'
:example: '$el[0].disabled && $el[1].disabled'
:type condition: string
:param interval: Time in milliseconds to execute interval.
:type interval: int or float
:param *args: WebElement or selector of condition element.
:type *args: tuple
:return: string
|
def get_class_name(class_key, classification_key):
    """Helper to get class name from a class_key of a classification.

    :param class_key: The key of the class.
    :type class_key: str

    :param classification_key: The key of a classification.
    :type classification_key: str

    :returns: The name of the class, or ``class_key`` itself when no
        matching class is found.
    :rtype: str
    """
    classes = definition(classification_key)['classes']
    # First class whose key matches wins; fall back to the raw key.
    match = next(
        (c for c in classes if c.get('key') == class_key), None)
    if match is not None:
        return match.get('name', class_key)
    return class_key
|
Helper to get class name from a class_key of a classification.
:param class_key: The key of the class.
:type class_key: str
:type classification_key: The key of a classification.
:param classification_key: str
:returns: The name of the class.
:rtype: str
|
def build_target_areas(entry):
    """Cleanup the raw target areas description string"""
    # The raw field is a semicolon-delimited list; strip the whitespace
    # around each individual area name.
    raw = str(entry['cap:areaDesc'])
    return [area.strip() for area in raw.split(';')]
|
Cleanup the raw target areas description string
|
def uavionix_adsb_out_cfg_encode(self, ICAO, callsign, emitterType, aircraftSize, gpsOffsetLat, gpsOffsetLon, stallSpeed, rfSelect):
    '''
    Static data to configure the ADS-B transponder (send within 10 sec of
    a POR and every 10 sec thereafter)

    ICAO              : Vehicle address (24 bit) (uint32_t)
    callsign          : Vehicle identifier (8 characters, null terminated, valid characters are A-Z, 0-9, " " only) (char)
    emitterType       : Transmitting vehicle type. See ADSB_EMITTER_TYPE enum (uint8_t)
    aircraftSize      : Aircraft length and width encoding (table 2-35 of DO-282B) (uint8_t)
    gpsOffsetLat      : GPS antenna lateral offset (table 2-36 of DO-282B) (uint8_t)
    gpsOffsetLon      : GPS antenna longitudinal offset from nose [if non-zero, take position (in meters) divide by 2 and add one] (table 2-37 DO-282B) (uint8_t)
    stallSpeed        : Aircraft stall speed in cm/s (uint16_t)
    rfSelect          : ADS-B transponder reciever and transmit enable flags (uint8_t)
    '''
    # Thin wrapper: construct the MAVLink message object with the fields
    # passed straight through in protocol order.
    return MAVLink_uavionix_adsb_out_cfg_message(ICAO, callsign, emitterType, aircraftSize, gpsOffsetLat, gpsOffsetLon, stallSpeed, rfSelect)
|
Static data to configure the ADS-B transponder (send within 10 sec of
a POR and every 10 sec thereafter)
ICAO : Vehicle address (24 bit) (uint32_t)
callsign : Vehicle identifier (8 characters, null terminated, valid characters are A-Z, 0-9, " " only) (char)
emitterType : Transmitting vehicle type. See ADSB_EMITTER_TYPE enum (uint8_t)
aircraftSize : Aircraft length and width encoding (table 2-35 of DO-282B) (uint8_t)
gpsOffsetLat : GPS antenna lateral offset (table 2-36 of DO-282B) (uint8_t)
gpsOffsetLon : GPS antenna longitudinal offset from nose [if non-zero, take position (in meters) divide by 2 and add one] (table 2-37 DO-282B) (uint8_t)
stallSpeed : Aircraft stall speed in cm/s (uint16_t)
rfSelect : ADS-B transponder reciever and transmit enable flags (uint8_t)
|
def _read_journal(self):
    """Extracts the USN journal from the disk and parses its content."""
    # Locate the $UsnJrnl file by inode on the first detected root
    # filesystem, pull it into a temporary file, and parse the records.
    root = self._filesystem.inspect_get_roots()[0]
    journal_inode = self._filesystem.stat('C:\\$Extend\\$UsnJrnl')['ino']
    with NamedTemporaryFile(buffering=0) as extracted:
        self._filesystem.download_inode(root, journal_inode, extracted.name)
        records = usn_journal(extracted.name)
        return parse_journal(records)
|
Extracts the USN journal from the disk and parses its content.
|
def set(self, subscribed, ignored):
    """Set the user's subscription for this subscription

    :param bool subscribed: (required), determines if notifications should
        be received from this thread.
    :param bool ignored: (required), determines if notifications should be
        ignored from this thread.
    """
    payload = dumps({'subscribed': subscribed, 'ignored': ignored})
    response_json = self._json(self._put(self._api, data=payload), 200)
    # Re-initialize this object from the API response so the local state
    # reflects the updated subscription.
    self.__init__(response_json, self._session)
|
Set the user's subscription for this subscription
:param bool subscribed: (required), determines if notifications should
be received from this thread.
:param bool ignored: (required), determines if notifications should be
ignored from this thread.
|
def hourly_horizontal_infrared(self):
    """A data collection containing hourly horizontal infrared intensity in W/m2.
    """
    sky_cover = self._sky_condition.hourly_sky_cover
    db_temp = self._dry_bulb_condition.hourly_values
    dp_temp = self._humidity_condition.hourly_dew_point_values(
        self._dry_bulb_condition)
    # Horizontal IR is derived hour-by-hour from the sky cover and the
    # dry-bulb / dew-point temperatures.
    horiz_ir = [
        calc_horizontal_infrared(sky_cover[i], db_temp[i], dp_temp[i])
        for i in xrange(len(sky_cover))
    ]
    return self._get_daily_data_collections(
        energyflux.HorizontalInfraredRadiationIntensity(), 'W/m2', horiz_ir)
|
A data collection containing hourly horizontal infrared intensity in W/m2.
|
def sign(self):
    """
    Returns 1 if a credit should increase the value of the
    account, or -1 if a credit should decrease the value of the
    account.

    This is based on the account type as is standard accounting practice.
    The signs can be derived from the following expanded form of the
    accounting equation:

        Assets = Liabilities + Equity + (Income - Expenses)

    Which can be rearranged as:

        0 = Liabilities + Equity + Income - Expenses - Assets

    Further details here: https://en.wikipedia.org/wiki/Debits_and_credits
    """
    # Asset and expense accounts are debit-normal, so a credit reduces them.
    debit_normal_types = (Account.TYPES.asset, Account.TYPES.expense)
    if self.type in debit_normal_types:
        return -1
    return 1
|
Returns 1 if a credit should increase the value of the
account, or -1 if a credit should decrease the value of the
account.
This is based on the account type as is standard accounting practice.
The signs can be derrived from the following expanded form of the
accounting equation:
Assets = Liabilities + Equity + (Income - Expenses)
Which can be rearranged as:
0 = Liabilities + Equity + Income - Expenses - Assets
Further details here: https://en.wikipedia.org/wiki/Debits_and_credits
|
def dtype(self):
    """Data-type of the array's elements.

    Returns
    -------
    numpy.dtype
        This NDArray's data type.

    Examples
    --------
    >>> x = mx.nd.zeros((2,3))
    >>> x.dtype
    <type 'numpy.float32'>
    >>> y = mx.nd.zeros((2,3), dtype='int32')
    >>> y.dtype
    <type 'numpy.int32'>
    """
    # Ask the backend for the dtype enum of this handle, then translate
    # the enum value into the corresponding numpy type.
    dtype_id = ctypes.c_int()
    check_call(_LIB.MXNDArrayGetDType(
        self.handle, ctypes.byref(dtype_id)))
    return _DTYPE_MX_TO_NP[dtype_id.value]
|
Data-type of the array's elements.
Returns
-------
numpy.dtype
This NDArray's data type.
Examples
--------
>>> x = mx.nd.zeros((2,3))
>>> x.dtype
<type 'numpy.float32'>
>>> y = mx.nd.zeros((2,3), dtype='int32')
>>> y.dtype
<type 'numpy.int32'>
|
def _gen_ticket(prefix=None, lg=settings.CAS_TICKET_LEN):
    """
    Generate a ticket with prefix ``prefix`` and length ``lg``

    :param unicode prefix: An optional prefix (probably ST, PT, PGT or PGTIOU)
    :param int lg: The length of the generated ticket (with the prefix)
    :return: A randomly generated ticket of length ``lg``
    :rtype: unicode
    """
    # CAS tickets are security tokens: draw from the OS CSPRNG rather than
    # the predictable default Mersenne Twister.
    rng = random.SystemRandom()
    alphabet = string.ascii_letters + string.digits
    if prefix is not None:
        # Reserve room for the prefix and the joining '-' so the total
        # ticket length is exactly ``lg``.
        random_part = u''.join(
            rng.choice(alphabet) for _ in range(lg - len(prefix) - 1)
        )
        return u'%s-%s' % (prefix, random_part)
    # Without a prefix there is no '-' separator, so the random part itself
    # must be ``lg`` characters (the old code produced only lg - 1).
    return u''.join(rng.choice(alphabet) for _ in range(lg))
|
Generate a ticket with prefix ``prefix`` and length ``lg``
:param unicode prefix: An optional prefix (probably ST, PT, PGT or PGTIOU)
:param int lg: The length of the generated ticket (with the prefix)
:return: A randomlly generated ticket of length ``lg``
:rtype: unicode
|
def block_splitter(data, block_size):
    """
    Creates a generator by slicing ``data`` into chunks of ``block_size``.

    >>> data = range(10)
    >>> list(block_splitter(data, 2))
    [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]

    If ``data`` cannot be evenly divided by ``block_size``, the last block will
    simply be the remainder of the data. Example:

    >>> data = range(10)
    >>> list(block_splitter(data, 3))
    [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]

    If the ``block_size`` is greater than the total length of ``data``, a
    single block will be generated:

    >>> data = range(3)
    >>> list(block_splitter(data, 4))
    [[0, 1, 2]]

    :param data:
        Any iterable. If ``data`` is a generator, it will be exhausted,
        obviously.
    :param int block_size:
        Desired (maximum) block size.
    """
    buf = []
    # Iterate the data directly; the index from the original `enumerate`
    # was never used.
    for datum in data:
        buf.append(datum)
        if len(buf) == block_size:
            yield buf
            buf = []

    # If there's anything leftover (a partial block),
    # yield it as well.
    if buf:
        yield buf
|
Creates a generator by slicing ``data`` into chunks of ``block_size``.
>>> data = range(10)
>>> list(block_splitter(data, 2))
[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]
If ``data`` cannot be evenly divided by ``block_size``, the last block will
simply be the remainder of the data. Example:
>>> data = range(10)
>>> list(block_splitter(data, 3))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
If the ``block_size`` is greater than the total length of ``data``, a
single block will be generated:
>>> data = range(3)
>>> list(block_splitter(data, 4))
[[0, 1, 2]]
:param data:
Any iterable. If ``data`` is a generator, it will be exhausted,
obviously.
:param int block_size:
Desired (maximum) block size.
|
def getmakeidfobject(idf, key, name):
    """get idfobject or make it if it does not exist"""
    existing = idf.getobject(key, name)
    # Fall back to creating a fresh object when the lookup yields nothing.
    return existing or idf.newidfobject(key, Name=name)
|
get idfobject or make it if it does not exist
|
def has_permissions(**perms):
    """A :func:`.check` that is added that checks if the member has all of
    the permissions necessary.

    The permissions passed in must be exactly like the properties shown under
    :class:`.discord.Permissions`.

    This check raises a special exception, :exc:`.MissingPermissions`
    that is inherited from :exc:`.CheckFailure`.

    Parameters
    ------------
    perms
        An argument list of permissions to check for.

    Example
    ---------

    .. code-block:: python3

        @bot.command()
        @commands.has_permissions(manage_messages=True)
        async def test(ctx):
            await ctx.send('You can manage messages.')
    """
    def predicate(ctx):
        # Resolve the author's effective permissions in this channel and
        # collect every requested permission that doesn't match.
        resolved = ctx.channel.permissions_for(ctx.author)
        missing = [
            name for name, expected in perms.items()
            if getattr(resolved, name, None) != expected
        ]
        if missing:
            raise MissingPermissions(missing)
        return True

    return check(predicate)
|
A :func:`.check` that is added that checks if the member has all of
the permissions necessary.
The permissions passed in must be exactly like the properties shown under
:class:`.discord.Permissions`.
This check raises a special exception, :exc:`.MissingPermissions`
that is inherited from :exc:`.CheckFailure`.
Parameters
------------
perms
An argument list of permissions to check for.
Example
---------
.. code-block:: python3
@bot.command()
@commands.has_permissions(manage_messages=True)
async def test(ctx):
await ctx.send('You can manage messages.')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.